Linux Audio

Check our new training course

Real-Time Linux with PREEMPT_RT training

Feb 18-20, 2025
Register
Loading...
Note: File does not exist in v3.5.6.
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2021, MediaTek Inc.
   4 * Copyright (c) 2021-2022, Intel Corporation.
   5 *
   6 * Authors:
   7 *  Amir Hanania <amir.hanania@intel.com>
   8 *  Haijun Liu <haijun.liu@mediatek.com>
   9 *  Moises Veleta <moises.veleta@intel.com>
  10 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
  11 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
  12 *
  13 * Contributors:
  14 *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
  15 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
  16 *  Eliot Lee <eliot.lee@intel.com>
  17 */
  18
  19#include <linux/bits.h>
  20#include <linux/bitops.h>
  21#include <linux/delay.h>
  22#include <linux/device.h>
  23#include <linux/dmapool.h>
  24#include <linux/dma-mapping.h>
  25#include <linux/dma-direction.h>
  26#include <linux/gfp.h>
  27#include <linux/io.h>
  28#include <linux/io-64-nonatomic-lo-hi.h>
  29#include <linux/iopoll.h>
  30#include <linux/irqreturn.h>
  31#include <linux/kernel.h>
  32#include <linux/kthread.h>
  33#include <linux/list.h>
  34#include <linux/netdevice.h>
  35#include <linux/pci.h>
  36#include <linux/pm_runtime.h>
  37#include <linux/sched.h>
  38#include <linux/skbuff.h>
  39#include <linux/slab.h>
  40#include <linux/spinlock.h>
  41#include <linux/types.h>
  42#include <linux/wait.h>
  43#include <linux/workqueue.h>
  44
  45#include "t7xx_cldma.h"
  46#include "t7xx_hif_cldma.h"
  47#include "t7xx_mhccif.h"
  48#include "t7xx_pci.h"
  49#include "t7xx_pcie_mac.h"
  50#include "t7xx_port_proxy.h"
  51#include "t7xx_reg.h"
  52#include "t7xx_state_monitor.h"
  53
  54#define MAX_TX_BUDGET			16
  55#define MAX_RX_BUDGET			16
  56
  57#define CHECK_Q_STOP_TIMEOUT_US		1000000
  58#define CHECK_Q_STOP_STEP_US		10000
  59
  60#define CLDMA_JUMBO_BUFF_SZ		(63 * 1024 + sizeof(struct ccci_header))
  61
  62static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
  63				     enum mtk_txrx tx_rx, unsigned int index)
  64{
  65	queue->dir = tx_rx;
  66	queue->index = index;
  67	queue->md_ctrl = md_ctrl;
  68	queue->tr_ring = NULL;
  69	queue->tr_done = NULL;
  70	queue->tx_next = NULL;
  71}
  72
  73static void md_cd_queue_struct_init(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
  74				    enum mtk_txrx tx_rx, unsigned int index)
  75{
  76	md_cd_queue_struct_reset(queue, md_ctrl, tx_rx, index);
  77	init_waitqueue_head(&queue->req_wq);
  78	spin_lock_init(&queue->ring_lock);
  79}
  80
  81static void t7xx_cldma_gpd_set_data_ptr(struct cldma_gpd *gpd, dma_addr_t data_ptr)
  82{
  83	gpd->data_buff_bd_ptr_h = cpu_to_le32(upper_32_bits(data_ptr));
  84	gpd->data_buff_bd_ptr_l = cpu_to_le32(lower_32_bits(data_ptr));
  85}
  86
  87static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_ptr)
  88{
  89	gpd->next_gpd_ptr_h = cpu_to_le32(upper_32_bits(next_ptr));
  90	gpd->next_gpd_ptr_l = cpu_to_le32(lower_32_bits(next_ptr));
  91}
  92
/* Allocate an skb of @size for @req and DMA-map it for device-to-host
 * (DMA_FROM_DEVICE) transfers.  On success req->skb and req->mapped_buff
 * are both valid; on any failure both are left cleared and -ENOMEM is
 * returned.  @gfp_mask lets atomic-context callers pass GFP_ATOMIC.
 */
static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
					size_t size, gfp_t gfp_mask)
{
	req->skb = __dev_alloc_skb(size, gfp_mask);
	if (!req->skb)
		return -ENOMEM;

	req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) {
		/* Undo the skb allocation so the request holds no half-state */
		dev_kfree_skb_any(req->skb);
		req->skb = NULL;
		req->mapped_buff = 0;
		dev_err(md_ctrl->dev, "DMA mapping failed\n");
		return -ENOMEM;
	}

	return 0;
}
 111
/* Reap received packets from one RX queue's GPD ring.
 *
 * Walks the ring from tr_done: for each GPD the hardware has released
 * (HWO bit clear, skb present) the buffer is unmapped, the skb is sized
 * from the GPD byte count and handed to md_ctrl->recv_skb().  The refill
 * slot then gets a fresh mapped skb and its GPD is re-armed with HWO set.
 *
 * If a GPD still has HWO set, the hardware's current descriptor address is
 * polled (up to 100 iterations, 1us apart) to distinguish "queue drained"
 * from "hardware still writing this GPD".
 *
 * Return: 0 when drained or polling gave up, -ENODATA when the ring is not
 * initialized, -ENODEV on PCIe link loss, or a negative error from
 * recv_skb() / refill allocation.  *over_budget is set when the loop exited
 * because @budget packets were processed (caller should reschedule).
 */
static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool *over_budget)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	unsigned int hwo_polling_count = 0;
	struct t7xx_cldma_hw *hw_info;
	bool rx_not_done = true;
	unsigned long flags;
	int count = 0;

	hw_info = &md_ctrl->hw_info;

	do {
		struct cldma_request *req;
		struct cldma_gpd *gpd;
		struct sk_buff *skb;
		int ret;

		req = queue->tr_done;
		if (!req)
			return -ENODATA;

		gpd = req->gpd;
		if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
			dma_addr_t gpd_addr;

			if (!pci_device_is_present(to_pci_dev(md_ctrl->dev))) {
				dev_err(md_ctrl->dev, "PCIe Link disconnected\n");
				return -ENODEV;
			}

			/* Current RX descriptor address: 64-bit registers in a
			 * per-queue table starting at DL_CURRENT_ADDRL_0.
			 */
			gpd_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_DL_CURRENT_ADDRL_0 +
					    queue->index * sizeof(u64));
			/* HW is parked on this GPD (nothing more to reap) or we
			 * have polled long enough — stop without error.
			 */
			if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100)
				return 0;

			udelay(1);
			continue;
		}

		hwo_polling_count = 0;
		skb = req->skb;

		if (req->mapped_buff) {
			dma_unmap_single(md_ctrl->dev, req->mapped_buff,
					 queue->tr_ring->pkt_size, DMA_FROM_DEVICE);
			req->mapped_buff = 0;
		}

		/* Resize the skb to exactly the byte count the HW wrote */
		skb->len = 0;
		skb_reset_tail_pointer(skb);
		skb_put(skb, le16_to_cpu(gpd->data_buff_len));

		ret = md_ctrl->recv_skb(queue, skb);
		/* Break processing, will try again later */
		if (ret < 0)
			return ret;

		req->skb = NULL;
		t7xx_cldma_gpd_set_data_ptr(gpd, 0);

		spin_lock_irqsave(&queue->ring_lock, flags);
		queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
		spin_unlock_irqrestore(&queue->ring_lock, flags);
		req = queue->rx_refill;

		/* Give the refill slot a fresh mapped skb before re-arming it */
		ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL);
		if (ret)
			return ret;

		gpd = req->gpd;
		t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
		gpd->data_buff_len = 0;
		/* Hand the descriptor back to the hardware */
		gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;

		spin_lock_irqsave(&queue->ring_lock, flags);
		queue->rx_refill = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
		spin_unlock_irqrestore(&queue->ring_lock, flags);

		/* Stop once the budget is consumed and a reschedule is due */
		rx_not_done = ++count < budget || !need_resched();
	} while (rx_not_done);

	*over_budget = true;
	return 0;
}
 196
/* Drain an RX queue until the hardware reports no further pending RX
 * interrupt for it.
 *
 * After each reap pass the queue is resumed if it stalled, and the
 * per-queue RX interrupt status is re-read and acked under cldma_lock.
 *
 * Return: 0 when fully drained (including the empty-ring -ENODATA case),
 * -EAGAIN when budget ran out while interrupts are still pending (caller
 * must reschedule), or a negative error from the reap pass.
 */
static int t7xx_cldma_gpd_rx_collect(struct cldma_queue *queue, int budget)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	struct t7xx_cldma_hw *hw_info;
	unsigned int pending_rx_int;
	bool over_budget = false;
	unsigned long flags;
	int ret;

	hw_info = &md_ctrl->hw_info;

	do {
		ret = t7xx_cldma_gpd_rx_from_q(queue, budget, &over_budget);
		if (ret == -ENODATA)
			return 0;
		else if (ret)
			return ret;

		pending_rx_int = 0;

		spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
		if (md_ctrl->rxq_active & BIT(queue->index)) {
			/* Kick the queue if it stopped while we were reaping */
			if (!t7xx_cldma_hw_queue_status(hw_info, queue->index, MTK_RX))
				t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_RX);

			pending_rx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index),
								  MTK_RX);
			if (pending_rx_int) {
				/* Ack now; loop again unless over budget */
				t7xx_cldma_hw_rx_done(hw_info, pending_rx_int);

				if (over_budget) {
					spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
					return -EAGAIN;
				}
			}
		}
		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
	} while (pending_rx_int);

	return 0;
}
 238
/* RX work handler: collect packets for the queue, then re-enable its RX
 * interrupts and release the runtime-PM reference taken by the IRQ path.
 * If collection returned nonzero (-EAGAIN or an error) while the queue is
 * still active, the work requeues itself and keeps the PM reference and
 * interrupt mask as they are.
 */
static void t7xx_cldma_rx_done(struct work_struct *work)
{
	struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	int value;

	value = t7xx_cldma_gpd_rx_collect(queue, queue->budget);
	if (value && md_ctrl->rxq_active & BIT(queue->index)) {
		queue_work(queue->worker, &queue->cldma_work);
		return;
	}

	t7xx_cldma_clear_ip_busy(&md_ctrl->hw_info);
	/* Unmask the interrupts that were disabled when this work was queued */
	t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, queue->index, MTK_RX);
	t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, queue->index, MTK_RX);
	pm_runtime_mark_last_busy(md_ctrl->dev);
	pm_runtime_put_autosuspend(md_ctrl->dev);
}
 257
/* Reclaim completed TX descriptors for one queue.
 *
 * For each GPD the hardware has finished (HWO clear, skb attached), the
 * buffer is unmapped, the skb freed, the queue budget returned and tr_done
 * advanced.  ring_lock is taken per iteration so that the (possibly slow)
 * unmap/free runs unlocked.  Finally wakes up to @count senders blocked in
 * t7xx_cldma_send_skb().
 *
 * NOTE(review): kthread_should_stop() is always false in workqueue context,
 * so the loop is effectively bounded only by the two break conditions —
 * confirm whether a kthread was intended here.
 *
 * Return: number of descriptors reclaimed.
 */
static int t7xx_cldma_gpd_tx_collect(struct cldma_queue *queue)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	unsigned int dma_len, count = 0;
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	unsigned long flags;
	dma_addr_t dma_free;
	struct sk_buff *skb;

	while (!kthread_should_stop()) {
		spin_lock_irqsave(&queue->ring_lock, flags);
		req = queue->tr_done;
		if (!req) {
			spin_unlock_irqrestore(&queue->ring_lock, flags);
			break;
		}
		gpd = req->gpd;
		/* Still owned by HW, or slot never carried a packet: done */
		if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
			spin_unlock_irqrestore(&queue->ring_lock, flags);
			break;
		}
		queue->budget++;
		/* Snapshot what we need, then drop the lock for the unmap/free */
		dma_free = req->mapped_buff;
		dma_len = le16_to_cpu(gpd->data_buff_len);
		skb = req->skb;
		req->skb = NULL;
		queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
		spin_unlock_irqrestore(&queue->ring_lock, flags);

		count++;
		dma_unmap_single(md_ctrl->dev, dma_free, dma_len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}

	if (count)
		wake_up_nr(&queue->req_wq, count);

	return count;
}
 298
/* Handle a TX queue-empty event.
 *
 * If the most recently queued GPD (the one before tx_next) is still owned
 * by the hardware with an skb pending, the queue stopped with work left:
 * verify the HW really halted on that descriptor, then resume the queue.
 * If the HW pointer is elsewhere the queue is not actually empty and an
 * error is logged instead.
 */
static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	struct cldma_request *req;
	dma_addr_t ul_curr_addr;
	unsigned long flags;
	bool pending_gpd;

	if (!(md_ctrl->txq_active & BIT(queue->index)))
		return;

	/* Last descriptor handed to the HW is the predecessor of tx_next */
	spin_lock_irqsave(&queue->ring_lock, flags);
	req = list_prev_entry_circular(queue->tx_next, &queue->tr_ring->gpd_ring, entry);
	spin_unlock_irqrestore(&queue->ring_lock, flags);

	pending_gpd = (req->gpd->flags & GPD_FLAGS_HWO) && req->skb;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (pending_gpd) {
		struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;

		/* Check current processing TGPD, 64-bit address is in a table by Q index */
		ul_curr_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
					queue->index * sizeof(u64));
		if (req->gpd_addr != ul_curr_addr) {
			spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
			dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n",
				md_ctrl->hif_id, queue->index);
			return;
		}

		t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_TX);
	}
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
 334
/* TX work handler: reclaim finished descriptors, service queue-empty and
 * tx-done interrupt bits, then re-enable the queue's TX interrupts and drop
 * the runtime-PM reference taken by the IRQ path.  If new tx-done work
 * arrived meanwhile the work requeues itself (keeping PM ref and masks).
 */
static void t7xx_cldma_tx_done(struct work_struct *work)
{
	struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	struct t7xx_cldma_hw *hw_info;
	unsigned int l2_tx_int;
	unsigned long flags;

	hw_info = &md_ctrl->hw_info;
	t7xx_cldma_gpd_tx_collect(queue);
	/* Read both the tx-done and queue-empty status bits for this queue */
	l2_tx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index) | EQ_STA_BIT(queue->index),
					     MTK_TX);
	if (l2_tx_int & EQ_STA_BIT(queue->index)) {
		t7xx_cldma_hw_tx_done(hw_info, EQ_STA_BIT(queue->index));
		t7xx_cldma_txq_empty_hndl(queue);
	}

	if (l2_tx_int & BIT(queue->index)) {
		t7xx_cldma_hw_tx_done(hw_info, BIT(queue->index));
		/* More completions pending: run again, keep PM ref and masks */
		queue_work(queue->worker, &queue->cldma_work);
		return;
	}

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (md_ctrl->txq_active & BIT(queue->index)) {
		t7xx_cldma_clear_ip_busy(hw_info);
		t7xx_cldma_hw_irq_en_eq(hw_info, queue->index, MTK_TX);
		t7xx_cldma_hw_irq_en_txrx(hw_info, queue->index, MTK_TX);
	}
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	pm_runtime_mark_last_busy(md_ctrl->dev);
	pm_runtime_put_autosuspend(md_ctrl->dev);
}
 369
/* Tear down every request of a ring: unmap the data buffer (direction
 * @tx_rx) when one is mapped and a packet is attached, free the skb,
 * return the GPD to the DMA pool, and free the request itself.
 */
static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl,
				 struct cldma_ring *ring, enum dma_data_direction tx_rx)
{
	struct cldma_request *req_cur, *req_next;

	list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) {
		if (req_cur->mapped_buff && req_cur->skb) {
			dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff,
					 ring->pkt_size, tx_rx);
			req_cur->mapped_buff = 0;
		}

		/* dev_kfree_skb_any() accepts NULL, so no skb check needed */
		dev_kfree_skb_any(req_cur->skb);

		if (req_cur->gpd)
			dma_pool_free(md_ctrl->gpd_dmapool, req_cur->gpd, req_cur->gpd_addr);

		list_del(&req_cur->entry);
		kfree(req_cur);
	}
}
 391
 392static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, size_t pkt_size)
 393{
 394	struct cldma_request *req;
 395	int val;
 396
 397	req = kzalloc(sizeof(*req), GFP_KERNEL);
 398	if (!req)
 399		return NULL;
 400
 401	req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
 402	if (!req->gpd)
 403		goto err_free_req;
 404
 405	val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL);
 406	if (val)
 407		goto err_free_pool;
 408
 409	return req;
 410
 411err_free_pool:
 412	dma_pool_free(md_ctrl->gpd_dmapool, req->gpd, req->gpd_addr);
 413
 414err_free_req:
 415	kfree(req);
 416
 417	return NULL;
 418}
 419
/* Build an RX ring of MAX_RX_BUDGET requests, each with a mapped skb and a
 * hardware-owned GPD, then link the GPDs into a circle.
 *
 * Return: 0 on success, -ENOMEM if any request allocation fails (partially
 * built ring is freed).
 */
static int t7xx_cldma_rx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
{
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	int i;

	INIT_LIST_HEAD(&ring->gpd_ring);
	ring->length = MAX_RX_BUDGET;

	for (i = 0; i < ring->length; i++) {
		req = t7xx_alloc_rx_request(md_ctrl, ring->pkt_size);
		if (!req) {
			t7xx_cldma_ring_free(md_ctrl, ring, DMA_FROM_DEVICE);
			return -ENOMEM;
		}

		gpd = req->gpd;
		t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
		gpd->rx_data_allow_len = cpu_to_le16(ring->pkt_size);
		gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
		INIT_LIST_HEAD(&req->entry);
		list_add_tail(&req->entry, &ring->gpd_ring);
	}

	/* Link previous GPD to next GPD, circular.  On entry `gpd` still holds
	 * the last entry's GPD, so the first iteration links last -> first,
	 * closing the ring; each following iteration links prev -> current.
	 */
	list_for_each_entry(req, &ring->gpd_ring, entry) {
		t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
		gpd = req->gpd;
	}

	return 0;
}
 452
 453static struct cldma_request *t7xx_alloc_tx_request(struct cldma_ctrl *md_ctrl)
 454{
 455	struct cldma_request *req;
 456
 457	req = kzalloc(sizeof(*req), GFP_KERNEL);
 458	if (!req)
 459		return NULL;
 460
 461	req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
 462	if (!req->gpd) {
 463		kfree(req);
 464		return NULL;
 465	}
 466
 467	return req;
 468}
 469
/* Build a TX ring of MAX_TX_BUDGET requests with software-owned GPDs
 * (HWO clear until a packet is queued), then link the GPDs into a circle.
 *
 * Return: 0 on success, -ENOMEM if any request allocation fails (partially
 * built ring is freed).
 */
static int t7xx_cldma_tx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
{
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	int i;

	INIT_LIST_HEAD(&ring->gpd_ring);
	ring->length = MAX_TX_BUDGET;

	for (i = 0; i < ring->length; i++) {
		req = t7xx_alloc_tx_request(md_ctrl);
		if (!req) {
			t7xx_cldma_ring_free(md_ctrl, ring, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		gpd = req->gpd;
		gpd->flags = GPD_FLAGS_IOC;
		INIT_LIST_HEAD(&req->entry);
		list_add_tail(&req->entry, &ring->gpd_ring);
	}

	/* Link previous GPD to next GPD, circular.  `gpd` enters this loop
	 * holding the last entry's GPD, so the first iteration closes the
	 * ring (last -> first); subsequent ones link prev -> current.
	 */
	list_for_each_entry(req, &ring->gpd_ring, entry) {
		t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
		gpd = req->gpd;
	}

	return 0;
}
 500
 501/**
 502 * t7xx_cldma_q_reset() - Reset CLDMA request pointers to their initial values.
 503 * @queue: Pointer to the queue structure.
 504 *
 505 * Called with ring_lock (unless called during initialization phase)
 506 */
 507static void t7xx_cldma_q_reset(struct cldma_queue *queue)
 508{
 509	struct cldma_request *req;
 510
 511	req = list_first_entry(&queue->tr_ring->gpd_ring, struct cldma_request, entry);
 512	queue->tr_done = req;
 513	queue->budget = queue->tr_ring->length;
 514
 515	if (queue->dir == MTK_TX)
 516		queue->tx_next = req;
 517	else
 518		queue->rx_refill = req;
 519}
 520
 521static void t7xx_cldma_rxq_init(struct cldma_queue *queue)
 522{
 523	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
 524
 525	queue->dir = MTK_RX;
 526	queue->tr_ring = &md_ctrl->rx_ring[queue->index];
 527	t7xx_cldma_q_reset(queue);
 528}
 529
 530static void t7xx_cldma_txq_init(struct cldma_queue *queue)
 531{
 532	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
 533
 534	queue->dir = MTK_TX;
 535	queue->tr_ring = &md_ctrl->tx_ring[queue->index];
 536	t7xx_cldma_q_reset(queue);
 537}
 538
/* Enable this CLDMA instance's L1 interrupt line at the PCIe MAC level. */
static void t7xx_cldma_enable_irq(struct cldma_ctrl *md_ctrl)
{
	t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
}
 543
/* Mask this CLDMA instance's L1 interrupt line at the PCIe MAC level. */
static void t7xx_cldma_disable_irq(struct cldma_ctrl *md_ctrl)
{
	t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
}
 548
/* Interrupt dispatcher: read the unmasked L2 TX/RX status, clear any L3
 * error status, ack the L2 bits and queue per-queue work with that queue's
 * interrupts disabled until the work re-enables them.  Each queued work
 * carries a runtime-PM reference released by the work handler.
 */
static void t7xx_cldma_irq_work_cb(struct cldma_ctrl *md_ctrl)
{
	unsigned long l2_tx_int_msk, l2_rx_int_msk, l2_tx_int, l2_rx_int, val;
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	int i;

	/* L2 raw interrupt status, filtered by the current masks */
	l2_tx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
	l2_rx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
	l2_tx_int_msk = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TIMR0);
	l2_rx_int_msk = ioread32(hw_info->ap_ao_base + REG_CLDMA_L2RIMR0);
	l2_tx_int &= ~l2_tx_int_msk;
	l2_rx_int &= ~l2_rx_int_msk;

	if (l2_tx_int) {
		if (l2_tx_int & (TQ_ERR_INT_BITMASK | TQ_ACTIVE_START_ERR_INT_BITMASK)) {
			/* Read and clear L3 TX interrupt status (write-1-to-clear) */
			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
		}

		t7xx_cldma_hw_tx_done(hw_info, l2_tx_int);
		if (l2_tx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
			/* Low bits are tx-done per queue; bits above CLDMA_TXQ_NUM
			 * are the corresponding queue-empty events.
			 */
			for_each_set_bit(i, &l2_tx_int, L2_INT_BIT_COUNT) {
				if (i < CLDMA_TXQ_NUM) {
					pm_runtime_get(md_ctrl->dev);
					t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_TX);
					t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_TX);
					queue_work(md_ctrl->txq[i].worker,
						   &md_ctrl->txq[i].cldma_work);
				} else {
					t7xx_cldma_txq_empty_hndl(&md_ctrl->txq[i - CLDMA_TXQ_NUM]);
				}
			}
		}
	}

	if (l2_rx_int) {
		if (l2_rx_int & (RQ_ERR_INT_BITMASK | RQ_ACTIVE_START_ERR_INT_BITMASK)) {
			/* Read and clear L3 RX interrupt status (write-1-to-clear) */
			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
		}

		t7xx_cldma_hw_rx_done(hw_info, l2_rx_int);
		if (l2_rx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
			/* Fold the queue-empty bits onto the rx-done bits so one
			 * work item serves both events for a queue.
			 */
			l2_rx_int |= l2_rx_int >> CLDMA_RXQ_NUM;
			for_each_set_bit(i, &l2_rx_int, CLDMA_RXQ_NUM) {
				pm_runtime_get(md_ctrl->dev);
				t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_RX);
				t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_RX);
				queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work);
			}
		}
	}
}
 609
 610static bool t7xx_cldma_qs_are_active(struct cldma_ctrl *md_ctrl)
 611{
 612	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
 613	unsigned int tx_active;
 614	unsigned int rx_active;
 615
 616	if (!pci_device_is_present(to_pci_dev(md_ctrl->dev)))
 617		return false;
 618
 619	tx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_TX);
 620	rx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_RX);
 621
 622	return tx_active || rx_active;
 623}
 624
 625/**
 626 * t7xx_cldma_stop() - Stop CLDMA.
 627 * @md_ctrl: CLDMA context structure.
 628 *
 629 * Stop TX and RX queues. Disable L1 and L2 interrupts.
 630 * Clear status registers.
 631 *
 632 * Return:
 633 * * 0		- Success.
 634 * * -ERROR	- Error code from polling cldma_queues_active.
 635 */
 636int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl)
 637{
 638	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
 639	bool active;
 640	int i, ret;
 641
 642	md_ctrl->rxq_active = 0;
 643	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
 644	md_ctrl->txq_active = 0;
 645	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
 646	md_ctrl->txq_started = 0;
 647	t7xx_cldma_disable_irq(md_ctrl);
 648	t7xx_cldma_hw_stop(hw_info, MTK_RX);
 649	t7xx_cldma_hw_stop(hw_info, MTK_TX);
 650	t7xx_cldma_hw_tx_done(hw_info, CLDMA_L2TISAR0_ALL_INT_MASK);
 651	t7xx_cldma_hw_rx_done(hw_info, CLDMA_L2RISAR0_ALL_INT_MASK);
 652
 653	if (md_ctrl->is_late_init) {
 654		for (i = 0; i < CLDMA_TXQ_NUM; i++)
 655			flush_work(&md_ctrl->txq[i].cldma_work);
 656
 657		for (i = 0; i < CLDMA_RXQ_NUM; i++)
 658			flush_work(&md_ctrl->rxq[i].cldma_work);
 659	}
 660
 661	ret = read_poll_timeout(t7xx_cldma_qs_are_active, active, !active, CHECK_Q_STOP_STEP_US,
 662				CHECK_Q_STOP_TIMEOUT_US, true, md_ctrl);
 663	if (ret)
 664		dev_err(md_ctrl->dev, "Could not stop CLDMA%d queues", md_ctrl->hif_id);
 665
 666	return ret;
 667}
 668
/* Undo t7xx_cldma_late_init(): free every TX and RX ring and destroy the
 * GPD DMA pool.  No-op when late init never ran.
 */
static void t7xx_cldma_late_release(struct cldma_ctrl *md_ctrl)
{
	int i;

	if (!md_ctrl->is_late_init)
		return;

	for (i = 0; i < CLDMA_TXQ_NUM; i++)
		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);

	for (i = 0; i < CLDMA_RXQ_NUM; i++)
		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[i], DMA_FROM_DEVICE);

	dma_pool_destroy(md_ctrl->gpd_dmapool);
	md_ctrl->gpd_dmapool = NULL;
	md_ctrl->is_late_init = false;
}
 686
/* Full software reset of the CLDMA instance: deactivate all queues and mask
 * the L1 interrupt, cancel and reset every queue's work and descriptor
 * state, then release all late-init resources (rings, DMA pool).
 */
void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	md_ctrl->txq_active = 0;
	md_ctrl->rxq_active = 0;
	t7xx_cldma_disable_irq(md_ctrl);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
		/* cancel_work_sync() must run unlocked; the reset itself is
		 * done under cldma_lock.
		 */
		cancel_work_sync(&md_ctrl->txq[i].cldma_work);

		spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
		md_cd_queue_struct_reset(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
	}

	for (i = 0; i < CLDMA_RXQ_NUM; i++) {
		cancel_work_sync(&md_ctrl->rxq[i].cldma_work);

		spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
		md_cd_queue_struct_reset(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
	}

	t7xx_cldma_late_release(md_ctrl);
}
 716
 717/**
 718 * t7xx_cldma_start() - Start CLDMA.
 719 * @md_ctrl: CLDMA context structure.
 720 *
 721 * Set TX/RX start address.
 722 * Start all RX queues and enable L2 interrupt.
 723 */
 724void t7xx_cldma_start(struct cldma_ctrl *md_ctrl)
 725{
 726	unsigned long flags;
 727
 728	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
 729	if (md_ctrl->is_late_init) {
 730		struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
 731		int i;
 732
 733		t7xx_cldma_enable_irq(md_ctrl);
 734
 735		for (i = 0; i < CLDMA_TXQ_NUM; i++) {
 736			if (md_ctrl->txq[i].tr_done)
 737				t7xx_cldma_hw_set_start_addr(hw_info, i,
 738							     md_ctrl->txq[i].tr_done->gpd_addr,
 739							     MTK_TX);
 740		}
 741
 742		for (i = 0; i < CLDMA_RXQ_NUM; i++) {
 743			if (md_ctrl->rxq[i].tr_done)
 744				t7xx_cldma_hw_set_start_addr(hw_info, i,
 745							     md_ctrl->rxq[i].tr_done->gpd_addr,
 746							     MTK_RX);
 747		}
 748
 749		/* Enable L2 interrupt */
 750		t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
 751		t7xx_cldma_hw_start(hw_info);
 752		md_ctrl->txq_started = 0;
 753		md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
 754		md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
 755	}
 756	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
 757}
 758
/* Flush one TX queue: reset its ring pointers and return every GPD to
 * software ownership (HWO clear), dropping any queued skbs.  Takes the
 * queue's ring_lock for the whole operation.
 */
static void t7xx_cldma_clear_txq(struct cldma_ctrl *md_ctrl, int qnum)
{
	struct cldma_queue *txq = &md_ctrl->txq[qnum];
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	unsigned long flags;

	spin_lock_irqsave(&txq->ring_lock, flags);
	t7xx_cldma_q_reset(txq);
	list_for_each_entry(req, &txq->tr_ring->gpd_ring, entry) {
		gpd = req->gpd;
		gpd->flags &= ~GPD_FLAGS_HWO;
		t7xx_cldma_gpd_set_data_ptr(gpd, 0);
		gpd->data_buff_len = 0;
		dev_kfree_skb_any(req->skb);
		req->skb = NULL;
	}
	spin_unlock_irqrestore(&txq->ring_lock, flags);
}
 778
/* Flush one RX queue: reset its ring pointers, re-arm every GPD for the
 * hardware (HWO set), rewind existing skbs to empty, and replace any
 * missing skbs (GFP_ATOMIC — we hold ring_lock).
 *
 * Return: 0 on success or a negative error if a replacement skb could not
 * be allocated/mapped (remaining empty slots are left unfilled).
 */
static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum)
{
	struct cldma_queue *rxq = &md_ctrl->rxq[qnum];
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rxq->ring_lock, flags);
	t7xx_cldma_q_reset(rxq);
	list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
		gpd = req->gpd;
		gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
		gpd->data_buff_len = 0;

		if (req->skb) {
			req->skb->len = 0;
			skb_reset_tail_pointer(req->skb);
		}
	}

	/* Second pass: refill slots whose skb was consumed */
	list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
		if (req->skb)
			continue;

		ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC);
		if (ret)
			break;

		t7xx_cldma_gpd_set_data_ptr(req->gpd, req->mapped_buff);
	}
	spin_unlock_irqrestore(&rxq->ring_lock, flags);

	return ret;
}
 814
 815void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
 816{
 817	int i;
 818
 819	if (tx_rx == MTK_TX) {
 820		for (i = 0; i < CLDMA_TXQ_NUM; i++)
 821			t7xx_cldma_clear_txq(md_ctrl, i);
 822	} else {
 823		for (i = 0; i < CLDMA_RXQ_NUM; i++)
 824			t7xx_cldma_clear_rxq(md_ctrl, i);
 825	}
 826}
 827
/* Stop all queues in one direction: mask their interrupts, clear the
 * software active mask and halt the hardware queues, all under cldma_lock
 * so it cannot race with the start/send paths.
 */
void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	unsigned long flags;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, tx_rx);
	t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, tx_rx);
	if (tx_rx == MTK_RX)
		md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
	else
		md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_stop_all_qs(hw_info, tx_rx);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
 843
/* Attach @skb to a TX request: DMA-map the payload, fill the GPD's buffer
 * pointer and length, and set HWO (under cldma_lock) if the queue is still
 * active so the hardware may consume it.
 *
 * Return: 0 on success, -ENOMEM if the DMA mapping failed (GPD untouched).
 */
static int t7xx_cldma_gpd_handle_tx_request(struct cldma_queue *queue, struct cldma_request *tx_req,
					    struct sk_buff *skb)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	struct cldma_gpd *gpd = tx_req->gpd;
	unsigned long flags;

	/* Update GPD */
	tx_req->mapped_buff = dma_map_single(md_ctrl->dev, skb->data, skb->len, DMA_TO_DEVICE);

	if (dma_mapping_error(md_ctrl->dev, tx_req->mapped_buff)) {
		dev_err(md_ctrl->dev, "DMA mapping failed\n");
		return -ENOMEM;
	}

	t7xx_cldma_gpd_set_data_ptr(gpd, tx_req->mapped_buff);
	gpd->data_buff_len = cpu_to_le16(skb->len);

	/* This lock must cover TGPD setting, as even without a resume operation,
	 * CLDMA can send next HWO=1 if last TGPD just finished.
	 */
	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (md_ctrl->txq_active & BIT(queue->index))
		gpd->flags |= GPD_FLAGS_HWO;

	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	tx_req->skb = skb;
	return 0;
}
 874
/* Kick the hardware to send on queue @qno after a GPD was armed.
 * Re-initializes the HW and restores the start address if the device was
 * powered off meanwhile, then either resumes a previously started queue or
 * starts it for the first time.
 *
 * Called with cldma_lock held.
 */
static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno,
				     struct cldma_request *prev_req)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;

	/* Check whether the device was powered off (CLDMA start address is not set) */
	if (!t7xx_cldma_tx_addr_is_set(hw_info, qno)) {
		t7xx_cldma_hw_init(hw_info);
		t7xx_cldma_hw_set_start_addr(hw_info, qno, prev_req->gpd_addr, MTK_TX);
		md_ctrl->txq_started &= ~BIT(qno);
	}

	if (!t7xx_cldma_hw_queue_status(hw_info, qno, MTK_TX)) {
		/* Resume keeps the HW descriptor pointer; start reloads it */
		if (md_ctrl->txq_started & BIT(qno))
			t7xx_cldma_hw_resume_queue(hw_info, qno, MTK_TX);
		else
			t7xx_cldma_hw_start_queue(hw_info, qno, MTK_TX);

		md_ctrl->txq_started |= BIT(qno);
	}
}
 897
 898/**
 899 * t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets.
 900 * @md_ctrl: CLDMA context structure.
 901 * @recv_skb: Receiving skb callback.
 902 */
 903void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
 904			     int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb))
 905{
 906	md_ctrl->recv_skb = recv_skb;
 907}
 908
 909/**
 910 * t7xx_cldma_send_skb() - Send control data to modem.
 911 * @md_ctrl: CLDMA context structure.
 912 * @qno: Queue number.
 913 * @skb: Socket buffer.
 914 *
 915 * Return:
 916 * * 0		- Success.
 917 * * -ENOMEM	- Allocation failure.
 918 * * -EINVAL	- Invalid queue request.
 919 * * -EIO	- Queue is not active.
 920 * * -ETIMEDOUT	- Timeout waiting for the device to wake up.
 921 */
 922int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb)
 923{
 924	struct cldma_request *tx_req;
 925	struct cldma_queue *queue;
 926	unsigned long flags;
 927	int ret;
 928
 929	if (qno >= CLDMA_TXQ_NUM)
 930		return -EINVAL;
 931
 932	ret = pm_runtime_resume_and_get(md_ctrl->dev);
 933	if (ret < 0 && ret != -EACCES)
 934		return ret;
 935
 936	t7xx_pci_disable_sleep(md_ctrl->t7xx_dev);
 937	queue = &md_ctrl->txq[qno];
 938
 939	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
 940	if (!(md_ctrl->txq_active & BIT(qno))) {
 941		ret = -EIO;
 942		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
 943		goto allow_sleep;
 944	}
 945	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
 946
 947	do {
 948		spin_lock_irqsave(&queue->ring_lock, flags);
 949		tx_req = queue->tx_next;
 950		if (queue->budget > 0 && !tx_req->skb) {
 951			struct list_head *gpd_ring = &queue->tr_ring->gpd_ring;
 952
 953			queue->budget--;
 954			t7xx_cldma_gpd_handle_tx_request(queue, tx_req, skb);
 955			queue->tx_next = list_next_entry_circular(tx_req, gpd_ring, entry);
 956			spin_unlock_irqrestore(&queue->ring_lock, flags);
 957
 958			if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
 959				ret = -ETIMEDOUT;
 960				break;
 961			}
 962
 963			/* Protect the access to the modem for queues operations (resume/start)
 964			 * which access shared locations by all the queues.
 965			 * cldma_lock is independent of ring_lock which is per queue.
 966			 */
 967			spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
 968			t7xx_cldma_hw_start_send(md_ctrl, qno, tx_req);
 969			spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
 970
 971			break;
 972		}
 973		spin_unlock_irqrestore(&queue->ring_lock, flags);
 974
 975		if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
 976			ret = -ETIMEDOUT;
 977			break;
 978		}
 979
 980		if (!t7xx_cldma_hw_queue_status(&md_ctrl->hw_info, qno, MTK_TX)) {
 981			spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
 982			t7xx_cldma_hw_resume_queue(&md_ctrl->hw_info, qno, MTK_TX);
 983			spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
 984		}
 985
 986		ret = wait_event_interruptible_exclusive(queue->req_wq, queue->budget > 0);
 987	} while (!ret);
 988
 989allow_sleep:
 990	t7xx_pci_enable_sleep(md_ctrl->t7xx_dev);
 991	pm_runtime_mark_last_busy(md_ctrl->dev);
 992	pm_runtime_put_autosuspend(md_ctrl->dev);
 993	return ret;
 994}
 995
/* Late (post-probe) initialization: create the GPD DMA pool and build all
 * TX and RX rings, then bind each queue to its ring.  The last RX ring uses
 * the jumbo buffer size; the rest use CLDMA_MTU.
 *
 * On failure the already-built rings are unwound via the loop counters
 * (i for TX, j for RX) before returning the error.
 *
 * Return: 0 on success, -EALREADY if already initialized, -ENOMEM on
 * allocation failure.
 */
static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
{
	char dma_pool_name[32];
	int i, j, ret;

	if (md_ctrl->is_late_init) {
		dev_err(md_ctrl->dev, "CLDMA late init was already done\n");
		return -EALREADY;
	}

	snprintf(dma_pool_name, sizeof(dma_pool_name), "cldma_req_hif%d", md_ctrl->hif_id);

	md_ctrl->gpd_dmapool = dma_pool_create(dma_pool_name, md_ctrl->dev,
					       sizeof(struct cldma_gpd), GPD_DMAPOOL_ALIGN, 0);
	if (!md_ctrl->gpd_dmapool) {
		dev_err(md_ctrl->dev, "DMA pool alloc fail\n");
		return -ENOMEM;
	}

	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
		ret = t7xx_cldma_tx_ring_init(md_ctrl, &md_ctrl->tx_ring[i]);
		if (ret) {
			dev_err(md_ctrl->dev, "control TX ring init fail\n");
			goto err_free_tx_ring;
		}

		md_ctrl->tx_ring[i].pkt_size = CLDMA_MTU;
	}

	for (j = 0; j < CLDMA_RXQ_NUM; j++) {
		md_ctrl->rx_ring[j].pkt_size = CLDMA_MTU;

		/* Last RX queue carries jumbo frames */
		if (j == CLDMA_RXQ_NUM - 1)
			md_ctrl->rx_ring[j].pkt_size = CLDMA_JUMBO_BUFF_SZ;

		ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]);
		if (ret) {
			dev_err(md_ctrl->dev, "Control RX ring init fail\n");
			goto err_free_rx_ring;
		}
	}

	for (i = 0; i < CLDMA_TXQ_NUM; i++)
		t7xx_cldma_txq_init(&md_ctrl->txq[i]);

	for (j = 0; j < CLDMA_RXQ_NUM; j++)
		t7xx_cldma_rxq_init(&md_ctrl->rxq[j]);

	md_ctrl->is_late_init = true;
	return 0;

err_free_rx_ring:
	while (j--)
		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[j], DMA_FROM_DEVICE);

err_free_tx_ring:
	while (i--)
		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);

	return ret;
}
1057
1058static void __iomem *t7xx_pcie_addr_transfer(void __iomem *addr, u32 addr_trs1, u32 phy_addr)
1059{
1060	return addr + phy_addr - addr_trs1;
1061}
1062
1063static void t7xx_hw_info_init(struct cldma_ctrl *md_ctrl)
1064{
1065	struct t7xx_addr_base *pbase = &md_ctrl->t7xx_dev->base_addr;
1066	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
1067	u32 phy_ao_base, phy_pd_base;
1068
1069	hw_info->hw_mode = MODE_BIT_64;
1070
1071	if (md_ctrl->hif_id == CLDMA_ID_MD) {
1072		phy_ao_base = CLDMA1_AO_BASE;
1073		phy_pd_base = CLDMA1_PD_BASE;
1074		hw_info->phy_interrupt_id = CLDMA1_INT;
1075	} else {
1076		phy_ao_base = CLDMA0_AO_BASE;
1077		phy_pd_base = CLDMA0_PD_BASE;
1078		hw_info->phy_interrupt_id = CLDMA0_INT;
1079	}
1080
1081	hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
1082						      pbase->pcie_dev_reg_trsl_addr, phy_ao_base);
1083	hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
1084						       pbase->pcie_dev_reg_trsl_addr, phy_pd_base);
1085}
1086
/* Default RX callback installed at alloc time: drop the packet.
 * Used until a real consumer registers its own recv_skb handler.
 */
static int t7xx_cldma_default_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return 0;
}
1092
1093int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
1094{
1095	struct device *dev = &t7xx_dev->pdev->dev;
1096	struct cldma_ctrl *md_ctrl;
1097
1098	md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL);
1099	if (!md_ctrl)
1100		return -ENOMEM;
1101
1102	md_ctrl->t7xx_dev = t7xx_dev;
1103	md_ctrl->dev = dev;
1104	md_ctrl->hif_id = hif_id;
1105	md_ctrl->recv_skb = t7xx_cldma_default_recv_skb;
1106	t7xx_hw_info_init(md_ctrl);
1107	t7xx_dev->md->md_ctrl[hif_id] = md_ctrl;
1108	return 0;
1109}
1110
/* Early-resume PM callback: restore the CLDMA hardware state and restart the
 * RX side. Queue start addresses are reprogrammed from the software ring
 * cursors (tx_next / tr_done) so the hardware resumes where it left off.
 * The whole sequence runs under cldma_lock to serialize against other
 * queue start/resume paths.
 */
static void t7xx_cldma_resume_early(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	struct cldma_ctrl *md_ctrl = entity_param;
	struct t7xx_cldma_hw *hw_info;
	unsigned long flags;
	int qno_t;

	hw_info = &md_ctrl->hw_info;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_restore(hw_info);
	for (qno_t = 0; qno_t < CLDMA_TXQ_NUM; qno_t++) {
		t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->txq[qno_t].tx_next->gpd_addr,
					     MTK_TX);
		t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->rxq[qno_t].tr_done->gpd_addr,
					     MTK_RX);
	}
	t7xx_cldma_enable_irq(md_ctrl);
	t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
	md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_irq_en_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
	t7xx_cldma_hw_irq_en_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
1135
/* Resume PM callback (runs after resume_early restored RX): mark the TX
 * queues active again and re-enable TX interrupts, then unmask the modem's
 * D2H software interrupt for the modem-facing CLDMA instance.
 * Always returns 0.
 */
static int t7xx_cldma_resume(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	struct cldma_ctrl *md_ctrl = entity_param;
	unsigned long flags;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
	t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	/* Only the modem-facing instance manages the MHCCIF D2H interrupt mask */
	if (md_ctrl->hif_id == CLDMA_ID_MD)
		t7xx_mhccif_mask_clr(t7xx_dev, D2H_SW_INT_MASK);

	return 0;
}
1152
/* Late-suspend PM callback: quiesce the RX side. Interrupts are disabled
 * before the queues are stopped so no RX completion can race the shutdown;
 * finally the controller's interrupt source is disabled entirely.
 */
static void t7xx_cldma_suspend_late(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	struct cldma_ctrl *md_ctrl = entity_param;
	struct t7xx_cldma_hw *hw_info;
	unsigned long flags;

	hw_info = &md_ctrl->hw_info;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
	t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
	md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
	t7xx_cldma_clear_ip_busy(hw_info);
	t7xx_cldma_disable_irq(md_ctrl);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
1170
/* Suspend PM callback: mask the modem's D2H software interrupt (modem-facing
 * instance only), then quiesce the TX side — disable TX interrupts, mark the
 * TX queues inactive and stop them. The RX side is handled later by
 * t7xx_cldma_suspend_late(). Always returns 0.
 */
static int t7xx_cldma_suspend(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	struct cldma_ctrl *md_ctrl = entity_param;
	struct t7xx_cldma_hw *hw_info;
	unsigned long flags;

	if (md_ctrl->hif_id == CLDMA_ID_MD)
		t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK);

	hw_info = &md_ctrl->hw_info;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_TX);
	t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_TX);
	md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
	/* Force the send path to reprogram queue start addresses on resume */
	md_ctrl->txq_started = 0;
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	return 0;
}
1192
1193static int t7xx_cldma_pm_init(struct cldma_ctrl *md_ctrl)
1194{
1195	md_ctrl->pm_entity = kzalloc(sizeof(*md_ctrl->pm_entity), GFP_KERNEL);
1196	if (!md_ctrl->pm_entity)
1197		return -ENOMEM;
1198
1199	md_ctrl->pm_entity->entity_param = md_ctrl;
1200
1201	if (md_ctrl->hif_id == CLDMA_ID_MD)
1202		md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL1;
1203	else
1204		md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL2;
1205
1206	md_ctrl->pm_entity->suspend = t7xx_cldma_suspend;
1207	md_ctrl->pm_entity->suspend_late = t7xx_cldma_suspend_late;
1208	md_ctrl->pm_entity->resume = t7xx_cldma_resume;
1209	md_ctrl->pm_entity->resume_early = t7xx_cldma_resume_early;
1210
1211	return t7xx_pci_pm_entity_register(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
1212}
1213
/* Unregister and free the PM entity created by t7xx_cldma_pm_init().
 * Safe to call only once; the pointer is cleared to guard against reuse.
 * Returns 0 on success, -EINVAL if no entity was registered.
 */
static int t7xx_cldma_pm_uninit(struct cldma_ctrl *md_ctrl)
{
	if (!md_ctrl->pm_entity)
		return -EINVAL;

	t7xx_pci_pm_entity_unregister(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
	kfree(md_ctrl->pm_entity);
	md_ctrl->pm_entity = NULL;
	return 0;
}
1224
/* Bring the CLDMA hardware into a clean initial state: stop both directions,
 * acknowledge any pending TX/RX completion and empty status bits, then run
 * the controller init sequence. Performed under cldma_lock so no concurrent
 * queue operation can observe the half-reset hardware.
 */
void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	unsigned long flags;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_stop(hw_info, MTK_TX);
	t7xx_cldma_hw_stop(hw_info, MTK_RX);
	t7xx_cldma_hw_rx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
	t7xx_cldma_hw_tx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
	t7xx_cldma_hw_init(hw_info);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
1238
/* CLDMA interrupt handler registered with the PCIe MAC dispatcher.
 * Sequence: quiesce the interrupt line, service the queues, acknowledge the
 * status, then re-arm the line.
 * NOTE(review): clear_int/set_int are assumed to mask/unmask the interrupt
 * at the PCIe MAC level (vs. clear_int_status acking it) — confirm against
 * t7xx_pcie_mac.c.
 */
static irqreturn_t t7xx_cldma_isr_handler(int irq, void *data)
{
	struct cldma_ctrl *md_ctrl = data;
	u32 interrupt;

	interrupt = md_ctrl->hw_info.phy_interrupt_id;
	t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, interrupt);
	t7xx_cldma_irq_work_cb(md_ctrl);
	t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, interrupt);
	t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, interrupt);
	return IRQ_HANDLED;
}
1251
1252static void t7xx_cldma_destroy_wqs(struct cldma_ctrl *md_ctrl)
1253{
1254	int i;
1255
1256	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
1257		if (md_ctrl->txq[i].worker) {
1258			destroy_workqueue(md_ctrl->txq[i].worker);
1259			md_ctrl->txq[i].worker = NULL;
1260		}
1261	}
1262
1263	for (i = 0; i < CLDMA_RXQ_NUM; i++) {
1264		if (md_ctrl->rxq[i].worker) {
1265			destroy_workqueue(md_ctrl->rxq[i].worker);
1266			md_ctrl->rxq[i].worker = NULL;
1267		}
1268	}
1269}
1270
1271/**
1272 * t7xx_cldma_init() - Initialize CLDMA.
1273 * @md_ctrl: CLDMA context structure.
1274 *
1275 * Allocate and initialize device power management entity.
1276 * Initialize HIF TX/RX queue structure.
1277 * Register CLDMA callback ISR with PCIe driver.
1278 *
1279 * Return:
1280 * * 0		- Success.
1281 * * -ERROR	- Error code from failure sub-initializations.
1282 */
1283int t7xx_cldma_init(struct cldma_ctrl *md_ctrl)
1284{
1285	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
1286	int ret, i;
1287
1288	md_ctrl->txq_active = 0;
1289	md_ctrl->rxq_active = 0;
1290	md_ctrl->is_late_init = false;
1291
1292	ret = t7xx_cldma_pm_init(md_ctrl);
1293	if (ret)
1294		return ret;
1295
1296	spin_lock_init(&md_ctrl->cldma_lock);
1297
1298	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
1299		md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
1300		md_ctrl->txq[i].worker =
1301			alloc_ordered_workqueue("md_hif%d_tx%d_worker",
1302					WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI),
1303					md_ctrl->hif_id, i);
1304		if (!md_ctrl->txq[i].worker)
1305			goto err_workqueue;
1306
1307		INIT_WORK(&md_ctrl->txq[i].cldma_work, t7xx_cldma_tx_done);
1308	}
1309
1310	for (i = 0; i < CLDMA_RXQ_NUM; i++) {
1311		md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
1312		INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done);
1313
1314		md_ctrl->rxq[i].worker =
1315			alloc_ordered_workqueue("md_hif%d_rx%d_worker",
1316						WQ_MEM_RECLAIM,
1317						md_ctrl->hif_id, i);
1318		if (!md_ctrl->rxq[i].worker)
1319			goto err_workqueue;
1320	}
1321
1322	t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
1323	md_ctrl->t7xx_dev->intr_handler[hw_info->phy_interrupt_id] = t7xx_cldma_isr_handler;
1324	md_ctrl->t7xx_dev->intr_thread[hw_info->phy_interrupt_id] = NULL;
1325	md_ctrl->t7xx_dev->callback_param[hw_info->phy_interrupt_id] = md_ctrl;
1326	t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
1327	return 0;
1328
1329err_workqueue:
1330	t7xx_cldma_destroy_wqs(md_ctrl);
1331	t7xx_cldma_pm_uninit(md_ctrl);
1332	return -ENOMEM;
1333}
1334
/* Re-create the rings/queues with the current configuration by releasing the
 * late-init state and running late init again.
 * NOTE(review): the return value of t7xx_cldma_late_init() (-ENOMEM on ring
 * allocation failure) is discarded here — consider propagating it to callers.
 */
void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl)
{
	t7xx_cldma_late_release(md_ctrl);
	t7xx_cldma_late_init(md_ctrl);
}
1340
/* Full teardown, reversing t7xx_cldma_init()/t7xx_cldma_late_init():
 * stop the hardware first, then free rings, destroy workqueues and
 * unregister the PM entity.
 */
void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl)
{
	t7xx_cldma_stop(md_ctrl);
	t7xx_cldma_late_release(md_ctrl);
	t7xx_cldma_destroy_wqs(md_ctrl);
	t7xx_cldma_pm_uninit(md_ctrl);
}