// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TX_PRI0_RATE	0x30
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN	4

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

#define CPDMA_MAX_RLIM_CNT	16384

struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	void			*sw_token;
	u32			sw_buffer;
	u32			sw_len;
};

struct cpdma_desc_pool {
	phys_addr_t		phys;
	dma_addr_t		hw_addr;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc;
	struct device		*dev;
	struct gen_pool		*gen_pool;
};

enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
	int			chan_num;
	int			num_rx_desc;	/* number of RX descriptors */
	int			num_tx_desc;	/* number of TX descriptors */
};

struct cpdma_chan {
	struct cpdma_desc __iomem	*head, *tail;
	void __iomem			*hdp, *cp, *rxfree;
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	int				chan_num;
	spinlock_t			lock;
	int				count;
	u32				desc_num;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int				int_set, int_clear, td;
	int				weight;
	u32				rate_factor;
	u32				rate;
};

struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

struct submit_info {
	struct cpdma_chan *chan;
	int directed;
	void *token;
	void *data_virt;
	dma_addr_t data_dma;
	int len;
};

static struct cpdma_control_info controls[] = {
	[CPDMA_TX_RLIM]		  = {CPDMA_DMACONTROL,	8,  0xffff, ACCESS_RW},
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};

#define tx_chan_num(chan)	(chan)
#define rx_chan_num(chan)	((chan) + CPDMA_MAX_CHANNELS)
#define is_rx_chan(chan)	((chan)->chan_num >= CPDMA_MAX_CHANNELS)
#define is_tx_chan(chan)	(!is_rx_chan(chan))
#define __chan_linear(chan_num)	((chan_num) & (CPDMA_MAX_CHANNELS - 1))
#define chan_linear(chan)	__chan_linear((chan)->chan_num)
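
/* Channel numbering example (a sketch, assuming CPDMA_MAX_CHANNELS is 8
 * as in the accompanying header): TX and RX directions of the same
 * hardware channel get distinct slots in ctlr->channels[], and
 * chan_linear() recovers the hardware index:
 *
 *	tx_chan_num(3)			== 3
 *	rx_chan_num(3)			== 11
 *	__chan_linear(rx_chan_num(3))	== 3
 */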

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		readl((chan)->fld)
#define desc_read(desc, fld)		readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	writel((u32)(v), &(desc)->fld)

#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)
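
/* For illustration: a submission on a TX channel with directed == 2
 * ORs the following into the descriptor mode word, steering the packet
 * to switch port 2; RX channels and other directed values leave the
 * mode word untouched:
 *
 *	mode |= CPDMA_DESC_TO_PORT_EN | (2 << CPDMA_TO_PORT_SHIFT);
 */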

#define CPDMA_DMA_EXT_MAP		BIT(16)

static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
{
	struct cpdma_desc_pool *pool = ctlr->pool;

	if (!pool)
		return;

	WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
	     "cpdma_desc_pool size %zd != avail %zd",
	     gen_pool_size(pool->gen_pool),
	     gen_pool_avail(pool->gen_pool));
	if (pool->cpumap)
		dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
}

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details
 */
static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
{
	struct cpdma_params *cpdma_params = &ctlr->params;
	struct cpdma_desc_pool *pool;
	int ret = -ENOMEM;

	pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		goto gen_pool_create_fail;
	ctlr->pool = pool;

	pool->mem_size = cpdma_params->desc_mem_size;
	pool->desc_size = ALIGN(sizeof(struct cpdma_desc),
				cpdma_params->desc_align);
	pool->num_desc = pool->mem_size / pool->desc_size;

	if (cpdma_params->descs_pool_size) {
		/* recalculate the memory size required for the cpdma
		 * descriptor pool based on the number of descriptors
		 * specified by the user; if that size exceeds the CPPI
		 * internal RAM size (desc_mem_size), switch to DDR
		 */
		pool->num_desc = cpdma_params->descs_pool_size;
		pool->mem_size = pool->desc_size * pool->num_desc;
		if (pool->mem_size > cpdma_params->desc_mem_size)
			cpdma_params->desc_mem_phys = 0;
	}

	pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size),
					      -1, "cpdma");
	if (IS_ERR(pool->gen_pool)) {
		ret = PTR_ERR(pool->gen_pool);
		dev_err(ctlr->dev, "pool create failed %d\n", ret);
		goto gen_pool_create_fail;
	}

	if (cpdma_params->desc_mem_phys) {
		pool->phys = cpdma_params->desc_mem_phys;
		pool->iomap = devm_ioremap(ctlr->dev, pool->phys,
					   pool->mem_size);
		pool->hw_addr = cpdma_params->desc_hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(ctlr->dev, pool->mem_size,
						  &pool->hw_addr, GFP_KERNEL);
		pool->iomap = (void __iomem __force *)pool->cpumap;
		pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
	}

	if (!pool->iomap)
		goto gen_pool_create_fail;

	ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
				pool->phys, pool->mem_size, -1);
	if (ret < 0) {
		dev_err(ctlr->dev, "pool add failed %d\n", ret);
		goto gen_pool_add_virt_fail;
	}

	return 0;

gen_pool_add_virt_fail:
	cpdma_desc_pool_destroy(ctlr);
gen_pool_create_fail:
	ctlr->pool = NULL;
	return ret;
}

static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}
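
/* desc_phys() and desc_from_phys() are inverses: they translate between
 * a descriptor's address inside pool->iomap and its bus address relative
 * to pool->hw_addr. With made-up addresses, if pool->iomap is 0xd0800000
 * and pool->hw_addr is 0x01e20000, the descriptor at 0xd0800040 has DMA
 * address 0x01e20040, and desc_from_phys(pool, 0x01e20040) maps back to
 * 0xd0800040.
 */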

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
	return (struct cpdma_desc __iomem *)
		gen_pool_alloc(pool->gen_pool, pool->desc_size);
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
}

static int _cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	struct cpdma_control_info *info = &controls[control];
	u32 val;

	if (!ctlr->params.has_ext_regs)
		return -ENOTSUPP;

	if (ctlr->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	if (control < 0 || control >= ARRAY_SIZE(controls))
		return -ENOENT;

	if ((info->access & ACCESS_WO) != ACCESS_WO)
		return -EPERM;

	val = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);

	return 0;
}

static int _cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	struct cpdma_control_info *info = &controls[control];
	int ret;

	if (!ctlr->params.has_ext_regs)
		return -ENOTSUPP;

	if (ctlr->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	if (control < 0 || control >= ARRAY_SIZE(controls))
		return -ENOENT;

	if ((info->access & ACCESS_RO) != ACCESS_RO)
		return -EPERM;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
	return ret;
}

/* cpdma_chan_set_chan_shaper - set shaper for a channel
 * Has to be called under ctlr lock
 */
static int cpdma_chan_set_chan_shaper(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	u32 rate_reg;
	u32 rmask;
	int ret;

	if (!chan->rate)
		return 0;

	rate_reg = CPDMA_TX_PRI0_RATE + 4 * chan->chan_num;
	dma_reg_write(ctlr, rate_reg, chan->rate_factor);

	rmask = _cpdma_control_get(ctlr, CPDMA_TX_RLIM);
	rmask |= chan->mask;

	ret = _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
	return ret;
}

static int cpdma_chan_on(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

/* cpdma_chan_fit_rate - set rate for a channel and check if it's possible.
 * rmask - mask of rate limited channels
 * Returns 0 on success, -EINVAL if the requested rate layout is invalid.
 */
static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate,
			       u32 *rmask, int *prio_mode)
{
	struct cpdma_ctlr *ctlr = ch->ctlr;
	struct cpdma_chan *chan;
	u32 old_rate = ch->rate;
	u32 new_rmask = 0;
	int rlim = 0;
	int i;

	for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (chan == ch)
			chan->rate = rate;

		if (chan->rate) {
			rlim = 1;
			new_rmask |= chan->mask;
			continue;
		}

		if (rlim)
			goto err;
	}

	*rmask = new_rmask;
	*prio_mode = rlim;
	return 0;

err:
	ch->rate = old_rate;
	dev_err(ctlr->dev, "Upper cpdma ch%d is not rate limited\n",
		chan->chan_num);
	return -EINVAL;
}

static int cpdma_chan_set_factors(struct cpdma_ctlr *ctlr,
				  struct cpdma_chan *ch)
{
	u32 delta = UINT_MAX, prev_delta = UINT_MAX, best_delta = UINT_MAX;
	u32 best_send_cnt = 0, best_idle_cnt = 0;
	u32 new_rate, best_rate = 0, rate_reg;
	u64 send_cnt, idle_cnt;
	u32 min_send_cnt, freq;
	u64 dividend, divisor;

	if (!ch->rate) {
		ch->rate_factor = 0;
		goto set_factor;
	}

	freq = ctlr->params.bus_freq_mhz * 1000 * 32;
	if (!freq) {
		dev_err(ctlr->dev, "The bus frequency is not set\n");
		return -EINVAL;
	}

	min_send_cnt = freq - ch->rate;
	send_cnt = DIV_ROUND_UP(min_send_cnt, ch->rate);
	while (send_cnt <= CPDMA_MAX_RLIM_CNT) {
		dividend = ch->rate * send_cnt;
		divisor = min_send_cnt;
		idle_cnt = DIV_ROUND_CLOSEST_ULL(dividend, divisor);

		dividend = freq * idle_cnt;
		divisor = idle_cnt + send_cnt;
		new_rate = DIV_ROUND_CLOSEST_ULL(dividend, divisor);

		delta = new_rate >= ch->rate ? new_rate - ch->rate : delta;
		if (delta < best_delta) {
			best_delta = delta;
			best_send_cnt = send_cnt;
			best_idle_cnt = idle_cnt;
			best_rate = new_rate;

			if (!delta)
				break;
		}

		if (prev_delta >= delta) {
			prev_delta = delta;
			send_cnt++;
			continue;
		}

		idle_cnt++;
		dividend = freq * idle_cnt;
		send_cnt = DIV_ROUND_CLOSEST_ULL(dividend, ch->rate);
		send_cnt -= idle_cnt;
		prev_delta = UINT_MAX;
	}

	ch->rate = best_rate;
	ch->rate_factor = best_send_cnt | (best_idle_cnt << 16);

set_factor:
	rate_reg = CPDMA_TX_PRI0_RATE + 4 * ch->chan_num;
	dma_reg_write(ctlr, rate_reg, ch->rate_factor);
	return 0;
}
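
/* Worked example of the factor search above (illustrative numbers, not
 * from any particular board): with bus_freq_mhz = 250, freq becomes
 * 250 * 1000 * 32 = 8,000,000 Kb/s. Requesting rate = 100,000 Kb/s
 * (100 Mb/s) gives min_send_cnt = 7,900,000 and an initial
 * send_cnt = DIV_ROUND_UP(7900000, 100000) = 79. The first iteration
 * yields idle_cnt = 1 and new_rate = 8000000 * 1 / 80 = 100,000, an
 * exact match, so the loop stops with
 * rate_factor = 79 | (1 << 16) = 0x1004f.
 */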

struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	ctlr->chan_num = 0;
	spin_lock_init(&ctlr->lock);

	if (cpdma_desc_pool_create(ctlr))
		return NULL;
	/* split pool equally between RX/TX by default */
	ctlr->num_tx_desc = ctlr->pool->num_desc / 2;
	ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc;

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
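
/* Minimal bring-up sketch (error handling trimmed; the register offsets
 * below are placeholders, real values come from the SoC's memory map and
 * the remaining struct cpdma_params fields in davinci_cpdma.h):
 *
 *	struct cpdma_params params = {
 *		.dev		= dev,
 *		.dmaregs	= dma_base,
 *		.txhdp		= dma_base + 0x600,	// placeholder offset
 *		.num_chan	= 8,
 *		.desc_mem_size	= SZ_8K,
 *		.desc_align	= 16,
 *		.bus_freq_mhz	= 250,
 *	};
 *	struct cpdma_ctlr *dma = cpdma_ctlr_create(&params);
 *	struct cpdma_chan *txch = cpdma_chan_create(dma, 0, tx_handler, 0);
 *	struct cpdma_chan *rxch = cpdma_chan_create(dma, 0, rx_handler, 1);
 *
 *	cpdma_ctlr_start(dma);
 */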

int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	struct cpdma_chan *chan;
	unsigned long flags;
	int i, prio_mode;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (timeout) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
			udelay(10);
			timeout--;
		}
		WARN_ON(!timeout);
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		writel(0, ctlr->params.txhdp + 4 * i);
		writel(0, ctlr->params.rxhdp + 4 * i);
		writel(0, ctlr->params.txcp + 4 * i);
		writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	prio_mode = 0;
	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		chan = ctlr->channels[i];
		if (chan) {
			cpdma_chan_set_chan_shaper(chan);
			cpdma_chan_on(chan);

			/* fixed prio mode is switched off only if all tx
			 * channels are rate limited
			 */
			if (is_tx_chan(chan) && !chan->rate)
				prio_mode = 1;
		}
	}

	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
	_cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 0);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}

int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;
	spin_unlock_irqrestore(&ctlr->lock, flags);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	spin_lock_irqsave(&ctlr->lock, flags);
	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr);
	return ret;
}

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}

u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
{
	return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
}

u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
{
	return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
}

static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
				 int rx, int desc_num,
				 int per_ch_desc)
{
	struct cpdma_chan *chan, *most_chan = NULL;
	int desc_cnt = desc_num;
	int most_dnum = 0;
	int min, max, i;

	if (!desc_num)
		return;

	if (rx) {
		min = rx_chan_num(0);
		max = rx_chan_num(CPDMA_MAX_CHANNELS);
	} else {
		min = tx_chan_num(0);
		max = tx_chan_num(CPDMA_MAX_CHANNELS);
	}

	for (i = min; i < max; i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (chan->weight)
			chan->desc_num = (chan->weight * desc_num) / 100;
		else
			chan->desc_num = per_ch_desc;

		desc_cnt -= chan->desc_num;

		if (most_dnum < chan->desc_num) {
			most_dnum = chan->desc_num;
			most_chan = chan;
		}
	}
	/* give the remainder to the channel with the most descriptors */
	if (most_chan)
		most_chan->desc_num += desc_cnt;
}

/*
 * cpdma_chan_split_pool - Splits ctrl pool between all channels.
 * Has to be called under ctlr lock
 */
static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
{
	int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
	int free_rx_num = 0, free_tx_num = 0;
	int rx_weight = 0, tx_weight = 0;
	int tx_desc_num, rx_desc_num;
	struct cpdma_chan *chan;
	int i;

	if (!ctlr->chan_num)
		return 0;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (is_rx_chan(chan)) {
			if (!chan->weight)
				free_rx_num++;
			rx_weight += chan->weight;
		} else {
			if (!chan->weight)
				free_tx_num++;
			tx_weight += chan->weight;
		}
	}

	if (rx_weight > 100 || tx_weight > 100)
		return -EINVAL;

	tx_desc_num = ctlr->num_tx_desc;
	rx_desc_num = ctlr->num_rx_desc;

	if (free_tx_num) {
		tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100;
		tx_per_ch_desc /= free_tx_num;
	}
	if (free_rx_num) {
		rx_per_ch_desc = rx_desc_num - (rx_weight * rx_desc_num) / 100;
		rx_per_ch_desc /= free_rx_num;
	}

	cpdma_chan_set_descs(ctlr, 0, tx_desc_num, tx_per_ch_desc);
	cpdma_chan_set_descs(ctlr, 1, rx_desc_num, rx_per_ch_desc);

	return 0;
}

/* cpdma_chan_set_weight - set the weight of a channel as a percentage.
 * Tx and Rx channels are weighted separately, so each direction has its
 * own 100%. The weight is used to split cpdma resources between the
 * channels in the required proportion, in particular the number of
 * descriptors. The channel rate alone is not enough to derive a weight,
 * as that would also require the maximum rate of the interface.
 * If weight == 0, the channel uses whatever descriptors are left over
 * by the weighted channels.
 */
int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
{
	struct cpdma_ctlr *ctlr = ch->ctlr;
	unsigned long flags, ch_flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	spin_lock_irqsave(&ch->lock, ch_flags);
	if (ch->weight == weight) {
		spin_unlock_irqrestore(&ch->lock, ch_flags);
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return 0;
	}
	ch->weight = weight;
	spin_unlock_irqrestore(&ch->lock, ch_flags);

	/* re-split pool using new channel weight */
	ret = cpdma_chan_split_pool(ctlr);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}

/* cpdma_chan_get_min_rate - get minimum allowed rate for channel
 * Should be called before cpdma_chan_set_rate.
 * Returns min rate in Kb/s
 */
u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr)
{
	unsigned int dividend, divisor;

	dividend = ctlr->params.bus_freq_mhz * 32 * 1000;
	divisor = 1 + CPDMA_MAX_RLIM_CNT;

	return DIV_ROUND_UP(dividend, divisor);
}
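
/* Worked example (illustrative): with bus_freq_mhz = 250 the dividend is
 * 250 * 32 * 1000 = 8,000,000 Kb/s and the divisor is
 * 1 + CPDMA_MAX_RLIM_CNT = 16385, so the minimum configurable rate is
 * DIV_ROUND_UP(8000000, 16385) = 489 Kb/s.
 */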

/* cpdma_chan_set_rate - limit bandwidth for a transmit channel.
 * The bandwidth-limited channels have to be in order beginning from lowest.
 * ch - transmit channel the bandwidth is configured for
 * rate - bandwidth in Kb/s; if 0, the shaper is switched off
 */
int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
{
	unsigned long flags, ch_flags;
	struct cpdma_ctlr *ctlr;
	int ret, prio_mode;
	u32 rmask;

	if (!ch || !is_tx_chan(ch))
		return -EINVAL;

	if (ch->rate == rate)
		return rate;

	ctlr = ch->ctlr;
	spin_lock_irqsave(&ctlr->lock, flags);
	spin_lock_irqsave(&ch->lock, ch_flags);

	ret = cpdma_chan_fit_rate(ch, rate, &rmask, &prio_mode);
	if (ret)
		goto err;

	ret = cpdma_chan_set_factors(ctlr, ch);
	if (ret)
		goto err;

	spin_unlock_irqrestore(&ch->lock, ch_flags);

	/* switch on shapers */
	_cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;

err:
	spin_unlock_irqrestore(&ch->lock, ch_flags);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
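
/* Usage sketch (hypothetical channel pointers): cpdma_chan_fit_rate()
 * requires that every existing TX channel above a shaped one be shaped
 * too, so shapers are enabled starting from the highest channel:
 *
 *	cpdma_chan_set_rate(tx1, 100000);	// ok: tx1 shaped first
 *	cpdma_chan_set_rate(tx0, 50000);	// ok: now both are shaped
 *
 * Shaping only tx0 while tx1 exists unshaped fails with -EINVAL
 * ("Upper cpdma ch1 is not rate limited").
 */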

u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
{
	unsigned long flags;
	u32 rate;

	spin_lock_irqsave(&ch->lock, flags);
	rate = ch->rate;
	spin_unlock_irqrestore(&ch->lock, flags);

	return rate;
}

struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler, int rx_type)
{
	int offset = chan_num * 4;
	struct cpdma_chan *chan;
	unsigned long flags;

	chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return ERR_PTR(-EINVAL);

	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->channels[chan_num]) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		devm_kfree(ctlr->dev, chan);
		return ERR_PTR(-EBUSY);
	}

	chan->ctlr	= ctlr;
	chan->state	= CPDMA_STATE_IDLE;
	chan->chan_num	= chan_num;
	chan->handler	= handler;
	chan->rate	= 0;
	chan->weight	= 0;

	if (is_rx_chan(chan)) {
		chan->hdp	= ctlr->params.rxhdp + offset;
		chan->cp	= ctlr->params.rxcp + offset;
		chan->rxfree	= ctlr->params.rxfree + offset;
		chan->int_set	= CPDMA_RXINTMASKSET;
		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
		chan->td	= CPDMA_RXTEARDOWN;
		chan->dir	= DMA_FROM_DEVICE;
	} else {
		chan->hdp	= ctlr->params.txhdp + offset;
		chan->cp	= ctlr->params.txcp + offset;
		chan->int_set	= CPDMA_TXINTMASKSET;
		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
		chan->td	= CPDMA_TXTEARDOWN;
		chan->dir	= DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	ctlr->chan_num++;

	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;
}
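
/* A sketch of a completion handler (cpdma_handler_fn is declared in
 * davinci_cpdma.h as taking the submitter's token, the completed length
 * and a status code). A TX handler could simply free the skb that was
 * passed in as the token:
 *
 *	static void tx_handler(void *token, int len, int status)
 *	{
 *		dev_kfree_skb_any(token);
 *	}
 */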

int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
{
	unsigned long flags;
	int desc_num;

	spin_lock_irqsave(&chan->lock, flags);
	desc_num = chan->desc_num;
	spin_unlock_irqrestore(&chan->lock, flags);

	return desc_num;
}

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	ctlr->chan_num--;
	devm_kfree(ctlr->dev, chan);
	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}

int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *prev = chan->tail;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	u32 mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}

static int cpdma_chan_submit_si(struct submit_info *si)
{
	struct cpdma_chan *chan = si->chan;
	struct cpdma_ctlr *ctlr = chan->ctlr;
	int len = si->len;
	struct cpdma_desc __iomem *desc;
	dma_addr_t buffer;
	u32 mode;
	int ret;

	if (chan->count >= chan->desc_num) {
		chan->stats.desc_alloc_fail++;
		return -ENOMEM;
	}

	desc = cpdma_desc_alloc(ctlr->pool);
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		return -ENOMEM;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, si->directed);

	if (si->data_dma) {
		buffer = si->data_dma;
		dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir);
	} else {
		buffer = dma_map_single(ctlr->dev, si->data_virt, len,
					chan->dir);
		ret = dma_mapping_error(ctlr->dev, buffer);
		if (ret) {
			cpdma_desc_free(ctlr->pool, desc, 1);
			return -EINVAL;
		}
	}

	/* Relaxed IO accessors can be used here as there is a read barrier
	 * at the end of the write sequence.
	 */
	writel_relaxed(0, &desc->hw_next);
	writel_relaxed(buffer, &desc->hw_buffer);
	writel_relaxed(len, &desc->hw_len);
	writel_relaxed(mode | len, &desc->hw_mode);
	writel_relaxed((uintptr_t)si->token, &desc->sw_token);
	writel_relaxed(buffer, &desc->sw_buffer);
	writel_relaxed(si->data_dma ? len | CPDMA_DMA_EXT_MAP : len,
		       &desc->sw_len);
	desc_read(desc, sw_len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;
	return 0;
}

int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
			   int len, int directed)
{
	struct submit_info si;
	unsigned long flags;
	int ret;

	si.chan = chan;
	si.token = token;
	si.data_virt = data;
	si.data_dma = 0;
	si.len = len;
	si.directed = directed;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	ret = cpdma_chan_submit_si(&si);
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}

int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
				  dma_addr_t data, int len, int directed)
{
	struct submit_info si;
	unsigned long flags;
	int ret;

	si.chan = chan;
	si.token = token;
	si.data_virt = NULL;
	si.data_dma = data;
	si.len = len;
	si.directed = directed;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	ret = cpdma_chan_submit_si(&si);
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct submit_info si;
	unsigned long flags;
	int ret;

	si.chan = chan;
	si.token = token;
	si.data_virt = data;
	si.data_dma = 0;
	si.len = len;
	si.directed = directed;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	ret = cpdma_chan_submit_si(&si);
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
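
/* Fast-path usage sketch (hypothetical skb/txch variables): the skb is
 * handed in as the token so the completion handler can free it, and
 * directed == 0 leaves port steering to the switch:
 *
 *	ret = cpdma_chan_submit(txch, skb, skb->data, skb->len, 0);
 *
 * A negative return means the descriptor quota was exhausted (-ENOMEM),
 * DMA mapping failed (-EINVAL) or the channel is not active (-EINVAL).
 */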

int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
			     dma_addr_t data, int len, int directed)
{
	struct submit_info si;
	unsigned long flags;
	int ret;

	si.chan = chan;
	si.token = token;
	si.data_virt = NULL;
	si.data_dma = data;
	si.len = len;
	si.directed = directed;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	ret = cpdma_chan_submit_si(&si);
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}

bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	bool free_tx_desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	free_tx_desc = (chan->count < chan->desc_num) &&
		       gen_pool_avail(pool->gen_pool);
	spin_unlock_irqrestore(&chan->lock, flags);
	return free_tx_desc;
}

static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t buff_dma;
	int origlen;
	uintptr_t token;

	token = desc_read(desc, sw_token);
	origlen = desc_read(desc, sw_len);

	buff_dma = desc_read(desc, sw_buffer);
	if (origlen & CPDMA_DMA_EXT_MAP) {
		origlen &= ~CPDMA_DMA_EXT_MAP;
		dma_sync_single_for_cpu(ctlr->dev, buff_dma, origlen,
					chan->dir);
	} else {
		dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	}

	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)((void *)token, outlen, status);
}

static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *desc;
	int status, outlen;
	int cb_status = 0;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status = desc_read(desc, hw_mode);
	outlen = status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			   CPDMA_DESC_PORT_MASK | CPDMA_RX_VLAN_ENCAP);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if ((status & CPDMA_DESC_EOQ) && chan->head) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
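
/* NAPI-style poll sketch (not the actual cpsw code; CPDMA_EOI_RX is
 * assumed to come from davinci_cpdma.h): drain up to "budget" completed
 * descriptors, then re-arm interrupts once the channel runs dry:
 *
 *	int done = cpdma_chan_process(rxch, budget);
 *
 *	if (done < budget) {
 *		napi_complete(napi);
 *		cpdma_ctlr_eoi(dma, CPDMA_EOI_RX);
 *	}
 */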

int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = cpdma_chan_set_chan_shaper(chan);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	if (ret)
		return ret;

	ret = cpdma_chan_on(chan);
	if (ret)
		return ret;

	return 0;
}

int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;
	int ret;
	unsigned timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);

		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = _cpdma_control_get(ctlr, control);
	spin_unlock_irqrestore(&ctlr->lock, flags);

	return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = _cpdma_control_set(ctlr, control, value);
	spin_unlock_irqrestore(&ctlr->lock, flags);

	return ret;
}

int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr)
{
	return ctlr->num_rx_desc;
}

int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
{
	return ctlr->num_tx_desc;
}

int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
{
	unsigned long flags;
	int temp, ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	temp = ctlr->num_rx_desc;
	ctlr->num_rx_desc = num_rx_desc;
	ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
	ret = cpdma_chan_split_pool(ctlr);
	if (ret) {
		ctlr->num_rx_desc = temp;
		ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);

	return ret;
}
1/*
2 * Texas Instruments CPDMA Driver
3 *
4 * Copyright (C) 2010 Texas Instruments
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation version 2.
9 *
10 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11 * kind, whether express or implied; without even the implied warranty
12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15#include <linux/kernel.h>
16#include <linux/spinlock.h>
17#include <linux/device.h>
18#include <linux/module.h>
19#include <linux/slab.h>
20#include <linux/err.h>
21#include <linux/dma-mapping.h>
22#include <linux/io.h>
23#include <linux/delay.h>
24
25#include "davinci_cpdma.h"
26
27/* DMA Registers */
28#define CPDMA_TXIDVER 0x00
29#define CPDMA_TXCONTROL 0x04
30#define CPDMA_TXTEARDOWN 0x08
31#define CPDMA_RXIDVER 0x10
32#define CPDMA_RXCONTROL 0x14
33#define CPDMA_SOFTRESET 0x1c
34#define CPDMA_RXTEARDOWN 0x18
35#define CPDMA_TXINTSTATRAW 0x80
36#define CPDMA_TXINTSTATMASKED 0x84
37#define CPDMA_TXINTMASKSET 0x88
38#define CPDMA_TXINTMASKCLEAR 0x8c
39#define CPDMA_MACINVECTOR 0x90
40#define CPDMA_MACEOIVECTOR 0x94
41#define CPDMA_RXINTSTATRAW 0xa0
42#define CPDMA_RXINTSTATMASKED 0xa4
43#define CPDMA_RXINTMASKSET 0xa8
44#define CPDMA_RXINTMASKCLEAR 0xac
45#define CPDMA_DMAINTSTATRAW 0xb0
46#define CPDMA_DMAINTSTATMASKED 0xb4
47#define CPDMA_DMAINTMASKSET 0xb8
48#define CPDMA_DMAINTMASKCLEAR 0xbc
49#define CPDMA_DMAINT_HOSTERR BIT(1)
50
51/* the following exist only if has_ext_regs is set */
52#define CPDMA_DMACONTROL 0x20
53#define CPDMA_DMASTATUS 0x24
54#define CPDMA_RXBUFFOFS 0x28
55#define CPDMA_EM_CONTROL 0x2c
56
57/* Descriptor mode bits */
58#define CPDMA_DESC_SOP BIT(31)
59#define CPDMA_DESC_EOP BIT(30)
60#define CPDMA_DESC_OWNER BIT(29)
61#define CPDMA_DESC_EOQ BIT(28)
62#define CPDMA_DESC_TD_COMPLETE BIT(27)
63#define CPDMA_DESC_PASS_CRC BIT(26)
64#define CPDMA_DESC_TO_PORT_EN BIT(20)
65#define CPDMA_TO_PORT_SHIFT 16
66#define CPDMA_DESC_PORT_MASK (BIT(18) | BIT(17) | BIT(16))
67#define CPDMA_DESC_CRC_LEN 4
68
69#define CPDMA_TEARDOWN_VALUE 0xfffffffc
70
71struct cpdma_desc {
72 /* hardware fields */
73 u32 hw_next;
74 u32 hw_buffer;
75 u32 hw_len;
76 u32 hw_mode;
77 /* software fields */
78 void *sw_token;
79 u32 sw_buffer;
80 u32 sw_len;
81};
82
83struct cpdma_desc_pool {
84 phys_addr_t phys;
85 dma_addr_t hw_addr;
86 void __iomem *iomap; /* ioremap map */
87 void *cpumap; /* dma_alloc map */
88 int desc_size, mem_size;
89 int num_desc, used_desc;
90 unsigned long *bitmap;
91 struct device *dev;
92 spinlock_t lock;
93};
94
95enum cpdma_state {
96 CPDMA_STATE_IDLE,
97 CPDMA_STATE_ACTIVE,
98 CPDMA_STATE_TEARDOWN,
99};
100
101static const char *cpdma_state_str[] = { "idle", "active", "teardown" };
102
103struct cpdma_ctlr {
104 enum cpdma_state state;
105 struct cpdma_params params;
106 struct device *dev;
107 struct cpdma_desc_pool *pool;
108 spinlock_t lock;
109 struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS];
110};
111
112struct cpdma_chan {
113 struct cpdma_desc __iomem *head, *tail;
114 void __iomem *hdp, *cp, *rxfree;
115 enum cpdma_state state;
116 struct cpdma_ctlr *ctlr;
117 int chan_num;
118 spinlock_t lock;
119 int count;
120 u32 mask;
121 cpdma_handler_fn handler;
122 enum dma_data_direction dir;
123 struct cpdma_chan_stats stats;
124 /* offsets into dmaregs */
125 int int_set, int_clear, td;
126};
127
128/* The following make access to common cpdma_ctlr params more readable */
129#define dmaregs params.dmaregs
130#define num_chan params.num_chan
131
132/* various accessors */
133#define dma_reg_read(ctlr, ofs) __raw_readl((ctlr)->dmaregs + (ofs))
134#define chan_read(chan, fld) __raw_readl((chan)->fld)
135#define desc_read(desc, fld) __raw_readl(&(desc)->fld)
136#define dma_reg_write(ctlr, ofs, v) __raw_writel(v, (ctlr)->dmaregs + (ofs))
137#define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld)
138#define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld)
139
140#define cpdma_desc_to_port(chan, mode, directed) \
141 do { \
142 if (!is_rx_chan(chan) && ((directed == 1) || \
143 (directed == 2))) \
144 mode |= (CPDMA_DESC_TO_PORT_EN | \
145 (directed << CPDMA_TO_PORT_SHIFT)); \
146 } while (0)
147
148/*
149 * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
150 * emac) have dedicated on-chip memory for these descriptors. Some other
151 * devices (e.g. cpsw switches) use plain old memory. Descriptor pools
152 * abstract out these details
153 */
154static struct cpdma_desc_pool *
155cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
156 int size, int align)
157{
158 int bitmap_size;
159 struct cpdma_desc_pool *pool;
160
161 pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
162 if (!pool)
163 goto fail;
164
165 spin_lock_init(&pool->lock);
166
167 pool->dev = dev;
168 pool->mem_size = size;
169 pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
170 pool->num_desc = size / pool->desc_size;
171
172 bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
173 pool->bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
174 if (!pool->bitmap)
175 goto fail;
176
177 if (phys) {
178 pool->phys = phys;
179 pool->iomap = ioremap(phys, size); /* should be memremap? */
180 pool->hw_addr = hw_addr;
181 } else {
182 pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr,
183 GFP_KERNEL);
184 pool->iomap = (void __iomem __force *)pool->cpumap;
185 pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
186 }
187
188 if (pool->iomap)
189 return pool;
190fail:
191 return NULL;
192}
193
194static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
195{
196 if (!pool)
197 return;
198
199 WARN_ON(pool->used_desc);
200 if (pool->cpumap) {
201 dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
202 pool->phys);
203 } else {
204 iounmap(pool->iomap);
205 }
206}
207
208static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
209 struct cpdma_desc __iomem *desc)
210{
211 if (!desc)
212 return 0;
213 return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
214}
215
216static inline struct cpdma_desc __iomem *
217desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
218{
219 return dma ? pool->iomap + dma - pool->hw_addr : NULL;
220}
221
222static struct cpdma_desc __iomem *
223cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
224{
225 unsigned long flags;
226 int index;
227 int desc_start;
228 int desc_end;
229 struct cpdma_desc __iomem *desc = NULL;
230
231 spin_lock_irqsave(&pool->lock, flags);
232
233 if (is_rx) {
234 desc_start = 0;
235 desc_end = pool->num_desc/2;
236 } else {
237 desc_start = pool->num_desc/2;
238 desc_end = pool->num_desc;
239 }
240
241 index = bitmap_find_next_zero_area(pool->bitmap,
242 desc_end, desc_start, num_desc, 0);
243 if (index < desc_end) {
244 bitmap_set(pool->bitmap, index, num_desc);
245 desc = pool->iomap + pool->desc_size * index;
246 pool->used_desc++;
247 }
248
249 spin_unlock_irqrestore(&pool->lock, flags);
250 return desc;
251}
252
253static void cpdma_desc_free(struct cpdma_desc_pool *pool,
254 struct cpdma_desc __iomem *desc, int num_desc)
255{
256 unsigned long flags, index;
257
258 index = ((unsigned long)desc - (unsigned long)pool->iomap) /
259 pool->desc_size;
260 spin_lock_irqsave(&pool->lock, flags);
261 bitmap_clear(pool->bitmap, index, num_desc);
262 pool->used_desc--;
263 spin_unlock_irqrestore(&pool->lock, flags);
264}
265
266struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
267{
268 struct cpdma_ctlr *ctlr;
269
270 ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
271 if (!ctlr)
272 return NULL;
273
274 ctlr->state = CPDMA_STATE_IDLE;
275 ctlr->params = *params;
276 ctlr->dev = params->dev;
277 spin_lock_init(&ctlr->lock);
278
279 ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
280 ctlr->params.desc_mem_phys,
281 ctlr->params.desc_hw_addr,
282 ctlr->params.desc_mem_size,
283 ctlr->params.desc_align);
284 if (!ctlr->pool)
285 return NULL;
286
287 if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
288 ctlr->num_chan = CPDMA_MAX_CHANNELS;
289 return ctlr;
290}
291EXPORT_SYMBOL_GPL(cpdma_ctlr_create);
292
293int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
294{
295 unsigned long flags;
296 int i;
297
298 spin_lock_irqsave(&ctlr->lock, flags);
299 if (ctlr->state != CPDMA_STATE_IDLE) {
300 spin_unlock_irqrestore(&ctlr->lock, flags);
301 return -EBUSY;
302 }
303
304 if (ctlr->params.has_soft_reset) {
305 unsigned timeout = 10 * 100;
306
307 dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
308 while (timeout) {
309 if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
310 break;
311 udelay(10);
312 timeout--;
313 }
314 WARN_ON(!timeout);
315 }
316
317 for (i = 0; i < ctlr->num_chan; i++) {
318 __raw_writel(0, ctlr->params.txhdp + 4 * i);
319 __raw_writel(0, ctlr->params.rxhdp + 4 * i);
320 __raw_writel(0, ctlr->params.txcp + 4 * i);
321 __raw_writel(0, ctlr->params.rxcp + 4 * i);
322 }
323
324 dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
325 dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
326
327 dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
328 dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);
329
330 ctlr->state = CPDMA_STATE_ACTIVE;
331
332 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
333 if (ctlr->channels[i])
334 cpdma_chan_start(ctlr->channels[i]);
335 }
336 spin_unlock_irqrestore(&ctlr->lock, flags);
337 return 0;
338}
339EXPORT_SYMBOL_GPL(cpdma_ctlr_start);
340
341int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
342{
343 unsigned long flags;
344 int i;
345
346 spin_lock_irqsave(&ctlr->lock, flags);
347 if (ctlr->state == CPDMA_STATE_TEARDOWN) {
348 spin_unlock_irqrestore(&ctlr->lock, flags);
349 return -EINVAL;
350 }
351
352 ctlr->state = CPDMA_STATE_TEARDOWN;
353
354 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
355 if (ctlr->channels[i])
356 cpdma_chan_stop(ctlr->channels[i]);
357 }
358
359 dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
360 dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
361
362 dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
363 dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);
364
365 ctlr->state = CPDMA_STATE_IDLE;
366
367 spin_unlock_irqrestore(&ctlr->lock, flags);
368 return 0;
369}
370EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);
371
372int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
373{
374 struct device *dev = ctlr->dev;
375 unsigned long flags;
376 int i;
377
378 spin_lock_irqsave(&ctlr->lock, flags);
379
380 dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);
381
382 dev_info(dev, "CPDMA: txidver: %x",
383 dma_reg_read(ctlr, CPDMA_TXIDVER));
384 dev_info(dev, "CPDMA: txcontrol: %x",
385 dma_reg_read(ctlr, CPDMA_TXCONTROL));
386 dev_info(dev, "CPDMA: txteardown: %x",
387 dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
388 dev_info(dev, "CPDMA: rxidver: %x",
389 dma_reg_read(ctlr, CPDMA_RXIDVER));
390 dev_info(dev, "CPDMA: rxcontrol: %x",
391 dma_reg_read(ctlr, CPDMA_RXCONTROL));
392 dev_info(dev, "CPDMA: softreset: %x",
393 dma_reg_read(ctlr, CPDMA_SOFTRESET));
394 dev_info(dev, "CPDMA: rxteardown: %x",
395 dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
396 dev_info(dev, "CPDMA: txintstatraw: %x",
397 dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
398 dev_info(dev, "CPDMA: txintstatmasked: %x",
399 dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
400 dev_info(dev, "CPDMA: txintmaskset: %x",
401 dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
402 dev_info(dev, "CPDMA: txintmaskclear: %x",
403 dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
404 dev_info(dev, "CPDMA: macinvector: %x",
405 dma_reg_read(ctlr, CPDMA_MACINVECTOR));
406 dev_info(dev, "CPDMA: maceoivector: %x",
407 dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
408 dev_info(dev, "CPDMA: rxintstatraw: %x",
409 dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
410 dev_info(dev, "CPDMA: rxintstatmasked: %x",
411 dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
412 dev_info(dev, "CPDMA: rxintmaskset: %x",
413 dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
414 dev_info(dev, "CPDMA: rxintmaskclear: %x",
415 dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
416 dev_info(dev, "CPDMA: dmaintstatraw: %x",
417 dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
418 dev_info(dev, "CPDMA: dmaintstatmasked: %x",
419 dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
420 dev_info(dev, "CPDMA: dmaintmaskset: %x",
421 dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
422 dev_info(dev, "CPDMA: dmaintmaskclear: %x",
423 dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));
424
425 if (!ctlr->params.has_ext_regs) {
426 dev_info(dev, "CPDMA: dmacontrol: %x",
427 dma_reg_read(ctlr, CPDMA_DMACONTROL));
428 dev_info(dev, "CPDMA: dmastatus: %x",
429 dma_reg_read(ctlr, CPDMA_DMASTATUS));
430 dev_info(dev, "CPDMA: rxbuffofs: %x",
431 dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
432 }
433
434 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
435 if (ctlr->channels[i])
436 cpdma_chan_dump(ctlr->channels[i]);
437
438 spin_unlock_irqrestore(&ctlr->lock, flags);
439 return 0;
440}
441EXPORT_SYMBOL_GPL(cpdma_ctlr_dump);
442
443int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
444{
445 unsigned long flags;
446 int ret = 0, i;
447
448 if (!ctlr)
449 return -EINVAL;
450
451 spin_lock_irqsave(&ctlr->lock, flags);
452 if (ctlr->state != CPDMA_STATE_IDLE)
453 cpdma_ctlr_stop(ctlr);
454
455 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
456 cpdma_chan_destroy(ctlr->channels[i]);
457
458 cpdma_desc_pool_destroy(ctlr->pool);
459 spin_unlock_irqrestore(&ctlr->lock, flags);
460 return ret;
461}
462EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
463
464int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
465{
466 unsigned long flags;
467 int i, reg;
468
469 spin_lock_irqsave(&ctlr->lock, flags);
470 if (ctlr->state != CPDMA_STATE_ACTIVE) {
471 spin_unlock_irqrestore(&ctlr->lock, flags);
472 return -EINVAL;
473 }
474
475 reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
476 dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);
477
478 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
479 if (ctlr->channels[i])
480 cpdma_chan_int_ctrl(ctlr->channels[i], enable);
481 }
482
483 spin_unlock_irqrestore(&ctlr->lock, flags);
484 return 0;
485}
486EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);
487
488void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
489{
490 dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
491}
492EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);
493
494struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
495 cpdma_handler_fn handler)
496{
497 struct cpdma_chan *chan;
498 int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
499 unsigned long flags;
500
501 if (__chan_linear(chan_num) >= ctlr->num_chan)
502 return NULL;
503
504 chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
505 if (!chan)
506 return ERR_PTR(-ENOMEM);
507
508 spin_lock_irqsave(&ctlr->lock, flags);
509 if (ctlr->channels[chan_num]) {
510 spin_unlock_irqrestore(&ctlr->lock, flags);
511 devm_kfree(ctlr->dev, chan);
512 return ERR_PTR(-EBUSY);
513 }
514
515 chan->ctlr = ctlr;
516 chan->state = CPDMA_STATE_IDLE;
517 chan->chan_num = chan_num;
518 chan->handler = handler;
519
520 if (is_rx_chan(chan)) {
521 chan->hdp = ctlr->params.rxhdp + offset;
522 chan->cp = ctlr->params.rxcp + offset;
523 chan->rxfree = ctlr->params.rxfree + offset;
524 chan->int_set = CPDMA_RXINTMASKSET;
525 chan->int_clear = CPDMA_RXINTMASKCLEAR;
526 chan->td = CPDMA_RXTEARDOWN;
527 chan->dir = DMA_FROM_DEVICE;
528 } else {
529 chan->hdp = ctlr->params.txhdp + offset;
530 chan->cp = ctlr->params.txcp + offset;
531 chan->int_set = CPDMA_TXINTMASKSET;
532 chan->int_clear = CPDMA_TXINTMASKCLEAR;
533 chan->td = CPDMA_TXTEARDOWN;
534 chan->dir = DMA_TO_DEVICE;
535 }
536 chan->mask = BIT(chan_linear(chan));
537
538 spin_lock_init(&chan->lock);
539
540 ctlr->channels[chan_num] = chan;
541 spin_unlock_irqrestore(&ctlr->lock, flags);
542 return chan;
543}
544EXPORT_SYMBOL_GPL(cpdma_chan_create);
545
546int cpdma_chan_destroy(struct cpdma_chan *chan)
547{
548 struct cpdma_ctlr *ctlr;
549 unsigned long flags;
550
551 if (!chan)
552 return -EINVAL;
553 ctlr = chan->ctlr;
554
555 spin_lock_irqsave(&ctlr->lock, flags);
556 if (chan->state != CPDMA_STATE_IDLE)
557 cpdma_chan_stop(chan);
558 ctlr->channels[chan->chan_num] = NULL;
559 spin_unlock_irqrestore(&ctlr->lock, flags);
560 return 0;
561}
562EXPORT_SYMBOL_GPL(cpdma_chan_destroy);
563
564int cpdma_chan_get_stats(struct cpdma_chan *chan,
565 struct cpdma_chan_stats *stats)
566{
567 unsigned long flags;
568 if (!chan)
569 return -EINVAL;
570 spin_lock_irqsave(&chan->lock, flags);
571 memcpy(stats, &chan->stats, sizeof(*stats));
572 spin_unlock_irqrestore(&chan->lock, flags);
573 return 0;
574}
575EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);

int cpdma_chan_dump(struct cpdma_chan *chan)
{
	unsigned long flags;
	struct device *dev = chan->ctlr->dev;

	spin_lock_irqsave(&chan->lock, flags);

	dev_info(dev, "channel %d (%s %d) state %s\n",
		 chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
		 chan_linear(chan), cpdma_state_str[chan->state]);
	dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
	dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
	if (chan->rxfree) {
		dev_info(dev, "\trxfree: %x\n",
			 chan_read(chan, rxfree));
	}

	dev_info(dev, "\tstats head_enqueue: %d\n",
		 chan->stats.head_enqueue);
	dev_info(dev, "\tstats tail_enqueue: %d\n",
		 chan->stats.tail_enqueue);
	dev_info(dev, "\tstats pad_enqueue: %d\n",
		 chan->stats.pad_enqueue);
	dev_info(dev, "\tstats misqueued: %d\n",
		 chan->stats.misqueued);
	dev_info(dev, "\tstats desc_alloc_fail: %d\n",
		 chan->stats.desc_alloc_fail);
	dev_info(dev, "\tstats pad_alloc_fail: %d\n",
		 chan->stats.pad_alloc_fail);
	dev_info(dev, "\tstats runt_receive_buff: %d\n",
		 chan->stats.runt_receive_buff);
	dev_info(dev, "\tstats runt_transmit_buff: %d\n",
		 chan->stats.runt_transmit_buff);
	dev_info(dev, "\tstats empty_dequeue: %d\n",
		 chan->stats.empty_dequeue);
	dev_info(dev, "\tstats busy_dequeue: %d\n",
		 chan->stats.busy_dequeue);
	dev_info(dev, "\tstats good_dequeue: %d\n",
		 chan->stats.good_dequeue);
	dev_info(dev, "\tstats requeue: %d\n",
		 chan->stats.requeue);
	dev_info(dev, "\tstats teardown_dequeue: %d\n",
		 chan->stats.teardown_dequeue);

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

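/*
 * Chain a descriptor onto a channel's queue. The hardware walks the
 * queue by following hw_next until it reaches a descriptor whose
 * hw_next is zero, at which point it sets EOQ in that descriptor's
 * mode word and stops fetching. If EOQ is already set on the old tail
 * after we have linked the new descriptor, the hardware raced past the
 * link and the queue must be restarted by rewriting the head
 * descriptor pointer (hdp); such events are counted as "misqueued".
 */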
static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *prev = chan->tail;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	u32 mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *desc;
	dma_addr_t buffer;
	unsigned long flags;
	u32 mode;
	int ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan));
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	ret = dma_mapping_error(ctlr->dev, buffer);
	if (ret) {
		cpdma_desc_free(ctlr->pool, desc, 1);
		ret = -EINVAL;
		goto unlock_ret;
	}

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, directed);

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_token,  token);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);
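
/*
 * Usage sketch (illustrative, not part of this file): a typical TX path
 * hands the skb in as the completion token, so the channel handler can
 * free it when the descriptor completes; "txch", "skb" and "port" are
 * hypothetical locals of the calling driver. A non-zero "directed"
 * value steers the packet to a fixed switch port.
 *
 *	ret = cpdma_chan_submit(txch, skb, skb->data, skb->len, port);
 *	if (unlikely(ret != 0))
 *		goto fail_tx;
 */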

bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&chan->lock, flags);
	ret = (chan->count < chan->desc_num) &&
	      gen_pool_avail(pool->gen_pool);
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
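
/*
 * Usage sketch (illustrative, not part of this file): ndo_start_xmit()
 * implementations commonly pair this check with queue flow control so
 * that submission never fails for lack of descriptors; "txch" and
 * "ndev" are hypothetical locals of the calling driver.
 *
 *	if (unlikely(!cpdma_check_free_tx_desc(txch)))
 *		netif_stop_queue(ndev);
 */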

/* unmap the buffer, release the descriptor and run the completion handler */
static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t buff_dma;
	int origlen;
	void *token;

	token = (void *)desc_read(desc, sw_token);
	buff_dma = desc_read(desc, sw_buffer);
	origlen = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)(token, outlen, status);
}

static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *desc;
	int status, outlen;
	int cb_status = 0;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status = desc_read(desc, hw_mode);
	outlen = status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			   CPDMA_DESC_PORT_MASK);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if ((status & CPDMA_DESC_EOQ) && chan->head) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);
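
/*
 * Usage sketch (illustrative, not part of this file): the quota maps
 * naturally onto a NAPI budget. All names below are hypothetical, and
 * CPDMA_EOI_RX is assumed to come from davinci_cpdma.h.
 *
 *	static int example_rx_poll(struct napi_struct *napi, int budget)
 *	{
 *		int num_rx = cpdma_chan_process(rxch, budget);
 *
 *		if (num_rx < budget) {
 *			napi_complete(napi);
 *			cpdma_ctlr_eoi(ctlr, CPDMA_EOI_RX);
 *		}
 *		return num_rx;
 *	}
 */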

int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);

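/*
 * Stop a channel: interrupts for the channel are masked, a teardown is
 * requested through the per-direction teardown register, and the
 * completion pointer is polled until the hardware acknowledges with
 * CPDMA_TEARDOWN_VALUE. Descriptors the hardware already completed are
 * processed normally; anything still queued is unmapped, returned to
 * the pool and reported to the handler with -ENOSYS.
 */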
int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;
	int ret;
	unsigned int timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);

		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

static struct cpdma_control_info controls[] = {
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,	    ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,	    ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,	    ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,	    ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,	    ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,	    ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	struct cpdma_control_info *info;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	info = &controls[control];

	ret = -EPERM;
	if ((info->access & ACCESS_RO) != ACCESS_RO)
		goto unlock_ret;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	struct cpdma_control_info *info;
	int ret;
	u32 val;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	info = &controls[control];

	ret = -EPERM;
	if ((info->access & ACCESS_WO) != ACCESS_WO)
		goto unlock_ret;

	val = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);
	ret = 0;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);
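
/*
 * Usage sketch (illustrative, not part of this file): controls are only
 * accessible while the controller is active and has_ext_regs is set;
 * "dma" is a hypothetical handle obtained from cpdma_ctlr_create().
 *
 *	cpdma_control_set(dma, CPDMA_RX_BUFFER_OFFSET, 0);
 *	idle = cpdma_control_get(dma, CPDMA_STAT_IDLE);
 */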

MODULE_LICENSE("GPL");