// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synopsys DesignWare Multimedia Card Interface driver
 * (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
				 SDMMC_INT_EBE | SDMMC_INT_HLE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX		200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN		100000		/* unit: HZ */

#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

#define DESC_RING_BUF_SZ	PAGE_SIZE

struct idmac_desc_64addr {
	u32		des0;	/* Control Descriptor */
#define IDMAC_OWN_CLR64(x) \
	!((x) & cpu_to_le32(IDMAC_DES0_OWN))

	u32		des1;	/* Reserved */

	u32		des2;	/* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
	((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
	 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))

	u32		des3;	/* Reserved */

	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1 */
	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1 */

	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
};

struct idmac_desc {
	__le32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	__le32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))

	__le32		des2;	/* buffer 1 physical address */

	__le32		des3;	/* buffer 2 physical address */
};

/* Each descriptor can transfer up to 4KB of data in chained mode */
#define DW_MCI_DESC_DATA_LENGTH	0x1000

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data *data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dw_mci_req);

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	struct dw_mci *host = s->private;

	pm_runtime_get_sync(host->dev);

	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

	pm_runtime_put_autosuspend(host->dev);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dw_mci_regs);

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host *mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;

	root = mmc->debugfs_root;
	if (!root)
		return;

	debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops);
	debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops);
	debugfs_create_u32("state", S_IRUSR, root, &host->state);
	debugfs_create_xul("pending_events", S_IRUSR, root,
			   &host->pending_events);
	debugfs_create_xul("completed_events", S_IRUSR, root,
			   &host->completed_events);
#ifdef CONFIG_FAULT_INJECTION
	fault_create_debugfs_attr("fail_data_crc", root, &host->fail_data_crc);
#endif
}
#endif /* defined(CONFIG_DEBUG_FS) */

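/*
 * Set the given reset bit(s) in CTRL and wait for the hardware to clear
 * them again.  Returns false if the bits are still set after 500ms.
 */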
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
	u32 ctrl;

	ctrl = mci_readl(host, CTRL);
	ctrl |= reset;
	mci_writel(host, CTRL, ctrl);

	/* wait till resets clear */
	if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl,
				      !(ctrl & reset),
				      1, 500 * USEC_PER_MSEC)) {
		dev_err(host->dev,
			"Timeout resetting block (ctrl reset %#x)\n",
			ctrl & reset);
		return false;
	}

	return true;
}

static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
{
	u32 status;

	/*
	 * Databook says that before issuing a new data transfer command
	 * we need to check to see if the card is busy. Data transfer commands
	 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
	 *
	 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
	 * expected.
	 */
	if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
	    !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
					      status,
					      !(status & SDMMC_STATUS_BUSY),
					      10, 500 * USEC_PER_MSEC))
			dev_err(host->dev, "Busy; trying anyway\n");
	}
}

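/*
 * Send a bare CIU command (such as a clock update), polling until the
 * controller clears SDMMC_CMD_START to show it has accepted the command.
 */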
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
				      !(cmd_status & SDMMC_CMD_START),
				      1, 500 * USEC_PER_MSEC))
		dev_err(&slot->mmc->class_dev,
			"Timeout sending command (cmd %#x arg %#x status %#x)\n",
			cmd, arg, cmd_status);
}

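/*
 * Translate an mmc_command into CMD register flags: stop/abort bits,
 * CMD11 voltage-switch handling, response type and data direction.
 */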
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock. That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	if (cmd->data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}

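/*
 * Precompute the stop/abort command (CMD12 for block transfers, a CCCR
 * abort for SDIO) so it can be issued quickly if the transfer errors out.
 */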
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
	    mmc_op_tuning(cmdr) ||
	    cmdr == MMC_GEN_CMD) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
	       SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}

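/*
 * Arm the command-timeout (cto) software timer as a backstop for the
 * hardware response timeout, based on TMOUT and the current clock divider.
 */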
static inline void dw_mci_set_cto(struct dw_mci *host)
{
	unsigned int cto_clks;
	unsigned int cto_div;
	unsigned int cto_ms;
	unsigned long irqflags;

	cto_clks = mci_readl(host, TMOUT) & 0xff;
	cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
	if (cto_div == 0)
		cto_div = 1;

	cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div,
				  host->bus_hz);

	/* add a bit of spare time */
	cto_ms += 10;

	/*
	 * The durations we're working with are fairly short so we have to be
	 * extra careful about synchronization here. Specifically in hardware a
	 * command timeout is _at most_ 5.1 ms, so that means we expect an
	 * interrupt (either command done or timeout) to come rather quickly
	 * after the mci_writel. ...but just in case we have a long interrupt
	 * latency let's add a bit of paranoia.
	 *
	 * In general we'll assume that at least an interrupt will be asserted
	 * in hardware by the time the cto_timer runs. ...and if it hasn't
	 * been asserted in hardware by that time then we'll assume it'll never
	 * come.
	 */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
		mod_timer(&host->cto_timer,
			  jiffies + msecs_to_jiffies(cto_ms) + 1);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd_flags);

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);

	/* response expected command only */
	if (cmd_flags & SDMMC_CMD_RESP_EXP)
		dw_mci_set_cto(host);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = &host->stop_abort;

	dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data && data->host_cookie == COOKIE_MAPPED) {
		dma_unmap_sg(host->dev,
			     data->sg,
			     data->sg_len,
			     mmc_get_dma_dir(data));
		data->host_cookie = COOKIE_UNMAPPED;
	}
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}

static void dw_mci_dmac_complete_dma(void *arg)
{
	struct dw_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	if ((host->use_dma == TRANS_MODE_EDMAC) &&
	    data && (data->flags & MMC_DATA_READ))
		/* Invalidate cache after read */
		dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
				    data->sg,
				    data->sg_len,
				    DMA_FROM_DEVICE);

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}

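/*
 * Build and link the IDMAC descriptor ring (32-bit or 64-bit address
 * variant), then program the interrupt mask and the ring base address.
 */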
static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
		     i++, p++) {
			p->des6 = (host->sg_dma +
				   (sizeof(struct idmac_desc_64addr) *
				    (i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
					 (i + 1))) >> 32;
			/* Initialize reserved and buffer size fields to "0" */
			p->des0 = 0;
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu;
		     i < host->ring_size - 1;
		     i++, p++) {
			p->des3 = cpu_to_le32(host->sg_dma +
					(sizeof(struct idmac_desc) * (i + 1)));
			p->des0 = 0;
			p->des1 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = cpu_to_le32(host->sg_dma);
		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
			   SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
			   SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}

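/*
 * Fill the 64-bit descriptor ring for one request; like the 32-bit
 * variant below, each scatterlist entry is split into chunks of at most
 * DW_MCI_DESC_DATA_LENGTH bytes.
 */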
static inline int dw_mci_prepare_desc64(struct dw_mci *host,
					struct mmc_data *data,
					unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc_64addr *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u64 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						!(val & IDMAC_DES0_OWN),
						10, 100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
				     IDMAC_DES0_CH;

			/* Buffer length */
			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des4 = mem_addr & 0xffffffff;
			desc->des5 = mem_addr >> 32;

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc_last->des0 |= IDMAC_DES0_LD;

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}

static inline int dw_mci_prepare_desc32(struct dw_mci *host,
					struct mmc_data *data,
					unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u32 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						      IDMAC_OWN_CLR64(val),
						      10,
						      100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
						 IDMAC_DES0_DIC |
						 IDMAC_DES0_CH);

			/* Buffer length */
			IDMAC_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des2 = cpu_to_le32(mem_addr);

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);

	/* Set last descriptor */
	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
					 IDMAC_DES0_DIC));
	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}

static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;
	int ret;

	if (host->dma_64bit_address == 1)
		ret = dw_mci_prepare_desc64(host, host->data, sg_len);
	else
		ret = dw_mci_prepare_desc32(host, host->data, sg_len);

	if (ret)
		goto out;

	/* drain writebuffer */
	wmb();

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* drain writebuffer */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);

out:
	return ret;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

static void dw_mci_edmac_stop_dma(struct dw_mci *host)
{
	dmaengine_terminate_async(host->dms->ch);
}

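/*
 * Configure and start an external dmaengine channel for the current
 * request, matching the burst size to the FIFOTH MSIZE setting.
 */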
static int dw_mci_edmac_start_dma(struct dw_mci *host,
				  unsigned int sg_len)
{
	struct dma_slave_config cfg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct scatterlist *sgl = host->data->sg;
	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 sg_elems = host->data->sg_len;
	u32 fifoth_val;
	u32 fifo_offset = host->fifo_reg - host->regs;
	int ret = 0;

	/* Set external dma config: burst size, burst width */
	memset(&cfg, 0, sizeof(cfg));
	cfg.dst_addr = host->phy_regs + fifo_offset;
	cfg.src_addr = cfg.dst_addr;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	/* Match burst msize with external dma config */
	fifoth_val = mci_readl(host, FIFOTH);
	cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
	cfg.src_maxburst = cfg.dst_maxburst;

	if (host->data->flags & MMC_DATA_WRITE)
		cfg.direction = DMA_MEM_TO_DEV;
	else
		cfg.direction = DMA_DEV_TO_MEM;

	ret = dmaengine_slave_config(host->dms->ch, &cfg);
	if (ret) {
		dev_err(host->dev, "Failed to config edmac.\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
				       sg_len, cfg.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(host->dev, "Can't prepare slave sg.\n");
		return -EBUSY;
	}

	/* Set dw_mci_dmac_complete_dma as callback */
	desc->callback = dw_mci_dmac_complete_dma;
	desc->callback_param = (void *)host;
	dmaengine_submit(desc);

	/* Flush cache before write */
	if (host->data->flags & MMC_DATA_WRITE)
		dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl,
				       sg_elems, DMA_TO_DEVICE);

	dma_async_issue_pending(host->dms->ch);

	return 0;
}

static int dw_mci_edmac_init(struct dw_mci *host)
{
	/* Request external dma channel */
	host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
	if (!host->dms)
		return -ENOMEM;

	host->dms->ch = dma_request_chan(host->dev, "rx-tx");
	if (IS_ERR(host->dms->ch)) {
		int ret = PTR_ERR(host->dms->ch);

		dev_err(host->dev, "Failed to get external DMA channel.\n");
		kfree(host->dms);
		host->dms = NULL;
		return ret;
	}

	return 0;
}

static void dw_mci_edmac_exit(struct dw_mci *host)
{
	if (host->dms) {
		if (host->dms->ch) {
			dma_release_channel(host->dms->ch);
			host->dms->ch = NULL;
		}
		kfree(host->dms);
		host->dms = NULL;
	}
}

static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
	.init = dw_mci_edmac_init,
	.exit = dw_mci_edmac_exit,
	.start = dw_mci_edmac_start_dma,
	.stop = dw_mci_edmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

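/*
 * Map the request's scatterlist for DMA.  Returns a negative error for
 * transfers that must fall back to PIO (too short or not word-aligned).
 */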
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   int cookie)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_len;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    mmc_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	data->host_cookie = cookie;

	return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	/* This data might be unmapped at this time */
	data->host_cookie = COOKIE_UNMAPPED;

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
				    COOKIE_PRE_MAPPED) < 0)
		data->host_cookie = COOKIE_UNMAPPED;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     mmc_get_dma_dir(data));
	data->host_cookie = COOKIE_UNMAPPED;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if (((mmc->caps & MMC_CAP_NEEDS_POLL)
				|| !mmc_card_is_removable(mmc))) {
		present = 1;

		if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
			if (mmc->caps & MMC_CAP_NEEDS_POLL) {
				dev_info(&mmc->class_dev,
					 "card is polling.\n");
			} else {
				dev_info(&mmc->class_dev,
					 "card is non-removable.\n");
			}
			set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		}

		return present;
	} else if (gpio_cd >= 0)
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is present\n");
	else if (!present &&
		 !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is not present\n");
	spin_unlock_bh(&host->lock);

	return present;
}

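/*
 * Pick the largest burst size (MSIZE) that divides both the block depth
 * and the inverted TX watermark, then program FIFOTH to match.
 */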
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	/* PIO should skip this scenario */
	if (!host->use_dma)
		return;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width)
		goto done;

	do {
		if (!((blksz_depth % mszs[idx]) ||
		      (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried.
	 * Thus, the initial values are used.
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
}

static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;
	u8 enable;

	/*
	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
	 * in the FIFO region, so we really shouldn't access it).
	 */
	if (host->verid < DW_MMC_240A ||
	    (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
		return;

	/*
	 * Card write Threshold is introduced since 2.80a
	 * It's used when HS400 mode is enabled.
	 */
	if (data->flags & MMC_DATA_WRITE &&
	    host->timing != MMC_TIMING_MMC_HS400)
		goto disable;

	if (data->flags & MMC_DATA_WRITE)
		enable = SDMMC_CARD_WR_THR_EN;
	else
		enable = SDMMC_CARD_RD_THR_EN;

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104 &&
	    host->timing != MMC_TIMING_MMC_HS400)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) <  (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
	return;

disable:
	mci_writel(host, CDTHRCTL, 0);
}

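/*
 * Try to set up the current data transfer over DMA.  A negative return
 * tells the caller to drive the transfer by PIO instead.
 */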
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dev_vdbg(host->dev,
			 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
			 (unsigned long)host->sg_cpu,
			 (unsigned long)host->sg_dma,
			 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);

	if (host->dma_ops->start(host, sg_len)) {
		host->dma_ops->stop(host);
		/* We can't do DMA, try PIO for this one */
		dev_dbg(host->dev,
			"%s: fall back to PIO mode for current transfer\n",
			__func__);
		return -ENODEV;
	}

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int flags = SG_MITER_ATOMIC;
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dir_status = DW_MCI_RECV_STATUS;
	else
		host->dir_status = DW_MCI_SEND_STATUS;

	dw_mci_ctrl_thld(host, data);

	if (dw_mci_submit_data_dma(host, data)) {
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode. If wm_aligned
		 * is set, we set the watermark to be the same as the data
		 * size. If the next data may be transferred in DMA mode,
		 * prev_blksz should be invalidated.
		 */
		if (host->wm_aligned)
			dw_mci_adjust_fifoth(host, data);
		else
			mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}

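/*
 * Program the card clock divider, clock enable/low-power bits and bus
 * width for the slot, informing the CIU after every clock register change.
 */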
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	slot->mmc->actual_clock = 0;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if ((clock != slot->__clk_old &&
		     !test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
		    force_clkinit) {
			/* Silence the verbose log when called from a PM context */
			if (!force_clkinit)
				dev_info(&slot->mmc->class_dev,
					 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
					 slot->id, host->bus_hz, clock,
					 div ? ((host->bus_hz / div) >> 1) :
					 host->bus_hz, div);

			/*
			 * If card is polling, display the message only
			 * one time at boot time.
			 */
			if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
			    slot->mmc->f_min == clock)
				set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
		}

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* keep the last clock value that was requested from core */
		slot->__clk_old = clock;
		slot->mmc->actual_clock = div ? ((host->bus_hz / div) >> 1) :
					  host->bus_hz;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

static void dw_mci_set_data_timeout(struct dw_mci *host,
				    unsigned int timeout_ns)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 clk_div, tmout;
	u64 tmp;

	if (drv_data && drv_data->set_data_timeout)
		return drv_data->set_data_timeout(host, timeout_ns);

	clk_div = (mci_readl(host, CLKDIV) & 0xFF) * 2;
	if (clk_div == 0)
		clk_div = 1;

	tmp = DIV_ROUND_UP_ULL((u64)timeout_ns * host->bus_hz, NSEC_PER_SEC);
	tmp = DIV_ROUND_UP_ULL(tmp, clk_div);

	/* TMOUT[7:0] (RESPONSE_TIMEOUT) */
	tmout = 0xFF; /* Set maximum */

	/* TMOUT[31:8] (DATA_TIMEOUT) */
	if (!tmp || tmp > 0xFFFFFF)
		tmout |= (0xFFFFFF << 8);
	else
		tmout |= (tmp & 0xFFFFFF) << 8;

	mci_writel(host, TMOUT, tmout);
	dev_dbg(host->dev, "timeout_ns: %u => TMOUT[31:8]: %#08x",
		timeout_ns, tmout >> 8);
}

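/*
 * Kick off one command (and its data phase, if any) on the controller,
 * arming the CMD11 timer when a voltage-switch command is sent.
 */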
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data *data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		dw_mci_set_data_timeout(host, data->timeout_ns);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb(); /* drain writebuffer */
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		unsigned long irqflags;

		/*
		 * Databook says to fail after 2ms w/ no response, but evidence
		 * shows that sometimes the cmd11 interrupt takes over 130ms.
		 * We'll set to 500ms, plus an extra jiffy just in case jiffies
		 * is just about to roll over.
		 *
		 * We do this whole thing under spinlock and only if the
		 * command hasn't already completed (indicating the irq
		 * already ran so we don't want the timeout).
		 */
		spin_lock_irqsave(&host->irq_lock, irqflags);
		if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
			mod_timer(&host->cmd11_timer,
				  jiffies + msecs_to_jiffies(500) + 1);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}

	host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_WAITING_CMD11_DONE) {
		dev_warn(&slot->mmc->class_dev,
			 "Voltage change didn't complete\n");
		/*
		 * this case isn't expected to happen, so we can
		 * either crash here or just try to continue on
		 * in the closest possible state
		 */
		host->state = STATE_IDLE;
	}

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */

	if (!dw_mci_get_cd(mmc)) {
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_bh(&host->lock);

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
						    ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/* return, if we failed to turn on vmmc */
				return;
			}
		}
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_ON:
		if (!slot->host->vqmmc_enabled) {
			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0)
					dev_err(slot->host->dev,
						"failed to enable vqmmc\n");
				else
					slot->host->vqmmc_enabled = true;

			} else {
				/* Keep track so we don't reset again */
				slot->host->vqmmc_enabled = true;
			}

			/* Reset our state machine after powering on */
			dw_mci_ctrl_reset(slot->host,
					  SDMMC_CTRL_ALL_RESET_FLAGS);
		}

		/* Adjust clock / bus width after power is up */
		dw_mci_setup_bus(slot, false);

		break;
	case MMC_POWER_OFF:
		/* Turn clock off before power goes down */
		dw_mci_setup_bus(slot, false);

		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
			regulator_disable(mmc->supply.vqmmc);
		slot->host->vqmmc_enabled = false;

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}

	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;
}

static int dw_mci_card_busy(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 status;

	/*
	 * Check the busy bit which is low when DAT[3:0]
	 * (the data lines) are 0000
	 */
	status = mci_readl(slot->host, STATUS);

	return !!(status & SDMMC_STATUS_BUSY);
}

static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int ret;

	if (drv_data && drv_data->switch_voltage)
		return drv_data->switch_voltage(mmc, ios);

	/*
	 * Program the voltage.  Note that some instances of dw_mmc may use
	 * the UHS_REG for this.  For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly.  Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		uhs &= ~v18;
	else
		uhs |= v18;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = mmc_regulator_set_vqmmc(mmc, ios);
		if (ret < 0) {
			dev_dbg(&mmc->class_dev,
				"Regulator set error %d - %s V\n",
				ret, uhs & v18 ? "1.8" : "3.3");
			return ret;
		}
	}
	mci_writel(host, UHS_REG, uhs);

	return 0;
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	int gpio_ro = mmc_gpio_get_ro(mmc);

	/* Use platform get_ro function, else try on board write protect */
	if (gpio_ro >= 0)
		read_only = gpio_ro;
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

static void dw_mci_hw_reset(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int reset;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dw_mci_idmac_reset(host);

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
			       SDMMC_CTRL_FIFO_RESET))
		return;

	/*
	 * According to eMMC spec, card reset procedure:
	 * tRstW >= 1us:   RST_n pulse width
	 * tRSCA >= 200us: RST_n to Command time
	 * tRSTH >= 1us:   RST_n high period
	 */
	reset = mci_readl(host, RST_N);
	reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
	mci_writel(host, RST_N, reset);
	usleep_range(1, 2);
	reset |= SDMMC_RST_HWACTIVE << slot->id;
	mci_writel(host, RST_N, reset);
	usleep_range(200, 300);
}

static void dw_mci_prepare_sdio_irq(struct dw_mci_slot *slot, bool prepare)
{
	struct dw_mci *host = slot->host;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
	u32 clk_en_a_old;
	u32 clk_en_a;

	/*
	 * Low power mode will stop the card clock when idle. According to the
	 * description of the CLKENA register we should disable low power mode
	 * for SDIO cards if we need SDIO interrupts to work.
	 */

	clk_en_a_old = mci_readl(host, CLKENA);
	if (prepare) {
		set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
		clk_en_a = clk_en_a_old & ~clken_low_pwr;
	} else {
		clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
		clk_en_a = clk_en_a_old | clken_low_pwr;
	}

	if (clk_en_a != clk_en_a_old) {
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT,
			     0);
	}
}

static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb)
{
	struct dw_mci *host = slot->host;
	unsigned long irqflags;
	u32 int_mask;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb)
		int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
	else
		int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
	mci_writel(host, INTMASK, int_mask);

	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	dw_mci_prepare_sdio_irq(slot, enb);
	__dw_mci_enable_sdio_irq(slot, enb);

	/* Avoid runtime suspending the device when SDIO IRQ is enabled */
	if (enb)
		pm_runtime_get_noresume(host->dev);
	else
		pm_runtime_put_noidle(host->dev);
}

static void dw_mci_ack_sdio_irq(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);

	__dw_mci_enable_sdio_irq(slot, 1);
}

static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int err = -EINVAL;

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode);
	return err;
}

static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;

	if (drv_data && drv_data->prepare_hs400_tuning)
		return drv_data->prepare_hs400_tuning(host, ios);

	return 0;
}

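/*
 * Full controller/FIFO (and, if in use, DMA) reset, followed by the
 * clock update that the CIU requires after any CTRL reset.
 */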
static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;
	u32 status = 0;

	/*
	 * Resetting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS
		 * register to clear any interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		if (!host->use_dma) {
			ret = true;
			goto ciu_out;
		}

		/* Wait for dma_req to be cleared */
		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
					      status,
					      !(status & SDMMC_STATUS_DMA_REQ),
					      1, 500 * USEC_PER_MSEC)) {
			dev_err(host->dev,
				"%s: Timeout waiting for dma_req to be cleared\n",
				__func__);
			goto ciu_out;
		}

		/* when using DMA next we reset the fifo again */
		if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
			goto ciu_out;
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev,
				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

	if (host->use_dma == TRANS_MODE_IDMAC)
		/* It is also required that we reinit idmac */
		dw_mci_idmac_init(host);

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers */
	mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}

static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.card_hw_reset		= dw_mci_hw_reset,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.ack_sdio_irq		= dw_mci_ack_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	.card_busy		= dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
	.prepare_hs400_tuning	= dw_mci_prepare_hs400_tuning,
};

#ifdef CONFIG_FAULT_INJECTION
static enum hrtimer_restart dw_mci_fault_timer(struct hrtimer *t)
{
	struct dw_mci *host = container_of(t, struct dw_mci, fault_timer);
	unsigned long flags;

	spin_lock_irqsave(&host->irq_lock, flags);

	/*
	 * Only inject an error if we haven't already got an error or data over
	 * interrupt.
	 */
	if (!host->data_status) {
		host->data_status = SDMMC_INT_DCRC;
		set_bit(EVENT_DATA_ERROR, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}

	spin_unlock_irqrestore(&host->irq_lock, flags);

	return HRTIMER_NORESTART;
}

static void dw_mci_start_fault_timer(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (!data || data->blocks <= 1)
		return;

	if (!should_fail(&host->fail_data_crc, 1))
		return;

	/*
	 * Try to inject the error at random points during the data transfer.
	 */
	hrtimer_start(&host->fault_timer,
		      ms_to_ktime(get_random_u32_below(25)),
		      HRTIMER_MODE_REL);
}

static void dw_mci_stop_fault_timer(struct dw_mci *host)
{
	hrtimer_cancel(&host->fault_timer);
}

static void dw_mci_init_fault(struct dw_mci *host)
{
	host->fail_data_crc = (struct fault_attr) FAULT_ATTR_INITIALIZER;

	hrtimer_init(&host->fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	host->fault_timer.function = dw_mci_fault_timer;
}
#else
static void dw_mci_init_fault(struct dw_mci *host)
{
}

static void dw_mci_start_fault_timer(struct dw_mci *host)
{
}

static void dw_mci_stop_fault_timer(struct dw_mci *host)
{
}
#endif

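/*
 * Finish the current request and start the next queued slot, if any.
 * Drops and re-takes host->lock around mmc_request_done().
 */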
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host *prev_mmc = host->slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	return cmd->error;
}

static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status == DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status == DW_MCI_RECV_STATUS) {
				data->error = -EILSEQ;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EILSEQ;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}

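/*
 * Arm the software data read timeout. The hardware timeout (from the
 * platform get_drto_clks hook or TMOUT[31:8]) is expressed in card
 * clock cycles, so convert it to milliseconds:
 *
 *	drto_ms = 1000 * drto_clks * drto_div / bus_hz
 *
 * where drto_div accounts for the CLKDIV divider (a CLKDIV value of N
 * divides the bus clock by 2 * N, with 0 meaning no division).
 */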
static void dw_mci_set_drto(struct dw_mci *host)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	unsigned int drto_clks;
	unsigned int drto_div;
	unsigned int drto_ms;
	unsigned long irqflags;

	if (drv_data && drv_data->get_drto_clks)
		drto_clks = drv_data->get_drto_clks(host);
	else
		drto_clks = mci_readl(host, TMOUT) >> 8;
	drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
	if (drto_div == 0)
		drto_div = 1;

	drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
				   host->bus_hz);

	dev_dbg(host->dev, "drto_ms: %u\n", drto_ms);

	/* add a bit of spare time */
	drto_ms += 10;

	spin_lock_irqsave(&host->irq_lock, irqflags);
	if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
		mod_timer(&host->dto_timer,
			  jiffies + msecs_to_jiffies(drto_ms));
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
{
	if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
		return false;

	/*
	 * Really be certain that the timer has stopped. This is a bit of
	 * paranoia and could only really happen if we had really bad
	 * interrupt latency and the interrupt routine and timeout were
	 * running concurrently so that the del_timer() in the interrupt
	 * handler couldn't run.
	 */
	WARN_ON(del_timer_sync(&host->cto_timer));
	clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);

	return true;
}

static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
{
	if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
		return false;

	/* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
	WARN_ON(del_timer_sync(&host->dto_timer));
	clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);

	return true;
}

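/*
 * Request state machine, run in tasklet context. Each iteration
 * consumes the events posted by the interrupt handler and advances
 * host->state (SENDING_CMD -> SENDING_DATA -> DATA_BUSY ->
 * SENDING_STOP) until no further transition is possible.
 */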
static void dw_mci_tasklet_func(struct tasklet_struct *t)
{
	struct dw_mci *host = from_tasklet(host, t, tasklet);
	struct mmc_data *data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
		case STATE_WAITING_CMD11_DONE:
			break;

		case STATE_SENDING_CMD11:
		case STATE_SENDING_CMD:
			if (!dw_mci_clear_pending_cmd_complete(host))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				__dw_mci_start_request(host, host->slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				/*
				 * During UHS tuning sequence, sending the stop
				 * command after the response CRC error would
				 * throw the system into a confused state
				 * causing all future tuning phases to report
				 * failure.
				 *
				 * In such case controller will move into a data
				 * transfer state after a response error or
				 * response CRC error. Let's let that finish
				 * before trying to send a stop, so we'll go to
				 * STATE_SENDING_DATA.
				 *
				 * Although letting the data transfer take place
				 * will waste a bit of time (we already know
				 * the command was bad), it can't cause any
				 * errors since it's possible it would have
				 * taken place anyway if this tasklet got
				 * delayed. Allowing the transfer to take place
				 * avoids races and keeps things simple.
				 */
				if (err != -ETIMEDOUT &&
				    host->dir_status == DW_MCI_RECV_STATUS) {
					state = STATE_SENDING_DATA;
					continue;
				}

				send_stop_abort(host, data);
				dw_mci_stop_dma(host);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			fallthrough;

		case STATE_SENDING_DATA:
			/*
			 * We could get a data error and never a transfer
			 * complete so we'd better check for it here.
			 *
			 * Note that we don't really care if we also got a
			 * transfer complete; stopping the DMA and sending an
			 * abort won't hurt.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				if (!(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				dw_mci_stop_dma(host);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events)) {
				/*
				 * If all data-related interrupts don't come
				 * within the given time while in the reading
				 * data state, arm the data read timeout.
				 */
				if (host->dir_status == DW_MCI_RECV_STATUS)
					dw_mci_set_drto(host);
				break;
			}

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);

			/*
			 * Handle an EVENT_DATA_ERROR that might have shown up
			 * before the transfer completed. This might not have
			 * been caught by the check above because the interrupt
			 * could have gone off between the previous check and
			 * the check for transfer complete.
			 *
			 * Technically this ought not be needed assuming we
			 * get a DATA_COMPLETE eventually (we'll notice the
			 * error and end the request), but it shouldn't hurt.
			 *
			 * This has the advantage of sending the stop command.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				if (!(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				dw_mci_stop_dma(host);
				state = STATE_DATA_ERROR;
				break;
			}
			prev_state = state = STATE_DATA_BUSY;

			fallthrough;

		case STATE_DATA_BUSY:
			if (!dw_mci_clear_pending_data_complete(host)) {
				/*
				 * If the data error interrupt comes but the
				 * data over interrupt doesn't come within the
				 * given time while in the reading data state,
				 * arm the data read timeout.
				 */
				if (host->dir_status == DW_MCI_RECV_STATUS)
					dw_mci_set_drto(host);
				break;
			}

			dw_mci_stop_fault_timer(host);
			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer */
				if (data->stop)
					send_stop_abort(host, data);
			} else {
				/*
				 * If we don't have a command complete now we'll
				 * never get one since we just reset everything;
				 * better end the request.
				 *
				 * If we do have a command complete we'll fall
				 * through to the SENDING_STOP command and
				 * everything will be peachy keen.
				 */
				if (!test_bit(EVENT_CMD_COMPLETE,
					      &host->pending_events)) {
					host->cmd = NULL;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}
			}

			/*
			 * If err is non-zero, the stop/abort command has
			 * already been issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			fallthrough;

		case STATE_SENDING_STOP:
			if (!dw_mci_clear_pending_cmd_complete(host))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_reset(host);

			dw_mci_stop_fault_timer(host);
			host->cmd = NULL;
			host->data = NULL;

			if (!mrq->sbc && mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);
}

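/*
 * PIO helpers. The FIFO must be accessed in units of the host data
 * width (16, 32 or 64 bits), so host->part_buf collects the leftover
 * bytes of a buffer that is not a multiple of that width until a full
 * FIFO word can be pushed or pulled.
 */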
/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}

/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}

/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min_t(int, cnt, host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}

/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}

static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;

		for (; cnt >= 2; cnt -= 2)
			mci_fifo_writew(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
	}
}

static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;

		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_fifo_readw(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf16 = mci_fifo_readw(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;

		for (; cnt >= 4; cnt -= 4)
			mci_fifo_writel(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
	}
}

static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;

		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_fifo_readl(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_fifo_readl(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			mci_fifo_writeq(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
	}
}

static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readq(host->fifo_reg);

			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_fifo_readq(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf = mci_fifo_readq(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
	int len;

	/* get remaining partial bytes */
	len = dw_mci_pull_part_bytes(host, buf, cnt);
	if (unlikely(len == cnt))
		return;
	buf += len;
	cnt -= len;

	/* get the rest of the data */
	host->pull_data(host, buf, cnt);
}

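/*
 * Drain the receive FIFO into the request's scatterlist using the
 * sg_miter interface. The outer loop keeps reading while the RXDR
 * (receive FIFO data request) interrupt stays asserted, and on DTO it
 * also drains whatever is still counted in FCNT.
 */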
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready, read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR, write again */

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	del_timer(&host->cto_timer);

	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb(); /* drain writebuffer */

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);

	dw_mci_start_fault_timer(host);
}

static void dw_mci_handle_cd(struct dw_mci *host)
{
	struct dw_mci_slot *slot = host->slot;

	mmc_detect_change(slot->mmc,
			  msecs_to_jiffies(host->pdata->detect_delay_ms));
}

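/*
 * Top-level interrupt handler. Acknowledges and dispatches the SDMMC
 * status bits (voltage switch, command/data errors, data over, FIFO
 * requests, card detect, SDIO), then handles the IDMAC descriptor
 * interrupts when the internal DMA controller is in use.
 */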
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	struct dw_mci_slot *slot = host->slot;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	if (pending) {
		/* Check volt switch first, since it can look like an error */
		if ((host->state == STATE_SENDING_CMD11) &&
		    (pending & SDMMC_INT_VOLT_SWITCH)) {
			mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
			pending &= ~SDMMC_INT_VOLT_SWITCH;

			/*
			 * Hold the lock; we know cmd11_timer can't be kicked
			 * off after the lock is released, so safe to delete.
			 */
			spin_lock(&host->irq_lock);
			dw_mci_cmd_interrupt(host, pending);
			spin_unlock(&host->irq_lock);

			del_timer(&host->cmd11_timer);
		}

		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			spin_lock(&host->irq_lock);

			del_timer(&host->cto_timer);
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);

			spin_unlock(&host->irq_lock);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			spin_lock(&host->irq_lock);

			if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT)
				del_timer(&host->dto_timer);

			/* if there is an error, report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_DATA_ERROR, &host->pending_events);

			if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT)
				/* In case of error, we cannot expect a DTO */
				set_bit(EVENT_DATA_COMPLETE,
					&host->pending_events);

			tasklet_schedule(&host->tasklet);

			spin_unlock(&host->irq_lock);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			spin_lock(&host->irq_lock);

			del_timer(&host->dto_timer);

			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);

			spin_unlock(&host->irq_lock);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			spin_lock(&host->irq_lock);

			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);

			spin_unlock(&host->irq_lock);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			dw_mci_handle_cd(host);
		}

		if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
			mci_writel(host, RINTSTS,
				   SDMMC_INT_SDIO(slot->sdio_id));
			__dw_mci_enable_sdio_irq(slot, 0);
			sdio_signal_irq(slot->mmc);
		}
	}

	if (host->use_dma != TRANS_MODE_IDMAC)
		return IRQ_HANDLED;

	/* Handle IDMA interrupts */
	if (host->dma_64bit_address == 1) {
		pending = mci_readl(host, IDSTS64);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
				host->dma_ops->complete((void *)host);
		}
	} else {
		pending = mci_readl(host, IDSTS);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
				host->dma_ops->complete((void *)host);
		}
	}

	return IRQ_HANDLED;
}

static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	struct mmc_host *mmc = slot->mmc;
	int ctrl_id;

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	if (drv_data)
		mmc->caps |= drv_data->common_caps;

	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}

	if (drv_data && drv_data->caps) {
		if (ctrl_id >= drv_data->num_caps) {
			dev_err(host->dev, "invalid controller id %d\n",
				ctrl_id);
			return -EINVAL;
		}
		mmc->caps |= drv_data->caps[ctrl_id];
	}

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	/* if host has set a minimum_freq, we should respect it */
	if (host->minimum_speed)
		mmc->f_min = host->minimum_speed;
	else
		mmc->f_min = DW_MCI_FREQ_MIN;

	if (!mmc->f_max)
		mmc->f_max = DW_MCI_FREQ_MAX;

	/* Process SDIO IRQs through the sdio_irq_work. */
	if (mmc->caps & MMC_CAP_SDIO_IRQ)
		mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	return 0;
}

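/*
 * Allocate and register the single mmc_host for this controller,
 * wiring up regulators, DT properties, capabilities and the transfer
 * size limits appropriate to the selected transfer mode.
 */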
static int dw_mci_init_slot(struct dw_mci *host)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	int ret;

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = 0;
	slot->sdio_id = host->sdio_id0 + slot->id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot = slot;

	mmc->ops = &dw_mci_ops;

	/* if there are external regulators, get them */
	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		goto err_host_allocated;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto err_host_allocated;

	ret = dw_mci_init_slot_caps(slot);
	if (ret)
		goto err_host_allocated;

	/* Useful defaults if platform data is unset. */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65535;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
		mmc->max_blk_count = mmc->max_req_size / 512;
	} else if (host->use_dma == TRANS_MODE_EDMAC) {
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535;
		mmc->max_blk_count = 65535;
		mmc->max_req_size =
				mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	} else {
		/* TRANS_MODE_PIO */
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size *
				    mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	}

	dw_mci_get_cd(mmc);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_host_allocated;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	return 0;

err_host_allocated:
	mmc_free_host(mmc);
	return ret;
}

static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
{
	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot = NULL;
	mmc_free_host(slot->mmc);
}

static void dw_mci_init_dma(struct dw_mci *host)
{
	int addr_config;
	struct device *dev = host->dev;

	/*
	 * Check transfer mode from HCON[17:16]
	 * Clear the ambiguous description of dw_mmc databook:
	 * 2b'00: No DMA Interface -> Actually means using Internal DMA block
	 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
	 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
	 * 2b'11: Non DW DMA Interface -> pio only
	 * Compared to DesignWare DMA Interface, Generic DMA Interface has a
	 * simpler request/acknowledge handshake mechanism and both of them
	 * are regarded as external dma master for dw_mmc.
	 */
	host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
	if (host->use_dma == DMA_INTERFACE_IDMA) {
		host->use_dma = TRANS_MODE_IDMAC;
	} else if (host->use_dma == DMA_INTERFACE_DWDMA ||
		   host->use_dma == DMA_INTERFACE_GDMA) {
		host->use_dma = TRANS_MODE_EDMAC;
	} else {
		goto no_dma;
	}

	/* Determine which DMA interface to use */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		/*
		 * Check ADDR_CONFIG bit in HCON to find
		 * IDMAC address bus width
		 */
		addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));

		if (addr_config == 1) {
			/* host supports IDMAC in 64-bit address mode */
			host->dma_64bit_address = 1;
			dev_info(host->dev,
				 "IDMAC supports 64-bit address mode.\n");
			if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
				dma_set_coherent_mask(host->dev,
						      DMA_BIT_MASK(64));
		} else {
			/* host supports IDMAC in 32-bit address mode */
			host->dma_64bit_address = 0;
			dev_info(host->dev,
				 "IDMAC supports 32-bit address mode.\n");
		}

		/* Alloc memory for sg translation */
		host->sg_cpu = dmam_alloc_coherent(host->dev,
						   DESC_RING_BUF_SZ,
						   &host->sg_dma, GFP_KERNEL);
		if (!host->sg_cpu) {
			dev_err(host->dev,
				"%s: could not alloc DMA memory\n",
				__func__);
			goto no_dma;
		}

		host->dma_ops = &dw_mci_idmac_ops;
		dev_info(host->dev, "Using internal DMA controller.\n");
	} else {
		/* TRANS_MODE_EDMAC: check dma bindings again */
		if ((device_property_string_array_count(dev, "dma-names") < 0) ||
		    !device_property_present(dev, "dmas")) {
			goto no_dma;
		}
		host->dma_ops = &dw_mci_edmac_ops;
		dev_info(host->dev, "Using external DMA controller.\n");
	}

	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
				__func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = TRANS_MODE_PIO;
}

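/*
 * Software timeout for CMD11 (voltage switch). If the card never
 * finishes the switch we may get no interrupt at all, so fake a
 * response timeout and let the state machine clean up.
 */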
static void dw_mci_cmd11_timer(struct timer_list *t)
{
	struct dw_mci *host = from_timer(host, t, cmd11_timer);

	if (host->state != STATE_SENDING_CMD11) {
		dev_warn(host->dev, "Unexpected CMD11 timeout\n");
		return;
	}

	host->cmd_status = SDMMC_INT_RTO;
	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}

static void dw_mci_cto_timer(struct timer_list *t)
{
	struct dw_mci *host = from_timer(host, t, cto_timer);
	unsigned long irqflags;
	u32 pending;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/*
	 * If somehow we have very bad interrupt latency it's remotely possible
	 * that the timer could fire while the interrupt is still pending or
	 * while the interrupt is midway through running. Let's be paranoid
	 * and detect those two cases. Note that this paranoia is somewhat
	 * justified because in this function we don't actually cancel the
	 * pending command in the controller--we just assume it will never come.
	 */
	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
	if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) {
		/* The interrupt should fire; no need to act but we can warn */
		dev_warn(host->dev, "Unexpected interrupt latency\n");
		goto exit;
	}
	if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) {
		/* Presumably interrupt handler couldn't delete the timer */
		dev_warn(host->dev, "CTO timeout when already completed\n");
		goto exit;
	}

	/*
	 * Continued paranoia to make sure we're in the state we expect.
	 * This paranoia isn't really justified but it seems good to be safe.
	 */
	switch (host->state) {
	case STATE_SENDING_CMD11:
	case STATE_SENDING_CMD:
	case STATE_SENDING_STOP:
		/*
		 * If CMD_DONE interrupt does NOT come in sending command
		 * state, we should notify the driver to terminate current
		 * transfer and report a command timeout to the core.
		 */
		host->cmd_status = SDMMC_INT_RTO;
		set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
		break;
	default:
		dev_warn(host->dev, "Unexpected command timeout, state %d\n",
			 host->state);
		break;
	}

exit:
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static void dw_mci_dto_timer(struct timer_list *t)
{
	struct dw_mci *host = from_timer(host, t, dto_timer);
	unsigned long irqflags;
	u32 pending;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/*
	 * The DTO timer is much longer than the CTO timer, so it's even less
	 * likely that we'll hit these cases, but it pays to be paranoid.
	 */
	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
	if (pending & SDMMC_INT_DATA_OVER) {
		/* The interrupt should fire; no need to act but we can warn */
		dev_warn(host->dev, "Unexpected data interrupt latency\n");
		goto exit;
	}
	if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
		/* Presumably interrupt handler couldn't delete the timer */
		dev_warn(host->dev, "DTO timeout when already completed\n");
		goto exit;
	}

	/*
	 * Continued paranoia to make sure we're in the state we expect.
	 * This paranoia isn't really justified but it seems good to be safe.
	 */
	switch (host->state) {
	case STATE_SENDING_DATA:
	case STATE_DATA_BUSY:
		/*
		 * If DTO interrupt does NOT come in sending data state,
		 * we should notify the driver to terminate current transfer
		 * and report a data timeout to the core.
		 */
		host->data_status = SDMMC_INT_DRTO;
		set_bit(EVENT_DATA_ERROR, &host->pending_events);
		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
		break;
	default:
		dev_warn(host->dev, "Unexpected data timeout, state %d\n",
			 host->state);
		break;
	}

exit:
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

#ifdef CONFIG_OF
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	/* find the reset controller if one exists */
	pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset");
	if (IS_ERR(pdata->rstc))
		return ERR_CAST(pdata->rstc);

	if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev,
			 "fifo-depth property not found, using value of FIFOTH register as default\n");

	device_property_read_u32(dev, "card-detect-delay",
				 &pdata->detect_delay_ms);

	device_property_read_u32(dev, "data-addr", &host->data_addr_override);

	if (device_property_present(dev, "fifo-watermark-aligned"))
		host->wm_aligned = true;

	if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	return pdata;
}

#else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */

static void dw_mci_enable_cd(struct dw_mci *host)
{
	unsigned long irqflags;
	u32 temp;

	/*
	 * No need for CD if the slot has a non-error GPIO,
	 * or if broken card detection requires polling.
	 */
	if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL)
		return;

	if (mmc_gpio_get_cd(host->slot->mmc) < 0) {
		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_CD;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}
}

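/*
 * Common probe path shared by the platform glue drivers: parse DT,
 * enable clocks, reset the controller, detect the host data width,
 * set up DMA, FIFO thresholds and interrupts, then register the slot.
 */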
int dw_mci_probe(struct dw_mci *host)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int width, i, ret = 0;
	u32 fifo_size;

	if (!host->pdata) {
		host->pdata = dw_mci_parse_dt(host);
		if (IS_ERR(host->pdata))
			return dev_err_probe(host->dev, PTR_ERR(host->pdata),
					     "platform data not available\n");
	}

	host->biu_clk = devm_clk_get(host->dev, "biu");
	if (IS_ERR(host->biu_clk)) {
		dev_dbg(host->dev, "biu clock not available\n");
	} else {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable biu clock\n");
			return ret;
		}
	}

	host->ciu_clk = devm_clk_get(host->dev, "ciu");
	if (IS_ERR(host->ciu_clk)) {
		dev_dbg(host->dev, "ciu clock not available\n");
		host->bus_hz = host->pdata->bus_hz;
	} else {
		ret = clk_prepare_enable(host->ciu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable ciu clock\n");
			goto err_clk_biu;
		}

		if (host->pdata->bus_hz) {
			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
			if (ret)
				dev_warn(host->dev,
					 "Unable to set bus rate to %uHz\n",
					 host->pdata->bus_hz);
		}
		host->bus_hz = clk_get_rate(host->ciu_clk);
	}

	if (!host->bus_hz) {
		dev_err(host->dev,
			"Platform data must supply bus speed\n");
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	if (host->pdata->rstc) {
		reset_control_assert(host->pdata->rstc);
		usleep_range(10, 50);
		reset_control_deassert(host->pdata->rstc);
	}

	if (drv_data && drv_data->init) {
		ret = drv_data->init(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific init failed\n");
			goto err_clk_ciu;
		}
	}

	timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0);
	timer_setup(&host->cto_timer, dw_mci_cto_timer, 0);
	timer_setup(&host->dto_timer, dw_mci_dto_timer, 0);

	spin_lock_init(&host->lock);
	spin_lock_init(&host->irq_lock);
	INIT_LIST_HEAD(&host->queue);

	dw_mci_init_fault(host);

	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
	if (!i) {
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		width = 16;
		host->data_shift = 1;
	} else if (i == 2) {
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		width = 64;
		host->data_shift = 3;
	} else {
		/* Check for a reserved value, and warn if it is */
		WARN((i != 1),
		     "HCON reports a reserved host data width!\n"
		     "Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		width = 32;
		host->data_shift = 2;
	}

	/* Reset all blocks */
	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);

	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/*
	 * FIFO threshold settings: RxMark = fifo_size / 2 - 1,
	 * TxMark = fifo_size / 2, DMA Size = 8
	 */
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware, you
		 * should put it in the platform data.
		 */
		fifo_size = mci_readl(host, FIFOTH);
		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
	} else {
		fifo_size = host->pdata->fifo_depth;
	}
	host->fifo_depth = fifo_size;
	host->fifoth_val =
		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
	mci_writel(host, FIFOTH, host->fifoth_val);

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	/*
	 * In 2.40a spec, Data offset is changed.
	 * Need to check the version-id and set data-offset for DATA register.
	 */
	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
	dev_info(host->dev, "Version ID is %04x\n", host->verid);

	if (host->data_addr_override)
		host->fifo_reg = host->regs + host->data_addr_override;
	else if (host->verid < DW_MMC_240A)
		host->fifo_reg = host->regs + DATA_OFFSET;
	else
		host->fifo_reg = host->regs + DATA_240A_OFFSET;

	tasklet_setup(&host->tasklet, dw_mci_tasklet_func);
	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
			       host->irq_flags, "dw-mci", host);
	if (ret)
		goto err_dmaunmap;

	/*
	 * Enable interrupts for command done, data over, data empty,
	 * receive ready and error such as transmit, receive timeout, crc error
	 */
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	/* Enable mci interrupt */
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	dev_info(host->dev,
		 "DW MMC controller at irq %d, %d bit host data width, %u deep fifo\n",
		 host->irq, width, fifo_size);

	/* We need at least one slot to succeed */
	ret = dw_mci_init_slot(host);
	if (ret) {
		dev_dbg(host->dev, "slot init failed\n");
		goto err_dmaunmap;
	}

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;

err_dmaunmap:
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	reset_control_assert(host->pdata->rstc);

err_clk_ciu:
	clk_disable_unprepare(host->ciu_clk);

err_clk_biu:
	clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_probe);

void dw_mci_remove(struct dw_mci *host)
{
	dev_dbg(host->dev, "remove slot\n");
	if (host->slot)
		dw_mci_cleanup_slot(host->slot);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	reset_control_assert(host->pdata->rstc);

	clk_disable_unprepare(host->ciu_clk);
	clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);

#ifdef CONFIG_PM
int dw_mci_runtime_suspend(struct device *dev)
{
	struct dw_mci *host = dev_get_drvdata(dev);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	clk_disable_unprepare(host->ciu_clk);

	if (host->slot &&
	    (mmc_can_gpio_cd(host->slot->mmc) ||
	     !mmc_card_is_removable(host->slot->mmc)))
		clk_disable_unprepare(host->biu_clk);

	return 0;
}
EXPORT_SYMBOL(dw_mci_runtime_suspend);

int dw_mci_runtime_resume(struct device *dev)
{
	int ret = 0;
	struct dw_mci *host = dev_get_drvdata(dev);

	if (host->slot &&
	    (mmc_can_gpio_cd(host->slot->mmc) ||
	     !mmc_card_is_removable(host->slot->mmc))) {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret)
			return ret;
	}

	ret = clk_prepare_enable(host->ciu_clk);
	if (ret)
		goto err;

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		clk_disable_unprepare(host->ciu_clk);
		ret = -ENODEV;
		goto err;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value of the FIFOTH register
	 * and invalidate prev_blksz with zero
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	if (host->slot && host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
		dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);

	/* Force setup bus to guarantee available clock output */
	dw_mci_setup_bus(host->slot, true);

	/* Re-enable SDIO interrupts. */
	if (sdio_irq_claimed(host->slot->mmc))
		__dw_mci_enable_sdio_irq(host->slot, 1);

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;

err:
	if (host->slot &&
	    (mmc_can_gpio_cd(host->slot->mmc) ||
	     !mmc_card_is_removable(host->slot->mmc)))
		clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_runtime_resume);
#endif /* CONFIG_PM */

static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}

static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Synopsys DesignWare Multimedia Card Interface driver
4 * (Based on NXP driver for lpc 31xx)
5 *
6 * Copyright (C) 2009 NXP Semiconductors
7 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 */
9
10#include <linux/blkdev.h>
11#include <linux/clk.h>
12#include <linux/debugfs.h>
13#include <linux/device.h>
14#include <linux/dma-mapping.h>
15#include <linux/err.h>
16#include <linux/init.h>
17#include <linux/interrupt.h>
18#include <linux/iopoll.h>
19#include <linux/ioport.h>
20#include <linux/module.h>
21#include <linux/platform_device.h>
22#include <linux/pm_runtime.h>
23#include <linux/seq_file.h>
24#include <linux/slab.h>
25#include <linux/stat.h>
26#include <linux/delay.h>
27#include <linux/irq.h>
28#include <linux/mmc/card.h>
29#include <linux/mmc/host.h>
30#include <linux/mmc/mmc.h>
31#include <linux/mmc/sd.h>
32#include <linux/mmc/sdio.h>
33#include <linux/bitops.h>
34#include <linux/regulator/consumer.h>
35#include <linux/of.h>
36#include <linux/of_gpio.h>
37#include <linux/mmc/slot-gpio.h>
38
39#include "dw_mmc.h"
40
41/* Common flag combinations */
42#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
43 SDMMC_INT_HTO | SDMMC_INT_SBE | \
44 SDMMC_INT_EBE | SDMMC_INT_HLE)
45#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
46 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
47#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
48 DW_MCI_CMD_ERROR_FLAGS)
49#define DW_MCI_SEND_STATUS 1
50#define DW_MCI_RECV_STATUS 2
51#define DW_MCI_DMA_THRESHOLD 16
52
53#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
54#define DW_MCI_FREQ_MIN 100000 /* unit: HZ */
55
56#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
57 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
58 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
59 SDMMC_IDMAC_INT_TI)
60
61#define DESC_RING_BUF_SZ PAGE_SIZE
62
63struct idmac_desc_64addr {
64 u32 des0; /* Control Descriptor */
65#define IDMAC_OWN_CLR64(x) \
66 !((x) & cpu_to_le32(IDMAC_DES0_OWN))
67
68 u32 des1; /* Reserved */
69
70 u32 des2; /*Buffer sizes */
71#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
72 ((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
73 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))
74
75 u32 des3; /* Reserved */
76
77 u32 des4; /* Lower 32-bits of Buffer Address Pointer 1*/
78 u32 des5; /* Upper 32-bits of Buffer Address Pointer 1*/
79
80 u32 des6; /* Lower 32-bits of Next Descriptor Address */
81 u32 des7; /* Upper 32-bits of Next Descriptor Address */
82};
83
84struct idmac_desc {
85 __le32 des0; /* Control Descriptor */
86#define IDMAC_DES0_DIC BIT(1)
87#define IDMAC_DES0_LD BIT(2)
88#define IDMAC_DES0_FD BIT(3)
89#define IDMAC_DES0_CH BIT(4)
90#define IDMAC_DES0_ER BIT(5)
91#define IDMAC_DES0_CES BIT(30)
92#define IDMAC_DES0_OWN BIT(31)
93
94 __le32 des1; /* Buffer sizes */
95#define IDMAC_SET_BUFFER1_SIZE(d, s) \
96 ((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))
97
98 __le32 des2; /* buffer 1 physical address */
99
100 __le32 des3; /* buffer 2 physical address */
101};
102
103/* Each descriptor can transfer up to 4KB of data in chained mode */
104#define DW_MCI_DESC_DATA_LENGTH 0x1000
105
106#if defined(CONFIG_DEBUG_FS)
107static int dw_mci_req_show(struct seq_file *s, void *v)
108{
109 struct dw_mci_slot *slot = s->private;
110 struct mmc_request *mrq;
111 struct mmc_command *cmd;
112 struct mmc_command *stop;
113 struct mmc_data *data;
114
115 /* Make sure we get a consistent snapshot */
116 spin_lock_bh(&slot->host->lock);
117 mrq = slot->mrq;
118
119 if (mrq) {
120 cmd = mrq->cmd;
121 data = mrq->data;
122 stop = mrq->stop;
123
124 if (cmd)
125 seq_printf(s,
126 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
127 cmd->opcode, cmd->arg, cmd->flags,
128 cmd->resp[0], cmd->resp[1], cmd->resp[2],
129 cmd->resp[2], cmd->error);
130 if (data)
131 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
132 data->bytes_xfered, data->blocks,
133 data->blksz, data->flags, data->error);
134 if (stop)
135 seq_printf(s,
136 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
137 stop->opcode, stop->arg, stop->flags,
138 stop->resp[0], stop->resp[1], stop->resp[2],
139 stop->resp[2], stop->error);
140 }
141
142 spin_unlock_bh(&slot->host->lock);
143
144 return 0;
145}
146DEFINE_SHOW_ATTRIBUTE(dw_mci_req);
147
148static int dw_mci_regs_show(struct seq_file *s, void *v)
149{
150 struct dw_mci *host = s->private;
151
152 pm_runtime_get_sync(host->dev);
153
154 seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
155 seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
156 seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
157 seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
158 seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
159 seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
160
161 pm_runtime_put_autosuspend(host->dev);
162
163 return 0;
164}
165DEFINE_SHOW_ATTRIBUTE(dw_mci_regs);
166
167static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
168{
169 struct mmc_host *mmc = slot->mmc;
170 struct dw_mci *host = slot->host;
171 struct dentry *root;
172
173 root = mmc->debugfs_root;
174 if (!root)
175 return;
176
177 debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops);
178 debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops);
179 debugfs_create_u32("state", S_IRUSR, root, &host->state);
180 debugfs_create_xul("pending_events", S_IRUSR, root,
181 &host->pending_events);
182 debugfs_create_xul("completed_events", S_IRUSR, root,
183 &host->completed_events);
184}
185#endif /* defined(CONFIG_DEBUG_FS) */
186
187static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
188{
189 u32 ctrl;
190
191 ctrl = mci_readl(host, CTRL);
192 ctrl |= reset;
193 mci_writel(host, CTRL, ctrl);
194
195 /* wait till resets clear */
196 if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl,
197 !(ctrl & reset),
198 1, 500 * USEC_PER_MSEC)) {
199 dev_err(host->dev,
200 "Timeout resetting block (ctrl reset %#x)\n",
201 ctrl & reset);
202 return false;
203 }
204
205 return true;
206}
207
208static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
209{
210 u32 status;
211
212 /*
213 * Databook says that before issuing a new data transfer command
214 * we need to check to see if the card is busy. Data transfer commands
215 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
216 *
217 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
218 * expected.
219 */
220 if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
221 !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
222 if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
223 status,
224 !(status & SDMMC_STATUS_BUSY),
225 10, 500 * USEC_PER_MSEC))
226 dev_err(host->dev, "Busy; trying anyway\n");
227 }
228}
229
230static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
231{
232 struct dw_mci *host = slot->host;
233 unsigned int cmd_status = 0;
234
235 mci_writel(host, CMDARG, arg);
236 wmb(); /* drain writebuffer */
237 dw_mci_wait_while_busy(host, cmd);
238 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
239
240 if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
241 !(cmd_status & SDMMC_CMD_START),
242 1, 500 * USEC_PER_MSEC))
243 dev_err(&slot->mmc->class_dev,
244 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
245 cmd, arg, cmd_status);
246}

static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock. That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	if (cmd->data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}

static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
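		/*
		 * Build the CMD52 argument by hand: bit 31 selects a write,
		 * bits 30:28 the target function (0, i.e. the CCCR),
		 * bits 25:9 the register address (SDIO_CCCR_ABORT), and the
		 * data byte carries the number of the function to abort,
		 * recovered from the original CMD53 argument.
		 */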
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
	       SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}

static inline void dw_mci_set_cto(struct dw_mci *host)
{
	unsigned int cto_clks;
	unsigned int cto_div;
	unsigned int cto_ms;
	unsigned long irqflags;

	cto_clks = mci_readl(host, TMOUT) & 0xff;
	cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
	if (cto_div == 0)
		cto_div = 1;

	cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div,
				  host->bus_hz);

	/* add a bit of spare time */
	cto_ms += 10;
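	/*
	 * Illustrative numbers only: with bus_hz = 100 MHz, cto_clks = 255
	 * and cto_div = 2, the raw timeout is 1000 * 255 * 2 / 100000000
	 * = 0.0051 ms, which DIV_ROUND_UP_ULL() rounds to 1 ms; the spare
	 * time above then makes the software timer fire after ~11 ms.
	 */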

	/*
	 * The durations we're working with are fairly short so we have to be
	 * extra careful about synchronization here. Specifically in hardware a
	 * command timeout is _at most_ 5.1 ms, so that means we expect an
	 * interrupt (either command done or timeout) to come rather quickly
	 * after the mci_writel. ...but just in case we have a long interrupt
	 * latency let's add a bit of paranoia.
	 *
	 * In general we'll assume that at least an interrupt will be asserted
	 * in hardware by the time the cto_timer runs. ...and if it hasn't
	 * been asserted in hardware by that time then we'll assume it'll never
	 * come.
	 */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
		mod_timer(&host->cto_timer,
			  jiffies + msecs_to_jiffies(cto_ms) + 1);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd_flags);

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);

	/* response expected command only */
	if (cmd_flags & SDMMC_CMD_RESP_EXP)
		dw_mci_set_cto(host);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = &host->stop_abort;

	dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data && data->host_cookie == COOKIE_MAPPED) {
		dma_unmap_sg(host->dev,
			     data->sg,
			     data->sg_len,
			     mmc_get_dma_dir(data));
		data->host_cookie = COOKIE_UNMAPPED;
	}
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}

static void dw_mci_dmac_complete_dma(void *arg)
{
	struct dw_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	if ((host->use_dma == TRANS_MODE_EDMAC) &&
	    data && (data->flags & MMC_DATA_READ))
		/* Invalidate cache after read */
		dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
				    data->sg,
				    data->sg_len,
				    DMA_FROM_DEVICE);

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}

static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);
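		/*
		 * For example, with a 4 KiB PAGE_SIZE and the 32-byte 64-bit
		 * descriptors above this works out to a ring of 128 entries;
		 * architectures with larger pages get proportionally more.
		 */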

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
		     i++, p++) {
			p->des6 = (host->sg_dma +
				   (sizeof(struct idmac_desc_64addr) *
				    (i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
					 (i + 1))) >> 32;
			/* Initialize reserved and buffer size fields to "0" */
			p->des0 = 0;
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc);
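		/*
		 * With the 16-byte 32-bit descriptors the same 4 KiB buffer
		 * holds a ring of 256 entries.
		 */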

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu;
		     i < host->ring_size - 1;
		     i++, p++) {
			p->des3 = cpu_to_le32(host->sg_dma +
					(sizeof(struct idmac_desc) * (i + 1)));
			p->des0 = 0;
			p->des1 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = cpu_to_le32(host->sg_dma);
		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
			   SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
			   SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}

static inline int dw_mci_prepare_desc64(struct dw_mci *host,
					struct mmc_data *data,
					unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc_64addr *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u64 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;
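			/*
			 * Each descriptor covers at most 4 KiB, so e.g. a
			 * 9 KiB segment is split into 4 KiB + 4 KiB + 1 KiB
			 * descriptors by this loop.
			 */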

			/*
			 * Wait for the IDMAC's earlier clearing of the OWN
			 * bit to become visible: the descriptor may still
			 * look owned by the IDMAC because its write ops and
			 * the CPU's read ops are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						!(val & IDMAC_DES0_OWN),
						10, 100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
				     IDMAC_DES0_CH;

			/* Buffer length */
			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des4 = mem_addr & 0xffffffff;
			desc->des5 = mem_addr >> 32;

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc_last->des0 |= IDMAC_DES0_LD;

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}

static inline int dw_mci_prepare_desc32(struct dw_mci *host,
					struct mmc_data *data,
					unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u32 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the IDMAC's earlier clearing of the OWN
			 * bit to become visible: the descriptor may still
			 * look owned by the IDMAC because its write ops and
			 * the CPU's read ops are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						      IDMAC_OWN_CLR64(val),
						      10,
						      100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
						 IDMAC_DES0_DIC |
						 IDMAC_DES0_CH);

			/* Buffer length */
			IDMAC_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des2 = cpu_to_le32(mem_addr);

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);

	/* Set last descriptor */
	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
					 IDMAC_DES0_DIC));
	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}

static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;
	int ret;

	if (host->dma_64bit_address == 1)
		ret = dw_mci_prepare_desc64(host, host->data, sg_len);
	else
		ret = dw_mci_prepare_desc32(host, host->data, sg_len);

	if (ret)
		goto out;

	/* drain writebuffer */
	wmb();

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* drain writebuffer */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);

out:
	return ret;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

static void dw_mci_edmac_stop_dma(struct dw_mci *host)
{
	dmaengine_terminate_async(host->dms->ch);
}

static int dw_mci_edmac_start_dma(struct dw_mci *host,
				  unsigned int sg_len)
{
	struct dma_slave_config cfg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct scatterlist *sgl = host->data->sg;
	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 sg_elems = host->data->sg_len;
	u32 fifoth_val;
	u32 fifo_offset = host->fifo_reg - host->regs;
	int ret = 0;

	/* Set external dma config: burst size, burst width */
	cfg.dst_addr = host->phy_regs + fifo_offset;
	cfg.src_addr = cfg.dst_addr;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	/* Match burst msize with external dma config */
	fifoth_val = mci_readl(host, FIFOTH);
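	/*
	 * FIFOTH[30:28] holds the DMA multiple-transaction size (MSIZE);
	 * decode it so the external DMA engine bursts at the same
	 * granularity the FIFO watermarks were programmed for.
	 */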
	cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
	cfg.src_maxburst = cfg.dst_maxburst;

	if (host->data->flags & MMC_DATA_WRITE)
		cfg.direction = DMA_MEM_TO_DEV;
	else
		cfg.direction = DMA_DEV_TO_MEM;

	ret = dmaengine_slave_config(host->dms->ch, &cfg);
	if (ret) {
		dev_err(host->dev, "Failed to config edmac.\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
				       sg_len, cfg.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(host->dev, "Can't prepare slave sg.\n");
		return -EBUSY;
	}

	/* Set dw_mci_dmac_complete_dma as callback */
	desc->callback = dw_mci_dmac_complete_dma;
	desc->callback_param = (void *)host;
	dmaengine_submit(desc);

	/* Flush cache before write */
	if (host->data->flags & MMC_DATA_WRITE)
		dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl,
				       sg_elems, DMA_TO_DEVICE);

	dma_async_issue_pending(host->dms->ch);

	return 0;
}

static int dw_mci_edmac_init(struct dw_mci *host)
{
	/* Request external dma channel */
	host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
	if (!host->dms)
		return -ENOMEM;

	host->dms->ch = dma_request_chan(host->dev, "rx-tx");
	if (IS_ERR(host->dms->ch)) {
		int ret = PTR_ERR(host->dms->ch);

		dev_err(host->dev, "Failed to get external DMA channel.\n");
		kfree(host->dms);
		host->dms = NULL;
		return ret;
	}

	return 0;
}

static void dw_mci_edmac_exit(struct dw_mci *host)
{
	if (host->dms) {
		if (host->dms->ch) {
			dma_release_channel(host->dms->ch);
			host->dms->ch = NULL;
		}
		kfree(host->dms);
		host->dms = NULL;
	}
}

static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
	.init = dw_mci_edmac_init,
	.exit = dw_mci_edmac_exit,
	.start = dw_mci_edmac_start_dma,
	.stop = dw_mci_edmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   int cookie)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_len;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    mmc_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	data->host_cookie = cookie;

	return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	/* This data might be unmapped at this time */
	data->host_cookie = COOKIE_UNMAPPED;

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
				    COOKIE_PRE_MAPPED) < 0)
		data->host_cookie = COOKIE_UNMAPPED;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     mmc_get_dma_dir(data));
	data->host_cookie = COOKIE_UNMAPPED;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if ((mmc->caps & MMC_CAP_NEEDS_POLL) ||
	    !mmc_card_is_removable(mmc)) {
		present = 1;

		if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
			if (mmc->caps & MMC_CAP_NEEDS_POLL) {
				dev_info(&mmc->class_dev,
					 "card is polling.\n");
			} else {
				dev_info(&mmc->class_dev,
					 "card is non-removable.\n");
			}
			set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		}

		return present;
	} else if (gpio_cd >= 0)
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;
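	/* CDETECT is active low: a 0 in the slot's bit means card present. */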

	spin_lock_bh(&host->lock);
	if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is present\n");
	else if (!present &&
		 !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is not present\n");
	spin_unlock_bh(&host->lock);

	return present;
}

static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	/* pio should skip this scenario */
	if (!host->use_dma)
		return;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width)
		goto done;

	do {
		if (!((blksz_depth % mszs[idx]) ||
		      (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
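	/*
	 * Worked example (illustrative numbers): a 64-deep, 32-bit wide FIFO
	 * with 512-byte blocks gives blksz_depth = 128 and tx_wmark_invers
	 * = 32; the loop settles on mszs[4] = 32, so MSIZE encodes a
	 * 32-transfer burst and rx_wmark becomes 31.
	 */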
	/*
	 * If idx reaches '0', it is not tried;
	 * thus, the initial values are used.
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
}

static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;
	u8 enable;

	/*
	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset
	 * is in the FIFO region, so we really shouldn't access it).
	 */
	if (host->verid < DW_MMC_240A ||
	    (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
		return;

	/*
	 * The card write threshold was introduced in 2.80a and is only
	 * used when HS400 mode is enabled.
	 */
	if (data->flags & MMC_DATA_WRITE &&
	    host->timing != MMC_TIMING_MMC_HS400)
		goto disable;

	if (data->flags & MMC_DATA_WRITE)
		enable = SDMMC_CARD_WR_THR_EN;
	else
		enable = SDMMC_CARD_RD_THR_EN;

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104 &&
	    host->timing != MMC_TIMING_MMC_HS400)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), 'thld_size <= blksz' works;
	 * if (blksz_depth) < (fifo_depth >> 1), thld_size must equal blksz.
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
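	/*
	 * SDMMC_SET_THLD() packs the threshold size and the read/write
	 * enable bit into CDTHRCTL; the exact field layout is defined in
	 * dw_mmc.h.
	 */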
	mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
	return;

disable:
	mci_writel(host, CDTHRCTL, 0);
}

static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dev_vdbg(host->dev,
			 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
			 (unsigned long)host->sg_cpu,
			 (unsigned long)host->sg_dma,
			 sg_len);

	/*
	 * Decide the MSIZE and RX/TX watermark.
	 * If the current block size is the same as the previous one,
	 * there is no need to update FIFOTH.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);

	if (host->dma_ops->start(host, sg_len)) {
		host->dma_ops->stop(host);
		/* We can't do DMA, try PIO for this one */
		dev_dbg(host->dev,
			"%s: fall back to PIO mode for current transfer\n",
			__func__);
		return -ENODEV;
	}

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int flags = SG_MITER_ATOMIC;
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dir_status = DW_MCI_RECV_STATUS;
	else
		host->dir_status = DW_MCI_SEND_STATUS;

	dw_mci_ctrl_thld(host, data);

	if (dw_mci_submit_data_dma(host, data)) {
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode. If wm_aligned
		 * is set, we set the watermark to match the data size.
		 * If the next transfer ends up using DMA mode,
		 * prev_blksz must be invalidated.
		 */
		if (host->wm_aligned)
			dw_mci_adjust_fifoth(host, data);
		else
			mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}

static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	slot->mmc->actual_clock = 0;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
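		/*
		 * The card clock is bus_hz / (2 * CLKDIV) for a non-zero
		 * divider. For example, bus_hz = 100 MHz and a requested
		 * 400 kHz give div = 250 before halving and CLKDIV = 125,
		 * i.e. 100 MHz / (2 * 125) = exactly 400 kHz.
		 */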

		if ((clock != slot->__clk_old &&
		     !test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
		    force_clkinit) {
			/* Silence the verbose log if calling from PM context */
			if (!force_clkinit)
				dev_info(&slot->mmc->class_dev,
					 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
					 slot->id, host->bus_hz, clock,
					 div ? ((host->bus_hz / div) >> 1) :
					 host->bus_hz, div);

			/*
			 * If the card is being polled, display the message
			 * only once at boot time.
			 */
			if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
			    slot->mmc->f_min == clock)
				set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
		}

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* keep the last clock value that was requested from core */
		slot->__clk_old = clock;
		slot->mmc->actual_clock = div ? ((host->bus_hz / div) >> 1) :
					  host->bus_hz;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data *data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz * data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;
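	/*
	 * SDMMC_CMD_INIT tells the controller to emit the card
	 * initialization sequence (at least 74 clock cycles) before this
	 * first command goes out on the bus.
	 */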

	if (data) {
		dw_mci_submit_data(host, data);
		wmb(); /* drain writebuffer */
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		unsigned long irqflags;

		/*
		 * Databook says to fail after 2ms w/ no response, but evidence
		 * shows that sometimes the cmd11 interrupt takes over 130ms.
		 * We'll set to 500ms, plus an extra jiffy just in case jiffies
		 * is just about to roll over.
		 *
		 * We do this whole thing under spinlock and only if the
		 * command hasn't already completed (indicating that the irq
		 * already ran so we don't want the timeout).
		 */
		spin_lock_irqsave(&host->irq_lock, irqflags);
		if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
			mod_timer(&host->cmd11_timer,
				  jiffies + msecs_to_jiffies(500) + 1);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}

	host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_WAITING_CMD11_DONE) {
		dev_warn(&slot->mmc->class_dev,
			 "Voltage change didn't complete\n");
		/*
		 * this case isn't expected to happen, so we can
		 * either crash here or just try to continue on
		 * in the closest possible state
		 */
		host->state = STATE_IDLE;
	}

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */

	if (!dw_mci_get_cd(mmc)) {
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_bh(&host->lock);

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
						    ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/* return if we failed to turn on vmmc */
				return;
			}
		}
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_ON:
		if (!slot->host->vqmmc_enabled) {
			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0)
					dev_err(slot->host->dev,
						"failed to enable vqmmc\n");
				else
					slot->host->vqmmc_enabled = true;

			} else {
				/* Keep track so we don't reset again */
				slot->host->vqmmc_enabled = true;
			}

			/* Reset our state machine after powering on */
			dw_mci_ctrl_reset(slot->host,
					  SDMMC_CTRL_ALL_RESET_FLAGS);
		}

		/* Adjust clock / bus width after power is up */
		dw_mci_setup_bus(slot, false);

		break;
	case MMC_POWER_OFF:
		/* Turn clock off before power goes down */
		dw_mci_setup_bus(slot, false);

		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
			regulator_disable(mmc->supply.vqmmc);
		slot->host->vqmmc_enabled = false;

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}

	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;
}

static int dw_mci_card_busy(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 status;

	/*
	 * Check the busy bit which is low when DAT[3:0]
	 * (the data lines) are 0000
	 */
	status = mci_readl(slot->host, STATUS);

	return !!(status & SDMMC_STATUS_BUSY);
}

static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int ret;

	if (drv_data && drv_data->switch_voltage)
		return drv_data->switch_voltage(mmc, ios);

	/*
	 * Program the voltage. Note that some instances of dw_mmc may use
	 * the UHS_REG for this. For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly. Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		uhs &= ~v18;
	else
		uhs |= v18;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = mmc_regulator_set_vqmmc(mmc, ios);
		if (ret < 0) {
			dev_dbg(&mmc->class_dev,
				"Regulator set error %d - %s V\n",
				ret, uhs & v18 ? "1.8" : "3.3");
			return ret;
		}
	}
	mci_writel(host, UHS_REG, uhs);

	return 0;
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	int gpio_ro = mmc_gpio_get_ro(mmc);

	/* Use platform get_ro function, else try onboard write protect */
	if (gpio_ro >= 0)
		read_only = gpio_ro;
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

static void dw_mci_hw_reset(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int reset;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dw_mci_idmac_reset(host);

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
			       SDMMC_CTRL_FIFO_RESET))
		return;

	/*
	 * According to eMMC spec, card reset procedure:
	 * tRstW >= 1us:   RST_n pulse width
	 * tRSCA >= 200us: RST_n to Command time
	 * tRSTH >= 1us:   RST_n high period
	 */
	reset = mci_readl(host, RST_N);
	reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
	mci_writel(host, RST_N, reset);
	usleep_range(1, 2);
	reset |= SDMMC_RST_HWACTIVE << slot->id;
	mci_writel(host, RST_N, reset);
	usleep_range(200, 300);
}

static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	/*
	 * Low power mode will stop the card clock when idle. According to the
	 * description of the CLKENA register we should disable low power mode
	 * for SDIO cards if we need SDIO interrupts to work.
	 */
	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
		u32 clk_en_a_old;
		u32 clk_en_a;

		clk_en_a_old = mci_readl(host, CLKENA);

		if (card->type == MMC_TYPE_SDIO ||
		    card->type == MMC_TYPE_SD_COMBO) {
			set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old & ~clken_low_pwr;
		} else {
			clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old | clken_low_pwr;
		}

		if (clk_en_a != clk_en_a_old) {
			mci_writel(host, CLKENA, clk_en_a);
			mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
				     SDMMC_CMD_PRV_DAT_WAIT, 0);
		}
	}
}

static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb)
{
	struct dw_mci *host = slot->host;
	unsigned long irqflags;
	u32 int_mask;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb)
		int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
	else
		int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
	mci_writel(host, INTMASK, int_mask);

	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	__dw_mci_enable_sdio_irq(slot, enb);

	/* Avoid runtime suspending the device when SDIO IRQ is enabled */
	if (enb)
		pm_runtime_get_noresume(host->dev);
	else
		pm_runtime_put_noidle(host->dev);
}

static void dw_mci_ack_sdio_irq(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);

	__dw_mci_enable_sdio_irq(slot, 1);
}

static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int err = -EINVAL;

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode);
	return err;
}

static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;

	if (drv_data && drv_data->prepare_hs400_tuning)
		return drv_data->prepare_hs400_tuning(host, ios);

	return 0;
}

static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;
	u32 status = 0;

	/*
	 * Resetting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS
		 * register to clear any interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		if (!host->use_dma) {
			ret = true;
			goto ciu_out;
		}

		/* Wait for dma_req to be cleared */
		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
					      status,
					      !(status & SDMMC_STATUS_DMA_REQ),
					      1, 500 * USEC_PER_MSEC)) {
			dev_err(host->dev,
				"%s: Timeout waiting for dma_req to be cleared\n",
				__func__);
			goto ciu_out;
		}

		/* when using DMA next we reset the fifo again */
		if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
			goto ciu_out;
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev,
				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

	if (host->use_dma == TRANS_MODE_IDMAC)
		/* It is also required that we reinit idmac */
		dw_mci_idmac_init(host);

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers */
	mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}

static const struct mmc_host_ops dw_mci_ops = {
	.request = dw_mci_request,
	.pre_req = dw_mci_pre_req,
	.post_req = dw_mci_post_req,
	.set_ios = dw_mci_set_ios,
	.get_ro = dw_mci_get_ro,
	.get_cd = dw_mci_get_cd,
	.hw_reset = dw_mci_hw_reset,
	.enable_sdio_irq = dw_mci_enable_sdio_irq,
	.ack_sdio_irq = dw_mci_ack_sdio_irq,
	.execute_tuning = dw_mci_execute_tuning,
	.card_busy = dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
	.init_card = dw_mci_init_card,
	.prepare_hs400_tuning = dw_mci_prepare_hs400_tuning,
};

static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host *prev_mmc = host->slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
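	/*
	 * For 136-bit responses the controller stores the most significant
	 * word in RESP3, while the MMC core expects it in resp[0]; hence
	 * the reversed copy below.
	 */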
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	return cmd->error;
}

static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status == DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status == DW_MCI_RECV_STATUS) {
				data->error = -EILSEQ;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EILSEQ;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}

static void dw_mci_set_drto(struct dw_mci *host)
{
	unsigned int drto_clks;
	unsigned int drto_div;
	unsigned int drto_ms;
	unsigned long irqflags;

	drto_clks = mci_readl(host, TMOUT) >> 8;
	drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
	if (drto_div == 0)
		drto_div = 1;

	drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
				   host->bus_hz);

	/* add a bit of spare time */
	drto_ms += 10;

	spin_lock_irqsave(&host->irq_lock, irqflags);
	if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
		mod_timer(&host->dto_timer,
			  jiffies + msecs_to_jiffies(drto_ms));
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
{
	if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
		return false;

	/*
	 * Really be certain that the timer has stopped. This is a bit of
	 * paranoia and could only really happen if we had really bad
	 * interrupt latency and the interrupt routine and timeout were
	 * running concurrently so that the del_timer() in the interrupt
	 * handler couldn't run.
	 */
	WARN_ON(del_timer_sync(&host->cto_timer));
	clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);

	return true;
}

static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
{
	if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
		return false;

	/* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
	WARN_ON(del_timer_sync(&host->dto_timer));
	clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);

	return true;
}

static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data *data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

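	/*
	 * Drive the request state machine: a normal data command walks
	 * SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> (SENDING_STOP) and
	 * back to IDLE, with STATE_DATA_ERROR as a detour on data errors.
	 * Each pass consumes one completed event and loops until the state
	 * stops changing.
	 */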
	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
		case STATE_WAITING_CMD11_DONE:
			break;

		case STATE_SENDING_CMD11:
		case STATE_SENDING_CMD:
			if (!dw_mci_clear_pending_cmd_complete(host))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				__dw_mci_start_request(host, host->slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				/*
				 * During the UHS tuning sequence, sending a
				 * stop command after a response CRC error
				 * would throw the system into a confused
				 * state, causing all future tuning phases to
				 * report failure.
				 *
				 * In such a case the controller will move
				 * into a data transfer state after a response
				 * error or response CRC error. Let's let that
				 * finish before trying to send a stop, so
				 * we'll go to STATE_SENDING_DATA.
				 *
				 * Although letting the data transfer take
				 * place will waste a bit of time (we already
				 * know the command was bad), it can't cause
				 * any errors since it's possible it would
				 * have taken place anyway if this tasklet got
				 * delayed. Allowing the transfer to take
				 * place avoids races and keeps things simple.
				 */
				if (err != -ETIMEDOUT) {
					state = STATE_SENDING_DATA;
					continue;
				}

				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			fallthrough;

		case STATE_SENDING_DATA:
			/*
			 * We could get a data error and never a transfer
			 * complete so we'd better check for it here.
			 *
			 * Note that we don't really care if we also got a
			 * transfer complete; stopping the DMA and sending an
			 * abort won't hurt.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (!(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events)) {
				/*
				 * Not all data-related interrupts have
				 * arrived yet; when reading, arm the data
				 * read timeout so a missing data-over
				 * interrupt still ends the request.
				 */
				if (host->dir_status == DW_MCI_RECV_STATUS)
					dw_mci_set_drto(host);
				break;
			}

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);

			/*
			 * Handle an EVENT_DATA_ERROR that might have shown up
			 * before the transfer completed. This might not have
			 * been caught by the check above because the interrupt
			 * could have gone off between the previous check and
			 * the check for transfer complete.
			 *
			 * Technically this ought not be needed assuming we
			 * get a DATA_COMPLETE eventually (we'll notice the
			 * error and end the request), but it shouldn't hurt.
			 *
			 * This has the advantage of sending the stop command.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (!(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}
			prev_state = state = STATE_DATA_BUSY;

			fallthrough;

		case STATE_DATA_BUSY:
			if (!dw_mci_clear_pending_data_complete(host)) {
				/*
				 * A data error interrupt came in, but the
				 * data-over interrupt hasn't arrived within
				 * the given time; when reading, arm the data
				 * read timeout.
				 */
				if (host->dir_status == DW_MCI_RECV_STATUS)
					dw_mci_set_drto(host);
				break;
			}

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer */
				if (data->stop)
					send_stop_abort(host, data);
			} else {
				/*
				 * If we don't have a command complete now we'll
				 * never get one since we just reset everything;
				 * better end the request.
				 *
				 * If we do have a command complete we'll fall
				 * through to the SENDING_STOP command and
				 * everything will be peachy keen.
				 */
				if (!test_bit(EVENT_CMD_COMPLETE,
					      &host->pending_events)) {
					host->cmd = NULL;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}
			}

			/*
			 * If err is non-zero, the stop/abort command has
			 * already been issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			fallthrough;

		case STATE_SENDING_STOP:
			if (!dw_mci_clear_pending_cmd_complete(host))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (!mrq->sbc && mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);
}

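/*
 * The part_buf helpers below handle transfers whose byte count is not a
 * multiple of the FIFO word size: on pushes, stray trailing bytes are
 * accumulated until a full FIFO word can be written; on pulls, bytes left
 * over from the last FIFO read are handed out first.
 */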
/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}

/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}

/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min_t(int, cnt, host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}

/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}

static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
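	/*
	 * On architectures without efficient unaligned access, bounce
	 * through a stack-local aligned buffer so every FIFO access uses a
	 * naturally aligned pointer; the same pattern repeats in the other
	 * push/pull variants below.
	 */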
2235 if (unlikely((unsigned long)buf & 0x1)) {
2236 while (cnt >= 2) {
2237 u16 aligned_buf[64];
2238 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2239 int items = len >> 1;
2240 int i;
2241 /* memcpy from input buffer into aligned buffer */
2242 memcpy(aligned_buf, buf, len);
2243 buf += len;
2244 cnt -= len;
2245 /* push data from aligned buffer into fifo */
2246 for (i = 0; i < items; ++i)
2247 mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
2248 }
2249 } else
2250#endif
2251 {
2252 u16 *pdata = buf;
2253
2254 for (; cnt >= 2; cnt -= 2)
2255 mci_fifo_writew(host->fifo_reg, *pdata++);
2256 buf = pdata;
2257 }
2258 /* put anything remaining in the part_buf */
2259 if (cnt) {
2260 dw_mci_set_part_bytes(host, buf, cnt);
2261 /* Push data if we have reached the expected data length */
2262 if ((data->bytes_xfered + init_cnt) ==
2263 (data->blksz * data->blocks))
2264 mci_fifo_writew(host->fifo_reg, host->part_buf16);
2265 }
2266}
2267
2268static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2269{
2270#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2271 if (unlikely((unsigned long)buf & 0x1)) {
2272 while (cnt >= 2) {
2273 /* pull data from fifo into aligned buffer */
2274 u16 aligned_buf[64];
2275 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2276 int items = len >> 1;
2277 int i;
2278
2279 for (i = 0; i < items; ++i)
2280 aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
2281 /* memcpy from aligned buffer into output buffer */
2282 memcpy(buf, aligned_buf, len);
2283 buf += len;
2284 cnt -= len;
2285 }
2286 } else
2287#endif
2288 {
2289 u16 *pdata = buf;
2290
2291 for (; cnt >= 2; cnt -= 2)
2292 *pdata++ = mci_fifo_readw(host->fifo_reg);
2293 buf = pdata;
2294 }
2295 if (cnt) {
2296 host->part_buf16 = mci_fifo_readw(host->fifo_reg);
2297 dw_mci_pull_final_bytes(host, buf, cnt);
2298 }
2299}
2300
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try to push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;

		for (; cnt >= 4; cnt -= 4)
			mci_fifo_writel(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
	}
}

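/* 32-bit FIFO variant of dw_mci_pull_data16() */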
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;

		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_fifo_readl(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_fifo_readl(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

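/* 64-bit FIFO variant of dw_mci_push_data16(); partial bytes go via part_buf */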
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try to push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			mci_fifo_writeq(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
	}
}

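/* 64-bit FIFO variant of dw_mci_pull_data16() */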
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readq(host->fifo_reg);

			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_fifo_readq(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf = mci_fifo_readq(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

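/*
 * Hand out any bytes held over in the partial buffer first, then let the
 * width-specific pull routine read the remainder straight from the FIFO.
 */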
static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
	int len;

	/* get remaining partial bytes */
	len = dw_mci_pull_part_bytes(host, buf, cnt);
	if (unlikely(len == cnt))
		return;
	buf += len;
	cnt -= len;

	/* get the rest of the data */
	host->pull_data(host, buf, cnt);
}

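/*
 * PIO read path: walk the request's scatterlist with sg_miter and pull
 * however many bytes the FIFO currently holds. @dto signals end of
 * transfer, in which case we keep draining until the FIFO count is zero.
 */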
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if RXDR is ready, read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

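/*
 * PIO write path: walk the request's scatterlist and push data into
 * whatever space the FIFO has free, looping while TXDR keeps asserting.
 */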
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR is set, write again */

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

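/* Record command-phase status and kick the tasklet; called under irq_lock */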
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	del_timer(&host->cto_timer);

	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb(); /* drain writebuffer */

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}

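/* Tell the core that a card may have been inserted or removed */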
static void dw_mci_handle_cd(struct dw_mci *host)
{
	struct dw_mci_slot *slot = host->slot;

	if (slot->mmc->ops->card_event)
		slot->mmc->ops->card_event(slot->mmc);
	mmc_detect_change(slot->mmc,
		msecs_to_jiffies(host->pdata->detect_delay_ms));
}

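/*
 * Top-level interrupt handler: acknowledges and dispatches the SDMMC
 * interrupt sources (command/data completion, errors, PIO data ready,
 * card detect, SDIO), then services IDMAC descriptor interrupts when
 * the internal DMA controller is in use.
 */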
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	struct dw_mci_slot *slot = host->slot;
	unsigned long irqflags;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	if (pending) {
		/* Check volt switch first, since it can look like an error */
		if ((host->state == STATE_SENDING_CMD11) &&
		    (pending & SDMMC_INT_VOLT_SWITCH)) {
			mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
			pending &= ~SDMMC_INT_VOLT_SWITCH;

			/*
			 * Hold the lock; we know cmd11_timer can't be kicked
			 * off after the lock is released, so safe to delete.
			 */
			spin_lock_irqsave(&host->irq_lock, irqflags);
			dw_mci_cmd_interrupt(host, pending);
			spin_unlock_irqrestore(&host->irq_lock, irqflags);

			del_timer(&host->cmd11_timer);
		}

		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			spin_lock_irqsave(&host->irq_lock, irqflags);

			del_timer(&host->cto_timer);
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);

			spin_unlock_irqrestore(&host->irq_lock, irqflags);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error, report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			spin_lock_irqsave(&host->irq_lock, irqflags);

			del_timer(&host->dto_timer);

			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);

			spin_unlock_irqrestore(&host->irq_lock, irqflags);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			spin_lock_irqsave(&host->irq_lock, irqflags);

			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);

			spin_unlock_irqrestore(&host->irq_lock, irqflags);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			dw_mci_handle_cd(host);
		}

		if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
			mci_writel(host, RINTSTS,
				   SDMMC_INT_SDIO(slot->sdio_id));
			__dw_mci_enable_sdio_irq(slot, 0);
			sdio_signal_irq(slot->mmc);
		}
	}

	if (host->use_dma != TRANS_MODE_IDMAC)
		return IRQ_HANDLED;

	/* Handle IDMAC interrupts */
	if (host->dma_64bit_address == 1) {
		pending = mci_readl(host, IDSTS64);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
						  SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
				host->dma_ops->complete((void *)host);
		}
	} else {
		pending = mci_readl(host, IDSTS);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
						SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
				host->dma_ops->complete((void *)host);
		}
	}

	return IRQ_HANDLED;
}

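/* Combine platform data, DT/ACPI properties and per-controller driver caps */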
static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	struct mmc_host *mmc = slot->mmc;
	int ctrl_id;

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}

	if (drv_data && drv_data->caps) {
		if (ctrl_id >= drv_data->num_caps) {
			dev_err(host->dev, "invalid controller id %d\n",
				ctrl_id);
			return -EINVAL;
		}
		mmc->caps |= drv_data->caps[ctrl_id];
	}

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	mmc->f_min = DW_MCI_FREQ_MIN;
	if (!mmc->f_max)
		mmc->f_max = DW_MCI_FREQ_MAX;

	/* Process SDIO IRQs through the sdio_irq_work. */
	if (mmc->caps & MMC_CAP_SDIO_IRQ)
		mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	return 0;
}

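/*
 * Allocate and register the mmc_host for the (single) slot, sizing the
 * request limits to match the transfer mode in use.
 */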
static int dw_mci_init_slot(struct dw_mci *host)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	int ret;

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = 0;
	slot->sdio_id = host->sdio_id0 + slot->id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot = slot;

	mmc->ops = &dw_mci_ops;

	/* if there are external regulators, get them */
	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		goto err_host_allocated;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto err_host_allocated;

	ret = dw_mci_init_slot_caps(slot);
	if (ret)
		goto err_host_allocated;

	/* Useful defaults if platform data is unset. */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65535;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
		mmc->max_blk_count = mmc->max_req_size / 512;
	} else if (host->use_dma == TRANS_MODE_EDMAC) {
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535;
		mmc->max_blk_count = 65535;
		mmc->max_req_size =
				mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	} else {
		/* TRANS_MODE_PIO */
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size *
				    mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	}

	dw_mci_get_cd(mmc);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_host_allocated;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	return 0;

err_host_allocated:
	mmc_free_host(mmc);
	return ret;
}

static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
{
	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot = NULL;
	mmc_free_host(slot->mmc);
}

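/*
 * Probe HCON to find out which DMA interface the controller was built
 * with, allocate the IDMAC descriptor ring if needed, and fall back to
 * PIO when no usable DMA setup exists.
 */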
static void dw_mci_init_dma(struct dw_mci *host)
{
	int addr_config;
	struct device *dev = host->dev;

	/*
	 * Check transfer mode from HCON[17:16]
	 * Clarify the ambiguous description in the dw_mmc databook:
	 * 2b'00: No DMA Interface -> Actually means using Internal DMA block
	 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
	 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
	 * 2b'11: Non DW DMA Interface -> pio only
	 * Compared to DesignWare DMA Interface, Generic DMA Interface has a
	 * simpler request/acknowledge handshake mechanism and both of them
	 * are regarded as external dma master for dw_mmc.
	 */
	host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
	if (host->use_dma == DMA_INTERFACE_IDMA) {
		host->use_dma = TRANS_MODE_IDMAC;
	} else if (host->use_dma == DMA_INTERFACE_DWDMA ||
		   host->use_dma == DMA_INTERFACE_GDMA) {
		host->use_dma = TRANS_MODE_EDMAC;
	} else {
		goto no_dma;
	}

	/* Determine which DMA interface to use */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		/*
		 * Check ADDR_CONFIG bit in HCON to find
		 * IDMAC address bus width
		 */
		addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));

		if (addr_config == 1) {
			/* host supports IDMAC in 64-bit address mode */
			host->dma_64bit_address = 1;
			dev_info(host->dev,
				 "IDMAC supports 64-bit address mode.\n");
			if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
				dma_set_coherent_mask(host->dev,
						      DMA_BIT_MASK(64));
		} else {
			/* host supports IDMAC in 32-bit address mode */
			host->dma_64bit_address = 0;
			dev_info(host->dev,
				 "IDMAC supports 32-bit address mode.\n");
		}

		/* Alloc memory for sg translation */
		host->sg_cpu = dmam_alloc_coherent(host->dev,
						   DESC_RING_BUF_SZ,
						   &host->sg_dma, GFP_KERNEL);
		if (!host->sg_cpu) {
			dev_err(host->dev,
				"%s: could not alloc DMA memory\n",
				__func__);
			goto no_dma;
		}

		host->dma_ops = &dw_mci_idmac_ops;
		dev_info(host->dev, "Using internal DMA controller.\n");
	} else {
		/* TRANS_MODE_EDMAC: check dma bindings again */
		if ((device_property_read_string_array(dev, "dma-names",
						       NULL, 0) < 0) ||
		    !device_property_present(dev, "dmas")) {
			goto no_dma;
		}
		host->dma_ops = &dw_mci_edmac_ops;
		dev_info(host->dev, "Using external DMA controller.\n");
	}

	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
				__func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = TRANS_MODE_PIO;
}

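/* CMD11 (voltage switch) watchdog: report a response timeout on expiry */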
static void dw_mci_cmd11_timer(struct timer_list *t)
{
	struct dw_mci *host = from_timer(host, t, cmd11_timer);

	if (host->state != STATE_SENDING_CMD11) {
		dev_warn(host->dev, "Unexpected CMD11 timeout\n");
		return;
	}

	host->cmd_status = SDMMC_INT_RTO;
	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}

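/* Command response (CTO) watchdog: fires if CMD_DONE never arrived */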
static void dw_mci_cto_timer(struct timer_list *t)
{
	struct dw_mci *host = from_timer(host, t, cto_timer);
	unsigned long irqflags;
	u32 pending;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/*
	 * If somehow we have very bad interrupt latency it's remotely possible
	 * that the timer could fire while the interrupt is still pending or
	 * while the interrupt is midway through running. Let's be paranoid
	 * and detect those two cases. Note that this paranoia is somewhat
	 * justified because in this function we don't actually cancel the
	 * pending command in the controller; we just assume it will never come.
	 */
	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
	if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) {
		/* The interrupt should fire; no need to act but we can warn */
		dev_warn(host->dev, "Unexpected interrupt latency\n");
		goto exit;
	}
	if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) {
		/* Presumably the interrupt handler couldn't delete the timer */
		dev_warn(host->dev, "CTO timeout when already completed\n");
		goto exit;
	}

	/*
	 * Continued paranoia to make sure we're in the state we expect.
	 * This paranoia isn't really justified but it seems good to be safe.
	 */
	switch (host->state) {
	case STATE_SENDING_CMD11:
	case STATE_SENDING_CMD:
	case STATE_SENDING_STOP:
		/*
		 * If the CMD_DONE interrupt does NOT come while sending a
		 * command, notify the driver to terminate the current
		 * transfer and report a command timeout to the core.
		 */
		host->cmd_status = SDMMC_INT_RTO;
		set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
		break;
	default:
		dev_warn(host->dev, "Unexpected command timeout, state %d\n",
			 host->state);
		break;
	}

exit:
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

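/* Data read (DTO) watchdog: fires if DATA_OVER never arrived */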
static void dw_mci_dto_timer(struct timer_list *t)
{
	struct dw_mci *host = from_timer(host, t, dto_timer);
	unsigned long irqflags;
	u32 pending;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/*
	 * The DTO timer is much longer than the CTO timer, so it's even less
	 * likely that we'll hit these cases, but it pays to be paranoid.
	 */
	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
	if (pending & SDMMC_INT_DATA_OVER) {
		/* The interrupt should fire; no need to act but we can warn */
		dev_warn(host->dev, "Unexpected data interrupt latency\n");
		goto exit;
	}
	if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
		/* Presumably the interrupt handler couldn't delete the timer */
		dev_warn(host->dev, "DTO timeout when already completed\n");
		goto exit;
	}

	/*
	 * Continued paranoia to make sure we're in the state we expect.
	 * This paranoia isn't really justified but it seems good to be safe.
	 */
	switch (host->state) {
	case STATE_SENDING_DATA:
	case STATE_DATA_BUSY:
		/*
		 * If the DTO interrupt does NOT come while sending data,
		 * notify the driver to terminate the current transfer and
		 * report a data timeout to the core.
		 */
		host->data_status = SDMMC_INT_DRTO;
		set_bit(EVENT_DATA_ERROR, &host->pending_events);
		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
		break;
	default:
		dev_warn(host->dev, "Unexpected data timeout, state %d\n",
			 host->state);
		break;
	}

exit:
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

#ifdef CONFIG_OF
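/* Build dw_mci_board platform data from device-tree/ACPI properties */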
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	/* find the reset controller if one exists */
	pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset");
	if (IS_ERR(pdata->rstc)) {
		if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER)
			return ERR_PTR(-EPROBE_DEFER);
	}

	if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev,
			 "fifo-depth property not found, using value of FIFOTH register as default\n");

	device_property_read_u32(dev, "card-detect-delay",
				 &pdata->detect_delay_ms);

	device_property_read_u32(dev, "data-addr", &host->data_addr_override);

	if (device_property_present(dev, "fifo-watermark-aligned"))
		host->wm_aligned = true;

	if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	return pdata;
}

#else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */

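/* Enable the controller's card-detect interrupt when no CD GPIO is wired up */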
static void dw_mci_enable_cd(struct dw_mci *host)
{
	unsigned long irqflags;
	u32 temp;

	/*
	 * No need for the internal CD interrupt if the slot is polled
	 * (e.g. broken card detection); only enable it when there is no
	 * card-detect GPIO providing its own interrupt.
	 */
	if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL)
		return;

	if (mmc_gpio_get_cd(host->slot->mmc) < 0) {
		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_CD;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}
}

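/*
 * Common probe entry point for the platform glue drivers: parse platform
 * data, enable clocks, size the FIFO, set up DMA and interrupts, and
 * register the slot.
 */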
int dw_mci_probe(struct dw_mci *host)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int width, i, ret = 0;
	u32 fifo_size;

	if (!host->pdata) {
		host->pdata = dw_mci_parse_dt(host);
		if (PTR_ERR(host->pdata) == -EPROBE_DEFER) {
			return -EPROBE_DEFER;
		} else if (IS_ERR(host->pdata)) {
			dev_err(host->dev, "platform data not available\n");
			return -EINVAL;
		}
	}

	host->biu_clk = devm_clk_get(host->dev, "biu");
	if (IS_ERR(host->biu_clk)) {
		dev_dbg(host->dev, "biu clock not available\n");
	} else {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable biu clock\n");
			return ret;
		}
	}

	host->ciu_clk = devm_clk_get(host->dev, "ciu");
	if (IS_ERR(host->ciu_clk)) {
		dev_dbg(host->dev, "ciu clock not available\n");
		host->bus_hz = host->pdata->bus_hz;
	} else {
		ret = clk_prepare_enable(host->ciu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable ciu clock\n");
			goto err_clk_biu;
		}

		if (host->pdata->bus_hz) {
			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
			if (ret)
				dev_warn(host->dev,
					 "Unable to set bus rate to %uHz\n",
					 host->pdata->bus_hz);
		}
		host->bus_hz = clk_get_rate(host->ciu_clk);
	}

	if (!host->bus_hz) {
		dev_err(host->dev,
			"Platform data must supply bus speed\n");
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	if (!IS_ERR(host->pdata->rstc)) {
		reset_control_assert(host->pdata->rstc);
		usleep_range(10, 50);
		reset_control_deassert(host->pdata->rstc);
	}

	if (drv_data && drv_data->init) {
		ret = drv_data->init(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific init failed\n");
			goto err_clk_ciu;
		}
	}

	timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0);
	timer_setup(&host->cto_timer, dw_mci_cto_timer, 0);
	timer_setup(&host->dto_timer, dw_mci_dto_timer, 0);

	spin_lock_init(&host->lock);
	spin_lock_init(&host->irq_lock);
	INIT_LIST_HEAD(&host->queue);

	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
	if (!i) {
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		width = 16;
		host->data_shift = 1;
	} else if (i == 2) {
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		width = 64;
		host->data_shift = 3;
	} else {
		/* Check for a reserved value, and warn if it is */
		WARN((i != 1),
		     "HCON reports a reserved host data width!\n"
		     "Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		width = 32;
		host->data_shift = 2;
	}

	/* Reset all blocks */
	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);

	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/*
	 * FIFO threshold settings: RX_WMark = fifo_size / 2 - 1,
	 * TX_WMark = fifo_size / 2, DMA multiple transaction size = 8.
	 */
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware, you
		 * should put it in the platform data.
		 */
		fifo_size = mci_readl(host, FIFOTH);
		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
	} else {
		fifo_size = host->pdata->fifo_depth;
	}
	host->fifo_depth = fifo_size;
	host->fifoth_val =
		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
	mci_writel(host, FIFOTH, host->fifoth_val);

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	/*
	 * The data offset moved in the 2.40a spec, so check the version ID
	 * and set the offset for the DATA register accordingly.
	 */
	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
	dev_info(host->dev, "Version ID is %04x\n", host->verid);

	if (host->data_addr_override)
		host->fifo_reg = host->regs + host->data_addr_override;
	else if (host->verid < DW_MMC_240A)
		host->fifo_reg = host->regs + DATA_OFFSET;
	else
		host->fifo_reg = host->regs + DATA_240A_OFFSET;

	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
			       host->irq_flags, "dw-mci", host);
	if (ret)
		goto err_dmaunmap;

	/*
	 * Enable interrupts for command done, data over, data empty,
	 * receive ready, and errors such as transmit/receive timeouts
	 * and CRC errors.
	 */
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	/* Enable mci interrupt */
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	dev_info(host->dev,
		 "DW MMC controller at irq %d, %d bit host data width, %u deep fifo\n",
		 host->irq, width, fifo_size);

	/* We need at least one slot to succeed */
	ret = dw_mci_init_slot(host);
	if (ret) {
		dev_dbg(host->dev, "slot 0 init failed\n");
		goto err_dmaunmap;
	}

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;

err_dmaunmap:
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (!IS_ERR(host->pdata->rstc))
		reset_control_assert(host->pdata->rstc);

err_clk_ciu:
	clk_disable_unprepare(host->ciu_clk);

err_clk_biu:
	clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_probe);

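/* Tear down the slot and quiesce the controller; undoes dw_mci_probe() */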
void dw_mci_remove(struct dw_mci *host)
{
	dev_dbg(host->dev, "remove slot\n");
	if (host->slot)
		dw_mci_cleanup_slot(host->slot);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (!IS_ERR(host->pdata->rstc))
		reset_control_assert(host->pdata->rstc);

	clk_disable_unprepare(host->ciu_clk);
	clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);

#ifdef CONFIG_PM
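/* Runtime PM: tear down DMA and gate the card interface clock */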
int dw_mci_runtime_suspend(struct device *dev)
{
	struct dw_mci *host = dev_get_drvdata(dev);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	clk_disable_unprepare(host->ciu_clk);

	if (host->slot &&
	    (mmc_can_gpio_cd(host->slot->mmc) ||
	     !mmc_card_is_removable(host->slot->mmc)))
		clk_disable_unprepare(host->biu_clk);

	return 0;
}
EXPORT_SYMBOL(dw_mci_runtime_suspend);

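/*
 * Runtime PM: re-enable clocks, reset the controller and restore the
 * register state that was lost while the clocks were off.
 */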
int dw_mci_runtime_resume(struct device *dev)
{
	int ret = 0;
	struct dw_mci *host = dev_get_drvdata(dev);

	if (host->slot &&
	    (mmc_can_gpio_cd(host->slot->mmc) ||
	     !mmc_card_is_removable(host->slot->mmc))) {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret)
			return ret;
	}

	ret = clk_prepare_enable(host->ciu_clk);
	if (ret)
		goto err;

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		clk_disable_unprepare(host->ciu_clk);
		ret = -ENODEV;
		goto err;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial FIFOTH value and invalidate prev_blksz
	 * by zeroing it.
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
		dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);

	/* Force setup bus to guarantee available clock output */
	dw_mci_setup_bus(host->slot, true);

	/* Re-enable SDIO interrupts. */
	if (sdio_irq_claimed(host->slot->mmc))
		__dw_mci_enable_sdio_irq(host->slot, 1);

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;

err:
	if (host->slot &&
	    (mmc_can_gpio_cd(host->slot->mmc) ||
	     !mmc_card_is_removable(host->slot->mmc)))
		clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_runtime_resume);
#endif /* CONFIG_PM */

static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}

static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");