/*
 * linux/drivers/mmc/host/tmio_mmc_pio.c
 *
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet).
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Eliminate FIXMEs
 *   SDIO support
 *   Better power management
 *   Handle MMC errors better
 *   Double buffer support
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

#include "tmio_mmc.h"

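/*
 * Note on the mask register used by the two helpers below: a set bit in
 * CTL_IRQ_MASK masks (disables) the corresponding interrupt source, so
 * "enabling" an IRQ means clearing its mask bit and "disabling" means
 * setting it.
 */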
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
}

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) | (i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
}

static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32(host, CTL_STATUS, ~i);
}

static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;
	return --host->sg_len;
}

#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)

static void pr_debug_status(u32 status)
{
	int i = 0;
	printk(KERN_DEBUG "status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}

#else
#define pr_debug_status(s)  do { } while (0)
#endif

static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable) {
		host->sdio_irq_enabled = 1;
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
			(TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
	} else {
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
		host->sdio_irq_enabled = 0;
	}
}

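/*
 * Divisor selection sketch (illustrative numbers, not from a datasheet): the
 * loop below starts at f_min with the largest divisor pattern
 * (clk = 0x80000080) and doubles the candidate clock while shifting the
 * divisor bit right, until one more doubling would exceed the requested
 * rate. For example, with f_max = 24 MHz and f_min = 24 MHz / 512 = 46875 Hz,
 * a request for 400 kHz stops at 46875 << 3 = 375 kHz, i.e. a divide-by-64
 * setting; ORing in 0x100 then enables the card clock.
 */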
static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock) {
		for (clock = host->mmc->f_min, clk = 0x80000080;
				new_clock >= (clock << 1); clk >>= 1)
			clock <<= 1;
		clk |= 0x100;
	}

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk >> 22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}

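/*
 * The resource_size() > 0x100 checks below appear to distinguish controller
 * variants: larger register windows also expose the "high" registers
 * (CTL_CLK_AND_WAIT_CTL, CTL_RESET_SDIO). This is an inference from the
 * register usage here, not a documented rule.
 */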
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		msleep(10);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);
}

static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);

	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		msleep(10);
	}
}

static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (resource_size(res) > 0x100)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}

static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/*
	 * Is the request already finished? Since we use a non-blocking
	 * cancel_delayed_work(), it can happen that a .set_ios() call preempts
	 * us, so we have to check for IS_ERR(host->mrq).
	 */
	if (IS_ERR_OR_NULL(mrq)
	    || time_is_after_jiffies(host->last_req_ts +
		msecs_to_jiffies(2000))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		 "timeout waiting for hardware interrupt (CMD%u)\n",
		 mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	/* Ready for new calls */
	host->mrq = NULL;

	mmc_request_done(host->mmc, mrq);
}

/* called with host->lock held, interrupts disabled */
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	mrq = host->mrq;
	if (IS_ERR_OR_NULL(mrq)) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	cancel_delayed_work(&host->delayed_reset_work);

	host->mrq = NULL;
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);
}

static void tmio_mmc_done_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  done);
	tmio_mmc_finish_request(host);
}

/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
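/*
 * Worked example (derived from the definitions above): a single-block read,
 * CMD17 with an R1 response, is issued as
 *	c = 17 | RESP_R1 | DATA_PRESENT | TRANSFER_READ = 0x1c11
 * while a multi-block read (CMD18) would additionally OR in TRANSFER_MULTI.
 */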

static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;

	/* Command 12 is handled by hardware */
	if (cmd->opcode == 12 && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggests this bit
 * should be set when issuing app commands.
 *	if (cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD);

	/* Fire off the command */
	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}

/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	if (data->flags & MMC_DATA_READ)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);

	return;
}

static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}

/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 * which we don't do, as the chip can auto generate them.
	 * Perhaps we can be smarter about when to use auto CMD12 and
	 * only issue the auto request when we know this is the desired
	 * stop command, allowing fallback to the stop command the
	 * upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop) {
		if (stop->opcode == 12 && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	schedule_work(&host->done);
}

static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data;
	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt already comes with the
		 * BUSY status bit clear, but on some operations, like mount or
		 * at the beginning of a write / sync / umount, there is one
		 * DATAEND interrupt with the BUSY bit set. In these cases,
		 * waiting for one more interrupt fixes the problem.
		 */
		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}

static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
			     unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	host->cmd = NULL;

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */

	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}
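	/*
	 * resp[] now holds the 136-bit response shifted up by one byte,
	 * compensating for the controller dropping the top 8 bits of each raw
	 * response word (inferred from this code, not from a datasheet).
	 */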

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			else
				tasklet_schedule(&host->dma_issue);
		} else {
			if (host->force_pio || !host->chan_tx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		schedule_work(&host->done);
	}

out:
	spin_unlock(&host->lock);
}

irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	struct mmc_host *mmc = host->mmc;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, irq_mask, status;
	unsigned int sdio_ireg, sdio_irq_mask, sdio_status;

	pr_debug("MMC IRQ begin\n");

	status = sd_ctrl_read32(host, CTL_STATUS);
	irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
	ireg = status & TMIO_MASK_IRQ & ~irq_mask;

	sdio_ireg = 0;
	if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
		sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
		sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;

		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);

		if (sdio_ireg && !host->sdio_irq_enabled) {
			pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
				   sdio_status, sdio_irq_mask, sdio_ireg);
			tmio_mmc_enable_sdio_irq(mmc, 0);
			goto out;
		}

		if (mmc->caps & MMC_CAP_SDIO_IRQ &&
		    sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
			mmc_signal_sdio_irq(mmc);

		if (sdio_ireg)
			goto out;
	}

	pr_debug_status(status);
	pr_debug_status(ireg);

	/* Card insert / remove attempts */
	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
			TMIO_STAT_CARD_REMOVE);
		if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
		     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
		    !work_pending(&mmc->detect.work))
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		goto out;
	}

	/* CRC and other errors */
/*	if (ireg & TMIO_STAT_ERR_IRQ)
 *		handled |= tmio_error_irq(host, irq, stat);
 */

	/* Command completion */
	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
		tmio_mmc_ack_mmc_irqs(host,
			TMIO_STAT_CMDRESPEND |
			TMIO_STAT_CMDTIMEOUT);
		tmio_mmc_cmd_irq(host, status);
		goto out;
	}

	/* Data transfer */
	if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
		tmio_mmc_pio_irq(host);
		goto out;
	}

	/* Data transfer completion */
	if (ireg & TMIO_STAT_DATAEND) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_data_irq(host);
		goto out;
	}

	pr_warning("tmio_mmc: Spurious irq, disabling! "
		   "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
	pr_debug_status(status);
	tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask);

out:
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_irq);

static int tmio_mmc_start_data(struct tmio_mmc_host *host,
			       struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}

/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_debug("request not null\n");
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	host->last_req_ts = jiffies;
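	/*
	 * Make sure last_req_ts is visible before host->mrq: the watchdog in
	 * tmio_mmc_reset_work() reads them in the opposite order (a reading
	 * of why the wmb() is here; no pairing rmb() is spelled out).
	 */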
	wmb();
	host->mrq = mrq;

	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->data) {
		ret = tmio_mmc_start_data(host, mrq->data);
		if (ret)
			goto fail;
	}

	ret = tmio_mmc_start_command(host, mrq->cmd);
	if (!ret) {
		schedule_delayed_work(&host->delayed_reset_work,
				      msecs_to_jiffies(2000));
		return;
	}

fail:
	host->force_pio = false;
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(mmc, mrq);
}

/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme, therefore it cannot
 * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
 * MMC won't run that fast, it has to be clocked at 12MHz, which is the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned long flags;

	mutex_lock(&host->ios_lock);

	spin_lock_irqsave(&host->lock, flags);
	if (host->mrq) {
		if (IS_ERR(host->mrq)) {
			dev_dbg(&host->pdev->dev,
				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
				current->comm, task_pid_nr(current),
				ios->clock, ios->power_mode);
			host->mrq = ERR_PTR(-EINTR);
		} else {
			dev_dbg(&host->pdev->dev,
				"%s.%d: CMD%u active since %lu, now %lu!\n",
				current->comm, task_pid_nr(current),
				host->mrq->cmd->opcode, host->last_req_ts, jiffies);
		}
		spin_unlock_irqrestore(&host->lock, flags);

		mutex_unlock(&host->ios_lock);
		return;
	}

	host->mrq = ERR_PTR(-EBUSY);

	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * pdata->power is false only if COLD_CD is available; otherwise it is
	 * false only for short intervals during probing or resuming.
	 */
	if (ios->power_mode == MMC_POWER_ON && ios->clock) {
		if (!pdata->power) {
			pm_runtime_get_sync(&host->pdev->dev);
			pdata->power = true;
		}
		tmio_mmc_set_clock(host, ios->clock);
		/* power up SD bus */
		if (host->set_pwr)
			host->set_pwr(host->pdev, 1);
		/* start bus clock */
		tmio_mmc_clk_start(host);
	} else if (ios->power_mode != MMC_POWER_UP) {
		if (host->set_pwr)
			host->set_pwr(host->pdev, 0);
		if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
		    pdata->power) {
			pdata->power = false;
			pm_runtime_put(&host->pdev->dev);
		}
		tmio_mmc_clk_stop(host);
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
		break;
	case MMC_BUS_WIDTH_4:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
		break;
	}

	/* Let things settle. Delay taken from the WinCE driver */
	udelay(140);
	if (PTR_ERR(host->mrq) == -EINTR)
		dev_dbg(&host->pdev->dev,
			"%s.%d: IOS interrupted: clk %u, mode %u",
			current->comm, task_pid_nr(current),
			ios->clock, ios->power_mode);
	host->mrq = NULL;

	mutex_unlock(&host->ios_lock);
}

static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;

	return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		 (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
}

static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;

	if (!pdata->get_cd)
		return -ENOSYS;
	else
		return pdata->get_cd(host->pdev);
}

static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= tmio_mmc_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};

int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
				  struct platform_device *pdev,
				  struct tmio_mmc_data *pdata)
{
	struct tmio_mmc_host *_host;
	struct mmc_host *mmc;
	struct resource *res_ctl;
	int ret;
	u32 irq_mask = TMIO_MASK_CMD;

	res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		return -EINVAL;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	pdata->dev = &pdev->dev;
	_host = mmc_priv(mmc);
	_host->pdata = pdata;
	_host->mmc = mmc;
	_host->pdev = pdev;
	platform_set_drvdata(pdev, mmc);

	_host->set_pwr = pdata->set_pwr;
	_host->set_clk_div = pdata->set_clk_div;

	/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
	_host->bus_shift = resource_size(res_ctl) >> 10;
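	/* (0x200 >> 10 == 0 and 0x400 >> 10 == 1, so the size of the register
	 * window itself encodes the register stride shift) */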

	_host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
	if (!_host->ctl) {
		ret = -ENOMEM;
		goto host_free;
	}

	mmc->ops = &tmio_mmc_ops;
	mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->f_max = pdata->hclk;
	mmc->f_min = mmc->f_max / 512;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
		mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;
	if (pdata->ocr_mask)
		mmc->ocr_avail = pdata->ocr_mask;
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	pdata->power = false;
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume(&pdev->dev);
	if (ret < 0)
		goto pm_disable;

	tmio_mmc_clk_stop(_host);
	tmio_mmc_reset(_host);

	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		tmio_mmc_enable_sdio_irq(mmc, 0);

	spin_lock_init(&_host->lock);
	mutex_init(&_host->ios_lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
	INIT_WORK(&_host->done, tmio_mmc_done_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(_host, pdata);

	/* We have to keep the device powered for its card detection to work */
	if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD)) {
		pdata->power = true;
		pm_runtime_get_noresume(&pdev->dev);
	}

	mmc_add_host(mmc);

	/* Unmask the IRQs we want to know about */
	if (!_host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!_host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;

	tmio_mmc_enable_mmc_irqs(_host, irq_mask);

	*host = _host;

	return 0;

pm_disable:
	pm_runtime_disable(&pdev->dev);
	iounmap(_host->ctl);
host_free:
	mmc_free_host(mmc);

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_probe);

void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
	struct platform_device *pdev = host->pdev;

	/*
	 * We don't have to manipulate pdata->power here: if there is a card in
	 * the slot, the runtime PM is active and our .runtime_resume() will not
	 * be run. If there is no card in the slot and the platform can suspend
	 * the controller, the runtime PM is suspended and pdata->power == false,
	 * so, our .runtime_resume() will not try to detect a card in the slot.
	 */
	if (host->pdata->flags & TMIO_MMC_HAS_COLD_CD)
		pm_runtime_get_sync(&pdev->dev);

	mmc_remove_host(host->mmc);
	cancel_work_sync(&host->done);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	iounmap(host->ctl);
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);

#ifdef CONFIG_PM
int tmio_mmc_host_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int ret = mmc_suspend_host(mmc);

	if (!ret)
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

	host->pm_error = pm_runtime_put_sync(dev);

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_suspend);

int tmio_mmc_host_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	/* The MMC core will perform the complete set up */
	host->pdata->power = false;

	host->pm_global = true;
	if (!host->pm_error)
		pm_runtime_get_sync(dev);

	if (host->pm_global) {
		/* Runtime PM resume callback didn't run */
		tmio_mmc_reset(host);
		tmio_mmc_enable_dma(host, true);
		host->pm_global = false;
	}

	return mmc_resume_host(mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_resume);

#endif	/* CONFIG_PM */

int tmio_mmc_host_runtime_suspend(struct device *dev)
{
	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);

int tmio_mmc_host_runtime_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;

	tmio_mmc_reset(host);
	tmio_mmc_enable_dma(host, true);

	if (pdata->power) {
		/* Only entered after a card-insert interrupt */
		if (!mmc->card)
			tmio_mmc_set_ios(mmc, &mmc->ios);
		mmc_detect_change(mmc, msecs_to_jiffies(100));
	}
	host->pm_global = false;

	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);

MODULE_LICENSE("GPL v2");
/*
 * linux/drivers/mmc/host/tmio_mmc_pio.c
 *
 * Copyright (C) 2016 Sang Engineering, Wolfram Sang
 * Copyright (C) 2015-16 Renesas Electronics Corporation
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet).
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Eliminate FIXMEs
 *   Better power management
 *   Handle MMC errors better
 *   Double buffer support
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/mmc/sdio.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "tmio_mmc.h"

void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}

static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, ~i);
}

static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;
	return --host->sg_len;
}

#define CMDREQ_TIMEOUT	5000

#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)

static void pr_debug_status(u32 status)
{
	int i = 0;
	pr_debug("status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}

#else
#define pr_debug_status(s)  do { } while (0)
#endif

static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable && !host->sdio_irq_enabled) {
		/* Keep device active while SDIO irq is enabled */
		pm_runtime_get_sync(mmc_dev(mmc));
		host->sdio_irq_enabled = true;

		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL &
					~TMIO_SDIO_STAT_IOIRQ;
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
	} else if (!enable && host->sdio_irq_enabled) {
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);

		host->sdio_irq_enabled = false;
		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));
	}
}
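/*
 * Note on the runtime PM pairing above: the pm_runtime_get_sync() taken when
 * the SDIO IRQ is first enabled is balanced by pm_runtime_put_autosuspend()
 * on disable, so the sdio_irq_enabled flag also guards the PM refcount.
 */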

static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(host->pdata->flags & TMIO_MMC_MIN_RCAR2 ? 1 : 10);

	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		msleep(10);
	}
}

static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		msleep(10);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(host->pdata->flags & TMIO_MMC_MIN_RCAR2 ? 5 : 10);
}

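/*
 * Divisor selection sketch for this revision (illustrative): the base clock
 * is either clk_update()'s returned rate divided by 512 or f_min, and the
 * loop doubles it while shifting the divisor bit pattern right until one
 * more doubling would exceed the requested rate, just as in the older
 * revision earlier in this file.
 */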
static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
			       unsigned int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock == 0) {
		tmio_mmc_clk_stop(host);
		return;
	}

	if (host->clk_update)
		clock = host->clk_update(host, new_clock) / 512;
	else
		clock = host->mmc->f_min;

	for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1)
		clock <<= 1;

	/* 1/1 clock is option */
	if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) && ((clk >> 22) & 0x1))
		clk |= 0xff;

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk >> 22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
		msleep(10);

	tmio_mmc_clk_start(host);
}

static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}

static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/*
	 * Is the request already finished? Since we use a non-blocking
	 * cancel_delayed_work(), it can happen that a .set_ios() call preempts
	 * us, so we have to check for IS_ERR(host->mrq).
	 */
	if (IS_ERR_OR_NULL(mrq)
	    || time_is_after_jiffies(host->last_req_ts +
		msecs_to_jiffies(CMDREQ_TIMEOUT))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		 "timeout waiting for hardware interrupt (CMD%u)\n",
		 mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	/* Ready for new calls */
	host->mrq = NULL;

	tmio_mmc_abort_dma(host);
	mmc_request_done(host->mmc, mrq);
}

/* called with host->lock held, interrupts disabled */
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	mrq = host->mrq;
	if (IS_ERR_OR_NULL(mrq)) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	cancel_delayed_work(&host->delayed_reset_work);

	host->mrq = NULL;
	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->cmd->error || (mrq->data && mrq->data->error))
		tmio_mmc_abort_dma(host);

	if (host->check_scc_error)
		host->check_scc_error(host);

	mmc_request_done(host->mmc, mrq);
}

static void tmio_mmc_done_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  done);
	tmio_mmc_finish_request(host);
}

/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
#define NO_CMD12_ISSUE 0x4000 /* TMIO_MMC_HAVE_CMD12_CTRL */
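/*
 * Worked example, as in the older revision above: CMD17 with an R1 response
 * is issued as
 *	c = 17 | RESP_R1 | DATA_PRESENT | TRANSFER_READ = 0x1c11.
 * Note that SECURITY_CMD and NO_CMD12_ISSUE share bit 0x4000; the latter
 * meaning applies only on parts with TMIO_MMC_HAVE_CMD12_CTRL.
 */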

static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;
	u32 irq_mask = TMIO_MASK_CMD;

	/* CMD12 is handled by hardware */
	if (cmd->opcode == MMC_STOP_TRANSMISSION && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:
	case MMC_RSP_R1_NO_CRC:
			   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggests this bit
 * should be set when issuing app commands.
 *	if (cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;

			/*
			 * Disable auto CMD12 at IO_RW_EXTENDED when doing a
			 * multiple block transfer
			 */
			if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) &&
			    (cmd->opcode == SD_IO_RW_EXTENDED))
				c |= NO_CMD12_ISSUE;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	if (!host->native_hotplug)
		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
	tmio_mmc_enable_mmc_irqs(host, irq_mask);

	/* Fire off the command */
	sd_ctrl_write32_as_16_and_16(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}

static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
				   unsigned short *buf,
				   unsigned int count)
{
	int is_read = host->data->flags & MMC_DATA_READ;
	u8 *buf8;

	/*
	 * Transfer the data
	 */
	if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) {
		u8 data[4] = { };

		if (is_read)
			sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf,
					   count >> 2);
		else
			sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf,
					    count >> 2);

		/* if count was a multiple of 4, we are done */
		if (!(count & 0x3))
			return;

		/* otherwise transfer the remaining bytes via a bounce word */
		buf8 = (u8 *)(buf + (count >> 2));
		count %= 4;

		if (is_read) {
			sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT,
					   (u32 *)data, 1);
			memcpy(buf8, data, count);
		} else {
			memcpy(data, buf8, count);
			sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT,
					    (u32 *)data, 1);
		}

		return;
	}

	if (is_read)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	/* if count was an even number, we are done */
	if (!(count & 0x1))
		return;

	/* if count was an odd number, transfer the last byte */
	buf8 = (u8 *)(buf + (count >> 1));

	/*
	 * FIXME
	 *
	 * the driver and this function assume little-endian usage
	 */
	if (is_read)
		*buf8 = sd_ctrl_read16(host, CTL_SD_DATA_PORT) & 0xff;
	else
		sd_ctrl_write16(host, CTL_SD_DATA_PORT, *buf8);
}

/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	tmio_mmc_transfer_data(host, buf, count);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);

	return;
}

static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}

/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 * which we don't do, as the chip can auto generate them.
	 * Perhaps we can be smarter about when to use auto CMD12 and
	 * only issue the auto request when we know this is the desired
	 * stop command, allowing fallback to the stop command the
	 * upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop) {
		if (stop->opcode == MMC_STOP_TRANSMISSION && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	schedule_work(&host->done);
}

static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
{
	struct mmc_data *data;
	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (stat & TMIO_STAT_CRCFAIL || stat & TMIO_STAT_STOPBIT_ERR ||
	    stat & TMIO_STAT_TXUNDERRUN)
		data->error = -EILSEQ;
	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
		bool done = false;

		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt already comes with the
		 * BUSY status bit clear, but on some operations, like mount or
		 * at the beginning of a write / sync / umount, there is one
		 * DATAEND interrupt with the BUSY bit set. In these cases,
		 * waiting for one more interrupt fixes the problem.
		 */
		if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) {
			if (status & TMIO_STAT_SCLKDIVEN)
				done = true;
		} else {
			if (!(status & TMIO_STAT_CMD_BUSY))
				done = true;
		}

		if (done) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}

static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
			     unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */

	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read16_and_16_as_32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}
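	/* Same byte-shuffle as in the older revision: compensate for the
	 * dropped top 8 bits of the raw response (a reading of this code). */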

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if ((stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) ||
		 stat & TMIO_STAT_STOPBIT_ERR ||
		 stat & TMIO_STAT_CMD_IDX_ERR)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && (!cmd->error || cmd->error == -EILSEQ)) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			else
				tasklet_schedule(&host->dma_issue);
		} else {
			if (host->force_pio || !host->chan_tx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		schedule_work(&host->done);
	}

out:
	spin_unlock(&host->lock);
}

static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
				       int ireg, int status)
{
	struct mmc_host *mmc = host->mmc;

	/* Card insert / remove attempts */
	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
			TMIO_STAT_CARD_REMOVE);
		if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
		     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
		    !work_pending(&mmc->detect.work))
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		return true;
	}

	return false;
}

static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
				  int ireg, int status)
{
	/* Command completion */
	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
		tmio_mmc_ack_mmc_irqs(host,
			TMIO_STAT_CMDRESPEND |
			TMIO_STAT_CMDTIMEOUT);
		tmio_mmc_cmd_irq(host, status);
		return true;
	}

	/* Data transfer */
	if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
		tmio_mmc_pio_irq(host);
		return true;
	}

	/* Data transfer completion */
	if (ireg & TMIO_STAT_DATAEND) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_data_irq(host, status);
		return true;
	}

	return false;
}

static void tmio_mmc_sdio_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	struct mmc_host *mmc = host->mmc;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, status;
	unsigned int sdio_status;

	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
		return;

	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;

	sdio_status = status & ~TMIO_SDIO_MASK_ALL;
	if (pdata->flags & TMIO_MMC_SDIO_STATUS_QUIRK)
		sdio_status |= 6;

	sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
		mmc_signal_sdio_irq(mmc);
}

irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	unsigned int ireg, status;

	status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
	ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;

	pr_debug_status(status);
	pr_debug_status(ireg);

	/* Clear the status except the interrupt status */
	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ);

	if (__tmio_mmc_card_detect_irq(host, ireg, status))
		return IRQ_HANDLED;
	if (__tmio_mmc_sdcard_irq(host, ireg, status))
		return IRQ_HANDLED;

	tmio_mmc_sdio_irq(irq, devid);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_irq);

static int tmio_mmc_start_data(struct tmio_mmc_host *host,
			       struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4/8 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4 ||
	    host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4/8 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}

static void tmio_mmc_hw_reset(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (host->hw_reset)
		host->hw_reset(host);
}

static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int i, ret = 0;

	if (!host->tap_num) {
		if (!host->init_tuning || !host->select_tuning)
			/* Tuning is not supported */
			goto out;

		host->tap_num = host->init_tuning(host);
		if (!host->tap_num)
			/* Tuning is not supported */
			goto out;
	}

	if (host->tap_num * 2 >= sizeof(host->taps) * BITS_PER_BYTE) {
		dev_warn_once(&host->pdev->dev,
			"Too many taps, skipping tuning. Please consider updating size of taps field of tmio_mmc_host\n");
		goto out;
	}

	bitmap_zero(host->taps, host->tap_num * 2);

	/* Issue CMD19 twice for each tap */
	for (i = 0; i < 2 * host->tap_num; i++) {
		if (host->prepare_tuning)
			host->prepare_tuning(host, i % host->tap_num);

		ret = mmc_send_tuning(mmc, opcode, NULL);
		if (ret && ret != -EILSEQ)
			goto out;
		if (ret == 0)
			set_bit(i, host->taps);

		mdelay(1);
	}

	ret = host->select_tuning(host);

out:
	if (ret < 0) {
		dev_warn(&host->pdev->dev, "Tuning procedure failed\n");
		tmio_mmc_hw_reset(mmc);
	}

	return ret;
}

/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_debug("request not null\n");
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	host->last_req_ts = jiffies;
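	/* Order last_req_ts before host->mrq for tmio_mmc_reset_work(), as in
	 * the older revision (a reading of why the wmb() is here). */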
	wmb();
	host->mrq = mrq;

	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->data) {
		ret = tmio_mmc_start_data(host, mrq->data);
		if (ret)
			goto fail;
	}

	ret = tmio_mmc_start_command(host, mrq->cmd);
	if (!ret) {
		schedule_delayed_work(&host->delayed_reset_work,
				      msecs_to_jiffies(CMDREQ_TIMEOUT));
		return;
	}

fail:
	host->force_pio = false;
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(mmc, mrq);
}

static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
{
	if (!host->clk_enable)
		return -ENOTSUPP;

	return host->clk_enable(host);
}

static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	/* .set_ios() returns void, so there is no way to report an error */

	if (host->set_pwr)
		host->set_pwr(host->pdev, 1);

	if (!IS_ERR(mmc->supply.vmmc)) {
		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		/*
		 * Attention: empirical value. With a b43 WiFi SDIO card this
		 * delay proved necessary for reliable card-insertion probing.
		 * 100us were not enough. Is this the same 140us delay, as in
		 * tmio_mmc_set_ios()?
		 */
		udelay(200);
	}
	/*
	 * It seems VccQ should be switched on after Vcc; this is also what
	 * the omap_hsmmc.c driver does.
	 */
	if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
		ret = regulator_enable(mmc->supply.vqmmc);
		udelay(200);
	}

	if (ret < 0)
		dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
			ret);
}

static void tmio_mmc_power_off(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (!IS_ERR(mmc->supply.vmmc))
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

	if (host->set_pwr)
		host->set_pwr(host->pdev, 0);
}

static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
				   unsigned char bus_width)
{
	u16 reg = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT)
				& ~(CARD_OPT_WIDTH | CARD_OPT_WIDTH8);

	/* reg now applies to MMC_BUS_WIDTH_4 */
	if (bus_width == MMC_BUS_WIDTH_1)
		reg |= CARD_OPT_WIDTH;
	else if (bus_width == MMC_BUS_WIDTH_8)
		reg |= CARD_OPT_WIDTH8;

	sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, reg);
}

968/* Set MMC clock / power.
969 * Note: This controller uses a simple divider scheme therefore it cannot
970 * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
971 * MMC wont run that fast, it has to be clocked at 12MHz which is the next
972 * slowest setting.
973 */
974static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
975{
976 struct tmio_mmc_host *host = mmc_priv(mmc);
977 struct device *dev = &host->pdev->dev;
978 unsigned long flags;
979
980 mutex_lock(&host->ios_lock);
981
982 spin_lock_irqsave(&host->lock, flags);
983 if (host->mrq) {
984 if (IS_ERR(host->mrq)) {
985 dev_dbg(dev,
986 "%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
987 current->comm, task_pid_nr(current),
988 ios->clock, ios->power_mode);
989 host->mrq = ERR_PTR(-EINTR);
990 } else {
991 dev_dbg(dev,
992 "%s.%d: CMD%u active since %lu, now %lu!\n",
993 current->comm, task_pid_nr(current),
994 host->mrq->cmd->opcode, host->last_req_ts, jiffies);
995 }
996 spin_unlock_irqrestore(&host->lock, flags);
997
998 mutex_unlock(&host->ios_lock);
999 return;
1000 }
1001
1002 host->mrq = ERR_PTR(-EBUSY);
1003
1004 spin_unlock_irqrestore(&host->lock, flags);
1005
1006 switch (ios->power_mode) {
1007 case MMC_POWER_OFF:
1008 tmio_mmc_power_off(host);
1009 tmio_mmc_clk_stop(host);
1010 break;
1011 case MMC_POWER_UP:
1012 tmio_mmc_power_on(host, ios->vdd);
1013 tmio_mmc_set_clock(host, ios->clock);
1014 tmio_mmc_set_bus_width(host, ios->bus_width);
1015 break;
1016 case MMC_POWER_ON:
1017 tmio_mmc_set_clock(host, ios->clock);
1018 tmio_mmc_set_bus_width(host, ios->bus_width);
1019 break;
1020 }
1021
1022 /* Let things settle. delay taken from winCE driver */
1023 udelay(140);
1024 if (PTR_ERR(host->mrq) == -EINTR)
1025 dev_dbg(&host->pdev->dev,
1026 "%s.%d: IOS interrupted: clk %u, mode %u",
1027 current->comm, task_pid_nr(current),
1028 ios->clock, ios->power_mode);
1029 host->mrq = NULL;
1030
1031 host->clk_cache = ios->clock;
1032
1033 mutex_unlock(&host->ios_lock);
1034}
1035
1036static int tmio_mmc_get_ro(struct mmc_host *mmc)
1037{
1038 struct tmio_mmc_host *host = mmc_priv(mmc);
1039 struct tmio_mmc_data *pdata = host->pdata;
1040 int ret = mmc_gpio_get_ro(mmc);
1041 if (ret >= 0)
1042 return ret;
1043
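	/*
	 * TMIO_STAT_WRPROTECT is set while the card is write-enabled, hence
	 * the negation; TMIO_MMC_WRPROTECT_DISABLE forces read-write.
	 */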
	ret = !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));

	return ret;
}

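/* Allow the platform glue to restrict multi-block transfer sizes */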
static int tmio_multi_io_quirk(struct mmc_card *card,
			       unsigned int direction, int blk_size)
{
	struct tmio_mmc_host *host = mmc_priv(card->host);

	if (host->multi_io_quirk)
		return host->multi_io_quirk(card, direction, blk_size);

	return blk_size;
}

static struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= mmc_gpio_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
	.multi_io_quirk	= tmio_multi_io_quirk,
	.hw_reset	= tmio_mmc_hw_reset,
	.execute_tuning = tmio_mmc_execute_tuning,
};

static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{
	struct tmio_mmc_data *pdata = host->pdata;
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_get_supply(mmc);

	/* use ocr_mask if no regulator */
	if (!mmc->ocr_avail)
		mmc->ocr_avail = pdata->ocr_mask;

	/*
	 * Try again later: there is a possibility that the regulator has not
	 * been probed yet.
	 */
	if (!mmc->ocr_avail)
		return -EPROBE_DEFER;

	return 0;
}

static void tmio_mmc_of_parse(struct platform_device *pdev,
			      struct tmio_mmc_data *pdata)
{
	const struct device_node *np = pdev->dev.of_node;

	if (!np)
		return;

	if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL))
		pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE;
}

struct tmio_mmc_host*
tmio_mmc_host_alloc(struct platform_device *pdev)
{
	struct tmio_mmc_host *host;
	struct mmc_host *mmc;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
	if (!mmc)
		return NULL;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->pdev = pdev;

	return host;
}
EXPORT_SYMBOL(tmio_mmc_host_alloc);

void tmio_mmc_host_free(struct tmio_mmc_host *host)
{
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_free);

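/*
 * Typical call sequence from a platform glue driver (a sketch only: the
 * "foo" glue driver and its probe function are hypothetical, while the
 * tmio_mmc_host_* helpers are the ones exported by this file):
 *
 *	static int foo_mmc_probe(struct platform_device *pdev)
 *	{
 *		struct tmio_mmc_data *pdata = dev_get_platdata(&pdev->dev);
 *		struct tmio_mmc_host *host;
 *		int ret;
 *
 *		host = tmio_mmc_host_alloc(pdev);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		ret = tmio_mmc_host_probe(host, pdata);
 *		if (ret < 0)
 *			tmio_mmc_host_free(host);
 *
 *		return ret;
 *	}
 */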
int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
			struct tmio_mmc_data *pdata)
{
	struct platform_device *pdev = _host->pdev;
	struct mmc_host *mmc = _host->mmc;
	struct resource *res_ctl;
	int ret;
	u32 irq_mask = TMIO_MASK_CMD;

	tmio_mmc_of_parse(pdev, pdata);

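	/* The write16 hook is only used by hardware that needs idle-wait handling */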
	if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
		_host->write16_hook = NULL;

	res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		return -EINVAL;

	ret = mmc_of_parse(mmc);
	if (ret < 0)
		goto host_free;

	_host->pdata = pdata;
	platform_set_drvdata(pdev, mmc);

	_host->set_pwr = pdata->set_pwr;
	_host->set_clk_div = pdata->set_clk_div;

	ret = tmio_mmc_init_ocr(_host);
	if (ret < 0)
		goto host_free;

	_host->ctl = devm_ioremap(&pdev->dev,
				  res_ctl->start, resource_size(res_ctl));
	if (!_host->ctl) {
		ret = -ENOMEM;
		goto host_free;
	}

	tmio_mmc_ops.card_busy = _host->card_busy;
	tmio_mmc_ops.start_signal_voltage_switch = _host->start_signal_voltage_switch;
	mmc->ops = &tmio_mmc_ops;

	mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->caps2 |= pdata->capabilities2;
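	/* Set transfer limits: 512-byte blocks, up to 32 scatterlist segments */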
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = (PAGE_SIZE / mmc->max_blk_size) *
		mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

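	/*
	 * Rely on the controller's own card-detect logic only when no other
	 * CD source (GPIO, polling, a non-removable card or a slot CD IRQ)
	 * is available.
	 */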
	_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
				  mmc->caps & MMC_CAP_NEEDS_POLL ||
				  !mmc_card_is_removable(mmc) ||
				  mmc->slot.cd_irq >= 0);

	/*
	 * On Gen2+, eMMC with NONREMOVABLE currently fails because native
	 * hotplug gets disabled. It seems to be runtime-PM related, but needs
	 * further research. Since a PM overhaul is planned anyway, for now
	 * keep the device active by always enabling native hotplug.
	 */
	if (pdata->flags & TMIO_MMC_MIN_RCAR2)
		_host->native_hotplug = true;

	if (tmio_mmc_clk_enable(_host) < 0) {
		mmc->f_max = pdata->hclk;
		mmc->f_min = mmc->f_max / 512;
	}

	/*
	 * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
	 * looping forever...
	 */
	if (mmc->f_min == 0) {
		ret = -EINVAL;
		goto host_free;
	}

	/*
	 * When using the internal tmio hardware logic for card detection, the
	 * controller needs to stay powered for that logic to work.
	 */
	if (_host->native_hotplug)
		pm_runtime_get_noresume(&pdev->dev);

	tmio_mmc_clk_stop(_host);
	tmio_mmc_reset(_host);

	_host->sdcard_irq_mask = sd_ctrl_read16_and_16_as_32(_host, CTL_IRQ_MASK);
	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);

	/* Unmask the IRQs we want to know about */
	if (!_host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!_host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;
	if (!_host->native_hotplug)
		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);

	_host->sdcard_irq_mask &= ~irq_mask;

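	/* Keep all SDIO IRQs masked and disabled until .enable_sdio_irq() asks for them */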
	_host->sdio_irq_enabled = false;
	if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
		_host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(_host, CTL_SDIO_IRQ_MASK, _host->sdio_irq_mask);
		sd_ctrl_write16(_host, CTL_TRANSACTION_CTL, 0x0000);
	}

	spin_lock_init(&_host->lock);
	mutex_init(&_host->ios_lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
	INIT_WORK(&_host->done, tmio_mmc_done_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(_host, pdata);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = mmc_add_host(mmc);
	if (ret < 0) {
		tmio_mmc_host_remove(_host);
		return ret;
	}

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
		ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
		if (ret < 0) {
			tmio_mmc_host_remove(_host);
			return ret;
		}
		mmc_gpiod_request_cd_irq(mmc);
	}

	return 0;

host_free:
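	/* Nothing to unwind here; on failure the caller disposes of the host via tmio_mmc_host_free() */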

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_probe);

void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
	struct platform_device *pdev = host->pdev;
	struct mmc_host *mmc = host->mmc;

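	/*
	 * The native_hotplug case already holds the runtime PM reference
	 * taken in probe(); otherwise take one now so that the controller
	 * is powered during teardown.
	 */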
	if (!host->native_hotplug)
		pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(mmc);
	cancel_work_sync(&host->done);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);

#ifdef CONFIG_PM
int tmio_mmc_host_runtime_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

	if (host->clk_cache)
		tmio_mmc_clk_stop(host);

	if (host->clk_disable)
		host->clk_disable(host);

	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);

static bool tmio_mmc_can_retune(struct tmio_mmc_host *host)
{
	return host->tap_num && mmc_can_retune(host->mmc);
}

int tmio_mmc_host_runtime_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	tmio_mmc_reset(host);
	tmio_mmc_clk_enable(host);

	if (host->clk_cache)
		tmio_mmc_set_clock(host, host->clk_cache);

	tmio_mmc_enable_dma(host, true);

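	/* The reset above discards the sampling-tap selection; restore it if tuning data exists */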
	if (tmio_mmc_can_retune(host) && host->select_tuning(host))
		dev_warn(&host->pdev->dev, "Tuning selection failed\n");

	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);
#endif

MODULE_LICENSE("GPL v2");