// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Portions copyright (C) 2003 Russell King, PXA MMCI Driver
 * Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver
 *
 * Copyright 2008 Embedded Alley Solutions, Inc.
 * Copyright 2009-2011 Freescale Semiconductor, Inc.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dma/mxs-dma.h>
#include <linux/highmem.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/spi/mxs-spi.h>

#define DRIVER_NAME	"mxs-mmc"

#define MXS_MMC_IRQ_BITS	(BM_SSP_CTRL1_SDIO_IRQ | \
				 BM_SSP_CTRL1_RESP_ERR_IRQ | \
				 BM_SSP_CTRL1_RESP_TIMEOUT_IRQ | \
				 BM_SSP_CTRL1_DATA_TIMEOUT_IRQ | \
				 BM_SSP_CTRL1_DATA_CRC_IRQ | \
				 BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ | \
				 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ | \
				 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)

/* card detect polling timeout */
#define MXS_MMC_DETECT_TIMEOUT		(HZ/2)

struct mxs_mmc_host {
	struct mxs_ssp		ssp;

	struct mmc_host		*mmc;
	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;

	unsigned char		bus_width;
	spinlock_t		lock;
	int			sdio_irq_en;
	bool			broken_cd;
};

static int mxs_mmc_get_cd(struct mmc_host *mmc)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;
	int present, ret;

	if (host->broken_cd)
		return -ENOSYS;

	ret = mmc_gpio_get_cd(mmc);
	if (ret >= 0)
		return ret;

	present = mmc->caps & MMC_CAP_NEEDS_POLL ||
		!(readl(ssp->base + HW_SSP_STATUS(ssp)) &
			BM_SSP_STATUS_CARD_DETECT);

	if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
		present = !present;

	return present;
}

static int mxs_mmc_reset(struct mxs_mmc_host *host)
{
	struct mxs_ssp *ssp = &host->ssp;
	u32 ctrl0, ctrl1;
	int ret;

	ret = stmp_reset_block(ssp->base);
	if (ret)
		return ret;

	ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
	ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
		BF_SSP(0x7, CTRL1_WORD_LENGTH) |
		BM_SSP_CTRL1_DMA_ENABLE |
		BM_SSP_CTRL1_POLARITY |
		BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN |
		BM_SSP_CTRL1_DATA_CRC_IRQ_EN |
		BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN |
		BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN |
		BM_SSP_CTRL1_RESP_ERR_IRQ_EN;

	writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
	       BF_SSP(2, TIMING_CLOCK_DIVIDE) |
	       BF_SSP(0, TIMING_CLOCK_RATE),
	       ssp->base + HW_SSP_TIMING(ssp));

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN;
	}

	writel(ctrl0, ssp->base + HW_SSP_CTRL0);
	writel(ctrl1, ssp->base + HW_SSP_CTRL1(ssp));
	return 0;
}

static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
			      struct mmc_command *cmd);

static void mxs_mmc_request_done(struct mxs_mmc_host *host)
{
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = host->data;
	struct mmc_request *mrq = host->mrq;
	struct mxs_ssp *ssp = &host->ssp;

	if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
		if (mmc_resp_type(cmd) & MMC_RSP_136) {
			cmd->resp[3] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
			cmd->resp[2] = readl(ssp->base + HW_SSP_SDRESP1(ssp));
			cmd->resp[1] = readl(ssp->base + HW_SSP_SDRESP2(ssp));
			cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP3(ssp));
		} else {
			cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
		}
	}

	if (cmd == mrq->sbc) {
		/* Finished CMD23, now send actual command. */
		mxs_mmc_start_cmd(host, mrq->cmd);
		return;
	} else if (data) {
		dma_unmap_sg(mmc_dev(host->mmc), data->sg,
			     data->sg_len, ssp->dma_dir);
		/*
		 * If there was an error on any block, we mark all
		 * data blocks as being in error.
		 */
		if (!data->error)
			data->bytes_xfered = data->blocks * data->blksz;
		else
			data->bytes_xfered = 0;

		host->data = NULL;
		if (data->stop && (data->error || !mrq->sbc)) {
			mxs_mmc_start_cmd(host, mrq->stop);
			return;
		}
	}

	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}

static void mxs_mmc_dma_irq_callback(void *param)
{
	struct mxs_mmc_host *host = param;

	mxs_mmc_request_done(host);
}

static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
{
	struct mxs_mmc_host *host = dev_id;
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = host->data;
	struct mxs_ssp *ssp = &host->ssp;
	u32 stat;

	spin_lock(&host->lock);

	stat = readl(ssp->base + HW_SSP_CTRL1(ssp));
	writel(stat & MXS_MMC_IRQ_BITS,
	       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);

	spin_unlock(&host->lock);

	if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
		mmc_signal_sdio_irq(host->mmc);

	if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
		cmd->error = -ETIMEDOUT;
	else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
		cmd->error = -EIO;

	if (data) {
		if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ |
			    BM_SSP_CTRL1_RECV_TIMEOUT_IRQ))
			data->error = -ETIMEDOUT;
		else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ)
			data->error = -EILSEQ;
		else if (stat & (BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ |
				 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ))
			data->error = -EIO;
	}

	return IRQ_HANDLED;
}

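/*
 * Build a DMA descriptor for the current request.  When host->data is set,
 * the mmc core's scatterlist describes a real memory transfer.  When it is
 * NULL, the three words in ssp->ssp_pio_words (CTRL0, CMD0, CMD1) are passed
 * with DMA_TRANS_NONE; the mxs-dma engine then treats them as PIO register
 * writes to the SSP block rather than as a memory transfer.
 */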
static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
	struct mxs_mmc_host *host, unsigned long flags)
{
	struct mxs_ssp *ssp = &host->ssp;
	struct dma_async_tx_descriptor *desc;
	struct mmc_data *data = host->data;
	struct scatterlist *sgl;
	unsigned int sg_len;

	if (data) {
		/* data */
		dma_map_sg(mmc_dev(host->mmc), data->sg,
			   data->sg_len, ssp->dma_dir);
		sgl = data->sg;
		sg_len = data->sg_len;
	} else {
		/* pio */
		sgl = (struct scatterlist *) ssp->ssp_pio_words;
		sg_len = SSP_PIO_NUM;
	}

	desc = dmaengine_prep_slave_sg(ssp->dmach,
				       sgl, sg_len, ssp->slave_dirn, flags);
	if (desc) {
		desc->callback = mxs_mmc_dma_irq_callback;
		desc->callback_param = host;
	} else {
		if (data)
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				     data->sg_len, ssp->dma_dir);
	}

	return desc;
}

static void mxs_mmc_bc(struct mxs_mmc_host *host)
{
	struct mxs_ssp *ssp = &host->ssp;
	struct mmc_command *cmd = host->cmd;
	struct dma_async_tx_descriptor *desc;
	u32 ctrl0, cmd0, cmd1;

	ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC;
	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC;
	cmd1 = cmd->arg;

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
	}

	ssp->ssp_pio_words[0] = ctrl0;
	ssp->ssp_pio_words[1] = cmd0;
	ssp->ssp_pio_words[2] = cmd1;
	ssp->dma_dir = DMA_NONE;
	ssp->slave_dirn = DMA_TRANS_NONE;
	desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
	if (!desc)
		goto out;

	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);
	return;

out:
	dev_warn(mmc_dev(host->mmc),
		 "%s: failed to prep dma\n", __func__);
}

static void mxs_mmc_ac(struct mxs_mmc_host *host)
{
	struct mxs_ssp *ssp = &host->ssp;
	struct mmc_command *cmd = host->cmd;
	struct dma_async_tx_descriptor *desc;
	u32 ignore_crc, get_resp, long_resp;
	u32 ctrl0, cmd0, cmd1;

	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
			0 : BM_SSP_CTRL0_IGNORE_CRC;
	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
			BM_SSP_CTRL0_GET_RESP : 0;
	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
			BM_SSP_CTRL0_LONG_RESP : 0;

	ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp;
	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
	cmd1 = cmd->arg;

	if (cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
	}

	ssp->ssp_pio_words[0] = ctrl0;
	ssp->ssp_pio_words[1] = cmd0;
	ssp->ssp_pio_words[2] = cmd1;
	ssp->dma_dir = DMA_NONE;
	ssp->slave_dirn = DMA_TRANS_NONE;
	desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
	if (!desc)
		goto out;

	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);
	return;

out:
	dev_warn(mmc_dev(host->mmc),
		 "%s: failed to prep dma\n", __func__);
}

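/*
 * Convert a data timeout given in nanoseconds into the value programmed
 * into the TIMEOUT field of the SSP TIMING register.  The conversion below
 * assumes the hardware scales that field by ssp_timeout_mul (4096) SSP
 * clock cycles.
 */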
static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns)
{
	const unsigned int ssp_timeout_mul = 4096;
	/*
	 * Calculate ticks in ms since ns are large numbers
	 * and might overflow
	 */
	const unsigned int clock_per_ms = clock_rate / 1000;
	const unsigned int ms = ns / 1000;
	const unsigned int ticks = ms * clock_per_ms;
	const unsigned int ssp_ticks = ticks / ssp_timeout_mul;

	WARN_ON(ssp_ticks == 0);
	return ssp_ticks;
}

static void mxs_mmc_adtc(struct mxs_mmc_host *host)
{
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = cmd->data;
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl = data->sg, *sg;
	unsigned int sg_len = data->sg_len;
	unsigned int i;

	unsigned short dma_data_dir, timeout;
	enum dma_transfer_direction slave_dirn;
	unsigned int data_size = 0, log2_blksz;
	unsigned int blocks = data->blocks;

	struct mxs_ssp *ssp = &host->ssp;

	u32 ignore_crc, get_resp, long_resp, read;
	u32 ctrl0, cmd0, cmd1, val;

	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
			0 : BM_SSP_CTRL0_IGNORE_CRC;
	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
			BM_SSP_CTRL0_GET_RESP : 0;
	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
			BM_SSP_CTRL0_LONG_RESP : 0;

	if (data->flags & MMC_DATA_WRITE) {
		dma_data_dir = DMA_TO_DEVICE;
		slave_dirn = DMA_MEM_TO_DEV;
		read = 0;
	} else {
		dma_data_dir = DMA_FROM_DEVICE;
		slave_dirn = DMA_DEV_TO_MEM;
		read = BM_SSP_CTRL0_READ;
	}

	ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) |
		ignore_crc | get_resp | long_resp |
		BM_SSP_CTRL0_DATA_XFER | read |
		BM_SSP_CTRL0_WAIT_FOR_IRQ |
		BM_SSP_CTRL0_ENABLE;

	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);

	/* get logarithm to base 2 of block size for setting register */
	log2_blksz = ilog2(data->blksz);

	/*
	 * take special care of the case that data size from data->sg
	 * is not equal to blocks x blksz
	 */
	for_each_sg(sgl, sg, sg_len, i)
		data_size += sg->length;

	if (data_size != data->blocks * data->blksz)
		blocks = 1;

	/* xfer count, block size and count need to be set differently */
	if (ssp_is_old(ssp)) {
		ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
		cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
			BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
	} else {
		writel(data_size, ssp->base + HW_SSP_XFER_SIZE);
		writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) |
		       BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT),
		       ssp->base + HW_SSP_BLOCK_SIZE);
	}

	if (cmd->opcode == SD_IO_RW_EXTENDED)
		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;

	cmd1 = cmd->arg;

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
	}

	/* set the timeout count */
	timeout = mxs_ns_to_ssp_ticks(ssp->clk_rate, data->timeout_ns);
	val = readl(ssp->base + HW_SSP_TIMING(ssp));
	val &= ~(BM_SSP_TIMING_TIMEOUT);
	val |= BF_SSP(timeout, TIMING_TIMEOUT);
	writel(val, ssp->base + HW_SSP_TIMING(ssp));

	/* pio */
	ssp->ssp_pio_words[0] = ctrl0;
	ssp->ssp_pio_words[1] = cmd0;
	ssp->ssp_pio_words[2] = cmd1;
	ssp->dma_dir = DMA_NONE;
	ssp->slave_dirn = DMA_TRANS_NONE;
	desc = mxs_mmc_prep_dma(host, 0);
	if (!desc)
		goto out;

	/* append data sg */
	WARN_ON(host->data != NULL);
	host->data = data;
	ssp->dma_dir = dma_data_dir;
	ssp->slave_dirn = slave_dirn;
	desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END);
	if (!desc)
		goto out;

	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);
	return;
out:
	dev_warn(mmc_dev(host->mmc),
		 "%s: failed to prep dma\n", __func__);
}

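/*
 * Dispatch on the MMC command class: bc (broadcast, no response), bcr
 * (broadcast with response), ac (addressed, no data transfer) and adtc
 * (addressed data transfer).  bcr commands take the same path as ac.
 */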
static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
			      struct mmc_command *cmd)
{
	host->cmd = cmd;

	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_BC:
		mxs_mmc_bc(host);
		break;
	case MMC_CMD_BCR:
		mxs_mmc_ac(host);
		break;
	case MMC_CMD_AC:
		mxs_mmc_ac(host);
		break;
	case MMC_CMD_ADTC:
		mxs_mmc_adtc(host);
		break;
	default:
		dev_warn(mmc_dev(host->mmc),
			 "%s: unknown MMC command\n", __func__);
		break;
	}
}

static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);

	WARN_ON(host->mrq != NULL);
	host->mrq = mrq;

	if (mrq->sbc)
		mxs_mmc_start_cmd(host, mrq->sbc);
	else
		mxs_mmc_start_cmd(host, mrq->cmd);
}

static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);

	if (ios->bus_width == MMC_BUS_WIDTH_8)
		host->bus_width = 2;
	else if (ios->bus_width == MMC_BUS_WIDTH_4)
		host->bus_width = 1;
	else
		host->bus_width = 0;

	if (ios->clock)
		mxs_ssp_set_clk_rate(&host->ssp, ios->clock);
}

static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	host->sdio_irq_en = enable;

	if (enable) {
		writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
		writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
		       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_SET);
	} else {
		writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
		writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
		       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);
	}

	spin_unlock_irqrestore(&host->lock, flags);

	if (enable && readl(ssp->base + HW_SSP_STATUS(ssp)) &
			BM_SSP_STATUS_SDIO_IRQ)
		mmc_signal_sdio_irq(host->mmc);
}

static const struct mmc_host_ops mxs_mmc_ops = {
	.request = mxs_mmc_request,
	.get_ro = mmc_gpio_get_ro,
	.get_cd = mxs_mmc_get_cd,
	.set_ios = mxs_mmc_set_ios,
	.enable_sdio_irq = mxs_mmc_enable_sdio_irq,
};

static const struct of_device_id mxs_mmc_dt_ids[] = {
	{ .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_SSP, },
	{ .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_SSP, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);

static void mxs_mmc_regulator_disable(void *regulator)
{
	regulator_disable(regulator);
}

static int mxs_mmc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct mxs_mmc_host *host;
	struct mmc_host *mmc;
	int ret = 0, irq_err;
	struct regulator *reg_vmmc;
	struct mxs_ssp *ssp;

	irq_err = platform_get_irq(pdev, 0);
	if (irq_err < 0)
		return irq_err;

	mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	ssp = &host->ssp;
	ssp->dev = &pdev->dev;
	ssp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ssp->base)) {
		ret = PTR_ERR(ssp->base);
		goto out_mmc_free;
	}

	ssp->devid = (enum mxs_ssp_id)of_device_get_match_data(&pdev->dev);

	host->mmc = mmc;
	host->sdio_irq_en = 0;

	reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc");
	if (!IS_ERR(reg_vmmc)) {
		ret = regulator_enable(reg_vmmc);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable vmmc regulator: %d\n", ret);
			goto out_mmc_free;
		}

		ret = devm_add_action_or_reset(&pdev->dev, mxs_mmc_regulator_disable,
					       reg_vmmc);
		if (ret)
			goto out_mmc_free;
	}

	ssp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(ssp->clk)) {
		ret = PTR_ERR(ssp->clk);
		goto out_mmc_free;
	}
	ret = clk_prepare_enable(ssp->clk);
	if (ret)
		goto out_mmc_free;

	ret = mxs_mmc_reset(host);
	if (ret) {
		dev_err(&pdev->dev, "Failed to reset mmc: %d\n", ret);
		goto out_clk_disable;
	}

	ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx");
	if (IS_ERR(ssp->dmach)) {
		dev_err(mmc_dev(host->mmc),
			"%s: failed to request dma\n", __func__);
		ret = PTR_ERR(ssp->dmach);
		goto out_clk_disable;
	}

	/* set mmc core parameters */
	mmc->ops = &mxs_mmc_ops;
	mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
		    MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL | MMC_CAP_CMD23;

	host->broken_cd = of_property_read_bool(np, "broken-cd");

	mmc->f_min = 400000;
	mmc->f_max = 288000000;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto out_free_dma;

	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	mmc->max_segs = 52;
	mmc->max_blk_size = 1 << 0xf;
	mmc->max_blk_count = (ssp_is_old(ssp)) ? 0xff : 0xffffff;
	mmc->max_req_size = (ssp_is_old(ssp)) ? 0xffff : 0xffffffff;
	mmc->max_seg_size = dma_get_max_seg_size(ssp->dmach->device->dev);

	platform_set_drvdata(pdev, mmc);

	spin_lock_init(&host->lock);

	ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
			       dev_name(&pdev->dev), host);
	if (ret)
		goto out_free_dma;

	ret = mmc_add_host(mmc);
	if (ret)
		goto out_free_dma;

	dev_info(mmc_dev(host->mmc), "initialized\n");

	return 0;

out_free_dma:
	dma_release_channel(ssp->dmach);
out_clk_disable:
	clk_disable_unprepare(ssp->clk);
out_mmc_free:
	mmc_free_host(mmc);
	return ret;
}

static void mxs_mmc_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;

	mmc_remove_host(mmc);

	if (ssp->dmach)
		dma_release_channel(ssp->dmach);

	clk_disable_unprepare(ssp->clk);

	mmc_free_host(mmc);
}

#ifdef CONFIG_PM_SLEEP
static int mxs_mmc_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;

	clk_disable_unprepare(ssp->clk);
	return 0;
}

static int mxs_mmc_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;

	return clk_prepare_enable(ssp->clk);
}
#endif

static SIMPLE_DEV_PM_OPS(mxs_mmc_pm_ops, mxs_mmc_suspend, mxs_mmc_resume);

static struct platform_driver mxs_mmc_driver = {
	.probe		= mxs_mmc_probe,
	.remove_new	= mxs_mmc_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.pm	= &mxs_mmc_pm_ops,
		.of_match_table = mxs_mmc_dt_ids,
	},
};

module_platform_driver(mxs_mmc_driver);

MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral");
MODULE_AUTHOR("Freescale Semiconductor");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);