Loading...
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Portions copyright (C) 2003 Russell King, PXA MMCI Driver
4 * Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver
5 *
6 * Copyright 2008 Embedded Alley Solutions, Inc.
7 * Copyright 2009-2011 Freescale Semiconductor, Inc.
8 */
9
10#include <linux/kernel.h>
11#include <linux/init.h>
12#include <linux/ioport.h>
13#include <linux/of.h>
14#include <linux/of_device.h>
15#include <linux/platform_device.h>
16#include <linux/delay.h>
17#include <linux/interrupt.h>
18#include <linux/dma-mapping.h>
19#include <linux/dmaengine.h>
20#include <linux/dma/mxs-dma.h>
21#include <linux/highmem.h>
22#include <linux/clk.h>
23#include <linux/err.h>
24#include <linux/completion.h>
25#include <linux/mmc/host.h>
26#include <linux/mmc/mmc.h>
27#include <linux/mmc/sdio.h>
28#include <linux/mmc/slot-gpio.h>
29#include <linux/regulator/consumer.h>
30#include <linux/module.h>
31#include <linux/stmp_device.h>
32#include <linux/spi/mxs-spi.h>
33
34#define DRIVER_NAME "mxs-mmc"
35
36#define MXS_MMC_IRQ_BITS (BM_SSP_CTRL1_SDIO_IRQ | \
37 BM_SSP_CTRL1_RESP_ERR_IRQ | \
38 BM_SSP_CTRL1_RESP_TIMEOUT_IRQ | \
39 BM_SSP_CTRL1_DATA_TIMEOUT_IRQ | \
40 BM_SSP_CTRL1_DATA_CRC_IRQ | \
41 BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ | \
42 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ | \
43 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)
44
45/* card detect polling timeout */
46#define MXS_MMC_DETECT_TIMEOUT (HZ/2)
47
48struct mxs_mmc_host {
49 struct mxs_ssp ssp;
50
51 struct mmc_host *mmc;
52 struct mmc_request *mrq;
53 struct mmc_command *cmd;
54 struct mmc_data *data;
55
56 unsigned char bus_width;
57 spinlock_t lock;
58 int sdio_irq_en;
59 bool broken_cd;
60};
61
62static int mxs_mmc_get_cd(struct mmc_host *mmc)
63{
64 struct mxs_mmc_host *host = mmc_priv(mmc);
65 struct mxs_ssp *ssp = &host->ssp;
66 int present, ret;
67
68 if (host->broken_cd)
69 return -ENOSYS;
70
71 ret = mmc_gpio_get_cd(mmc);
72 if (ret >= 0)
73 return ret;
74
75 present = mmc->caps & MMC_CAP_NEEDS_POLL ||
76 !(readl(ssp->base + HW_SSP_STATUS(ssp)) &
77 BM_SSP_STATUS_CARD_DETECT);
78
79 if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
80 present = !present;
81
82 return present;
83}
84
85static int mxs_mmc_reset(struct mxs_mmc_host *host)
86{
87 struct mxs_ssp *ssp = &host->ssp;
88 u32 ctrl0, ctrl1;
89 int ret;
90
91 ret = stmp_reset_block(ssp->base);
92 if (ret)
93 return ret;
94
95 ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
96 ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
97 BF_SSP(0x7, CTRL1_WORD_LENGTH) |
98 BM_SSP_CTRL1_DMA_ENABLE |
99 BM_SSP_CTRL1_POLARITY |
100 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN |
101 BM_SSP_CTRL1_DATA_CRC_IRQ_EN |
102 BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN |
103 BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN |
104 BM_SSP_CTRL1_RESP_ERR_IRQ_EN;
105
106 writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
107 BF_SSP(2, TIMING_CLOCK_DIVIDE) |
108 BF_SSP(0, TIMING_CLOCK_RATE),
109 ssp->base + HW_SSP_TIMING(ssp));
110
111 if (host->sdio_irq_en) {
112 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
113 ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN;
114 }
115
116 writel(ctrl0, ssp->base + HW_SSP_CTRL0);
117 writel(ctrl1, ssp->base + HW_SSP_CTRL1(ssp));
118 return 0;
119}
120
121static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
122 struct mmc_command *cmd);
123
124static void mxs_mmc_request_done(struct mxs_mmc_host *host)
125{
126 struct mmc_command *cmd = host->cmd;
127 struct mmc_data *data = host->data;
128 struct mmc_request *mrq = host->mrq;
129 struct mxs_ssp *ssp = &host->ssp;
130
131 if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
132 if (mmc_resp_type(cmd) & MMC_RSP_136) {
133 cmd->resp[3] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
134 cmd->resp[2] = readl(ssp->base + HW_SSP_SDRESP1(ssp));
135 cmd->resp[1] = readl(ssp->base + HW_SSP_SDRESP2(ssp));
136 cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP3(ssp));
137 } else {
138 cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
139 }
140 }
141
142 if (cmd == mrq->sbc) {
143 /* Finished CMD23, now send actual command. */
144 mxs_mmc_start_cmd(host, mrq->cmd);
145 return;
146 } else if (data) {
147 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
148 data->sg_len, ssp->dma_dir);
149 /*
150 * If there was an error on any block, we mark all
151 * data blocks as being in error.
152 */
153 if (!data->error)
154 data->bytes_xfered = data->blocks * data->blksz;
155 else
156 data->bytes_xfered = 0;
157
158 host->data = NULL;
159 if (data->stop && (data->error || !mrq->sbc)) {
160 mxs_mmc_start_cmd(host, mrq->stop);
161 return;
162 }
163 }
164
165 host->mrq = NULL;
166 mmc_request_done(host->mmc, mrq);
167}
168
169static void mxs_mmc_dma_irq_callback(void *param)
170{
171 struct mxs_mmc_host *host = param;
172
173 mxs_mmc_request_done(host);
174}
175
176static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
177{
178 struct mxs_mmc_host *host = dev_id;
179 struct mmc_command *cmd = host->cmd;
180 struct mmc_data *data = host->data;
181 struct mxs_ssp *ssp = &host->ssp;
182 u32 stat;
183
184 spin_lock(&host->lock);
185
186 stat = readl(ssp->base + HW_SSP_CTRL1(ssp));
187 writel(stat & MXS_MMC_IRQ_BITS,
188 ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);
189
190 spin_unlock(&host->lock);
191
192 if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
193 mmc_signal_sdio_irq(host->mmc);
194
195 if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
196 cmd->error = -ETIMEDOUT;
197 else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
198 cmd->error = -EIO;
199
200 if (data) {
201 if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ |
202 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ))
203 data->error = -ETIMEDOUT;
204 else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ)
205 data->error = -EILSEQ;
206 else if (stat & (BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ |
207 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ))
208 data->error = -EIO;
209 }
210
211 return IRQ_HANDLED;
212}
213
214static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
215 struct mxs_mmc_host *host, unsigned long flags)
216{
217 struct mxs_ssp *ssp = &host->ssp;
218 struct dma_async_tx_descriptor *desc;
219 struct mmc_data *data = host->data;
220 struct scatterlist * sgl;
221 unsigned int sg_len;
222
223 if (data) {
224 /* data */
225 dma_map_sg(mmc_dev(host->mmc), data->sg,
226 data->sg_len, ssp->dma_dir);
227 sgl = data->sg;
228 sg_len = data->sg_len;
229 } else {
230 /* pio */
231 sgl = (struct scatterlist *) ssp->ssp_pio_words;
232 sg_len = SSP_PIO_NUM;
233 }
234
235 desc = dmaengine_prep_slave_sg(ssp->dmach,
236 sgl, sg_len, ssp->slave_dirn, flags);
237 if (desc) {
238 desc->callback = mxs_mmc_dma_irq_callback;
239 desc->callback_param = host;
240 } else {
241 if (data)
242 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
243 data->sg_len, ssp->dma_dir);
244 }
245
246 return desc;
247}
248
249static void mxs_mmc_bc(struct mxs_mmc_host *host)
250{
251 struct mxs_ssp *ssp = &host->ssp;
252 struct mmc_command *cmd = host->cmd;
253 struct dma_async_tx_descriptor *desc;
254 u32 ctrl0, cmd0, cmd1;
255
256 ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC;
257 cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC;
258 cmd1 = cmd->arg;
259
260 if (host->sdio_irq_en) {
261 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
262 cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
263 }
264
265 ssp->ssp_pio_words[0] = ctrl0;
266 ssp->ssp_pio_words[1] = cmd0;
267 ssp->ssp_pio_words[2] = cmd1;
268 ssp->dma_dir = DMA_NONE;
269 ssp->slave_dirn = DMA_TRANS_NONE;
270 desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
271 if (!desc)
272 goto out;
273
274 dmaengine_submit(desc);
275 dma_async_issue_pending(ssp->dmach);
276 return;
277
278out:
279 dev_warn(mmc_dev(host->mmc),
280 "%s: failed to prep dma\n", __func__);
281}
282
283static void mxs_mmc_ac(struct mxs_mmc_host *host)
284{
285 struct mxs_ssp *ssp = &host->ssp;
286 struct mmc_command *cmd = host->cmd;
287 struct dma_async_tx_descriptor *desc;
288 u32 ignore_crc, get_resp, long_resp;
289 u32 ctrl0, cmd0, cmd1;
290
291 ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
292 0 : BM_SSP_CTRL0_IGNORE_CRC;
293 get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
294 BM_SSP_CTRL0_GET_RESP : 0;
295 long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
296 BM_SSP_CTRL0_LONG_RESP : 0;
297
298 ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp;
299 cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
300 cmd1 = cmd->arg;
301
302 if (cmd->opcode == MMC_STOP_TRANSMISSION)
303 cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
304
305 if (host->sdio_irq_en) {
306 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
307 cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
308 }
309
310 ssp->ssp_pio_words[0] = ctrl0;
311 ssp->ssp_pio_words[1] = cmd0;
312 ssp->ssp_pio_words[2] = cmd1;
313 ssp->dma_dir = DMA_NONE;
314 ssp->slave_dirn = DMA_TRANS_NONE;
315 desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
316 if (!desc)
317 goto out;
318
319 dmaengine_submit(desc);
320 dma_async_issue_pending(ssp->dmach);
321 return;
322
323out:
324 dev_warn(mmc_dev(host->mmc),
325 "%s: failed to prep dma\n", __func__);
326}
327
328static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns)
329{
330 const unsigned int ssp_timeout_mul = 4096;
331 /*
332 * Calculate ticks in ms since ns are large numbers
333 * and might overflow
334 */
335 const unsigned int clock_per_ms = clock_rate / 1000;
336 const unsigned int ms = ns / 1000;
337 const unsigned int ticks = ms * clock_per_ms;
338 const unsigned int ssp_ticks = ticks / ssp_timeout_mul;
339
340 WARN_ON(ssp_ticks == 0);
341 return ssp_ticks;
342}
343
344static void mxs_mmc_adtc(struct mxs_mmc_host *host)
345{
346 struct mmc_command *cmd = host->cmd;
347 struct mmc_data *data = cmd->data;
348 struct dma_async_tx_descriptor *desc;
349 struct scatterlist *sgl = data->sg, *sg;
350 unsigned int sg_len = data->sg_len;
351 unsigned int i;
352
353 unsigned short dma_data_dir, timeout;
354 enum dma_transfer_direction slave_dirn;
355 unsigned int data_size = 0, log2_blksz;
356 unsigned int blocks = data->blocks;
357
358 struct mxs_ssp *ssp = &host->ssp;
359
360 u32 ignore_crc, get_resp, long_resp, read;
361 u32 ctrl0, cmd0, cmd1, val;
362
363 ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
364 0 : BM_SSP_CTRL0_IGNORE_CRC;
365 get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
366 BM_SSP_CTRL0_GET_RESP : 0;
367 long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
368 BM_SSP_CTRL0_LONG_RESP : 0;
369
370 if (data->flags & MMC_DATA_WRITE) {
371 dma_data_dir = DMA_TO_DEVICE;
372 slave_dirn = DMA_MEM_TO_DEV;
373 read = 0;
374 } else {
375 dma_data_dir = DMA_FROM_DEVICE;
376 slave_dirn = DMA_DEV_TO_MEM;
377 read = BM_SSP_CTRL0_READ;
378 }
379
380 ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) |
381 ignore_crc | get_resp | long_resp |
382 BM_SSP_CTRL0_DATA_XFER | read |
383 BM_SSP_CTRL0_WAIT_FOR_IRQ |
384 BM_SSP_CTRL0_ENABLE;
385
386 cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
387
388 /* get logarithm to base 2 of block size for setting register */
389 log2_blksz = ilog2(data->blksz);
390
391 /*
392 * take special care of the case that data size from data->sg
393 * is not equal to blocks x blksz
394 */
395 for_each_sg(sgl, sg, sg_len, i)
396 data_size += sg->length;
397
398 if (data_size != data->blocks * data->blksz)
399 blocks = 1;
400
401 /* xfer count, block size and count need to be set differently */
402 if (ssp_is_old(ssp)) {
403 ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
404 cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
405 BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
406 } else {
407 writel(data_size, ssp->base + HW_SSP_XFER_SIZE);
408 writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) |
409 BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT),
410 ssp->base + HW_SSP_BLOCK_SIZE);
411 }
412
413 if (cmd->opcode == SD_IO_RW_EXTENDED)
414 cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
415
416 cmd1 = cmd->arg;
417
418 if (host->sdio_irq_en) {
419 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
420 cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
421 }
422
423 /* set the timeout count */
424 timeout = mxs_ns_to_ssp_ticks(ssp->clk_rate, data->timeout_ns);
425 val = readl(ssp->base + HW_SSP_TIMING(ssp));
426 val &= ~(BM_SSP_TIMING_TIMEOUT);
427 val |= BF_SSP(timeout, TIMING_TIMEOUT);
428 writel(val, ssp->base + HW_SSP_TIMING(ssp));
429
430 /* pio */
431 ssp->ssp_pio_words[0] = ctrl0;
432 ssp->ssp_pio_words[1] = cmd0;
433 ssp->ssp_pio_words[2] = cmd1;
434 ssp->dma_dir = DMA_NONE;
435 ssp->slave_dirn = DMA_TRANS_NONE;
436 desc = mxs_mmc_prep_dma(host, 0);
437 if (!desc)
438 goto out;
439
440 /* append data sg */
441 WARN_ON(host->data != NULL);
442 host->data = data;
443 ssp->dma_dir = dma_data_dir;
444 ssp->slave_dirn = slave_dirn;
445 desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END);
446 if (!desc)
447 goto out;
448
449 dmaengine_submit(desc);
450 dma_async_issue_pending(ssp->dmach);
451 return;
452out:
453 dev_warn(mmc_dev(host->mmc),
454 "%s: failed to prep dma\n", __func__);
455}
456
457static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
458 struct mmc_command *cmd)
459{
460 host->cmd = cmd;
461
462 switch (mmc_cmd_type(cmd)) {
463 case MMC_CMD_BC:
464 mxs_mmc_bc(host);
465 break;
466 case MMC_CMD_BCR:
467 mxs_mmc_ac(host);
468 break;
469 case MMC_CMD_AC:
470 mxs_mmc_ac(host);
471 break;
472 case MMC_CMD_ADTC:
473 mxs_mmc_adtc(host);
474 break;
475 default:
476 dev_warn(mmc_dev(host->mmc),
477 "%s: unknown MMC command\n", __func__);
478 break;
479 }
480}
481
482static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
483{
484 struct mxs_mmc_host *host = mmc_priv(mmc);
485
486 WARN_ON(host->mrq != NULL);
487 host->mrq = mrq;
488
489 if (mrq->sbc)
490 mxs_mmc_start_cmd(host, mrq->sbc);
491 else
492 mxs_mmc_start_cmd(host, mrq->cmd);
493}
494
495static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
496{
497 struct mxs_mmc_host *host = mmc_priv(mmc);
498
499 if (ios->bus_width == MMC_BUS_WIDTH_8)
500 host->bus_width = 2;
501 else if (ios->bus_width == MMC_BUS_WIDTH_4)
502 host->bus_width = 1;
503 else
504 host->bus_width = 0;
505
506 if (ios->clock)
507 mxs_ssp_set_clk_rate(&host->ssp, ios->clock);
508}
509
510static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
511{
512 struct mxs_mmc_host *host = mmc_priv(mmc);
513 struct mxs_ssp *ssp = &host->ssp;
514 unsigned long flags;
515
516 spin_lock_irqsave(&host->lock, flags);
517
518 host->sdio_irq_en = enable;
519
520 if (enable) {
521 writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
522 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
523 writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
524 ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_SET);
525 } else {
526 writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
527 ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
528 writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
529 ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);
530 }
531
532 spin_unlock_irqrestore(&host->lock, flags);
533
534 if (enable && readl(ssp->base + HW_SSP_STATUS(ssp)) &
535 BM_SSP_STATUS_SDIO_IRQ)
536 mmc_signal_sdio_irq(host->mmc);
537
538}
539
540static const struct mmc_host_ops mxs_mmc_ops = {
541 .request = mxs_mmc_request,
542 .get_ro = mmc_gpio_get_ro,
543 .get_cd = mxs_mmc_get_cd,
544 .set_ios = mxs_mmc_set_ios,
545 .enable_sdio_irq = mxs_mmc_enable_sdio_irq,
546};
547
548static const struct of_device_id mxs_mmc_dt_ids[] = {
549 { .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_SSP, },
550 { .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_SSP, },
551 { /* sentinel */ }
552};
553MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);
554
555static void mxs_mmc_regulator_disable(void *regulator)
556{
557 regulator_disable(regulator);
558}
559
560static int mxs_mmc_probe(struct platform_device *pdev)
561{
562 struct device_node *np = pdev->dev.of_node;
563 struct mxs_mmc_host *host;
564 struct mmc_host *mmc;
565 int ret = 0, irq_err;
566 struct regulator *reg_vmmc;
567 struct mxs_ssp *ssp;
568
569 irq_err = platform_get_irq(pdev, 0);
570 if (irq_err < 0)
571 return irq_err;
572
573 mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
574 if (!mmc)
575 return -ENOMEM;
576
577 host = mmc_priv(mmc);
578 ssp = &host->ssp;
579 ssp->dev = &pdev->dev;
580 ssp->base = devm_platform_ioremap_resource(pdev, 0);
581 if (IS_ERR(ssp->base)) {
582 ret = PTR_ERR(ssp->base);
583 goto out_mmc_free;
584 }
585
586 ssp->devid = (enum mxs_ssp_id)of_device_get_match_data(&pdev->dev);
587
588 host->mmc = mmc;
589 host->sdio_irq_en = 0;
590
591 reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc");
592 if (!IS_ERR(reg_vmmc)) {
593 ret = regulator_enable(reg_vmmc);
594 if (ret) {
595 dev_err(&pdev->dev,
596 "Failed to enable vmmc regulator: %d\n", ret);
597 goto out_mmc_free;
598 }
599
600 ret = devm_add_action_or_reset(&pdev->dev, mxs_mmc_regulator_disable,
601 reg_vmmc);
602 if (ret)
603 goto out_mmc_free;
604 }
605
606 ssp->clk = devm_clk_get(&pdev->dev, NULL);
607 if (IS_ERR(ssp->clk)) {
608 ret = PTR_ERR(ssp->clk);
609 goto out_mmc_free;
610 }
611 ret = clk_prepare_enable(ssp->clk);
612 if (ret)
613 goto out_mmc_free;
614
615 ret = mxs_mmc_reset(host);
616 if (ret) {
617 dev_err(&pdev->dev, "Failed to reset mmc: %d\n", ret);
618 goto out_clk_disable;
619 }
620
621 ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx");
622 if (IS_ERR(ssp->dmach)) {
623 dev_err(mmc_dev(host->mmc),
624 "%s: failed to request dma\n", __func__);
625 ret = PTR_ERR(ssp->dmach);
626 goto out_clk_disable;
627 }
628
629 /* set mmc core parameters */
630 mmc->ops = &mxs_mmc_ops;
631 mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
632 MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL | MMC_CAP_CMD23;
633
634 host->broken_cd = of_property_read_bool(np, "broken-cd");
635
636 mmc->f_min = 400000;
637 mmc->f_max = 288000000;
638
639 ret = mmc_of_parse(mmc);
640 if (ret)
641 goto out_free_dma;
642
643 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
644
645 mmc->max_segs = 52;
646 mmc->max_blk_size = 1 << 0xf;
647 mmc->max_blk_count = (ssp_is_old(ssp)) ? 0xff : 0xffffff;
648 mmc->max_req_size = (ssp_is_old(ssp)) ? 0xffff : 0xffffffff;
649 mmc->max_seg_size = dma_get_max_seg_size(ssp->dmach->device->dev);
650
651 platform_set_drvdata(pdev, mmc);
652
653 spin_lock_init(&host->lock);
654
655 ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
656 dev_name(&pdev->dev), host);
657 if (ret)
658 goto out_free_dma;
659
660 ret = mmc_add_host(mmc);
661 if (ret)
662 goto out_free_dma;
663
664 dev_info(mmc_dev(host->mmc), "initialized\n");
665
666 return 0;
667
668out_free_dma:
669 dma_release_channel(ssp->dmach);
670out_clk_disable:
671 clk_disable_unprepare(ssp->clk);
672out_mmc_free:
673 mmc_free_host(mmc);
674 return ret;
675}
676
677static int mxs_mmc_remove(struct platform_device *pdev)
678{
679 struct mmc_host *mmc = platform_get_drvdata(pdev);
680 struct mxs_mmc_host *host = mmc_priv(mmc);
681 struct mxs_ssp *ssp = &host->ssp;
682
683 mmc_remove_host(mmc);
684
685 if (ssp->dmach)
686 dma_release_channel(ssp->dmach);
687
688 clk_disable_unprepare(ssp->clk);
689
690 mmc_free_host(mmc);
691
692 return 0;
693}
694
695#ifdef CONFIG_PM_SLEEP
696static int mxs_mmc_suspend(struct device *dev)
697{
698 struct mmc_host *mmc = dev_get_drvdata(dev);
699 struct mxs_mmc_host *host = mmc_priv(mmc);
700 struct mxs_ssp *ssp = &host->ssp;
701
702 clk_disable_unprepare(ssp->clk);
703 return 0;
704}
705
706static int mxs_mmc_resume(struct device *dev)
707{
708 struct mmc_host *mmc = dev_get_drvdata(dev);
709 struct mxs_mmc_host *host = mmc_priv(mmc);
710 struct mxs_ssp *ssp = &host->ssp;
711
712 return clk_prepare_enable(ssp->clk);
713}
714#endif
715
716static SIMPLE_DEV_PM_OPS(mxs_mmc_pm_ops, mxs_mmc_suspend, mxs_mmc_resume);
717
718static struct platform_driver mxs_mmc_driver = {
719 .probe = mxs_mmc_probe,
720 .remove = mxs_mmc_remove,
721 .driver = {
722 .name = DRIVER_NAME,
723 .probe_type = PROBE_PREFER_ASYNCHRONOUS,
724 .pm = &mxs_mmc_pm_ops,
725 .of_match_table = mxs_mmc_dt_ids,
726 },
727};
728
729module_platform_driver(mxs_mmc_driver);
730
731MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral");
732MODULE_AUTHOR("Freescale Semiconductor");
733MODULE_LICENSE("GPL");
734MODULE_ALIAS("platform:" DRIVER_NAME);
1/*
2 * Portions copyright (C) 2003 Russell King, PXA MMCI Driver
3 * Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver
4 *
5 * Copyright 2008 Embedded Alley Solutions, Inc.
6 * Copyright 2009-2011 Freescale Semiconductor, Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21 */
22
23#include <linux/kernel.h>
24#include <linux/init.h>
25#include <linux/ioport.h>
26#include <linux/of.h>
27#include <linux/of_device.h>
28#include <linux/of_gpio.h>
29#include <linux/platform_device.h>
30#include <linux/delay.h>
31#include <linux/interrupt.h>
32#include <linux/dma-mapping.h>
33#include <linux/dmaengine.h>
34#include <linux/highmem.h>
35#include <linux/clk.h>
36#include <linux/err.h>
37#include <linux/completion.h>
38#include <linux/mmc/host.h>
39#include <linux/mmc/mmc.h>
40#include <linux/mmc/sdio.h>
41#include <linux/gpio.h>
42#include <linux/regulator/consumer.h>
43#include <linux/module.h>
44#include <linux/fsl/mxs-dma.h>
45#include <linux/pinctrl/consumer.h>
46#include <linux/stmp_device.h>
47#include <linux/mmc/mxs-mmc.h>
48
49#define DRIVER_NAME "mxs-mmc"
50
51/* card detect polling timeout */
52#define MXS_MMC_DETECT_TIMEOUT (HZ/2)
53
54#define ssp_is_old(host) ((host)->devid == IMX23_MMC)
55
56/* SSP registers */
57#define HW_SSP_CTRL0 0x000
58#define BM_SSP_CTRL0_RUN (1 << 29)
59#define BM_SSP_CTRL0_SDIO_IRQ_CHECK (1 << 28)
60#define BM_SSP_CTRL0_IGNORE_CRC (1 << 26)
61#define BM_SSP_CTRL0_READ (1 << 25)
62#define BM_SSP_CTRL0_DATA_XFER (1 << 24)
63#define BP_SSP_CTRL0_BUS_WIDTH (22)
64#define BM_SSP_CTRL0_BUS_WIDTH (0x3 << 22)
65#define BM_SSP_CTRL0_WAIT_FOR_IRQ (1 << 21)
66#define BM_SSP_CTRL0_LONG_RESP (1 << 19)
67#define BM_SSP_CTRL0_GET_RESP (1 << 17)
68#define BM_SSP_CTRL0_ENABLE (1 << 16)
69#define BP_SSP_CTRL0_XFER_COUNT (0)
70#define BM_SSP_CTRL0_XFER_COUNT (0xffff)
71#define HW_SSP_CMD0 0x010
72#define BM_SSP_CMD0_DBL_DATA_RATE_EN (1 << 25)
73#define BM_SSP_CMD0_SLOW_CLKING_EN (1 << 22)
74#define BM_SSP_CMD0_CONT_CLKING_EN (1 << 21)
75#define BM_SSP_CMD0_APPEND_8CYC (1 << 20)
76#define BP_SSP_CMD0_BLOCK_SIZE (16)
77#define BM_SSP_CMD0_BLOCK_SIZE (0xf << 16)
78#define BP_SSP_CMD0_BLOCK_COUNT (8)
79#define BM_SSP_CMD0_BLOCK_COUNT (0xff << 8)
80#define BP_SSP_CMD0_CMD (0)
81#define BM_SSP_CMD0_CMD (0xff)
82#define HW_SSP_CMD1 0x020
83#define HW_SSP_XFER_SIZE 0x030
84#define HW_SSP_BLOCK_SIZE 0x040
85#define BP_SSP_BLOCK_SIZE_BLOCK_COUNT (4)
86#define BM_SSP_BLOCK_SIZE_BLOCK_COUNT (0xffffff << 4)
87#define BP_SSP_BLOCK_SIZE_BLOCK_SIZE (0)
88#define BM_SSP_BLOCK_SIZE_BLOCK_SIZE (0xf)
89#define HW_SSP_TIMING(h) (ssp_is_old(h) ? 0x050 : 0x070)
90#define BP_SSP_TIMING_TIMEOUT (16)
91#define BM_SSP_TIMING_TIMEOUT (0xffff << 16)
92#define BP_SSP_TIMING_CLOCK_DIVIDE (8)
93#define BM_SSP_TIMING_CLOCK_DIVIDE (0xff << 8)
94#define BP_SSP_TIMING_CLOCK_RATE (0)
95#define BM_SSP_TIMING_CLOCK_RATE (0xff)
96#define HW_SSP_CTRL1(h) (ssp_is_old(h) ? 0x060 : 0x080)
97#define BM_SSP_CTRL1_SDIO_IRQ (1 << 31)
98#define BM_SSP_CTRL1_SDIO_IRQ_EN (1 << 30)
99#define BM_SSP_CTRL1_RESP_ERR_IRQ (1 << 29)
100#define BM_SSP_CTRL1_RESP_ERR_IRQ_EN (1 << 28)
101#define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ (1 << 27)
102#define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN (1 << 26)
103#define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ (1 << 25)
104#define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN (1 << 24)
105#define BM_SSP_CTRL1_DATA_CRC_IRQ (1 << 23)
106#define BM_SSP_CTRL1_DATA_CRC_IRQ_EN (1 << 22)
107#define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ (1 << 21)
108#define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ_EN (1 << 20)
109#define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ (1 << 17)
110#define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN (1 << 16)
111#define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ (1 << 15)
112#define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ_EN (1 << 14)
113#define BM_SSP_CTRL1_DMA_ENABLE (1 << 13)
114#define BM_SSP_CTRL1_POLARITY (1 << 9)
115#define BP_SSP_CTRL1_WORD_LENGTH (4)
116#define BM_SSP_CTRL1_WORD_LENGTH (0xf << 4)
117#define BP_SSP_CTRL1_SSP_MODE (0)
118#define BM_SSP_CTRL1_SSP_MODE (0xf)
119#define HW_SSP_SDRESP0(h) (ssp_is_old(h) ? 0x080 : 0x0a0)
120#define HW_SSP_SDRESP1(h) (ssp_is_old(h) ? 0x090 : 0x0b0)
121#define HW_SSP_SDRESP2(h) (ssp_is_old(h) ? 0x0a0 : 0x0c0)
122#define HW_SSP_SDRESP3(h) (ssp_is_old(h) ? 0x0b0 : 0x0d0)
123#define HW_SSP_STATUS(h) (ssp_is_old(h) ? 0x0c0 : 0x100)
124#define BM_SSP_STATUS_CARD_DETECT (1 << 28)
125#define BM_SSP_STATUS_SDIO_IRQ (1 << 17)
126
127#define BF_SSP(value, field) (((value) << BP_SSP_##field) & BM_SSP_##field)
128
129#define MXS_MMC_IRQ_BITS (BM_SSP_CTRL1_SDIO_IRQ | \
130 BM_SSP_CTRL1_RESP_ERR_IRQ | \
131 BM_SSP_CTRL1_RESP_TIMEOUT_IRQ | \
132 BM_SSP_CTRL1_DATA_TIMEOUT_IRQ | \
133 BM_SSP_CTRL1_DATA_CRC_IRQ | \
134 BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ | \
135 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ | \
136 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)
137
138#define SSP_PIO_NUM 3
139
140enum mxs_mmc_id {
141 IMX23_MMC,
142 IMX28_MMC,
143};
144
145struct mxs_mmc_host {
146 struct mmc_host *mmc;
147 struct mmc_request *mrq;
148 struct mmc_command *cmd;
149 struct mmc_data *data;
150
151 void __iomem *base;
152 int dma_channel;
153 struct clk *clk;
154 unsigned int clk_rate;
155
156 struct dma_chan *dmach;
157 struct mxs_dma_data dma_data;
158 unsigned int dma_dir;
159 enum dma_transfer_direction slave_dirn;
160 u32 ssp_pio_words[SSP_PIO_NUM];
161
162 enum mxs_mmc_id devid;
163 unsigned char bus_width;
164 spinlock_t lock;
165 int sdio_irq_en;
166 int wp_gpio;
167};
168
169static int mxs_mmc_get_ro(struct mmc_host *mmc)
170{
171 struct mxs_mmc_host *host = mmc_priv(mmc);
172
173 if (!gpio_is_valid(host->wp_gpio))
174 return -EINVAL;
175
176 return gpio_get_value(host->wp_gpio);
177}
178
179static int mxs_mmc_get_cd(struct mmc_host *mmc)
180{
181 struct mxs_mmc_host *host = mmc_priv(mmc);
182
183 return !(readl(host->base + HW_SSP_STATUS(host)) &
184 BM_SSP_STATUS_CARD_DETECT);
185}
186
187static void mxs_mmc_reset(struct mxs_mmc_host *host)
188{
189 u32 ctrl0, ctrl1;
190
191 stmp_reset_block(host->base);
192
193 ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
194 ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
195 BF_SSP(0x7, CTRL1_WORD_LENGTH) |
196 BM_SSP_CTRL1_DMA_ENABLE |
197 BM_SSP_CTRL1_POLARITY |
198 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN |
199 BM_SSP_CTRL1_DATA_CRC_IRQ_EN |
200 BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN |
201 BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN |
202 BM_SSP_CTRL1_RESP_ERR_IRQ_EN;
203
204 writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
205 BF_SSP(2, TIMING_CLOCK_DIVIDE) |
206 BF_SSP(0, TIMING_CLOCK_RATE),
207 host->base + HW_SSP_TIMING(host));
208
209 if (host->sdio_irq_en) {
210 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
211 ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN;
212 }
213
214 writel(ctrl0, host->base + HW_SSP_CTRL0);
215 writel(ctrl1, host->base + HW_SSP_CTRL1(host));
216}
217
218static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
219 struct mmc_command *cmd);
220
221static void mxs_mmc_request_done(struct mxs_mmc_host *host)
222{
223 struct mmc_command *cmd = host->cmd;
224 struct mmc_data *data = host->data;
225 struct mmc_request *mrq = host->mrq;
226
227 if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
228 if (mmc_resp_type(cmd) & MMC_RSP_136) {
229 cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0(host));
230 cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1(host));
231 cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2(host));
232 cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3(host));
233 } else {
234 cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0(host));
235 }
236 }
237
238 if (data) {
239 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
240 data->sg_len, host->dma_dir);
241 /*
242 * If there was an error on any block, we mark all
243 * data blocks as being in error.
244 */
245 if (!data->error)
246 data->bytes_xfered = data->blocks * data->blksz;
247 else
248 data->bytes_xfered = 0;
249
250 host->data = NULL;
251 if (mrq->stop) {
252 mxs_mmc_start_cmd(host, mrq->stop);
253 return;
254 }
255 }
256
257 host->mrq = NULL;
258 mmc_request_done(host->mmc, mrq);
259}
260
261static void mxs_mmc_dma_irq_callback(void *param)
262{
263 struct mxs_mmc_host *host = param;
264
265 mxs_mmc_request_done(host);
266}
267
268static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
269{
270 struct mxs_mmc_host *host = dev_id;
271 struct mmc_command *cmd = host->cmd;
272 struct mmc_data *data = host->data;
273 u32 stat;
274
275 spin_lock(&host->lock);
276
277 stat = readl(host->base + HW_SSP_CTRL1(host));
278 writel(stat & MXS_MMC_IRQ_BITS,
279 host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR);
280
281 spin_unlock(&host->lock);
282
283 if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
284 mmc_signal_sdio_irq(host->mmc);
285
286 if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
287 cmd->error = -ETIMEDOUT;
288 else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
289 cmd->error = -EIO;
290
291 if (data) {
292 if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ |
293 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ))
294 data->error = -ETIMEDOUT;
295 else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ)
296 data->error = -EILSEQ;
297 else if (stat & (BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ |
298 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ))
299 data->error = -EIO;
300 }
301
302 return IRQ_HANDLED;
303}
304
305static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
306 struct mxs_mmc_host *host, unsigned long flags)
307{
308 struct dma_async_tx_descriptor *desc;
309 struct mmc_data *data = host->data;
310 struct scatterlist * sgl;
311 unsigned int sg_len;
312
313 if (data) {
314 /* data */
315 dma_map_sg(mmc_dev(host->mmc), data->sg,
316 data->sg_len, host->dma_dir);
317 sgl = data->sg;
318 sg_len = data->sg_len;
319 } else {
320 /* pio */
321 sgl = (struct scatterlist *) host->ssp_pio_words;
322 sg_len = SSP_PIO_NUM;
323 }
324
325 desc = dmaengine_prep_slave_sg(host->dmach,
326 sgl, sg_len, host->slave_dirn, flags);
327 if (desc) {
328 desc->callback = mxs_mmc_dma_irq_callback;
329 desc->callback_param = host;
330 } else {
331 if (data)
332 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
333 data->sg_len, host->dma_dir);
334 }
335
336 return desc;
337}
338
339static void mxs_mmc_bc(struct mxs_mmc_host *host)
340{
341 struct mmc_command *cmd = host->cmd;
342 struct dma_async_tx_descriptor *desc;
343 u32 ctrl0, cmd0, cmd1;
344
345 ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC;
346 cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC;
347 cmd1 = cmd->arg;
348
349 if (host->sdio_irq_en) {
350 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
351 cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
352 }
353
354 host->ssp_pio_words[0] = ctrl0;
355 host->ssp_pio_words[1] = cmd0;
356 host->ssp_pio_words[2] = cmd1;
357 host->dma_dir = DMA_NONE;
358 host->slave_dirn = DMA_TRANS_NONE;
359 desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
360 if (!desc)
361 goto out;
362
363 dmaengine_submit(desc);
364 dma_async_issue_pending(host->dmach);
365 return;
366
367out:
368 dev_warn(mmc_dev(host->mmc),
369 "%s: failed to prep dma\n", __func__);
370}
371
372static void mxs_mmc_ac(struct mxs_mmc_host *host)
373{
374 struct mmc_command *cmd = host->cmd;
375 struct dma_async_tx_descriptor *desc;
376 u32 ignore_crc, get_resp, long_resp;
377 u32 ctrl0, cmd0, cmd1;
378
379 ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
380 0 : BM_SSP_CTRL0_IGNORE_CRC;
381 get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
382 BM_SSP_CTRL0_GET_RESP : 0;
383 long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
384 BM_SSP_CTRL0_LONG_RESP : 0;
385
386 ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp;
387 cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
388 cmd1 = cmd->arg;
389
390 if (host->sdio_irq_en) {
391 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
392 cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
393 }
394
395 host->ssp_pio_words[0] = ctrl0;
396 host->ssp_pio_words[1] = cmd0;
397 host->ssp_pio_words[2] = cmd1;
398 host->dma_dir = DMA_NONE;
399 host->slave_dirn = DMA_TRANS_NONE;
400 desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
401 if (!desc)
402 goto out;
403
404 dmaengine_submit(desc);
405 dma_async_issue_pending(host->dmach);
406 return;
407
408out:
409 dev_warn(mmc_dev(host->mmc),
410 "%s: failed to prep dma\n", __func__);
411}
412
413static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns)
414{
415 const unsigned int ssp_timeout_mul = 4096;
416 /*
417 * Calculate ticks in ms since ns are large numbers
418 * and might overflow
419 */
420 const unsigned int clock_per_ms = clock_rate / 1000;
421 const unsigned int ms = ns / 1000;
422 const unsigned int ticks = ms * clock_per_ms;
423 const unsigned int ssp_ticks = ticks / ssp_timeout_mul;
424
425 WARN_ON(ssp_ticks == 0);
426 return ssp_ticks;
427}
428
429static void mxs_mmc_adtc(struct mxs_mmc_host *host)
430{
431 struct mmc_command *cmd = host->cmd;
432 struct mmc_data *data = cmd->data;
433 struct dma_async_tx_descriptor *desc;
434 struct scatterlist *sgl = data->sg, *sg;
435 unsigned int sg_len = data->sg_len;
436 int i;
437
438 unsigned short dma_data_dir, timeout;
439 enum dma_transfer_direction slave_dirn;
440 unsigned int data_size = 0, log2_blksz;
441 unsigned int blocks = data->blocks;
442
443 u32 ignore_crc, get_resp, long_resp, read;
444 u32 ctrl0, cmd0, cmd1, val;
445
446 ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
447 0 : BM_SSP_CTRL0_IGNORE_CRC;
448 get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
449 BM_SSP_CTRL0_GET_RESP : 0;
450 long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
451 BM_SSP_CTRL0_LONG_RESP : 0;
452
453 if (data->flags & MMC_DATA_WRITE) {
454 dma_data_dir = DMA_TO_DEVICE;
455 slave_dirn = DMA_MEM_TO_DEV;
456 read = 0;
457 } else {
458 dma_data_dir = DMA_FROM_DEVICE;
459 slave_dirn = DMA_DEV_TO_MEM;
460 read = BM_SSP_CTRL0_READ;
461 }
462
463 ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) |
464 ignore_crc | get_resp | long_resp |
465 BM_SSP_CTRL0_DATA_XFER | read |
466 BM_SSP_CTRL0_WAIT_FOR_IRQ |
467 BM_SSP_CTRL0_ENABLE;
468
469 cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
470
471 /* get logarithm to base 2 of block size for setting register */
472 log2_blksz = ilog2(data->blksz);
473
474 /*
475 * take special care of the case that data size from data->sg
476 * is not equal to blocks x blksz
477 */
478 for_each_sg(sgl, sg, sg_len, i)
479 data_size += sg->length;
480
481 if (data_size != data->blocks * data->blksz)
482 blocks = 1;
483
484 /* xfer count, block size and count need to be set differently */
485 if (ssp_is_old(host)) {
486 ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
487 cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
488 BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
489 } else {
490 writel(data_size, host->base + HW_SSP_XFER_SIZE);
491 writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) |
492 BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT),
493 host->base + HW_SSP_BLOCK_SIZE);
494 }
495
496 if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
497 (cmd->opcode == SD_IO_RW_EXTENDED))
498 cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
499
500 cmd1 = cmd->arg;
501
502 if (host->sdio_irq_en) {
503 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
504 cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
505 }
506
507 /* set the timeout count */
508 timeout = mxs_ns_to_ssp_ticks(host->clk_rate, data->timeout_ns);
509 val = readl(host->base + HW_SSP_TIMING(host));
510 val &= ~(BM_SSP_TIMING_TIMEOUT);
511 val |= BF_SSP(timeout, TIMING_TIMEOUT);
512 writel(val, host->base + HW_SSP_TIMING(host));
513
514 /* pio */
515 host->ssp_pio_words[0] = ctrl0;
516 host->ssp_pio_words[1] = cmd0;
517 host->ssp_pio_words[2] = cmd1;
518 host->dma_dir = DMA_NONE;
519 host->slave_dirn = DMA_TRANS_NONE;
520 desc = mxs_mmc_prep_dma(host, 0);
521 if (!desc)
522 goto out;
523
524 /* append data sg */
525 WARN_ON(host->data != NULL);
526 host->data = data;
527 host->dma_dir = dma_data_dir;
528 host->slave_dirn = slave_dirn;
529 desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
530 if (!desc)
531 goto out;
532
533 dmaengine_submit(desc);
534 dma_async_issue_pending(host->dmach);
535 return;
536out:
537 dev_warn(mmc_dev(host->mmc),
538 "%s: failed to prep dma\n", __func__);
539}
540
541static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
542 struct mmc_command *cmd)
543{
544 host->cmd = cmd;
545
546 switch (mmc_cmd_type(cmd)) {
547 case MMC_CMD_BC:
548 mxs_mmc_bc(host);
549 break;
550 case MMC_CMD_BCR:
551 mxs_mmc_ac(host);
552 break;
553 case MMC_CMD_AC:
554 mxs_mmc_ac(host);
555 break;
556 case MMC_CMD_ADTC:
557 mxs_mmc_adtc(host);
558 break;
559 default:
560 dev_warn(mmc_dev(host->mmc),
561 "%s: unknown MMC command\n", __func__);
562 break;
563 }
564}
565
566static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
567{
568 struct mxs_mmc_host *host = mmc_priv(mmc);
569
570 WARN_ON(host->mrq != NULL);
571 host->mrq = mrq;
572 mxs_mmc_start_cmd(host, mrq->cmd);
573}
574
575static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate)
576{
577 unsigned int ssp_clk, ssp_sck;
578 u32 clock_divide, clock_rate;
579 u32 val;
580
581 ssp_clk = clk_get_rate(host->clk);
582
583 for (clock_divide = 2; clock_divide <= 254; clock_divide += 2) {
584 clock_rate = DIV_ROUND_UP(ssp_clk, rate * clock_divide);
585 clock_rate = (clock_rate > 0) ? clock_rate - 1 : 0;
586 if (clock_rate <= 255)
587 break;
588 }
589
590 if (clock_divide > 254) {
591 dev_err(mmc_dev(host->mmc),
592 "%s: cannot set clock to %d\n", __func__, rate);
593 return;
594 }
595
596 ssp_sck = ssp_clk / clock_divide / (1 + clock_rate);
597
598 val = readl(host->base + HW_SSP_TIMING(host));
599 val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE);
600 val |= BF_SSP(clock_divide, TIMING_CLOCK_DIVIDE);
601 val |= BF_SSP(clock_rate, TIMING_CLOCK_RATE);
602 writel(val, host->base + HW_SSP_TIMING(host));
603
604 host->clk_rate = ssp_sck;
605
606 dev_dbg(mmc_dev(host->mmc),
607 "%s: clock_divide %d, clock_rate %d, ssp_clk %d, rate_actual %d, rate_requested %d\n",
608 __func__, clock_divide, clock_rate, ssp_clk, ssp_sck, rate);
609}
610
611static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
612{
613 struct mxs_mmc_host *host = mmc_priv(mmc);
614
615 if (ios->bus_width == MMC_BUS_WIDTH_8)
616 host->bus_width = 2;
617 else if (ios->bus_width == MMC_BUS_WIDTH_4)
618 host->bus_width = 1;
619 else
620 host->bus_width = 0;
621
622 if (ios->clock)
623 mxs_mmc_set_clk_rate(host, ios->clock);
624}
625
626static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
627{
628 struct mxs_mmc_host *host = mmc_priv(mmc);
629 unsigned long flags;
630
631 spin_lock_irqsave(&host->lock, flags);
632
633 host->sdio_irq_en = enable;
634
635 if (enable) {
636 writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
637 host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
638 writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
639 host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET);
640 } else {
641 writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
642 host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
643 writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
644 host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR);
645 }
646
647 spin_unlock_irqrestore(&host->lock, flags);
648
649 if (enable && readl(host->base + HW_SSP_STATUS(host)) &
650 BM_SSP_STATUS_SDIO_IRQ)
651 mmc_signal_sdio_irq(host->mmc);
652
653}
654
655static const struct mmc_host_ops mxs_mmc_ops = {
656 .request = mxs_mmc_request,
657 .get_ro = mxs_mmc_get_ro,
658 .get_cd = mxs_mmc_get_cd,
659 .set_ios = mxs_mmc_set_ios,
660 .enable_sdio_irq = mxs_mmc_enable_sdio_irq,
661};
662
663static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param)
664{
665 struct mxs_mmc_host *host = param;
666
667 if (!mxs_dma_is_apbh(chan))
668 return false;
669
670 if (chan->chan_id != host->dma_channel)
671 return false;
672
673 chan->private = &host->dma_data;
674
675 return true;
676}
677
678static struct platform_device_id mxs_mmc_ids[] = {
679 {
680 .name = "imx23-mmc",
681 .driver_data = IMX23_MMC,
682 }, {
683 .name = "imx28-mmc",
684 .driver_data = IMX28_MMC,
685 }, {
686 /* sentinel */
687 }
688};
689MODULE_DEVICE_TABLE(platform, mxs_mmc_ids);
690
691static const struct of_device_id mxs_mmc_dt_ids[] = {
692 { .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_MMC, },
693 { .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_MMC, },
694 { /* sentinel */ }
695};
696MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);
697
698static int mxs_mmc_probe(struct platform_device *pdev)
699{
700 const struct of_device_id *of_id =
701 of_match_device(mxs_mmc_dt_ids, &pdev->dev);
702 struct device_node *np = pdev->dev.of_node;
703 struct mxs_mmc_host *host;
704 struct mmc_host *mmc;
705 struct resource *iores, *dmares;
706 struct mxs_mmc_platform_data *pdata;
707 struct pinctrl *pinctrl;
708 int ret = 0, irq_err, irq_dma;
709 dma_cap_mask_t mask;
710
711 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
712 dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
713 irq_err = platform_get_irq(pdev, 0);
714 irq_dma = platform_get_irq(pdev, 1);
715 if (!iores || irq_err < 0 || irq_dma < 0)
716 return -EINVAL;
717
718 mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
719 if (!mmc)
720 return -ENOMEM;
721
722 host = mmc_priv(mmc);
723 host->base = devm_request_and_ioremap(&pdev->dev, iores);
724 if (!host->base) {
725 ret = -EADDRNOTAVAIL;
726 goto out_mmc_free;
727 }
728
729 if (np) {
730 host->devid = (enum mxs_mmc_id) of_id->data;
731 /*
732 * TODO: This is a temporary solution and should be changed
733 * to use generic DMA binding later when the helpers get in.
734 */
735 ret = of_property_read_u32(np, "fsl,ssp-dma-channel",
736 &host->dma_channel);
737 if (ret) {
738 dev_err(mmc_dev(host->mmc),
739 "failed to get dma channel\n");
740 goto out_mmc_free;
741 }
742 } else {
743 host->devid = pdev->id_entry->driver_data;
744 host->dma_channel = dmares->start;
745 }
746
747 host->mmc = mmc;
748 host->sdio_irq_en = 0;
749
750 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
751 if (IS_ERR(pinctrl)) {
752 ret = PTR_ERR(pinctrl);
753 goto out_mmc_free;
754 }
755
756 host->clk = clk_get(&pdev->dev, NULL);
757 if (IS_ERR(host->clk)) {
758 ret = PTR_ERR(host->clk);
759 goto out_mmc_free;
760 }
761 clk_prepare_enable(host->clk);
762
763 mxs_mmc_reset(host);
764
765 dma_cap_zero(mask);
766 dma_cap_set(DMA_SLAVE, mask);
767 host->dma_data.chan_irq = irq_dma;
768 host->dmach = dma_request_channel(mask, mxs_mmc_dma_filter, host);
769 if (!host->dmach) {
770 dev_err(mmc_dev(host->mmc),
771 "%s: failed to request dma\n", __func__);
772 goto out_clk_put;
773 }
774
775 /* set mmc core parameters */
776 mmc->ops = &mxs_mmc_ops;
777 mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
778 MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;
779
780 pdata = mmc_dev(host->mmc)->platform_data;
781 if (!pdata) {
782 u32 bus_width = 0;
783 of_property_read_u32(np, "bus-width", &bus_width);
784 if (bus_width == 4)
785 mmc->caps |= MMC_CAP_4_BIT_DATA;
786 else if (bus_width == 8)
787 mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
788 host->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
789 } else {
790 if (pdata->flags & SLOTF_8_BIT_CAPABLE)
791 mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
792 if (pdata->flags & SLOTF_4_BIT_CAPABLE)
793 mmc->caps |= MMC_CAP_4_BIT_DATA;
794 host->wp_gpio = pdata->wp_gpio;
795 }
796
797 mmc->f_min = 400000;
798 mmc->f_max = 288000000;
799 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
800
801 mmc->max_segs = 52;
802 mmc->max_blk_size = 1 << 0xf;
803 mmc->max_blk_count = (ssp_is_old(host)) ? 0xff : 0xffffff;
804 mmc->max_req_size = (ssp_is_old(host)) ? 0xffff : 0xffffffff;
805 mmc->max_seg_size = dma_get_max_seg_size(host->dmach->device->dev);
806
807 platform_set_drvdata(pdev, mmc);
808
809 ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
810 DRIVER_NAME, host);
811 if (ret)
812 goto out_free_dma;
813
814 spin_lock_init(&host->lock);
815
816 ret = mmc_add_host(mmc);
817 if (ret)
818 goto out_free_dma;
819
820 dev_info(mmc_dev(host->mmc), "initialized\n");
821
822 return 0;
823
824out_free_dma:
825 if (host->dmach)
826 dma_release_channel(host->dmach);
827out_clk_put:
828 clk_disable_unprepare(host->clk);
829 clk_put(host->clk);
830out_mmc_free:
831 mmc_free_host(mmc);
832 return ret;
833}
834
835static int mxs_mmc_remove(struct platform_device *pdev)
836{
837 struct mmc_host *mmc = platform_get_drvdata(pdev);
838 struct mxs_mmc_host *host = mmc_priv(mmc);
839
840 mmc_remove_host(mmc);
841
842 platform_set_drvdata(pdev, NULL);
843
844 if (host->dmach)
845 dma_release_channel(host->dmach);
846
847 clk_disable_unprepare(host->clk);
848 clk_put(host->clk);
849
850 mmc_free_host(mmc);
851
852 return 0;
853}
854
855#ifdef CONFIG_PM
856static int mxs_mmc_suspend(struct device *dev)
857{
858 struct mmc_host *mmc = dev_get_drvdata(dev);
859 struct mxs_mmc_host *host = mmc_priv(mmc);
860 int ret = 0;
861
862 ret = mmc_suspend_host(mmc);
863
864 clk_disable_unprepare(host->clk);
865
866 return ret;
867}
868
869static int mxs_mmc_resume(struct device *dev)
870{
871 struct mmc_host *mmc = dev_get_drvdata(dev);
872 struct mxs_mmc_host *host = mmc_priv(mmc);
873 int ret = 0;
874
875 clk_prepare_enable(host->clk);
876
877 ret = mmc_resume_host(mmc);
878
879 return ret;
880}
881
882static const struct dev_pm_ops mxs_mmc_pm_ops = {
883 .suspend = mxs_mmc_suspend,
884 .resume = mxs_mmc_resume,
885};
886#endif
887
888static struct platform_driver mxs_mmc_driver = {
889 .probe = mxs_mmc_probe,
890 .remove = mxs_mmc_remove,
891 .id_table = mxs_mmc_ids,
892 .driver = {
893 .name = DRIVER_NAME,
894 .owner = THIS_MODULE,
895#ifdef CONFIG_PM
896 .pm = &mxs_mmc_pm_ops,
897#endif
898 .of_match_table = mxs_mmc_dt_ids,
899 },
900};
901
902module_platform_driver(mxs_mmc_driver);
903
904MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral");
905MODULE_AUTHOR("Freescale Semiconductor");
906MODULE_LICENSE("GPL");