drivers/mmc/host/mxs-mmc.c (Linux v4.6)
  1/*
  2 * Portions copyright (C) 2003 Russell King, PXA MMCI Driver
  3 * Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver
  4 *
  5 * Copyright 2008 Embedded Alley Solutions, Inc.
  6 * Copyright 2009-2011 Freescale Semiconductor, Inc.
  7 *
  8 * This program is free software; you can redistribute it and/or modify
  9 * it under the terms of the GNU General Public License as published by
 10 * the Free Software Foundation; either version 2 of the License, or
 11 * (at your option) any later version.
 12 *
 13 * This program is distributed in the hope that it will be useful,
 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 16 * GNU General Public License for more details.
 17 *
 18 * You should have received a copy of the GNU General Public License along
 19 * with this program; if not, write to the Free Software Foundation, Inc.,
 20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 21 */
 22
 23#include <linux/kernel.h>
 24#include <linux/init.h>
 25#include <linux/ioport.h>
 26#include <linux/of.h>
 27#include <linux/of_device.h>
 28#include <linux/of_gpio.h>
 29#include <linux/platform_device.h>
 30#include <linux/delay.h>
 31#include <linux/interrupt.h>
 32#include <linux/dma-mapping.h>
 33#include <linux/dmaengine.h>
 34#include <linux/highmem.h>
 35#include <linux/clk.h>
 36#include <linux/err.h>
 37#include <linux/completion.h>
 38#include <linux/mmc/host.h>
 39#include <linux/mmc/mmc.h>
 40#include <linux/mmc/sdio.h>
 41#include <linux/mmc/slot-gpio.h>
 42#include <linux/gpio.h>
 43#include <linux/regulator/consumer.h>
 44#include <linux/module.h>
 45#include <linux/stmp_device.h>
 46#include <linux/spi/mxs-spi.h>
 47
 48#define DRIVER_NAME	"mxs-mmc"
 49
 50#define MXS_MMC_IRQ_BITS	(BM_SSP_CTRL1_SDIO_IRQ		| \
 51				 BM_SSP_CTRL1_RESP_ERR_IRQ	| \
 52				 BM_SSP_CTRL1_RESP_TIMEOUT_IRQ	| \
 53				 BM_SSP_CTRL1_DATA_TIMEOUT_IRQ	| \
 54				 BM_SSP_CTRL1_DATA_CRC_IRQ	| \
 55				 BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ	| \
 56				 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ  | \
 57				 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)
 58
 59/* card detect polling timeout */
 60#define MXS_MMC_DETECT_TIMEOUT			(HZ/2)
 61
 62struct mxs_mmc_host {
 63	struct mxs_ssp			ssp;
 64
 65	struct mmc_host			*mmc;
 66	struct mmc_request		*mrq;
 67	struct mmc_command		*cmd;
 68	struct mmc_data			*data;
 69
 70	unsigned char			bus_width;
 71	spinlock_t			lock;
 72	int				sdio_irq_en;
 73	bool				broken_cd;
 74};
 75
 76static int mxs_mmc_get_cd(struct mmc_host *mmc)
 77{
 78	struct mxs_mmc_host *host = mmc_priv(mmc);
 79	struct mxs_ssp *ssp = &host->ssp;
 80	int present, ret;
 81
 82	if (host->broken_cd)
 83		return -ENOSYS;
 84
 85	ret = mmc_gpio_get_cd(mmc);
 86	if (ret >= 0)
 87		return ret;
 88
 89	present = mmc->caps & MMC_CAP_NEEDS_POLL ||
 90		!(readl(ssp->base + HW_SSP_STATUS(ssp)) &
 91			BM_SSP_STATUS_CARD_DETECT);
 92
 93	if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
 94		present = !present;
 95
 96	return present;
 97}
 98
 99static int mxs_mmc_reset(struct mxs_mmc_host *host)
100{
101	struct mxs_ssp *ssp = &host->ssp;
102	u32 ctrl0, ctrl1;
103	int ret;
104
105	ret = stmp_reset_block(ssp->base);
106	if (ret)
107		return ret;
108
109	ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
110	ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
111		BF_SSP(0x7, CTRL1_WORD_LENGTH) |
112		BM_SSP_CTRL1_DMA_ENABLE |
113		BM_SSP_CTRL1_POLARITY |
114		BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN |
115		BM_SSP_CTRL1_DATA_CRC_IRQ_EN |
116		BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN |
117		BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN |
118		BM_SSP_CTRL1_RESP_ERR_IRQ_EN;
119
120	writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
121	       BF_SSP(2, TIMING_CLOCK_DIVIDE) |
122	       BF_SSP(0, TIMING_CLOCK_RATE),
123	       ssp->base + HW_SSP_TIMING(ssp));
124
125	if (host->sdio_irq_en) {
126		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
127		ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN;
128	}
129
130	writel(ctrl0, ssp->base + HW_SSP_CTRL0);
131	writel(ctrl1, ssp->base + HW_SSP_CTRL1(ssp));
132	return 0;
133}
134
135static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
136			      struct mmc_command *cmd);
137
138static void mxs_mmc_request_done(struct mxs_mmc_host *host)
139{
140	struct mmc_command *cmd = host->cmd;
141	struct mmc_data *data = host->data;
142	struct mmc_request *mrq = host->mrq;
143	struct mxs_ssp *ssp = &host->ssp;
144
145	if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
146		if (mmc_resp_type(cmd) & MMC_RSP_136) {
147			cmd->resp[3] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
148			cmd->resp[2] = readl(ssp->base + HW_SSP_SDRESP1(ssp));
149			cmd->resp[1] = readl(ssp->base + HW_SSP_SDRESP2(ssp));
150			cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP3(ssp));
151		} else {
152			cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
153		}
154	}
155
156	if (data) {
157		dma_unmap_sg(mmc_dev(host->mmc), data->sg,
158			     data->sg_len, ssp->dma_dir);
159		/*
160		 * If there was an error on any block, we mark all
161		 * data blocks as being in error.
162		 */
163		if (!data->error)
164			data->bytes_xfered = data->blocks * data->blksz;
165		else
166			data->bytes_xfered = 0;
167
168		host->data = NULL;
169		if (mrq->stop) {
170			mxs_mmc_start_cmd(host, mrq->stop);
171			return;
172		}
173	}
174
175	host->mrq = NULL;
176	mmc_request_done(host->mmc, mrq);
177}
178
179static void mxs_mmc_dma_irq_callback(void *param)
180{
181	struct mxs_mmc_host *host = param;
182
183	mxs_mmc_request_done(host);
184}
185
186static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
187{
188	struct mxs_mmc_host *host = dev_id;
189	struct mmc_command *cmd = host->cmd;
190	struct mmc_data *data = host->data;
191	struct mxs_ssp *ssp = &host->ssp;
192	u32 stat;
193
194	spin_lock(&host->lock);
195
196	stat = readl(ssp->base + HW_SSP_CTRL1(ssp));
197	writel(stat & MXS_MMC_IRQ_BITS,
198	       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);
199
200	spin_unlock(&host->lock);
201
202	if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
203		mmc_signal_sdio_irq(host->mmc);
204
205	if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
206		cmd->error = -ETIMEDOUT;
207	else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
208		cmd->error = -EIO;
209
210	if (data) {
211		if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ |
212			    BM_SSP_CTRL1_RECV_TIMEOUT_IRQ))
213			data->error = -ETIMEDOUT;
214		else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ)
215			data->error = -EILSEQ;
216		else if (stat & (BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ |
217				 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ))
218			data->error = -EIO;
219	}
220
221	return IRQ_HANDLED;
222}
223
224static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
225	struct mxs_mmc_host *host, unsigned long flags)
226{
227	struct mxs_ssp *ssp = &host->ssp;
228	struct dma_async_tx_descriptor *desc;
229	struct mmc_data *data = host->data;
230	struct scatterlist * sgl;
231	unsigned int sg_len;
232
233	if (data) {
234		/* data */
235		dma_map_sg(mmc_dev(host->mmc), data->sg,
236			   data->sg_len, ssp->dma_dir);
237		sgl = data->sg;
238		sg_len = data->sg_len;
239	} else {
240		/* pio */
241		sgl = (struct scatterlist *) ssp->ssp_pio_words;
242		sg_len = SSP_PIO_NUM;
243	}
244
245	desc = dmaengine_prep_slave_sg(ssp->dmach,
246				sgl, sg_len, ssp->slave_dirn, flags);
247	if (desc) {
248		desc->callback = mxs_mmc_dma_irq_callback;
249		desc->callback_param = host;
250	} else {
251		if (data)
252			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
253				     data->sg_len, ssp->dma_dir);
254	}
255
256	return desc;
257}
258
259static void mxs_mmc_bc(struct mxs_mmc_host *host)
260{
261	struct mxs_ssp *ssp = &host->ssp;
262	struct mmc_command *cmd = host->cmd;
263	struct dma_async_tx_descriptor *desc;
264	u32 ctrl0, cmd0, cmd1;
265
266	ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC;
267	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC;
268	cmd1 = cmd->arg;
269
270	if (host->sdio_irq_en) {
271		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
272		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
273	}
274
275	ssp->ssp_pio_words[0] = ctrl0;
276	ssp->ssp_pio_words[1] = cmd0;
277	ssp->ssp_pio_words[2] = cmd1;
278	ssp->dma_dir = DMA_NONE;
279	ssp->slave_dirn = DMA_TRANS_NONE;
280	desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
281	if (!desc)
282		goto out;
283
284	dmaengine_submit(desc);
285	dma_async_issue_pending(ssp->dmach);
286	return;
287
288out:
289	dev_warn(mmc_dev(host->mmc),
290		 "%s: failed to prep dma\n", __func__);
291}
292
293static void mxs_mmc_ac(struct mxs_mmc_host *host)
294{
295	struct mxs_ssp *ssp = &host->ssp;
296	struct mmc_command *cmd = host->cmd;
297	struct dma_async_tx_descriptor *desc;
298	u32 ignore_crc, get_resp, long_resp;
299	u32 ctrl0, cmd0, cmd1;
300
301	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
302			0 : BM_SSP_CTRL0_IGNORE_CRC;
303	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
304			BM_SSP_CTRL0_GET_RESP : 0;
305	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
306			BM_SSP_CTRL0_LONG_RESP : 0;
307
308	ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp;
309	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
310	cmd1 = cmd->arg;
311	cmd1 = cmd->arg;
312	if (host->sdio_irq_en) {
313		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
314		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
315	}
316
317	ssp->ssp_pio_words[0] = ctrl0;
318	ssp->ssp_pio_words[1] = cmd0;
319	ssp->ssp_pio_words[2] = cmd1;
320	ssp->dma_dir = DMA_NONE;
321	ssp->slave_dirn = DMA_TRANS_NONE;
322	desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
323	if (!desc)
324		goto out;
325
326	dmaengine_submit(desc);
327	dma_async_issue_pending(ssp->dmach);
328	return;
329
330out:
331	dev_warn(mmc_dev(host->mmc),
332		 "%s: failed to prep dma\n", __func__);
333}
334
335static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns)
336{
337	const unsigned int ssp_timeout_mul = 4096;
338	/*
339	 * Calculate ticks in ms since ns are large numbers
340	 * and might overflow
341	 */
342	const unsigned int clock_per_ms = clock_rate / 1000;
343	const unsigned int ms = ns / 1000000;
344	const unsigned int ticks = ms * clock_per_ms;
345	const unsigned int ssp_ticks = ticks / ssp_timeout_mul;
346
347	WARN_ON(ssp_ticks == 0);
348	return ssp_ticks;
349}
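/*
 * Editorial note, not part of the driver source: a worked example of the
 * conversion above, assuming a hypothetical 96 MHz SSP clock and a 100 ms
 * card timeout (data->timeout_ns = 100000000):
 *   clock_per_ms = 96000000 / 1000     = 96000
 *   ms           = 100000000 / 1000000 = 100
 *   ticks        = 100 * 96000         = 9600000
 *   ssp_ticks    = 9600000 / 4096      ~= 2343
 * which fits the 16-bit SSP_TIMING_TIMEOUT field written below
 * (65535 * 4096 / 96 MHz ~= 2.8 s maximum at this clock rate).
 */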
350
351static void mxs_mmc_adtc(struct mxs_mmc_host *host)
352{
353	struct mmc_command *cmd = host->cmd;
354	struct mmc_data *data = cmd->data;
355	struct dma_async_tx_descriptor *desc;
356	struct scatterlist *sgl = data->sg, *sg;
357	unsigned int sg_len = data->sg_len;
358	unsigned int i;
359
360	unsigned short dma_data_dir, timeout;
361	enum dma_transfer_direction slave_dirn;
362	unsigned int data_size = 0, log2_blksz;
363	unsigned int blocks = data->blocks;
364
365	struct mxs_ssp *ssp = &host->ssp;
366
367	u32 ignore_crc, get_resp, long_resp, read;
368	u32 ctrl0, cmd0, cmd1, val;
369
370	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
371			0 : BM_SSP_CTRL0_IGNORE_CRC;
372	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
373			BM_SSP_CTRL0_GET_RESP : 0;
374	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
375			BM_SSP_CTRL0_LONG_RESP : 0;
376
377	if (data->flags & MMC_DATA_WRITE) {
378		dma_data_dir = DMA_TO_DEVICE;
379		slave_dirn = DMA_MEM_TO_DEV;
380		read = 0;
381	} else {
382		dma_data_dir = DMA_FROM_DEVICE;
383		slave_dirn = DMA_DEV_TO_MEM;
384		read = BM_SSP_CTRL0_READ;
385	}
386
387	ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) |
388		ignore_crc | get_resp | long_resp |
389		BM_SSP_CTRL0_DATA_XFER | read |
390		BM_SSP_CTRL0_WAIT_FOR_IRQ |
391		BM_SSP_CTRL0_ENABLE;
392
393	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
394
395	/* get logarithm to base 2 of block size for setting register */
396	log2_blksz = ilog2(data->blksz);
397
398	/*
399	 * take special care of the case that data size from data->sg
400	 * is not equal to blocks x blksz
401	 */
402	for_each_sg(sgl, sg, sg_len, i)
403		data_size += sg->length;
404
405	if (data_size != data->blocks * data->blksz)
406		blocks = 1;
407
408	/* xfer count, block size and count need to be set differently */
409	if (ssp_is_old(ssp)) {
410		ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
411		cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
412			BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
413	} else {
414		writel(data_size, ssp->base + HW_SSP_XFER_SIZE);
415		writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) |
416		       BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT),
417		       ssp->base + HW_SSP_BLOCK_SIZE);
418	}
419
420	if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
421	    (cmd->opcode == SD_IO_RW_EXTENDED))
422		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
423
424	cmd1 = cmd->arg;
425
426	if (host->sdio_irq_en) {
427		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
428		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
429	}
430
431	/* set the timeout count */
432	timeout = mxs_ns_to_ssp_ticks(ssp->clk_rate, data->timeout_ns);
433	val = readl(ssp->base + HW_SSP_TIMING(ssp));
434	val &= ~(BM_SSP_TIMING_TIMEOUT);
435	val |= BF_SSP(timeout, TIMING_TIMEOUT);
436	writel(val, ssp->base + HW_SSP_TIMING(ssp));
437
438	/* pio */
439	ssp->ssp_pio_words[0] = ctrl0;
440	ssp->ssp_pio_words[1] = cmd0;
441	ssp->ssp_pio_words[2] = cmd1;
442	ssp->dma_dir = DMA_NONE;
443	ssp->slave_dirn = DMA_TRANS_NONE;
444	desc = mxs_mmc_prep_dma(host, 0);
445	if (!desc)
446		goto out;
447
448	/* append data sg */
449	WARN_ON(host->data != NULL);
450	host->data = data;
451	ssp->dma_dir = dma_data_dir;
452	ssp->slave_dirn = slave_dirn;
453	desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
454	if (!desc)
455		goto out;
456
457	dmaengine_submit(desc);
458	dma_async_issue_pending(ssp->dmach);
459	return;
460out:
461	dev_warn(mmc_dev(host->mmc),
462		 "%s: failed to prep dma\n", __func__);
463}
464
465static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
466			      struct mmc_command *cmd)
467{
468	host->cmd = cmd;
469
470	switch (mmc_cmd_type(cmd)) {
471	case MMC_CMD_BC:
472		mxs_mmc_bc(host);
473		break;
474	case MMC_CMD_BCR:
475		mxs_mmc_ac(host);
476		break;
477	case MMC_CMD_AC:
478		mxs_mmc_ac(host);
479		break;
480	case MMC_CMD_ADTC:
481		mxs_mmc_adtc(host);
482		break;
483	default:
484		dev_warn(mmc_dev(host->mmc),
485			 "%s: unknown MMC command\n", __func__);
486		break;
487	}
488}
489
490static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
491{
492	struct mxs_mmc_host *host = mmc_priv(mmc);
493
494	WARN_ON(host->mrq != NULL);
495	host->mrq = mrq;
496	mxs_mmc_start_cmd(host, mrq->cmd);
497}
498
499static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
500{
501	struct mxs_mmc_host *host = mmc_priv(mmc);
502
503	if (ios->bus_width == MMC_BUS_WIDTH_8)
504		host->bus_width = 2;
505	else if (ios->bus_width == MMC_BUS_WIDTH_4)
506		host->bus_width = 1;
507	else
508		host->bus_width = 0;
509
510	if (ios->clock)
511		mxs_ssp_set_clk_rate(&host->ssp, ios->clock);
512}
513
514static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
515{
516	struct mxs_mmc_host *host = mmc_priv(mmc);
517	struct mxs_ssp *ssp = &host->ssp;
518	unsigned long flags;
519
520	spin_lock_irqsave(&host->lock, flags);
521
522	host->sdio_irq_en = enable;
523
524	if (enable) {
525		writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
526		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
527		writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
528		       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_SET);
529	} else {
530		writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
531		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
532		writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
533		       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);
534	}
535
536	spin_unlock_irqrestore(&host->lock, flags);
537
538	if (enable && readl(ssp->base + HW_SSP_STATUS(ssp)) &
539			BM_SSP_STATUS_SDIO_IRQ)
540		mmc_signal_sdio_irq(host->mmc);
541
542}
543
544static const struct mmc_host_ops mxs_mmc_ops = {
545	.request = mxs_mmc_request,
546	.get_ro = mmc_gpio_get_ro,
547	.get_cd = mxs_mmc_get_cd,
548	.set_ios = mxs_mmc_set_ios,
549	.enable_sdio_irq = mxs_mmc_enable_sdio_irq,
550};
551
552static const struct platform_device_id mxs_ssp_ids[] = {
553	{
554		.name = "imx23-mmc",
555		.driver_data = IMX23_SSP,
556	}, {
557		.name = "imx28-mmc",
558		.driver_data = IMX28_SSP,
559	}, {
560		/* sentinel */
561	}
562};
563MODULE_DEVICE_TABLE(platform, mxs_ssp_ids);
564
565static const struct of_device_id mxs_mmc_dt_ids[] = {
566	{ .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_SSP, },
567	{ .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_SSP, },
568	{ /* sentinel */ }
569};
570MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);
571
572static int mxs_mmc_probe(struct platform_device *pdev)
573{
574	const struct of_device_id *of_id =
575			of_match_device(mxs_mmc_dt_ids, &pdev->dev);
576	struct device_node *np = pdev->dev.of_node;
577	struct mxs_mmc_host *host;
578	struct mmc_host *mmc;
579	struct resource *iores;
580	int ret = 0, irq_err;
581	struct regulator *reg_vmmc;
582	struct mxs_ssp *ssp;
583
584	irq_err = platform_get_irq(pdev, 0);
585	if (irq_err < 0)
586		return irq_err;
587
588	mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
589	if (!mmc)
590		return -ENOMEM;
591
592	host = mmc_priv(mmc);
593	ssp = &host->ssp;
594	ssp->dev = &pdev->dev;
595	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
596	ssp->base = devm_ioremap_resource(&pdev->dev, iores);
597	if (IS_ERR(ssp->base)) {
598		ret = PTR_ERR(ssp->base);
599		goto out_mmc_free;
600	}
601
602	ssp->devid = (enum mxs_ssp_id) of_id->data;
603
604	host->mmc = mmc;
605	host->sdio_irq_en = 0;
606
607	reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc");
608	if (!IS_ERR(reg_vmmc)) {
609		ret = regulator_enable(reg_vmmc);
610		if (ret) {
611			dev_err(&pdev->dev,
612				"Failed to enable vmmc regulator: %d\n", ret);
613			goto out_mmc_free;
614		}
615	}
616
617	ssp->clk = devm_clk_get(&pdev->dev, NULL);
618	if (IS_ERR(ssp->clk)) {
619		ret = PTR_ERR(ssp->clk);
620		goto out_mmc_free;
621	}
622	ret = clk_prepare_enable(ssp->clk);
623	if (ret)
624		goto out_mmc_free;
625
626	ret = mxs_mmc_reset(host);
627	if (ret) {
628		dev_err(&pdev->dev, "Failed to reset mmc: %d\n", ret);
629		goto out_clk_disable;
630	}
631
632	ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
633	if (!ssp->dmach) {
634		dev_err(mmc_dev(host->mmc),
635			"%s: failed to request dma\n", __func__);
636		ret = -ENODEV;
637		goto out_clk_disable;
638	}
639
640	/* set mmc core parameters */
641	mmc->ops = &mxs_mmc_ops;
642	mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
643		    MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;
644
645	host->broken_cd = of_property_read_bool(np, "broken-cd");
646
647	mmc->f_min = 400000;
648	mmc->f_max = 288000000;
649
650	ret = mmc_of_parse(mmc);
651	if (ret)
652		goto out_clk_disable;
653
654	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
655
656	mmc->max_segs = 52;
657	mmc->max_blk_size = 1 << 0xf;
658	mmc->max_blk_count = (ssp_is_old(ssp)) ? 0xff : 0xffffff;
659	mmc->max_req_size = (ssp_is_old(ssp)) ? 0xffff : 0xffffffff;
660	mmc->max_seg_size = dma_get_max_seg_size(ssp->dmach->device->dev);
661
662	platform_set_drvdata(pdev, mmc);
663
664	ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
665			       dev_name(&pdev->dev), host);
666	if (ret)
667		goto out_free_dma;
668
669	spin_lock_init(&host->lock);
670
671	ret = mmc_add_host(mmc);
672	if (ret)
673		goto out_free_dma;
674
675	dev_info(mmc_dev(host->mmc), "initialized\n");
676
677	return 0;
678
679out_free_dma:
680	dma_release_channel(ssp->dmach);
681out_clk_disable:
682	clk_disable_unprepare(ssp->clk);
683out_mmc_free:
684	mmc_free_host(mmc);
685	return ret;
686}
687
688static int mxs_mmc_remove(struct platform_device *pdev)
689{
690	struct mmc_host *mmc = platform_get_drvdata(pdev);
691	struct mxs_mmc_host *host = mmc_priv(mmc);
692	struct mxs_ssp *ssp = &host->ssp;
693
694	mmc_remove_host(mmc);
695
696	if (ssp->dmach)
697		dma_release_channel(ssp->dmach);
698
699	clk_disable_unprepare(ssp->clk);
700
701	mmc_free_host(mmc);
702
703	return 0;
704}
705
706#ifdef CONFIG_PM_SLEEP
707static int mxs_mmc_suspend(struct device *dev)
708{
709	struct mmc_host *mmc = dev_get_drvdata(dev);
710	struct mxs_mmc_host *host = mmc_priv(mmc);
711	struct mxs_ssp *ssp = &host->ssp;
712
713	clk_disable_unprepare(ssp->clk);
714	return 0;
715}
716
717static int mxs_mmc_resume(struct device *dev)
718{
719	struct mmc_host *mmc = dev_get_drvdata(dev);
720	struct mxs_mmc_host *host = mmc_priv(mmc);
721	struct mxs_ssp *ssp = &host->ssp;
722
723	return clk_prepare_enable(ssp->clk);
724}
725#endif
726
727static SIMPLE_DEV_PM_OPS(mxs_mmc_pm_ops, mxs_mmc_suspend, mxs_mmc_resume);
728
729static struct platform_driver mxs_mmc_driver = {
730	.probe		= mxs_mmc_probe,
731	.remove		= mxs_mmc_remove,
732	.id_table	= mxs_ssp_ids,
733	.driver		= {
734		.name	= DRIVER_NAME,
735		.pm	= &mxs_mmc_pm_ops,
736		.of_match_table = mxs_mmc_dt_ids,
737	},
738};
739
740module_platform_driver(mxs_mmc_driver);
741
742MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral");
743MODULE_AUTHOR("Freescale Semiconductor");
744MODULE_LICENSE("GPL");
745MODULE_ALIAS("platform:" DRIVER_NAME);
drivers/mmc/host/mxs-mmc.c (Linux v4.10.11)
  1/*
  2 * Portions copyright (C) 2003 Russell King, PXA MMCI Driver
  3 * Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver
  4 *
  5 * Copyright 2008 Embedded Alley Solutions, Inc.
  6 * Copyright 2009-2011 Freescale Semiconductor, Inc.
  7 *
  8 * This program is free software; you can redistribute it and/or modify
  9 * it under the terms of the GNU General Public License as published by
 10 * the Free Software Foundation; either version 2 of the License, or
 11 * (at your option) any later version.
 12 *
 13 * This program is distributed in the hope that it will be useful,
 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 16 * GNU General Public License for more details.
 17 *
 18 * You should have received a copy of the GNU General Public License along
 19 * with this program; if not, write to the Free Software Foundation, Inc.,
 20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 21 */
 22
 23#include <linux/kernel.h>
 24#include <linux/init.h>
 25#include <linux/ioport.h>
 26#include <linux/of.h>
 27#include <linux/of_device.h>
 28#include <linux/of_gpio.h>
 29#include <linux/platform_device.h>
 30#include <linux/delay.h>
 31#include <linux/interrupt.h>
 32#include <linux/dma-mapping.h>
 33#include <linux/dmaengine.h>
 34#include <linux/highmem.h>
 35#include <linux/clk.h>
 36#include <linux/err.h>
 37#include <linux/completion.h>
 38#include <linux/mmc/host.h>
 39#include <linux/mmc/mmc.h>
 40#include <linux/mmc/sdio.h>
 41#include <linux/mmc/slot-gpio.h>
 42#include <linux/gpio.h>
 43#include <linux/regulator/consumer.h>
 44#include <linux/module.h>
 45#include <linux/stmp_device.h>
 46#include <linux/spi/mxs-spi.h>
 47
 48#define DRIVER_NAME	"mxs-mmc"
 49
 50#define MXS_MMC_IRQ_BITS	(BM_SSP_CTRL1_SDIO_IRQ		| \
 51				 BM_SSP_CTRL1_RESP_ERR_IRQ	| \
 52				 BM_SSP_CTRL1_RESP_TIMEOUT_IRQ	| \
 53				 BM_SSP_CTRL1_DATA_TIMEOUT_IRQ	| \
 54				 BM_SSP_CTRL1_DATA_CRC_IRQ	| \
 55				 BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ	| \
 56				 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ  | \
 57				 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)
 58
 59/* card detect polling timeout */
 60#define MXS_MMC_DETECT_TIMEOUT			(HZ/2)
 61
 62struct mxs_mmc_host {
 63	struct mxs_ssp			ssp;
 64
 65	struct mmc_host			*mmc;
 66	struct mmc_request		*mrq;
 67	struct mmc_command		*cmd;
 68	struct mmc_data			*data;
 69
 70	unsigned char			bus_width;
 71	spinlock_t			lock;
 72	int				sdio_irq_en;
 73	bool				broken_cd;
 74};
 75
 76static int mxs_mmc_get_cd(struct mmc_host *mmc)
 77{
 78	struct mxs_mmc_host *host = mmc_priv(mmc);
 79	struct mxs_ssp *ssp = &host->ssp;
 80	int present, ret;
 81
 82	if (host->broken_cd)
 83		return -ENOSYS;
 84
 85	ret = mmc_gpio_get_cd(mmc);
 86	if (ret >= 0)
 87		return ret;
 88
 89	present = mmc->caps & MMC_CAP_NEEDS_POLL ||
 90		!(readl(ssp->base + HW_SSP_STATUS(ssp)) &
 91			BM_SSP_STATUS_CARD_DETECT);
 92
 93	if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
 94		present = !present;
 95
 96	return present;
 97}
 98
 99static int mxs_mmc_reset(struct mxs_mmc_host *host)
100{
101	struct mxs_ssp *ssp = &host->ssp;
102	u32 ctrl0, ctrl1;
103	int ret;
104
105	ret = stmp_reset_block(ssp->base);
106	if (ret)
107		return ret;
108
109	ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
110	ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
111		BF_SSP(0x7, CTRL1_WORD_LENGTH) |
112		BM_SSP_CTRL1_DMA_ENABLE |
113		BM_SSP_CTRL1_POLARITY |
114		BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN |
115		BM_SSP_CTRL1_DATA_CRC_IRQ_EN |
116		BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN |
117		BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN |
118		BM_SSP_CTRL1_RESP_ERR_IRQ_EN;
119
120	writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
121	       BF_SSP(2, TIMING_CLOCK_DIVIDE) |
122	       BF_SSP(0, TIMING_CLOCK_RATE),
123	       ssp->base + HW_SSP_TIMING(ssp));
124
125	if (host->sdio_irq_en) {
126		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
127		ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN;
128	}
129
130	writel(ctrl0, ssp->base + HW_SSP_CTRL0);
131	writel(ctrl1, ssp->base + HW_SSP_CTRL1(ssp));
132	return 0;
133}
134
135static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
136			      struct mmc_command *cmd);
137
138static void mxs_mmc_request_done(struct mxs_mmc_host *host)
139{
140	struct mmc_command *cmd = host->cmd;
141	struct mmc_data *data = host->data;
142	struct mmc_request *mrq = host->mrq;
143	struct mxs_ssp *ssp = &host->ssp;
144
145	if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
146		if (mmc_resp_type(cmd) & MMC_RSP_136) {
147			cmd->resp[3] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
148			cmd->resp[2] = readl(ssp->base + HW_SSP_SDRESP1(ssp));
149			cmd->resp[1] = readl(ssp->base + HW_SSP_SDRESP2(ssp));
150			cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP3(ssp));
151		} else {
152			cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
153		}
154	}
155
156	if (data) {
157		dma_unmap_sg(mmc_dev(host->mmc), data->sg,
158			     data->sg_len, ssp->dma_dir);
159		/*
160		 * If there was an error on any block, we mark all
161		 * data blocks as being in error.
162		 */
163		if (!data->error)
164			data->bytes_xfered = data->blocks * data->blksz;
165		else
166			data->bytes_xfered = 0;
167
168		host->data = NULL;
169		if (mrq->stop) {
170			mxs_mmc_start_cmd(host, mrq->stop);
171			return;
172		}
173	}
174
175	host->mrq = NULL;
176	mmc_request_done(host->mmc, mrq);
177}
178
179static void mxs_mmc_dma_irq_callback(void *param)
180{
181	struct mxs_mmc_host *host = param;
182
183	mxs_mmc_request_done(host);
184}
185
186static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
187{
188	struct mxs_mmc_host *host = dev_id;
189	struct mmc_command *cmd = host->cmd;
190	struct mmc_data *data = host->data;
191	struct mxs_ssp *ssp = &host->ssp;
192	u32 stat;
193
194	spin_lock(&host->lock);
195
196	stat = readl(ssp->base + HW_SSP_CTRL1(ssp));
197	writel(stat & MXS_MMC_IRQ_BITS,
198	       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);
199
200	spin_unlock(&host->lock);
201
202	if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
203		mmc_signal_sdio_irq(host->mmc);
204
205	if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
206		cmd->error = -ETIMEDOUT;
207	else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
208		cmd->error = -EIO;
209
210	if (data) {
211		if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ |
212			    BM_SSP_CTRL1_RECV_TIMEOUT_IRQ))
213			data->error = -ETIMEDOUT;
214		else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ)
215			data->error = -EILSEQ;
216		else if (stat & (BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ |
217				 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ))
218			data->error = -EIO;
219	}
220
221	return IRQ_HANDLED;
222}
223
224static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
225	struct mxs_mmc_host *host, unsigned long flags)
226{
227	struct mxs_ssp *ssp = &host->ssp;
228	struct dma_async_tx_descriptor *desc;
229	struct mmc_data *data = host->data;
230	struct scatterlist * sgl;
231	unsigned int sg_len;
232
233	if (data) {
234		/* data */
235		dma_map_sg(mmc_dev(host->mmc), data->sg,
236			   data->sg_len, ssp->dma_dir);
237		sgl = data->sg;
238		sg_len = data->sg_len;
239	} else {
240		/* pio */
241		sgl = (struct scatterlist *) ssp->ssp_pio_words;
242		sg_len = SSP_PIO_NUM;
243	}
244
245	desc = dmaengine_prep_slave_sg(ssp->dmach,
246				sgl, sg_len, ssp->slave_dirn, flags);
247	if (desc) {
248		desc->callback = mxs_mmc_dma_irq_callback;
249		desc->callback_param = host;
250	} else {
251		if (data)
252			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
253				     data->sg_len, ssp->dma_dir);
254	}
255
256	return desc;
257}
258
259static void mxs_mmc_bc(struct mxs_mmc_host *host)
260{
261	struct mxs_ssp *ssp = &host->ssp;
262	struct mmc_command *cmd = host->cmd;
263	struct dma_async_tx_descriptor *desc;
264	u32 ctrl0, cmd0, cmd1;
265
266	ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC;
267	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC;
268	cmd1 = cmd->arg;
269
270	if (host->sdio_irq_en) {
271		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
272		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
273	}
274
275	ssp->ssp_pio_words[0] = ctrl0;
276	ssp->ssp_pio_words[1] = cmd0;
277	ssp->ssp_pio_words[2] = cmd1;
278	ssp->dma_dir = DMA_NONE;
279	ssp->slave_dirn = DMA_TRANS_NONE;
280	desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
281	if (!desc)
282		goto out;
283
284	dmaengine_submit(desc);
285	dma_async_issue_pending(ssp->dmach);
286	return;
287
288out:
289	dev_warn(mmc_dev(host->mmc),
290		 "%s: failed to prep dma\n", __func__);
291}
292
293static void mxs_mmc_ac(struct mxs_mmc_host *host)
294{
295	struct mxs_ssp *ssp = &host->ssp;
296	struct mmc_command *cmd = host->cmd;
297	struct dma_async_tx_descriptor *desc;
298	u32 ignore_crc, get_resp, long_resp;
299	u32 ctrl0, cmd0, cmd1;
300
301	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
302			0 : BM_SSP_CTRL0_IGNORE_CRC;
303	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
304			BM_SSP_CTRL0_GET_RESP : 0;
305	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
306			BM_SSP_CTRL0_LONG_RESP : 0;
307
308	ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp;
309	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
310	cmd1 = cmd->arg;
311
312	if (cmd->opcode == MMC_STOP_TRANSMISSION)
313		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
314
315	if (host->sdio_irq_en) {
316		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
317		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
318	}
319
320	ssp->ssp_pio_words[0] = ctrl0;
321	ssp->ssp_pio_words[1] = cmd0;
322	ssp->ssp_pio_words[2] = cmd1;
323	ssp->dma_dir = DMA_NONE;
324	ssp->slave_dirn = DMA_TRANS_NONE;
325	desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
326	if (!desc)
327		goto out;
328
329	dmaengine_submit(desc);
330	dma_async_issue_pending(ssp->dmach);
331	return;
332
333out:
334	dev_warn(mmc_dev(host->mmc),
335		 "%s: failed to prep dma\n", __func__);
336}
337
338static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns)
339{
340	const unsigned int ssp_timeout_mul = 4096;
341	/*
342	 * Calculate ticks in ms since ns are large numbers
343	 * and might overflow
344	 */
345	const unsigned int clock_per_ms = clock_rate / 1000;
346	const unsigned int ms = ns / 1000000;
347	const unsigned int ticks = ms * clock_per_ms;
348	const unsigned int ssp_ticks = ticks / ssp_timeout_mul;
349
350	WARN_ON(ssp_ticks == 0);
351	return ssp_ticks;
352}
353
354static void mxs_mmc_adtc(struct mxs_mmc_host *host)
355{
356	struct mmc_command *cmd = host->cmd;
357	struct mmc_data *data = cmd->data;
358	struct dma_async_tx_descriptor *desc;
359	struct scatterlist *sgl = data->sg, *sg;
360	unsigned int sg_len = data->sg_len;
361	unsigned int i;
362
363	unsigned short dma_data_dir, timeout;
364	enum dma_transfer_direction slave_dirn;
365	unsigned int data_size = 0, log2_blksz;
366	unsigned int blocks = data->blocks;
367
368	struct mxs_ssp *ssp = &host->ssp;
369
370	u32 ignore_crc, get_resp, long_resp, read;
371	u32 ctrl0, cmd0, cmd1, val;
372
373	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
374			0 : BM_SSP_CTRL0_IGNORE_CRC;
375	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
376			BM_SSP_CTRL0_GET_RESP : 0;
377	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
378			BM_SSP_CTRL0_LONG_RESP : 0;
379
380	if (data->flags & MMC_DATA_WRITE) {
381		dma_data_dir = DMA_TO_DEVICE;
382		slave_dirn = DMA_MEM_TO_DEV;
383		read = 0;
384	} else {
385		dma_data_dir = DMA_FROM_DEVICE;
386		slave_dirn = DMA_DEV_TO_MEM;
387		read = BM_SSP_CTRL0_READ;
388	}
389
390	ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) |
391		ignore_crc | get_resp | long_resp |
392		BM_SSP_CTRL0_DATA_XFER | read |
393		BM_SSP_CTRL0_WAIT_FOR_IRQ |
394		BM_SSP_CTRL0_ENABLE;
395
396	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
397
398	/* get logarithm to base 2 of block size for setting register */
399	log2_blksz = ilog2(data->blksz);
400
401	/*
402	 * take special care of the case that data size from data->sg
403	 * is not equal to blocks x blksz
404	 */
405	for_each_sg(sgl, sg, sg_len, i)
406		data_size += sg->length;
407
408	if (data_size != data->blocks * data->blksz)
409		blocks = 1;
410
411	/* xfer count, block size and count need to be set differently */
412	if (ssp_is_old(ssp)) {
413		ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
414		cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
415			BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
416	} else {
417		writel(data_size, ssp->base + HW_SSP_XFER_SIZE);
418		writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) |
419		       BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT),
420		       ssp->base + HW_SSP_BLOCK_SIZE);
421	}
422
423	if (cmd->opcode == SD_IO_RW_EXTENDED)
424		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
425
426	cmd1 = cmd->arg;
427
428	if (host->sdio_irq_en) {
429		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
430		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
431	}
432
433	/* set the timeout count */
434	timeout = mxs_ns_to_ssp_ticks(ssp->clk_rate, data->timeout_ns);
435	val = readl(ssp->base + HW_SSP_TIMING(ssp));
436	val &= ~(BM_SSP_TIMING_TIMEOUT);
437	val |= BF_SSP(timeout, TIMING_TIMEOUT);
438	writel(val, ssp->base + HW_SSP_TIMING(ssp));
439
440	/* pio */
441	ssp->ssp_pio_words[0] = ctrl0;
442	ssp->ssp_pio_words[1] = cmd0;
443	ssp->ssp_pio_words[2] = cmd1;
444	ssp->dma_dir = DMA_NONE;
445	ssp->slave_dirn = DMA_TRANS_NONE;
446	desc = mxs_mmc_prep_dma(host, 0);
447	if (!desc)
448		goto out;
449
450	/* append data sg */
451	WARN_ON(host->data != NULL);
452	host->data = data;
453	ssp->dma_dir = dma_data_dir;
454	ssp->slave_dirn = slave_dirn;
455	desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
456	if (!desc)
457		goto out;
458
459	dmaengine_submit(desc);
460	dma_async_issue_pending(ssp->dmach);
461	return;
462out:
463	dev_warn(mmc_dev(host->mmc),
464		 "%s: failed to prep dma\n", __func__);
465}
466
467static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
468			      struct mmc_command *cmd)
469{
470	host->cmd = cmd;
471
472	switch (mmc_cmd_type(cmd)) {
473	case MMC_CMD_BC:
474		mxs_mmc_bc(host);
475		break;
476	case MMC_CMD_BCR:
477		mxs_mmc_ac(host);
478		break;
479	case MMC_CMD_AC:
480		mxs_mmc_ac(host);
481		break;
482	case MMC_CMD_ADTC:
483		mxs_mmc_adtc(host);
484		break;
485	default:
486		dev_warn(mmc_dev(host->mmc),
487			 "%s: unknown MMC command\n", __func__);
488		break;
489	}
490}
491
492static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
493{
494	struct mxs_mmc_host *host = mmc_priv(mmc);
495
496	WARN_ON(host->mrq != NULL);
497	host->mrq = mrq;
498	mxs_mmc_start_cmd(host, mrq->cmd);
499}
500
501static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
502{
503	struct mxs_mmc_host *host = mmc_priv(mmc);
504
505	if (ios->bus_width == MMC_BUS_WIDTH_8)
506		host->bus_width = 2;
507	else if (ios->bus_width == MMC_BUS_WIDTH_4)
508		host->bus_width = 1;
509	else
510		host->bus_width = 0;
511
512	if (ios->clock)
513		mxs_ssp_set_clk_rate(&host->ssp, ios->clock);
514}
515
516static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
517{
518	struct mxs_mmc_host *host = mmc_priv(mmc);
519	struct mxs_ssp *ssp = &host->ssp;
520	unsigned long flags;
521
522	spin_lock_irqsave(&host->lock, flags);
523
524	host->sdio_irq_en = enable;
525
526	if (enable) {
527		writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
528		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
529		writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
530		       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_SET);
531	} else {
532		writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
533		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
534		writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
535		       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);
536	}
537
538	spin_unlock_irqrestore(&host->lock, flags);
539
540	if (enable && readl(ssp->base + HW_SSP_STATUS(ssp)) &
541			BM_SSP_STATUS_SDIO_IRQ)
542		mmc_signal_sdio_irq(host->mmc);
543
544}
545
546static const struct mmc_host_ops mxs_mmc_ops = {
547	.request = mxs_mmc_request,
548	.get_ro = mmc_gpio_get_ro,
549	.get_cd = mxs_mmc_get_cd,
550	.set_ios = mxs_mmc_set_ios,
551	.enable_sdio_irq = mxs_mmc_enable_sdio_irq,
552};
553
554static const struct platform_device_id mxs_ssp_ids[] = {
555	{
556		.name = "imx23-mmc",
557		.driver_data = IMX23_SSP,
558	}, {
559		.name = "imx28-mmc",
560		.driver_data = IMX28_SSP,
561	}, {
562		/* sentinel */
563	}
564};
565MODULE_DEVICE_TABLE(platform, mxs_ssp_ids);
566
567static const struct of_device_id mxs_mmc_dt_ids[] = {
568	{ .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_SSP, },
569	{ .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_SSP, },
570	{ /* sentinel */ }
571};
572MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);
573
574static int mxs_mmc_probe(struct platform_device *pdev)
575{
576	const struct of_device_id *of_id =
577			of_match_device(mxs_mmc_dt_ids, &pdev->dev);
578	struct device_node *np = pdev->dev.of_node;
579	struct mxs_mmc_host *host;
580	struct mmc_host *mmc;
581	struct resource *iores;
582	int ret = 0, irq_err;
583	struct regulator *reg_vmmc;
584	struct mxs_ssp *ssp;
585
586	irq_err = platform_get_irq(pdev, 0);
587	if (irq_err < 0)
588		return irq_err;
589
590	mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
591	if (!mmc)
592		return -ENOMEM;
593
594	host = mmc_priv(mmc);
595	ssp = &host->ssp;
596	ssp->dev = &pdev->dev;
597	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
598	ssp->base = devm_ioremap_resource(&pdev->dev, iores);
599	if (IS_ERR(ssp->base)) {
600		ret = PTR_ERR(ssp->base);
601		goto out_mmc_free;
602	}
603
604	ssp->devid = (enum mxs_ssp_id) of_id->data;
605
606	host->mmc = mmc;
607	host->sdio_irq_en = 0;
608
609	reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc");
610	if (!IS_ERR(reg_vmmc)) {
611		ret = regulator_enable(reg_vmmc);
612		if (ret) {
613			dev_err(&pdev->dev,
614				"Failed to enable vmmc regulator: %d\n", ret);
615			goto out_mmc_free;
616		}
617	}
618
619	ssp->clk = devm_clk_get(&pdev->dev, NULL);
620	if (IS_ERR(ssp->clk)) {
621		ret = PTR_ERR(ssp->clk);
622		goto out_mmc_free;
623	}
624	ret = clk_prepare_enable(ssp->clk);
625	if (ret)
626		goto out_mmc_free;
627
628	ret = mxs_mmc_reset(host);
629	if (ret) {
630		dev_err(&pdev->dev, "Failed to reset mmc: %d\n", ret);
631		goto out_clk_disable;
632	}
633
634	ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
635	if (!ssp->dmach) {
636		dev_err(mmc_dev(host->mmc),
637			"%s: failed to request dma\n", __func__);
638		ret = -ENODEV;
639		goto out_clk_disable;
640	}
641
642	/* set mmc core parameters */
643	mmc->ops = &mxs_mmc_ops;
644	mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
645		    MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;
646
647	host->broken_cd = of_property_read_bool(np, "broken-cd");
648
649	mmc->f_min = 400000;
650	mmc->f_max = 288000000;
651
652	ret = mmc_of_parse(mmc);
653	if (ret)
654		goto out_clk_disable;
655
656	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
657
658	mmc->max_segs = 52;
659	mmc->max_blk_size = 1 << 0xf;
660	mmc->max_blk_count = (ssp_is_old(ssp)) ? 0xff : 0xffffff;
661	mmc->max_req_size = (ssp_is_old(ssp)) ? 0xffff : 0xffffffff;
662	mmc->max_seg_size = dma_get_max_seg_size(ssp->dmach->device->dev);
663
664	platform_set_drvdata(pdev, mmc);
665
666	spin_lock_init(&host->lock);
667
668	ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
669			       dev_name(&pdev->dev), host);
670	if (ret)
671		goto out_free_dma;
672
673	ret = mmc_add_host(mmc);
674	if (ret)
675		goto out_free_dma;
676
677	dev_info(mmc_dev(host->mmc), "initialized\n");
678
679	return 0;
680
681out_free_dma:
682	dma_release_channel(ssp->dmach);
683out_clk_disable:
684	clk_disable_unprepare(ssp->clk);
685out_mmc_free:
686	mmc_free_host(mmc);
687	return ret;
688}
689
690static int mxs_mmc_remove(struct platform_device *pdev)
691{
692	struct mmc_host *mmc = platform_get_drvdata(pdev);
693	struct mxs_mmc_host *host = mmc_priv(mmc);
694	struct mxs_ssp *ssp = &host->ssp;
695
696	mmc_remove_host(mmc);
697
698	if (ssp->dmach)
699		dma_release_channel(ssp->dmach);
700
701	clk_disable_unprepare(ssp->clk);
702
703	mmc_free_host(mmc);
704
705	return 0;
706}
707
708#ifdef CONFIG_PM_SLEEP
709static int mxs_mmc_suspend(struct device *dev)
710{
711	struct mmc_host *mmc = dev_get_drvdata(dev);
712	struct mxs_mmc_host *host = mmc_priv(mmc);
713	struct mxs_ssp *ssp = &host->ssp;
714
715	clk_disable_unprepare(ssp->clk);
716	return 0;
717}
718
719static int mxs_mmc_resume(struct device *dev)
720{
721	struct mmc_host *mmc = dev_get_drvdata(dev);
722	struct mxs_mmc_host *host = mmc_priv(mmc);
723	struct mxs_ssp *ssp = &host->ssp;
724
725	return clk_prepare_enable(ssp->clk);
726}
727#endif
728
729static SIMPLE_DEV_PM_OPS(mxs_mmc_pm_ops, mxs_mmc_suspend, mxs_mmc_resume);
730
731static struct platform_driver mxs_mmc_driver = {
732	.probe		= mxs_mmc_probe,
733	.remove		= mxs_mmc_remove,
734	.id_table	= mxs_ssp_ids,
735	.driver		= {
736		.name	= DRIVER_NAME,
737		.pm	= &mxs_mmc_pm_ops,
738		.of_match_table = mxs_mmc_dt_ids,
739	},
740};
741
742module_platform_driver(mxs_mmc_driver);
743
744MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral");
745MODULE_AUTHOR("Freescale Semiconductor");
746MODULE_LICENSE("GPL");
747MODULE_ALIAS("platform:" DRIVER_NAME);