   1/*
   2 *  linux/drivers/mmc/host/mxcmmc.c - Freescale i.MX MMCI driver
   3 *
   4 *  This is a driver for the SDHC controller found in Freescale MX2/MX3
   5 *  SoCs. It is basically the same hardware as found on MX1 (imxmmc.c).
   6 *  Unlike the hardware found on MX1, this hardware just works and does
   7 *  not need all the quirks found in imxmmc.c, hence the separate driver.
   8 *
   9 *  Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
  10 *  Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
  11 *
  12 *  derived from pxamci.c by Russell King
  13 *
  14 * This program is free software; you can redistribute it and/or modify
  15 * it under the terms of the GNU General Public License version 2 as
  16 * published by the Free Software Foundation.
  17 *
  18 */
  19
  20#include <linux/module.h>
  21#include <linux/init.h>
  22#include <linux/ioport.h>
  23#include <linux/platform_device.h>
  24#include <linux/interrupt.h>
  25#include <linux/irq.h>
  26#include <linux/blkdev.h>
  27#include <linux/dma-mapping.h>
  28#include <linux/mmc/host.h>
  29#include <linux/mmc/card.h>
  30#include <linux/delay.h>
  31#include <linux/clk.h>
  32#include <linux/io.h>
  33#include <linux/gpio.h>
  34#include <linux/regulator/consumer.h>
  35#include <linux/dmaengine.h>
  36#include <linux/types.h>
  37#include <linux/of.h>
  38#include <linux/of_device.h>
  39#include <linux/of_dma.h>
  40#include <linux/of_gpio.h>
  41#include <linux/mmc/slot-gpio.h>
  42
  43#include <asm/dma.h>
  44#include <asm/irq.h>
  45#include <linux/platform_data/mmc-mxcmmc.h>
  46
  47#include <linux/platform_data/dma-imx.h>
  48
  49#define DRIVER_NAME "mxc-mmc"
  50#define MXCMCI_TIMEOUT_MS 10000
  51
  52#define MMC_REG_STR_STP_CLK		0x00
  53#define MMC_REG_STATUS			0x04
  54#define MMC_REG_CLK_RATE		0x08
  55#define MMC_REG_CMD_DAT_CONT		0x0C
  56#define MMC_REG_RES_TO			0x10
  57#define MMC_REG_READ_TO			0x14
  58#define MMC_REG_BLK_LEN			0x18
  59#define MMC_REG_NOB			0x1C
  60#define MMC_REG_REV_NO			0x20
  61#define MMC_REG_INT_CNTR		0x24
  62#define MMC_REG_CMD			0x28
  63#define MMC_REG_ARG			0x2C
  64#define MMC_REG_RES_FIFO		0x34
  65#define MMC_REG_BUFFER_ACCESS		0x38
  66
  67#define STR_STP_CLK_RESET               (1 << 3)
  68#define STR_STP_CLK_START_CLK           (1 << 1)
  69#define STR_STP_CLK_STOP_CLK            (1 << 0)
  70
  71#define STATUS_CARD_INSERTION		(1 << 31)
  72#define STATUS_CARD_REMOVAL		(1 << 30)
  73#define STATUS_YBUF_EMPTY		(1 << 29)
  74#define STATUS_XBUF_EMPTY		(1 << 28)
  75#define STATUS_YBUF_FULL		(1 << 27)
  76#define STATUS_XBUF_FULL		(1 << 26)
  77#define STATUS_BUF_UND_RUN		(1 << 25)
  78#define STATUS_BUF_OVFL			(1 << 24)
  79#define STATUS_SDIO_INT_ACTIVE		(1 << 14)
  80#define STATUS_END_CMD_RESP		(1 << 13)
  81#define STATUS_WRITE_OP_DONE		(1 << 12)
  82#define STATUS_DATA_TRANS_DONE		(1 << 11)
  83#define STATUS_READ_OP_DONE		(1 << 11)
  84#define STATUS_WR_CRC_ERROR_CODE_MASK	(3 << 10)
  85#define STATUS_CARD_BUS_CLK_RUN		(1 << 8)
  86#define STATUS_BUF_READ_RDY		(1 << 7)
  87#define STATUS_BUF_WRITE_RDY		(1 << 6)
  88#define STATUS_RESP_CRC_ERR		(1 << 5)
  89#define STATUS_CRC_READ_ERR		(1 << 3)
  90#define STATUS_CRC_WRITE_ERR		(1 << 2)
  91#define STATUS_TIME_OUT_RESP		(1 << 1)
  92#define STATUS_TIME_OUT_READ		(1 << 0)
  93#define STATUS_ERR_MASK			0x2f
  94
  95#define CMD_DAT_CONT_CMD_RESP_LONG_OFF	(1 << 12)
  96#define CMD_DAT_CONT_STOP_READWAIT	(1 << 11)
  97#define CMD_DAT_CONT_START_READWAIT	(1 << 10)
  98#define CMD_DAT_CONT_BUS_WIDTH_4	(2 << 8)
  99#define CMD_DAT_CONT_INIT		(1 << 7)
 100#define CMD_DAT_CONT_WRITE		(1 << 4)
 101#define CMD_DAT_CONT_DATA_ENABLE	(1 << 3)
 102#define CMD_DAT_CONT_RESPONSE_48BIT_CRC	(1 << 0)
 103#define CMD_DAT_CONT_RESPONSE_136BIT	(2 << 0)
 104#define CMD_DAT_CONT_RESPONSE_48BIT	(3 << 0)
 105
 106#define INT_SDIO_INT_WKP_EN		(1 << 18)
 107#define INT_CARD_INSERTION_WKP_EN	(1 << 17)
 108#define INT_CARD_REMOVAL_WKP_EN		(1 << 16)
 109#define INT_CARD_INSERTION_EN		(1 << 15)
 110#define INT_CARD_REMOVAL_EN		(1 << 14)
 111#define INT_SDIO_IRQ_EN			(1 << 13)
 112#define INT_DAT0_EN			(1 << 12)
 113#define INT_BUF_READ_EN			(1 << 4)
 114#define INT_BUF_WRITE_EN		(1 << 3)
 115#define INT_END_CMD_RES_EN		(1 << 2)
 116#define INT_WRITE_OP_DONE_EN		(1 << 1)
 117#define INT_READ_OP_EN			(1 << 0)
 118
 119enum mxcmci_type {
 120	IMX21_MMC,
 121	IMX31_MMC,
 122	MPC512X_MMC,
 123};
 124
 125struct mxcmci_host {
 126	struct mmc_host		*mmc;
 127	void __iomem		*base;
 128	dma_addr_t		phys_base;
 129	int			detect_irq;
 130	struct dma_chan		*dma;
 131	struct dma_async_tx_descriptor *desc;
 132	int			do_dma;
 133	int			default_irq_mask;
 134	int			use_sdio;
 135	unsigned int		power_mode;
 136	struct imxmmc_platform_data *pdata;
 137
 138	struct mmc_request	*req;
 139	struct mmc_command	*cmd;
 140	struct mmc_data		*data;
 141
 142	unsigned int		datasize;
 143	unsigned int		dma_dir;
 144
 145	u16			rev_no;
 146	unsigned int		cmdat;
 147
 148	struct clk		*clk_ipg;
 149	struct clk		*clk_per;
 150
 151	int			clock;
 152
 153	struct work_struct	datawork;
 154	spinlock_t		lock;
 155
 156	int			burstlen;
 157	int			dmareq;
 158	struct dma_slave_config dma_slave_config;
 159	struct imx_dma_data	dma_data;
 160
 161	struct timer_list	watchdog;
 162	enum mxcmci_type	devtype;
 163};
 164
 165static const struct platform_device_id mxcmci_devtype[] = {
 166	{
 167		.name = "imx21-mmc",
 168		.driver_data = IMX21_MMC,
 169	}, {
 170		.name = "imx31-mmc",
 171		.driver_data = IMX31_MMC,
 172	}, {
 173		.name = "mpc512x-sdhc",
 174		.driver_data = MPC512X_MMC,
 175	}, {
 176		/* sentinel */
 177	}
 178};
 179MODULE_DEVICE_TABLE(platform, mxcmci_devtype);
 180
 181static const struct of_device_id mxcmci_of_match[] = {
 182	{
 183		.compatible = "fsl,imx21-mmc",
 184		.data = &mxcmci_devtype[IMX21_MMC],
 185	}, {
 186		.compatible = "fsl,imx31-mmc",
 187		.data = &mxcmci_devtype[IMX31_MMC],
 188	}, {
 189		.compatible = "fsl,mpc5121-sdhc",
 190		.data = &mxcmci_devtype[MPC512X_MMC],
 191	}, {
 192		/* sentinel */
 193	}
 194};
 195MODULE_DEVICE_TABLE(of, mxcmci_of_match);
 196
 197static inline int is_imx31_mmc(struct mxcmci_host *host)
 198{
 199	return host->devtype == IMX31_MMC;
 200}
 201
 202static inline int is_mpc512x_mmc(struct mxcmci_host *host)
 203{
 204	return host->devtype == MPC512X_MMC;
 205}
 206
 207static inline u32 mxcmci_readl(struct mxcmci_host *host, int reg)
 208{
 209	if (IS_ENABLED(CONFIG_PPC_MPC512x))
 210		return ioread32be(host->base + reg);
 211	else
 212		return readl(host->base + reg);
 213}
 214
 215static inline void mxcmci_writel(struct mxcmci_host *host, u32 val, int reg)
 216{
 217	if (IS_ENABLED(CONFIG_PPC_MPC512x))
 218		iowrite32be(val, host->base + reg);
 219	else
 220		writel(val, host->base + reg);
 221}
 222
 223static inline u16 mxcmci_readw(struct mxcmci_host *host, int reg)
 224{
 225	if (IS_ENABLED(CONFIG_PPC_MPC512x))
 226		return ioread32be(host->base + reg);
 227	else
 228		return readw(host->base + reg);
 229}
 230
 231static inline void mxcmci_writew(struct mxcmci_host *host, u16 val, int reg)
 232{
 233	if (IS_ENABLED(CONFIG_PPC_MPC512x))
 234		iowrite32be(val, host->base + reg);
 235	else
 236		writew(val, host->base + reg);
 237}
 238
 239static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
 240
 241static void mxcmci_set_power(struct mxcmci_host *host, unsigned int vdd)
 242{
 243	if (!IS_ERR(host->mmc->supply.vmmc)) {
 244		if (host->power_mode == MMC_POWER_UP)
 245			mmc_regulator_set_ocr(host->mmc,
 246					      host->mmc->supply.vmmc, vdd);
 247		else if (host->power_mode == MMC_POWER_OFF)
 248			mmc_regulator_set_ocr(host->mmc,
 249					      host->mmc->supply.vmmc, 0);
 250	}
 251
 252	if (host->pdata && host->pdata->setpower)
 253		host->pdata->setpower(mmc_dev(host->mmc), vdd);
 254}
 255
 256static inline int mxcmci_use_dma(struct mxcmci_host *host)
 257{
 258	return host->do_dma;
 259}
 260
 261static void mxcmci_softreset(struct mxcmci_host *host)
 262{
 263	int i;
 264
 265	dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n");
 266
 267	/* reset sequence */
 268	mxcmci_writew(host, STR_STP_CLK_RESET, MMC_REG_STR_STP_CLK);
 269	mxcmci_writew(host, STR_STP_CLK_RESET | STR_STP_CLK_START_CLK,
 270			MMC_REG_STR_STP_CLK);
 271
 272	for (i = 0; i < 8; i++)
 273		mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK);
 274
 275	mxcmci_writew(host, 0xff, MMC_REG_RES_TO);
 276}
 277
 278#if IS_ENABLED(CONFIG_PPC_MPC512x)
 279static inline void buffer_swap32(u32 *buf, int len)
 280{
 281	int i;
 282
 283	for (i = 0; i < ((len + 3) / 4); i++) {
 284		*buf = swab32(*buf);
 285		buf++;
 286	}
 287}
 288
 289static void mxcmci_swap_buffers(struct mmc_data *data)
 290{
 291	struct scatterlist *sg;
 292	int i;
 293
 294	for_each_sg(data->sg, sg, data->sg_len, i)
 295		buffer_swap32(sg_virt(sg), sg->length);
 296}
 297#else
 298static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
 299#endif
 300
 301static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
 302{
 303	unsigned int nob = data->blocks;
 304	unsigned int blksz = data->blksz;
 305	unsigned int datasize = nob * blksz;
 306	struct scatterlist *sg;
 307	enum dma_transfer_direction slave_dirn;
 308	int i, nents;
 309
 310	host->data = data;
 311	data->bytes_xfered = 0;
 312
 313	mxcmci_writew(host, nob, MMC_REG_NOB);
 314	mxcmci_writew(host, blksz, MMC_REG_BLK_LEN);
 315	host->datasize = datasize;
 316
 317	if (!mxcmci_use_dma(host))
 318		return 0;
 319
 320	for_each_sg(data->sg, sg, data->sg_len, i) {
 321		if (sg->offset & 3 || sg->length & 3 || sg->length < 512) {
 322			host->do_dma = 0;
 323			return 0;
 324		}
 325	}
 326
 327	if (data->flags & MMC_DATA_READ) {
 328		host->dma_dir = DMA_FROM_DEVICE;
 329		slave_dirn = DMA_DEV_TO_MEM;
 330	} else {
 331		host->dma_dir = DMA_TO_DEVICE;
 332		slave_dirn = DMA_MEM_TO_DEV;
 333
 334		mxcmci_swap_buffers(data);
 335	}
 336
 337	nents = dma_map_sg(host->dma->device->dev, data->sg,
 338				     data->sg_len,  host->dma_dir);
 339	if (nents != data->sg_len)
 340		return -EINVAL;
 341
 342	host->desc = dmaengine_prep_slave_sg(host->dma,
 343		data->sg, data->sg_len, slave_dirn,
 344		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 345
 346	if (!host->desc) {
 347		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
 348				host->dma_dir);
 349		host->do_dma = 0;
 350		return 0; /* Fall back to PIO */
 351	}
 352	wmb();
 353
 354	dmaengine_submit(host->desc);
 355	dma_async_issue_pending(host->dma);
 356
 357	mod_timer(&host->watchdog, jiffies + msecs_to_jiffies(MXCMCI_TIMEOUT_MS));
 358
 359	return 0;
 360}
 361
 362static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat);
 363static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat);
 364
 365static void mxcmci_dma_callback(void *data)
 366{
 367	struct mxcmci_host *host = data;
 368	u32 stat;
 369
 370	del_timer(&host->watchdog);
 371
 372	stat = mxcmci_readl(host, MMC_REG_STATUS);
 373
 374	dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
 375
 376	mxcmci_data_done(host, stat);
 377}
 378
 379static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
 380		unsigned int cmdat)
 381{
 382	u32 int_cntr = host->default_irq_mask;
 383	unsigned long flags;
 384
 385	WARN_ON(host->cmd != NULL);
 386	host->cmd = cmd;
 387
 388	switch (mmc_resp_type(cmd)) {
 389	case MMC_RSP_R1: /* short CRC, OPCODE */
 390	case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
 391		cmdat |= CMD_DAT_CONT_RESPONSE_48BIT_CRC;
 392		break;
 393	case MMC_RSP_R2: /* long 136 bit + CRC */
 394		cmdat |= CMD_DAT_CONT_RESPONSE_136BIT;
 395		break;
 396	case MMC_RSP_R3: /* short */
 397		cmdat |= CMD_DAT_CONT_RESPONSE_48BIT;
 398		break;
 399	case MMC_RSP_NONE:
 400		break;
 401	default:
 402		dev_err(mmc_dev(host->mmc), "unhandled response type 0x%x\n",
 403				mmc_resp_type(cmd));
 404		cmd->error = -EINVAL;
 405		return -EINVAL;
 406	}
 407
 408	int_cntr = INT_END_CMD_RES_EN;
 409
 410	if (mxcmci_use_dma(host)) {
 411		if (host->dma_dir == DMA_FROM_DEVICE) {
 412			host->desc->callback = mxcmci_dma_callback;
 413			host->desc->callback_param = host;
 414		} else {
 415			int_cntr |= INT_WRITE_OP_DONE_EN;
 416		}
 417	}
 418
 419	spin_lock_irqsave(&host->lock, flags);
 420	if (host->use_sdio)
 421		int_cntr |= INT_SDIO_IRQ_EN;
 422	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
 423	spin_unlock_irqrestore(&host->lock, flags);
 424
 425	mxcmci_writew(host, cmd->opcode, MMC_REG_CMD);
 426	mxcmci_writel(host, cmd->arg, MMC_REG_ARG);
 427	mxcmci_writew(host, cmdat, MMC_REG_CMD_DAT_CONT);
 428
 429	return 0;
 430}
 431
 432static void mxcmci_finish_request(struct mxcmci_host *host,
 433		struct mmc_request *req)
 434{
 435	u32 int_cntr = host->default_irq_mask;
 436	unsigned long flags;
 437
 438	spin_lock_irqsave(&host->lock, flags);
 439	if (host->use_sdio)
 440		int_cntr |= INT_SDIO_IRQ_EN;
 441	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
 442	spin_unlock_irqrestore(&host->lock, flags);
 443
 444	host->req = NULL;
 445	host->cmd = NULL;
 446	host->data = NULL;
 447
 448	mmc_request_done(host->mmc, req);
 449}
 450
 451static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
 452{
 453	struct mmc_data *data = host->data;
 454	int data_error;
 455
 456	if (mxcmci_use_dma(host)) {
 457		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
 458				host->dma_dir);
 459		mxcmci_swap_buffers(data);
 460	}
 461
 462	if (stat & STATUS_ERR_MASK) {
 463		dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
 464				stat);
 465		if (stat & STATUS_CRC_READ_ERR) {
 466			dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__);
 467			data->error = -EILSEQ;
 468		} else if (stat & STATUS_CRC_WRITE_ERR) {
 469			u32 err_code = (stat >> 9) & 0x3;
 470			if (err_code == 2) { /* No CRC response */
 471				dev_err(mmc_dev(host->mmc),
 472					"%s: No CRC -ETIMEDOUT\n", __func__);
 473				data->error = -ETIMEDOUT;
 474			} else {
 475				dev_err(mmc_dev(host->mmc),
 476					"%s: -EILSEQ\n", __func__);
 477				data->error = -EILSEQ;
 478			}
 479		} else if (stat & STATUS_TIME_OUT_READ) {
 480			dev_err(mmc_dev(host->mmc),
 481				"%s: read -ETIMEDOUT\n", __func__);
 482			data->error = -ETIMEDOUT;
 483		} else {
 484			dev_err(mmc_dev(host->mmc), "%s: -EIO\n", __func__);
 485			data->error = -EIO;
 486		}
 487	} else {
 488		data->bytes_xfered = host->datasize;
 489	}
 490
 491	data_error = data->error;
 492
 493	host->data = NULL;
 494
 495	return data_error;
 496}
 497
 498static void mxcmci_read_response(struct mxcmci_host *host, unsigned int stat)
 499{
 500	struct mmc_command *cmd = host->cmd;
 501	int i;
 502	u32 a, b, c;
 503
 504	if (!cmd)
 505		return;
 506
 507	if (stat & STATUS_TIME_OUT_RESP) {
 508		dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
 509		cmd->error = -ETIMEDOUT;
 510	} else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
 511		dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
 512		cmd->error = -EILSEQ;
 513	}
 514
 515	if (cmd->flags & MMC_RSP_PRESENT) {
 516		if (cmd->flags & MMC_RSP_136) {
 517			for (i = 0; i < 4; i++) {
 518				a = mxcmci_readw(host, MMC_REG_RES_FIFO);
 519				b = mxcmci_readw(host, MMC_REG_RES_FIFO);
 520				cmd->resp[i] = a << 16 | b;
 521			}
 522		} else {
 523			a = mxcmci_readw(host, MMC_REG_RES_FIFO);
 524			b = mxcmci_readw(host, MMC_REG_RES_FIFO);
 525			c = mxcmci_readw(host, MMC_REG_RES_FIFO);
 526			cmd->resp[0] = a << 24 | b << 8 | c >> 8;
 527		}
 528	}
 529}
 530
 531static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask)
 532{
 533	u32 stat;
 534	unsigned long timeout = jiffies + HZ;
 535
 536	do {
 537		stat = mxcmci_readl(host, MMC_REG_STATUS);
 538		if (stat & STATUS_ERR_MASK)
 539			return stat;
 540		if (time_after(jiffies, timeout)) {
 541			mxcmci_softreset(host);
 542			mxcmci_set_clk_rate(host, host->clock);
 543			return STATUS_TIME_OUT_READ;
 544		}
 545		if (stat & mask)
 546			return 0;
 547		cpu_relax();
 548	} while (1);
 549}
 550
 551static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes)
 552{
 553	unsigned int stat;
 554	u32 *buf = _buf;
 555
 556	while (bytes > 3) {
 557		stat = mxcmci_poll_status(host,
 558				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
 559		if (stat)
 560			return stat;
 561		*buf++ = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS));
 562		bytes -= 4;
 563	}
 564
 565	if (bytes) {
 566		u8 *b = (u8 *)buf;
 567		u32 tmp;
 568
 569		stat = mxcmci_poll_status(host,
 570				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
 571		if (stat)
 572			return stat;
 573		tmp = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS));
 574		memcpy(b, &tmp, bytes);
 575	}
 576
 577	return 0;
 578}
 579
 580static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)
 581{
 582	unsigned int stat;
 583	u32 *buf = _buf;
 584
 585	while (bytes > 3) {
 586		stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
 587		if (stat)
 588			return stat;
 589		mxcmci_writel(host, cpu_to_le32(*buf++), MMC_REG_BUFFER_ACCESS);
 590		bytes -= 4;
 591	}
 592
 593	if (bytes) {
 594		u8 *b = (u8 *)buf;
 595		u32 tmp;
 596
 597		stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
 598		if (stat)
 599			return stat;
 600
 601		memcpy(&tmp, b, bytes);
 602		mxcmci_writel(host, cpu_to_le32(tmp), MMC_REG_BUFFER_ACCESS);
 603	}
 604
 605	return mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
 606}
 607
 608static int mxcmci_transfer_data(struct mxcmci_host *host)
 609{
 610	struct mmc_data *data = host->req->data;
 611	struct scatterlist *sg;
 612	int stat, i;
 613
 614	host->data = data;
 615	host->datasize = 0;
 616
 617	if (data->flags & MMC_DATA_READ) {
 618		for_each_sg(data->sg, sg, data->sg_len, i) {
 619			stat = mxcmci_pull(host, sg_virt(sg), sg->length);
 620			if (stat)
 621				return stat;
 622			host->datasize += sg->length;
 623		}
 624	} else {
 625		for_each_sg(data->sg, sg, data->sg_len, i) {
 626			stat = mxcmci_push(host, sg_virt(sg), sg->length);
 627			if (stat)
 628				return stat;
 629			host->datasize += sg->length;
 630		}
 631		stat = mxcmci_poll_status(host, STATUS_WRITE_OP_DONE);
 632		if (stat)
 633			return stat;
 634	}
 635	return 0;
 636}
 637
 638static void mxcmci_datawork(struct work_struct *work)
 639{
 640	struct mxcmci_host *host = container_of(work, struct mxcmci_host,
 641						  datawork);
 642	int datastat = mxcmci_transfer_data(host);
 643
 644	mxcmci_writel(host, STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
 645		MMC_REG_STATUS);
 646	mxcmci_finish_data(host, datastat);
 647
 648	if (host->req->stop) {
 649		if (mxcmci_start_cmd(host, host->req->stop, 0)) {
 650			mxcmci_finish_request(host, host->req);
 651			return;
 652		}
 653	} else {
 654		mxcmci_finish_request(host, host->req);
 655	}
 656}
 657
 658static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
 659{
 660	struct mmc_request *req;
 661	int data_error;
 662	unsigned long flags;
 663
 664	spin_lock_irqsave(&host->lock, flags);
 665
 666	if (!host->data) {
 667		spin_unlock_irqrestore(&host->lock, flags);
 668		return;
 669	}
 670
 671	if (!host->req) {
 672		spin_unlock_irqrestore(&host->lock, flags);
 673		return;
 674	}
 675
 676	req = host->req;
 677	if (!req->stop)
 678		host->req = NULL; /* we will handle finish req below */
 679
 680	data_error = mxcmci_finish_data(host, stat);
 681
 682	spin_unlock_irqrestore(&host->lock, flags);
 683
 684	if (data_error)
 685		return;
 686
 687	mxcmci_read_response(host, stat);
 688	host->cmd = NULL;
 689
 690	if (req->stop) {
 691		if (mxcmci_start_cmd(host, req->stop, 0)) {
 692			mxcmci_finish_request(host, req);
 693			return;
 694		}
 695	} else {
 696		mxcmci_finish_request(host, req);
 697	}
 698}
 699
 700static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
 701{
 702	mxcmci_read_response(host, stat);
 703	host->cmd = NULL;
 704
 705	if (!host->data && host->req) {
 706		mxcmci_finish_request(host, host->req);
 707		return;
 708	}
 709
 710	/* For the DMA case the DMA engine handles the data transfer
 711	 * automatically. For non DMA we have to do it ourselves.
 712	 * Don't do it in interrupt context though.
 713	 */
 714	if (!mxcmci_use_dma(host) && host->data)
 715		schedule_work(&host->datawork);
 716
 717}
 718
 719static irqreturn_t mxcmci_irq(int irq, void *devid)
 720{
 721	struct mxcmci_host *host = devid;
 722	unsigned long flags;
 723	bool sdio_irq;
 724	u32 stat;
 725
 726	stat = mxcmci_readl(host, MMC_REG_STATUS);
 727	mxcmci_writel(host,
 728		stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE |
 729			 STATUS_WRITE_OP_DONE),
 730		MMC_REG_STATUS);
 731
 732	dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
 733
 734	spin_lock_irqsave(&host->lock, flags);
 735	sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
 736	spin_unlock_irqrestore(&host->lock, flags);
 737
 738	if (mxcmci_use_dma(host) && (stat & (STATUS_WRITE_OP_DONE)))
 739		mxcmci_writel(host, STATUS_WRITE_OP_DONE, MMC_REG_STATUS);
 740
 741	if (sdio_irq) {
 742		mxcmci_writel(host, STATUS_SDIO_INT_ACTIVE, MMC_REG_STATUS);
 743		mmc_signal_sdio_irq(host->mmc);
 744	}
 745
 746	if (stat & STATUS_END_CMD_RESP)
 747		mxcmci_cmd_done(host, stat);
 748
 749	if (mxcmci_use_dma(host) && (stat & STATUS_WRITE_OP_DONE)) {
 750		del_timer(&host->watchdog);
 751		mxcmci_data_done(host, stat);
 752	}
 753
 754	if (host->default_irq_mask &&
 755		  (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL)))
 756		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
 757
 758	return IRQ_HANDLED;
 759}
 760
 761static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
 762{
 763	struct mxcmci_host *host = mmc_priv(mmc);
 764	unsigned int cmdat = host->cmdat;
 765	int error;
 766
 767	WARN_ON(host->req != NULL);
 768
 769	host->req = req;
 770	host->cmdat &= ~CMD_DAT_CONT_INIT;
 771
 772	if (host->dma)
 773		host->do_dma = 1;
 774
 775	if (req->data) {
 776		error = mxcmci_setup_data(host, req->data);
 777		if (error) {
 778			req->cmd->error = error;
 779			goto out;
 780		}
 781
 782
 783		cmdat |= CMD_DAT_CONT_DATA_ENABLE;
 784
 785		if (req->data->flags & MMC_DATA_WRITE)
 786			cmdat |= CMD_DAT_CONT_WRITE;
 787	}
 788
 789	error = mxcmci_start_cmd(host, req->cmd, cmdat);
 790
 791out:
 792	if (error)
 793		mxcmci_finish_request(host, req);
 794}
 795
 796static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
 797{
 798	unsigned int divider;
 799	int prescaler = 0;
 800	unsigned int clk_in = clk_get_rate(host->clk_per);
 801
 802	while (prescaler <= 0x800) {
 803		for (divider = 1; divider <= 0xF; divider++) {
 804			int x;
 805
 806			x = (clk_in / (divider + 1));
 807
 808			if (prescaler)
 809				x /= (prescaler * 2);
 810
 811			if (x <= clk_ios)
 812				break;
 813		}
 814		if (divider < 0x10)
 815			break;
 816
 817		if (prescaler == 0)
 818			prescaler = 1;
 819		else
 820			prescaler <<= 1;
 821	}
 822
 823	mxcmci_writew(host, (prescaler << 4) | divider, MMC_REG_CLK_RATE);
 824
 825	dev_dbg(mmc_dev(host->mmc), "scaler: %d divider: %d in: %d out: %d\n",
 826			prescaler, divider, clk_in, clk_ios);
 827}
 828
 829static int mxcmci_setup_dma(struct mmc_host *mmc)
 830{
 831	struct mxcmci_host *host = mmc_priv(mmc);
 832	struct dma_slave_config *config = &host->dma_slave_config;
 833
 834	config->dst_addr = host->phys_base + MMC_REG_BUFFER_ACCESS;
 835	config->src_addr = host->phys_base + MMC_REG_BUFFER_ACCESS;
 836	config->dst_addr_width = 4;
 837	config->src_addr_width = 4;
 838	config->dst_maxburst = host->burstlen;
 839	config->src_maxburst = host->burstlen;
 840	config->device_fc = false;
 841
 842	return dmaengine_slave_config(host->dma, config);
 843}
 844
 845static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 846{
 847	struct mxcmci_host *host = mmc_priv(mmc);
 848	int burstlen, ret;
 849
 850	/*
 851	 * use burstlen of 64 (16 words) in 4 bit mode (--> reg value  0)
 852	 * use burstlen of 16 (4 words) in 1 bit mode (--> reg value 16)
 853	 */
 854	if (ios->bus_width == MMC_BUS_WIDTH_4)
 855		burstlen = 16;
 856	else
 857		burstlen = 4;
 858
 859	if (mxcmci_use_dma(host) && burstlen != host->burstlen) {
 860		host->burstlen = burstlen;
 861		ret = mxcmci_setup_dma(mmc);
 862		if (ret) {
 863			dev_err(mmc_dev(host->mmc),
 864				"failed to config DMA channel. Falling back to PIO\n");
 865			dma_release_channel(host->dma);
 866			host->do_dma = 0;
 867			host->dma = NULL;
 868		}
 869	}
 870
 871	if (ios->bus_width == MMC_BUS_WIDTH_4)
 872		host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
 873	else
 874		host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;
 875
 876	if (host->power_mode != ios->power_mode) {
 877		host->power_mode = ios->power_mode;
 878		mxcmci_set_power(host, ios->vdd);
 879
 880		if (ios->power_mode == MMC_POWER_ON)
 881			host->cmdat |= CMD_DAT_CONT_INIT;
 882	}
 883
 884	if (ios->clock) {
 885		mxcmci_set_clk_rate(host, ios->clock);
 886		mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK);
 887	} else {
 888		mxcmci_writew(host, STR_STP_CLK_STOP_CLK, MMC_REG_STR_STP_CLK);
 889	}
 890
 891	host->clock = ios->clock;
 892}
 893
 894static irqreturn_t mxcmci_detect_irq(int irq, void *data)
 895{
 896	struct mmc_host *mmc = data;
 897
 898	dev_dbg(mmc_dev(mmc), "%s\n", __func__);
 899
 900	mmc_detect_change(mmc, msecs_to_jiffies(250));
 901	return IRQ_HANDLED;
 902}
 903
 904static int mxcmci_get_ro(struct mmc_host *mmc)
 905{
 906	struct mxcmci_host *host = mmc_priv(mmc);
 907
 908	if (host->pdata && host->pdata->get_ro)
 909		return !!host->pdata->get_ro(mmc_dev(mmc));
 910	/*
 911	 * If board doesn't support read only detection (no mmc_gpio
 912	 * context or gpio is invalid), then let the mmc core decide
 913	 * what to do.
 914	 */
 915	return mmc_gpio_get_ro(mmc);
 916}
 917
 918static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
 919{
 920	struct mxcmci_host *host = mmc_priv(mmc);
 921	unsigned long flags;
 922	u32 int_cntr;
 923
 924	spin_lock_irqsave(&host->lock, flags);
 925	host->use_sdio = enable;
 926	int_cntr = mxcmci_readl(host, MMC_REG_INT_CNTR);
 927
 928	if (enable)
 929		int_cntr |= INT_SDIO_IRQ_EN;
 930	else
 931		int_cntr &= ~INT_SDIO_IRQ_EN;
 932
 933	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
 934	spin_unlock_irqrestore(&host->lock, flags);
 935}
 936
 937static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
 938{
 939	struct mxcmci_host *mxcmci = mmc_priv(host);
 940
 941	/*
 942	 * MX3 SoCs have a silicon bug which corrupts CRC calculation of
 943	 * multi-block transfers when connected SDIO peripheral doesn't
 944	 * drive the BUSY line as required by the specs.
 945	 * One way to prevent this is to only allow 1-bit transfers.
 946	 */
 947
 948	if (is_imx31_mmc(mxcmci) && card->type == MMC_TYPE_SDIO)
 949		host->caps &= ~MMC_CAP_4_BIT_DATA;
 950	else
 951		host->caps |= MMC_CAP_4_BIT_DATA;
 952}
 953
 954static bool filter(struct dma_chan *chan, void *param)
 955{
 956	struct mxcmci_host *host = param;
 957
 958	if (!imx_dma_is_general_purpose(chan))
 959		return false;
 960
 961	chan->private = &host->dma_data;
 962
 963	return true;
 964}
 965
 966static void mxcmci_watchdog(struct timer_list *t)
 967{
 968	struct mxcmci_host *host = from_timer(host, t, watchdog);
 969	struct mmc_request *req = host->req;
 970	unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS);
 971
 972	if (host->dma_dir == DMA_FROM_DEVICE) {
 973		dmaengine_terminate_all(host->dma);
 974		dev_err(mmc_dev(host->mmc),
 975			"%s: read time out (status = 0x%08x)\n",
 976			__func__, stat);
 977	} else {
 978		dev_err(mmc_dev(host->mmc),
 979			"%s: write time out (status = 0x%08x)\n",
 980			__func__, stat);
 981		mxcmci_softreset(host);
 982	}
 983
 984	/* Mark transfer as erroneous and inform the upper layers */
 985
 986	if (host->data)
 987		host->data->error = -ETIMEDOUT;
 988	host->req = NULL;
 989	host->cmd = NULL;
 990	host->data = NULL;
 991	mmc_request_done(host->mmc, req);
 992}
 993
 994static const struct mmc_host_ops mxcmci_ops = {
 995	.request		= mxcmci_request,
 996	.set_ios		= mxcmci_set_ios,
 997	.get_ro			= mxcmci_get_ro,
 998	.enable_sdio_irq	= mxcmci_enable_sdio_irq,
 999	.init_card		= mxcmci_init_card,
1000};
1001
1002static int mxcmci_probe(struct platform_device *pdev)
1003{
1004	struct mmc_host *mmc;
1005	struct mxcmci_host *host;
1006	struct resource *res;
1007	int ret = 0, irq;
1008	bool dat3_card_detect = false;
1009	dma_cap_mask_t mask;
1010	const struct of_device_id *of_id;
1011	struct imxmmc_platform_data *pdata = pdev->dev.platform_data;
1012
1013	pr_info("i.MX/MPC512x SDHC driver\n");
1014
1015	of_id = of_match_device(mxcmci_of_match, &pdev->dev);
1016
1017	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1018	irq = platform_get_irq(pdev, 0);
1019	if (irq < 0) {
1020		dev_err(&pdev->dev, "failed to get IRQ: %d\n", irq);
1021		return irq;
1022	}
1023
1024	mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
1025	if (!mmc)
1026		return -ENOMEM;
1027
1028	host = mmc_priv(mmc);
1029
1030	host->base = devm_ioremap_resource(&pdev->dev, res);
1031	if (IS_ERR(host->base)) {
1032		ret = PTR_ERR(host->base);
1033		goto out_free;
1034	}
1035
1036	host->phys_base = res->start;
1037
1038	ret = mmc_of_parse(mmc);
1039	if (ret)
1040		goto out_free;
1041	mmc->ops = &mxcmci_ops;
1042
1043	/* For devicetree parsing, the bus width is read from devicetree */
1044	if (pdata)
1045		mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
1046	else
1047		mmc->caps |= MMC_CAP_SDIO_IRQ;
1048
1049	/* MMC core transfer sizes tunable parameters */
1050	mmc->max_blk_size = 2048;
1051	mmc->max_blk_count = 65535;
1052	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1053	mmc->max_seg_size = mmc->max_req_size;
1054
1055	if (of_id) {
1056		const struct platform_device_id *id_entry = of_id->data;
1057		host->devtype = id_entry->driver_data;
1058	} else {
1059		host->devtype = pdev->id_entry->driver_data;
1060	}
1061
1062	/* adjust max_segs after devtype detection */
1063	if (!is_mpc512x_mmc(host))
1064		mmc->max_segs = 64;
1065
1066	host->mmc = mmc;
1067	host->pdata = pdata;
1068	spin_lock_init(&host->lock);
1069
1070	if (pdata)
1071		dat3_card_detect = pdata->dat3_card_detect;
1072	else if (mmc_card_is_removable(mmc)
1073			&& !of_property_read_bool(pdev->dev.of_node, "cd-gpios"))
1074		dat3_card_detect = true;
1075
1076	ret = mmc_regulator_get_supply(mmc);
1077	if (ret)
1078		goto out_free;
1079
1080	if (!mmc->ocr_avail) {
1081		if (pdata && pdata->ocr_avail)
1082			mmc->ocr_avail = pdata->ocr_avail;
1083		else
1084			mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1085	}
1086
1087	if (dat3_card_detect)
1088		host->default_irq_mask =
1089			INT_CARD_INSERTION_EN | INT_CARD_REMOVAL_EN;
1090	else
1091		host->default_irq_mask = 0;
1092
1093	host->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1094	if (IS_ERR(host->clk_ipg)) {
1095		ret = PTR_ERR(host->clk_ipg);
1096		goto out_free;
1097	}
1098
1099	host->clk_per = devm_clk_get(&pdev->dev, "per");
1100	if (IS_ERR(host->clk_per)) {
1101		ret = PTR_ERR(host->clk_per);
1102		goto out_free;
1103	}
1104
1105	ret = clk_prepare_enable(host->clk_per);
1106	if (ret)
1107		goto out_free;
1108
1109	ret = clk_prepare_enable(host->clk_ipg);
1110	if (ret)
1111		goto out_clk_per_put;
1112
1113	mxcmci_softreset(host);
1114
1115	host->rev_no = mxcmci_readw(host, MMC_REG_REV_NO);
1116	if (host->rev_no != 0x400) {
1117		ret = -ENODEV;
1118		dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
1119			host->rev_no);
1120		goto out_clk_put;
1121	}
1122
1123	mmc->f_min = clk_get_rate(host->clk_per) >> 16;
1124	mmc->f_max = clk_get_rate(host->clk_per) >> 1;
1125
1126	/* recommended in data sheet */
1127	mxcmci_writew(host, 0x2db4, MMC_REG_READ_TO);
1128
1129	mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR);
1130
1131	if (!host->pdata) {
1132		host->dma = dma_request_slave_channel(&pdev->dev, "rx-tx");
1133	} else {
1134		res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1135		if (res) {
1136			host->dmareq = res->start;
1137			host->dma_data.peripheral_type = IMX_DMATYPE_SDHC;
1138			host->dma_data.priority = DMA_PRIO_LOW;
1139			host->dma_data.dma_request = host->dmareq;
1140			dma_cap_zero(mask);
1141			dma_cap_set(DMA_SLAVE, mask);
1142			host->dma = dma_request_channel(mask, filter, host);
1143		}
1144	}
1145	if (host->dma)
1146		mmc->max_seg_size = dma_get_max_seg_size(
1147				host->dma->device->dev);
1148	else
1149		dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n");
1150
1151	INIT_WORK(&host->datawork, mxcmci_datawork);
1152
1153	ret = devm_request_irq(&pdev->dev, irq, mxcmci_irq, 0,
1154			       dev_name(&pdev->dev), host);
1155	if (ret)
1156		goto out_free_dma;
1157
1158	platform_set_drvdata(pdev, mmc);
1159
1160	if (host->pdata && host->pdata->init) {
1161		ret = host->pdata->init(&pdev->dev, mxcmci_detect_irq,
1162				host->mmc);
1163		if (ret)
1164			goto out_free_dma;
1165	}
1166
1167	timer_setup(&host->watchdog, mxcmci_watchdog, 0);
1168
1169	mmc_add_host(mmc);
1170
1171	return 0;
1172
1173out_free_dma:
1174	if (host->dma)
1175		dma_release_channel(host->dma);
1176
1177out_clk_put:
1178	clk_disable_unprepare(host->clk_ipg);
1179out_clk_per_put:
1180	clk_disable_unprepare(host->clk_per);
1181
1182out_free:
1183	mmc_free_host(mmc);
1184
1185	return ret;
1186}
1187
1188static int mxcmci_remove(struct platform_device *pdev)
1189{
1190	struct mmc_host *mmc = platform_get_drvdata(pdev);
1191	struct mxcmci_host *host = mmc_priv(mmc);
1192
1193	mmc_remove_host(mmc);
1194
1195	if (host->pdata && host->pdata->exit)
1196		host->pdata->exit(&pdev->dev, mmc);
1197
1198	if (host->dma)
1199		dma_release_channel(host->dma);
1200
1201	clk_disable_unprepare(host->clk_per);
1202	clk_disable_unprepare(host->clk_ipg);
1203
1204	mmc_free_host(mmc);
1205
1206	return 0;
1207}
1208
1209static int __maybe_unused mxcmci_suspend(struct device *dev)
1210{
1211	struct mmc_host *mmc = dev_get_drvdata(dev);
1212	struct mxcmci_host *host = mmc_priv(mmc);
1213
1214	clk_disable_unprepare(host->clk_per);
1215	clk_disable_unprepare(host->clk_ipg);
1216	return 0;
1217}
1218
1219static int __maybe_unused mxcmci_resume(struct device *dev)
1220{
1221	struct mmc_host *mmc = dev_get_drvdata(dev);
1222	struct mxcmci_host *host = mmc_priv(mmc);
1223	int ret;
1224
1225	ret = clk_prepare_enable(host->clk_per);
1226	if (ret)
1227		return ret;
1228
1229	ret = clk_prepare_enable(host->clk_ipg);
1230	if (ret)
1231		clk_disable_unprepare(host->clk_per);
1232
1233	return ret;
1234}
1235
1236static SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume);
1237
1238static struct platform_driver mxcmci_driver = {
1239	.probe		= mxcmci_probe,
1240	.remove		= mxcmci_remove,
1241	.id_table	= mxcmci_devtype,
1242	.driver		= {
1243		.name		= DRIVER_NAME,
1244		.pm	= &mxcmci_pm_ops,
1245		.of_match_table	= mxcmci_of_match,
1246	}
1247};
1248
1249module_platform_driver(mxcmci_driver);
1250
1251MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
1252MODULE_AUTHOR("Sascha Hauer, Pengutronix");
1253MODULE_LICENSE("GPL");
1254MODULE_ALIAS("platform:mxc-mmc");
v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/drivers/mmc/host/mxcmmc.c - Freescale i.MX MMCI driver
   4 *
   5 *  This is a driver for the SDHC controller found in Freescale MX2/MX3
   6 *  SoCs. It is basically the same hardware as found on MX1 (imxmmc.c).
   7 *  Unlike the hardware found on MX1, this hardware just works and does
   8 *  not need all the quirks found in imxmmc.c, hence the separate driver.
   9 *
  10 *  Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
  11 *  Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
  12 *
  13 *  derived from pxamci.c by Russell King
 
 
 
 
 
  14 */
  15
  16#include <linux/module.h>
  17#include <linux/init.h>
  18#include <linux/ioport.h>
  19#include <linux/platform_device.h>
  20#include <linux/highmem.h>
  21#include <linux/interrupt.h>
  22#include <linux/irq.h>
  23#include <linux/blkdev.h>
  24#include <linux/dma-mapping.h>
  25#include <linux/mmc/host.h>
  26#include <linux/mmc/card.h>
  27#include <linux/delay.h>
  28#include <linux/clk.h>
  29#include <linux/io.h>
 
  30#include <linux/regulator/consumer.h>
  31#include <linux/dmaengine.h>
  32#include <linux/types.h>
  33#include <linux/of.h>
  34#include <linux/of_device.h>
  35#include <linux/of_dma.h>
 
  36#include <linux/mmc/slot-gpio.h>
  37
  38#include <asm/dma.h>
  39#include <asm/irq.h>
  40#include <linux/platform_data/mmc-mxcmmc.h>
  41
  42#include <linux/platform_data/dma-imx.h>
  43
  44#define DRIVER_NAME "mxc-mmc"
  45#define MXCMCI_TIMEOUT_MS 10000
  46
  47#define MMC_REG_STR_STP_CLK		0x00
  48#define MMC_REG_STATUS			0x04
  49#define MMC_REG_CLK_RATE		0x08
  50#define MMC_REG_CMD_DAT_CONT		0x0C
  51#define MMC_REG_RES_TO			0x10
  52#define MMC_REG_READ_TO			0x14
  53#define MMC_REG_BLK_LEN			0x18
  54#define MMC_REG_NOB			0x1C
  55#define MMC_REG_REV_NO			0x20
  56#define MMC_REG_INT_CNTR		0x24
  57#define MMC_REG_CMD			0x28
  58#define MMC_REG_ARG			0x2C
  59#define MMC_REG_RES_FIFO		0x34
  60#define MMC_REG_BUFFER_ACCESS		0x38
  61
  62#define STR_STP_CLK_RESET               (1 << 3)
  63#define STR_STP_CLK_START_CLK           (1 << 1)
  64#define STR_STP_CLK_STOP_CLK            (1 << 0)
  65
  66#define STATUS_CARD_INSERTION		(1 << 31)
  67#define STATUS_CARD_REMOVAL		(1 << 30)
  68#define STATUS_YBUF_EMPTY		(1 << 29)
  69#define STATUS_XBUF_EMPTY		(1 << 28)
  70#define STATUS_YBUF_FULL		(1 << 27)
  71#define STATUS_XBUF_FULL		(1 << 26)
  72#define STATUS_BUF_UND_RUN		(1 << 25)
  73#define STATUS_BUF_OVFL			(1 << 24)
  74#define STATUS_SDIO_INT_ACTIVE		(1 << 14)
  75#define STATUS_END_CMD_RESP		(1 << 13)
  76#define STATUS_WRITE_OP_DONE		(1 << 12)
  77#define STATUS_DATA_TRANS_DONE		(1 << 11)
  78#define STATUS_READ_OP_DONE		(1 << 11)
  79#define STATUS_WR_CRC_ERROR_CODE_MASK	(3 << 10)
  80#define STATUS_CARD_BUS_CLK_RUN		(1 << 8)
  81#define STATUS_BUF_READ_RDY		(1 << 7)
  82#define STATUS_BUF_WRITE_RDY		(1 << 6)
  83#define STATUS_RESP_CRC_ERR		(1 << 5)
  84#define STATUS_CRC_READ_ERR		(1 << 3)
  85#define STATUS_CRC_WRITE_ERR		(1 << 2)
  86#define STATUS_TIME_OUT_RESP		(1 << 1)
  87#define STATUS_TIME_OUT_READ		(1 << 0)
  88#define STATUS_ERR_MASK			0x2f
  89
  90#define CMD_DAT_CONT_CMD_RESP_LONG_OFF	(1 << 12)
  91#define CMD_DAT_CONT_STOP_READWAIT	(1 << 11)
  92#define CMD_DAT_CONT_START_READWAIT	(1 << 10)
  93#define CMD_DAT_CONT_BUS_WIDTH_4	(2 << 8)
  94#define CMD_DAT_CONT_INIT		(1 << 7)
  95#define CMD_DAT_CONT_WRITE		(1 << 4)
  96#define CMD_DAT_CONT_DATA_ENABLE	(1 << 3)
  97#define CMD_DAT_CONT_RESPONSE_48BIT_CRC	(1 << 0)
  98#define CMD_DAT_CONT_RESPONSE_136BIT	(2 << 0)
  99#define CMD_DAT_CONT_RESPONSE_48BIT	(3 << 0)
 100
 101#define INT_SDIO_INT_WKP_EN		(1 << 18)
 102#define INT_CARD_INSERTION_WKP_EN	(1 << 17)
 103#define INT_CARD_REMOVAL_WKP_EN		(1 << 16)
 104#define INT_CARD_INSERTION_EN		(1 << 15)
 105#define INT_CARD_REMOVAL_EN		(1 << 14)
 106#define INT_SDIO_IRQ_EN			(1 << 13)
 107#define INT_DAT0_EN			(1 << 12)
 108#define INT_BUF_READ_EN			(1 << 4)
 109#define INT_BUF_WRITE_EN		(1 << 3)
 110#define INT_END_CMD_RES_EN		(1 << 2)
 111#define INT_WRITE_OP_DONE_EN		(1 << 1)
 112#define INT_READ_OP_EN			(1 << 0)
 113
 114enum mxcmci_type {
 115	IMX21_MMC,
 116	IMX31_MMC,
 117	MPC512X_MMC,
 118};
 119
 120struct mxcmci_host {
 121	struct mmc_host		*mmc;
 122	void __iomem		*base;
 123	dma_addr_t		phys_base;
 124	int			detect_irq;
 125	struct dma_chan		*dma;
 126	struct dma_async_tx_descriptor *desc;
 127	int			do_dma;
 128	int			default_irq_mask;
 129	int			use_sdio;
 130	unsigned int		power_mode;
 131	struct imxmmc_platform_data *pdata;
 132
 133	struct mmc_request	*req;
 134	struct mmc_command	*cmd;
 135	struct mmc_data		*data;
 136
 137	unsigned int		datasize;
 138	unsigned int		dma_dir;
 139
 140	u16			rev_no;
 141	unsigned int		cmdat;
 142
 143	struct clk		*clk_ipg;
 144	struct clk		*clk_per;
 145
 146	int			clock;
 147
 148	struct work_struct	datawork;
 149	spinlock_t		lock;
 150
 151	int			burstlen;
 152	int			dmareq;
 153	struct dma_slave_config dma_slave_config;
 154	struct imx_dma_data	dma_data;
 155
 156	struct timer_list	watchdog;
 157	enum mxcmci_type	devtype;
 158};
 159
 160static const struct platform_device_id mxcmci_devtype[] = {
 161	{
 162		.name = "imx21-mmc",
 163		.driver_data = IMX21_MMC,
 164	}, {
 165		.name = "imx31-mmc",
 166		.driver_data = IMX31_MMC,
 167	}, {
 168		.name = "mpc512x-sdhc",
 169		.driver_data = MPC512X_MMC,
 170	}, {
 171		/* sentinel */
 172	}
 173};
 174MODULE_DEVICE_TABLE(platform, mxcmci_devtype);
 175
 176static const struct of_device_id mxcmci_of_match[] = {
 177	{
 178		.compatible = "fsl,imx21-mmc",
 179		.data = &mxcmci_devtype[IMX21_MMC],
 180	}, {
 181		.compatible = "fsl,imx31-mmc",
 182		.data = &mxcmci_devtype[IMX31_MMC],
 183	}, {
 184		.compatible = "fsl,mpc5121-sdhc",
 185		.data = &mxcmci_devtype[MPC512X_MMC],
 186	}, {
 187		/* sentinel */
 188	}
 189};
 190MODULE_DEVICE_TABLE(of, mxcmci_of_match);
 191
 192static inline int is_imx31_mmc(struct mxcmci_host *host)
 193{
 194	return host->devtype == IMX31_MMC;
 195}
 196
 197static inline int is_mpc512x_mmc(struct mxcmci_host *host)
 198{
 199	return host->devtype == MPC512X_MMC;
 200}
 201
 202static inline u32 mxcmci_readl(struct mxcmci_host *host, int reg)
 203{
 204	if (IS_ENABLED(CONFIG_PPC_MPC512x))
 205		return ioread32be(host->base + reg);
 206	else
 207		return readl(host->base + reg);
 208}
 209
 210static inline void mxcmci_writel(struct mxcmci_host *host, u32 val, int reg)
 211{
 212	if (IS_ENABLED(CONFIG_PPC_MPC512x))
 213		iowrite32be(val, host->base + reg);
 214	else
 215		writel(val, host->base + reg);
 216}
 217
 218static inline u16 mxcmci_readw(struct mxcmci_host *host, int reg)
 219{
 220	if (IS_ENABLED(CONFIG_PPC_MPC512x))
 221		return ioread32be(host->base + reg);
 222	else
 223		return readw(host->base + reg);
 224}
 225
 226static inline void mxcmci_writew(struct mxcmci_host *host, u16 val, int reg)
 227{
 228	if (IS_ENABLED(CONFIG_PPC_MPC512x))
 229		iowrite32be(val, host->base + reg);
 230	else
 231		writew(val, host->base + reg);
 232}
 233
 234static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
 235
 236static void mxcmci_set_power(struct mxcmci_host *host, unsigned int vdd)
 237{
 238	if (!IS_ERR(host->mmc->supply.vmmc)) {
 239		if (host->power_mode == MMC_POWER_UP)
 240			mmc_regulator_set_ocr(host->mmc,
 241					      host->mmc->supply.vmmc, vdd);
 242		else if (host->power_mode == MMC_POWER_OFF)
 243			mmc_regulator_set_ocr(host->mmc,
 244					      host->mmc->supply.vmmc, 0);
 245	}
 246
 247	if (host->pdata && host->pdata->setpower)
 248		host->pdata->setpower(mmc_dev(host->mmc), vdd);
 249}
 250
 251static inline int mxcmci_use_dma(struct mxcmci_host *host)
 252{
 253	return host->do_dma;
 254}
 255
 256static void mxcmci_softreset(struct mxcmci_host *host)
 257{
 258	int i;
 259
 260	dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n");
 261
 262	/* reset sequence */
 263	mxcmci_writew(host, STR_STP_CLK_RESET, MMC_REG_STR_STP_CLK);
 264	mxcmci_writew(host, STR_STP_CLK_RESET | STR_STP_CLK_START_CLK,
 265			MMC_REG_STR_STP_CLK);
 266
 267	for (i = 0; i < 8; i++)
 268		mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK);
 269
 270	mxcmci_writew(host, 0xff, MMC_REG_RES_TO);
 271}
 272
 273#if IS_ENABLED(CONFIG_PPC_MPC512x)
 274static inline void buffer_swap32(u32 *buf, int len)
 275{
 276	int i;
 277
 278	for (i = 0; i < ((len + 3) / 4); i++) {
 279		*buf = swab32(*buf);
 280		buf++;
 281	}
 282}
 283
 284static void mxcmci_swap_buffers(struct mmc_data *data)
 285{
 286	struct scatterlist *sg;
 287	int i;
 288
 289	for_each_sg(data->sg, sg, data->sg_len, i)
 290		buffer_swap32(sg_virt(sg), sg->length);
 291}
 292#else
 293static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
 294#endif
 295
 296static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
 297{
 298	unsigned int nob = data->blocks;
 299	unsigned int blksz = data->blksz;
 300	unsigned int datasize = nob * blksz;
 301	struct scatterlist *sg;
 302	enum dma_transfer_direction slave_dirn;
 303	int i, nents;
 304
 305	host->data = data;
 306	data->bytes_xfered = 0;
 307
 308	mxcmci_writew(host, nob, MMC_REG_NOB);
 309	mxcmci_writew(host, blksz, MMC_REG_BLK_LEN);
 310	host->datasize = datasize;
 311
 312	if (!mxcmci_use_dma(host))
 313		return 0;
 314
 315	for_each_sg(data->sg, sg, data->sg_len, i) {
 316		if (sg->offset & 3 || sg->length & 3 || sg->length < 512) {
 317			host->do_dma = 0;
 318			return 0;
 319		}
 320	}
 321
 322	if (data->flags & MMC_DATA_READ) {
 323		host->dma_dir = DMA_FROM_DEVICE;
 324		slave_dirn = DMA_DEV_TO_MEM;
 325	} else {
 326		host->dma_dir = DMA_TO_DEVICE;
 327		slave_dirn = DMA_MEM_TO_DEV;
 328
 329		mxcmci_swap_buffers(data);
 330	}
 331
 332	nents = dma_map_sg(host->dma->device->dev, data->sg,
 333				     data->sg_len,  host->dma_dir);
 334	if (nents != data->sg_len)
 335		return -EINVAL;
 336
 337	host->desc = dmaengine_prep_slave_sg(host->dma,
 338		data->sg, data->sg_len, slave_dirn,
 339		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 340
 341	if (!host->desc) {
 342		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
 343				host->dma_dir);
 344		host->do_dma = 0;
 345		return 0; /* Fall back to PIO */
 346	}
 347	wmb();
 348
 349	dmaengine_submit(host->desc);
 350	dma_async_issue_pending(host->dma);
 351
 352	mod_timer(&host->watchdog, jiffies + msecs_to_jiffies(MXCMCI_TIMEOUT_MS));
 353
 354	return 0;
 355}
 356
 357static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat);
 358static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat);
 359
 360static void mxcmci_dma_callback(void *data)
 361{
 362	struct mxcmci_host *host = data;
 363	u32 stat;
 364
 365	del_timer(&host->watchdog);
 366
 367	stat = mxcmci_readl(host, MMC_REG_STATUS);
 368
 369	dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
 370
 371	mxcmci_data_done(host, stat);
 372}
 373
 374static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
 375		unsigned int cmdat)
 376{
 377	u32 int_cntr = host->default_irq_mask;
 378	unsigned long flags;
 379
 380	WARN_ON(host->cmd != NULL);
 381	host->cmd = cmd;
 382
 383	switch (mmc_resp_type(cmd)) {
 384	case MMC_RSP_R1: /* short CRC, OPCODE */
 385	case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
 386		cmdat |= CMD_DAT_CONT_RESPONSE_48BIT_CRC;
 387		break;
 388	case MMC_RSP_R2: /* long 136 bit + CRC */
 389		cmdat |= CMD_DAT_CONT_RESPONSE_136BIT;
 390		break;
 391	case MMC_RSP_R3: /* short */
 392		cmdat |= CMD_DAT_CONT_RESPONSE_48BIT;
 393		break;
 394	case MMC_RSP_NONE:
 395		break;
 396	default:
 397		dev_err(mmc_dev(host->mmc), "unhandled response type 0x%x\n",
 398				mmc_resp_type(cmd));
 399		cmd->error = -EINVAL;
 400		return -EINVAL;
 401	}
 402
 403	int_cntr = INT_END_CMD_RES_EN;
 404
 405	if (mxcmci_use_dma(host)) {
 406		if (host->dma_dir == DMA_FROM_DEVICE) {
 407			host->desc->callback = mxcmci_dma_callback;
 408			host->desc->callback_param = host;
 409		} else {
 410			int_cntr |= INT_WRITE_OP_DONE_EN;
 411		}
 412	}
 413
 414	spin_lock_irqsave(&host->lock, flags);
 415	if (host->use_sdio)
 416		int_cntr |= INT_SDIO_IRQ_EN;
 417	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
 418	spin_unlock_irqrestore(&host->lock, flags);
 419
 420	mxcmci_writew(host, cmd->opcode, MMC_REG_CMD);
 421	mxcmci_writel(host, cmd->arg, MMC_REG_ARG);
 422	mxcmci_writew(host, cmdat, MMC_REG_CMD_DAT_CONT);
 423
 424	return 0;
 425}
 426
 427static void mxcmci_finish_request(struct mxcmci_host *host,
 428		struct mmc_request *req)
 429{
 430	u32 int_cntr = host->default_irq_mask;
 431	unsigned long flags;
 432
 433	spin_lock_irqsave(&host->lock, flags);
 434	if (host->use_sdio)
 435		int_cntr |= INT_SDIO_IRQ_EN;
 436	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
 437	spin_unlock_irqrestore(&host->lock, flags);
 438
 439	host->req = NULL;
 440	host->cmd = NULL;
 441	host->data = NULL;
 442
 443	mmc_request_done(host->mmc, req);
 444}
 445
 446static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
 447{
 448	struct mmc_data *data = host->data;
 449	int data_error;
 450
 451	if (mxcmci_use_dma(host)) {
 452		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
 453				host->dma_dir);
 454		mxcmci_swap_buffers(data);
 455	}
 456
 457	if (stat & STATUS_ERR_MASK) {
 458		dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
 459				stat);
 460		if (stat & STATUS_CRC_READ_ERR) {
 461			dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__);
 462			data->error = -EILSEQ;
 463		} else if (stat & STATUS_CRC_WRITE_ERR) {
 464			u32 err_code = (stat >> 9) & 0x3;
 465			if (err_code == 2) { /* No CRC response */
 466				dev_err(mmc_dev(host->mmc),
 467					"%s: No CRC -ETIMEDOUT\n", __func__);
 468				data->error = -ETIMEDOUT;
 469			} else {
 470				dev_err(mmc_dev(host->mmc),
 471					"%s: -EILSEQ\n", __func__);
 472				data->error = -EILSEQ;
 473			}
 474		} else if (stat & STATUS_TIME_OUT_READ) {
 475			dev_err(mmc_dev(host->mmc),
 476				"%s: read -ETIMEDOUT\n", __func__);
 477			data->error = -ETIMEDOUT;
 478		} else {
 479			dev_err(mmc_dev(host->mmc), "%s: -EIO\n", __func__);
 480			data->error = -EIO;
 481		}
 482	} else {
 483		data->bytes_xfered = host->datasize;
 484	}
 485
 486	data_error = data->error;
 487
 488	host->data = NULL;
 489
 490	return data_error;
 491}
 492
 493static void mxcmci_read_response(struct mxcmci_host *host, unsigned int stat)
 494{
 495	struct mmc_command *cmd = host->cmd;
 496	int i;
 497	u32 a, b, c;
 498
 499	if (!cmd)
 500		return;
 501
 502	if (stat & STATUS_TIME_OUT_RESP) {
 503		dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
 504		cmd->error = -ETIMEDOUT;
 505	} else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
 506		dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
 507		cmd->error = -EILSEQ;
 508	}
 509
 510	if (cmd->flags & MMC_RSP_PRESENT) {
 511		if (cmd->flags & MMC_RSP_136) {
 512			for (i = 0; i < 4; i++) {
 513				a = mxcmci_readw(host, MMC_REG_RES_FIFO);
 514				b = mxcmci_readw(host, MMC_REG_RES_FIFO);
 515				cmd->resp[i] = a << 16 | b;
 516			}
 517		} else {
 518			a = mxcmci_readw(host, MMC_REG_RES_FIFO);
 519			b = mxcmci_readw(host, MMC_REG_RES_FIFO);
 520			c = mxcmci_readw(host, MMC_REG_RES_FIFO);
 521			cmd->resp[0] = a << 24 | b << 8 | c >> 8;
 522		}
 523	}
 524}
 525
 526static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask)
 527{
 528	u32 stat;
 529	unsigned long timeout = jiffies + HZ;
 530
 531	do {
 532		stat = mxcmci_readl(host, MMC_REG_STATUS);
 533		if (stat & STATUS_ERR_MASK)
 534			return stat;
 535		if (time_after(jiffies, timeout)) {
 536			mxcmci_softreset(host);
 537			mxcmci_set_clk_rate(host, host->clock);
 538			return STATUS_TIME_OUT_READ;
 539		}
 540		if (stat & mask)
 541			return 0;
 542		cpu_relax();
 543	} while (1);
 544}
 545
 546static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes)
 547{
 548	unsigned int stat;
 549	u32 *buf = _buf;
 550
 551	while (bytes > 3) {
 552		stat = mxcmci_poll_status(host,
 553				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
 554		if (stat)
 555			return stat;
 556		*buf++ = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS));
 557		bytes -= 4;
 558	}
 559
 560	if (bytes) {
 561		u8 *b = (u8 *)buf;
 562		u32 tmp;
 563
 564		stat = mxcmci_poll_status(host,
 565				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
 566		if (stat)
 567			return stat;
 568		tmp = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS));
 569		memcpy(b, &tmp, bytes);
 570	}
 571
 572	return 0;
 573}
 574
 575static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)
 576{
 577	unsigned int stat;
 578	u32 *buf = _buf;
 579
 580	while (bytes > 3) {
 581		stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
 582		if (stat)
 583			return stat;
 584		mxcmci_writel(host, cpu_to_le32(*buf++), MMC_REG_BUFFER_ACCESS);
 585		bytes -= 4;
 586	}
 587
 588	if (bytes) {
 589		u8 *b = (u8 *)buf;
 590		u32 tmp;
 591
 592		stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
 593		if (stat)
 594			return stat;
 595
 596		memcpy(&tmp, b, bytes);
 597		mxcmci_writel(host, cpu_to_le32(tmp), MMC_REG_BUFFER_ACCESS);
 598	}
 599
 600	return mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
 601}
 602
 603static int mxcmci_transfer_data(struct mxcmci_host *host)
 604{
 605	struct mmc_data *data = host->req->data;
 606	struct scatterlist *sg;
 607	int stat, i;
 608
 609	host->data = data;
 610	host->datasize = 0;
 611
 612	if (data->flags & MMC_DATA_READ) {
 613		for_each_sg(data->sg, sg, data->sg_len, i) {
 614			stat = mxcmci_pull(host, sg_virt(sg), sg->length);
 615			if (stat)
 616				return stat;
 617			host->datasize += sg->length;
 618		}
 619	} else {
 620		for_each_sg(data->sg, sg, data->sg_len, i) {
 621			stat = mxcmci_push(host, sg_virt(sg), sg->length);
 622			if (stat)
 623				return stat;
 624			host->datasize += sg->length;
 625		}
 626		stat = mxcmci_poll_status(host, STATUS_WRITE_OP_DONE);
 627		if (stat)
 628			return stat;
 629	}
 630	return 0;
 631}
 632
 633static void mxcmci_datawork(struct work_struct *work)
 634{
 635	struct mxcmci_host *host = container_of(work, struct mxcmci_host,
 636						  datawork);
 637	int datastat = mxcmci_transfer_data(host);
 638
 639	mxcmci_writel(host, STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
 640		MMC_REG_STATUS);
 641	mxcmci_finish_data(host, datastat);
 642
 643	if (host->req->stop) {
 644		if (mxcmci_start_cmd(host, host->req->stop, 0)) {
 645			mxcmci_finish_request(host, host->req);
 646			return;
 647		}
 648	} else {
 649		mxcmci_finish_request(host, host->req);
 650	}
 651}
 652
 653static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
 654{
 655	struct mmc_request *req;
 656	int data_error;
 657	unsigned long flags;
 658
 659	spin_lock_irqsave(&host->lock, flags);
 660
 661	if (!host->data) {
 662		spin_unlock_irqrestore(&host->lock, flags);
 663		return;
 664	}
 665
 666	if (!host->req) {
 667		spin_unlock_irqrestore(&host->lock, flags);
 668		return;
 669	}
 670
 671	req = host->req;
 672	if (!req->stop)
 673		host->req = NULL; /* we will handle finish req below */
 674
 675	data_error = mxcmci_finish_data(host, stat);
 676
 677	spin_unlock_irqrestore(&host->lock, flags);
 678
 679	if (data_error)
 680		return;
 681
 682	mxcmci_read_response(host, stat);
 683	host->cmd = NULL;
 684
 685	if (req->stop) {
 686		if (mxcmci_start_cmd(host, req->stop, 0)) {
 687			mxcmci_finish_request(host, req);
 688			return;
 689		}
 690	} else {
 691		mxcmci_finish_request(host, req);
 692	}
 693}
 694
 695static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
 696{
 697	mxcmci_read_response(host, stat);
 698	host->cmd = NULL;
 699
 700	if (!host->data && host->req) {
 701		mxcmci_finish_request(host, host->req);
 702		return;
 703	}
 704
 705	/* For the DMA case the DMA engine handles the data transfer
 706	 * automatically. For non DMA we have to do it ourselves.
 707	 * Don't do it in interrupt context though.
 708	 */
 709	if (!mxcmci_use_dma(host) && host->data)
 710		schedule_work(&host->datawork);
 711
 712}
 713
 714static irqreturn_t mxcmci_irq(int irq, void *devid)
 715{
 716	struct mxcmci_host *host = devid;
 
 717	bool sdio_irq;
 718	u32 stat;
 719
 720	stat = mxcmci_readl(host, MMC_REG_STATUS);
 721	mxcmci_writel(host,
 722		stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE |
 723			 STATUS_WRITE_OP_DONE),
 724		MMC_REG_STATUS);
 725
 726	dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
 727
 728	spin_lock(&host->lock);
 729	sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
 730	spin_unlock(&host->lock);
 731
 732	if (mxcmci_use_dma(host) && (stat & (STATUS_WRITE_OP_DONE)))
 733		mxcmci_writel(host, STATUS_WRITE_OP_DONE, MMC_REG_STATUS);
 734
 735	if (sdio_irq) {
 736		mxcmci_writel(host, STATUS_SDIO_INT_ACTIVE, MMC_REG_STATUS);
 737		mmc_signal_sdio_irq(host->mmc);
 738	}
 739
 740	if (stat & STATUS_END_CMD_RESP)
 741		mxcmci_cmd_done(host, stat);
 742
 743	if (mxcmci_use_dma(host) && (stat & STATUS_WRITE_OP_DONE)) {
 744		del_timer(&host->watchdog);
 745		mxcmci_data_done(host, stat);
 746	}
 747
 748	if (host->default_irq_mask &&
 749		  (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL)))
 750		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
 751
 752	return IRQ_HANDLED;
 753}
 754
 755static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
 756{
 757	struct mxcmci_host *host = mmc_priv(mmc);
 758	unsigned int cmdat = host->cmdat;
 759	int error;
 760
 761	WARN_ON(host->req != NULL);
 762
 763	host->req = req;
 764	host->cmdat &= ~CMD_DAT_CONT_INIT;
 765
 766	if (host->dma)
 767		host->do_dma = 1;
 768
 769	if (req->data) {
 770		error = mxcmci_setup_data(host, req->data);
 771		if (error) {
 772			req->cmd->error = error;
 773			goto out;
 774		}
 775
 776
 777		cmdat |= CMD_DAT_CONT_DATA_ENABLE;
 778
 779		if (req->data->flags & MMC_DATA_WRITE)
 780			cmdat |= CMD_DAT_CONT_WRITE;
 781	}
 782
 783	error = mxcmci_start_cmd(host, req->cmd, cmdat);
 784
 785out:
 786	if (error)
 787		mxcmci_finish_request(host, req);
 788}
 789
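/*
 * Pick a prescaler/divider pair for the "per" clock: scan dividers
 * 1..15, doubling the prescaler whenever no divider alone is slow
 * enough, until clk_in / (divider + 1), further divided by
 * 2 * prescaler when the prescaler is non-zero, drops to the requested
 * rate or below. The pair is programmed as (prescaler << 4) | divider.
 *
 * Worked example (hypothetical 66 MHz clk_in, 25 MHz clk_ios):
 * divider 1 gives 33 MHz, divider 2 gives 22 MHz <= 25 MHz, so the
 * search stops with prescaler 0 and 0x02 is written to CLK_RATE.
 */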
 790static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
 791{
 792	unsigned int divider;
 793	int prescaler = 0;
 794	unsigned int clk_in = clk_get_rate(host->clk_per);
 795
 796	while (prescaler <= 0x800) {
 797		for (divider = 1; divider <= 0xF; divider++) {
 798			int x;
 799
 800			x = (clk_in / (divider + 1));
 801
 802			if (prescaler)
 803				x /= (prescaler * 2);
 804
 805			if (x <= clk_ios)
 806				break;
 807		}
 808		if (divider < 0x10)
 809			break;
 810
 811		if (prescaler == 0)
 812			prescaler = 1;
 813		else
 814			prescaler <<= 1;
 815	}
 816
 817	mxcmci_writew(host, (prescaler << 4) | divider, MMC_REG_CLK_RATE);
 818
 819	dev_dbg(mmc_dev(host->mmc), "scaler: %d divider: %d in: %d out: %d\n",
 820			prescaler, divider, clk_in, clk_ios);
 821}
 822
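/*
 * Configure the slave DMA channel: both directions target the
 * BUFFER_ACCESS FIFO with 32-bit accesses and a burst length taken
 * from host->burstlen (chosen in mxcmci_set_ios below).
 */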
 823static int mxcmci_setup_dma(struct mmc_host *mmc)
 824{
 825	struct mxcmci_host *host = mmc_priv(mmc);
 826	struct dma_slave_config *config = &host->dma_slave_config;
 827
 828	config->dst_addr = host->phys_base + MMC_REG_BUFFER_ACCESS;
 829	config->src_addr = host->phys_base + MMC_REG_BUFFER_ACCESS;
 830	config->dst_addr_width = 4;
 831	config->src_addr_width = 4;
 832	config->dst_maxburst = host->burstlen;
 833	config->src_maxburst = host->burstlen;
 834	config->device_fc = false;
 835
 836	return dmaengine_slave_config(host->dma, config);
 837}
 838
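/*
 * mmc_host_ops .set_ios callback: adapt the DMA burst length to the
 * bus width, switch between 1-bit and 4-bit mode, handle power mode
 * changes and program or gate the card clock.
 */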
 839static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 840{
 841	struct mxcmci_host *host = mmc_priv(mmc);
 842	int burstlen, ret;
 843
 844	/*
 845	 * use burstlen of 64 (16 words) in 4 bit mode (--> reg value  0)
 846	 * use burstlen of 16 (4 words) in 1 bit mode (--> reg value 16)
 847	 */
 848	if (ios->bus_width == MMC_BUS_WIDTH_4)
 849		burstlen = 16;
 850	else
 851		burstlen = 4;
 852
 853	if (mxcmci_use_dma(host) && burstlen != host->burstlen) {
 854		host->burstlen = burstlen;
 855		ret = mxcmci_setup_dma(mmc);
 856		if (ret) {
 857			dev_err(mmc_dev(host->mmc),
 858				"failed to config DMA channel. Falling back to PIO\n");
 859			dma_release_channel(host->dma);
 860			host->do_dma = 0;
 861			host->dma = NULL;
 862		}
 863	}
 864
 865	if (ios->bus_width == MMC_BUS_WIDTH_4)
 866		host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
 867	else
 868		host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;
 869
 870	if (host->power_mode != ios->power_mode) {
 871		host->power_mode = ios->power_mode;
 872		mxcmci_set_power(host, ios->vdd);
 873
 874		if (ios->power_mode == MMC_POWER_ON)
 875			host->cmdat |= CMD_DAT_CONT_INIT;
 876	}
 877
 878	if (ios->clock) {
 879		mxcmci_set_clk_rate(host, ios->clock);
 880		mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK);
 881	} else {
 882		mxcmci_writew(host, STR_STP_CLK_STOP_CLK, MMC_REG_STR_STP_CLK);
 883	}
 884
 885	host->clock = ios->clock;
 886}
 887
 888static irqreturn_t mxcmci_detect_irq(int irq, void *data)
 889{
 890	struct mmc_host *mmc = data;
 891
 892	dev_dbg(mmc_dev(mmc), "%s\n", __func__);
 893
 894	mmc_detect_change(mmc, msecs_to_jiffies(250));
 895	return IRQ_HANDLED;
 896}
 897
 898static int mxcmci_get_ro(struct mmc_host *mmc)
 899{
 900	struct mxcmci_host *host = mmc_priv(mmc);
 901
 902	if (host->pdata && host->pdata->get_ro)
 903		return !!host->pdata->get_ro(mmc_dev(mmc));
 904	/*
  905	 * If the board doesn't support read-only detection (no mmc_gpio
 906	 * context or gpio is invalid), then let the mmc core decide
 907	 * what to do.
 908	 */
 909	return mmc_gpio_get_ro(mmc);
 910}
 911
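/*
 * Enable or disable SDIO card interrupts: cache the state in
 * host->use_sdio for the interrupt handler and toggle INT_SDIO_IRQ_EN
 * in the interrupt control register, both under the host lock.
 */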
 912static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
 913{
 914	struct mxcmci_host *host = mmc_priv(mmc);
 915	unsigned long flags;
 916	u32 int_cntr;
 917
 918	spin_lock_irqsave(&host->lock, flags);
 919	host->use_sdio = enable;
 920	int_cntr = mxcmci_readl(host, MMC_REG_INT_CNTR);
 921
 922	if (enable)
 923		int_cntr |= INT_SDIO_IRQ_EN;
 924	else
 925		int_cntr &= ~INT_SDIO_IRQ_EN;
 926
 927	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);
 928	spin_unlock_irqrestore(&host->lock, flags);
 929}
 930
 931static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
 932{
 933	struct mxcmci_host *mxcmci = mmc_priv(host);
 934
 935	/*
 936	 * MX3 SoCs have a silicon bug which corrupts CRC calculation of
  937	 * multi-block transfers when the connected SDIO peripheral doesn't
 938	 * drive the BUSY line as required by the specs.
 939	 * One way to prevent this is to only allow 1-bit transfers.
 940	 */
 941
 942	if (is_imx31_mmc(mxcmci) && card->type == MMC_TYPE_SDIO)
 943		host->caps &= ~MMC_CAP_4_BIT_DATA;
 944	else
 945		host->caps |= MMC_CAP_4_BIT_DATA;
 946}
 947
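/*
 * dmaengine channel filter for the legacy platform-data path: accept
 * only general purpose i.MX DMA channels and attach host->dma_data to
 * the channel via chan->private.
 */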
 948static bool filter(struct dma_chan *chan, void *param)
 949{
 950	struct mxcmci_host *host = param;
 951
 952	if (!imx_dma_is_general_purpose(chan))
 953		return false;
 954
 955	chan->private = &host->dma_data;
 956
 957	return true;
 958}
 959
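/*
 * Watchdog for DMA transfers that never complete: cancel a stuck read
 * with dmaengine_terminate_all(), soft-reset the controller on a stuck
 * write, flag the data with -ETIMEDOUT and complete the request.
 */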
 960static void mxcmci_watchdog(struct timer_list *t)
 961{
 962	struct mxcmci_host *host = from_timer(host, t, watchdog);
 963	struct mmc_request *req = host->req;
 964	unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS);
 965
 966	if (host->dma_dir == DMA_FROM_DEVICE) {
 967		dmaengine_terminate_all(host->dma);
 968		dev_err(mmc_dev(host->mmc),
 969			"%s: read time out (status = 0x%08x)\n",
 970			__func__, stat);
 971	} else {
 972		dev_err(mmc_dev(host->mmc),
 973			"%s: write time out (status = 0x%08x)\n",
 974			__func__, stat);
 975		mxcmci_softreset(host);
 976	}
 977
  978	/* Mark transfer as erroneous and inform the upper layers */
 979
 980	if (host->data)
 981		host->data->error = -ETIMEDOUT;
 982	host->req = NULL;
 983	host->cmd = NULL;
 984	host->data = NULL;
 985	mmc_request_done(host->mmc, req);
 986}
 987
 988static const struct mmc_host_ops mxcmci_ops = {
 989	.request		= mxcmci_request,
 990	.set_ios		= mxcmci_set_ios,
 991	.get_ro			= mxcmci_get_ro,
 992	.enable_sdio_irq	= mxcmci_enable_sdio_irq,
 993	.init_card		= mxcmci_init_card,
 994};
 995
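/*
 * Probe: map the register window, parse DT or platform data, set up
 * the clocks, verify the controller revision, acquire a DMA channel if
 * one is available (falling back to PIO otherwise), request the
 * interrupt and register the mmc host.
 */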
 996static int mxcmci_probe(struct platform_device *pdev)
 997{
 998	struct mmc_host *mmc;
 999	struct mxcmci_host *host;
1000	struct resource *res;
1001	int ret = 0, irq;
1002	bool dat3_card_detect = false;
1003	dma_cap_mask_t mask;
1004	const struct of_device_id *of_id;
1005	struct imxmmc_platform_data *pdata = pdev->dev.platform_data;
1006
1007	pr_info("i.MX/MPC512x SDHC driver\n");
1008
1009	of_id = of_match_device(mxcmci_of_match, &pdev->dev);
1010
1011	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1012	irq = platform_get_irq(pdev, 0);
1013	if (irq < 0)
1014		return irq;
1015
1016	mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
1017	if (!mmc)
1018		return -ENOMEM;
1019
1020	host = mmc_priv(mmc);
1021
1022	host->base = devm_ioremap_resource(&pdev->dev, res);
1023	if (IS_ERR(host->base)) {
1024		ret = PTR_ERR(host->base);
1025		goto out_free;
1026	}
1027
1028	host->phys_base = res->start;
1029
1030	ret = mmc_of_parse(mmc);
1031	if (ret)
1032		goto out_free;
1033	mmc->ops = &mxcmci_ops;
1034
1035	/* For devicetree parsing, the bus width is read from devicetree */
1036	if (pdata)
1037		mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
1038	else
1039		mmc->caps |= MMC_CAP_SDIO_IRQ;
1040
1041	/* MMC core transfer sizes tunable parameters */
1042	mmc->max_blk_size = 2048;
1043	mmc->max_blk_count = 65535;
1044	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1045	mmc->max_seg_size = mmc->max_req_size;
1046
1047	if (of_id) {
1048		const struct platform_device_id *id_entry = of_id->data;
1049		host->devtype = id_entry->driver_data;
1050	} else {
1051		host->devtype = pdev->id_entry->driver_data;
1052	}
1053
1054	/* adjust max_segs after devtype detection */
1055	if (!is_mpc512x_mmc(host))
1056		mmc->max_segs = 64;
1057
1058	host->mmc = mmc;
1059	host->pdata = pdata;
1060	spin_lock_init(&host->lock);
1061
1062	if (pdata)
1063		dat3_card_detect = pdata->dat3_card_detect;
1064	else if (mmc_card_is_removable(mmc)
1065			&& !of_property_read_bool(pdev->dev.of_node, "cd-gpios"))
1066		dat3_card_detect = true;
1067
1068	ret = mmc_regulator_get_supply(mmc);
1069	if (ret)
1070		goto out_free;
1071
1072	if (!mmc->ocr_avail) {
1073		if (pdata && pdata->ocr_avail)
1074			mmc->ocr_avail = pdata->ocr_avail;
1075		else
1076			mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1077	}
1078
1079	if (dat3_card_detect)
1080		host->default_irq_mask =
1081			INT_CARD_INSERTION_EN | INT_CARD_REMOVAL_EN;
1082	else
1083		host->default_irq_mask = 0;
1084
1085	host->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1086	if (IS_ERR(host->clk_ipg)) {
1087		ret = PTR_ERR(host->clk_ipg);
1088		goto out_free;
1089	}
1090
1091	host->clk_per = devm_clk_get(&pdev->dev, "per");
1092	if (IS_ERR(host->clk_per)) {
1093		ret = PTR_ERR(host->clk_per);
1094		goto out_free;
1095	}
1096
1097	ret = clk_prepare_enable(host->clk_per);
1098	if (ret)
1099		goto out_free;
1100
1101	ret = clk_prepare_enable(host->clk_ipg);
1102	if (ret)
1103		goto out_clk_per_put;
1104
1105	mxcmci_softreset(host);
1106
1107	host->rev_no = mxcmci_readw(host, MMC_REG_REV_NO);
1108	if (host->rev_no != 0x400) {
1109		ret = -ENODEV;
1110		dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
1111			host->rev_no);
1112		goto out_clk_put;
1113	}
1114
1115	mmc->f_min = clk_get_rate(host->clk_per) >> 16;
1116	mmc->f_max = clk_get_rate(host->clk_per) >> 1;
1117
1118	/* recommended in data sheet */
1119	mxcmci_writew(host, 0x2db4, MMC_REG_READ_TO);
1120
1121	mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR);
1122
1123	if (!host->pdata) {
1124		host->dma = dma_request_slave_channel(&pdev->dev, "rx-tx");
1125	} else {
1126		res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1127		if (res) {
1128			host->dmareq = res->start;
1129			host->dma_data.peripheral_type = IMX_DMATYPE_SDHC;
1130			host->dma_data.priority = DMA_PRIO_LOW;
1131			host->dma_data.dma_request = host->dmareq;
1132			dma_cap_zero(mask);
1133			dma_cap_set(DMA_SLAVE, mask);
1134			host->dma = dma_request_channel(mask, filter, host);
1135		}
1136	}
1137	if (host->dma)
1138		mmc->max_seg_size = dma_get_max_seg_size(
1139				host->dma->device->dev);
1140	else
1141		dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n");
1142
1143	INIT_WORK(&host->datawork, mxcmci_datawork);
1144
1145	ret = devm_request_irq(&pdev->dev, irq, mxcmci_irq, 0,
1146			       dev_name(&pdev->dev), host);
1147	if (ret)
1148		goto out_free_dma;
1149
1150	platform_set_drvdata(pdev, mmc);
1151
1152	if (host->pdata && host->pdata->init) {
1153		ret = host->pdata->init(&pdev->dev, mxcmci_detect_irq,
1154				host->mmc);
1155		if (ret)
1156			goto out_free_dma;
1157	}
1158
1159	timer_setup(&host->watchdog, mxcmci_watchdog, 0);
1160
1161	mmc_add_host(mmc);
1162
1163	return 0;
1164
1165out_free_dma:
1166	if (host->dma)
1167		dma_release_channel(host->dma);
1168
1169out_clk_put:
1170	clk_disable_unprepare(host->clk_ipg);
1171out_clk_per_put:
1172	clk_disable_unprepare(host->clk_per);
1173
1174out_free:
1175	mmc_free_host(mmc);
1176
1177	return ret;
1178}
1179
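/*
 * Remove: undo the probe steps; the register mapping and the interrupt
 * are devm-managed and released automatically.
 */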
1180static int mxcmci_remove(struct platform_device *pdev)
1181{
1182	struct mmc_host *mmc = platform_get_drvdata(pdev);
1183	struct mxcmci_host *host = mmc_priv(mmc);
1184
1185	mmc_remove_host(mmc);
1186
1187	if (host->pdata && host->pdata->exit)
1188		host->pdata->exit(&pdev->dev, mmc);
1189
1190	if (host->dma)
1191		dma_release_channel(host->dma);
1192
1193	clk_disable_unprepare(host->clk_per);
1194	clk_disable_unprepare(host->clk_ipg);
1195
1196	mmc_free_host(mmc);
1197
1198	return 0;
1199}
1200
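/*
 * System sleep support: only the "per" and "ipg" clocks are gated on
 * suspend and re-enabled on resume; no register state is saved or
 * restored here.
 */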
1201#ifdef CONFIG_PM_SLEEP
1202static int mxcmci_suspend(struct device *dev)
1203{
1204	struct mmc_host *mmc = dev_get_drvdata(dev);
1205	struct mxcmci_host *host = mmc_priv(mmc);
1206
1207	clk_disable_unprepare(host->clk_per);
1208	clk_disable_unprepare(host->clk_ipg);
1209	return 0;
1210}
1211
1212static int mxcmci_resume(struct device *dev)
1213{
1214	struct mmc_host *mmc = dev_get_drvdata(dev);
1215	struct mxcmci_host *host = mmc_priv(mmc);
1216	int ret;
1217
1218	ret = clk_prepare_enable(host->clk_per);
1219	if (ret)
1220		return ret;
1221
1222	ret = clk_prepare_enable(host->clk_ipg);
1223	if (ret)
1224		clk_disable_unprepare(host->clk_per);
1225
1226	return ret;
1227}
1228#endif
1229
1230static SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume);
1231
1232static struct platform_driver mxcmci_driver = {
1233	.probe		= mxcmci_probe,
1234	.remove		= mxcmci_remove,
1235	.id_table	= mxcmci_devtype,
1236	.driver		= {
1237		.name		= DRIVER_NAME,
1238		.pm	= &mxcmci_pm_ops,
1239		.of_match_table	= mxcmci_of_match,
1240	}
1241};
1242
1243module_platform_driver(mxcmci_driver);
1244
1245MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
1246MODULE_AUTHOR("Sascha Hauer, Pengutronix");
1247MODULE_LICENSE("GPL");
1248MODULE_ALIAS("platform:mxc-mmc");