v4.6 (drivers/mmc/host/jz4740_mmc.c)
   1/*
   2 *  Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
   3 *  JZ4740 SD/MMC controller driver
   4 *
   5 *  This program is free software; you can redistribute  it and/or modify it
   6 *  under  the terms of  the GNU General  Public License as published by the
   7 *  Free Software Foundation;  either version 2 of the  License, or (at your
   8 *  option) any later version.
   9 *
  10 *  You should have received a copy of the  GNU General Public License along
  11 *  with this program; if not, write  to the Free Software Foundation, Inc.,
  12 *  675 Mass Ave, Cambridge, MA 02139, USA.
  13 *
  14 */
  15
  16#include <linux/mmc/host.h>
  17#include <linux/mmc/slot-gpio.h>
  18#include <linux/err.h>
  19#include <linux/io.h>
  20#include <linux/irq.h>
  21#include <linux/interrupt.h>
  22#include <linux/module.h>
  23#include <linux/platform_device.h>
  24#include <linux/delay.h>
  25#include <linux/scatterlist.h>
  26#include <linux/clk.h>
  27
  28#include <linux/bitops.h>
  29#include <linux/gpio.h>
  30#include <asm/mach-jz4740/gpio.h>
  31#include <asm/cacheflush.h>
  32#include <linux/dma-mapping.h>
  33#include <linux/dmaengine.h>
  34
  35#include <asm/mach-jz4740/dma.h>
  36#include <asm/mach-jz4740/jz4740_mmc.h>
  37
  38#define JZ_REG_MMC_STRPCL	0x00
  39#define JZ_REG_MMC_STATUS	0x04
  40#define JZ_REG_MMC_CLKRT	0x08
  41#define JZ_REG_MMC_CMDAT	0x0C
  42#define JZ_REG_MMC_RESTO	0x10
  43#define JZ_REG_MMC_RDTO		0x14
  44#define JZ_REG_MMC_BLKLEN	0x18
  45#define JZ_REG_MMC_NOB		0x1C
  46#define JZ_REG_MMC_SNOB		0x20
  47#define JZ_REG_MMC_IMASK	0x24
  48#define JZ_REG_MMC_IREG		0x28
  49#define JZ_REG_MMC_CMD		0x2C
  50#define JZ_REG_MMC_ARG		0x30
  51#define JZ_REG_MMC_RESP_FIFO	0x34
  52#define JZ_REG_MMC_RXFIFO	0x38
  53#define JZ_REG_MMC_TXFIFO	0x3C
  54
  55#define JZ_MMC_STRPCL_EXIT_MULTIPLE BIT(7)
  56#define JZ_MMC_STRPCL_EXIT_TRANSFER BIT(6)
  57#define JZ_MMC_STRPCL_START_READWAIT BIT(5)
  58#define JZ_MMC_STRPCL_STOP_READWAIT BIT(4)
  59#define JZ_MMC_STRPCL_RESET BIT(3)
  60#define JZ_MMC_STRPCL_START_OP BIT(2)
  61#define JZ_MMC_STRPCL_CLOCK_CONTROL (BIT(1) | BIT(0))
  62#define JZ_MMC_STRPCL_CLOCK_STOP BIT(0)
  63#define JZ_MMC_STRPCL_CLOCK_START BIT(1)
  64
  65
  66#define JZ_MMC_STATUS_IS_RESETTING BIT(15)
  67#define JZ_MMC_STATUS_SDIO_INT_ACTIVE BIT(14)
  68#define JZ_MMC_STATUS_PRG_DONE BIT(13)
  69#define JZ_MMC_STATUS_DATA_TRAN_DONE BIT(12)
  70#define JZ_MMC_STATUS_END_CMD_RES BIT(11)
  71#define JZ_MMC_STATUS_DATA_FIFO_AFULL BIT(10)
  72#define JZ_MMC_STATUS_IS_READWAIT BIT(9)
  73#define JZ_MMC_STATUS_CLK_EN BIT(8)
  74#define JZ_MMC_STATUS_DATA_FIFO_FULL BIT(7)
  75#define JZ_MMC_STATUS_DATA_FIFO_EMPTY BIT(6)
  76#define JZ_MMC_STATUS_CRC_RES_ERR BIT(5)
  77#define JZ_MMC_STATUS_CRC_READ_ERROR BIT(4)
  78#define JZ_MMC_STATUS_TIMEOUT_WRITE BIT(3)
  79#define JZ_MMC_STATUS_CRC_WRITE_ERROR BIT(2)
  80#define JZ_MMC_STATUS_TIMEOUT_RES BIT(1)
  81#define JZ_MMC_STATUS_TIMEOUT_READ BIT(0)
  82
  83#define JZ_MMC_STATUS_READ_ERROR_MASK (BIT(4) | BIT(0))
  84#define JZ_MMC_STATUS_WRITE_ERROR_MASK (BIT(3) | BIT(2))
  85
  86
  87#define JZ_MMC_CMDAT_IO_ABORT BIT(11)
  88#define JZ_MMC_CMDAT_BUS_WIDTH_4BIT BIT(10)
  89#define JZ_MMC_CMDAT_DMA_EN BIT(8)
  90#define JZ_MMC_CMDAT_INIT BIT(7)
  91#define JZ_MMC_CMDAT_BUSY BIT(6)
  92#define JZ_MMC_CMDAT_STREAM BIT(5)
  93#define JZ_MMC_CMDAT_WRITE BIT(4)
  94#define JZ_MMC_CMDAT_DATA_EN BIT(3)
  95#define JZ_MMC_CMDAT_RESPONSE_FORMAT (BIT(2) | BIT(1) | BIT(0))
  96#define JZ_MMC_CMDAT_RSP_R1 1
  97#define JZ_MMC_CMDAT_RSP_R2 2
  98#define JZ_MMC_CMDAT_RSP_R3 3
  99
 100#define JZ_MMC_IRQ_SDIO BIT(7)
 101#define JZ_MMC_IRQ_TXFIFO_WR_REQ BIT(6)
 102#define JZ_MMC_IRQ_RXFIFO_RD_REQ BIT(5)
 103#define JZ_MMC_IRQ_END_CMD_RES BIT(2)
 104#define JZ_MMC_IRQ_PRG_DONE BIT(1)
 105#define JZ_MMC_IRQ_DATA_TRAN_DONE BIT(0)
 106
 107
 108#define JZ_MMC_CLK_RATE 24000000
 109
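/*
 * Stages of the request currently being processed; jz_mmc_irq_worker() steps
 * through these states, falling through from one case to the next, and stops
 * in the current state whenever it has to wait for another interrupt.
 */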
 110enum jz4740_mmc_state {
 111	JZ4740_MMC_STATE_READ_RESPONSE,
 112	JZ4740_MMC_STATE_TRANSFER_DATA,
 113	JZ4740_MMC_STATE_SEND_STOP,
 114	JZ4740_MMC_STATE_DONE,
 115};
 116
 117struct jz4740_mmc_host_next {
 118	int sg_len;
 119	s32 cookie;
 120};
 121
 122struct jz4740_mmc_host {
 123	struct mmc_host *mmc;
 124	struct platform_device *pdev;
 125	struct jz4740_mmc_platform_data *pdata;
 126	struct clk *clk;
 127
 128	int irq;
 129	int card_detect_irq;
 130
 131	void __iomem *base;
 132	struct resource *mem_res;
 133	struct mmc_request *req;
 134	struct mmc_command *cmd;
 135
 136	unsigned long waiting;
 137
 138	uint32_t cmdat;
 139
 140	uint16_t irq_mask;
 141
 142	spinlock_t lock;
 143
 144	struct timer_list timeout_timer;
 145	struct sg_mapping_iter miter;
 146	enum jz4740_mmc_state state;
 147
 148	/* DMA support */
 149	struct dma_chan *dma_rx;
 150	struct dma_chan *dma_tx;
 151	struct jz4740_mmc_host_next next_data;
 152	bool use_dma;
 153	int sg_len;
 154
 155/* The DMA trigger level is 8 words, that is to say, the DMA read
  156 * trigger is when there are >= 8 data words in MSC_RXFIFO and the DMA write
  157 * trigger is when there are < 8 data words in MSC_TXFIFO.
 158 */
 159#define JZ4740_MMC_FIFO_HALF_SIZE 8
 160};
 161
 162/*----------------------------------------------------------------------------*/
 163/* DMA infrastructure */
 164
 165static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host)
 166{
 167	if (!host->use_dma)
 168		return;
 169
 170	dma_release_channel(host->dma_tx);
 171	dma_release_channel(host->dma_rx);
 172}
 173
 174static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
 175{
 176	dma_cap_mask_t mask;
 177
 178	dma_cap_zero(mask);
 179	dma_cap_set(DMA_SLAVE, mask);
 180
 181	host->dma_tx = dma_request_channel(mask, NULL, host);
 182	if (!host->dma_tx) {
 183		dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n");
 184		return -ENODEV;
 185	}
 186
 187	host->dma_rx = dma_request_channel(mask, NULL, host);
 188	if (!host->dma_rx) {
 189		dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n");
 190		goto free_master_write;
 191	}
 192
 193	/* Initialize DMA pre request cookie */
 194	host->next_data.cookie = 1;
 195
 196	return 0;
 197
 198free_master_write:
 199	dma_release_channel(host->dma_tx);
 200	return -ENODEV;
 201}
 202
 203static inline int jz4740_mmc_get_dma_dir(struct mmc_data *data)
 204{
 205	return (data->flags & MMC_DATA_READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 206}
 207
 208static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
 209						       struct mmc_data *data)
 210{
 211	return (data->flags & MMC_DATA_READ) ? host->dma_rx : host->dma_tx;
 212}
 213
 214static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
 215				 struct mmc_data *data)
 216{
 217	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
 218	enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data);
 219
 220	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
 221}
 222
 223/* Prepares DMA data for current/next transfer, returns non-zero on failure */
 224static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
 225				       struct mmc_data *data,
 226				       struct jz4740_mmc_host_next *next,
 227				       struct dma_chan *chan)
 228{
 229	struct jz4740_mmc_host_next *next_data = &host->next_data;
 230	enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data);
 231	int sg_len;
 232
 233	if (!next && data->host_cookie &&
 234	    data->host_cookie != host->next_data.cookie) {
 235		dev_warn(mmc_dev(host->mmc),
 236			 "[%s] invalid cookie: data->host_cookie %d host->next_data.cookie %d\n",
 237			 __func__,
 238			 data->host_cookie,
 239			 host->next_data.cookie);
 240		data->host_cookie = 0;
 241	}
 242
 243	/* Check if next job is already prepared */
 244	if (next || data->host_cookie != host->next_data.cookie) {
 245		sg_len = dma_map_sg(chan->device->dev,
 246				    data->sg,
 247				    data->sg_len,
 248				    dir);
 249
 250	} else {
 251		sg_len = next_data->sg_len;
 252		next_data->sg_len = 0;
 253	}
 254
 255	if (sg_len <= 0) {
 256		dev_err(mmc_dev(host->mmc),
 257			"Failed to map scatterlist for DMA operation\n");
 258		return -EINVAL;
 259	}
 260
 261	if (next) {
 262		next->sg_len = sg_len;
 263		data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
 264	} else
 265		host->sg_len = sg_len;
 266
 267	return 0;
 268}
 269
 270static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
 271					 struct mmc_data *data)
 272{
 273	int ret;
 274	struct dma_chan *chan;
 275	struct dma_async_tx_descriptor *desc;
 276	struct dma_slave_config conf = {
 277		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 278		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 279		.src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
 280		.dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
 281	};
 282
 283	if (data->flags & MMC_DATA_WRITE) {
 284		conf.direction = DMA_MEM_TO_DEV;
 285		conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
 286		conf.slave_id = JZ4740_DMA_TYPE_MMC_TRANSMIT;
 287		chan = host->dma_tx;
 288	} else {
 289		conf.direction = DMA_DEV_TO_MEM;
 290		conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
 291		conf.slave_id = JZ4740_DMA_TYPE_MMC_RECEIVE;
 292		chan = host->dma_rx;
 293	}
 294
 295	ret = jz4740_mmc_prepare_dma_data(host, data, NULL, chan);
 296	if (ret)
 297		return ret;
 298
 299	dmaengine_slave_config(chan, &conf);
 300	desc = dmaengine_prep_slave_sg(chan,
 301				       data->sg,
 302				       host->sg_len,
 303				       conf.direction,
 304				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 305	if (!desc) {
 306		dev_err(mmc_dev(host->mmc),
 307			"Failed to allocate DMA %s descriptor",
 308			 conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX");
 309		goto dma_unmap;
 310	}
 311
 312	dmaengine_submit(desc);
 313	dma_async_issue_pending(chan);
 314
 315	return 0;
 316
 317dma_unmap:
 318	jz4740_mmc_dma_unmap(host, data);
 319	return -ENOMEM;
 320}
 321
 322static void jz4740_mmc_pre_request(struct mmc_host *mmc,
 323				   struct mmc_request *mrq,
 324				   bool is_first_req)
 325{
 326	struct jz4740_mmc_host *host = mmc_priv(mmc);
 327	struct mmc_data *data = mrq->data;
 328	struct jz4740_mmc_host_next *next_data = &host->next_data;
 329
 330	BUG_ON(data->host_cookie);
 331
 332	if (host->use_dma) {
 333		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
 334
 335		if (jz4740_mmc_prepare_dma_data(host, data, next_data, chan))
 336			data->host_cookie = 0;
 337	}
 338}
 339
 340static void jz4740_mmc_post_request(struct mmc_host *mmc,
 341				    struct mmc_request *mrq,
 342				    int err)
 343{
 344	struct jz4740_mmc_host *host = mmc_priv(mmc);
 345	struct mmc_data *data = mrq->data;
 346
 347	if (host->use_dma && data->host_cookie) {
 348		jz4740_mmc_dma_unmap(host, data);
 349		data->host_cookie = 0;
 350	}
 351
 352	if (err) {
 353		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
 354
 355		dmaengine_terminate_all(chan);
 356	}
 357}
 358
 359/*----------------------------------------------------------------------------*/
 360
 361static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
 362	unsigned int irq, bool enabled)
 363{
 364	unsigned long flags;
 365
 366	spin_lock_irqsave(&host->lock, flags);
 367	if (enabled)
 368		host->irq_mask &= ~irq;
 369	else
 370		host->irq_mask |= irq;
 371	spin_unlock_irqrestore(&host->lock, flags);
 372
 373	writew(host->irq_mask, host->base + JZ_REG_MMC_IMASK);
 374}
 375
 376static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
 377	bool start_transfer)
 378{
 379	uint16_t val = JZ_MMC_STRPCL_CLOCK_START;
 380
 381	if (start_transfer)
 382		val |= JZ_MMC_STRPCL_START_OP;
 383
 384	writew(val, host->base + JZ_REG_MMC_STRPCL);
 385}
 386
 387static void jz4740_mmc_clock_disable(struct jz4740_mmc_host *host)
 388{
 389	uint32_t status;
 390	unsigned int timeout = 1000;
 391
 392	writew(JZ_MMC_STRPCL_CLOCK_STOP, host->base + JZ_REG_MMC_STRPCL);
 393	do {
 394		status = readl(host->base + JZ_REG_MMC_STATUS);
 395	} while (status & JZ_MMC_STATUS_CLK_EN && --timeout);
 396}
 397
 398static void jz4740_mmc_reset(struct jz4740_mmc_host *host)
 399{
 400	uint32_t status;
 401	unsigned int timeout = 1000;
 402
 403	writew(JZ_MMC_STRPCL_RESET, host->base + JZ_REG_MMC_STRPCL);
 404	udelay(10);
 405	do {
 406		status = readl(host->base + JZ_REG_MMC_STATUS);
 407	} while (status & JZ_MMC_STATUS_IS_RESETTING && --timeout);
 408}
 409
 410static void jz4740_mmc_request_done(struct jz4740_mmc_host *host)
 411{
 412	struct mmc_request *req;
 413
 414	req = host->req;
 415	host->req = NULL;
 416
 417	mmc_request_done(host->mmc, req);
 418}
 419
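/*
 * Busy-wait briefly for an interrupt flag to be raised. Returns true if the
 * flag did not appear in time; in that case the interrupt is unmasked and the
 * request timeout timer re-armed, so the transfer continues from interrupt
 * context instead.
 */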
 420static unsigned int jz4740_mmc_poll_irq(struct jz4740_mmc_host *host,
 421	unsigned int irq)
 422{
 423	unsigned int timeout = 0x800;
 424	uint16_t status;
 425
 426	do {
 427		status = readw(host->base + JZ_REG_MMC_IREG);
 428	} while (!(status & irq) && --timeout);
 429
 430	if (timeout == 0) {
 431		set_bit(0, &host->waiting);
 432		mod_timer(&host->timeout_timer, jiffies + 5*HZ);
 433		jz4740_mmc_set_irq_enabled(host, irq, true);
 434		return true;
 435	}
 436
 437	return false;
 438}
 439
 440static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host,
 441	struct mmc_data *data)
 442{
 443	int status;
 444
 445	status = readl(host->base + JZ_REG_MMC_STATUS);
 446	if (status & JZ_MMC_STATUS_WRITE_ERROR_MASK) {
 447		if (status & (JZ_MMC_STATUS_TIMEOUT_WRITE)) {
 448			host->req->cmd->error = -ETIMEDOUT;
 449			data->error = -ETIMEDOUT;
 450		} else {
 451			host->req->cmd->error = -EIO;
 452			data->error = -EIO;
 453		}
 454	} else if (status & JZ_MMC_STATUS_READ_ERROR_MASK) {
 455		if (status & (JZ_MMC_STATUS_TIMEOUT_READ)) {
 456			host->req->cmd->error = -ETIMEDOUT;
 457			data->error = -ETIMEDOUT;
 458		} else {
 459			host->req->cmd->error = -EIO;
 460			data->error = -EIO;
 461		}
 462	}
 463}
 464
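/*
 * PIO write path: wait for the TX FIFO watermark interrupt, then feed the
 * FIFO eight 32-bit words (half the FIFO) at a time; the tail of each
 * scatterlist segment is written word by word. Returns true if polling for
 * the FIFO interrupt timed out.
 */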
 465static bool jz4740_mmc_write_data(struct jz4740_mmc_host *host,
 466	struct mmc_data *data)
 467{
 468	struct sg_mapping_iter *miter = &host->miter;
 469	void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO;
 470	uint32_t *buf;
 471	bool timeout;
 472	size_t i, j;
 473
 474	while (sg_miter_next(miter)) {
 475		buf = miter->addr;
 476		i = miter->length / 4;
 477		j = i / 8;
 478		i = i & 0x7;
 479		while (j) {
 480			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
 481			if (unlikely(timeout))
 482				goto poll_timeout;
 483
 484			writel(buf[0], fifo_addr);
 485			writel(buf[1], fifo_addr);
 486			writel(buf[2], fifo_addr);
 487			writel(buf[3], fifo_addr);
 488			writel(buf[4], fifo_addr);
 489			writel(buf[5], fifo_addr);
 490			writel(buf[6], fifo_addr);
 491			writel(buf[7], fifo_addr);
 492			buf += 8;
 493			--j;
 494		}
 495		if (unlikely(i)) {
 496			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
 497			if (unlikely(timeout))
 498				goto poll_timeout;
 499
 500			while (i) {
 501				writel(*buf, fifo_addr);
 502				++buf;
 503				--i;
 504			}
 505		}
 506		data->bytes_xfered += miter->length;
 507	}
 508	sg_miter_stop(miter);
 509
 510	return false;
 511
 512poll_timeout:
 513	miter->consumed = (void *)buf - miter->addr;
 514	data->bytes_xfered += miter->consumed;
 515	sg_miter_stop(miter);
 516
 517	return true;
 518}
 519
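/*
 * PIO read path: drain the RX FIFO in 32-byte chunks per watermark interrupt;
 * a trailing partial word is copied out with memcpy(). Returns true if
 * polling for the FIFO interrupt timed out.
 */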
 520static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host,
 521				struct mmc_data *data)
 522{
 523	struct sg_mapping_iter *miter = &host->miter;
 524	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO;
 525	uint32_t *buf;
 526	uint32_t d;
 527	uint16_t status;
 528	size_t i, j;
 529	unsigned int timeout;
 530
 531	while (sg_miter_next(miter)) {
 532		buf = miter->addr;
 533		i = miter->length;
 534		j = i / 32;
 535		i = i & 0x1f;
 536		while (j) {
 537			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
 538			if (unlikely(timeout))
 539				goto poll_timeout;
 540
 541			buf[0] = readl(fifo_addr);
 542			buf[1] = readl(fifo_addr);
 543			buf[2] = readl(fifo_addr);
 544			buf[3] = readl(fifo_addr);
 545			buf[4] = readl(fifo_addr);
 546			buf[5] = readl(fifo_addr);
 547			buf[6] = readl(fifo_addr);
 548			buf[7] = readl(fifo_addr);
 549
 550			buf += 8;
 551			--j;
 552		}
 553
 554		if (unlikely(i)) {
 555			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
 556			if (unlikely(timeout))
 557				goto poll_timeout;
 558
 559			while (i >= 4) {
 560				*buf++ = readl(fifo_addr);
 561				i -= 4;
 562			}
 563			if (unlikely(i > 0)) {
 564				d = readl(fifo_addr);
 565				memcpy(buf, &d, i);
 566			}
 567		}
 568		data->bytes_xfered += miter->length;
 569
 570		/* This can go away once MIPS implements
 571		 * flush_kernel_dcache_page */
 572		flush_dcache_page(miter->page);
 573	}
 574	sg_miter_stop(miter);
 575
  576	/* For whatever reason there is sometimes one word more in the fifo than
 577	 * requested */
 578	timeout = 1000;
 579	status = readl(host->base + JZ_REG_MMC_STATUS);
 580	while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) {
 581		d = readl(fifo_addr);
 582		status = readl(host->base + JZ_REG_MMC_STATUS);
 583	}
 584
 585	return false;
 586
 587poll_timeout:
 588	miter->consumed = (void *)buf - miter->addr;
 589	data->bytes_xfered += miter->consumed;
 590	sg_miter_stop(miter);
 591
 592	return true;
 593}
 594
 595static void jz4740_mmc_timeout(unsigned long data)
 596{
 597	struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)data;
 598
 599	if (!test_and_clear_bit(0, &host->waiting))
 600		return;
 601
 602	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, false);
 603
 604	host->req->cmd->error = -ETIMEDOUT;
 605	jz4740_mmc_request_done(host);
 606}
 607
 608static void jz4740_mmc_read_response(struct jz4740_mmc_host *host,
 609	struct mmc_command *cmd)
 610{
 611	int i;
 612	uint16_t tmp;
 613	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RESP_FIFO;
 614
 615	if (cmd->flags & MMC_RSP_136) {
 616		tmp = readw(fifo_addr);
 617		for (i = 0; i < 4; ++i) {
 618			cmd->resp[i] = tmp << 24;
 619			tmp = readw(fifo_addr);
 620			cmd->resp[i] |= tmp << 8;
 621			tmp = readw(fifo_addr);
 622			cmd->resp[i] |= tmp >> 8;
 623		}
 624	} else {
 625		cmd->resp[0] = readw(fifo_addr) << 24;
 626		cmd->resp[0] |= readw(fifo_addr) << 8;
 627		cmd->resp[0] |= readw(fifo_addr) & 0xff;
 628	}
 629}
 630
 631static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
 632	struct mmc_command *cmd)
 633{
 634	uint32_t cmdat = host->cmdat;
 635
 636	host->cmdat &= ~JZ_MMC_CMDAT_INIT;
 637	jz4740_mmc_clock_disable(host);
 638
 639	host->cmd = cmd;
 640
 641	if (cmd->flags & MMC_RSP_BUSY)
 642		cmdat |= JZ_MMC_CMDAT_BUSY;
 643
 644	switch (mmc_resp_type(cmd)) {
 645	case MMC_RSP_R1B:
 646	case MMC_RSP_R1:
 647		cmdat |= JZ_MMC_CMDAT_RSP_R1;
 648		break;
 649	case MMC_RSP_R2:
 650		cmdat |= JZ_MMC_CMDAT_RSP_R2;
 651		break;
 652	case MMC_RSP_R3:
 653		cmdat |= JZ_MMC_CMDAT_RSP_R3;
 654		break;
 655	default:
 656		break;
 657	}
 658
 659	if (cmd->data) {
 660		cmdat |= JZ_MMC_CMDAT_DATA_EN;
 661		if (cmd->data->flags & MMC_DATA_WRITE)
 662			cmdat |= JZ_MMC_CMDAT_WRITE;
 663		if (host->use_dma)
 664			cmdat |= JZ_MMC_CMDAT_DMA_EN;
 665
 666		writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN);
 667		writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB);
 668	}
 669
 670	writeb(cmd->opcode, host->base + JZ_REG_MMC_CMD);
 671	writel(cmd->arg, host->base + JZ_REG_MMC_ARG);
 672	writel(cmdat, host->base + JZ_REG_MMC_CMDAT);
 673
 674	jz4740_mmc_clock_enable(host, 1);
 675}
 676
 677static void jz_mmc_prepare_data_transfer(struct jz4740_mmc_host *host)
 678{
 679	struct mmc_command *cmd = host->req->cmd;
 680	struct mmc_data *data = cmd->data;
 681	int direction;
 682
 683	if (data->flags & MMC_DATA_READ)
 684		direction = SG_MITER_TO_SG;
 685	else
 686		direction = SG_MITER_FROM_SG;
 687
 688	sg_miter_start(&host->miter, data->sg, data->sg_len, direction);
 689}
 690
 691
 692static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
 693{
 694	struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid;
 695	struct mmc_command *cmd = host->req->cmd;
 696	struct mmc_request *req = host->req;
 697	struct mmc_data *data = cmd->data;
 698	bool timeout = false;
 699
 700	if (cmd->error)
 701		host->state = JZ4740_MMC_STATE_DONE;
 702
 703	switch (host->state) {
 704	case JZ4740_MMC_STATE_READ_RESPONSE:
 705		if (cmd->flags & MMC_RSP_PRESENT)
 706			jz4740_mmc_read_response(host, cmd);
 707
 708		if (!data)
 709			break;
 710
 711		jz_mmc_prepare_data_transfer(host);
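		/* fall through */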
 712
 713	case JZ4740_MMC_STATE_TRANSFER_DATA:
 714		if (host->use_dma) {
 715			/* Use DMA if enabled.
 716			 * Data transfer direction is defined later by
 717			 * relying on data flags in
 718			 * jz4740_mmc_prepare_dma_data() and
 719			 * jz4740_mmc_start_dma_transfer().
 720			 */
 721			timeout = jz4740_mmc_start_dma_transfer(host, data);
 722			data->bytes_xfered = data->blocks * data->blksz;
 723		} else if (data->flags & MMC_DATA_READ)
 724			/* Use PIO if DMA is not enabled.
 725			 * Data transfer direction was defined before
 726			 * by relying on data flags in
 727			 * jz_mmc_prepare_data_transfer().
 728			 */
 729			timeout = jz4740_mmc_read_data(host, data);
 730		else
 731			timeout = jz4740_mmc_write_data(host, data);
 732
 733		if (unlikely(timeout)) {
 734			host->state = JZ4740_MMC_STATE_TRANSFER_DATA;
 735			break;
 736		}
 737
 738		jz4740_mmc_transfer_check_state(host, data);
 739
 740		timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
 741		if (unlikely(timeout)) {
 742			host->state = JZ4740_MMC_STATE_SEND_STOP;
 743			break;
 744		}
 745		writew(JZ_MMC_IRQ_DATA_TRAN_DONE, host->base + JZ_REG_MMC_IREG);
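		/* fall through */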
 746
 747	case JZ4740_MMC_STATE_SEND_STOP:
 748		if (!req->stop)
 749			break;
 750
 751		jz4740_mmc_send_command(host, req->stop);
 752
 753		if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) {
 754			timeout = jz4740_mmc_poll_irq(host,
 755						      JZ_MMC_IRQ_PRG_DONE);
 756			if (timeout) {
 757				host->state = JZ4740_MMC_STATE_DONE;
 758				break;
 759			}
 760		}
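		/* fall through */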
 761	case JZ4740_MMC_STATE_DONE:
 762		break;
 763	}
 764
 765	if (!timeout)
 766		jz4740_mmc_request_done(host);
 767
 768	return IRQ_HANDLED;
 769}
 770
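/*
 * Hard interrupt handler: acknowledges the controller, records command and
 * CRC errors from the status register, and wakes the threaded handler
 * (jz_mmc_irq_worker) to advance the request state machine.
 */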
 771static irqreturn_t jz_mmc_irq(int irq, void *devid)
 772{
 773	struct jz4740_mmc_host *host = devid;
 774	struct mmc_command *cmd = host->cmd;
 775	uint16_t irq_reg, status, tmp;
 776
 777	irq_reg = readw(host->base + JZ_REG_MMC_IREG);
 778
 779	tmp = irq_reg;
 780	irq_reg &= ~host->irq_mask;
 781
 782	tmp &= ~(JZ_MMC_IRQ_TXFIFO_WR_REQ | JZ_MMC_IRQ_RXFIFO_RD_REQ |
 783		JZ_MMC_IRQ_PRG_DONE | JZ_MMC_IRQ_DATA_TRAN_DONE);
 784
 785	if (tmp != irq_reg)
 786		writew(tmp & ~irq_reg, host->base + JZ_REG_MMC_IREG);
 787
 788	if (irq_reg & JZ_MMC_IRQ_SDIO) {
 789		writew(JZ_MMC_IRQ_SDIO, host->base + JZ_REG_MMC_IREG);
 790		mmc_signal_sdio_irq(host->mmc);
 791		irq_reg &= ~JZ_MMC_IRQ_SDIO;
 792	}
 793
 794	if (host->req && cmd && irq_reg) {
 795		if (test_and_clear_bit(0, &host->waiting)) {
 796			del_timer(&host->timeout_timer);
 797
 798			status = readl(host->base + JZ_REG_MMC_STATUS);
 799
  800			if (status & JZ_MMC_STATUS_TIMEOUT_RES) {
  801				cmd->error = -ETIMEDOUT;
  802			} else if (status & JZ_MMC_STATUS_CRC_RES_ERR) {
  803				cmd->error = -EIO;
  804			} else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR |
  805				    JZ_MMC_STATUS_CRC_WRITE_ERROR)) {
  806				if (cmd->data)
  807					cmd->data->error = -EIO;
  808				cmd->error = -EIO;
  809			}
 810
 811			jz4740_mmc_set_irq_enabled(host, irq_reg, false);
 812			writew(irq_reg, host->base + JZ_REG_MMC_IREG);
 813
 814			return IRQ_WAKE_THREAD;
 815		}
 816	}
 817
 818	return IRQ_HANDLED;
 819}
 820
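/*
 * The controller divides the fixed 24 MHz base clock by powers of two
 * (divider field 0-7). Pick the smallest divider that brings the clock down
 * to at most the requested rate, program CLKRT and return the actual rate.
 */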
 821static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate)
 822{
 823	int div = 0;
 824	int real_rate;
 825
 826	jz4740_mmc_clock_disable(host);
 827	clk_set_rate(host->clk, JZ_MMC_CLK_RATE);
 828
 829	real_rate = clk_get_rate(host->clk);
 830
 831	while (real_rate > rate && div < 7) {
 832		++div;
 833		real_rate >>= 1;
 834	}
 835
 836	writew(div, host->base + JZ_REG_MMC_CLKRT);
 837	return real_rate;
 838}
 839
 840static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
 841{
 842	struct jz4740_mmc_host *host = mmc_priv(mmc);
 843
 844	host->req = req;
 845
 846	writew(0xffff, host->base + JZ_REG_MMC_IREG);
 847
 848	writew(JZ_MMC_IRQ_END_CMD_RES, host->base + JZ_REG_MMC_IREG);
 849	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, true);
 850
 851	host->state = JZ4740_MMC_STATE_READ_RESPONSE;
 852	set_bit(0, &host->waiting);
 853	mod_timer(&host->timeout_timer, jiffies + 5*HZ);
 854	jz4740_mmc_send_command(host, req->cmd);
 855}
 856
 857static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 858{
 859	struct jz4740_mmc_host *host = mmc_priv(mmc);
 860	if (ios->clock)
 861		jz4740_mmc_set_clock_rate(host, ios->clock);
 862
 863	switch (ios->power_mode) {
 864	case MMC_POWER_UP:
 865		jz4740_mmc_reset(host);
 866		if (gpio_is_valid(host->pdata->gpio_power))
 867			gpio_set_value(host->pdata->gpio_power,
 868					!host->pdata->power_active_low);
 869		host->cmdat |= JZ_MMC_CMDAT_INIT;
 870		clk_prepare_enable(host->clk);
 871		break;
 872	case MMC_POWER_ON:
 873		break;
 874	default:
 875		if (gpio_is_valid(host->pdata->gpio_power))
 876			gpio_set_value(host->pdata->gpio_power,
 877					host->pdata->power_active_low);
 878		clk_disable_unprepare(host->clk);
 879		break;
 880	}
 881
 882	switch (ios->bus_width) {
 883	case MMC_BUS_WIDTH_1:
 884		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
 885		break;
 886	case MMC_BUS_WIDTH_4:
 887		host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
 888		break;
 889	default:
 890		break;
 891	}
 892}
 893
 894static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
 895{
 896	struct jz4740_mmc_host *host = mmc_priv(mmc);
 897	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, enable);
 898}
 899
 900static const struct mmc_host_ops jz4740_mmc_ops = {
 901	.request	= jz4740_mmc_request,
 902	.pre_req	= jz4740_mmc_pre_request,
 903	.post_req	= jz4740_mmc_post_request,
 904	.set_ios	= jz4740_mmc_set_ios,
 905	.get_ro		= mmc_gpio_get_ro,
 906	.get_cd		= mmc_gpio_get_cd,
 907	.enable_sdio_irq = jz4740_mmc_enable_sdio_irq,
 908};
 909
 910static const struct jz_gpio_bulk_request jz4740_mmc_pins[] = {
 911	JZ_GPIO_BULK_PIN(MSC_CMD),
 912	JZ_GPIO_BULK_PIN(MSC_CLK),
 913	JZ_GPIO_BULK_PIN(MSC_DATA0),
 914	JZ_GPIO_BULK_PIN(MSC_DATA1),
 915	JZ_GPIO_BULK_PIN(MSC_DATA2),
 916	JZ_GPIO_BULK_PIN(MSC_DATA3),
 917};
 918
 919static int jz4740_mmc_request_gpio(struct device *dev, int gpio,
 920	const char *name, bool output, int value)
 921{
 922	int ret;
 923
 924	if (!gpio_is_valid(gpio))
 925		return 0;
 926
 927	ret = gpio_request(gpio, name);
 928	if (ret) {
 929		dev_err(dev, "Failed to request %s gpio: %d\n", name, ret);
 930		return ret;
 931	}
 932
 933	if (output)
 934		gpio_direction_output(gpio, value);
 935	else
 936		gpio_direction_input(gpio);
 937
 938	return 0;
 939}
 940
 941static int jz4740_mmc_request_gpios(struct mmc_host *mmc,
 942	struct platform_device *pdev)
 943{
 944	struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data;
 945	int ret = 0;
 946
 947	if (!pdata)
 948		return 0;
 949
 950	if (!pdata->card_detect_active_low)
 951		mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
 952	if (!pdata->read_only_active_low)
 953		mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
 954
 955	if (gpio_is_valid(pdata->gpio_card_detect)) {
 956		ret = mmc_gpio_request_cd(mmc, pdata->gpio_card_detect, 0);
 957		if (ret)
 958			return ret;
 959	}
 960
 961	if (gpio_is_valid(pdata->gpio_read_only)) {
 962		ret = mmc_gpio_request_ro(mmc, pdata->gpio_read_only);
 963		if (ret)
 964			return ret;
 965	}
 966
 967	return jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_power,
  968			"MMC power", true, pdata->power_active_low);
 969}
 970
 971static void jz4740_mmc_free_gpios(struct platform_device *pdev)
 972{
 973	struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data;
 974
 975	if (!pdata)
 976		return;
 977
 978	if (gpio_is_valid(pdata->gpio_power))
 979		gpio_free(pdata->gpio_power);
 980}
 981
 982static inline size_t jz4740_mmc_num_pins(struct jz4740_mmc_host *host)
 983{
 984	size_t num_pins = ARRAY_SIZE(jz4740_mmc_pins);
 985	if (host->pdata && host->pdata->data_1bit)
 986		num_pins -= 3;
 987
 988	return num_pins;
 989}
 990
 991static int jz4740_mmc_probe(struct platform_device* pdev)
 992{
 993	int ret;
 994	struct mmc_host *mmc;
 995	struct jz4740_mmc_host *host;
 996	struct jz4740_mmc_platform_data *pdata;
 997
 998	pdata = pdev->dev.platform_data;
 999
1000	mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev);
1001	if (!mmc) {
1002		dev_err(&pdev->dev, "Failed to alloc mmc host structure\n");
1003		return -ENOMEM;
1004	}
1005
1006	host = mmc_priv(mmc);
1007	host->pdata = pdata;
1008
1009	host->irq = platform_get_irq(pdev, 0);
1010	if (host->irq < 0) {
1011		ret = host->irq;
1012		dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret);
1013		goto err_free_host;
1014	}
1015
1016	host->clk = devm_clk_get(&pdev->dev, "mmc");
1017	if (IS_ERR(host->clk)) {
1018		ret = PTR_ERR(host->clk);
1019		dev_err(&pdev->dev, "Failed to get mmc clock\n");
1020		goto err_free_host;
1021	}
1022
1023	host->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1024	host->base = devm_ioremap_resource(&pdev->dev, host->mem_res);
1025	if (IS_ERR(host->base)) {
1026		ret = PTR_ERR(host->base);
1027		dev_err(&pdev->dev, "Failed to ioremap base memory\n");
1028		goto err_free_host;
1029	}
1030
1031	ret = jz_gpio_bulk_request(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
1032	if (ret) {
1033		dev_err(&pdev->dev, "Failed to request mmc pins: %d\n", ret);
1034		goto err_free_host;
1035	}
1036
1037	ret = jz4740_mmc_request_gpios(mmc, pdev);
1038	if (ret)
1039		goto err_gpio_bulk_free;
1040
1041	mmc->ops = &jz4740_mmc_ops;
1042	mmc->f_min = JZ_MMC_CLK_RATE / 128;
1043	mmc->f_max = JZ_MMC_CLK_RATE;
1044	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1045	mmc->caps = (pdata && pdata->data_1bit) ? 0 : MMC_CAP_4_BIT_DATA;
1046	mmc->caps |= MMC_CAP_SDIO_IRQ;
1047
1048	mmc->max_blk_size = (1 << 10) - 1;
1049	mmc->max_blk_count = (1 << 15) - 1;
1050	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1051
1052	mmc->max_segs = 128;
1053	mmc->max_seg_size = mmc->max_req_size;
1054
1055	host->mmc = mmc;
1056	host->pdev = pdev;
1057	spin_lock_init(&host->lock);
1058	host->irq_mask = 0xffff;
1059
1060	ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0,
1061			dev_name(&pdev->dev), host);
1062	if (ret) {
1063		dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
1064		goto err_free_gpios;
1065	}
1066
1067	jz4740_mmc_reset(host);
1068	jz4740_mmc_clock_disable(host);
1069	setup_timer(&host->timeout_timer, jz4740_mmc_timeout,
1070			(unsigned long)host);
 1071	/* It is not important when it times out, it just needs to time out. */
1072	set_timer_slack(&host->timeout_timer, HZ);
1073
1074	host->use_dma = true;
1075	if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0)
1076		host->use_dma = false;
1077
1078	platform_set_drvdata(pdev, host);
1079	ret = mmc_add_host(mmc);
1080
1081	if (ret) {
1082		dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret);
1083		goto err_free_irq;
1084	}
1085	dev_info(&pdev->dev, "JZ SD/MMC card driver registered\n");
1086
1087	dev_info(&pdev->dev, "Using %s, %d-bit mode\n",
1088		 host->use_dma ? "DMA" : "PIO",
1089		 (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);
1090
1091	return 0;
1092
1093err_free_irq:
1094	free_irq(host->irq, host);
1095err_free_gpios:
1096	jz4740_mmc_free_gpios(pdev);
1097err_gpio_bulk_free:
1098	if (host->use_dma)
1099		jz4740_mmc_release_dma_channels(host);
1100	jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
1101err_free_host:
1102	mmc_free_host(mmc);
1103
1104	return ret;
1105}
1106
1107static int jz4740_mmc_remove(struct platform_device *pdev)
1108{
1109	struct jz4740_mmc_host *host = platform_get_drvdata(pdev);
1110
1111	del_timer_sync(&host->timeout_timer);
1112	jz4740_mmc_set_irq_enabled(host, 0xff, false);
1113	jz4740_mmc_reset(host);
1114
1115	mmc_remove_host(host->mmc);
1116
1117	free_irq(host->irq, host);
1118
1119	jz4740_mmc_free_gpios(pdev);
1120	jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
1121
1122	if (host->use_dma)
1123		jz4740_mmc_release_dma_channels(host);
1124
1125	mmc_free_host(host->mmc);
1126
1127	return 0;
1128}
1129
1130#ifdef CONFIG_PM_SLEEP
1131
1132static int jz4740_mmc_suspend(struct device *dev)
1133{
1134	struct jz4740_mmc_host *host = dev_get_drvdata(dev);
1135
1136	jz_gpio_bulk_suspend(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
1137
1138	return 0;
1139}
1140
1141static int jz4740_mmc_resume(struct device *dev)
1142{
1143	struct jz4740_mmc_host *host = dev_get_drvdata(dev);
1144
1145	jz_gpio_bulk_resume(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
1146
1147	return 0;
1148}
1149
1150static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
1151	jz4740_mmc_resume);
1152#define JZ4740_MMC_PM_OPS (&jz4740_mmc_pm_ops)
1153#else
1154#define JZ4740_MMC_PM_OPS NULL
1155#endif
1156
1157static struct platform_driver jz4740_mmc_driver = {
1158	.probe = jz4740_mmc_probe,
1159	.remove = jz4740_mmc_remove,
1160	.driver = {
1161		.name = "jz4740-mmc",
1162		.pm = JZ4740_MMC_PM_OPS,
1163	},
1164};
1165
1166module_platform_driver(jz4740_mmc_driver);
1167
1168MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver");
1169MODULE_LICENSE("GPL");
1170MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
v6.9.4 (drivers/mmc/host/jz4740_mmc.c)
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
   4 *  Copyright (C) 2013, Imagination Technologies
   5 *
   6 *  JZ4740 SD/MMC controller driver
   7 */
   8
   9#include <linux/bitops.h>
  10#include <linux/clk.h>
  11#include <linux/delay.h>
  12#include <linux/dmaengine.h>
  13#include <linux/dma-mapping.h>
  14#include <linux/err.h>
  15#include <linux/interrupt.h>
  16#include <linux/io.h>
  17#include <linux/irq.h>
  18#include <linux/mmc/host.h>
  19#include <linux/mmc/slot-gpio.h>
  20#include <linux/module.h>
  21#include <linux/of.h>
  22#include <linux/pinctrl/consumer.h>
  23#include <linux/platform_device.h>
  24#include <linux/property.h>
  25#include <linux/regulator/consumer.h>
  26#include <linux/scatterlist.h>
  27
  28#include <asm/cacheflush.h>
  29
  30#define JZ_REG_MMC_STRPCL	0x00
  31#define JZ_REG_MMC_STATUS	0x04
  32#define JZ_REG_MMC_CLKRT	0x08
  33#define JZ_REG_MMC_CMDAT	0x0C
  34#define JZ_REG_MMC_RESTO	0x10
  35#define JZ_REG_MMC_RDTO		0x14
  36#define JZ_REG_MMC_BLKLEN	0x18
  37#define JZ_REG_MMC_NOB		0x1C
  38#define JZ_REG_MMC_SNOB		0x20
  39#define JZ_REG_MMC_IMASK	0x24
  40#define JZ_REG_MMC_IREG		0x28
  41#define JZ_REG_MMC_CMD		0x2C
  42#define JZ_REG_MMC_ARG		0x30
  43#define JZ_REG_MMC_RESP_FIFO	0x34
  44#define JZ_REG_MMC_RXFIFO	0x38
  45#define JZ_REG_MMC_TXFIFO	0x3C
  46#define JZ_REG_MMC_LPM		0x40
  47#define JZ_REG_MMC_DMAC		0x44
  48
  49#define JZ_MMC_STRPCL_EXIT_MULTIPLE BIT(7)
  50#define JZ_MMC_STRPCL_EXIT_TRANSFER BIT(6)
  51#define JZ_MMC_STRPCL_START_READWAIT BIT(5)
  52#define JZ_MMC_STRPCL_STOP_READWAIT BIT(4)
  53#define JZ_MMC_STRPCL_RESET BIT(3)
  54#define JZ_MMC_STRPCL_START_OP BIT(2)
  55#define JZ_MMC_STRPCL_CLOCK_CONTROL (BIT(1) | BIT(0))
  56#define JZ_MMC_STRPCL_CLOCK_STOP BIT(0)
  57#define JZ_MMC_STRPCL_CLOCK_START BIT(1)
  58
  59
  60#define JZ_MMC_STATUS_IS_RESETTING BIT(15)
  61#define JZ_MMC_STATUS_SDIO_INT_ACTIVE BIT(14)
  62#define JZ_MMC_STATUS_PRG_DONE BIT(13)
  63#define JZ_MMC_STATUS_DATA_TRAN_DONE BIT(12)
  64#define JZ_MMC_STATUS_END_CMD_RES BIT(11)
  65#define JZ_MMC_STATUS_DATA_FIFO_AFULL BIT(10)
  66#define JZ_MMC_STATUS_IS_READWAIT BIT(9)
  67#define JZ_MMC_STATUS_CLK_EN BIT(8)
  68#define JZ_MMC_STATUS_DATA_FIFO_FULL BIT(7)
  69#define JZ_MMC_STATUS_DATA_FIFO_EMPTY BIT(6)
  70#define JZ_MMC_STATUS_CRC_RES_ERR BIT(5)
  71#define JZ_MMC_STATUS_CRC_READ_ERROR BIT(4)
  72#define JZ_MMC_STATUS_TIMEOUT_WRITE BIT(3)
  73#define JZ_MMC_STATUS_CRC_WRITE_ERROR BIT(2)
  74#define JZ_MMC_STATUS_TIMEOUT_RES BIT(1)
  75#define JZ_MMC_STATUS_TIMEOUT_READ BIT(0)
  76
  77#define JZ_MMC_STATUS_READ_ERROR_MASK (BIT(4) | BIT(0))
  78#define JZ_MMC_STATUS_WRITE_ERROR_MASK (BIT(3) | BIT(2))
  79
  80
  81#define JZ_MMC_CMDAT_IO_ABORT BIT(11)
  82#define JZ_MMC_CMDAT_BUS_WIDTH_4BIT BIT(10)
  83#define JZ_MMC_CMDAT_BUS_WIDTH_8BIT (BIT(10) | BIT(9))
  84#define	JZ_MMC_CMDAT_BUS_WIDTH_MASK (BIT(10) | BIT(9))
  85#define JZ_MMC_CMDAT_DMA_EN BIT(8)
  86#define JZ_MMC_CMDAT_INIT BIT(7)
  87#define JZ_MMC_CMDAT_BUSY BIT(6)
  88#define JZ_MMC_CMDAT_STREAM BIT(5)
  89#define JZ_MMC_CMDAT_WRITE BIT(4)
  90#define JZ_MMC_CMDAT_DATA_EN BIT(3)
  91#define JZ_MMC_CMDAT_RESPONSE_FORMAT (BIT(2) | BIT(1) | BIT(0))
  92#define JZ_MMC_CMDAT_RSP_R1 1
  93#define JZ_MMC_CMDAT_RSP_R2 2
  94#define JZ_MMC_CMDAT_RSP_R3 3
  95
  96#define JZ_MMC_IRQ_SDIO BIT(7)
  97#define JZ_MMC_IRQ_TXFIFO_WR_REQ BIT(6)
  98#define JZ_MMC_IRQ_RXFIFO_RD_REQ BIT(5)
  99#define JZ_MMC_IRQ_END_CMD_RES BIT(2)
 100#define JZ_MMC_IRQ_PRG_DONE BIT(1)
 101#define JZ_MMC_IRQ_DATA_TRAN_DONE BIT(0)
 102
 103#define JZ_MMC_DMAC_DMA_SEL BIT(1)
 104#define JZ_MMC_DMAC_DMA_EN BIT(0)
 105
 106#define	JZ_MMC_LPM_DRV_RISING BIT(31)
 107#define	JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY BIT(31)
 108#define	JZ_MMC_LPM_DRV_RISING_1NS_DLY BIT(30)
 109#define	JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY BIT(29)
 110#define	JZ_MMC_LPM_LOW_POWER_MODE_EN BIT(0)
 111
 112#define JZ_MMC_CLK_RATE 24000000
 113#define JZ_MMC_REQ_TIMEOUT_MS 5000
 114
 115enum jz4740_mmc_version {
 116	JZ_MMC_JZ4740,
 117	JZ_MMC_JZ4725B,
 118	JZ_MMC_JZ4760,
 119	JZ_MMC_JZ4780,
 120	JZ_MMC_X1000,
 121};
 122
 123enum jz4740_mmc_state {
 124	JZ4740_MMC_STATE_READ_RESPONSE,
 125	JZ4740_MMC_STATE_TRANSFER_DATA,
 126	JZ4740_MMC_STATE_SEND_STOP,
 127	JZ4740_MMC_STATE_DONE,
 128};
 129
 130/*
  131 * The MMC core allows preparing a mmc_request while another mmc_request
 132 * is in-flight. This is used via the pre_req/post_req hooks.
 133 * This driver uses the pre_req/post_req hooks to map/unmap the mmc_request.
 134 * Following what other drivers do (sdhci, dw_mmc) we use the following cookie
 135 * flags to keep track of the mmc_request mapping state.
 136 *
 137 * COOKIE_UNMAPPED: the request is not mapped.
 138 * COOKIE_PREMAPPED: the request was mapped in pre_req,
 139 * and should be unmapped in post_req.
 140 * COOKIE_MAPPED: the request was mapped in the irq handler,
  141 * and should be unmapped before mmc_request_done is called.
 142 */
 143enum jz4780_cookie {
 144	COOKIE_UNMAPPED = 0,
 145	COOKIE_PREMAPPED,
 146	COOKIE_MAPPED,
 147};
 148
 149struct jz4740_mmc_host {
 150	struct mmc_host *mmc;
 151	struct platform_device *pdev;
 152	struct clk *clk;
 153
 154	enum jz4740_mmc_version version;
 155
 156	int irq;
 157
 158	void __iomem *base;
 159	struct resource *mem_res;
 160	struct mmc_request *req;
 161	struct mmc_command *cmd;
 162
 163	bool vqmmc_enabled;
 164
 165	unsigned long waiting;
 166
 167	uint32_t cmdat;
 168
 169	uint32_t irq_mask;
 170
 171	spinlock_t lock;
 172
 173	struct timer_list timeout_timer;
 174	struct sg_mapping_iter miter;
 175	enum jz4740_mmc_state state;
 176
 177	/* DMA support */
 178	struct dma_chan *dma_rx;
 179	struct dma_chan *dma_tx;
 180	bool use_dma;
 181
 182/* The DMA trigger level is 8 words, that is to say, the DMA read
  183 * trigger is when there are >= 8 data words in MSC_RXFIFO and the DMA write
  184 * trigger is when there are < 8 data words in MSC_TXFIFO.
 185 */
 186#define JZ4740_MMC_FIFO_HALF_SIZE 8
 187};
 188
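/*
 * Later SoC revisions widen the IMASK and IREG registers from 16 to 32 bits;
 * these helpers select the right access width based on the controller
 * version.
 */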
 189static void jz4740_mmc_write_irq_mask(struct jz4740_mmc_host *host,
 190				      uint32_t val)
 191{
 192	if (host->version >= JZ_MMC_JZ4725B)
 193		return writel(val, host->base + JZ_REG_MMC_IMASK);
 194	else
 195		return writew(val, host->base + JZ_REG_MMC_IMASK);
 196}
 197
 198static void jz4740_mmc_write_irq_reg(struct jz4740_mmc_host *host,
 199				     uint32_t val)
 200{
 201	if (host->version >= JZ_MMC_JZ4780)
 202		writel(val, host->base + JZ_REG_MMC_IREG);
 203	else
 204		writew(val, host->base + JZ_REG_MMC_IREG);
 205}
 206
 207static uint32_t jz4740_mmc_read_irq_reg(struct jz4740_mmc_host *host)
 208{
 209	if (host->version >= JZ_MMC_JZ4780)
 210		return readl(host->base + JZ_REG_MMC_IREG);
 211	else
 212		return readw(host->base + JZ_REG_MMC_IREG);
 213}
 214
 215/*----------------------------------------------------------------------------*/
 216/* DMA infrastructure */
 217
 218static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host)
 219{
 220	if (!host->use_dma)
 221		return;
 222
 223	dma_release_channel(host->dma_tx);
 224	if (host->dma_rx)
 225		dma_release_channel(host->dma_rx);
 226}
 227
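/*
 * Try a single "tx-rx" DMA channel shared by both directions first; if the
 * DMA provider only describes separate channels, fall back to distinct "tx"
 * and "rx" channels.
 */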
 228static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
 229{
 230	struct device *dev = mmc_dev(host->mmc);
 231
 232	host->dma_tx = dma_request_chan(dev, "tx-rx");
 233	if (!IS_ERR(host->dma_tx))
 234		return 0;
 235
 236	if (PTR_ERR(host->dma_tx) != -ENODEV) {
 237		dev_err(dev, "Failed to get dma tx-rx channel\n");
 238		return PTR_ERR(host->dma_tx);
 239	}
 240
 241	host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
 242	if (IS_ERR(host->dma_tx)) {
 243		dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n");
 244		return PTR_ERR(host->dma_tx);
 245	}
 246
 247	host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
 248	if (IS_ERR(host->dma_rx)) {
 249		dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n");
 250		dma_release_channel(host->dma_tx);
 251		return PTR_ERR(host->dma_rx);
 252	}
 253
 254	/*
 255	 * Limit the maximum segment size in any SG entry according to
 256	 * the parameters of the DMA engine device.
 257	 */
 258	if (host->dma_tx) {
 259		struct device *dev = host->dma_tx->device->dev;
 260		unsigned int max_seg_size = dma_get_max_seg_size(dev);
 261
 262		if (max_seg_size < host->mmc->max_seg_size)
 263			host->mmc->max_seg_size = max_seg_size;
 264	}
 265
 266	if (host->dma_rx) {
 267		struct device *dev = host->dma_rx->device->dev;
 268		unsigned int max_seg_size = dma_get_max_seg_size(dev);
 269
 270		if (max_seg_size < host->mmc->max_seg_size)
 271			host->mmc->max_seg_size = max_seg_size;
 272	}
 273
 274	return 0;
 275}
 276
 277static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
 278						       struct mmc_data *data)
 279{
 280	if ((data->flags & MMC_DATA_READ) && host->dma_rx)
 281		return host->dma_rx;
 282	else
 283		return host->dma_tx;
 284}
 285
 286static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
 287				 struct mmc_data *data)
 288{
 289	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
 290	enum dma_data_direction dir = mmc_get_dma_dir(data);
 291
 292	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
 293	data->host_cookie = COOKIE_UNMAPPED;
 294}
 295
 296/* Prepares DMA data for current or next transfer.
 297 * A request can be in-flight when this is called.
 298 */
 299static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
 300				       struct mmc_data *data,
 301				       int cookie)
 302{
 303	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
 304	enum dma_data_direction dir = mmc_get_dma_dir(data);
 305	unsigned int sg_count;
 306
 307	if (data->host_cookie == COOKIE_PREMAPPED)
 308		return data->sg_count;
 309
 310	sg_count = dma_map_sg(chan->device->dev,
 311			data->sg,
 312			data->sg_len,
 313			dir);
 314
 315	if (!sg_count) {
 316		dev_err(mmc_dev(host->mmc),
 317			"Failed to map scatterlist for DMA operation\n");
 318		return -EINVAL;
 319	}
 320
 321	data->sg_count = sg_count;
 322	data->host_cookie = cookie;
 323
 324	return data->sg_count;
 325}
 326
 327static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
 328					 struct mmc_data *data)
 329{
 330	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
 331	struct dma_async_tx_descriptor *desc;
 332	struct dma_slave_config conf = {
 333		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 334		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 335		.src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
 336		.dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
 337	};
 338	int sg_count;
 339
 340	if (data->flags & MMC_DATA_WRITE) {
 341		conf.direction = DMA_MEM_TO_DEV;
 342		conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
 343	} else {
 344		conf.direction = DMA_DEV_TO_MEM;
 345		conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
 346	}
 347
 348	sg_count = jz4740_mmc_prepare_dma_data(host, data, COOKIE_MAPPED);
 349	if (sg_count < 0)
 350		return sg_count;
 351
 352	dmaengine_slave_config(chan, &conf);
 353	desc = dmaengine_prep_slave_sg(chan, data->sg, sg_count,
 354			conf.direction,
 355			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 356	if (!desc) {
 357		dev_err(mmc_dev(host->mmc),
 358			"Failed to allocate DMA %s descriptor",
 359			 conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX");
 360		goto dma_unmap;
 361	}
 362
 363	dmaengine_submit(desc);
 364	dma_async_issue_pending(chan);
 365
 366	return 0;
 367
 368dma_unmap:
 369	if (data->host_cookie == COOKIE_MAPPED)
 370		jz4740_mmc_dma_unmap(host, data);
 371	return -ENOMEM;
 372}
 373
 374static void jz4740_mmc_pre_request(struct mmc_host *mmc,
 375				   struct mmc_request *mrq)
 376{
 377	struct jz4740_mmc_host *host = mmc_priv(mmc);
 378	struct mmc_data *data = mrq->data;
 379
 380	if (!host->use_dma)
 381		return;
 382
 383	data->host_cookie = COOKIE_UNMAPPED;
 384	if (jz4740_mmc_prepare_dma_data(host, data, COOKIE_PREMAPPED) < 0)
 385		data->host_cookie = COOKIE_UNMAPPED;
 386}
 387
 388static void jz4740_mmc_post_request(struct mmc_host *mmc,
 389				    struct mmc_request *mrq,
 390				    int err)
 391{
 392	struct jz4740_mmc_host *host = mmc_priv(mmc);
 393	struct mmc_data *data = mrq->data;
 394
 395	if (data && data->host_cookie != COOKIE_UNMAPPED)
 396		jz4740_mmc_dma_unmap(host, data);
 397
 398	if (err) {
 399		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
 400
 401		dmaengine_terminate_all(chan);
 402	}
 403}
 404
 405/*----------------------------------------------------------------------------*/
 406
 407static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
 408	unsigned int irq, bool enabled)
 409{
 410	unsigned long flags;
 411
 412	spin_lock_irqsave(&host->lock, flags);
 413	if (enabled)
 414		host->irq_mask &= ~irq;
 415	else
 416		host->irq_mask |= irq;
 417
 418	jz4740_mmc_write_irq_mask(host, host->irq_mask);
 419	spin_unlock_irqrestore(&host->lock, flags);
 420}
 421
 422static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
 423	bool start_transfer)
 424{
 425	uint16_t val = JZ_MMC_STRPCL_CLOCK_START;
 426
 427	if (start_transfer)
 428		val |= JZ_MMC_STRPCL_START_OP;
 429
 430	writew(val, host->base + JZ_REG_MMC_STRPCL);
 431}
 432
 433static void jz4740_mmc_clock_disable(struct jz4740_mmc_host *host)
 434{
 435	uint32_t status;
 436	unsigned int timeout = 1000;
 437
 438	writew(JZ_MMC_STRPCL_CLOCK_STOP, host->base + JZ_REG_MMC_STRPCL);
 439	do {
 440		status = readl(host->base + JZ_REG_MMC_STATUS);
 441	} while (status & JZ_MMC_STATUS_CLK_EN && --timeout);
 442}
 443
 444static void jz4740_mmc_reset(struct jz4740_mmc_host *host)
 445{
 446	uint32_t status;
 447	unsigned int timeout = 1000;
 448
 449	writew(JZ_MMC_STRPCL_RESET, host->base + JZ_REG_MMC_STRPCL);
 450	udelay(10);
 451	do {
 452		status = readl(host->base + JZ_REG_MMC_STATUS);
 453	} while (status & JZ_MMC_STATUS_IS_RESETTING && --timeout);
 454}
 455
 456static void jz4740_mmc_request_done(struct jz4740_mmc_host *host)
 457{
 458	struct mmc_request *req;
 459	struct mmc_data *data;
 460
 461	req = host->req;
 462	data = req->data;
 463	host->req = NULL;
 464
 465	if (data && data->host_cookie == COOKIE_MAPPED)
 466		jz4740_mmc_dma_unmap(host, data);
 467	mmc_request_done(host->mmc, req);
 468}
 469
 470static unsigned int jz4740_mmc_poll_irq(struct jz4740_mmc_host *host,
 471	unsigned int irq)
 472{
 473	unsigned int timeout = 0x800;
 474	uint32_t status;
 475
 476	do {
 477		status = jz4740_mmc_read_irq_reg(host);
 478	} while (!(status & irq) && --timeout);
 479
 480	if (timeout == 0) {
 481		set_bit(0, &host->waiting);
 482		mod_timer(&host->timeout_timer,
 483			  jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS));
 484		jz4740_mmc_set_irq_enabled(host, irq, true);
 485		return true;
 486	}
 487
 488	return false;
 489}
 490
 491static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host,
 492	struct mmc_data *data)
 493{
 494	int status;
 495
 496	status = readl(host->base + JZ_REG_MMC_STATUS);
 497	if (status & JZ_MMC_STATUS_WRITE_ERROR_MASK) {
 498		if (status & (JZ_MMC_STATUS_TIMEOUT_WRITE)) {
 499			host->req->cmd->error = -ETIMEDOUT;
 500			data->error = -ETIMEDOUT;
 501		} else {
 502			host->req->cmd->error = -EIO;
 503			data->error = -EIO;
 504		}
 505	} else if (status & JZ_MMC_STATUS_READ_ERROR_MASK) {
 506		if (status & (JZ_MMC_STATUS_TIMEOUT_READ)) {
 507			host->req->cmd->error = -ETIMEDOUT;
 508			data->error = -ETIMEDOUT;
 509		} else {
 510			host->req->cmd->error = -EIO;
 511			data->error = -EIO;
 512		}
 513	}
 514}
 515
 516static bool jz4740_mmc_write_data(struct jz4740_mmc_host *host,
 517	struct mmc_data *data)
 518{
 519	struct sg_mapping_iter *miter = &host->miter;
 520	void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO;
 521	uint32_t *buf;
 522	bool timeout;
 523	size_t i, j;
 524
 525	while (sg_miter_next(miter)) {
 526		buf = miter->addr;
 527		i = miter->length / 4;
 528		j = i / 8;
 529		i = i & 0x7;
 530		while (j) {
 531			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
 532			if (unlikely(timeout))
 533				goto poll_timeout;
 534
 535			writel(buf[0], fifo_addr);
 536			writel(buf[1], fifo_addr);
 537			writel(buf[2], fifo_addr);
 538			writel(buf[3], fifo_addr);
 539			writel(buf[4], fifo_addr);
 540			writel(buf[5], fifo_addr);
 541			writel(buf[6], fifo_addr);
 542			writel(buf[7], fifo_addr);
 543			buf += 8;
 544			--j;
 545		}
 546		if (unlikely(i)) {
 547			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
 548			if (unlikely(timeout))
 549				goto poll_timeout;
 550
 551			while (i) {
 552				writel(*buf, fifo_addr);
 553				++buf;
 554				--i;
 555			}
 556		}
 557		data->bytes_xfered += miter->length;
 558	}
 559	sg_miter_stop(miter);
 560
 561	return false;
 562
 563poll_timeout:
 564	miter->consumed = (void *)buf - miter->addr;
 565	data->bytes_xfered += miter->consumed;
 566	sg_miter_stop(miter);
 567
 568	return true;
 569}
 570
 571static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host,
 572				struct mmc_data *data)
 573{
 574	struct sg_mapping_iter *miter = &host->miter;
 575	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO;
 576	uint32_t *buf;
 577	uint32_t d;
 578	uint32_t status;
 579	size_t i, j;
 580	unsigned int timeout;
 581
 582	while (sg_miter_next(miter)) {
 583		buf = miter->addr;
 584		i = miter->length;
 585		j = i / 32;
 586		i = i & 0x1f;
 587		while (j) {
 588			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
 589			if (unlikely(timeout))
 590				goto poll_timeout;
 591
 592			buf[0] = readl(fifo_addr);
 593			buf[1] = readl(fifo_addr);
 594			buf[2] = readl(fifo_addr);
 595			buf[3] = readl(fifo_addr);
 596			buf[4] = readl(fifo_addr);
 597			buf[5] = readl(fifo_addr);
 598			buf[6] = readl(fifo_addr);
 599			buf[7] = readl(fifo_addr);
 600
 601			buf += 8;
 602			--j;
 603		}
 604
 605		if (unlikely(i)) {
 606			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
 607			if (unlikely(timeout))
 608				goto poll_timeout;
 609
 610			while (i >= 4) {
 611				*buf++ = readl(fifo_addr);
 612				i -= 4;
 613			}
 614			if (unlikely(i > 0)) {
 615				d = readl(fifo_addr);
 616				memcpy(buf, &d, i);
 617			}
 618		}
 619		data->bytes_xfered += miter->length;
 620	}
 621	sg_miter_stop(miter);
 622
  623	/* For whatever reason there is sometimes one word more in the fifo than
 624	 * requested */
 625	timeout = 1000;
 626	status = readl(host->base + JZ_REG_MMC_STATUS);
 627	while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) {
 628		d = readl(fifo_addr);
 629		status = readl(host->base + JZ_REG_MMC_STATUS);
 630	}
 631
 632	return false;
 633
 634poll_timeout:
 635	miter->consumed = (void *)buf - miter->addr;
 636	data->bytes_xfered += miter->consumed;
 637	sg_miter_stop(miter);
 638
 639	return true;
 640}
 641
 642static void jz4740_mmc_timeout(struct timer_list *t)
 643{
 644	struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer);
 645
 646	if (!test_and_clear_bit(0, &host->waiting))
 647		return;
 648
 649	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, false);
 650
 651	host->req->cmd->error = -ETIMEDOUT;
 652	jz4740_mmc_request_done(host);
 653}
 654
 655static void jz4740_mmc_read_response(struct jz4740_mmc_host *host,
 656	struct mmc_command *cmd)
 657{
 658	int i;
 659	uint16_t tmp;
 660	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RESP_FIFO;
 661
 662	if (cmd->flags & MMC_RSP_136) {
 663		tmp = readw(fifo_addr);
 664		for (i = 0; i < 4; ++i) {
 665			cmd->resp[i] = tmp << 24;
 666			tmp = readw(fifo_addr);
 667			cmd->resp[i] |= tmp << 8;
 668			tmp = readw(fifo_addr);
 669			cmd->resp[i] |= tmp >> 8;
 670		}
 671	} else {
 672		cmd->resp[0] = readw(fifo_addr) << 24;
 673		cmd->resp[0] |= readw(fifo_addr) << 8;
 674		cmd->resp[0] |= readw(fifo_addr) & 0xff;
 675	}
 676}
 677
 678static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
 679	struct mmc_command *cmd)
 680{
 681	uint32_t cmdat = host->cmdat;
 682
 683	host->cmdat &= ~JZ_MMC_CMDAT_INIT;
 684	jz4740_mmc_clock_disable(host);
 685
 686	host->cmd = cmd;
 687
 688	if (cmd->flags & MMC_RSP_BUSY)
 689		cmdat |= JZ_MMC_CMDAT_BUSY;
 690
 691	switch (mmc_resp_type(cmd)) {
 692	case MMC_RSP_R1B:
 693	case MMC_RSP_R1:
 694		cmdat |= JZ_MMC_CMDAT_RSP_R1;
 695		break;
 696	case MMC_RSP_R2:
 697		cmdat |= JZ_MMC_CMDAT_RSP_R2;
 698		break;
 699	case MMC_RSP_R3:
 700		cmdat |= JZ_MMC_CMDAT_RSP_R3;
 701		break;
 702	default:
 703		break;
 704	}
 705
 706	if (cmd->data) {
 707		cmdat |= JZ_MMC_CMDAT_DATA_EN;
 708		if (cmd->data->flags & MMC_DATA_WRITE)
 709			cmdat |= JZ_MMC_CMDAT_WRITE;
 710		if (host->use_dma) {
 711			/*
 712			 * The JZ4780's MMC controller has integrated DMA ability
 713			 * in addition to being able to use the external DMA
 714			 * controller. It moves DMA control bits to a separate
 715			 * register. The DMA_SEL bit chooses the external
 716			 * controller over the integrated one. Earlier SoCs
 717			 * can only use the external controller, and have a
 718			 * single DMA enable bit in CMDAT.
 719			 */
 720			if (host->version >= JZ_MMC_JZ4780) {
 721				writel(JZ_MMC_DMAC_DMA_EN | JZ_MMC_DMAC_DMA_SEL,
 722				       host->base + JZ_REG_MMC_DMAC);
 723			} else {
 724				cmdat |= JZ_MMC_CMDAT_DMA_EN;
 725			}
 726		} else if (host->version >= JZ_MMC_JZ4780) {
 727			writel(0, host->base + JZ_REG_MMC_DMAC);
 728		}
 729
 730		writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN);
 731		writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB);
 732	}
 733
 734	writeb(cmd->opcode, host->base + JZ_REG_MMC_CMD);
 735	writel(cmd->arg, host->base + JZ_REG_MMC_ARG);
 736	writel(cmdat, host->base + JZ_REG_MMC_CMDAT);
 737
 738	jz4740_mmc_clock_enable(host, 1);
 739}
 740
 741static void jz_mmc_prepare_data_transfer(struct jz4740_mmc_host *host)
 742{
 743	struct mmc_command *cmd = host->req->cmd;
 744	struct mmc_data *data = cmd->data;
 745	int direction;
 746
 747	if (data->flags & MMC_DATA_READ)
 748		direction = SG_MITER_TO_SG;
 749	else
 750		direction = SG_MITER_FROM_SG;
 751
 752	sg_miter_start(&host->miter, data->sg, data->sg_len, direction);
 753}
 754
 755
 756static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
 757{
 758	struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid;
 759	struct mmc_command *cmd = host->req->cmd;
 760	struct mmc_request *req = host->req;
 761	struct mmc_data *data = cmd->data;
 762	bool timeout = false;
 763
 764	if (cmd->error)
 765		host->state = JZ4740_MMC_STATE_DONE;
 766
 767	switch (host->state) {
 768	case JZ4740_MMC_STATE_READ_RESPONSE:
 769		if (cmd->flags & MMC_RSP_PRESENT)
 770			jz4740_mmc_read_response(host, cmd);
 771
 772		if (!data)
 773			break;
 774
 775		jz_mmc_prepare_data_transfer(host);
 776		fallthrough;
 777
 778	case JZ4740_MMC_STATE_TRANSFER_DATA:
 779		if (host->use_dma) {
  780			/*
  781			 * Use DMA if enabled. The transfer direction is
  782			 * chosen later from the data flags, in
  783			 * jz4740_mmc_prepare_dma_data() and
  784			 * jz4740_mmc_start_dma_transfer().
  785			 */
 786			timeout = jz4740_mmc_start_dma_transfer(host, data);
 787			data->bytes_xfered = data->blocks * data->blksz;
 788		} else if (data->flags & MMC_DATA_READ)
  789			/*
  790			 * Use PIO if DMA is not enabled. The transfer
  791			 * direction was already chosen from the data flags
  792			 * in jz_mmc_prepare_data_transfer().
  793			 */
 794			timeout = jz4740_mmc_read_data(host, data);
 795		else
 796			timeout = jz4740_mmc_write_data(host, data);
 797
 798		if (unlikely(timeout)) {
 799			host->state = JZ4740_MMC_STATE_TRANSFER_DATA;
 800			break;
 801		}
 802
 803		jz4740_mmc_transfer_check_state(host, data);
 804
 805		timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
 806		if (unlikely(timeout)) {
 807			host->state = JZ4740_MMC_STATE_SEND_STOP;
 808			break;
 809		}
 810		jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
 811		fallthrough;
 812
 813	case JZ4740_MMC_STATE_SEND_STOP:
 814		if (!req->stop)
 815			break;
 816
 817		jz4740_mmc_send_command(host, req->stop);
 818
 819		if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) {
 820			timeout = jz4740_mmc_poll_irq(host,
 821						      JZ_MMC_IRQ_PRG_DONE);
 822			if (timeout) {
 823				host->state = JZ4740_MMC_STATE_DONE;
 824				break;
 825			}
 826		}
 827		fallthrough;
 828
 829	case JZ4740_MMC_STATE_DONE:
 830		break;
 831	}
 832
 833	if (!timeout)
 834		jz4740_mmc_request_done(host);
 835
 836	return IRQ_HANDLED;
 837}
 838
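/*
 * Hard interrupt handler: clear stray interrupts that are currently
 * masked, forward SDIO card interrupts to the core, and map the error
 * bits of the STATUS register onto cmd/data errors before waking the
 * threaded handler for the command in flight.
 */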
 839static irqreturn_t jz_mmc_irq(int irq, void *devid)
 840{
 841	struct jz4740_mmc_host *host = devid;
 842	struct mmc_command *cmd = host->cmd;
 843	uint32_t irq_reg, status, tmp;
 844
 845	status = readl(host->base + JZ_REG_MMC_STATUS);
 846	irq_reg = jz4740_mmc_read_irq_reg(host);
 847
 848	tmp = irq_reg;
 849	irq_reg &= ~host->irq_mask;
 850
 851	tmp &= ~(JZ_MMC_IRQ_TXFIFO_WR_REQ | JZ_MMC_IRQ_RXFIFO_RD_REQ |
 852		JZ_MMC_IRQ_PRG_DONE | JZ_MMC_IRQ_DATA_TRAN_DONE);
 853
 854	if (tmp != irq_reg)
 855		jz4740_mmc_write_irq_reg(host, tmp & ~irq_reg);
 856
 857	if (irq_reg & JZ_MMC_IRQ_SDIO) {
 858		jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_SDIO);
 859		mmc_signal_sdio_irq(host->mmc);
 860		irq_reg &= ~JZ_MMC_IRQ_SDIO;
 861	}
 862
 863	if (host->req && cmd && irq_reg) {
 864		if (test_and_clear_bit(0, &host->waiting)) {
 865			del_timer(&host->timeout_timer);
 866
 867			if (status & JZ_MMC_STATUS_TIMEOUT_RES) {
 868				cmd->error = -ETIMEDOUT;
 869			} else if (status & JZ_MMC_STATUS_CRC_RES_ERR) {
 870				cmd->error = -EIO;
 871			} else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR |
 872				    JZ_MMC_STATUS_CRC_WRITE_ERROR)) {
 873				if (cmd->data)
 874					cmd->data->error = -EIO;
 875				cmd->error = -EIO;
 876			}
 877
 878			jz4740_mmc_set_irq_enabled(host, irq_reg, false);
 879			jz4740_mmc_write_irq_reg(host, irq_reg);
 880
 881			return IRQ_WAKE_THREAD;
 882		}
 883	}
 884
 885	return IRQ_HANDLED;
 886}
 887
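/*
 * Program the clock divider. CLKRT is written with a divider exponent
 * (0..7); the code models the resulting card clock as the controller
 * clock divided by 2^div. Illustrative example: with the clock running
 * at 24 MHz and 400 kHz requested during card initialisation, the loop
 * stops at div = 6, i.e. 24 MHz / 64 = 375 kHz.
 */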
 888static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate)
 889{
 890	int div = 0;
 891	int real_rate;
 892
 893	jz4740_mmc_clock_disable(host);
 894	clk_set_rate(host->clk, host->mmc->f_max);
 895
 896	real_rate = clk_get_rate(host->clk);
 897
 898	while (real_rate > rate && div < 7) {
 899		++div;
 900		real_rate >>= 1;
 901	}
 902
 903	writew(div, host->base + JZ_REG_MMC_CLKRT);
 904
 905	if (real_rate > 25000000) {
 906		if (host->version >= JZ_MMC_JZ4780) {
 907			writel(JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY |
 908				   JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY |
 909				   JZ_MMC_LPM_LOW_POWER_MODE_EN,
 910				   host->base + JZ_REG_MMC_LPM);
 911		} else if (host->version >= JZ_MMC_JZ4760) {
 912			writel(JZ_MMC_LPM_DRV_RISING |
 913				   JZ_MMC_LPM_LOW_POWER_MODE_EN,
 914				   host->base + JZ_REG_MMC_LPM);
 915		} else if (host->version >= JZ_MMC_JZ4725B)
 916			writel(JZ_MMC_LPM_LOW_POWER_MODE_EN,
 917				   host->base + JZ_REG_MMC_LPM);
 918	}
 919
 920	return real_rate;
 921}
 922
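/*
 * Start a new request: clear any stale interrupt flags, enable the
 * end-of-command interrupt, arm the software timeout timer and send the
 * first command. Completion is then driven from the interrupt handlers.
 */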
 923static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
 924{
 925	struct jz4740_mmc_host *host = mmc_priv(mmc);
 926
 927	host->req = req;
 928
 929	jz4740_mmc_write_irq_reg(host, ~0);
 930	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, true);
 931
 932	host->state = JZ4740_MMC_STATE_READ_RESPONSE;
 933	set_bit(0, &host->waiting);
 934	mod_timer(&host->timeout_timer,
 935		  jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS));
 936	jz4740_mmc_send_command(host, req->cmd);
 937}
 938
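/*
 * Apply host settings from the core: the clock rate, power sequencing of
 * the vmmc/vqmmc supplies and of the controller clock, and the bus width
 * bits kept in the cached CMDAT value used for subsequent commands.
 */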
 939static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 940{
 941	struct jz4740_mmc_host *host = mmc_priv(mmc);
 942	int ret;
 943
 944	if (ios->clock)
 945		jz4740_mmc_set_clock_rate(host, ios->clock);
 946
 947	switch (ios->power_mode) {
 948	case MMC_POWER_UP:
 949		jz4740_mmc_reset(host);
 950		if (!IS_ERR(mmc->supply.vmmc))
 951			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
 952		host->cmdat |= JZ_MMC_CMDAT_INIT;
 953		clk_prepare_enable(host->clk);
 954		break;
 955	case MMC_POWER_ON:
 956		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
 957			ret = regulator_enable(mmc->supply.vqmmc);
 958			if (ret)
 959				dev_err(&host->pdev->dev, "Failed to set vqmmc power!\n");
 960			else
 961				host->vqmmc_enabled = true;
 962		}
 963		break;
 964	case MMC_POWER_OFF:
 965		if (!IS_ERR(mmc->supply.vmmc))
 966			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
 967		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
 968			regulator_disable(mmc->supply.vqmmc);
 969			host->vqmmc_enabled = false;
 970		}
 971		clk_disable_unprepare(host->clk);
 972		break;
 973	default:
 974		break;
 975	}
 976
 977	switch (ios->bus_width) {
 978	case MMC_BUS_WIDTH_1:
 979		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
 980		break;
 981	case MMC_BUS_WIDTH_4:
 982		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
 983		host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
 984		break;
 985	case MMC_BUS_WIDTH_8:
 986		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
 987		host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_8BIT;
 988		break;
 989	default:
 990		break;
 991	}
 992}
 993
 994static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
 995{
 996	struct jz4740_mmc_host *host = mmc_priv(mmc);
 997	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, enable);
 998}
 999
1000static int jz4740_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
1001{
1002	int ret;
1003
1004	/* vqmmc regulator is available */
1005	if (!IS_ERR(mmc->supply.vqmmc)) {
1006		ret = mmc_regulator_set_vqmmc(mmc, ios);
1007		return ret < 0 ? ret : 0;
1008	}
1009
1010	/* no vqmmc regulator, assume fixed regulator at 3/3.3V */
1011	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1012		return 0;
1013
1014	return -EINVAL;
1015}
1016
1017static const struct mmc_host_ops jz4740_mmc_ops = {
1018	.request	= jz4740_mmc_request,
1019	.pre_req	= jz4740_mmc_pre_request,
1020	.post_req	= jz4740_mmc_post_request,
1021	.set_ios	= jz4740_mmc_set_ios,
1022	.get_ro		= mmc_gpio_get_ro,
1023	.get_cd		= mmc_gpio_get_cd,
1024	.enable_sdio_irq = jz4740_mmc_enable_sdio_irq,
1025	.start_signal_voltage_switch = jz4740_voltage_switch,
1026};
1027
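/*
 * Each compatible string carries the controller generation as its match
 * data; the version is used above to gate the DMAC register write, the
 * low-power-mode tuning bits and other per-SoC differences.
 */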
1028static const struct of_device_id jz4740_mmc_of_match[] = {
1029	{ .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 },
1030	{ .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B },
1031	{ .compatible = "ingenic,jz4760-mmc", .data = (void *) JZ_MMC_JZ4760 },
1032	{ .compatible = "ingenic,jz4775-mmc", .data = (void *) JZ_MMC_JZ4780 },
1033	{ .compatible = "ingenic,jz4780-mmc", .data = (void *) JZ_MMC_JZ4780 },
1034	{ .compatible = "ingenic,x1000-mmc", .data = (void *) JZ_MMC_X1000 },
1035	{},
1036};
1037MODULE_DEVICE_TABLE(of, jz4740_mmc_of_match);
1038
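/*
 * Probe: allocate the mmc_host, parse the common MMC properties, claim
 * the clock, register space and interrupt, reset the controller, try to
 * acquire DMA channels (falling back to PIO if none are available) and
 * finally register the host with the MMC core.
 */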
1039static int jz4740_mmc_probe(struct platform_device* pdev)
1040{
1041	int ret;
1042	struct mmc_host *mmc;
1043	struct jz4740_mmc_host *host;
1044
1045	mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev);
1046	if (!mmc) {
1047		dev_err(&pdev->dev, "Failed to alloc mmc host structure\n");
1048		return -ENOMEM;
1049	}
1050
1051	host = mmc_priv(mmc);
1052
 1053	/* Default to the JZ4740 if there is no match data */
 1054	host->version = (enum jz4740_mmc_version)(uintptr_t)device_get_match_data(&pdev->dev);
1055
1056	ret = mmc_of_parse(mmc);
1057	if (ret) {
1058		dev_err_probe(&pdev->dev, ret, "could not parse device properties\n");
1059		goto err_free_host;
1060	}
1061
1062	mmc_regulator_get_supply(mmc);
1063
1064	host->irq = platform_get_irq(pdev, 0);
1065	if (host->irq < 0) {
1066		ret = host->irq;
1067		goto err_free_host;
1068	}
1069
1070	host->clk = devm_clk_get(&pdev->dev, "mmc");
1071	if (IS_ERR(host->clk)) {
1072		ret = PTR_ERR(host->clk);
1073		dev_err(&pdev->dev, "Failed to get mmc clock\n");
1074		goto err_free_host;
1075	}
1076
1077	host->base = devm_platform_get_and_ioremap_resource(pdev, 0, &host->mem_res);
1078	if (IS_ERR(host->base)) {
1079		ret = PTR_ERR(host->base);
1080		goto err_free_host;
1081	}
1082
1083	mmc->ops = &jz4740_mmc_ops;
1084	if (!mmc->f_max)
1085		mmc->f_max = JZ_MMC_CLK_RATE;
1086
 1087	/*
 1088	 * There seems to be a problem with this driver on the JZ4760 and
 1089	 * JZ4760B SoCs: when running at the maximum supported rate (50 MHz),
 1090	 * communication with many SD cards fails.
 1091	 * Until this bug is sorted out, limit the maximum rate to 24 MHz.
 1092	 */
1093	if (host->version == JZ_MMC_JZ4760 && mmc->f_max > JZ_MMC_CLK_RATE)
1094		mmc->f_max = JZ_MMC_CLK_RATE;
1095
1096	mmc->f_min = mmc->f_max / 128;
1097	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1098
 1099	/*
 1100	 * A fixed 5 second request timeout is used, so inform the core about
 1101	 * it. A future improvement should respect cmd->busy_timeout instead.
 1102	 */
1103	mmc->max_busy_timeout = JZ_MMC_REQ_TIMEOUT_MS;
1104
1105	mmc->max_blk_size = (1 << 10) - 1;
1106	mmc->max_blk_count = (1 << 15) - 1;
1107	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1108
1109	mmc->max_segs = 128;
1110	mmc->max_seg_size = mmc->max_req_size;
1111
1112	host->mmc = mmc;
1113	host->pdev = pdev;
1114	spin_lock_init(&host->lock);
1115	host->irq_mask = ~0;
1116
1117	jz4740_mmc_reset(host);
1118
1119	ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0,
1120			dev_name(&pdev->dev), host);
1121	if (ret) {
1122		dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
1123		goto err_free_host;
1124	}
1125
1126	jz4740_mmc_clock_disable(host);
1127	timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0);
1128
1129	ret = jz4740_mmc_acquire_dma_channels(host);
1130	if (ret == -EPROBE_DEFER)
1131		goto err_free_irq;
1132	host->use_dma = !ret;
1133
1134	platform_set_drvdata(pdev, host);
1135	ret = mmc_add_host(mmc);
1136
1137	if (ret) {
1138		dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret);
1139		goto err_release_dma;
1140	}
1141	dev_info(&pdev->dev, "Ingenic SD/MMC card driver registered\n");
1142
1143	dev_info(&pdev->dev, "Using %s, %d-bit mode\n",
1144		 host->use_dma ? "DMA" : "PIO",
1145		 (mmc->caps & MMC_CAP_8_BIT_DATA) ? 8 :
1146		 ((mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1));
1147
1148	return 0;
1149
1150err_release_dma:
1151	if (host->use_dma)
1152		jz4740_mmc_release_dma_channels(host);
1153err_free_irq:
1154	free_irq(host->irq, host);
1155err_free_host:
1156	mmc_free_host(mmc);
1157
1158	return ret;
1159}
1160
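/*
 * Tear down in roughly the reverse order of probe: stop the software
 * timeout timer, mask and reset the controller, unregister the host,
 * free the interrupt and DMA channels, and release the mmc_host.
 */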
1161static void jz4740_mmc_remove(struct platform_device *pdev)
1162{
1163	struct jz4740_mmc_host *host = platform_get_drvdata(pdev);
1164
1165	del_timer_sync(&host->timeout_timer);
1166	jz4740_mmc_set_irq_enabled(host, 0xff, false);
1167	jz4740_mmc_reset(host);
1168
1169	mmc_remove_host(host->mmc);
1170
1171	free_irq(host->irq, host);
1172
1173	if (host->use_dma)
1174		jz4740_mmc_release_dma_channels(host);
1175
1176	mmc_free_host(host->mmc);
1177}
1178
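/*
 * System sleep handling only parks the pins: suspend selects the pinctrl
 * "sleep" state and resume switches back to the "default" state.
 */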
1179static int jz4740_mmc_suspend(struct device *dev)
1180{
1181	return pinctrl_pm_select_sleep_state(dev);
1182}
1183
1184static int jz4740_mmc_resume(struct device *dev)
1185{
1186	return pinctrl_select_default_state(dev);
1187}
1188
1189static DEFINE_SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
1190				jz4740_mmc_resume);
1191
1192static struct platform_driver jz4740_mmc_driver = {
1193	.probe = jz4740_mmc_probe,
1194	.remove_new = jz4740_mmc_remove,
1195	.driver = {
1196		.name = "jz4740-mmc",
1197		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
1198		.of_match_table = jz4740_mmc_of_match,
1199		.pm = pm_sleep_ptr(&jz4740_mmc_pm_ops),
1200	},
1201};
1202
1203module_platform_driver(jz4740_mmc_driver);
1204
1205MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver");
1206MODULE_LICENSE("GPL");
1207MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");