v6.2
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
   4 *  Copyright (C) 2013, Imagination Technologies
   5 *
   6 *  JZ4740 SD/MMC controller driver
   7 */
   8
   9#include <linux/bitops.h>
  10#include <linux/clk.h>
  11#include <linux/delay.h>
  12#include <linux/dmaengine.h>
  13#include <linux/dma-mapping.h>
  14#include <linux/err.h>
  15#include <linux/interrupt.h>
  16#include <linux/io.h>
  17#include <linux/irq.h>
  18#include <linux/mmc/host.h>
  19#include <linux/mmc/slot-gpio.h>
  20#include <linux/module.h>
  21#include <linux/of_device.h>
  22#include <linux/pinctrl/consumer.h>
  23#include <linux/platform_device.h>
  24#include <linux/scatterlist.h>
  25
  26#include <asm/cacheflush.h>
  27
  28#define JZ_REG_MMC_STRPCL	0x00
  29#define JZ_REG_MMC_STATUS	0x04
  30#define JZ_REG_MMC_CLKRT	0x08
  31#define JZ_REG_MMC_CMDAT	0x0C
  32#define JZ_REG_MMC_RESTO	0x10
  33#define JZ_REG_MMC_RDTO		0x14
  34#define JZ_REG_MMC_BLKLEN	0x18
  35#define JZ_REG_MMC_NOB		0x1C
  36#define JZ_REG_MMC_SNOB		0x20
  37#define JZ_REG_MMC_IMASK	0x24
  38#define JZ_REG_MMC_IREG		0x28
  39#define JZ_REG_MMC_CMD		0x2C
  40#define JZ_REG_MMC_ARG		0x30
  41#define JZ_REG_MMC_RESP_FIFO	0x34
  42#define JZ_REG_MMC_RXFIFO	0x38
  43#define JZ_REG_MMC_TXFIFO	0x3C
  44#define JZ_REG_MMC_LPM		0x40
  45#define JZ_REG_MMC_DMAC		0x44
  46
  47#define JZ_MMC_STRPCL_EXIT_MULTIPLE BIT(7)
  48#define JZ_MMC_STRPCL_EXIT_TRANSFER BIT(6)
  49#define JZ_MMC_STRPCL_START_READWAIT BIT(5)
  50#define JZ_MMC_STRPCL_STOP_READWAIT BIT(4)
  51#define JZ_MMC_STRPCL_RESET BIT(3)
  52#define JZ_MMC_STRPCL_START_OP BIT(2)
  53#define JZ_MMC_STRPCL_CLOCK_CONTROL (BIT(1) | BIT(0))
  54#define JZ_MMC_STRPCL_CLOCK_STOP BIT(0)
  55#define JZ_MMC_STRPCL_CLOCK_START BIT(1)
  56
  57
  58#define JZ_MMC_STATUS_IS_RESETTING BIT(15)
  59#define JZ_MMC_STATUS_SDIO_INT_ACTIVE BIT(14)
  60#define JZ_MMC_STATUS_PRG_DONE BIT(13)
  61#define JZ_MMC_STATUS_DATA_TRAN_DONE BIT(12)
  62#define JZ_MMC_STATUS_END_CMD_RES BIT(11)
  63#define JZ_MMC_STATUS_DATA_FIFO_AFULL BIT(10)
  64#define JZ_MMC_STATUS_IS_READWAIT BIT(9)
  65#define JZ_MMC_STATUS_CLK_EN BIT(8)
  66#define JZ_MMC_STATUS_DATA_FIFO_FULL BIT(7)
  67#define JZ_MMC_STATUS_DATA_FIFO_EMPTY BIT(6)
  68#define JZ_MMC_STATUS_CRC_RES_ERR BIT(5)
  69#define JZ_MMC_STATUS_CRC_READ_ERROR BIT(4)
  70#define JZ_MMC_STATUS_TIMEOUT_WRITE BIT(3)
  71#define JZ_MMC_STATUS_CRC_WRITE_ERROR BIT(2)
  72#define JZ_MMC_STATUS_TIMEOUT_RES BIT(1)
  73#define JZ_MMC_STATUS_TIMEOUT_READ BIT(0)
  74
  75#define JZ_MMC_STATUS_READ_ERROR_MASK (BIT(4) | BIT(0))
  76#define JZ_MMC_STATUS_WRITE_ERROR_MASK (BIT(3) | BIT(2))
  77
  78
  79#define JZ_MMC_CMDAT_IO_ABORT BIT(11)
  80#define JZ_MMC_CMDAT_BUS_WIDTH_4BIT BIT(10)
  81#define JZ_MMC_CMDAT_BUS_WIDTH_8BIT (BIT(10) | BIT(9))
  82#define	JZ_MMC_CMDAT_BUS_WIDTH_MASK (BIT(10) | BIT(9))
  83#define JZ_MMC_CMDAT_DMA_EN BIT(8)
  84#define JZ_MMC_CMDAT_INIT BIT(7)
  85#define JZ_MMC_CMDAT_BUSY BIT(6)
  86#define JZ_MMC_CMDAT_STREAM BIT(5)
  87#define JZ_MMC_CMDAT_WRITE BIT(4)
  88#define JZ_MMC_CMDAT_DATA_EN BIT(3)
  89#define JZ_MMC_CMDAT_RESPONSE_FORMAT (BIT(2) | BIT(1) | BIT(0))
  90#define JZ_MMC_CMDAT_RSP_R1 1
  91#define JZ_MMC_CMDAT_RSP_R2 2
  92#define JZ_MMC_CMDAT_RSP_R3 3
  93
  94#define JZ_MMC_IRQ_SDIO BIT(7)
  95#define JZ_MMC_IRQ_TXFIFO_WR_REQ BIT(6)
  96#define JZ_MMC_IRQ_RXFIFO_RD_REQ BIT(5)
  97#define JZ_MMC_IRQ_END_CMD_RES BIT(2)
  98#define JZ_MMC_IRQ_PRG_DONE BIT(1)
  99#define JZ_MMC_IRQ_DATA_TRAN_DONE BIT(0)
 100
 101#define JZ_MMC_DMAC_DMA_SEL BIT(1)
 102#define JZ_MMC_DMAC_DMA_EN BIT(0)
 103
 104#define	JZ_MMC_LPM_DRV_RISING BIT(31)
 105#define	JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY BIT(31)
 106#define	JZ_MMC_LPM_DRV_RISING_1NS_DLY BIT(30)
 107#define	JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY BIT(29)
 108#define	JZ_MMC_LPM_LOW_POWER_MODE_EN BIT(0)
 109
 110#define JZ_MMC_CLK_RATE 24000000
 111#define JZ_MMC_REQ_TIMEOUT_MS 5000
 112
 113enum jz4740_mmc_version {
 114	JZ_MMC_JZ4740,
 115	JZ_MMC_JZ4725B,
 116	JZ_MMC_JZ4760,
 117	JZ_MMC_JZ4780,
 118	JZ_MMC_X1000,
 119};
 120
 121enum jz4740_mmc_state {
 122	JZ4740_MMC_STATE_READ_RESPONSE,
 123	JZ4740_MMC_STATE_TRANSFER_DATA,
 124	JZ4740_MMC_STATE_SEND_STOP,
 125	JZ4740_MMC_STATE_DONE,
 126};
 127
 128/*
  129 * The MMC core allows preparing an mmc_request while another mmc_request
  130 * is in flight. This is done via the pre_req/post_req hooks.
 131 * This driver uses the pre_req/post_req hooks to map/unmap the mmc_request.
 132 * Following what other drivers do (sdhci, dw_mmc) we use the following cookie
 133 * flags to keep track of the mmc_request mapping state.
 134 *
 135 * COOKIE_UNMAPPED: the request is not mapped.
 136 * COOKIE_PREMAPPED: the request was mapped in pre_req,
 137 * and should be unmapped in post_req.
 138 * COOKIE_MAPPED: the request was mapped in the irq handler,
  139 * and should be unmapped before mmc_request_done is called.
 140 */
 141enum jz4780_cookie {
 142	COOKIE_UNMAPPED = 0,
 143	COOKIE_PREMAPPED,
 144	COOKIE_MAPPED,
 145};
 146
 147struct jz4740_mmc_host {
 148	struct mmc_host *mmc;
 149	struct platform_device *pdev;
 150	struct clk *clk;
 151
 152	enum jz4740_mmc_version version;
 153
 154	int irq;
 155
 156	void __iomem *base;
 157	struct resource *mem_res;
 158	struct mmc_request *req;
 159	struct mmc_command *cmd;
 160
 161	unsigned long waiting;
 162
 163	uint32_t cmdat;
 164
 165	uint32_t irq_mask;
 166
 167	spinlock_t lock;
 168
 169	struct timer_list timeout_timer;
 170	struct sg_mapping_iter miter;
 171	enum jz4740_mmc_state state;
 172
 173	/* DMA support */
 174	struct dma_chan *dma_rx;
 175	struct dma_chan *dma_tx;
 176	bool use_dma;
 177
  178/* The DMA trigger level is 8 words: a DMA read is triggered when
  179 * MSC_RXFIFO holds at least 8 data words, and a DMA write is triggered
  180 * when MSC_TXFIFO holds fewer than 8 data words.
  181 */
 182#define JZ4740_MMC_FIFO_HALF_SIZE 8
 183};
 184
 185static void jz4740_mmc_write_irq_mask(struct jz4740_mmc_host *host,
 186				      uint32_t val)
 187{
 188	if (host->version >= JZ_MMC_JZ4725B)
 189		return writel(val, host->base + JZ_REG_MMC_IMASK);
 190	else
 191		return writew(val, host->base + JZ_REG_MMC_IMASK);
 192}
 193
 194static void jz4740_mmc_write_irq_reg(struct jz4740_mmc_host *host,
 195				     uint32_t val)
 196{
 197	if (host->version >= JZ_MMC_JZ4780)
 198		writel(val, host->base + JZ_REG_MMC_IREG);
 199	else
 200		writew(val, host->base + JZ_REG_MMC_IREG);
 201}
 202
 203static uint32_t jz4740_mmc_read_irq_reg(struct jz4740_mmc_host *host)
 204{
 205	if (host->version >= JZ_MMC_JZ4780)
 206		return readl(host->base + JZ_REG_MMC_IREG);
 207	else
 208		return readw(host->base + JZ_REG_MMC_IREG);
 209}
 210
 211/*----------------------------------------------------------------------------*/
 212/* DMA infrastructure */
 213
 214static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host)
 215{
 216	if (!host->use_dma)
 217		return;
 218
 219	dma_release_channel(host->dma_tx);
 220	if (host->dma_rx)
 221		dma_release_channel(host->dma_rx);
 222}
 223
 224static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
 225{
 226	struct device *dev = mmc_dev(host->mmc);
 227
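	/*
	 * Some SoCs provide a single DMA channel shared by both directions
	 * ("tx-rx"). Try that first; if it is not described (-ENODEV), fall
	 * back to requesting separate "tx" and "rx" channels.
	 */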
 228	host->dma_tx = dma_request_chan(dev, "tx-rx");
 229	if (!IS_ERR(host->dma_tx))
 230		return 0;
 231
 232	if (PTR_ERR(host->dma_tx) != -ENODEV) {
 233		dev_err(dev, "Failed to get dma tx-rx channel\n");
 234		return PTR_ERR(host->dma_tx);
 235	}
 236
 237	host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
 238	if (IS_ERR(host->dma_tx)) {
 239		dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n");
 240		return PTR_ERR(host->dma_tx);
 241	}
 242
 243	host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
 244	if (IS_ERR(host->dma_rx)) {
 245		dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n");
 246		dma_release_channel(host->dma_tx);
 247		return PTR_ERR(host->dma_rx);
 248	}
 249
 250	/*
 251	 * Limit the maximum segment size in any SG entry according to
 252	 * the parameters of the DMA engine device.
 253	 */
 254	if (host->dma_tx) {
 255		struct device *dev = host->dma_tx->device->dev;
 256		unsigned int max_seg_size = dma_get_max_seg_size(dev);
 257
 258		if (max_seg_size < host->mmc->max_seg_size)
 259			host->mmc->max_seg_size = max_seg_size;
 260	}
 261
 262	if (host->dma_rx) {
 263		struct device *dev = host->dma_rx->device->dev;
 264		unsigned int max_seg_size = dma_get_max_seg_size(dev);
 265
 266		if (max_seg_size < host->mmc->max_seg_size)
 267			host->mmc->max_seg_size = max_seg_size;
 268	}
 269
 270	return 0;
 271}
 272
 273static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
 274						       struct mmc_data *data)
 275{
 276	if ((data->flags & MMC_DATA_READ) && host->dma_rx)
 277		return host->dma_rx;
 278	else
 279		return host->dma_tx;
 280}
 281
 282static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
 283				 struct mmc_data *data)
 284{
 285	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
 286	enum dma_data_direction dir = mmc_get_dma_dir(data);
 287
 288	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
 289	data->host_cookie = COOKIE_UNMAPPED;
 290}
 291
 292/* Prepares DMA data for current or next transfer.
 293 * A request can be in-flight when this is called.
 294 */
 295static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
 296				       struct mmc_data *data,
 297				       int cookie)
 298{
 299	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
 300	enum dma_data_direction dir = mmc_get_dma_dir(data);
 301	unsigned int sg_count;
 302
 303	if (data->host_cookie == COOKIE_PREMAPPED)
 304		return data->sg_count;
 305
 306	sg_count = dma_map_sg(chan->device->dev,
 307			data->sg,
 308			data->sg_len,
 309			dir);
 310
 311	if (!sg_count) {
 312		dev_err(mmc_dev(host->mmc),
 313			"Failed to map scatterlist for DMA operation\n");
 314		return -EINVAL;
 315	}
 316
 317	data->sg_count = sg_count;
 318	data->host_cookie = cookie;
 319
 320	return data->sg_count;
 321}
 322
 323static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
 324					 struct mmc_data *data)
 325{
 326	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
 327	struct dma_async_tx_descriptor *desc;
 328	struct dma_slave_config conf = {
 329		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 330		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 331		.src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
 332		.dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
 333	};
 334	int sg_count;
 335
 336	if (data->flags & MMC_DATA_WRITE) {
 337		conf.direction = DMA_MEM_TO_DEV;
 338		conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
 339	} else {
 340		conf.direction = DMA_DEV_TO_MEM;
 341		conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
 342	}
 343
 344	sg_count = jz4740_mmc_prepare_dma_data(host, data, COOKIE_MAPPED);
 345	if (sg_count < 0)
 346		return sg_count;
 347
 348	dmaengine_slave_config(chan, &conf);
 349	desc = dmaengine_prep_slave_sg(chan, data->sg, sg_count,
 350			conf.direction,
 351			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 352	if (!desc) {
 353		dev_err(mmc_dev(host->mmc),
 354			"Failed to allocate DMA %s descriptor",
 355			 conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX");
 356		goto dma_unmap;
 357	}
 358
 359	dmaengine_submit(desc);
 360	dma_async_issue_pending(chan);
 361
 362	return 0;
 363
 364dma_unmap:
 365	if (data->host_cookie == COOKIE_MAPPED)
 366		jz4740_mmc_dma_unmap(host, data);
 367	return -ENOMEM;
 368}
 369
 370static void jz4740_mmc_pre_request(struct mmc_host *mmc,
 371				   struct mmc_request *mrq)
 372{
 373	struct jz4740_mmc_host *host = mmc_priv(mmc);
 374	struct mmc_data *data = mrq->data;
 375
 376	if (!host->use_dma)
 377		return;
 378
 379	data->host_cookie = COOKIE_UNMAPPED;
 380	if (jz4740_mmc_prepare_dma_data(host, data, COOKIE_PREMAPPED) < 0)
 381		data->host_cookie = COOKIE_UNMAPPED;
 382}
 383
 384static void jz4740_mmc_post_request(struct mmc_host *mmc,
 385				    struct mmc_request *mrq,
 386				    int err)
 387{
 388	struct jz4740_mmc_host *host = mmc_priv(mmc);
 389	struct mmc_data *data = mrq->data;
 390
 391	if (data && data->host_cookie != COOKIE_UNMAPPED)
 392		jz4740_mmc_dma_unmap(host, data);
 393
 394	if (err) {
 395		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
 396
 397		dmaengine_terminate_all(chan);
 398	}
 399}
 400
 401/*----------------------------------------------------------------------------*/
 402
 403static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
 404	unsigned int irq, bool enabled)
 405{
 406	unsigned long flags;
 407
 408	spin_lock_irqsave(&host->lock, flags);
 409	if (enabled)
 410		host->irq_mask &= ~irq;
 411	else
 412		host->irq_mask |= irq;
 413
 414	jz4740_mmc_write_irq_mask(host, host->irq_mask);
 415	spin_unlock_irqrestore(&host->lock, flags);
 416}
 417
 418static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
 419	bool start_transfer)
 420{
 421	uint16_t val = JZ_MMC_STRPCL_CLOCK_START;
 422
 423	if (start_transfer)
 424		val |= JZ_MMC_STRPCL_START_OP;
 425
 426	writew(val, host->base + JZ_REG_MMC_STRPCL);
 427}
 428
 429static void jz4740_mmc_clock_disable(struct jz4740_mmc_host *host)
 430{
 431	uint32_t status;
 432	unsigned int timeout = 1000;
 433
 434	writew(JZ_MMC_STRPCL_CLOCK_STOP, host->base + JZ_REG_MMC_STRPCL);
 435	do {
 436		status = readl(host->base + JZ_REG_MMC_STATUS);
 437	} while (status & JZ_MMC_STATUS_CLK_EN && --timeout);
 438}
 439
 440static void jz4740_mmc_reset(struct jz4740_mmc_host *host)
 441{
 442	uint32_t status;
 443	unsigned int timeout = 1000;
 444
 445	writew(JZ_MMC_STRPCL_RESET, host->base + JZ_REG_MMC_STRPCL);
 446	udelay(10);
 447	do {
 448		status = readl(host->base + JZ_REG_MMC_STATUS);
 449	} while (status & JZ_MMC_STATUS_IS_RESETTING && --timeout);
 450}
 451
 452static void jz4740_mmc_request_done(struct jz4740_mmc_host *host)
 453{
 454	struct mmc_request *req;
 455	struct mmc_data *data;
 456
 457	req = host->req;
 458	data = req->data;
 459	host->req = NULL;
 460
 461	if (data && data->host_cookie == COOKIE_MAPPED)
 462		jz4740_mmc_dma_unmap(host, data);
 463	mmc_request_done(host->mmc, req);
 464}
 465
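/*
 * Busy-poll IREG for @irq. Returns false once the flag is set. If polling
 * gives up, the interrupt is unmasked and the request timeout timer is
 * (re)armed so the transfer can complete from the IRQ path instead; in that
 * case true is returned and the state machine stops until the next IRQ.
 */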
 466static unsigned int jz4740_mmc_poll_irq(struct jz4740_mmc_host *host,
 467	unsigned int irq)
 468{
 469	unsigned int timeout = 0x800;
 470	uint32_t status;
 471
 472	do {
 473		status = jz4740_mmc_read_irq_reg(host);
 474	} while (!(status & irq) && --timeout);
 475
 476	if (timeout == 0) {
 477		set_bit(0, &host->waiting);
 478		mod_timer(&host->timeout_timer,
 479			  jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS));
 480		jz4740_mmc_set_irq_enabled(host, irq, true);
 481		return true;
 482	}
 483
 484	return false;
 485}
 486
 487static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host,
 488	struct mmc_data *data)
 489{
 490	int status;
 491
 492	status = readl(host->base + JZ_REG_MMC_STATUS);
 493	if (status & JZ_MMC_STATUS_WRITE_ERROR_MASK) {
 494		if (status & (JZ_MMC_STATUS_TIMEOUT_WRITE)) {
 495			host->req->cmd->error = -ETIMEDOUT;
 496			data->error = -ETIMEDOUT;
 497		} else {
 498			host->req->cmd->error = -EIO;
 499			data->error = -EIO;
 500		}
 501	} else if (status & JZ_MMC_STATUS_READ_ERROR_MASK) {
 502		if (status & (JZ_MMC_STATUS_TIMEOUT_READ)) {
 503			host->req->cmd->error = -ETIMEDOUT;
 504			data->error = -ETIMEDOUT;
 505		} else {
 506			host->req->cmd->error = -EIO;
 507			data->error = -EIO;
 508		}
 509	}
 510}
 511
 512static bool jz4740_mmc_write_data(struct jz4740_mmc_host *host,
 513	struct mmc_data *data)
 514{
 515	struct sg_mapping_iter *miter = &host->miter;
 516	void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO;
 517	uint32_t *buf;
 518	bool timeout;
 519	size_t i, j;
 520
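	/* Push data in bursts of 8 words (the FIFO trigger level) per TXFIFO
	 * write request; a tail of fewer than 8 words is written after the
	 * last full burst.
	 */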
 521	while (sg_miter_next(miter)) {
 522		buf = miter->addr;
 523		i = miter->length / 4;
 524		j = i / 8;
 525		i = i & 0x7;
 526		while (j) {
 527			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
 528			if (unlikely(timeout))
 529				goto poll_timeout;
 530
 531			writel(buf[0], fifo_addr);
 532			writel(buf[1], fifo_addr);
 533			writel(buf[2], fifo_addr);
 534			writel(buf[3], fifo_addr);
 535			writel(buf[4], fifo_addr);
 536			writel(buf[5], fifo_addr);
 537			writel(buf[6], fifo_addr);
 538			writel(buf[7], fifo_addr);
 539			buf += 8;
 540			--j;
 541		}
 542		if (unlikely(i)) {
 543			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
 544			if (unlikely(timeout))
 545				goto poll_timeout;
 546
 547			while (i) {
 548				writel(*buf, fifo_addr);
 549				++buf;
 550				--i;
 551			}
 552		}
 553		data->bytes_xfered += miter->length;
 554	}
 555	sg_miter_stop(miter);
 556
 557	return false;
 558
 559poll_timeout:
 560	miter->consumed = (void *)buf - miter->addr;
 561	data->bytes_xfered += miter->consumed;
 562	sg_miter_stop(miter);
 563
 564	return true;
 565}
 566
 567static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host,
 568				struct mmc_data *data)
 569{
 570	struct sg_mapping_iter *miter = &host->miter;
 571	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO;
 572	uint32_t *buf;
 573	uint32_t d;
 574	uint32_t status;
 575	size_t i, j;
 576	unsigned int timeout;
 577
 578	while (sg_miter_next(miter)) {
 579		buf = miter->addr;
 580		i = miter->length;
 581		j = i / 32;
 582		i = i & 0x1f;
 583		while (j) {
 584			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
 585			if (unlikely(timeout))
 586				goto poll_timeout;
 587
 588			buf[0] = readl(fifo_addr);
 589			buf[1] = readl(fifo_addr);
 590			buf[2] = readl(fifo_addr);
 591			buf[3] = readl(fifo_addr);
 592			buf[4] = readl(fifo_addr);
 593			buf[5] = readl(fifo_addr);
 594			buf[6] = readl(fifo_addr);
 595			buf[7] = readl(fifo_addr);
 596
 597			buf += 8;
 598			--j;
 599		}
 600
 601		if (unlikely(i)) {
 602			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
 603			if (unlikely(timeout))
 604				goto poll_timeout;
 605
 606			while (i >= 4) {
 607				*buf++ = readl(fifo_addr);
 608				i -= 4;
 609			}
 610			if (unlikely(i > 0)) {
 611				d = readl(fifo_addr);
 612				memcpy(buf, &d, i);
 613			}
 614		}
 615		data->bytes_xfered += miter->length;
 616	}
 617	sg_miter_stop(miter);
 618
  619	/* For whatever reason there is sometimes one more word in the FIFO
  620	 * than requested; drain it. */
 621	timeout = 1000;
 622	status = readl(host->base + JZ_REG_MMC_STATUS);
 623	while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) {
 624		d = readl(fifo_addr);
 625		status = readl(host->base + JZ_REG_MMC_STATUS);
 626	}
 627
 628	return false;
 629
 630poll_timeout:
 631	miter->consumed = (void *)buf - miter->addr;
 632	data->bytes_xfered += miter->consumed;
 633	sg_miter_stop(miter);
 634
 635	return true;
 636}
 637
 638static void jz4740_mmc_timeout(struct timer_list *t)
 639{
 640	struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer);
 641
 642	if (!test_and_clear_bit(0, &host->waiting))
 643		return;
 644
 645	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, false);
 646
 647	host->req->cmd->error = -ETIMEDOUT;
 648	jz4740_mmc_request_done(host);
 649}
 650
 651static void jz4740_mmc_read_response(struct jz4740_mmc_host *host,
 652	struct mmc_command *cmd)
 653{
 654	int i;
 655	uint16_t tmp;
 656	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RESP_FIFO;
 657
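	/*
	 * The response FIFO is read 16 bits at a time. For a 136-bit (R2)
	 * response each 32-bit word of cmd->resp is assembled from parts of
	 * three successive reads, with the low byte of the last read carried
	 * over into the following word.
	 */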
 658	if (cmd->flags & MMC_RSP_136) {
 659		tmp = readw(fifo_addr);
 660		for (i = 0; i < 4; ++i) {
 661			cmd->resp[i] = tmp << 24;
 662			tmp = readw(fifo_addr);
 663			cmd->resp[i] |= tmp << 8;
 664			tmp = readw(fifo_addr);
 665			cmd->resp[i] |= tmp >> 8;
 666		}
 667	} else {
 668		cmd->resp[0] = readw(fifo_addr) << 24;
 669		cmd->resp[0] |= readw(fifo_addr) << 8;
 670		cmd->resp[0] |= readw(fifo_addr) & 0xff;
 671	}
 672}
 673
 674static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
 675	struct mmc_command *cmd)
 676{
 677	uint32_t cmdat = host->cmdat;
 678
 679	host->cmdat &= ~JZ_MMC_CMDAT_INIT;
 680	jz4740_mmc_clock_disable(host);
 681
 682	host->cmd = cmd;
 683
 684	if (cmd->flags & MMC_RSP_BUSY)
 685		cmdat |= JZ_MMC_CMDAT_BUSY;
 686
 687	switch (mmc_resp_type(cmd)) {
 688	case MMC_RSP_R1B:
 689	case MMC_RSP_R1:
 690		cmdat |= JZ_MMC_CMDAT_RSP_R1;
 691		break;
 692	case MMC_RSP_R2:
 693		cmdat |= JZ_MMC_CMDAT_RSP_R2;
 694		break;
 695	case MMC_RSP_R3:
 696		cmdat |= JZ_MMC_CMDAT_RSP_R3;
 697		break;
 698	default:
 699		break;
 700	}
 701
 702	if (cmd->data) {
 703		cmdat |= JZ_MMC_CMDAT_DATA_EN;
 704		if (cmd->data->flags & MMC_DATA_WRITE)
 705			cmdat |= JZ_MMC_CMDAT_WRITE;
 706		if (host->use_dma) {
 707			/*
 708			 * The JZ4780's MMC controller has integrated DMA ability
 709			 * in addition to being able to use the external DMA
 710			 * controller. It moves DMA control bits to a separate
 711			 * register. The DMA_SEL bit chooses the external
 712			 * controller over the integrated one. Earlier SoCs
 713			 * can only use the external controller, and have a
 714			 * single DMA enable bit in CMDAT.
 715			 */
 716			if (host->version >= JZ_MMC_JZ4780) {
 717				writel(JZ_MMC_DMAC_DMA_EN | JZ_MMC_DMAC_DMA_SEL,
 718				       host->base + JZ_REG_MMC_DMAC);
 719			} else {
 720				cmdat |= JZ_MMC_CMDAT_DMA_EN;
 721			}
 722		} else if (host->version >= JZ_MMC_JZ4780) {
 723			writel(0, host->base + JZ_REG_MMC_DMAC);
 724		}
 725
 726		writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN);
 727		writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB);
 728	}
 729
 730	writeb(cmd->opcode, host->base + JZ_REG_MMC_CMD);
 731	writel(cmd->arg, host->base + JZ_REG_MMC_ARG);
 732	writel(cmdat, host->base + JZ_REG_MMC_CMDAT);
 733
 734	jz4740_mmc_clock_enable(host, 1);
 735}
 736
 737static void jz_mmc_prepare_data_transfer(struct jz4740_mmc_host *host)
 738{
 739	struct mmc_command *cmd = host->req->cmd;
 740	struct mmc_data *data = cmd->data;
 741	int direction;
 742
 743	if (data->flags & MMC_DATA_READ)
 744		direction = SG_MITER_TO_SG;
 745	else
 746		direction = SG_MITER_FROM_SG;
 747
 748	sg_miter_start(&host->miter, data->sg, data->sg_len, direction);
 749}
 750
 751
 752static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
 753{
 754	struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid;
 755	struct mmc_command *cmd = host->req->cmd;
 756	struct mmc_request *req = host->req;
 757	struct mmc_data *data = cmd->data;
 758	bool timeout = false;
 759
 760	if (cmd->error)
 761		host->state = JZ4740_MMC_STATE_DONE;
 762
 763	switch (host->state) {
 764	case JZ4740_MMC_STATE_READ_RESPONSE:
 765		if (cmd->flags & MMC_RSP_PRESENT)
 766			jz4740_mmc_read_response(host, cmd);
 767
 768		if (!data)
 769			break;
 770
 771		jz_mmc_prepare_data_transfer(host);
 772		fallthrough;
 773
 774	case JZ4740_MMC_STATE_TRANSFER_DATA:
 775		if (host->use_dma) {
 776			/* Use DMA if enabled.
 777			 * Data transfer direction is defined later by
 778			 * relying on data flags in
 779			 * jz4740_mmc_prepare_dma_data() and
 780			 * jz4740_mmc_start_dma_transfer().
 781			 */
 782			timeout = jz4740_mmc_start_dma_transfer(host, data);
 783			data->bytes_xfered = data->blocks * data->blksz;
 784		} else if (data->flags & MMC_DATA_READ)
 785			/* Use PIO if DMA is not enabled.
 786			 * Data transfer direction was defined before
 787			 * by relying on data flags in
 788			 * jz_mmc_prepare_data_transfer().
 789			 */
 790			timeout = jz4740_mmc_read_data(host, data);
 791		else
 792			timeout = jz4740_mmc_write_data(host, data);
 793
 794		if (unlikely(timeout)) {
 795			host->state = JZ4740_MMC_STATE_TRANSFER_DATA;
 796			break;
 797		}
 798
 799		jz4740_mmc_transfer_check_state(host, data);
 800
 801		timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
 802		if (unlikely(timeout)) {
 803			host->state = JZ4740_MMC_STATE_SEND_STOP;
 804			break;
 805		}
 806		jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
 807		fallthrough;
 808
 809	case JZ4740_MMC_STATE_SEND_STOP:
 810		if (!req->stop)
 811			break;
 812
 813		jz4740_mmc_send_command(host, req->stop);
 814
 815		if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) {
 816			timeout = jz4740_mmc_poll_irq(host,
 817						      JZ_MMC_IRQ_PRG_DONE);
 818			if (timeout) {
 819				host->state = JZ4740_MMC_STATE_DONE;
 820				break;
 821			}
 822		}
 823		fallthrough;
 824
 825	case JZ4740_MMC_STATE_DONE:
 826		break;
 827	}
 828
 829	if (!timeout)
 830		jz4740_mmc_request_done(host);
 831
 832	return IRQ_HANDLED;
 833}
 834
 835static irqreturn_t jz_mmc_irq(int irq, void *devid)
 836{
 837	struct jz4740_mmc_host *host = devid;
 838	struct mmc_command *cmd = host->cmd;
 839	uint32_t irq_reg, status, tmp;
 840
 841	status = readl(host->base + JZ_REG_MMC_STATUS);
 842	irq_reg = jz4740_mmc_read_irq_reg(host);
 843
 844	tmp = irq_reg;
 845	irq_reg &= ~host->irq_mask;
 846
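	/*
	 * irq_reg now holds only the pending interrupts that are enabled.
	 * Acknowledge any other pending bits immediately, except the FIFO
	 * request and transfer/program-done flags, which are consumed by the
	 * polling and threaded-IRQ paths.
	 */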
 847	tmp &= ~(JZ_MMC_IRQ_TXFIFO_WR_REQ | JZ_MMC_IRQ_RXFIFO_RD_REQ |
 848		JZ_MMC_IRQ_PRG_DONE | JZ_MMC_IRQ_DATA_TRAN_DONE);
 849
 850	if (tmp != irq_reg)
 851		jz4740_mmc_write_irq_reg(host, tmp & ~irq_reg);
 852
 853	if (irq_reg & JZ_MMC_IRQ_SDIO) {
 854		jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_SDIO);
 855		mmc_signal_sdio_irq(host->mmc);
 856		irq_reg &= ~JZ_MMC_IRQ_SDIO;
 857	}
 858
 859	if (host->req && cmd && irq_reg) {
 860		if (test_and_clear_bit(0, &host->waiting)) {
 861			del_timer(&host->timeout_timer);
 862
 863			if (status & JZ_MMC_STATUS_TIMEOUT_RES) {
 864				cmd->error = -ETIMEDOUT;
 865			} else if (status & JZ_MMC_STATUS_CRC_RES_ERR) {
 866				cmd->error = -EIO;
 867			} else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR |
 868				    JZ_MMC_STATUS_CRC_WRITE_ERROR)) {
 869				if (cmd->data)
 870					cmd->data->error = -EIO;
 871				cmd->error = -EIO;
 872			}
 873
 874			jz4740_mmc_set_irq_enabled(host, irq_reg, false);
 875			jz4740_mmc_write_irq_reg(host, irq_reg);
 876
 877			return IRQ_WAKE_THREAD;
 878		}
 879	}
 880
 881	return IRQ_HANDLED;
 882}
 883
 884static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate)
 885{
 886	int div = 0;
 887	int real_rate;
 888
 889	jz4740_mmc_clock_disable(host);
 890	clk_set_rate(host->clk, host->mmc->f_max);
 891
 892	real_rate = clk_get_rate(host->clk);
 893
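	/*
	 * Each divider step halves the clock; pick the smallest divider
	 * (0..7) for which the resulting rate does not exceed the requested
	 * rate, then program it into CLKRT.
	 */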
 894	while (real_rate > rate && div < 7) {
 895		++div;
 896		real_rate >>= 1;
 897	}
 898
 899	writew(div, host->base + JZ_REG_MMC_CLKRT);
 900
 901	if (real_rate > 25000000) {
 902		if (host->version >= JZ_MMC_JZ4780) {
 903			writel(JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY |
 904				   JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY |
 905				   JZ_MMC_LPM_LOW_POWER_MODE_EN,
 906				   host->base + JZ_REG_MMC_LPM);
 907		} else if (host->version >= JZ_MMC_JZ4760) {
 908			writel(JZ_MMC_LPM_DRV_RISING |
 909				   JZ_MMC_LPM_LOW_POWER_MODE_EN,
 910				   host->base + JZ_REG_MMC_LPM);
 911		} else if (host->version >= JZ_MMC_JZ4725B)
 912			writel(JZ_MMC_LPM_LOW_POWER_MODE_EN,
 913				   host->base + JZ_REG_MMC_LPM);
 914	}
 915
 916	return real_rate;
 917}
 918
 919static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
 920{
 921	struct jz4740_mmc_host *host = mmc_priv(mmc);
 922
 923	host->req = req;
 924
 925	jz4740_mmc_write_irq_reg(host, ~0);
 926	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, true);
 927
 928	host->state = JZ4740_MMC_STATE_READ_RESPONSE;
 929	set_bit(0, &host->waiting);
 930	mod_timer(&host->timeout_timer,
 931		  jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS));
 932	jz4740_mmc_send_command(host, req->cmd);
 933}
 934
 935static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 936{
 937	struct jz4740_mmc_host *host = mmc_priv(mmc);
 938	if (ios->clock)
 939		jz4740_mmc_set_clock_rate(host, ios->clock);
 940
 941	switch (ios->power_mode) {
 942	case MMC_POWER_UP:
 943		jz4740_mmc_reset(host);
 944		if (!IS_ERR(mmc->supply.vmmc))
 945			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
 946		host->cmdat |= JZ_MMC_CMDAT_INIT;
 947		clk_prepare_enable(host->clk);
 948		break;
 949	case MMC_POWER_ON:
 950		break;
 951	default:
 952		if (!IS_ERR(mmc->supply.vmmc))
 953			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
 954		clk_disable_unprepare(host->clk);
 955		break;
 956	}
 957
 958	switch (ios->bus_width) {
 959	case MMC_BUS_WIDTH_1:
 960		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
 961		break;
 962	case MMC_BUS_WIDTH_4:
 963		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
 964		host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
 965		break;
 966	case MMC_BUS_WIDTH_8:
 967		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
 968		host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_8BIT;
 969		break;
 970	default:
 971		break;
 972	}
 973}
 974
 975static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
 976{
 977	struct jz4740_mmc_host *host = mmc_priv(mmc);
 978	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, enable);
 979}
 980
 981static const struct mmc_host_ops jz4740_mmc_ops = {
 982	.request	= jz4740_mmc_request,
 983	.pre_req	= jz4740_mmc_pre_request,
 984	.post_req	= jz4740_mmc_post_request,
 985	.set_ios	= jz4740_mmc_set_ios,
 986	.get_ro		= mmc_gpio_get_ro,
 987	.get_cd		= mmc_gpio_get_cd,
 988	.enable_sdio_irq = jz4740_mmc_enable_sdio_irq,
 989};
 990
 991static const struct of_device_id jz4740_mmc_of_match[] = {
 992	{ .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 },
 993	{ .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B },
 994	{ .compatible = "ingenic,jz4760-mmc", .data = (void *) JZ_MMC_JZ4760 },
 995	{ .compatible = "ingenic,jz4775-mmc", .data = (void *) JZ_MMC_JZ4780 },
 996	{ .compatible = "ingenic,jz4780-mmc", .data = (void *) JZ_MMC_JZ4780 },
 997	{ .compatible = "ingenic,x1000-mmc", .data = (void *) JZ_MMC_X1000 },
 998	{},
 999};
1000MODULE_DEVICE_TABLE(of, jz4740_mmc_of_match);
1001
1002static int jz4740_mmc_probe(struct platform_device* pdev)
1003{
1004	int ret;
1005	struct mmc_host *mmc;
1006	struct jz4740_mmc_host *host;
1007	const struct of_device_id *match;
1008
1009	mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev);
1010	if (!mmc) {
1011		dev_err(&pdev->dev, "Failed to alloc mmc host structure\n");
1012		return -ENOMEM;
1013	}
1014
1015	host = mmc_priv(mmc);
1016
1017	match = of_match_device(jz4740_mmc_of_match, &pdev->dev);
1018	if (match) {
1019		host->version = (enum jz4740_mmc_version)match->data;
1020	} else {
1021		/* JZ4740 should be the only one using legacy probe */
1022		host->version = JZ_MMC_JZ4740;
1023	}
1024
1025	ret = mmc_of_parse(mmc);
1026	if (ret) {
1027		dev_err_probe(&pdev->dev, ret, "could not parse device properties\n");
1028		goto err_free_host;
1029	}
1030
1031	mmc_regulator_get_supply(mmc);
1032
1033	host->irq = platform_get_irq(pdev, 0);
1034	if (host->irq < 0) {
1035		ret = host->irq;
1036		goto err_free_host;
1037	}
1038
1039	host->clk = devm_clk_get(&pdev->dev, "mmc");
1040	if (IS_ERR(host->clk)) {
1041		ret = PTR_ERR(host->clk);
1042		dev_err(&pdev->dev, "Failed to get mmc clock\n");
1043		goto err_free_host;
1044	}
1045
1046	host->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1047	host->base = devm_ioremap_resource(&pdev->dev, host->mem_res);
1048	if (IS_ERR(host->base)) {
1049		ret = PTR_ERR(host->base);
1050		goto err_free_host;
1051	}
1052
1053	mmc->ops = &jz4740_mmc_ops;
1054	if (!mmc->f_max)
1055		mmc->f_max = JZ_MMC_CLK_RATE;
1056
1057	/*
1058	 * There seems to be a problem with this driver on the JZ4760 and
1059	 * JZ4760B SoCs. There, when using the maximum rate supported (50 MHz),
1060	 * the communication fails with many SD cards.
1061	 * Until this bug is sorted out, limit the maximum rate to 24 MHz.
1062	 */
1063	if (host->version == JZ_MMC_JZ4760 && mmc->f_max > JZ_MMC_CLK_RATE)
1064		mmc->f_max = JZ_MMC_CLK_RATE;
1065
1066	mmc->f_min = mmc->f_max / 128;
1067	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1068
1069	/*
1070	 * We use a fixed timeout of 5s, hence inform the core about it. A
1071	 * future improvement should instead respect the cmd->busy_timeout.
1072	 */
1073	mmc->max_busy_timeout = JZ_MMC_REQ_TIMEOUT_MS;
1074
1075	mmc->max_blk_size = (1 << 10) - 1;
1076	mmc->max_blk_count = (1 << 15) - 1;
1077	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1078
1079	mmc->max_segs = 128;
1080	mmc->max_seg_size = mmc->max_req_size;
1081
1082	host->mmc = mmc;
1083	host->pdev = pdev;
1084	spin_lock_init(&host->lock);
1085	host->irq_mask = ~0;
1086
1087	jz4740_mmc_reset(host);
1088
1089	ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0,
1090			dev_name(&pdev->dev), host);
1091	if (ret) {
1092		dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
1093		goto err_free_host;
1094	}
1095
1096	jz4740_mmc_clock_disable(host);
1097	timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0);
1098
1099	ret = jz4740_mmc_acquire_dma_channels(host);
1100	if (ret == -EPROBE_DEFER)
1101		goto err_free_irq;
1102	host->use_dma = !ret;
1103
1104	platform_set_drvdata(pdev, host);
1105	ret = mmc_add_host(mmc);
1106
1107	if (ret) {
1108		dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret);
1109		goto err_release_dma;
1110	}
1111	dev_info(&pdev->dev, "Ingenic SD/MMC card driver registered\n");
1112
1113	dev_info(&pdev->dev, "Using %s, %d-bit mode\n",
1114		 host->use_dma ? "DMA" : "PIO",
1115		 (mmc->caps & MMC_CAP_8_BIT_DATA) ? 8 :
1116		 ((mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1));
1117
1118	return 0;
1119
1120err_release_dma:
1121	if (host->use_dma)
1122		jz4740_mmc_release_dma_channels(host);
1123err_free_irq:
1124	free_irq(host->irq, host);
1125err_free_host:
1126	mmc_free_host(mmc);
1127
1128	return ret;
1129}
1130
1131static int jz4740_mmc_remove(struct platform_device *pdev)
1132{
1133	struct jz4740_mmc_host *host = platform_get_drvdata(pdev);
1134
1135	del_timer_sync(&host->timeout_timer);
1136	jz4740_mmc_set_irq_enabled(host, 0xff, false);
1137	jz4740_mmc_reset(host);
1138
1139	mmc_remove_host(host->mmc);
1140
1141	free_irq(host->irq, host);
1142
1143	if (host->use_dma)
1144		jz4740_mmc_release_dma_channels(host);
1145
1146	mmc_free_host(host->mmc);
1147
1148	return 0;
1149}
1150
1151static int jz4740_mmc_suspend(struct device *dev)
1152{
1153	return pinctrl_pm_select_sleep_state(dev);
1154}
1155
1156static int jz4740_mmc_resume(struct device *dev)
1157{
1158	return pinctrl_select_default_state(dev);
1159}
1160
1161static DEFINE_SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
1162				jz4740_mmc_resume);
1163
1164static struct platform_driver jz4740_mmc_driver = {
1165	.probe = jz4740_mmc_probe,
1166	.remove = jz4740_mmc_remove,
1167	.driver = {
1168		.name = "jz4740-mmc",
1169		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
1170		.of_match_table = of_match_ptr(jz4740_mmc_of_match),
1171		.pm = pm_sleep_ptr(&jz4740_mmc_pm_ops),
1172	},
1173};
1174
1175module_platform_driver(jz4740_mmc_driver);
1176
1177MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver");
1178MODULE_LICENSE("GPL");
1179MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
v4.10.11
 
   1/*
   2 *  Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
 
 
   3 *  JZ4740 SD/MMC controller driver
   4 *
   5 *  This program is free software; you can redistribute  it and/or modify it
   6 *  under  the terms of  the GNU General  Public License as published by the
   7 *  Free Software Foundation;  either version 2 of the  License, or (at your
   8 *  option) any later version.
   9 *
  10 *  You should have received a copy of the  GNU General Public License along
  11 *  with this program; if not, write  to the Free Software Foundation, Inc.,
  12 *  675 Mass Ave, Cambridge, MA 02139, USA.
  13 *
  14 */
  15
  16#include <linux/mmc/host.h>
  17#include <linux/mmc/slot-gpio.h>
 
 
 
  18#include <linux/err.h>
 
  19#include <linux/io.h>
  20#include <linux/irq.h>
  21#include <linux/interrupt.h>
 
  22#include <linux/module.h>
 
 
  23#include <linux/platform_device.h>
  24#include <linux/delay.h>
  25#include <linux/scatterlist.h>
  26#include <linux/clk.h>
  27
  28#include <linux/bitops.h>
  29#include <linux/gpio.h>
  30#include <asm/mach-jz4740/gpio.h>
  31#include <asm/cacheflush.h>
  32#include <linux/dma-mapping.h>
  33#include <linux/dmaengine.h>
  34
  35#include <asm/mach-jz4740/dma.h>
  36#include <asm/mach-jz4740/jz4740_mmc.h>
  37
  38#define JZ_REG_MMC_STRPCL	0x00
  39#define JZ_REG_MMC_STATUS	0x04
  40#define JZ_REG_MMC_CLKRT	0x08
  41#define JZ_REG_MMC_CMDAT	0x0C
  42#define JZ_REG_MMC_RESTO	0x10
  43#define JZ_REG_MMC_RDTO		0x14
  44#define JZ_REG_MMC_BLKLEN	0x18
  45#define JZ_REG_MMC_NOB		0x1C
  46#define JZ_REG_MMC_SNOB		0x20
  47#define JZ_REG_MMC_IMASK	0x24
  48#define JZ_REG_MMC_IREG		0x28
  49#define JZ_REG_MMC_CMD		0x2C
  50#define JZ_REG_MMC_ARG		0x30
  51#define JZ_REG_MMC_RESP_FIFO	0x34
  52#define JZ_REG_MMC_RXFIFO	0x38
  53#define JZ_REG_MMC_TXFIFO	0x3C
 
 
  54
  55#define JZ_MMC_STRPCL_EXIT_MULTIPLE BIT(7)
  56#define JZ_MMC_STRPCL_EXIT_TRANSFER BIT(6)
  57#define JZ_MMC_STRPCL_START_READWAIT BIT(5)
  58#define JZ_MMC_STRPCL_STOP_READWAIT BIT(4)
  59#define JZ_MMC_STRPCL_RESET BIT(3)
  60#define JZ_MMC_STRPCL_START_OP BIT(2)
  61#define JZ_MMC_STRPCL_CLOCK_CONTROL (BIT(1) | BIT(0))
  62#define JZ_MMC_STRPCL_CLOCK_STOP BIT(0)
  63#define JZ_MMC_STRPCL_CLOCK_START BIT(1)
  64
  65
  66#define JZ_MMC_STATUS_IS_RESETTING BIT(15)
  67#define JZ_MMC_STATUS_SDIO_INT_ACTIVE BIT(14)
  68#define JZ_MMC_STATUS_PRG_DONE BIT(13)
  69#define JZ_MMC_STATUS_DATA_TRAN_DONE BIT(12)
  70#define JZ_MMC_STATUS_END_CMD_RES BIT(11)
  71#define JZ_MMC_STATUS_DATA_FIFO_AFULL BIT(10)
  72#define JZ_MMC_STATUS_IS_READWAIT BIT(9)
  73#define JZ_MMC_STATUS_CLK_EN BIT(8)
  74#define JZ_MMC_STATUS_DATA_FIFO_FULL BIT(7)
  75#define JZ_MMC_STATUS_DATA_FIFO_EMPTY BIT(6)
  76#define JZ_MMC_STATUS_CRC_RES_ERR BIT(5)
  77#define JZ_MMC_STATUS_CRC_READ_ERROR BIT(4)
  78#define JZ_MMC_STATUS_TIMEOUT_WRITE BIT(3)
  79#define JZ_MMC_STATUS_CRC_WRITE_ERROR BIT(2)
  80#define JZ_MMC_STATUS_TIMEOUT_RES BIT(1)
  81#define JZ_MMC_STATUS_TIMEOUT_READ BIT(0)
  82
  83#define JZ_MMC_STATUS_READ_ERROR_MASK (BIT(4) | BIT(0))
  84#define JZ_MMC_STATUS_WRITE_ERROR_MASK (BIT(3) | BIT(2))
  85
  86
  87#define JZ_MMC_CMDAT_IO_ABORT BIT(11)
  88#define JZ_MMC_CMDAT_BUS_WIDTH_4BIT BIT(10)
 
 
  89#define JZ_MMC_CMDAT_DMA_EN BIT(8)
  90#define JZ_MMC_CMDAT_INIT BIT(7)
  91#define JZ_MMC_CMDAT_BUSY BIT(6)
  92#define JZ_MMC_CMDAT_STREAM BIT(5)
  93#define JZ_MMC_CMDAT_WRITE BIT(4)
  94#define JZ_MMC_CMDAT_DATA_EN BIT(3)
  95#define JZ_MMC_CMDAT_RESPONSE_FORMAT (BIT(2) | BIT(1) | BIT(0))
  96#define JZ_MMC_CMDAT_RSP_R1 1
  97#define JZ_MMC_CMDAT_RSP_R2 2
  98#define JZ_MMC_CMDAT_RSP_R3 3
  99
 100#define JZ_MMC_IRQ_SDIO BIT(7)
 101#define JZ_MMC_IRQ_TXFIFO_WR_REQ BIT(6)
 102#define JZ_MMC_IRQ_RXFIFO_RD_REQ BIT(5)
 103#define JZ_MMC_IRQ_END_CMD_RES BIT(2)
 104#define JZ_MMC_IRQ_PRG_DONE BIT(1)
 105#define JZ_MMC_IRQ_DATA_TRAN_DONE BIT(0)
 106
 
 
 
 
 
 
 
 
 107
 108#define JZ_MMC_CLK_RATE 24000000
 
 
 
 
 
 
 
 
 
 109
 110enum jz4740_mmc_state {
 111	JZ4740_MMC_STATE_READ_RESPONSE,
 112	JZ4740_MMC_STATE_TRANSFER_DATA,
 113	JZ4740_MMC_STATE_SEND_STOP,
 114	JZ4740_MMC_STATE_DONE,
 115};
 116
 117struct jz4740_mmc_host_next {
 118	int sg_len;
 119	s32 cookie;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 120};
 121
 122struct jz4740_mmc_host {
 123	struct mmc_host *mmc;
 124	struct platform_device *pdev;
 125	struct jz4740_mmc_platform_data *pdata;
 126	struct clk *clk;
 127
 
 
 128	int irq;
 129	int card_detect_irq;
 130
 131	void __iomem *base;
 132	struct resource *mem_res;
 133	struct mmc_request *req;
 134	struct mmc_command *cmd;
 135
 136	unsigned long waiting;
 137
 138	uint32_t cmdat;
 139
 140	uint16_t irq_mask;
 141
 142	spinlock_t lock;
 143
 144	struct timer_list timeout_timer;
 145	struct sg_mapping_iter miter;
 146	enum jz4740_mmc_state state;
 147
 148	/* DMA support */
 149	struct dma_chan *dma_rx;
 150	struct dma_chan *dma_tx;
 151	struct jz4740_mmc_host_next next_data;
 152	bool use_dma;
 153	int sg_len;
 154
 155/* The DMA trigger level is 8 words, that is to say, the DMA read
 156 * trigger is when data words in MSC_RXFIFO is >= 8 and the DMA write
 157 * trigger is when data words in MSC_TXFIFO is < 8.
 158 */
 159#define JZ4740_MMC_FIFO_HALF_SIZE 8
 160};
 161
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 162/*----------------------------------------------------------------------------*/
 163/* DMA infrastructure */
 164
 165static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host)
 166{
 167	if (!host->use_dma)
 168		return;
 169
 170	dma_release_channel(host->dma_tx);
 171	dma_release_channel(host->dma_rx);
 
 172}
 173
 174static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
 175{
 176	dma_cap_mask_t mask;
 177
 178	dma_cap_zero(mask);
 179	dma_cap_set(DMA_SLAVE, mask);
 
 
 
 
 
 
 180
 181	host->dma_tx = dma_request_channel(mask, NULL, host);
 182	if (!host->dma_tx) {
 183		dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n");
 184		return -ENODEV;
 185	}
 186
 187	host->dma_rx = dma_request_channel(mask, NULL, host);
 188	if (!host->dma_rx) {
 189		dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n");
 190		goto free_master_write;
 
 191	}
 192
 193	/* Initialize DMA pre request cookie */
 194	host->next_data.cookie = 1;
 
 
 
 
 
 195
 196	return 0;
 
 
 
 
 
 
 197
 198free_master_write:
 199	dma_release_channel(host->dma_tx);
 200	return -ENODEV;
 201}
 202
 203static inline int jz4740_mmc_get_dma_dir(struct mmc_data *data)
 204{
 205	return (data->flags & MMC_DATA_READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 206}
 207
 208static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
 209						       struct mmc_data *data)
 210{
 211	return (data->flags & MMC_DATA_READ) ? host->dma_rx : host->dma_tx;
 
 
 
 212}
 213
 214static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
 215				 struct mmc_data *data)
 216{
 217	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
 218	enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data);
 219
 220	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
 
 221}
 222
 223/* Prepares DMA data for current/next transfer, returns non-zero on failure */
 
 
 224static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
 225				       struct mmc_data *data,
 226				       struct jz4740_mmc_host_next *next,
 227				       struct dma_chan *chan)
 228{
 229	struct jz4740_mmc_host_next *next_data = &host->next_data;
 230	enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data);
 231	int sg_len;
 232
 233	if (!next && data->host_cookie &&
 234	    data->host_cookie != host->next_data.cookie) {
 235		dev_warn(mmc_dev(host->mmc),
 236			 "[%s] invalid cookie: data->host_cookie %d host->next_data.cookie %d\n",
 237			 __func__,
 238			 data->host_cookie,
 239			 host->next_data.cookie);
 240		data->host_cookie = 0;
 241	}
 242
 243	/* Check if next job is already prepared */
 244	if (next || data->host_cookie != host->next_data.cookie) {
 245		sg_len = dma_map_sg(chan->device->dev,
 246				    data->sg,
 247				    data->sg_len,
 248				    dir);
 249
 250	} else {
 251		sg_len = next_data->sg_len;
 252		next_data->sg_len = 0;
 253	}
 254
 255	if (sg_len <= 0) {
 256		dev_err(mmc_dev(host->mmc),
 257			"Failed to map scatterlist for DMA operation\n");
 258		return -EINVAL;
 259	}
 260
 261	if (next) {
 262		next->sg_len = sg_len;
 263		data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
 264	} else
 265		host->sg_len = sg_len;
 266
 267	return 0;
 268}
 269
 270static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
 271					 struct mmc_data *data)
 272{
 273	int ret;
 274	struct dma_chan *chan;
 275	struct dma_async_tx_descriptor *desc;
 276	struct dma_slave_config conf = {
 277		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 278		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 279		.src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
 280		.dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
 281	};
 
 282
 283	if (data->flags & MMC_DATA_WRITE) {
 284		conf.direction = DMA_MEM_TO_DEV;
 285		conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
 286		conf.slave_id = JZ4740_DMA_TYPE_MMC_TRANSMIT;
 287		chan = host->dma_tx;
 288	} else {
 289		conf.direction = DMA_DEV_TO_MEM;
 290		conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
 291		conf.slave_id = JZ4740_DMA_TYPE_MMC_RECEIVE;
 292		chan = host->dma_rx;
 293	}
 294
 295	ret = jz4740_mmc_prepare_dma_data(host, data, NULL, chan);
 296	if (ret)
 297		return ret;
 298
 299	dmaengine_slave_config(chan, &conf);
 300	desc = dmaengine_prep_slave_sg(chan,
 301				       data->sg,
 302				       host->sg_len,
 303				       conf.direction,
 304				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 305	if (!desc) {
 306		dev_err(mmc_dev(host->mmc),
 307			"Failed to allocate DMA %s descriptor",
 308			 conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX");
 309		goto dma_unmap;
 310	}
 311
 312	dmaengine_submit(desc);
 313	dma_async_issue_pending(chan);
 314
 315	return 0;
 316
 317dma_unmap:
 318	jz4740_mmc_dma_unmap(host, data);
 
 319	return -ENOMEM;
 320}
 321
 322static void jz4740_mmc_pre_request(struct mmc_host *mmc,
 323				   struct mmc_request *mrq)
 324{
 325	struct jz4740_mmc_host *host = mmc_priv(mmc);
 326	struct mmc_data *data = mrq->data;
 327	struct jz4740_mmc_host_next *next_data = &host->next_data;
 328
 329	BUG_ON(data->host_cookie);
 330
 331	if (host->use_dma) {
 332		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
 333
 334		if (jz4740_mmc_prepare_dma_data(host, data, next_data, chan))
 335			data->host_cookie = 0;
 336	}
 337}
 338
 339static void jz4740_mmc_post_request(struct mmc_host *mmc,
 340				    struct mmc_request *mrq,
 341				    int err)
 342{
 343	struct jz4740_mmc_host *host = mmc_priv(mmc);
 344	struct mmc_data *data = mrq->data;
 345
 346	if (host->use_dma && data->host_cookie) {
 347		jz4740_mmc_dma_unmap(host, data);
 348		data->host_cookie = 0;
 349	}
 350
 351	if (err) {
 352		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
 353
 354		dmaengine_terminate_all(chan);
 355	}
 356}
 357
 358/*----------------------------------------------------------------------------*/
 359
 360static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
 361	unsigned int irq, bool enabled)
 362{
 363	unsigned long flags;
 364
 365	spin_lock_irqsave(&host->lock, flags);
 366	if (enabled)
 367		host->irq_mask &= ~irq;
 368	else
 369		host->irq_mask |= irq;
 
 
 370	spin_unlock_irqrestore(&host->lock, flags);
 371
 372	writew(host->irq_mask, host->base + JZ_REG_MMC_IMASK);
 373}
 374
 375static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
 376	bool start_transfer)
 377{
 378	uint16_t val = JZ_MMC_STRPCL_CLOCK_START;
 379
 380	if (start_transfer)
 381		val |= JZ_MMC_STRPCL_START_OP;
 382
 383	writew(val, host->base + JZ_REG_MMC_STRPCL);
 384}
 385
 386static void jz4740_mmc_clock_disable(struct jz4740_mmc_host *host)
 387{
 388	uint32_t status;
 389	unsigned int timeout = 1000;
 390
 391	writew(JZ_MMC_STRPCL_CLOCK_STOP, host->base + JZ_REG_MMC_STRPCL);
 392	do {
 393		status = readl(host->base + JZ_REG_MMC_STATUS);
 394	} while (status & JZ_MMC_STATUS_CLK_EN && --timeout);
 395}
 396
 397static void jz4740_mmc_reset(struct jz4740_mmc_host *host)
 398{
 399	uint32_t status;
 400	unsigned int timeout = 1000;
 401
 402	writew(JZ_MMC_STRPCL_RESET, host->base + JZ_REG_MMC_STRPCL);
 403	udelay(10);
 404	do {
 405		status = readl(host->base + JZ_REG_MMC_STATUS);
 406	} while (status & JZ_MMC_STATUS_IS_RESETTING && --timeout);
 407}
 408
 409static void jz4740_mmc_request_done(struct jz4740_mmc_host *host)
 410{
 411	struct mmc_request *req;
 
 412
 413	req = host->req;
 
 414	host->req = NULL;
 415
 
 
 416	mmc_request_done(host->mmc, req);
 417}
 418
 419static unsigned int jz4740_mmc_poll_irq(struct jz4740_mmc_host *host,
 420	unsigned int irq)
 421{
 422	unsigned int timeout = 0x800;
 423	uint16_t status;
 424
 425	do {
 426		status = readw(host->base + JZ_REG_MMC_IREG);
 427	} while (!(status & irq) && --timeout);
 428
 429	if (timeout == 0) {
 430		set_bit(0, &host->waiting);
 431		mod_timer(&host->timeout_timer, jiffies + 5*HZ);
 
 432		jz4740_mmc_set_irq_enabled(host, irq, true);
 433		return true;
 434	}
 435
 436	return false;
 437}
 438
 439static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host,
 440	struct mmc_data *data)
 441{
 442	int status;
 443
 444	status = readl(host->base + JZ_REG_MMC_STATUS);
 445	if (status & JZ_MMC_STATUS_WRITE_ERROR_MASK) {
 446		if (status & (JZ_MMC_STATUS_TIMEOUT_WRITE)) {
 447			host->req->cmd->error = -ETIMEDOUT;
 448			data->error = -ETIMEDOUT;
 449		} else {
 450			host->req->cmd->error = -EIO;
 451			data->error = -EIO;
 452		}
 453	} else if (status & JZ_MMC_STATUS_READ_ERROR_MASK) {
 454		if (status & (JZ_MMC_STATUS_TIMEOUT_READ)) {
 455			host->req->cmd->error = -ETIMEDOUT;
 456			data->error = -ETIMEDOUT;
 457		} else {
 458			host->req->cmd->error = -EIO;
 459			data->error = -EIO;
 460		}
 461	}
 462}
 463
 464static bool jz4740_mmc_write_data(struct jz4740_mmc_host *host,
 465	struct mmc_data *data)
 466{
 467	struct sg_mapping_iter *miter = &host->miter;
 468	void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO;
 469	uint32_t *buf;
 470	bool timeout;
 471	size_t i, j;
 472
 473	while (sg_miter_next(miter)) {
 474		buf = miter->addr;
 475		i = miter->length / 4;
 476		j = i / 8;
 477		i = i & 0x7;
 478		while (j) {
 479			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
 480			if (unlikely(timeout))
 481				goto poll_timeout;
 482
 483			writel(buf[0], fifo_addr);
 484			writel(buf[1], fifo_addr);
 485			writel(buf[2], fifo_addr);
 486			writel(buf[3], fifo_addr);
 487			writel(buf[4], fifo_addr);
 488			writel(buf[5], fifo_addr);
 489			writel(buf[6], fifo_addr);
 490			writel(buf[7], fifo_addr);
 491			buf += 8;
 492			--j;
 493		}
 494		if (unlikely(i)) {
 495			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
 496			if (unlikely(timeout))
 497				goto poll_timeout;
 498
 499			while (i) {
 500				writel(*buf, fifo_addr);
 501				++buf;
 502				--i;
 503			}
 504		}
 505		data->bytes_xfered += miter->length;
 506	}
 507	sg_miter_stop(miter);
 508
 509	return false;
 510
 511poll_timeout:
 512	miter->consumed = (void *)buf - miter->addr;
 513	data->bytes_xfered += miter->consumed;
 514	sg_miter_stop(miter);
 515
 516	return true;
 517}
 518
 519static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host,
 520				struct mmc_data *data)
 521{
 522	struct sg_mapping_iter *miter = &host->miter;
 523	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO;
 524	uint32_t *buf;
 525	uint32_t d;
 526	uint16_t status;
 527	size_t i, j;
 528	unsigned int timeout;
 529
 530	while (sg_miter_next(miter)) {
 531		buf = miter->addr;
 532		i = miter->length;
 533		j = i / 32;
 534		i = i & 0x1f;
 535		while (j) {
 536			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
 537			if (unlikely(timeout))
 538				goto poll_timeout;
 539
 540			buf[0] = readl(fifo_addr);
 541			buf[1] = readl(fifo_addr);
 542			buf[2] = readl(fifo_addr);
 543			buf[3] = readl(fifo_addr);
 544			buf[4] = readl(fifo_addr);
 545			buf[5] = readl(fifo_addr);
 546			buf[6] = readl(fifo_addr);
 547			buf[7] = readl(fifo_addr);
 548
 549			buf += 8;
 550			--j;
 551		}
 552
 553		if (unlikely(i)) {
 554			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
 555			if (unlikely(timeout))
 556				goto poll_timeout;
 557
 558			while (i >= 4) {
 559				*buf++ = readl(fifo_addr);
 560				i -= 4;
 561			}
 562			if (unlikely(i > 0)) {
 563				d = readl(fifo_addr);
 564				memcpy(buf, &d, i);
 565			}
 566		}
 567		data->bytes_xfered += miter->length;
 568
 569		/* This can go away once MIPS implements
 570		 * flush_kernel_dcache_page */
 571		flush_dcache_page(miter->page);
 572	}
 573	sg_miter_stop(miter);
 574
 575	/* For whatever reason there is sometime one word more in the fifo then
 576	 * requested */
 577	timeout = 1000;
 578	status = readl(host->base + JZ_REG_MMC_STATUS);
 579	while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) {
 580		d = readl(fifo_addr);
 581		status = readl(host->base + JZ_REG_MMC_STATUS);
 582	}
 583
 584	return false;
 585
 586poll_timeout:
 587	miter->consumed = (void *)buf - miter->addr;
 588	data->bytes_xfered += miter->consumed;
 589	sg_miter_stop(miter);
 590
 591	return true;
 592}
 593
 594static void jz4740_mmc_timeout(unsigned long data)
 595{
 596	struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)data;
 597
 598	if (!test_and_clear_bit(0, &host->waiting))
 599		return;
 600
 601	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, false);
 602
 603	host->req->cmd->error = -ETIMEDOUT;
 604	jz4740_mmc_request_done(host);
 605}
 606
 607static void jz4740_mmc_read_response(struct jz4740_mmc_host *host,
 608	struct mmc_command *cmd)
 609{
 610	int i;
 611	uint16_t tmp;
 612	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RESP_FIFO;
 613
 614	if (cmd->flags & MMC_RSP_136) {
 615		tmp = readw(fifo_addr);
 616		for (i = 0; i < 4; ++i) {
 617			cmd->resp[i] = tmp << 24;
 618			tmp = readw(fifo_addr);
 619			cmd->resp[i] |= tmp << 8;
 620			tmp = readw(fifo_addr);
 621			cmd->resp[i] |= tmp >> 8;
 622		}
 623	} else {
 624		cmd->resp[0] = readw(fifo_addr) << 24;
 625		cmd->resp[0] |= readw(fifo_addr) << 8;
 626		cmd->resp[0] |= readw(fifo_addr) & 0xff;
 627	}
 628}
 629
 630static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
 631	struct mmc_command *cmd)
 632{
 633	uint32_t cmdat = host->cmdat;
 634
 635	host->cmdat &= ~JZ_MMC_CMDAT_INIT;
 636	jz4740_mmc_clock_disable(host);
 637
 638	host->cmd = cmd;
 639
 640	if (cmd->flags & MMC_RSP_BUSY)
 641		cmdat |= JZ_MMC_CMDAT_BUSY;
 642
 643	switch (mmc_resp_type(cmd)) {
 644	case MMC_RSP_R1B:
 645	case MMC_RSP_R1:
 646		cmdat |= JZ_MMC_CMDAT_RSP_R1;
 647		break;
 648	case MMC_RSP_R2:
 649		cmdat |= JZ_MMC_CMDAT_RSP_R2;
 650		break;
 651	case MMC_RSP_R3:
 652		cmdat |= JZ_MMC_CMDAT_RSP_R3;
 653		break;
 654	default:
 655		break;
 656	}
 657
 658	if (cmd->data) {
 659		cmdat |= JZ_MMC_CMDAT_DATA_EN;
 660		if (cmd->data->flags & MMC_DATA_WRITE)
 661			cmdat |= JZ_MMC_CMDAT_WRITE;
 662		if (host->use_dma)
 663			cmdat |= JZ_MMC_CMDAT_DMA_EN;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 664
 665		writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN);
 666		writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB);
 667	}
 668
 669	writeb(cmd->opcode, host->base + JZ_REG_MMC_CMD);
 670	writel(cmd->arg, host->base + JZ_REG_MMC_ARG);
 671	writel(cmdat, host->base + JZ_REG_MMC_CMDAT);
 672
 673	jz4740_mmc_clock_enable(host, 1);
 674}
 675
 676static void jz_mmc_prepare_data_transfer(struct jz4740_mmc_host *host)
 677{
 678	struct mmc_command *cmd = host->req->cmd;
 679	struct mmc_data *data = cmd->data;
 680	int direction;
 681
 682	if (data->flags & MMC_DATA_READ)
 683		direction = SG_MITER_TO_SG;
 684	else
 685		direction = SG_MITER_FROM_SG;
 686
 687	sg_miter_start(&host->miter, data->sg, data->sg_len, direction);
 688}
 689
 690
 691static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
 692{
 693	struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid;
 694	struct mmc_command *cmd = host->req->cmd;
 695	struct mmc_request *req = host->req;
 696	struct mmc_data *data = cmd->data;
 697	bool timeout = false;
 698
 699	if (cmd->error)
 700		host->state = JZ4740_MMC_STATE_DONE;
 701
 702	switch (host->state) {
 703	case JZ4740_MMC_STATE_READ_RESPONSE:
 704		if (cmd->flags & MMC_RSP_PRESENT)
 705			jz4740_mmc_read_response(host, cmd);
 706
 707		if (!data)
 708			break;
 709
 710		jz_mmc_prepare_data_transfer(host);
 
 711
 712	case JZ4740_MMC_STATE_TRANSFER_DATA:
 713		if (host->use_dma) {
 714			/* Use DMA if enabled.
 715			 * Data transfer direction is defined later by
 716			 * relying on data flags in
 717			 * jz4740_mmc_prepare_dma_data() and
 718			 * jz4740_mmc_start_dma_transfer().
 719			 */
 720			timeout = jz4740_mmc_start_dma_transfer(host, data);
 721			data->bytes_xfered = data->blocks * data->blksz;
 722		} else if (data->flags & MMC_DATA_READ)
 723			/* Use PIO if DMA is not enabled.
 724			 * Data transfer direction was defined before
 725			 * by relying on data flags in
 726			 * jz_mmc_prepare_data_transfer().
 727			 */
 728			timeout = jz4740_mmc_read_data(host, data);
 729		else
 730			timeout = jz4740_mmc_write_data(host, data);
 731
 732		if (unlikely(timeout)) {
 733			host->state = JZ4740_MMC_STATE_TRANSFER_DATA;
 734			break;
 735		}
 736
 737		jz4740_mmc_transfer_check_state(host, data);
 738
 739		timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
 740		if (unlikely(timeout)) {
 741			host->state = JZ4740_MMC_STATE_SEND_STOP;
 742			break;
 743		}
 744		writew(JZ_MMC_IRQ_DATA_TRAN_DONE, host->base + JZ_REG_MMC_IREG);
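    		/* fall through */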
 745
 746	case JZ4740_MMC_STATE_SEND_STOP:
 747		if (!req->stop)
 748			break;
 749
 750		jz4740_mmc_send_command(host, req->stop);
 751
 752		if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) {
 753			timeout = jz4740_mmc_poll_irq(host,
 754						      JZ_MMC_IRQ_PRG_DONE);
 755			if (timeout) {
 756				host->state = JZ4740_MMC_STATE_DONE;
 757				break;
 758			}
 759		}
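    		/* fall through */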
 760	case JZ4740_MMC_STATE_DONE:
 761		break;
 762	}
 763
 764	if (!timeout)
 765		jz4740_mmc_request_done(host);
 766
 767	return IRQ_HANDLED;
 768}
 769
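    /*
     * Hard interrupt handler. Stray interrupt bits that are currently masked
     * (other than the FIFO and completion flags polled by the threaded
     * handler) are acknowledged and dropped. SDIO card interrupts are
     * acknowledged and signalled right away. For command completion the
     * software timeout is cancelled, error bits from the status register are
     * translated into cmd/data errors, the interrupt is masked and acked,
     * and the threaded handler is woken to run the state machine.
     */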
 770static irqreturn_t jz_mmc_irq(int irq, void *devid)
 771{
 772	struct jz4740_mmc_host *host = devid;
 773	struct mmc_command *cmd = host->cmd;
 774	uint16_t irq_reg, status, tmp;
 775
 776	irq_reg = readw(host->base + JZ_REG_MMC_IREG);
 777
 778	tmp = irq_reg;
 779	irq_reg &= ~host->irq_mask;
 780
 781	tmp &= ~(JZ_MMC_IRQ_TXFIFO_WR_REQ | JZ_MMC_IRQ_RXFIFO_RD_REQ |
 782		JZ_MMC_IRQ_PRG_DONE | JZ_MMC_IRQ_DATA_TRAN_DONE);
 783
 784	if (tmp != irq_reg)
 785		writew(tmp & ~irq_reg, host->base + JZ_REG_MMC_IREG);
 786
 787	if (irq_reg & JZ_MMC_IRQ_SDIO) {
 788		writew(JZ_MMC_IRQ_SDIO, host->base + JZ_REG_MMC_IREG);
 789		mmc_signal_sdio_irq(host->mmc);
 790		irq_reg &= ~JZ_MMC_IRQ_SDIO;
 791	}
 792
 793	if (host->req && cmd && irq_reg) {
 794		if (test_and_clear_bit(0, &host->waiting)) {
 795			del_timer(&host->timeout_timer);
 796
 797			status = readl(host->base + JZ_REG_MMC_STATUS);
 798
 799			if (status & JZ_MMC_STATUS_TIMEOUT_RES) {
 800				cmd->error = -ETIMEDOUT;
 801			} else if (status & JZ_MMC_STATUS_CRC_RES_ERR) {
 802				cmd->error = -EIO;
 803			} else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR |
 804				    JZ_MMC_STATUS_CRC_WRITE_ERROR)) {
 805				if (cmd->data)
 806					cmd->data->error = -EIO;
 807				cmd->error = -EIO;
 808			}
 809
 810			jz4740_mmc_set_irq_enabled(host, irq_reg, false);
 811			writew(irq_reg, host->base + JZ_REG_MMC_IREG);
 812
 813			return IRQ_WAKE_THREAD;
 814		}
 815	}
 816
 817	return IRQ_HANDLED;
 818}
 819
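    /*
     * The controller is fed a fixed JZ_MMC_CLK_RATE and the CLKRT register
     * divides it by a power of two (up to 128). The divider is chosen so the
     * resulting rate does not exceed the requested one where possible, and
     * the rate actually achieved is returned.
     */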
 820static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate)
 821{
 822	int div = 0;
 823	int real_rate;
 824
 825	jz4740_mmc_clock_disable(host);
 826	clk_set_rate(host->clk, JZ_MMC_CLK_RATE);
 827
 828	real_rate = clk_get_rate(host->clk);
 829
 830	while (real_rate > rate && div < 7) {
 831		++div;
 832		real_rate >>= 1;
 833	}
 834
 835	writew(div, host->base + JZ_REG_MMC_CLKRT);
 836	return real_rate;
 837}
 838
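    /*
     * Start a request: clear all pending interrupt flags, unmask the
     * end-of-command interrupt, arm a 5 second software timeout and send the
     * first command. The rest of the request (response, data transfer, stop
     * command) is driven from the threaded interrupt handler.
     */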
 839static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
 840{
 841	struct jz4740_mmc_host *host = mmc_priv(mmc);
 842
 843	host->req = req;
 844
 845	writew(0xffff, host->base + JZ_REG_MMC_IREG);
 846
 847	writew(JZ_MMC_IRQ_END_CMD_RES, host->base + JZ_REG_MMC_IREG);
 848	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, true);
 849
 850	host->state = JZ4740_MMC_STATE_READ_RESPONSE;
 851	set_bit(0, &host->waiting);
 852	mod_timer(&host->timeout_timer, jiffies + 5*HZ);
 853	jz4740_mmc_send_command(host, req->cmd);
 854}
 855
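    /*
     * MMC_POWER_UP resets the controller, drives the optional power GPIO to
     * its active level, enables the clock and arms the INIT flag for the
     * next command; the power-down path reverses this. Bus width selection
     * only toggles the 4-bit flag in CMDAT.
     */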
 856static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 857{
 858	struct jz4740_mmc_host *host = mmc_priv(mmc);
 859	if (ios->clock)
 860		jz4740_mmc_set_clock_rate(host, ios->clock);
 861
 862	switch (ios->power_mode) {
 863	case MMC_POWER_UP:
 864		jz4740_mmc_reset(host);
 865		if (gpio_is_valid(host->pdata->gpio_power))
 866			gpio_set_value(host->pdata->gpio_power,
 867					!host->pdata->power_active_low);
 868		host->cmdat |= JZ_MMC_CMDAT_INIT;
 869		clk_prepare_enable(host->clk);
 870		break;
 871	case MMC_POWER_ON:
 872		break;
 873	default:
 874		if (gpio_is_valid(host->pdata->gpio_power))
 875			gpio_set_value(host->pdata->gpio_power,
 876					host->pdata->power_active_low);
 877		clk_disable_unprepare(host->clk);
 878		break;
 879	}
 880
 881	switch (ios->bus_width) {
 882	case MMC_BUS_WIDTH_1:
 883		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
 884		break;
 885	case MMC_BUS_WIDTH_4:
 886		host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
 887		break;
 888	default:
 889		break;
 890	}
 891}
 892
 893static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
 894{
 895	struct jz4740_mmc_host *host = mmc_priv(mmc);
 896	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, enable);
 897}
 898
 899static const struct mmc_host_ops jz4740_mmc_ops = {
 900	.request	= jz4740_mmc_request,
 901	.pre_req	= jz4740_mmc_pre_request,
 902	.post_req	= jz4740_mmc_post_request,
 903	.set_ios	= jz4740_mmc_set_ios,
 904	.get_ro		= mmc_gpio_get_ro,
 905	.get_cd		= mmc_gpio_get_cd,
 906	.enable_sdio_irq = jz4740_mmc_enable_sdio_irq,
 907};
 908
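    /*
     * SoC pins claimed through the legacy jz_gpio bulk API. When the
     * platform data requests 1-bit mode, jz4740_mmc_num_pins() drops the
     * last three entries (DATA1..DATA3).
     */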
 909static const struct jz_gpio_bulk_request jz4740_mmc_pins[] = {
 910	JZ_GPIO_BULK_PIN(MSC_CMD),
 911	JZ_GPIO_BULK_PIN(MSC_CLK),
 912	JZ_GPIO_BULK_PIN(MSC_DATA0),
 913	JZ_GPIO_BULK_PIN(MSC_DATA1),
 914	JZ_GPIO_BULK_PIN(MSC_DATA2),
 915	JZ_GPIO_BULK_PIN(MSC_DATA3),
 916};
 917
 918static int jz4740_mmc_request_gpio(struct device *dev, int gpio,
 919	const char *name, bool output, int value)
 920{
 921	int ret;
 922
 923	if (!gpio_is_valid(gpio))
 924		return 0;
 925
 926	ret = gpio_request(gpio, name);
 927	if (ret) {
 928		dev_err(dev, "Failed to request %s gpio: %d\n", name, ret);
 929		return ret;
 930	}
 931
 932	if (output)
 933		gpio_direction_output(gpio, value);
 934	else
 935		gpio_direction_input(gpio);
 936
 937	return 0;
 938}
 939
 940static int jz4740_mmc_request_gpios(struct mmc_host *mmc,
 941	struct platform_device *pdev)
 942{
 943	struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data;
 944	int ret = 0;
 945
 946	if (!pdata)
 947		return 0;
 948
 949	if (!pdata->card_detect_active_low)
 950		mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
 951	if (!pdata->read_only_active_low)
 952		mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
 953
 954	if (gpio_is_valid(pdata->gpio_card_detect)) {
 955		ret = mmc_gpio_request_cd(mmc, pdata->gpio_card_detect, 0);
 956		if (ret)
 957			return ret;
 958	}
 959
 960	if (gpio_is_valid(pdata->gpio_read_only)) {
 961		ret = mmc_gpio_request_ro(mmc, pdata->gpio_read_only);
 962		if (ret)
 963			return ret;
 964	}
 965
 966	return jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_power,
 967			"MMC power", true, pdata->power_active_low);
 968}
 969
 970static void jz4740_mmc_free_gpios(struct platform_device *pdev)
 971{
 972	struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data;
 973
 974	if (!pdata)
 975		return;
 976
 977	if (gpio_is_valid(pdata->gpio_power))
 978		gpio_free(pdata->gpio_power);
 979}
 980
 981static inline size_t jz4740_mmc_num_pins(struct jz4740_mmc_host *host)
 982{
 983	size_t num_pins = ARRAY_SIZE(jz4740_mmc_pins);
 984	if (host->pdata && host->pdata->data_1bit)
 985		num_pins -= 3;
 986
 987	return num_pins;
 988}
 989
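    /*
     * Probe order: allocate the mmc_host, acquire IRQ, clock and MMIO
     * resources, claim the SoC pins and platform GPIOs, fill in the host
     * capabilities, register the threaded interrupt handler, try to acquire
     * DMA channels (falling back to PIO on failure) and finally add the
     * host. The err_* labels unwind in reverse order.
     */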
 990static int jz4740_mmc_probe(struct platform_device *pdev)
 991{
 992	int ret;
 993	struct mmc_host *mmc;
 994	struct jz4740_mmc_host *host;
 995	struct jz4740_mmc_platform_data *pdata;
 996
 997	pdata = pdev->dev.platform_data;
 998
 999	mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev);
1000	if (!mmc) {
1001		dev_err(&pdev->dev, "Failed to alloc mmc host structure\n");
1002		return -ENOMEM;
1003	}
1004
1005	host = mmc_priv(mmc);
1006	host->pdata = pdata;
1007
1008	host->irq = platform_get_irq(pdev, 0);
1009	if (host->irq < 0) {
1010		ret = host->irq;
1011		dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret);
1012		goto err_free_host;
1013	}
1014
1015	host->clk = devm_clk_get(&pdev->dev, "mmc");
1016	if (IS_ERR(host->clk)) {
1017		ret = PTR_ERR(host->clk);
1018		dev_err(&pdev->dev, "Failed to get mmc clock\n");
1019		goto err_free_host;
1020	}
1021
1022	host->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1023	host->base = devm_ioremap_resource(&pdev->dev, host->mem_res);
1024	if (IS_ERR(host->base)) {
1025		ret = PTR_ERR(host->base);
1026		dev_err(&pdev->dev, "Failed to ioremap base memory\n");
1027		goto err_free_host;
1028	}
1029
1030	ret = jz_gpio_bulk_request(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
1031	if (ret) {
1032		dev_err(&pdev->dev, "Failed to request mmc pins: %d\n", ret);
1033		goto err_free_host;
1034	}
1035
1036	ret = jz4740_mmc_request_gpios(mmc, pdev);
1037	if (ret)
1038		goto err_gpio_bulk_free;
1039
1040	mmc->ops = &jz4740_mmc_ops;
1041	mmc->f_min = JZ_MMC_CLK_RATE / 128;
1042	mmc->f_max = JZ_MMC_CLK_RATE;
1043	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1044	mmc->caps = (pdata && pdata->data_1bit) ? 0 : MMC_CAP_4_BIT_DATA;
1045	mmc->caps |= MMC_CAP_SDIO_IRQ;
1046
1047	mmc->max_blk_size = (1 << 10) - 1;
1048	mmc->max_blk_count = (1 << 15) - 1;
1049	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1050
1051	mmc->max_segs = 128;
1052	mmc->max_seg_size = mmc->max_req_size;
1053
1054	host->mmc = mmc;
1055	host->pdev = pdev;
1056	spin_lock_init(&host->lock);
1057	host->irq_mask = 0xffff;
1058
1059	ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0,
1060			dev_name(&pdev->dev), host);
1061	if (ret) {
1062		dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
1063		goto err_free_gpios;
1064	}
1065
1066	jz4740_mmc_reset(host);
1067	jz4740_mmc_clock_disable(host);
1068	setup_timer(&host->timeout_timer, jz4740_mmc_timeout,
1069			(unsigned long)host);
1070
1071	host->use_dma = true;
1072	if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0)
1073		host->use_dma = false;
1074
1075	platform_set_drvdata(pdev, host);
1076	ret = mmc_add_host(mmc);
1077
1078	if (ret) {
1079		dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret);
1080		goto err_free_irq;
1081	}
1082	dev_info(&pdev->dev, "JZ SD/MMC card driver registered\n");
1083
1084	dev_info(&pdev->dev, "Using %s, %d-bit mode\n",
1085		 host->use_dma ? "DMA" : "PIO",
1086		 (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);
1087
1088	return 0;
1089
1090err_free_irq:
1091	free_irq(host->irq, host);
1092err_free_gpios:
1093	jz4740_mmc_free_gpios(pdev);
1094err_gpio_bulk_free:
1095	if (host->use_dma)
1096		jz4740_mmc_release_dma_channels(host);
1097	jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
1098err_free_host:
1099	mmc_free_host(mmc);
1100
1101	return ret;
1102}
1103
1104static int jz4740_mmc_remove(struct platform_device *pdev)
1105{
1106	struct jz4740_mmc_host *host = platform_get_drvdata(pdev);
1107
1108	del_timer_sync(&host->timeout_timer);
1109	jz4740_mmc_set_irq_enabled(host, 0xff, false);
1110	jz4740_mmc_reset(host);
1111
1112	mmc_remove_host(host->mmc);
1113
1114	free_irq(host->irq, host);
1115
1116	jz4740_mmc_free_gpios(pdev);
1117	jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
1118
1119	if (host->use_dma)
1120		jz4740_mmc_release_dma_channels(host);
1121
1122	mmc_free_host(host->mmc);
1123
1124	return 0;
1125}
1126
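    /*
     * System sleep only parks and restores the SoC pins; the controller
     * clock is already handled by the MMC_POWER_* transitions in
     * jz4740_mmc_set_ios().
     */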
1127#ifdef CONFIG_PM_SLEEP
1128
1129static int jz4740_mmc_suspend(struct device *dev)
1130{
1131	struct jz4740_mmc_host *host = dev_get_drvdata(dev);
1132
1133	jz_gpio_bulk_suspend(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
1134
1135	return 0;
1136}
1137
1138static int jz4740_mmc_resume(struct device *dev)
1139{
1140	struct jz4740_mmc_host *host = dev_get_drvdata(dev);
1141
1142	jz_gpio_bulk_resume(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
1143
1144	return 0;
1145}
1146
1147static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
1148	jz4740_mmc_resume);
1149#define JZ4740_MMC_PM_OPS (&jz4740_mmc_pm_ops)
1150#else
1151#define JZ4740_MMC_PM_OPS NULL
1152#endif
1153
1154static struct platform_driver jz4740_mmc_driver = {
1155	.probe = jz4740_mmc_probe,
1156	.remove = jz4740_mmc_remove,
1157	.driver = {
1158		.name = "jz4740-mmc",
1159		.pm = JZ4740_MMC_PM_OPS,
1160	},
1161};
1162
1163module_platform_driver(jz4740_mmc_driver);
1164
1165MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver");
1166MODULE_LICENSE("GPL");
1167MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");