/*
 * linux/drivers/mmc/host/tmio_mmc_pio.c
 *
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet.)
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Eliminate FIXMEs
 *   SDIO support
 *   Better power management
 *   Handle MMC errors better
 *   Double buffer support
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/cd-gpio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "tmio_mmc.h"

void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}

static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32(host, CTL_STATUS, ~i);
}

static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}

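/* Advance to the next scatterlist segment; returns the number of segments left */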
static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;
	return --host->sg_len;
}

#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)

static void pr_debug_status(u32 status)
{
	int i = 0;
	pr_debug("status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}
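
/*
 * Illustrative example, assuming the usual TMIO_STAT_* bit values from
 * <linux/mfd/tmio.h> (CMDRESPEND = 0x01, DATAEND = 0x04): a status word
 * of 0x00000005 would be printed by the helper above as
 *
 *	status: 00000005 = CMDRESPEND | DATAEND
 */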

#else
#define pr_debug_status(s)  do { } while (0)
#endif

static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable) {
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL &
					~TMIO_SDIO_STAT_IOIRQ;
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
	} else {
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
	}
}

static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock) {
		for (clock = host->mmc->f_min, clk = 0x80000080;
			new_clock >= (clock<<1); clk >>= 1)
			clock <<= 1;
		clk |= 0x100;
	}

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk>>22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}
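
/*
 * Worked example (illustrative, relying on f_min = f_max / 512 as set up
 * in tmio_mmc_host_probe() below): the loop starts from the pattern
 * 0x80000080 and shifts it right once per doubling of the clock. Asking
 * for ~hclk/64 shifts three times, leaving the divider select 0x10 in the
 * low byte; asking for hclk or more shifts nine times, at which point
 * bit 22 of the pattern is set and set_clk_div() is told to bypass the
 * divider.
 */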

static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		msleep(10);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);
}

static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);

	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		msleep(10);
	}
}

static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (resource_size(res) > 0x100)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}

static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/*
	 * Is the request already finished? Since we use a non-blocking
	 * cancel_delayed_work(), a .set_ios() call can preempt us, so we
	 * have to check for IS_ERR(host->mrq).
	 */
	if (IS_ERR_OR_NULL(mrq)
	    || time_is_after_jiffies(host->last_req_ts +
		msecs_to_jiffies(2000))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		"timeout waiting for hardware interrupt (CMD%u)\n",
		mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	/* Ready for new calls */
	host->mrq = NULL;

	tmio_mmc_abort_dma(host);
	mmc_request_done(host->mmc, mrq);
}

/* This function takes host->lock itself; do not call it with the lock held */
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	mrq = host->mrq;
	if (IS_ERR_OR_NULL(mrq)) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	cancel_delayed_work(&host->delayed_reset_work);

	host->mrq = NULL;
	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->cmd->error || (mrq->data && mrq->data->error))
		tmio_mmc_abort_dma(host);

	mmc_request_done(host->mmc, mrq);
}

static void tmio_mmc_done_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  done);
	tmio_mmc_finish_request(host);
}

/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
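
/*
 * Example (illustrative): a single-block read (CMD17, R1 response) would be
 * issued by tmio_mmc_start_command() below as
 * 17 | RESP_R1 | DATA_PRESENT | TRANSFER_READ = 0x1c11.
 */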

static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;
	u32 irq_mask = TMIO_MASK_CMD;

	/* Command 12 (stop transmission) is handled by the hardware */
	if (cmd->opcode == 12 && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggests this bit
 *         should be set when issuing app commands.
 *	if(cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	if (!host->native_hotplug)
		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
	tmio_mmc_enable_mmc_irqs(host, irq_mask);

	/* Fire off the command */
	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}

/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	if (data->flags & MMC_DATA_READ)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);

	return;
}

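/*
 * If the DMA path substituted its bounce buffer for a scatterlist entry it
 * could not map directly (see the DMA code), copy the received data back
 * into the caller's original scatterlist entry.
 */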
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}

/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 *        which we don't do, as the chip can auto generate them.
	 *        Perhaps we can be smarter about when to use auto CMD12 and
	 *        only issue the auto request when we know this is the desired
	 *        stop command, allowing fallback to the stop command the
	 *        upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop) {
		if (stop->opcode == 12 && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	schedule_work(&host->done);
}

static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data;
	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt already comes with
		 * the BUSY status bit clear, but on some operations, like
		 * mount or at the beginning of a write / sync / umount, there
		 * is one DATAEND interrupt with the BUSY bit set. In such
		 * cases waiting for one more interrupt fixes the problem.
		 */
		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}

static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
	unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	host->cmd = NULL;

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */
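	/*
	 * Worked example (illustrative values): if the four CTL_RESPONSE
	 * registers, lowest address first, read back 0x33221100, 0x77665544,
	 * 0xbbaa9988 and 0xffeeddcc, the loop below stores them as
	 * resp[3]..resp[0], and the MMC_RSP_136 fixup then yields resp[0..3] =
	 * 0xeeddccbb, 0xaa998877, 0x66554433, 0x22110000, i.e. the whole
	 * response shifted left by one byte.
	 */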

	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			else
				tasklet_schedule(&host->dma_issue);
		} else {
			if (host->force_pio || !host->chan_tx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		schedule_work(&host->done);
	}

out:
	spin_unlock(&host->lock);
}

static void tmio_mmc_card_irq_status(struct tmio_mmc_host *host,
				       unsigned int *ireg, unsigned int *status)
{
	*status = sd_ctrl_read32(host, CTL_STATUS);
	*ireg = *status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;

	pr_debug_status(*status);
	pr_debug_status(*ireg);
}

static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
				      int ireg, int status)
{
	struct mmc_host *mmc = host->mmc;

	/* Card insert / remove attempts */
	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
			TMIO_STAT_CARD_REMOVE);
		if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
		     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
		    !work_pending(&mmc->detect.work))
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		return true;
	}

	return false;
}

irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid)
{
	unsigned int ireg, status;
	struct tmio_mmc_host *host = devid;

	tmio_mmc_card_irq_status(host, &ireg, &status);
	__tmio_mmc_card_detect_irq(host, ireg, status);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_card_detect_irq);

static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
				 int ireg, int status)
{
	/* Command completion */
	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
		tmio_mmc_ack_mmc_irqs(host,
			     TMIO_STAT_CMDRESPEND |
			     TMIO_STAT_CMDTIMEOUT);
		tmio_mmc_cmd_irq(host, status);
		return true;
	}

	/* Data transfer */
	if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
		tmio_mmc_pio_irq(host);
		return true;
	}

	/* Data transfer completion */
	if (ireg & TMIO_STAT_DATAEND) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_data_irq(host);
		return true;
	}

	return false;
}

irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid)
{
	unsigned int ireg, status;
	struct tmio_mmc_host *host = devid;

	tmio_mmc_card_irq_status(host, &ireg, &status);
	__tmio_mmc_sdcard_irq(host, ireg, status);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_sdcard_irq);

irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	struct mmc_host *mmc = host->mmc;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, status;

	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
		return IRQ_HANDLED;

	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
	/* Mask with the SDIO IRQ mask, not the SD-card one */
	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;

	sd_ctrl_write16(host, CTL_SDIO_STATUS, status & ~TMIO_SDIO_MASK_ALL);

	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
		mmc_signal_sdio_irq(mmc);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_sdio_irq);

irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	unsigned int ireg, status;

	pr_debug("MMC IRQ begin\n");

	tmio_mmc_card_irq_status(host, &ireg, &status);
	if (__tmio_mmc_card_detect_irq(host, ireg, status))
		return IRQ_HANDLED;
	if (__tmio_mmc_sdcard_irq(host, ireg, status))
		return IRQ_HANDLED;

	tmio_mmc_sdio_irq(irq, devid);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_irq);

static int tmio_mmc_start_data(struct tmio_mmc_host *host,
	struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x  nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}

/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_debug("request not null\n");
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	host->last_req_ts = jiffies;
	wmb();
	host->mrq = mrq;

	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->data) {
		ret = tmio_mmc_start_data(host, mrq->data);
		if (ret)
			goto fail;
	}

	ret = tmio_mmc_start_command(host, mrq->cmd);
	if (!ret) {
		schedule_delayed_work(&host->delayed_reset_work,
				      msecs_to_jiffies(2000));
		return;
	}

fail:
	host->force_pio = false;
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(mmc, mrq);
}

/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme, so it cannot run an
 * MMC card at full speed (20 MHz). The maximum clock is 24 MHz on SD, but
 * as MMC won't run that fast, it has to be clocked at 12 MHz, the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct device *dev = &host->pdev->dev;
	unsigned long flags;

	mutex_lock(&host->ios_lock);

	spin_lock_irqsave(&host->lock, flags);
	if (host->mrq) {
		if (IS_ERR(host->mrq)) {
			dev_dbg(dev,
				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
				current->comm, task_pid_nr(current),
				ios->clock, ios->power_mode);
			host->mrq = ERR_PTR(-EINTR);
		} else {
			dev_dbg(dev,
				"%s.%d: CMD%u active since %lu, now %lu!\n",
				current->comm, task_pid_nr(current),
				host->mrq->cmd->opcode, host->last_req_ts, jiffies);
		}
		spin_unlock_irqrestore(&host->lock, flags);

		mutex_unlock(&host->ios_lock);
		return;
	}

	host->mrq = ERR_PTR(-EBUSY);

	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * host->power toggles between false and true in both cases - whether
	 * or not the controller can be runtime-suspended during inactivity.
	 * But if the controller has to be kept on, the runtime-pm usage_count
	 * is kept positive, so no suspending actually takes place.
	 */
	if (ios->power_mode == MMC_POWER_ON && ios->clock) {
		if (!host->power) {
			pm_runtime_get_sync(dev);
			host->power = true;
		}
		tmio_mmc_set_clock(host, ios->clock);
		/* power up SD bus */
		if (host->set_pwr)
			host->set_pwr(host->pdev, 1);
		/* start bus clock */
		tmio_mmc_clk_start(host);
	} else if (ios->power_mode != MMC_POWER_UP) {
		if (host->set_pwr && ios->power_mode == MMC_POWER_OFF)
			host->set_pwr(host->pdev, 0);
		if (host->power) {
			host->power = false;
			pm_runtime_put(dev);
		}
		tmio_mmc_clk_stop(host);
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
		break;
	case MMC_BUS_WIDTH_4:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
		break;
	}

	/* Let things settle. Delay taken from the WinCE driver */
	udelay(140);
	if (PTR_ERR(host->mrq) == -EINTR)
		dev_dbg(&host->pdev->dev,
			"%s.%d: IOS interrupted: clk %u, mode %u\n",
			current->comm, task_pid_nr(current),
			ios->clock, ios->power_mode);
	host->mrq = NULL;

	mutex_unlock(&host->ios_lock);
}

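/*
 * Note the polarity (as used here): TMIO_STAT_WRPROTECT set apparently means
 * "writable", so the expression below is inverted - a nonzero return reports
 * the card as read-only to the MMC core.
 */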
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;

	return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		 (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
}

static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;

	if (!pdata->get_cd)
		return -ENOSYS;
	else
		return pdata->get_cd(host->pdev);
}

static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= tmio_mmc_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};

int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
				  struct platform_device *pdev,
				  struct tmio_mmc_data *pdata)
{
	struct tmio_mmc_host *_host;
	struct mmc_host *mmc;
	struct resource *res_ctl;
	int ret;
	u32 irq_mask = TMIO_MASK_CMD;

	res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		return -EINVAL;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	pdata->dev = &pdev->dev;
	_host = mmc_priv(mmc);
	_host->pdata = pdata;
	_host->mmc = mmc;
	_host->pdev = pdev;
	platform_set_drvdata(pdev, mmc);

	_host->set_pwr = pdata->set_pwr;
	_host->set_clk_div = pdata->set_clk_div;

	/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
	_host->bus_shift = resource_size(res_ctl) >> 10;

	_host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
	if (!_host->ctl) {
		ret = -ENOMEM;
		goto host_free;
	}

	mmc->ops = &tmio_mmc_ops;
	mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->f_max = pdata->hclk;
	mmc->f_min = mmc->f_max / 512;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
		mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;
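
	/*
	 * Illustrative numbers: with 4 KiB pages (PAGE_CACHE_SIZE == 4096)
	 * this yields max_blk_count = (4096 / 512) * 32 = 256, i.e. a
	 * maximum request (and segment) size of 128 KiB.
	 */
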
	if (pdata->ocr_mask)
		mmc->ocr_avail = pdata->ocr_mask;
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
				  mmc->caps & MMC_CAP_NEEDS_POLL ||
				  mmc->caps & MMC_CAP_NONREMOVABLE);

	_host->power = false;
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume(&pdev->dev);
	if (ret < 0)
		goto pm_disable;

	/*
	 * There are 4 different scenarios for the card detection:
	 *  1) an external gpio irq handles the cd (best for power savings)
	 *  2) internal sdhi irq handles the cd
	 *  3) a worker thread polls the sdhi - indicated by MMC_CAP_NEEDS_POLL
	 *  4) the medium is non-removable - indicated by MMC_CAP_NONREMOVABLE
	 *
	 *  While we increment the runtime PM counter for all scenarios when
	 *  the mmc core activates us by calling an appropriate set_ios(), we
	 *  must additionally ensure that in case 2) the tmio mmc hardware stays
	 *  powered on during runtime for the card detection to work.
	 */
	if (_host->native_hotplug)
		pm_runtime_get_noresume(&pdev->dev);

	tmio_mmc_clk_stop(_host);
	tmio_mmc_reset(_host);

	_host->sdcard_irq_mask = sd_ctrl_read32(_host, CTL_IRQ_MASK);
	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		tmio_mmc_enable_sdio_irq(mmc, 0);

	spin_lock_init(&_host->lock);
	mutex_init(&_host->ios_lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
	INIT_WORK(&_host->done, tmio_mmc_done_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(_host, pdata);

	mmc_add_host(mmc);

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	/* Unmask the IRQs we want to know about */
	if (!_host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!_host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;
	if (!_host->native_hotplug)
		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);

	tmio_mmc_enable_mmc_irqs(_host, irq_mask);

	if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
		ret = mmc_cd_gpio_request(mmc, pdata->cd_gpio);
		if (ret < 0) {
			tmio_mmc_host_remove(_host);
			return ret;
		}
	}

	*host = _host;

	return 0;

pm_disable:
	pm_runtime_disable(&pdev->dev);
	iounmap(_host->ctl);
host_free:
	mmc_free_host(mmc);

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_probe);

void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
	struct platform_device *pdev = host->pdev;
	struct tmio_mmc_data *pdata = host->pdata;
	struct mmc_host *mmc = host->mmc;

	if (pdata->flags & TMIO_MMC_USE_GPIO_CD)
		/*
		 * This means we can miss a card-eject, but this is anyway
		 * possible, because of delayed processing of hotplug events.
		 */
		mmc_cd_gpio_free(mmc);

	if (!host->native_hotplug)
		pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(mmc);
	cancel_work_sync(&host->done);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	iounmap(host->ctl);
	mmc_free_host(mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);

#ifdef CONFIG_PM
int tmio_mmc_host_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int ret = mmc_suspend_host(mmc);

	if (!ret)
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_suspend);

int tmio_mmc_host_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	tmio_mmc_reset(host);
	tmio_mmc_enable_dma(host, true);

	/* The MMC core will perform the complete set up */
	return mmc_resume_host(mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_resume);

#endif	/* CONFIG_PM */

int tmio_mmc_host_runtime_suspend(struct device *dev)
{
	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);

int tmio_mmc_host_runtime_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	tmio_mmc_reset(host);
	tmio_mmc_enable_dma(host, true);

	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);

MODULE_LICENSE("GPL v2");