v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * bcm2835 sdhost driver.
   4 *
   5 * The 2835 has two SD controllers: The Arasan sdhci controller
   6 * (supported by the iproc driver) and a custom sdhost controller
   7 * (supported by this driver).
   8 *
   9 * The sdhci controller supports both sdcard and sdio.  The sdhost
  10 * controller supports the sdcard only, but has better performance.
  11 * Also note that the rpi3 has sdio wifi, so driving the sdcard with
   12 * the sdhost controller leaves the sdhci controller free for wifi
  13 * support.
  14 *
  15 * The configuration is done by devicetree via pin muxing.  Both
   16 * SD controllers are available on the same pins (2 pin groups = pin 22
  17 * to 27 + pin 48 to 53).  So it's possible to use both SD controllers
  18 * at the same time with different pin groups.
  19 *
  20 * Author:      Phil Elwell <phil@raspberrypi.org>
  21 *              Copyright (C) 2015-2016 Raspberry Pi (Trading) Ltd.
  22 *
  23 * Based on
  24 *  mmc-bcm2835.c by Gellert Weisz
  25 * which is, in turn, based on
  26 *  sdhci-bcm2708.c by Broadcom
  27 *  sdhci-bcm2835.c by Stephen Warren and Oleksandr Tymoshenko
  28 *  sdhci.c and sdhci-pci.c by Pierre Ossman
  29 */
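/*
 * Illustration (not part of the upstream file): a board devicetree
 * typically selects this controller by muxing one of the SD pin groups
 * to the sdhost function and enabling its node, e.g.
 *
 *   &sdhost {                            // hypothetical node label
 *           pinctrl-names = "default";
 *           pinctrl-0 = <&sdhost_pins>;  // hypothetical pinctrl label
 *           status = "okay";
 *   };
 *
 * The actual node and pinctrl labels depend on the board .dts files.
 */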
  30#include <linux/clk.h>
  31#include <linux/delay.h>
  32#include <linux/device.h>
  33#include <linux/dmaengine.h>
  34#include <linux/dma-mapping.h>
  35#include <linux/err.h>
  36#include <linux/highmem.h>
  37#include <linux/interrupt.h>
  38#include <linux/io.h>
  39#include <linux/iopoll.h>
  40#include <linux/module.h>
  41#include <linux/of_address.h>
  42#include <linux/of_irq.h>
  43#include <linux/platform_device.h>
  44#include <linux/scatterlist.h>
  45#include <linux/time.h>
  46#include <linux/workqueue.h>
  47
  48#include <linux/mmc/host.h>
  49#include <linux/mmc/mmc.h>
  50#include <linux/mmc/sd.h>
  51
  52#define SDCMD  0x00 /* Command to SD card              - 16 R/W */
  53#define SDARG  0x04 /* Argument to SD card             - 32 R/W */
  54#define SDTOUT 0x08 /* Start value for timeout counter - 32 R/W */
  55#define SDCDIV 0x0c /* Start value for clock divider   - 11 R/W */
  56#define SDRSP0 0x10 /* SD card response (31:0)         - 32 R   */
  57#define SDRSP1 0x14 /* SD card response (63:32)        - 32 R   */
  58#define SDRSP2 0x18 /* SD card response (95:64)        - 32 R   */
  59#define SDRSP3 0x1c /* SD card response (127:96)       - 32 R   */
  60#define SDHSTS 0x20 /* SD host status                  - 11 R/W */
  61#define SDVDD  0x30 /* SD card power control           -  1 R/W */
  62#define SDEDM  0x34 /* Emergency Debug Mode            - 13 R/W */
  63#define SDHCFG 0x38 /* Host configuration              -  2 R/W */
  64#define SDHBCT 0x3c /* Host byte count (debug)         - 32 R/W */
  65#define SDDATA 0x40 /* Data to/from SD card            - 32 R/W */
  66#define SDHBLC 0x50 /* Host block count (SDIO/SDHC)    -  9 R/W */
  67
  68#define SDCMD_NEW_FLAG			0x8000
  69#define SDCMD_FAIL_FLAG			0x4000
  70#define SDCMD_BUSYWAIT			0x800
  71#define SDCMD_NO_RESPONSE		0x400
  72#define SDCMD_LONG_RESPONSE		0x200
  73#define SDCMD_WRITE_CMD			0x80
  74#define SDCMD_READ_CMD			0x40
  75#define SDCMD_CMD_MASK			0x3f
  76
  77#define SDCDIV_MAX_CDIV			0x7ff
  78
  79#define SDHSTS_BUSY_IRPT		0x400
  80#define SDHSTS_BLOCK_IRPT		0x200
  81#define SDHSTS_SDIO_IRPT		0x100
  82#define SDHSTS_REW_TIME_OUT		0x80
  83#define SDHSTS_CMD_TIME_OUT		0x40
  84#define SDHSTS_CRC16_ERROR		0x20
  85#define SDHSTS_CRC7_ERROR		0x10
  86#define SDHSTS_FIFO_ERROR		0x08
  87/* Reserved */
  88/* Reserved */
  89#define SDHSTS_DATA_FLAG		0x01
  90
  91#define SDHSTS_TRANSFER_ERROR_MASK	(SDHSTS_CRC7_ERROR | \
  92					 SDHSTS_CRC16_ERROR | \
  93					 SDHSTS_REW_TIME_OUT | \
  94					 SDHSTS_FIFO_ERROR)
  95
  96#define SDHSTS_ERROR_MASK		(SDHSTS_CMD_TIME_OUT | \
  97					 SDHSTS_TRANSFER_ERROR_MASK)
  98
  99#define SDHCFG_BUSY_IRPT_EN	BIT(10)
 100#define SDHCFG_BLOCK_IRPT_EN	BIT(8)
 101#define SDHCFG_SDIO_IRPT_EN	BIT(5)
 102#define SDHCFG_DATA_IRPT_EN	BIT(4)
 103#define SDHCFG_SLOW_CARD	BIT(3)
 104#define SDHCFG_WIDE_EXT_BUS	BIT(2)
 105#define SDHCFG_WIDE_INT_BUS	BIT(1)
 106#define SDHCFG_REL_CMD_LINE	BIT(0)
 107
 108#define SDVDD_POWER_OFF		0
 109#define SDVDD_POWER_ON		1
 110
 111#define SDEDM_FORCE_DATA_MODE	BIT(19)
 112#define SDEDM_CLOCK_PULSE	BIT(20)
 113#define SDEDM_BYPASS		BIT(21)
 114
 115#define SDEDM_WRITE_THRESHOLD_SHIFT	9
 116#define SDEDM_READ_THRESHOLD_SHIFT	14
 117#define SDEDM_THRESHOLD_MASK		0x1f
 118
 119#define SDEDM_FSM_MASK		0xf
 120#define SDEDM_FSM_IDENTMODE	0x0
 121#define SDEDM_FSM_DATAMODE	0x1
 122#define SDEDM_FSM_READDATA	0x2
 123#define SDEDM_FSM_WRITEDATA	0x3
 124#define SDEDM_FSM_READWAIT	0x4
 125#define SDEDM_FSM_READCRC	0x5
 126#define SDEDM_FSM_WRITECRC	0x6
 127#define SDEDM_FSM_WRITEWAIT1	0x7
 128#define SDEDM_FSM_POWERDOWN	0x8
 129#define SDEDM_FSM_POWERUP	0x9
 130#define SDEDM_FSM_WRITESTART1	0xa
 131#define SDEDM_FSM_WRITESTART2	0xb
 132#define SDEDM_FSM_GENPULSES	0xc
 133#define SDEDM_FSM_WRITEWAIT2	0xd
 134#define SDEDM_FSM_STARTPOWDOWN	0xf
 135
 136#define SDDATA_FIFO_WORDS	16
 137
 138#define FIFO_READ_THRESHOLD	4
 139#define FIFO_WRITE_THRESHOLD	4
 140#define SDDATA_FIFO_PIO_BURST	8
 141
 142#define PIO_THRESHOLD	1  /* Maximum block count for PIO (0 = always DMA) */
 143
 144struct bcm2835_host {
 145	spinlock_t		lock;
 146	struct mutex		mutex;
 147
 148	void __iomem		*ioaddr;
 149	u32			phys_addr;
 150
 151	struct platform_device	*pdev;
 152
 153	int			clock;		/* Current clock speed */
 154	unsigned int		max_clk;	/* Max possible freq */
 155	struct work_struct	dma_work;
 156	struct delayed_work	timeout_work;	/* Timer for timeouts */
 157	struct sg_mapping_iter	sg_miter;	/* SG state for PIO */
 158	unsigned int		blocks;		/* remaining PIO blocks */
 159	int			irq;		/* Device IRQ */
 160
 161	u32			ns_per_fifo_word;
 162
 163	/* cached registers */
 164	u32			hcfg;
 165	u32			cdiv;
 166
 167	struct mmc_request	*mrq;		/* Current request */
 168	struct mmc_command	*cmd;		/* Current command */
 169	struct mmc_data		*data;		/* Current data request */
 170	bool			data_complete:1;/* Data finished before cmd */
 171	bool			use_busy:1;	/* Wait for busy interrupt */
 172	bool			use_sbc:1;	/* Send CMD23 */
 173
 174	/* for threaded irq handler */
 175	bool			irq_block;
 176	bool			irq_busy;
 177	bool			irq_data;
 178
 179	/* DMA part */
 180	struct dma_chan		*dma_chan_rxtx;
 181	struct dma_chan		*dma_chan;
 182	struct dma_slave_config dma_cfg_rx;
 183	struct dma_slave_config dma_cfg_tx;
 184	struct dma_async_tx_descriptor	*dma_desc;
 185	u32			dma_dir;
 186	u32			drain_words;
 187	struct page		*drain_page;
 188	u32			drain_offset;
 189	bool			use_dma;
 190};
 191
 192static void bcm2835_dumpcmd(struct bcm2835_host *host, struct mmc_command *cmd,
 193			    const char *label)
 194{
 195	struct device *dev = &host->pdev->dev;
 196
 197	if (!cmd)
 198		return;
 199
 200	dev_dbg(dev, "%c%s op %d arg 0x%x flags 0x%x - resp %08x %08x %08x %08x, err %d\n",
 201		(cmd == host->cmd) ? '>' : ' ',
 202		label, cmd->opcode, cmd->arg, cmd->flags,
 203		cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3],
 204		cmd->error);
 205}
 206
 207static void bcm2835_dumpregs(struct bcm2835_host *host)
 208{
 209	struct mmc_request *mrq = host->mrq;
 210	struct device *dev = &host->pdev->dev;
 211
 212	if (mrq) {
 213		bcm2835_dumpcmd(host, mrq->sbc, "sbc");
 214		bcm2835_dumpcmd(host, mrq->cmd, "cmd");
 215		if (mrq->data) {
 216			dev_dbg(dev, "data blocks %x blksz %x - err %d\n",
 217				mrq->data->blocks,
 218				mrq->data->blksz,
 219				mrq->data->error);
 220		}
 221		bcm2835_dumpcmd(host, mrq->stop, "stop");
 222	}
 223
 224	dev_dbg(dev, "=========== REGISTER DUMP ===========\n");
 225	dev_dbg(dev, "SDCMD  0x%08x\n", readl(host->ioaddr + SDCMD));
 226	dev_dbg(dev, "SDARG  0x%08x\n", readl(host->ioaddr + SDARG));
 227	dev_dbg(dev, "SDTOUT 0x%08x\n", readl(host->ioaddr + SDTOUT));
 228	dev_dbg(dev, "SDCDIV 0x%08x\n", readl(host->ioaddr + SDCDIV));
 229	dev_dbg(dev, "SDRSP0 0x%08x\n", readl(host->ioaddr + SDRSP0));
 230	dev_dbg(dev, "SDRSP1 0x%08x\n", readl(host->ioaddr + SDRSP1));
 231	dev_dbg(dev, "SDRSP2 0x%08x\n", readl(host->ioaddr + SDRSP2));
 232	dev_dbg(dev, "SDRSP3 0x%08x\n", readl(host->ioaddr + SDRSP3));
 233	dev_dbg(dev, "SDHSTS 0x%08x\n", readl(host->ioaddr + SDHSTS));
 234	dev_dbg(dev, "SDVDD  0x%08x\n", readl(host->ioaddr + SDVDD));
 235	dev_dbg(dev, "SDEDM  0x%08x\n", readl(host->ioaddr + SDEDM));
 236	dev_dbg(dev, "SDHCFG 0x%08x\n", readl(host->ioaddr + SDHCFG));
 237	dev_dbg(dev, "SDHBCT 0x%08x\n", readl(host->ioaddr + SDHBCT));
 238	dev_dbg(dev, "SDHBLC 0x%08x\n", readl(host->ioaddr + SDHBLC));
 239	dev_dbg(dev, "===========================================\n");
 240}
 241
 242static void bcm2835_reset_internal(struct bcm2835_host *host)
 243{
 244	u32 temp;
 245
 246	writel(SDVDD_POWER_OFF, host->ioaddr + SDVDD);
 247	writel(0, host->ioaddr + SDCMD);
 248	writel(0, host->ioaddr + SDARG);
 249	writel(0xf00000, host->ioaddr + SDTOUT);
 250	writel(0, host->ioaddr + SDCDIV);
 251	writel(0x7f8, host->ioaddr + SDHSTS); /* Write 1s to clear */
 252	writel(0, host->ioaddr + SDHCFG);
 253	writel(0, host->ioaddr + SDHBCT);
 254	writel(0, host->ioaddr + SDHBLC);
 255
 256	/* Limit fifo usage due to silicon bug */
 257	temp = readl(host->ioaddr + SDEDM);
 258	temp &= ~((SDEDM_THRESHOLD_MASK << SDEDM_READ_THRESHOLD_SHIFT) |
 259		  (SDEDM_THRESHOLD_MASK << SDEDM_WRITE_THRESHOLD_SHIFT));
 260	temp |= (FIFO_READ_THRESHOLD << SDEDM_READ_THRESHOLD_SHIFT) |
 261		(FIFO_WRITE_THRESHOLD << SDEDM_WRITE_THRESHOLD_SHIFT);
 262	writel(temp, host->ioaddr + SDEDM);
 263	msleep(20);
 264	writel(SDVDD_POWER_ON, host->ioaddr + SDVDD);
 265	msleep(20);
 266	host->clock = 0;
 267	writel(host->hcfg, host->ioaddr + SDHCFG);
 268	writel(host->cdiv, host->ioaddr + SDCDIV);
 269}
 270
 271static void bcm2835_reset(struct mmc_host *mmc)
 272{
 273	struct bcm2835_host *host = mmc_priv(mmc);
 274
 275	if (host->dma_chan)
 276		dmaengine_terminate_sync(host->dma_chan);
 277	host->dma_chan = NULL;
 278	bcm2835_reset_internal(host);
 279}
 280
 281static void bcm2835_finish_command(struct bcm2835_host *host);
 282
 283static void bcm2835_wait_transfer_complete(struct bcm2835_host *host)
 284{
 285	int timediff;
 286	u32 alternate_idle;
 287
 288	alternate_idle = (host->mrq->data->flags & MMC_DATA_READ) ?
 289		SDEDM_FSM_READWAIT : SDEDM_FSM_WRITESTART1;
 290
 291	timediff = 0;
 292
 293	while (1) {
 294		u32 edm, fsm;
 295
 296		edm = readl(host->ioaddr + SDEDM);
 297		fsm = edm & SDEDM_FSM_MASK;
 298
 299		if ((fsm == SDEDM_FSM_IDENTMODE) ||
 300		    (fsm == SDEDM_FSM_DATAMODE))
 301			break;
 302		if (fsm == alternate_idle) {
 303			writel(edm | SDEDM_FORCE_DATA_MODE,
 304			       host->ioaddr + SDEDM);
 305			break;
 306		}
 307
 308		timediff++;
 309		if (timediff == 100000) {
 310			dev_err(&host->pdev->dev,
 311				"wait_transfer_complete - still waiting after %d retries\n",
 312				timediff);
 313			bcm2835_dumpregs(host);
 314			host->mrq->data->error = -ETIMEDOUT;
 315			return;
 316		}
 317		cpu_relax();
 318	}
 319}
 320
 321static void bcm2835_dma_complete(void *param)
 322{
 323	struct bcm2835_host *host = param;
 324
 325	schedule_work(&host->dma_work);
 326}
 327
 328static void bcm2835_transfer_block_pio(struct bcm2835_host *host, bool is_read)
 329{
 330	size_t blksize;
 331	unsigned long wait_max;
 332
 333	blksize = host->data->blksz;
 334
 335	wait_max = jiffies + msecs_to_jiffies(500);
 336
 337	while (blksize) {
 338		int copy_words;
 339		u32 hsts = 0;
 340		size_t len;
 341		u32 *buf;
 342
 343		if (!sg_miter_next(&host->sg_miter)) {
 344			host->data->error = -EINVAL;
 345			break;
 346		}
 347
 348		len = min(host->sg_miter.length, blksize);
 349		if (len % 4) {
 350			host->data->error = -EINVAL;
 351			break;
 352		}
 353
 354		blksize -= len;
 355		host->sg_miter.consumed = len;
 356
 357		buf = (u32 *)host->sg_miter.addr;
 358
 359		copy_words = len / 4;
 360
 361		while (copy_words) {
 362			int burst_words, words;
 363			u32 edm;
 364
 365			burst_words = min(SDDATA_FIFO_PIO_BURST, copy_words);
 366			edm = readl(host->ioaddr + SDEDM);
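			/*
			 * Bits [8:4] of SDEDM report the FIFO fill level in
			 * words (0-16): on a read that is how many words can
			 * be popped, on a write the free space is
			 * SDDATA_FIFO_WORDS minus the level.  (Inferred from
			 * how this driver uses the field, not from a
			 * documented register map.)
			 */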
 367			if (is_read)
 368				words = ((edm >> 4) & 0x1f);
 369			else
 370				words = SDDATA_FIFO_WORDS - ((edm >> 4) & 0x1f);
 371
 372			if (words < burst_words) {
 373				int fsm_state = (edm & SDEDM_FSM_MASK);
 374				struct device *dev = &host->pdev->dev;
 375
 376				if ((is_read &&
 377				     (fsm_state != SDEDM_FSM_READDATA &&
 378				      fsm_state != SDEDM_FSM_READWAIT &&
 379				      fsm_state != SDEDM_FSM_READCRC)) ||
 380				    (!is_read &&
 381				     (fsm_state != SDEDM_FSM_WRITEDATA &&
 382				      fsm_state != SDEDM_FSM_WRITESTART1 &&
 383				      fsm_state != SDEDM_FSM_WRITESTART2))) {
 384					hsts = readl(host->ioaddr + SDHSTS);
 385					dev_err(dev, "fsm %x, hsts %08x\n",
 386						fsm_state, hsts);
 387					if (hsts & SDHSTS_ERROR_MASK)
 388						break;
 389				}
 390
 391				if (time_after(jiffies, wait_max)) {
 392					dev_err(dev, "PIO %s timeout - EDM %08x\n",
 393						is_read ? "read" : "write",
 394						edm);
 395					hsts = SDHSTS_REW_TIME_OUT;
 396					break;
 397				}
 398				ndelay((burst_words - words) *
 399				       host->ns_per_fifo_word);
 400				continue;
 401			} else if (words > copy_words) {
 402				words = copy_words;
 403			}
 404
 405			copy_words -= words;
 406
 407			while (words) {
 408				if (is_read)
 409					*(buf++) = readl(host->ioaddr + SDDATA);
 410				else
 411					writel(*(buf++), host->ioaddr + SDDATA);
 412				words--;
 413			}
 414		}
 415
 416		if (hsts & SDHSTS_ERROR_MASK)
 417			break;
 418	}
 419
 420	sg_miter_stop(&host->sg_miter);
 421}
 422
 423static void bcm2835_transfer_pio(struct bcm2835_host *host)
 424{
 425	struct device *dev = &host->pdev->dev;
 426	u32 sdhsts;
 427	bool is_read;
 428
 429	is_read = (host->data->flags & MMC_DATA_READ) != 0;
 430	bcm2835_transfer_block_pio(host, is_read);
 431
 432	sdhsts = readl(host->ioaddr + SDHSTS);
 433	if (sdhsts & (SDHSTS_CRC16_ERROR |
 434		      SDHSTS_CRC7_ERROR |
 435		      SDHSTS_FIFO_ERROR)) {
 436		dev_err(dev, "%s transfer error - HSTS %08x\n",
 437			is_read ? "read" : "write", sdhsts);
 438		host->data->error = -EILSEQ;
 439	} else if ((sdhsts & (SDHSTS_CMD_TIME_OUT |
 440			      SDHSTS_REW_TIME_OUT))) {
 441		dev_err(dev, "%s timeout error - HSTS %08x\n",
 442			is_read ? "read" : "write", sdhsts);
 443		host->data->error = -ETIMEDOUT;
 444	}
 445}
 446
 447static
 448void bcm2835_prepare_dma(struct bcm2835_host *host, struct mmc_data *data)
 449{
 450	int sg_len, dir_data, dir_slave;
 451	struct dma_async_tx_descriptor *desc = NULL;
 452	struct dma_chan *dma_chan;
 453
 454	dma_chan = host->dma_chan_rxtx;
 455	if (data->flags & MMC_DATA_READ) {
 456		dir_data = DMA_FROM_DEVICE;
 457		dir_slave = DMA_DEV_TO_MEM;
 458	} else {
 459		dir_data = DMA_TO_DEVICE;
 460		dir_slave = DMA_MEM_TO_DEV;
 461	}
 462
 463	/* The block doesn't manage the FIFO DREQs properly for
 464	 * multi-block transfers, so don't attempt to DMA the final
 465	 * few words.  Unfortunately this requires the final sg entry
 466	 * to be trimmed.  N.B. This code demands that the overspill
 467	 * is contained in a single sg entry.
 468	 */
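	/*
	 * Worked example (illustration): with FIFO_READ_THRESHOLD = 4, a
	 * multi-block read trims min(3 * 4, total bytes) = 12 bytes from the
	 * last sg entry, and those 3 leftover words are later read out by
	 * the CPU in bcm2835_dma_complete_work().
	 */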
 469
 470	host->drain_words = 0;
 471	if ((data->blocks > 1) && (dir_data == DMA_FROM_DEVICE)) {
 472		struct scatterlist *sg;
 473		u32 len;
 474		int i;
 475
 476		len = min((u32)(FIFO_READ_THRESHOLD - 1) * 4,
 477			  (u32)data->blocks * data->blksz);
 478
 479		for_each_sg(data->sg, sg, data->sg_len, i) {
 480			if (sg_is_last(sg)) {
 481				WARN_ON(sg->length < len);
 482				sg->length -= len;
 483				host->drain_page = sg_page(sg);
 484				host->drain_offset = sg->offset + sg->length;
 485			}
 486		}
 487		host->drain_words = len / 4;
 488	}
 489
 490	/* The parameters have already been validated, so this will not fail */
 491	(void)dmaengine_slave_config(dma_chan,
 492				     (dir_data == DMA_FROM_DEVICE) ?
 493				     &host->dma_cfg_rx :
 494				     &host->dma_cfg_tx);
 495
 496	sg_len = dma_map_sg(dma_chan->device->dev, data->sg, data->sg_len,
 497			    dir_data);
 498	if (!sg_len)
 499		return;
 500
 501	desc = dmaengine_prep_slave_sg(dma_chan, data->sg, sg_len, dir_slave,
 502				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 503
 504	if (!desc) {
 505		dma_unmap_sg(dma_chan->device->dev, data->sg, sg_len, dir_data);
 506		return;
 507	}
 508
 509	desc->callback = bcm2835_dma_complete;
 510	desc->callback_param = host;
 511	host->dma_desc = desc;
 512	host->dma_chan = dma_chan;
 513	host->dma_dir = dir_data;
 514}
 515
 516static void bcm2835_start_dma(struct bcm2835_host *host)
 517{
 518	dmaengine_submit(host->dma_desc);
 519	dma_async_issue_pending(host->dma_chan);
 520}
 521
 522static void bcm2835_set_transfer_irqs(struct bcm2835_host *host)
 523{
 524	u32 all_irqs = SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN |
 525		SDHCFG_BUSY_IRPT_EN;
 526
 527	if (host->dma_desc) {
 528		host->hcfg = (host->hcfg & ~all_irqs) |
 529			SDHCFG_BUSY_IRPT_EN;
 530	} else {
 531		host->hcfg = (host->hcfg & ~all_irqs) |
 532			SDHCFG_DATA_IRPT_EN |
 533			SDHCFG_BUSY_IRPT_EN;
 534	}
 535
 536	writel(host->hcfg, host->ioaddr + SDHCFG);
 537}
 538
 539static
 540void bcm2835_prepare_data(struct bcm2835_host *host, struct mmc_command *cmd)
 541{
 542	struct mmc_data *data = cmd->data;
 543
 544	WARN_ON(host->data);
 545
 546	host->data = data;
 547	if (!data)
 548		return;
 549
 550	host->data_complete = false;
 551	host->data->bytes_xfered = 0;
 552
 553	if (!host->dma_desc) {
 554		/* Use PIO */
 555		int flags = SG_MITER_ATOMIC;
 556
 557		if (data->flags & MMC_DATA_READ)
 558			flags |= SG_MITER_TO_SG;
 559		else
 560			flags |= SG_MITER_FROM_SG;
 561		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
 562		host->blocks = data->blocks;
 563	}
 564
 565	bcm2835_set_transfer_irqs(host);
 566
 567	writel(data->blksz, host->ioaddr + SDHBCT);
 568	writel(data->blocks, host->ioaddr + SDHBLC);
 569}
 570
 571static u32 bcm2835_read_wait_sdcmd(struct bcm2835_host *host, u32 max_ms)
 572{
 573	struct device *dev = &host->pdev->dev;
 574	u32 value;
 575	int ret;
 576
 577	ret = readl_poll_timeout(host->ioaddr + SDCMD, value,
 578				 !(value & SDCMD_NEW_FLAG), 1, 10);
 579	if (ret == -ETIMEDOUT)
 580		/* if it takes a while make poll interval bigger */
 581		ret = readl_poll_timeout(host->ioaddr + SDCMD, value,
 582					 !(value & SDCMD_NEW_FLAG),
 583					 10, max_ms * 1000);
 584	if (ret == -ETIMEDOUT)
 585		dev_err(dev, "%s: timeout (%d ms)\n", __func__, max_ms);
 586
 587	return value;
 588}
 589
 590static void bcm2835_finish_request(struct bcm2835_host *host)
 591{
 592	struct dma_chan *terminate_chan = NULL;
 593	struct mmc_request *mrq;
 594
 595	cancel_delayed_work(&host->timeout_work);
 596
 597	mrq = host->mrq;
 598
 599	host->mrq = NULL;
 600	host->cmd = NULL;
 601	host->data = NULL;
 602
 603	host->dma_desc = NULL;
 604	terminate_chan = host->dma_chan;
 605	host->dma_chan = NULL;
 606
 607	if (terminate_chan) {
 608		int err = dmaengine_terminate_all(terminate_chan);
 609
 610		if (err)
 611			dev_err(&host->pdev->dev,
 612				"failed to terminate DMA (%d)\n", err);
 613	}
 614
 615	mmc_request_done(mmc_from_priv(host), mrq);
 616}
 617
 618static
 619bool bcm2835_send_command(struct bcm2835_host *host, struct mmc_command *cmd)
 620{
 621	struct device *dev = &host->pdev->dev;
 622	u32 sdcmd, sdhsts;
 623	unsigned long timeout;
 624
 625	WARN_ON(host->cmd);
 626
 627	sdcmd = bcm2835_read_wait_sdcmd(host, 100);
 628	if (sdcmd & SDCMD_NEW_FLAG) {
 629		dev_err(dev, "previous command never completed.\n");
 630		bcm2835_dumpregs(host);
 631		cmd->error = -EILSEQ;
 632		bcm2835_finish_request(host);
 633		return false;
 634	}
 635
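	/*
	 * cmd->busy_timeout is in milliseconds: round it up to whole
	 * seconds, convert to jiffies and add one second of slack;
	 * otherwise fall back to a 10 second watchdog.
	 */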
 636	if (!cmd->data && cmd->busy_timeout > 9000)
 637		timeout = DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
 638	else
 639		timeout = 10 * HZ;
 640	schedule_delayed_work(&host->timeout_work, timeout);
 641
 642	host->cmd = cmd;
 643
 644	/* Clear any error flags */
 645	sdhsts = readl(host->ioaddr + SDHSTS);
 646	if (sdhsts & SDHSTS_ERROR_MASK)
 647		writel(sdhsts, host->ioaddr + SDHSTS);
 648
 649	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
 650		dev_err(dev, "unsupported response type!\n");
 651		cmd->error = -EINVAL;
 652		bcm2835_finish_request(host);
 653		return false;
 654	}
 655
 656	bcm2835_prepare_data(host, cmd);
 657
 658	writel(cmd->arg, host->ioaddr + SDARG);
 659
 660	sdcmd = cmd->opcode & SDCMD_CMD_MASK;
 661
 662	host->use_busy = false;
 663	if (!(cmd->flags & MMC_RSP_PRESENT)) {
 664		sdcmd |= SDCMD_NO_RESPONSE;
 665	} else {
 666		if (cmd->flags & MMC_RSP_136)
 667			sdcmd |= SDCMD_LONG_RESPONSE;
 668		if (cmd->flags & MMC_RSP_BUSY) {
 669			sdcmd |= SDCMD_BUSYWAIT;
 670			host->use_busy = true;
 671		}
 672	}
 673
 674	if (cmd->data) {
 675		if (cmd->data->flags & MMC_DATA_WRITE)
 676			sdcmd |= SDCMD_WRITE_CMD;
 677		if (cmd->data->flags & MMC_DATA_READ)
 678			sdcmd |= SDCMD_READ_CMD;
 679	}
 680
 681	writel(sdcmd | SDCMD_NEW_FLAG, host->ioaddr + SDCMD);
 682
 683	return true;
 684}
 685
 686static void bcm2835_transfer_complete(struct bcm2835_host *host)
 687{
 688	struct mmc_data *data;
 689
 690	WARN_ON(!host->data_complete);
 691
 692	data = host->data;
 693	host->data = NULL;
 694
 695	/* Need to send CMD12 if -
 696	 * a) open-ended multiblock transfer (no CMD23)
 697	 * b) error in multiblock transfer
 698	 */
 699	if (host->mrq->stop && (data->error || !host->use_sbc)) {
 700		if (bcm2835_send_command(host, host->mrq->stop)) {
 701			/* No busy, so poll for completion */
 702			if (!host->use_busy)
 703				bcm2835_finish_command(host);
 704		}
 705	} else {
 706		bcm2835_wait_transfer_complete(host);
 707		bcm2835_finish_request(host);
 708	}
 709}
 710
 711static void bcm2835_finish_data(struct bcm2835_host *host)
 712{
 713	struct device *dev = &host->pdev->dev;
 714	struct mmc_data *data;
 715
 716	data = host->data;
 717
 718	host->hcfg &= ~(SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN);
 719	writel(host->hcfg, host->ioaddr + SDHCFG);
 720
 721	data->bytes_xfered = data->error ? 0 : (data->blksz * data->blocks);
 722
 723	host->data_complete = true;
 724
 725	if (host->cmd) {
 726		/* Data managed to finish before the
 727		 * command completed. Make sure we do
 728		 * things in the proper order.
 729		 */
 730		dev_dbg(dev, "Finished early - HSTS %08x\n",
 731			readl(host->ioaddr + SDHSTS));
 732	} else {
 733		bcm2835_transfer_complete(host);
 734	}
 735}
 736
 737static void bcm2835_finish_command(struct bcm2835_host *host)
 738{
 739	struct device *dev = &host->pdev->dev;
 740	struct mmc_command *cmd = host->cmd;
 741	u32 sdcmd;
 742
 743	sdcmd = bcm2835_read_wait_sdcmd(host, 100);
 744
 745	/* Check for errors */
 746	if (sdcmd & SDCMD_NEW_FLAG) {
 747		dev_err(dev, "command never completed.\n");
 748		bcm2835_dumpregs(host);
 749		host->cmd->error = -EIO;
 750		bcm2835_finish_request(host);
 751		return;
 752	} else if (sdcmd & SDCMD_FAIL_FLAG) {
 753		u32 sdhsts = readl(host->ioaddr + SDHSTS);
 754
 755		/* Clear the errors */
 756		writel(SDHSTS_ERROR_MASK, host->ioaddr + SDHSTS);
 757
 758		if (!(sdhsts & SDHSTS_CRC7_ERROR) ||
 759		    (host->cmd->opcode != MMC_SEND_OP_COND)) {
 760			u32 edm, fsm;
 761
 762			if (sdhsts & SDHSTS_CMD_TIME_OUT) {
 763				host->cmd->error = -ETIMEDOUT;
 764			} else {
 765				dev_err(dev, "unexpected command %d error\n",
 766					host->cmd->opcode);
 767				bcm2835_dumpregs(host);
 768				host->cmd->error = -EILSEQ;
 769			}
 770			edm = readl(host->ioaddr + SDEDM);
 771			fsm = edm & SDEDM_FSM_MASK;
 772			if (fsm == SDEDM_FSM_READWAIT ||
 773			    fsm == SDEDM_FSM_WRITESTART1)
 774				/* Kick the FSM out of its wait */
 775				writel(edm | SDEDM_FORCE_DATA_MODE,
 776				       host->ioaddr + SDEDM);
 777			bcm2835_finish_request(host);
 778			return;
 779		}
 780	}
 781
 782	if (cmd->flags & MMC_RSP_PRESENT) {
 783		if (cmd->flags & MMC_RSP_136) {
 784			int i;
 785
 786			for (i = 0; i < 4; i++) {
 787				cmd->resp[3 - i] =
 788					readl(host->ioaddr + SDRSP0 + i * 4);
 789			}
 790		} else {
 791			cmd->resp[0] = readl(host->ioaddr + SDRSP0);
 792		}
 793	}
 794
 795	if (cmd == host->mrq->sbc) {
 796		/* Finished CMD23, now send actual command. */
 797		host->cmd = NULL;
 798		if (bcm2835_send_command(host, host->mrq->cmd)) {
 799			if (host->data && host->dma_desc)
 800				/* DMA transfer starts now, PIO starts
 801				 * after irq
 802				 */
 803				bcm2835_start_dma(host);
 804
 805			if (!host->use_busy)
 806				bcm2835_finish_command(host);
 807		}
 808	} else if (cmd == host->mrq->stop) {
 809		/* Finished CMD12 */
 810		bcm2835_finish_request(host);
 811	} else {
 812		/* Processed actual command. */
 813		host->cmd = NULL;
 814		if (!host->data)
 815			bcm2835_finish_request(host);
 816		else if (host->data_complete)
 817			bcm2835_transfer_complete(host);
 818	}
 819}
 820
 821static void bcm2835_timeout(struct work_struct *work)
 822{
 823	struct delayed_work *d = to_delayed_work(work);
 824	struct bcm2835_host *host =
 825		container_of(d, struct bcm2835_host, timeout_work);
 826	struct device *dev = &host->pdev->dev;
 827
 828	mutex_lock(&host->mutex);
 829
 830	if (host->mrq) {
 831		dev_err(dev, "timeout waiting for hardware interrupt.\n");
 832		bcm2835_dumpregs(host);
 833
 834		bcm2835_reset(mmc_from_priv(host));
 835
 836		if (host->data) {
 837			host->data->error = -ETIMEDOUT;
 838			bcm2835_finish_data(host);
 839		} else {
 840			if (host->cmd)
 841				host->cmd->error = -ETIMEDOUT;
 842			else
 843				host->mrq->cmd->error = -ETIMEDOUT;
 844
 845			bcm2835_finish_request(host);
 846		}
 847	}
 848
 849	mutex_unlock(&host->mutex);
 850}
 851
 852static bool bcm2835_check_cmd_error(struct bcm2835_host *host, u32 intmask)
 853{
 854	struct device *dev = &host->pdev->dev;
 855
 856	if (!(intmask & SDHSTS_ERROR_MASK))
 857		return false;
 858
 859	if (!host->cmd)
 860		return true;
 861
 862	dev_err(dev, "sdhost_busy_irq: intmask %08x\n", intmask);
 863	if (intmask & SDHSTS_CRC7_ERROR) {
 864		host->cmd->error = -EILSEQ;
 865	} else if (intmask & (SDHSTS_CRC16_ERROR |
 866			      SDHSTS_FIFO_ERROR)) {
 867		if (host->mrq->data)
 868			host->mrq->data->error = -EILSEQ;
 869		else
 870			host->cmd->error = -EILSEQ;
 871	} else if (intmask & SDHSTS_REW_TIME_OUT) {
 872		if (host->mrq->data)
 873			host->mrq->data->error = -ETIMEDOUT;
 874		else
 875			host->cmd->error = -ETIMEDOUT;
 876	} else if (intmask & SDHSTS_CMD_TIME_OUT) {
 877		host->cmd->error = -ETIMEDOUT;
 878	}
 879	bcm2835_dumpregs(host);
 880	return true;
 881}
 882
 883static void bcm2835_check_data_error(struct bcm2835_host *host, u32 intmask)
 884{
 885	if (!host->data)
 886		return;
 887	if (intmask & (SDHSTS_CRC16_ERROR | SDHSTS_FIFO_ERROR))
 888		host->data->error = -EILSEQ;
 889	if (intmask & SDHSTS_REW_TIME_OUT)
 890		host->data->error = -ETIMEDOUT;
 891}
 892
 893static void bcm2835_busy_irq(struct bcm2835_host *host)
 894{
 895	if (WARN_ON(!host->cmd)) {
 896		bcm2835_dumpregs(host);
 897		return;
 898	}
 899
 900	if (WARN_ON(!host->use_busy)) {
 901		bcm2835_dumpregs(host);
 902		return;
 903	}
 904	host->use_busy = false;
 905
 906	bcm2835_finish_command(host);
 907}
 908
 909static void bcm2835_data_irq(struct bcm2835_host *host, u32 intmask)
 910{
 911	/* There are no dedicated data/space available interrupt
 912	 * status bits, so it is necessary to use the single shared
 913	 * data/space available FIFO status bits. It is therefore not
 914	 * an error to get here when there is no data transfer in
 915	 * progress.
 916	 */
 917	if (!host->data)
 918		return;
 919
 920	bcm2835_check_data_error(host, intmask);
 921	if (host->data->error)
 922		goto finished;
 923
 924	if (host->data->flags & MMC_DATA_WRITE) {
 925		/* Use the block interrupt for writes after the first block */
 926		host->hcfg &= ~(SDHCFG_DATA_IRPT_EN);
 927		host->hcfg |= SDHCFG_BLOCK_IRPT_EN;
 928		writel(host->hcfg, host->ioaddr + SDHCFG);
 929		bcm2835_transfer_pio(host);
 930	} else {
 931		bcm2835_transfer_pio(host);
 932		host->blocks--;
 933		if ((host->blocks == 0) || host->data->error)
 934			goto finished;
 935	}
 936	return;
 937
 938finished:
 939	host->hcfg &= ~(SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN);
 940	writel(host->hcfg, host->ioaddr + SDHCFG);
 941}
 942
 943static void bcm2835_data_threaded_irq(struct bcm2835_host *host)
 944{
 945	if (!host->data)
 946		return;
 947	if ((host->blocks == 0) || host->data->error)
 948		bcm2835_finish_data(host);
 949}
 950
 951static void bcm2835_block_irq(struct bcm2835_host *host)
 952{
 953	if (WARN_ON(!host->data)) {
 954		bcm2835_dumpregs(host);
 955		return;
 956	}
 957
 958	if (!host->dma_desc) {
 959		WARN_ON(!host->blocks);
 960		if (host->data->error || (--host->blocks == 0))
 961			bcm2835_finish_data(host);
 962		else
 963			bcm2835_transfer_pio(host);
 964	} else if (host->data->flags & MMC_DATA_WRITE) {
 965		bcm2835_finish_data(host);
 966	}
 967}
 968
 969static irqreturn_t bcm2835_irq(int irq, void *dev_id)
 970{
 971	irqreturn_t result = IRQ_NONE;
 972	struct bcm2835_host *host = dev_id;
 973	u32 intmask;
 974
 975	spin_lock(&host->lock);
 976
 977	intmask = readl(host->ioaddr + SDHSTS);
 978
 979	writel(SDHSTS_BUSY_IRPT |
 980	       SDHSTS_BLOCK_IRPT |
 981	       SDHSTS_SDIO_IRPT |
 982	       SDHSTS_DATA_FLAG,
 983	       host->ioaddr + SDHSTS);
 984
 985	if (intmask & SDHSTS_BLOCK_IRPT) {
 986		bcm2835_check_data_error(host, intmask);
 987		host->irq_block = true;
 988		result = IRQ_WAKE_THREAD;
 989	}
 990
 991	if (intmask & SDHSTS_BUSY_IRPT) {
 992		if (!bcm2835_check_cmd_error(host, intmask)) {
 993			host->irq_busy = true;
 994			result = IRQ_WAKE_THREAD;
 995		} else {
 996			result = IRQ_HANDLED;
 997		}
 998	}
 999
1000	/* There is no true data interrupt status bit, so it is
1001	 * necessary to qualify the data flag with the interrupt
1002	 * enable bit.
1003	 */
1004	if ((intmask & SDHSTS_DATA_FLAG) &&
1005	    (host->hcfg & SDHCFG_DATA_IRPT_EN)) {
1006		bcm2835_data_irq(host, intmask);
1007		host->irq_data = true;
1008		result = IRQ_WAKE_THREAD;
1009	}
1010
1011	spin_unlock(&host->lock);
1012
1013	return result;
1014}
1015
1016static irqreturn_t bcm2835_threaded_irq(int irq, void *dev_id)
1017{
1018	struct bcm2835_host *host = dev_id;
1019	unsigned long flags;
1020	bool block, busy, data;
1021
1022	spin_lock_irqsave(&host->lock, flags);
1023
1024	block = host->irq_block;
1025	busy  = host->irq_busy;
1026	data  = host->irq_data;
1027	host->irq_block = false;
1028	host->irq_busy  = false;
1029	host->irq_data  = false;
1030
1031	spin_unlock_irqrestore(&host->lock, flags);
1032
1033	mutex_lock(&host->mutex);
1034
1035	if (block)
1036		bcm2835_block_irq(host);
1037	if (busy)
1038		bcm2835_busy_irq(host);
1039	if (data)
1040		bcm2835_data_threaded_irq(host);
1041
1042	mutex_unlock(&host->mutex);
1043
1044	return IRQ_HANDLED;
1045}
1046
1047static void bcm2835_dma_complete_work(struct work_struct *work)
1048{
1049	struct bcm2835_host *host =
1050		container_of(work, struct bcm2835_host, dma_work);
1051	struct mmc_data *data;
1052
1053	mutex_lock(&host->mutex);
1054
1055	data = host->data;
1056
1057	if (host->dma_chan) {
1058		dma_unmap_sg(host->dma_chan->device->dev,
1059			     data->sg, data->sg_len,
1060			     host->dma_dir);
1061
1062		host->dma_chan = NULL;
1063	}
1064
1065	if (host->drain_words) {
1066		void *page;
1067		u32 *buf;
1068
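		/*
		 * drain_offset was recorded as sg->offset + sg->length and
		 * may therefore point past the first page; step drain_page
		 * forward so that kmap_local_page() maps the right page.
		 */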
1069		if (host->drain_offset & PAGE_MASK) {
1070			host->drain_page += host->drain_offset >> PAGE_SHIFT;
1071			host->drain_offset &= ~PAGE_MASK;
1072		}
1073		page = kmap_local_page(host->drain_page);
1074		buf = page + host->drain_offset;
1075
1076		while (host->drain_words) {
1077			u32 edm = readl(host->ioaddr + SDEDM);
1078
1079			if ((edm >> 4) & 0x1f)
1080				*(buf++) = readl(host->ioaddr + SDDATA);
1081			host->drain_words--;
1082		}
1083
1084		kunmap_local(page);
1085	}
1086
1087	bcm2835_finish_data(host);
1088
1089	mutex_unlock(&host->mutex);
1090}
1091
1092static void bcm2835_set_clock(struct bcm2835_host *host, unsigned int clock)
1093{
1094	struct mmc_host *mmc = mmc_from_priv(host);
1095	int div;
1096
1097	/* The SDCDIV register has 11 bits, and holds (div - 2).  But
 1098	 * in data mode the max is 50MHz without a minimum, and only
1099	 * the bottom 3 bits are used. Since the switch over is
1100	 * automatic (unless we have marked the card as slow...),
1101	 * chosen values have to make sense in both modes.  Ident mode
 1102	 * must be 100-400KHz, so we can range-check the requested
1103	 * clock. CMD15 must be used to return to data mode, so this
1104	 * can be monitored.
1105	 *
1106	 * clock 250MHz -> 0->125MHz, 1->83.3MHz, 2->62.5MHz, 3->50.0MHz
1107	 *                 4->41.7MHz, 5->35.7MHz, 6->31.3MHz, 7->27.8MHz
1108	 *
1109	 *		 623->400KHz/27.8MHz
1110	 *		 reset value (507)->491159/50MHz
1111	 *
1112	 * BUT, the 3-bit clock divisor in data mode is too small if
1113	 * the core clock is higher than 250MHz, so instead use the
1114	 * SLOW_CARD configuration bit to force the use of the ident
1115	 * clock divisor at all times.
1116	 */
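	/*
	 * Worked example (illustration): with max_clk = 250 MHz and a
	 * requested 25 MHz, div = 250 / 25 = 10; 250 / 10 does not exceed
	 * the request, so after the -2 adjustment SDCDIV is written with 8
	 * and the achieved clock is 250 MHz / (8 + 2) = 25 MHz.
	 */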
1117
1118	if (clock < 100000) {
1119		/* Can't stop the clock, but make it as slow as possible
1120		 * to show willing
1121		 */
1122		host->cdiv = SDCDIV_MAX_CDIV;
1123		writel(host->cdiv, host->ioaddr + SDCDIV);
1124		return;
1125	}
1126
1127	div = host->max_clk / clock;
1128	if (div < 2)
1129		div = 2;
1130	if ((host->max_clk / div) > clock)
1131		div++;
1132	div -= 2;
1133
1134	if (div > SDCDIV_MAX_CDIV)
1135		div = SDCDIV_MAX_CDIV;
1136
1137	clock = host->max_clk / (div + 2);
1138	mmc->actual_clock = clock;
1139
1140	/* Calibrate some delays */
1141
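	/*
	 * A 32-bit FIFO word takes 8 card clocks on a 4-bit bus or 32 on a
	 * 1-bit bus; multiplying by the clock period in ns gives the time
	 * to move one word, which paces the PIO busy-wait in
	 * bcm2835_transfer_block_pio().
	 */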
1142	host->ns_per_fifo_word = (1000000000 / clock) *
1143		((mmc->caps & MMC_CAP_4_BIT_DATA) ? 8 : 32);
1144
1145	host->cdiv = div;
1146	writel(host->cdiv, host->ioaddr + SDCDIV);
1147
1148	/* Set the timeout to 500ms */
1149	writel(mmc->actual_clock / 2, host->ioaddr + SDTOUT);
1150}
1151
1152static void bcm2835_request(struct mmc_host *mmc, struct mmc_request *mrq)
1153{
1154	struct bcm2835_host *host = mmc_priv(mmc);
1155	struct device *dev = &host->pdev->dev;
1156	u32 edm, fsm;
1157
1158	/* Reset the error statuses in case this is a retry */
1159	if (mrq->sbc)
1160		mrq->sbc->error = 0;
1161	if (mrq->cmd)
1162		mrq->cmd->error = 0;
1163	if (mrq->data)
1164		mrq->data->error = 0;
1165	if (mrq->stop)
1166		mrq->stop->error = 0;
1167
1168	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
1169		dev_err(dev, "unsupported block size (%d bytes)\n",
1170			mrq->data->blksz);
1171
1172		if (mrq->cmd)
1173			mrq->cmd->error = -EINVAL;
1174
1175		mmc_request_done(mmc, mrq);
1176		return;
1177	}
1178
1179	mutex_lock(&host->mutex);
1180
1181	WARN_ON(host->mrq);
1182	host->mrq = mrq;
1183
1184	edm = readl(host->ioaddr + SDEDM);
1185	fsm = edm & SDEDM_FSM_MASK;
1186
1187	if ((fsm != SDEDM_FSM_IDENTMODE) &&
1188	    (fsm != SDEDM_FSM_DATAMODE)) {
1189		dev_err(dev, "previous command (%d) not complete (EDM %08x)\n",
1190			readl(host->ioaddr + SDCMD) & SDCMD_CMD_MASK,
1191			edm);
1192		bcm2835_dumpregs(host);
1193
1194		if (mrq->cmd)
1195			mrq->cmd->error = -EILSEQ;
1196
1197		bcm2835_finish_request(host);
1198		mutex_unlock(&host->mutex);
1199		return;
1200	}
1201
1202	if (host->use_dma && mrq->data && (mrq->data->blocks > PIO_THRESHOLD))
1203		bcm2835_prepare_dma(host, mrq->data);
1204
1205	host->use_sbc = !!mrq->sbc && host->mrq->data &&
1206			(host->mrq->data->flags & MMC_DATA_READ);
1207	if (host->use_sbc) {
1208		if (bcm2835_send_command(host, mrq->sbc)) {
1209			if (!host->use_busy)
1210				bcm2835_finish_command(host);
1211		}
1212	} else if (mrq->cmd && bcm2835_send_command(host, mrq->cmd)) {
1213		if (host->data && host->dma_desc) {
1214			/* DMA transfer starts now, PIO starts after irq */
1215			bcm2835_start_dma(host);
1216		}
1217
1218		if (!host->use_busy)
1219			bcm2835_finish_command(host);
1220	}
1221
1222	mutex_unlock(&host->mutex);
1223}
1224
1225static void bcm2835_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1226{
1227	struct bcm2835_host *host = mmc_priv(mmc);
1228
1229	mutex_lock(&host->mutex);
1230
1231	if (!ios->clock || ios->clock != host->clock) {
1232		bcm2835_set_clock(host, ios->clock);
1233		host->clock = ios->clock;
1234	}
1235
1236	/* set bus width */
1237	host->hcfg &= ~SDHCFG_WIDE_EXT_BUS;
1238	if (ios->bus_width == MMC_BUS_WIDTH_4)
1239		host->hcfg |= SDHCFG_WIDE_EXT_BUS;
1240
1241	host->hcfg |= SDHCFG_WIDE_INT_BUS;
1242
1243	/* Disable clever clock switching, to cope with fast core clocks */
1244	host->hcfg |= SDHCFG_SLOW_CARD;
1245
1246	writel(host->hcfg, host->ioaddr + SDHCFG);
1247
1248	mutex_unlock(&host->mutex);
1249}
1250
1251static const struct mmc_host_ops bcm2835_ops = {
1252	.request = bcm2835_request,
1253	.set_ios = bcm2835_set_ios,
1254	.card_hw_reset = bcm2835_reset,
1255};
1256
1257static int bcm2835_add_host(struct bcm2835_host *host)
1258{
1259	struct mmc_host *mmc = mmc_from_priv(host);
1260	struct device *dev = &host->pdev->dev;
1261	char pio_limit_string[20];
1262	int ret;
1263
1264	if (!mmc->f_max || mmc->f_max > host->max_clk)
1265		mmc->f_max = host->max_clk;
1266	mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV;
1267
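	/*
	 * max_busy_timeout is in ms; ~0 / (f_max / 1000) is the longest
	 * busy period whose tick count at f_max still fits in 32 bits
	 * (roughly 85.9 seconds for a 50 MHz f_max, for example).
	 */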
1268	mmc->max_busy_timeout = ~0 / (mmc->f_max / 1000);
1269
1270	dev_dbg(dev, "f_max %d, f_min %d, max_busy_timeout %d\n",
1271		mmc->f_max, mmc->f_min, mmc->max_busy_timeout);
1272
1273	/* host controller capabilities */
1274	mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
1275		     MMC_CAP_NEEDS_POLL | MMC_CAP_HW_RESET | MMC_CAP_CMD23;
1276
1277	spin_lock_init(&host->lock);
1278	mutex_init(&host->mutex);
1279
1280	if (!host->dma_chan_rxtx) {
1281		dev_warn(dev, "unable to initialise DMA channel. Falling back to PIO\n");
1282		host->use_dma = false;
1283	} else {
1284		host->use_dma = true;
1285
1286		host->dma_cfg_tx.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1287		host->dma_cfg_tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1288		host->dma_cfg_tx.direction = DMA_MEM_TO_DEV;
1289		host->dma_cfg_tx.src_addr = 0;
1290		host->dma_cfg_tx.dst_addr = host->phys_addr + SDDATA;
1291
1292		host->dma_cfg_rx.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1293		host->dma_cfg_rx.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1294		host->dma_cfg_rx.direction = DMA_DEV_TO_MEM;
1295		host->dma_cfg_rx.src_addr = host->phys_addr + SDDATA;
1296		host->dma_cfg_rx.dst_addr = 0;
1297
1298		if (dmaengine_slave_config(host->dma_chan_rxtx,
1299					   &host->dma_cfg_tx) != 0 ||
1300		    dmaengine_slave_config(host->dma_chan_rxtx,
1301					   &host->dma_cfg_rx) != 0)
1302			host->use_dma = false;
1303	}
1304
1305	mmc->max_segs = 128;
1306	mmc->max_req_size = min_t(size_t, 524288, dma_max_mapping_size(dev));
1307	mmc->max_seg_size = mmc->max_req_size;
1308	mmc->max_blk_size = 1024;
1309	mmc->max_blk_count =  65535;
1310
1311	/* report supported voltage ranges */
1312	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1313
1314	INIT_WORK(&host->dma_work, bcm2835_dma_complete_work);
1315	INIT_DELAYED_WORK(&host->timeout_work, bcm2835_timeout);
1316
1317	/* Set interrupt enables */
1318	host->hcfg = SDHCFG_BUSY_IRPT_EN;
1319
1320	bcm2835_reset_internal(host);
1321
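	/*
	 * The hard handler only latches which events fired (under the
	 * spinlock); the threaded handler then completes commands and data
	 * transfers under host->mutex, where it may sleep.
	 */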
1322	ret = request_threaded_irq(host->irq, bcm2835_irq,
1323				   bcm2835_threaded_irq,
1324				   0, mmc_hostname(mmc), host);
1325	if (ret) {
1326		dev_err(dev, "failed to request IRQ %d: %d\n", host->irq, ret);
1327		return ret;
1328	}
1329
1330	ret = mmc_add_host(mmc);
1331	if (ret) {
1332		free_irq(host->irq, host);
1333		return ret;
1334	}
1335
1336	pio_limit_string[0] = '\0';
1337	if (host->use_dma && (PIO_THRESHOLD > 0))
1338		sprintf(pio_limit_string, " (>%d)", PIO_THRESHOLD);
1339	dev_info(dev, "loaded - DMA %s%s\n",
1340		 host->use_dma ? "enabled" : "disabled", pio_limit_string);
1341
1342	return 0;
1343}
1344
1345static int bcm2835_probe(struct platform_device *pdev)
1346{
1347	struct device *dev = &pdev->dev;
1348	struct clk *clk;
1349	struct bcm2835_host *host;
1350	struct mmc_host *mmc;
1351	const __be32 *regaddr_p;
1352	int ret;
1353
1354	dev_dbg(dev, "%s\n", __func__);
1355	mmc = mmc_alloc_host(sizeof(*host), dev);
1356	if (!mmc)
1357		return -ENOMEM;
1358
1359	mmc->ops = &bcm2835_ops;
1360	host = mmc_priv(mmc);
1361	host->pdev = pdev;
1362	spin_lock_init(&host->lock);
1363
1364	host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
1365	if (IS_ERR(host->ioaddr)) {
1366		ret = PTR_ERR(host->ioaddr);
1367		goto err;
1368	}
1369
1370	/* Parse OF address directly to get the physical address for
1371	 * DMA to our registers.
1372	 */
1373	regaddr_p = of_get_address(pdev->dev.of_node, 0, NULL, NULL);
1374	if (!regaddr_p) {
1375		dev_err(dev, "Can't get phys address\n");
1376		ret = -EINVAL;
1377		goto err;
1378	}
1379
1380	host->phys_addr = be32_to_cpup(regaddr_p);
1381
1382	host->dma_chan = NULL;
1383	host->dma_desc = NULL;
1384
1385	host->dma_chan_rxtx = dma_request_chan(dev, "rx-tx");
1386	if (IS_ERR(host->dma_chan_rxtx)) {
1387		ret = PTR_ERR(host->dma_chan_rxtx);
1388		host->dma_chan_rxtx = NULL;
1389
1390		if (ret == -EPROBE_DEFER)
1391			goto err;
1392
1393		/* Ignore errors to fall back to PIO mode */
1394	}
1395
1396
1397	clk = devm_clk_get(dev, NULL);
1398	if (IS_ERR(clk)) {
1399		ret = dev_err_probe(dev, PTR_ERR(clk), "could not get clk\n");
1400		goto err;
1401	}
1402
1403	host->max_clk = clk_get_rate(clk);
1404
1405	host->irq = platform_get_irq(pdev, 0);
1406	if (host->irq <= 0) {
1407		ret = -EINVAL;
1408		goto err;
1409	}
1410
1411	ret = mmc_of_parse(mmc);
1412	if (ret)
1413		goto err;
1414
1415	ret = bcm2835_add_host(host);
1416	if (ret)
1417		goto err;
1418
1419	platform_set_drvdata(pdev, host);
1420
1421	dev_dbg(dev, "%s -> OK\n", __func__);
1422
1423	return 0;
1424
1425err:
1426	dev_dbg(dev, "%s -> err %d\n", __func__, ret);
1427	if (host->dma_chan_rxtx)
1428		dma_release_channel(host->dma_chan_rxtx);
1429	mmc_free_host(mmc);
1430
1431	return ret;
1432}
1433
1434static int bcm2835_remove(struct platform_device *pdev)
1435{
1436	struct bcm2835_host *host = platform_get_drvdata(pdev);
1437	struct mmc_host *mmc = mmc_from_priv(host);
1438
1439	mmc_remove_host(mmc);
1440
1441	writel(SDVDD_POWER_OFF, host->ioaddr + SDVDD);
1442
1443	free_irq(host->irq, host);
1444
1445	cancel_work_sync(&host->dma_work);
1446	cancel_delayed_work_sync(&host->timeout_work);
1447
1448	if (host->dma_chan_rxtx)
1449		dma_release_channel(host->dma_chan_rxtx);
1450
1451	mmc_free_host(mmc);
1452
1453	return 0;
1454}
1455
1456static const struct of_device_id bcm2835_match[] = {
1457	{ .compatible = "brcm,bcm2835-sdhost" },
1458	{ }
1459};
1460MODULE_DEVICE_TABLE(of, bcm2835_match);
1461
1462static struct platform_driver bcm2835_driver = {
1463	.probe      = bcm2835_probe,
1464	.remove     = bcm2835_remove,
1465	.driver     = {
1466		.name		= "sdhost-bcm2835",
1467		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
1468		.of_match_table	= bcm2835_match,
1469	},
1470};
1471module_platform_driver(bcm2835_driver);
1472
1473MODULE_ALIAS("platform:sdhost-bcm2835");
1474MODULE_DESCRIPTION("BCM2835 SDHost driver");
1475MODULE_LICENSE("GPL v2");
1476MODULE_AUTHOR("Phil Elwell");
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * bcm2835 sdhost driver.
   4 *
   5 * The 2835 has two SD controllers: The Arasan sdhci controller
   6 * (supported by the iproc driver) and a custom sdhost controller
   7 * (supported by this driver).
   8 *
   9 * The sdhci controller supports both sdcard and sdio.  The sdhost
  10 * controller supports the sdcard only, but has better performance.
  11 * Also note that the rpi3 has sdio wifi, so driving the sdcard with
  12 * the sdhost controller allows to use the sdhci controller for wifi
  13 * support.
  14 *
  15 * The configuration is done by devicetree via pin muxing.  Both
  16 * SD controller are available on the same pins (2 pin groups = pin 22
  17 * to 27 + pin 48 to 53).  So it's possible to use both SD controllers
  18 * at the same time with different pin groups.
  19 *
  20 * Author:      Phil Elwell <phil@raspberrypi.org>
  21 *              Copyright (C) 2015-2016 Raspberry Pi (Trading) Ltd.
  22 *
  23 * Based on
  24 *  mmc-bcm2835.c by Gellert Weisz
  25 * which is, in turn, based on
  26 *  sdhci-bcm2708.c by Broadcom
  27 *  sdhci-bcm2835.c by Stephen Warren and Oleksandr Tymoshenko
  28 *  sdhci.c and sdhci-pci.c by Pierre Ossman
  29 */
  30#include <linux/clk.h>
  31#include <linux/delay.h>
  32#include <linux/device.h>
  33#include <linux/dmaengine.h>
  34#include <linux/dma-mapping.h>
  35#include <linux/err.h>
  36#include <linux/highmem.h>
  37#include <linux/interrupt.h>
  38#include <linux/io.h>
  39#include <linux/iopoll.h>
  40#include <linux/module.h>
  41#include <linux/of_address.h>
  42#include <linux/of_irq.h>
  43#include <linux/platform_device.h>
  44#include <linux/scatterlist.h>
  45#include <linux/time.h>
  46#include <linux/workqueue.h>
  47
  48#include <linux/mmc/host.h>
  49#include <linux/mmc/mmc.h>
  50#include <linux/mmc/sd.h>
  51
  52#define SDCMD  0x00 /* Command to SD card              - 16 R/W */
  53#define SDARG  0x04 /* Argument to SD card             - 32 R/W */
  54#define SDTOUT 0x08 /* Start value for timeout counter - 32 R/W */
  55#define SDCDIV 0x0c /* Start value for clock divider   - 11 R/W */
  56#define SDRSP0 0x10 /* SD card response (31:0)         - 32 R   */
  57#define SDRSP1 0x14 /* SD card response (63:32)        - 32 R   */
  58#define SDRSP2 0x18 /* SD card response (95:64)        - 32 R   */
  59#define SDRSP3 0x1c /* SD card response (127:96)       - 32 R   */
  60#define SDHSTS 0x20 /* SD host status                  - 11 R/W */
  61#define SDVDD  0x30 /* SD card power control           -  1 R/W */
  62#define SDEDM  0x34 /* Emergency Debug Mode            - 13 R/W */
  63#define SDHCFG 0x38 /* Host configuration              -  2 R/W */
  64#define SDHBCT 0x3c /* Host byte count (debug)         - 32 R/W */
  65#define SDDATA 0x40 /* Data to/from SD card            - 32 R/W */
  66#define SDHBLC 0x50 /* Host block count (SDIO/SDHC)    -  9 R/W */
  67
  68#define SDCMD_NEW_FLAG			0x8000
  69#define SDCMD_FAIL_FLAG			0x4000
  70#define SDCMD_BUSYWAIT			0x800
  71#define SDCMD_NO_RESPONSE		0x400
  72#define SDCMD_LONG_RESPONSE		0x200
  73#define SDCMD_WRITE_CMD			0x80
  74#define SDCMD_READ_CMD			0x40
  75#define SDCMD_CMD_MASK			0x3f
  76
  77#define SDCDIV_MAX_CDIV			0x7ff
  78
  79#define SDHSTS_BUSY_IRPT		0x400
  80#define SDHSTS_BLOCK_IRPT		0x200
  81#define SDHSTS_SDIO_IRPT		0x100
  82#define SDHSTS_REW_TIME_OUT		0x80
  83#define SDHSTS_CMD_TIME_OUT		0x40
  84#define SDHSTS_CRC16_ERROR		0x20
  85#define SDHSTS_CRC7_ERROR		0x10
  86#define SDHSTS_FIFO_ERROR		0x08
  87/* Reserved */
  88/* Reserved */
  89#define SDHSTS_DATA_FLAG		0x01
  90
  91#define SDHSTS_TRANSFER_ERROR_MASK	(SDHSTS_CRC7_ERROR | \
  92					 SDHSTS_CRC16_ERROR | \
  93					 SDHSTS_REW_TIME_OUT | \
  94					 SDHSTS_FIFO_ERROR)
  95
  96#define SDHSTS_ERROR_MASK		(SDHSTS_CMD_TIME_OUT | \
  97					 SDHSTS_TRANSFER_ERROR_MASK)
  98
  99#define SDHCFG_BUSY_IRPT_EN	BIT(10)
 100#define SDHCFG_BLOCK_IRPT_EN	BIT(8)
 101#define SDHCFG_SDIO_IRPT_EN	BIT(5)
 102#define SDHCFG_DATA_IRPT_EN	BIT(4)
 103#define SDHCFG_SLOW_CARD	BIT(3)
 104#define SDHCFG_WIDE_EXT_BUS	BIT(2)
 105#define SDHCFG_WIDE_INT_BUS	BIT(1)
 106#define SDHCFG_REL_CMD_LINE	BIT(0)
 107
 108#define SDVDD_POWER_OFF		0
 109#define SDVDD_POWER_ON		1
 110
 111#define SDEDM_FORCE_DATA_MODE	BIT(19)
 112#define SDEDM_CLOCK_PULSE	BIT(20)
 113#define SDEDM_BYPASS		BIT(21)
 114
 115#define SDEDM_WRITE_THRESHOLD_SHIFT	9
 116#define SDEDM_READ_THRESHOLD_SHIFT	14
 117#define SDEDM_THRESHOLD_MASK		0x1f
 118
 119#define SDEDM_FSM_MASK		0xf
 120#define SDEDM_FSM_IDENTMODE	0x0
 121#define SDEDM_FSM_DATAMODE	0x1
 122#define SDEDM_FSM_READDATA	0x2
 123#define SDEDM_FSM_WRITEDATA	0x3
 124#define SDEDM_FSM_READWAIT	0x4
 125#define SDEDM_FSM_READCRC	0x5
 126#define SDEDM_FSM_WRITECRC	0x6
 127#define SDEDM_FSM_WRITEWAIT1	0x7
 128#define SDEDM_FSM_POWERDOWN	0x8
 129#define SDEDM_FSM_POWERUP	0x9
 130#define SDEDM_FSM_WRITESTART1	0xa
 131#define SDEDM_FSM_WRITESTART2	0xb
 132#define SDEDM_FSM_GENPULSES	0xc
 133#define SDEDM_FSM_WRITEWAIT2	0xd
 134#define SDEDM_FSM_STARTPOWDOWN	0xf
 135
 136#define SDDATA_FIFO_WORDS	16
 137
 138#define FIFO_READ_THRESHOLD	4
 139#define FIFO_WRITE_THRESHOLD	4
 140#define SDDATA_FIFO_PIO_BURST	8
 141
 142#define PIO_THRESHOLD	1  /* Maximum block count for PIO (0 = always DMA) */
 143
 144struct bcm2835_host {
 145	spinlock_t		lock;
 146	struct mutex		mutex;
 147
 148	void __iomem		*ioaddr;
 149	u32			phys_addr;
 150
 151	struct clk		*clk;
 152	struct platform_device	*pdev;
 153
 154	unsigned int		clock;		/* Current clock speed */
 155	unsigned int		max_clk;	/* Max possible freq */
 156	struct work_struct	dma_work;
 157	struct delayed_work	timeout_work;	/* Timer for timeouts */
 158	struct sg_mapping_iter	sg_miter;	/* SG state for PIO */
 159	unsigned int		blocks;		/* remaining PIO blocks */
 160	int			irq;		/* Device IRQ */
 161
 162	u32			ns_per_fifo_word;
 163
 164	/* cached registers */
 165	u32			hcfg;
 166	u32			cdiv;
 167
 168	struct mmc_request	*mrq;		/* Current request */
 169	struct mmc_command	*cmd;		/* Current command */
 170	struct mmc_data		*data;		/* Current data request */
 171	bool			data_complete:1;/* Data finished before cmd */
 172	bool			use_busy:1;	/* Wait for busy interrupt */
 173	bool			use_sbc:1;	/* Send CMD23 */
 174
 175	/* for threaded irq handler */
 176	bool			irq_block;
 177	bool			irq_busy;
 178	bool			irq_data;
 179
 180	/* DMA part */
 181	struct dma_chan		*dma_chan_rxtx;
 182	struct dma_chan		*dma_chan;
 183	struct dma_slave_config dma_cfg_rx;
 184	struct dma_slave_config dma_cfg_tx;
 185	struct dma_async_tx_descriptor	*dma_desc;
 186	u32			dma_dir;
 187	u32			drain_words;
 188	struct page		*drain_page;
 189	u32			drain_offset;
 190	bool			use_dma;
 191};
 192
 193static void bcm2835_dumpcmd(struct bcm2835_host *host, struct mmc_command *cmd,
 194			    const char *label)
 195{
 196	struct device *dev = &host->pdev->dev;
 197
 198	if (!cmd)
 199		return;
 200
 201	dev_dbg(dev, "%c%s op %d arg 0x%x flags 0x%x - resp %08x %08x %08x %08x, err %d\n",
 202		(cmd == host->cmd) ? '>' : ' ',
 203		label, cmd->opcode, cmd->arg, cmd->flags,
 204		cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3],
 205		cmd->error);
 206}
 207
 208static void bcm2835_dumpregs(struct bcm2835_host *host)
 209{
 210	struct mmc_request *mrq = host->mrq;
 211	struct device *dev = &host->pdev->dev;
 212
 213	if (mrq) {
 214		bcm2835_dumpcmd(host, mrq->sbc, "sbc");
 215		bcm2835_dumpcmd(host, mrq->cmd, "cmd");
 216		if (mrq->data) {
 217			dev_dbg(dev, "data blocks %x blksz %x - err %d\n",
 218				mrq->data->blocks,
 219				mrq->data->blksz,
 220				mrq->data->error);
 221		}
 222		bcm2835_dumpcmd(host, mrq->stop, "stop");
 223	}
 224
 225	dev_dbg(dev, "=========== REGISTER DUMP ===========\n");
 226	dev_dbg(dev, "SDCMD  0x%08x\n", readl(host->ioaddr + SDCMD));
 227	dev_dbg(dev, "SDARG  0x%08x\n", readl(host->ioaddr + SDARG));
 228	dev_dbg(dev, "SDTOUT 0x%08x\n", readl(host->ioaddr + SDTOUT));
 229	dev_dbg(dev, "SDCDIV 0x%08x\n", readl(host->ioaddr + SDCDIV));
 230	dev_dbg(dev, "SDRSP0 0x%08x\n", readl(host->ioaddr + SDRSP0));
 231	dev_dbg(dev, "SDRSP1 0x%08x\n", readl(host->ioaddr + SDRSP1));
 232	dev_dbg(dev, "SDRSP2 0x%08x\n", readl(host->ioaddr + SDRSP2));
 233	dev_dbg(dev, "SDRSP3 0x%08x\n", readl(host->ioaddr + SDRSP3));
 234	dev_dbg(dev, "SDHSTS 0x%08x\n", readl(host->ioaddr + SDHSTS));
 235	dev_dbg(dev, "SDVDD  0x%08x\n", readl(host->ioaddr + SDVDD));
 236	dev_dbg(dev, "SDEDM  0x%08x\n", readl(host->ioaddr + SDEDM));
 237	dev_dbg(dev, "SDHCFG 0x%08x\n", readl(host->ioaddr + SDHCFG));
 238	dev_dbg(dev, "SDHBCT 0x%08x\n", readl(host->ioaddr + SDHBCT));
 239	dev_dbg(dev, "SDHBLC 0x%08x\n", readl(host->ioaddr + SDHBLC));
 240	dev_dbg(dev, "===========================================\n");
 241}
 242
 243static void bcm2835_reset_internal(struct bcm2835_host *host)
 244{
 245	u32 temp;
 246
 247	writel(SDVDD_POWER_OFF, host->ioaddr + SDVDD);
 248	writel(0, host->ioaddr + SDCMD);
 249	writel(0, host->ioaddr + SDARG);
 250	writel(0xf00000, host->ioaddr + SDTOUT);
 251	writel(0, host->ioaddr + SDCDIV);
 252	writel(0x7f8, host->ioaddr + SDHSTS); /* Write 1s to clear */
 253	writel(0, host->ioaddr + SDHCFG);
 254	writel(0, host->ioaddr + SDHBCT);
 255	writel(0, host->ioaddr + SDHBLC);
 256
 257	/* Limit fifo usage due to silicon bug */
 258	temp = readl(host->ioaddr + SDEDM);
 259	temp &= ~((SDEDM_THRESHOLD_MASK << SDEDM_READ_THRESHOLD_SHIFT) |
 260		  (SDEDM_THRESHOLD_MASK << SDEDM_WRITE_THRESHOLD_SHIFT));
 261	temp |= (FIFO_READ_THRESHOLD << SDEDM_READ_THRESHOLD_SHIFT) |
 262		(FIFO_WRITE_THRESHOLD << SDEDM_WRITE_THRESHOLD_SHIFT);
 263	writel(temp, host->ioaddr + SDEDM);
 264	msleep(20);
 265	writel(SDVDD_POWER_ON, host->ioaddr + SDVDD);
 266	msleep(20);
 267	host->clock = 0;
 268	writel(host->hcfg, host->ioaddr + SDHCFG);
 269	writel(host->cdiv, host->ioaddr + SDCDIV);
 270}
 271
 272static void bcm2835_reset(struct mmc_host *mmc)
 273{
 274	struct bcm2835_host *host = mmc_priv(mmc);
 275
 276	if (host->dma_chan)
 277		dmaengine_terminate_sync(host->dma_chan);
 278	host->dma_chan = NULL;
 279	bcm2835_reset_internal(host);
 280}
 281
 282static void bcm2835_finish_command(struct bcm2835_host *host);
 283
 284static void bcm2835_wait_transfer_complete(struct bcm2835_host *host)
 285{
 286	int timediff;
 287	u32 alternate_idle;
 288
 289	alternate_idle = (host->mrq->data->flags & MMC_DATA_READ) ?
 290		SDEDM_FSM_READWAIT : SDEDM_FSM_WRITESTART1;
 291
 292	timediff = 0;
 293
 294	while (1) {
 295		u32 edm, fsm;
 296
 297		edm = readl(host->ioaddr + SDEDM);
 298		fsm = edm & SDEDM_FSM_MASK;
 299
 300		if ((fsm == SDEDM_FSM_IDENTMODE) ||
 301		    (fsm == SDEDM_FSM_DATAMODE))
 302			break;
 303		if (fsm == alternate_idle) {
 304			writel(edm | SDEDM_FORCE_DATA_MODE,
 305			       host->ioaddr + SDEDM);
 306			break;
 307		}
 308
 309		timediff++;
 310		if (timediff == 100000) {
 311			dev_err(&host->pdev->dev,
 312				"wait_transfer_complete - still waiting after %d retries\n",
 313				timediff);
 314			bcm2835_dumpregs(host);
 315			host->mrq->data->error = -ETIMEDOUT;
 316			return;
 317		}
 318		cpu_relax();
 319	}
 320}
 321
 322static void bcm2835_dma_complete(void *param)
 323{
 324	struct bcm2835_host *host = param;
 325
 326	schedule_work(&host->dma_work);
 327}
 328
 329static void bcm2835_transfer_block_pio(struct bcm2835_host *host, bool is_read)
 330{
 331	size_t blksize;
 332	unsigned long wait_max;
 333
 334	blksize = host->data->blksz;
 335
 336	wait_max = jiffies + msecs_to_jiffies(500);
 337
 338	while (blksize) {
 339		int copy_words;
 340		u32 hsts = 0;
 341		size_t len;
 342		u32 *buf;
 343
 344		if (!sg_miter_next(&host->sg_miter)) {
 345			host->data->error = -EINVAL;
 346			break;
 347		}
 348
 349		len = min(host->sg_miter.length, blksize);
 350		if (len % 4) {
 351			host->data->error = -EINVAL;
 352			break;
 353		}
 354
 355		blksize -= len;
 356		host->sg_miter.consumed = len;
 357
 358		buf = (u32 *)host->sg_miter.addr;
 359
 360		copy_words = len / 4;
 361
 362		while (copy_words) {
 363			int burst_words, words;
 364			u32 edm;
 365
 366			burst_words = min(SDDATA_FIFO_PIO_BURST, copy_words);
 367			edm = readl(host->ioaddr + SDEDM);
 368			if (is_read)
 369				words = ((edm >> 4) & 0x1f);
 370			else
 371				words = SDDATA_FIFO_WORDS - ((edm >> 4) & 0x1f);
 372
 373			if (words < burst_words) {
 374				int fsm_state = (edm & SDEDM_FSM_MASK);
 375				struct device *dev = &host->pdev->dev;
 376
 377				if ((is_read &&
 378				     (fsm_state != SDEDM_FSM_READDATA &&
 379				      fsm_state != SDEDM_FSM_READWAIT &&
 380				      fsm_state != SDEDM_FSM_READCRC)) ||
 381				    (!is_read &&
 382				     (fsm_state != SDEDM_FSM_WRITEDATA &&
 383				      fsm_state != SDEDM_FSM_WRITESTART1 &&
 384				      fsm_state != SDEDM_FSM_WRITESTART2))) {
 385					hsts = readl(host->ioaddr + SDHSTS);
 386					dev_err(dev, "fsm %x, hsts %08x\n",
 387						fsm_state, hsts);
 388					if (hsts & SDHSTS_ERROR_MASK)
 389						break;
 390				}
 391
 392				if (time_after(jiffies, wait_max)) {
 393					dev_err(dev, "PIO %s timeout - EDM %08x\n",
 394						is_read ? "read" : "write",
 395						edm);
 396					hsts = SDHSTS_REW_TIME_OUT;
 397					break;
 398				}
 399				ndelay((burst_words - words) *
 400				       host->ns_per_fifo_word);
 401				continue;
 402			} else if (words > copy_words) {
 403				words = copy_words;
 404			}
 405
 406			copy_words -= words;
 407
 408			while (words) {
 409				if (is_read)
 410					*(buf++) = readl(host->ioaddr + SDDATA);
 411				else
 412					writel(*(buf++), host->ioaddr + SDDATA);
 413				words--;
 414			}
 415		}
 416
 417		if (hsts & SDHSTS_ERROR_MASK)
 418			break;
 419	}
 420
 421	sg_miter_stop(&host->sg_miter);
 422}
 423
 424static void bcm2835_transfer_pio(struct bcm2835_host *host)
 425{
 426	struct device *dev = &host->pdev->dev;
 427	u32 sdhsts;
 428	bool is_read;
 429
 430	is_read = (host->data->flags & MMC_DATA_READ) != 0;
 431	bcm2835_transfer_block_pio(host, is_read);
 432
 433	sdhsts = readl(host->ioaddr + SDHSTS);
 434	if (sdhsts & (SDHSTS_CRC16_ERROR |
 435		      SDHSTS_CRC7_ERROR |
 436		      SDHSTS_FIFO_ERROR)) {
 437		dev_err(dev, "%s transfer error - HSTS %08x\n",
 438			is_read ? "read" : "write", sdhsts);
 439		host->data->error = -EILSEQ;
 440	} else if ((sdhsts & (SDHSTS_CMD_TIME_OUT |
 441			      SDHSTS_REW_TIME_OUT))) {
 442		dev_err(dev, "%s timeout error - HSTS %08x\n",
 443			is_read ? "read" : "write", sdhsts);
 444		host->data->error = -ETIMEDOUT;
 445	}
 446}
 447
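/*
 * Map the request's scatterlist and build a slave DMA descriptor for
 * it.  For multi-block reads the final few words are held back
 * (drain_words) and read out by PIO afterwards - see the comment on
 * FIFO DREQ handling below.  If mapping or descriptor preparation
 * fails, no descriptor is recorded and the request falls back to PIO.
 */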
 448static
 449void bcm2835_prepare_dma(struct bcm2835_host *host, struct mmc_data *data)
 450{
 451	int sg_len, dir_data, dir_slave;
 452	struct dma_async_tx_descriptor *desc = NULL;
 453	struct dma_chan *dma_chan;
 454
 455	dma_chan = host->dma_chan_rxtx;
 456	if (data->flags & MMC_DATA_READ) {
 457		dir_data = DMA_FROM_DEVICE;
 458		dir_slave = DMA_DEV_TO_MEM;
 459	} else {
 460		dir_data = DMA_TO_DEVICE;
 461		dir_slave = DMA_MEM_TO_DEV;
 462	}
 463
 464	/* The block doesn't manage the FIFO DREQs properly for
 465	 * multi-block transfers, so don't attempt to DMA the final
 466	 * few words.  Unfortunately this requires the final sg entry
 467	 * to be trimmed.  N.B. This code demands that the overspill
 468	 * is contained in a single sg entry.
 469	 */
 470
 471	host->drain_words = 0;
 472	if ((data->blocks > 1) && (dir_data == DMA_FROM_DEVICE)) {
 473		struct scatterlist *sg;
 474		u32 len;
 475		int i;
 476
 477		len = min((u32)(FIFO_READ_THRESHOLD - 1) * 4,
 478			  (u32)data->blocks * data->blksz);
 479
 480		for_each_sg(data->sg, sg, data->sg_len, i) {
 481			if (sg_is_last(sg)) {
 482				WARN_ON(sg->length < len);
 483				sg->length -= len;
 484				host->drain_page = sg_page(sg);
 485				host->drain_offset = sg->offset + sg->length;
 486			}
 487		}
 488		host->drain_words = len / 4;
 489	}
 490
 491	/* The parameters have already been validated, so this will not fail */
 492	(void)dmaengine_slave_config(dma_chan,
 493				     (dir_data == DMA_FROM_DEVICE) ?
 494				     &host->dma_cfg_rx :
 495				     &host->dma_cfg_tx);
 496
 497	sg_len = dma_map_sg(dma_chan->device->dev, data->sg, data->sg_len,
 498			    dir_data);
 499	if (!sg_len)
 500		return;
 501
 502	desc = dmaengine_prep_slave_sg(dma_chan, data->sg, sg_len, dir_slave,
 503				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 504
 505	if (!desc) {
 506		dma_unmap_sg(dma_chan->device->dev, data->sg, sg_len, dir_data);
 507		return;
 508	}
 509
 510	desc->callback = bcm2835_dma_complete;
 511	desc->callback_param = host;
 512	host->dma_desc = desc;
 513	host->dma_chan = dma_chan;
 514	host->dma_dir = dir_data;
 515}
 516
 517static void bcm2835_start_dma(struct bcm2835_host *host)
 518{
 519	dmaengine_submit(host->dma_desc);
 520	dma_async_issue_pending(host->dma_chan);
 521}
 522
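/*
 * With a DMA descriptor prepared only the busy interrupt is needed;
 * for PIO the data interrupt is enabled as well so the FIFO can be
 * serviced from the interrupt handler.
 */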
 523static void bcm2835_set_transfer_irqs(struct bcm2835_host *host)
 524{
 525	u32 all_irqs = SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN |
 526		SDHCFG_BUSY_IRPT_EN;
 527
 528	if (host->dma_desc) {
 529		host->hcfg = (host->hcfg & ~all_irqs) |
 530			SDHCFG_BUSY_IRPT_EN;
 531	} else {
 532		host->hcfg = (host->hcfg & ~all_irqs) |
 533			SDHCFG_DATA_IRPT_EN |
 534			SDHCFG_BUSY_IRPT_EN;
 535	}
 536
 537	writel(host->hcfg, host->ioaddr + SDHCFG);
 538}
 539
 540static
 541void bcm2835_prepare_data(struct bcm2835_host *host, struct mmc_command *cmd)
 542{
 543	struct mmc_data *data = cmd->data;
 544
 545	WARN_ON(host->data);
 546
 547	host->data = data;
 548	if (!data)
 549		return;
 550
 551	host->data_complete = false;
 552	host->data->bytes_xfered = 0;
 553
 554	if (!host->dma_desc) {
 555		/* Use PIO */
 556		int flags = SG_MITER_ATOMIC;
 557
 558		if (data->flags & MMC_DATA_READ)
 559			flags |= SG_MITER_TO_SG;
 560		else
 561			flags |= SG_MITER_FROM_SG;
 562		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
 563		host->blocks = data->blocks;
 564	}
 565
 566	bcm2835_set_transfer_irqs(host);
 567
 568	writel(data->blksz, host->ioaddr + SDHBCT);
 569	writel(data->blocks, host->ioaddr + SDHBLC);
 570}
 571
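/*
 * Read SDCMD, waiting for any previous command to complete (NEW_FLAG
 * cleared).  Poll at 1us intervals for the first 10us, then at 10us
 * intervals for up to max_ms milliseconds.
 */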
 572static u32 bcm2835_read_wait_sdcmd(struct bcm2835_host *host, u32 max_ms)
 573{
 574	struct device *dev = &host->pdev->dev;
 575	u32 value;
 576	int ret;
 577
 578	ret = readl_poll_timeout(host->ioaddr + SDCMD, value,
 579				 !(value & SDCMD_NEW_FLAG), 1, 10);
 580	if (ret == -ETIMEDOUT)
 581		/* still pending - retry with a coarser poll interval */
 582		ret = readl_poll_timeout(host->ioaddr + SDCMD, value,
 583					 !(value & SDCMD_NEW_FLAG),
 584					 10, max_ms * 1000);
 585	if (ret == -ETIMEDOUT)
 586		dev_err(dev, "%s: timeout (%d ms)\n", __func__, max_ms);
 587
 588	return value;
 589}
 590
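/*
 * Tear down the current request: cancel the software timeout, drop the
 * per-request state, terminate any in-flight DMA and hand the request
 * back to the MMC core.
 */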
 591static void bcm2835_finish_request(struct bcm2835_host *host)
 592{
 593	struct dma_chan *terminate_chan = NULL;
 594	struct mmc_request *mrq;
 595
 596	cancel_delayed_work(&host->timeout_work);
 597
 598	mrq = host->mrq;
 599
 600	host->mrq = NULL;
 601	host->cmd = NULL;
 602	host->data = NULL;
 603
 604	host->dma_desc = NULL;
 605	terminate_chan = host->dma_chan;
 606	host->dma_chan = NULL;
 607
 608	if (terminate_chan) {
 609		int err = dmaengine_terminate_all(terminate_chan);
 610
 611		if (err)
 612			dev_err(&host->pdev->dev,
 613				"failed to terminate DMA (%d)\n", err);
 614	}
 615
 616	mmc_request_done(mmc_from_priv(host), mrq);
 617}
 618
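/*
 * Issue a command.  Returns false (after failing the request) if the
 * previous command never completed or the response type is
 * unsupported; otherwise arms the software timeout, programs the data
 * and argument registers and writes SDCMD with NEW_FLAG set.
 */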
 619static
 620bool bcm2835_send_command(struct bcm2835_host *host, struct mmc_command *cmd)
 621{
 622	struct device *dev = &host->pdev->dev;
 623	u32 sdcmd, sdhsts;
 624	unsigned long timeout;
 625
 626	WARN_ON(host->cmd);
 627
 628	sdcmd = bcm2835_read_wait_sdcmd(host, 100);
 629	if (sdcmd & SDCMD_NEW_FLAG) {
 630		dev_err(dev, "previous command never completed.\n");
 631		bcm2835_dumpregs(host);
 632		cmd->error = -EILSEQ;
 633		bcm2835_finish_request(host);
 634		return false;
 635	}
 636
 637	if (!cmd->data && cmd->busy_timeout > 9000)
 638		timeout = DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
 639	else
 640		timeout = 10 * HZ;
 641	schedule_delayed_work(&host->timeout_work, timeout);
 642
 643	host->cmd = cmd;
 644
 645	/* Clear any error flags */
 646	sdhsts = readl(host->ioaddr + SDHSTS);
 647	if (sdhsts & SDHSTS_ERROR_MASK)
 648		writel(sdhsts, host->ioaddr + SDHSTS);
 649
 650	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
 651		dev_err(dev, "unsupported response type!\n");
 652		cmd->error = -EINVAL;
 653		bcm2835_finish_request(host);
 654		return false;
 655	}
 656
 657	bcm2835_prepare_data(host, cmd);
 658
 659	writel(cmd->arg, host->ioaddr + SDARG);
 660
 661	sdcmd = cmd->opcode & SDCMD_CMD_MASK;
 662
 663	host->use_busy = false;
 664	if (!(cmd->flags & MMC_RSP_PRESENT)) {
 665		sdcmd |= SDCMD_NO_RESPONSE;
 666	} else {
 667		if (cmd->flags & MMC_RSP_136)
 668			sdcmd |= SDCMD_LONG_RESPONSE;
 669		if (cmd->flags & MMC_RSP_BUSY) {
 670			sdcmd |= SDCMD_BUSYWAIT;
 671			host->use_busy = true;
 672		}
 673	}
 674
 675	if (cmd->data) {
 676		if (cmd->data->flags & MMC_DATA_WRITE)
 677			sdcmd |= SDCMD_WRITE_CMD;
 678		if (cmd->data->flags & MMC_DATA_READ)
 679			sdcmd |= SDCMD_READ_CMD;
 680	}
 681
 682	writel(sdcmd | SDCMD_NEW_FLAG, host->ioaddr + SDCMD);
 683
 684	return true;
 685}
 686
 687static void bcm2835_transfer_complete(struct bcm2835_host *host)
 688{
 689	struct mmc_data *data;
 690
 691	WARN_ON(!host->data_complete);
 692
 693	data = host->data;
 694	host->data = NULL;
 695
 696	/* Need to send CMD12 if -
 697	 * a) open-ended multiblock transfer (no CMD23)
 698	 * b) error in multiblock transfer
 699	 */
 700	if (host->mrq->stop && (data->error || !host->use_sbc)) {
 701		if (bcm2835_send_command(host, host->mrq->stop)) {
 702			/* No busy, so poll for completion */
 703			if (!host->use_busy)
 704				bcm2835_finish_command(host);
 705		}
 706	} else {
 707		bcm2835_wait_transfer_complete(host);
 708		bcm2835_finish_request(host);
 709	}
 710}
 711
 712static void bcm2835_finish_data(struct bcm2835_host *host)
 713{
 714	struct device *dev = &host->pdev->dev;
 715	struct mmc_data *data;
 716
 717	data = host->data;
 718
 719	host->hcfg &= ~(SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN);
 720	writel(host->hcfg, host->ioaddr + SDHCFG);
 721
 722	data->bytes_xfered = data->error ? 0 : (data->blksz * data->blocks);
 723
 724	host->data_complete = true;
 725
 726	if (host->cmd) {
 727		/* Data managed to finish before the
 728		 * command completed. Make sure we do
 729		 * things in the proper order.
 730		 */
 731		dev_dbg(dev, "Finished early - HSTS %08x\n",
 732			readl(host->ioaddr + SDHSTS));
 733	} else {
 734		bcm2835_transfer_complete(host);
 735	}
 736}
 737
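/*
 * Complete the command phase: check SDCMD for failure, latch the short
 * or 136-bit response, then move on to the next step of the request -
 * the real command after CMD23, request completion after CMD12, or the
 * data phase otherwise.
 */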
 738static void bcm2835_finish_command(struct bcm2835_host *host)
 739{
 740	struct device *dev = &host->pdev->dev;
 741	struct mmc_command *cmd = host->cmd;
 742	u32 sdcmd;
 743
 744	sdcmd = bcm2835_read_wait_sdcmd(host, 100);
 745
 746	/* Check for errors */
 747	if (sdcmd & SDCMD_NEW_FLAG) {
 748		dev_err(dev, "command never completed.\n");
 749		bcm2835_dumpregs(host);
 750		host->cmd->error = -EIO;
 751		bcm2835_finish_request(host);
 752		return;
 753	} else if (sdcmd & SDCMD_FAIL_FLAG) {
 754		u32 sdhsts = readl(host->ioaddr + SDHSTS);
 755
 756		/* Clear the errors */
 757		writel(SDHSTS_ERROR_MASK, host->ioaddr + SDHSTS);
 758
 759		if (!(sdhsts & SDHSTS_CRC7_ERROR) ||
 760		    (host->cmd->opcode != MMC_SEND_OP_COND)) {
 761			u32 edm, fsm;
 762
 763			if (sdhsts & SDHSTS_CMD_TIME_OUT) {
 764				host->cmd->error = -ETIMEDOUT;
 765			} else {
 766				dev_err(dev, "unexpected command %d error\n",
 767					host->cmd->opcode);
 768				bcm2835_dumpregs(host);
 769				host->cmd->error = -EILSEQ;
 770			}
 771			edm = readl(host->ioaddr + SDEDM);
 772			fsm = edm & SDEDM_FSM_MASK;
 773			if (fsm == SDEDM_FSM_READWAIT ||
 774			    fsm == SDEDM_FSM_WRITESTART1)
 775				/* Kick the FSM out of its wait */
 776				writel(edm | SDEDM_FORCE_DATA_MODE,
 777				       host->ioaddr + SDEDM);
 778			bcm2835_finish_request(host);
 779			return;
 780		}
 781	}
 782
 783	if (cmd->flags & MMC_RSP_PRESENT) {
 784		if (cmd->flags & MMC_RSP_136) {
 785			int i;
 786
 787			for (i = 0; i < 4; i++) {
 788				cmd->resp[3 - i] =
 789					readl(host->ioaddr + SDRSP0 + i * 4);
 790			}
 791		} else {
 792			cmd->resp[0] = readl(host->ioaddr + SDRSP0);
 793		}
 794	}
 795
 796	if (cmd == host->mrq->sbc) {
 797		/* Finished CMD23, now send actual command. */
 798		host->cmd = NULL;
 799		if (bcm2835_send_command(host, host->mrq->cmd)) {
 800			if (host->data && host->dma_desc)
 801				/* DMA transfer starts now, PIO starts
 802				 * after irq
 803				 */
 804				bcm2835_start_dma(host);
 805
 806			if (!host->use_busy)
 807				bcm2835_finish_command(host);
 808		}
 809	} else if (cmd == host->mrq->stop) {
 810		/* Finished CMD12 */
 811		bcm2835_finish_request(host);
 812	} else {
 813		/* Processed actual command. */
 814		host->cmd = NULL;
 815		if (!host->data)
 816			bcm2835_finish_request(host);
 817		else if (host->data_complete)
 818			bcm2835_transfer_complete(host);
 819	}
 820}
 821
 822static void bcm2835_timeout(struct work_struct *work)
 823{
 824	struct delayed_work *d = to_delayed_work(work);
 825	struct bcm2835_host *host =
 826		container_of(d, struct bcm2835_host, timeout_work);
 827	struct device *dev = &host->pdev->dev;
 828
 829	mutex_lock(&host->mutex);
 830
 831	if (host->mrq) {
 832		dev_err(dev, "timeout waiting for hardware interrupt.\n");
 833		bcm2835_dumpregs(host);
 834
 835		bcm2835_reset(mmc_from_priv(host));
 836
 837		if (host->data) {
 838			host->data->error = -ETIMEDOUT;
 839			bcm2835_finish_data(host);
 840		} else {
 841			if (host->cmd)
 842				host->cmd->error = -ETIMEDOUT;
 843			else
 844				host->mrq->cmd->error = -ETIMEDOUT;
 845
 846			bcm2835_finish_request(host);
 847		}
 848	}
 849
 850	mutex_unlock(&host->mutex);
 851}
 852
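/*
 * Map SDHSTS error bits to an errno on the command or data (CRC and
 * FIFO errors -> -EILSEQ, timeouts -> -ETIMEDOUT).  Returns true if
 * any error bit was set, false otherwise.
 */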
 853static bool bcm2835_check_cmd_error(struct bcm2835_host *host, u32 intmask)
 854{
 855	struct device *dev = &host->pdev->dev;
 856
 857	if (!(intmask & SDHSTS_ERROR_MASK))
 858		return false;
 859
 860	if (!host->cmd)
 861		return true;
 862
 863	dev_err(dev, "sdhost_busy_irq: intmask %08x\n", intmask);
 864	if (intmask & SDHSTS_CRC7_ERROR) {
 865		host->cmd->error = -EILSEQ;
 866	} else if (intmask & (SDHSTS_CRC16_ERROR |
 867			      SDHSTS_FIFO_ERROR)) {
 868		if (host->mrq->data)
 869			host->mrq->data->error = -EILSEQ;
 870		else
 871			host->cmd->error = -EILSEQ;
 872	} else if (intmask & SDHSTS_REW_TIME_OUT) {
 873		if (host->mrq->data)
 874			host->mrq->data->error = -ETIMEDOUT;
 875		else
 876			host->cmd->error = -ETIMEDOUT;
 877	} else if (intmask & SDHSTS_CMD_TIME_OUT) {
 878		host->cmd->error = -ETIMEDOUT;
 879	}
 880	bcm2835_dumpregs(host);
 881	return true;
 882}
 883
 884static void bcm2835_check_data_error(struct bcm2835_host *host, u32 intmask)
 885{
 886	if (!host->data)
 887		return;
 888	if (intmask & (SDHSTS_CRC16_ERROR | SDHSTS_FIFO_ERROR))
 889		host->data->error = -EILSEQ;
 890	if (intmask & SDHSTS_REW_TIME_OUT)
 891		host->data->error = -ETIMEDOUT;
 892}
 893
 894static void bcm2835_busy_irq(struct bcm2835_host *host)
 895{
 896	if (WARN_ON(!host->cmd)) {
 897		bcm2835_dumpregs(host);
 898		return;
 899	}
 900
 901	if (WARN_ON(!host->use_busy)) {
 902		bcm2835_dumpregs(host);
 903		return;
 904	}
 905	host->use_busy = false;
 906
 907	bcm2835_finish_command(host);
 908}
 909
 910static void bcm2835_data_irq(struct bcm2835_host *host, u32 intmask)
 911{
 912	/* There are no dedicated data/space available interrupt
 913	 * status bits, so it is necessary to use the single shared
 914	 * data/space available FIFO status bits. It is therefore not
 915	 * an error to get here when there is no data transfer in
 916	 * progress.
 917	 */
 918	if (!host->data)
 919		return;
 920
 921	bcm2835_check_data_error(host, intmask);
 922	if (host->data->error)
 923		goto finished;
 924
 925	if (host->data->flags & MMC_DATA_WRITE) {
 926		/* Use the block interrupt for writes after the first block */
 927		host->hcfg &= ~(SDHCFG_DATA_IRPT_EN);
 928		host->hcfg |= SDHCFG_BLOCK_IRPT_EN;
 929		writel(host->hcfg, host->ioaddr + SDHCFG);
 930		bcm2835_transfer_pio(host);
 931	} else {
 932		bcm2835_transfer_pio(host);
 933		host->blocks--;
 934		if ((host->blocks == 0) || host->data->error)
 935			goto finished;
 936	}
 937	return;
 938
 939finished:
 940	host->hcfg &= ~(SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN);
 941	writel(host->hcfg, host->ioaddr + SDHCFG);
 942}
 943
 944static void bcm2835_data_threaded_irq(struct bcm2835_host *host)
 945{
 946	if (!host->data)
 947		return;
 948	if ((host->blocks == 0) || host->data->error)
 949		bcm2835_finish_data(host);
 950}
 951
 952static void bcm2835_block_irq(struct bcm2835_host *host)
 953{
 954	if (WARN_ON(!host->data)) {
 955		bcm2835_dumpregs(host);
 956		return;
 957	}
 958
 959	if (!host->dma_desc) {
 960		WARN_ON(!host->blocks);
 961		if (host->data->error || (--host->blocks == 0))
 962			bcm2835_finish_data(host);
 963		else
 964			bcm2835_transfer_pio(host);
 965	} else if (host->data->flags & MMC_DATA_WRITE) {
 966		bcm2835_finish_data(host);
 967	}
 968}
 969
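/*
 * Hard interrupt handler: acknowledge the interrupt sources, note
 * which of the block/busy/data conditions fired for the threaded
 * handler, and service the PIO FIFO directly via bcm2835_data_irq().
 */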
 970static irqreturn_t bcm2835_irq(int irq, void *dev_id)
 971{
 972	irqreturn_t result = IRQ_NONE;
 973	struct bcm2835_host *host = dev_id;
 974	u32 intmask;
 975
 976	spin_lock(&host->lock);
 977
 978	intmask = readl(host->ioaddr + SDHSTS);
 979
 980	writel(SDHSTS_BUSY_IRPT |
 981	       SDHSTS_BLOCK_IRPT |
 982	       SDHSTS_SDIO_IRPT |
 983	       SDHSTS_DATA_FLAG,
 984	       host->ioaddr + SDHSTS);
 985
 986	if (intmask & SDHSTS_BLOCK_IRPT) {
 987		bcm2835_check_data_error(host, intmask);
 988		host->irq_block = true;
 989		result = IRQ_WAKE_THREAD;
 990	}
 991
 992	if (intmask & SDHSTS_BUSY_IRPT) {
 993		if (!bcm2835_check_cmd_error(host, intmask)) {
 994			host->irq_busy = true;
 995			result = IRQ_WAKE_THREAD;
 996		} else {
 997			result = IRQ_HANDLED;
 998		}
 999	}
1000
1001	/* There is no true data interrupt status bit, so it is
1002	 * necessary to qualify the data flag with the interrupt
1003	 * enable bit.
1004	 */
1005	if ((intmask & SDHSTS_DATA_FLAG) &&
1006	    (host->hcfg & SDHCFG_DATA_IRPT_EN)) {
1007		bcm2835_data_irq(host, intmask);
1008		host->irq_data = true;
1009		result = IRQ_WAKE_THREAD;
1010	}
1011
1012	spin_unlock(&host->lock);
1013
1014	return result;
1015}
1016
1017static irqreturn_t bcm2835_threaded_irq(int irq, void *dev_id)
1018{
1019	struct bcm2835_host *host = dev_id;
1020	unsigned long flags;
1021	bool block, busy, data;
1022
1023	spin_lock_irqsave(&host->lock, flags);
1024
1025	block = host->irq_block;
1026	busy  = host->irq_busy;
1027	data  = host->irq_data;
1028	host->irq_block = false;
1029	host->irq_busy  = false;
1030	host->irq_data  = false;
1031
1032	spin_unlock_irqrestore(&host->lock, flags);
1033
1034	mutex_lock(&host->mutex);
1035
1036	if (block)
1037		bcm2835_block_irq(host);
1038	if (busy)
1039		bcm2835_busy_irq(host);
1040	if (data)
1041		bcm2835_data_threaded_irq(host);
1042
1043	mutex_unlock(&host->mutex);
1044
1045	return IRQ_HANDLED;
1046}
1047
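/*
 * Workqueue bottom half for DMA completion: unmap the scatterlist,
 * drain any words deliberately left in the FIFO by
 * bcm2835_prepare_dma() using PIO, and finish the data phase.
 */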
1048static void bcm2835_dma_complete_work(struct work_struct *work)
1049{
1050	struct bcm2835_host *host =
1051		container_of(work, struct bcm2835_host, dma_work);
1052	struct mmc_data *data;
1053
1054	mutex_lock(&host->mutex);
1055
1056	data = host->data;
1057
1058	if (host->dma_chan) {
1059		dma_unmap_sg(host->dma_chan->device->dev,
1060			     data->sg, data->sg_len,
1061			     host->dma_dir);
1062
1063		host->dma_chan = NULL;
1064	}
1065
1066	if (host->drain_words) {
1067		void *page;
1068		u32 *buf;
1069
1070		if (host->drain_offset & PAGE_MASK) {
1071			host->drain_page += host->drain_offset >> PAGE_SHIFT;
1072			host->drain_offset &= ~PAGE_MASK;
1073		}
1074		page = kmap_local_page(host->drain_page);
1075		buf = page + host->drain_offset;
1076
1077		while (host->drain_words) {
1078			u32 edm = readl(host->ioaddr + SDEDM);
1079
1080			if ((edm >> 4) & 0x1f)
1081				*(buf++) = readl(host->ioaddr + SDDATA);
1082			host->drain_words--;
1083		}
1084
1085		kunmap_local(page);
1086	}
1087
1088	bcm2835_finish_data(host);
1089
1090	mutex_unlock(&host->mutex);
1091}
1092
1093static void bcm2835_set_clock(struct bcm2835_host *host, unsigned int clock)
1094{
1095	struct mmc_host *mmc = mmc_from_priv(host);
1096	int div;
1097
1098	/* The SDCDIV register has 11 bits, and holds (div - 2).  But
1099	 * in data mode the max is 50MHz without a minimum, and only
1100	 * the bottom 3 bits are used. Since the switch over is
1101	 * automatic (unless we have marked the card as slow...),
1102	 * chosen values have to make sense in both modes.  Ident mode
1103	 * must be 100-400KHz, so we can range-check the requested
1104	 * clock. CMD15 must be used to return to data mode, so this
1105	 * can be monitored.
1106	 *
1107	 * clock 250MHz -> 0->125MHz, 1->83.3MHz, 2->62.5MHz, 3->50.0MHz
1108	 *                 4->41.7MHz, 5->35.7MHz, 6->31.3MHz, 7->27.8MHz
1109	 *
1110	 *		 623->400KHz/27.8MHz
1111	 *		 reset value (507)->491159Hz/50MHz
1112	 *
1113	 * BUT, the 3-bit clock divisor in data mode is too small if
1114	 * the core clock is higher than 250MHz, so instead use the
1115	 * SLOW_CARD configuration bit to force the use of the ident
1116	 * clock divisor at all times.
1117	 */
1118
1119	if (clock < 100000) {
1120		/* Can't stop the clock, but make it as slow as possible
1121		 * to show willing
1122		 */
1123		host->cdiv = SDCDIV_MAX_CDIV;
1124		writel(host->cdiv, host->ioaddr + SDCDIV);
1125		return;
1126	}
1127
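	/*
	 * Worked example (assuming a 250MHz core clock and a 25MHz
	 * request): div = 250/25 = 10; 250/10 does not exceed 25MHz so
	 * no correction; div -= 2 gives a cdiv of 8 and an actual clock
	 * of 250MHz / (8 + 2) = 25MHz.
	 */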
1128	div = host->max_clk / clock;
1129	if (div < 2)
1130		div = 2;
1131	if ((host->max_clk / div) > clock)
1132		div++;
1133	div -= 2;
1134
1135	if (div > SDCDIV_MAX_CDIV)
1136		div = SDCDIV_MAX_CDIV;
1137
1138	clock = host->max_clk / (div + 2);
1139	mmc->actual_clock = clock;
1140
1141	/* Calibrate some delays */
1142
1143	host->ns_per_fifo_word = (1000000000 / clock) *
1144		((mmc->caps & MMC_CAP_4_BIT_DATA) ? 8 : 32);
1145
1146	host->cdiv = div;
1147	writel(host->cdiv, host->ioaddr + SDCDIV);
1148
1149	/* Set the timeout to 500ms */
1150	writel(mmc->actual_clock / 2, host->ioaddr + SDTOUT);
1151}
1152
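/*
 * mmc_host_ops .request entry point: validate the block size, make
 * sure the controller FSM is idle, optionally set up DMA for larger
 * transfers and start the request with either CMD23 (when used) or the
 * command itself.
 */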
1153static void bcm2835_request(struct mmc_host *mmc, struct mmc_request *mrq)
1154{
1155	struct bcm2835_host *host = mmc_priv(mmc);
1156	struct device *dev = &host->pdev->dev;
1157	u32 edm, fsm;
1158
1159	/* Reset the error statuses in case this is a retry */
1160	if (mrq->sbc)
1161		mrq->sbc->error = 0;
1162	if (mrq->cmd)
1163		mrq->cmd->error = 0;
1164	if (mrq->data)
1165		mrq->data->error = 0;
1166	if (mrq->stop)
1167		mrq->stop->error = 0;
1168
1169	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
1170		dev_err(dev, "unsupported block size (%d bytes)\n",
1171			mrq->data->blksz);
1172
1173		if (mrq->cmd)
1174			mrq->cmd->error = -EINVAL;
1175
1176		mmc_request_done(mmc, mrq);
1177		return;
1178	}
1179
1180	mutex_lock(&host->mutex);
1181
1182	WARN_ON(host->mrq);
1183	host->mrq = mrq;
1184
1185	edm = readl(host->ioaddr + SDEDM);
1186	fsm = edm & SDEDM_FSM_MASK;
1187
1188	if ((fsm != SDEDM_FSM_IDENTMODE) &&
1189	    (fsm != SDEDM_FSM_DATAMODE)) {
1190		dev_err(dev, "previous command (%d) not complete (EDM %08x)\n",
1191			readl(host->ioaddr + SDCMD) & SDCMD_CMD_MASK,
1192			edm);
1193		bcm2835_dumpregs(host);
1194
1195		if (mrq->cmd)
1196			mrq->cmd->error = -EILSEQ;
1197
1198		bcm2835_finish_request(host);
1199		mutex_unlock(&host->mutex);
1200		return;
1201	}
1202
1203	if (host->use_dma && mrq->data && (mrq->data->blocks > PIO_THRESHOLD))
1204		bcm2835_prepare_dma(host, mrq->data);
1205
1206	host->use_sbc = !!mrq->sbc && host->mrq->data &&
1207			(host->mrq->data->flags & MMC_DATA_READ);
1208	if (host->use_sbc) {
1209		if (bcm2835_send_command(host, mrq->sbc)) {
1210			if (!host->use_busy)
1211				bcm2835_finish_command(host);
1212		}
1213	} else if (mrq->cmd && bcm2835_send_command(host, mrq->cmd)) {
1214		if (host->data && host->dma_desc) {
1215			/* DMA transfer starts now, PIO starts after irq */
1216			bcm2835_start_dma(host);
1217		}
1218
1219		if (!host->use_busy)
1220			bcm2835_finish_command(host);
1221	}
1222
1223	mutex_unlock(&host->mutex);
1224}
1225
1226static void bcm2835_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1227{
1228	struct bcm2835_host *host = mmc_priv(mmc);
1229
1230	mutex_lock(&host->mutex);
1231
1232	if (!ios->clock || ios->clock != host->clock) {
1233		bcm2835_set_clock(host, ios->clock);
1234		host->clock = ios->clock;
1235	}
1236
1237	/* set bus width */
1238	host->hcfg &= ~SDHCFG_WIDE_EXT_BUS;
1239	if (ios->bus_width == MMC_BUS_WIDTH_4)
1240		host->hcfg |= SDHCFG_WIDE_EXT_BUS;
1241
1242	host->hcfg |= SDHCFG_WIDE_INT_BUS;
1243
1244	/* Disable clever clock switching, to cope with fast core clocks */
1245	host->hcfg |= SDHCFG_SLOW_CARD;
1246
1247	writel(host->hcfg, host->ioaddr + SDHCFG);
1248
1249	mutex_unlock(&host->mutex);
1250}
1251
1252static const struct mmc_host_ops bcm2835_ops = {
1253	.request = bcm2835_request,
1254	.set_ios = bcm2835_set_ios,
1255	.card_hw_reset = bcm2835_reset,
1256};
1257
1258static int bcm2835_add_host(struct bcm2835_host *host)
1259{
1260	struct mmc_host *mmc = mmc_from_priv(host);
1261	struct device *dev = &host->pdev->dev;
1262	char pio_limit_string[20];
1263	int ret;
1264
1265	if (!mmc->f_max || mmc->f_max > host->max_clk)
1266		mmc->f_max = host->max_clk;
1267	mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV;
1268
1269	mmc->max_busy_timeout = ~0 / (mmc->f_max / 1000);
1270
1271	dev_dbg(dev, "f_max %d, f_min %d, max_busy_timeout %d\n",
1272		mmc->f_max, mmc->f_min, mmc->max_busy_timeout);
1273
1274	/* host controller capabilities */
1275	mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
1276		     MMC_CAP_NEEDS_POLL | MMC_CAP_HW_RESET | MMC_CAP_CMD23;
1277
1278	spin_lock_init(&host->lock);
1279	mutex_init(&host->mutex);
1280
1281	if (!host->dma_chan_rxtx) {
1282		dev_warn(dev, "unable to initialise DMA channel. Falling back to PIO\n");
1283		host->use_dma = false;
1284	} else {
1285		host->use_dma = true;
1286
1287		host->dma_cfg_tx.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1288		host->dma_cfg_tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1289		host->dma_cfg_tx.direction = DMA_MEM_TO_DEV;
1290		host->dma_cfg_tx.src_addr = 0;
1291		host->dma_cfg_tx.dst_addr = host->phys_addr + SDDATA;
1292
1293		host->dma_cfg_rx.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1294		host->dma_cfg_rx.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1295		host->dma_cfg_rx.direction = DMA_DEV_TO_MEM;
1296		host->dma_cfg_rx.src_addr = host->phys_addr + SDDATA;
1297		host->dma_cfg_rx.dst_addr = 0;
1298
1299		if (dmaengine_slave_config(host->dma_chan_rxtx,
1300					   &host->dma_cfg_tx) != 0 ||
1301		    dmaengine_slave_config(host->dma_chan_rxtx,
1302					   &host->dma_cfg_rx) != 0)
1303			host->use_dma = false;
1304	}
1305
1306	mmc->max_segs = 128;
1307	mmc->max_req_size = min_t(size_t, 524288, dma_max_mapping_size(dev));
1308	mmc->max_seg_size = mmc->max_req_size;
1309	mmc->max_blk_size = 1024;
1310	mmc->max_blk_count =  65535;
1311
1312	/* report supported voltage ranges */
1313	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1314
1315	INIT_WORK(&host->dma_work, bcm2835_dma_complete_work);
1316	INIT_DELAYED_WORK(&host->timeout_work, bcm2835_timeout);
1317
1318	/* Set interrupt enables */
1319	host->hcfg = SDHCFG_BUSY_IRPT_EN;
1320
1321	bcm2835_reset_internal(host);
1322
1323	ret = request_threaded_irq(host->irq, bcm2835_irq,
1324				   bcm2835_threaded_irq,
1325				   0, mmc_hostname(mmc), host);
1326	if (ret) {
1327		dev_err(dev, "failed to request IRQ %d: %d\n", host->irq, ret);
1328		return ret;
1329	}
1330
1331	ret = mmc_add_host(mmc);
1332	if (ret) {
1333		free_irq(host->irq, host);
1334		return ret;
1335	}
1336
1337	pio_limit_string[0] = '\0';
1338	if (host->use_dma && (PIO_THRESHOLD > 0))
1339		sprintf(pio_limit_string, " (>%d)", PIO_THRESHOLD);
1340	dev_info(dev, "loaded - DMA %s%s\n",
1341		 host->use_dma ? "enabled" : "disabled", pio_limit_string);
1342
1343	return 0;
1344}
1345
1346static int bcm2835_probe(struct platform_device *pdev)
1347{
1348	struct device *dev = &pdev->dev;
1349	struct bcm2835_host *host;
1350	struct mmc_host *mmc;
1351	const __be32 *regaddr_p;
1352	int ret;
1353
1354	dev_dbg(dev, "%s\n", __func__);
1355	mmc = mmc_alloc_host(sizeof(*host), dev);
1356	if (!mmc)
1357		return -ENOMEM;
1358
1359	mmc->ops = &bcm2835_ops;
1360	host = mmc_priv(mmc);
1361	host->pdev = pdev;
1362	spin_lock_init(&host->lock);
1363
1364	host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
1365	if (IS_ERR(host->ioaddr)) {
1366		ret = PTR_ERR(host->ioaddr);
1367		goto err;
1368	}
1369
1370	/* Parse OF address directly to get the physical address for
1371	 * DMA to our registers.
1372	 */
1373	regaddr_p = of_get_address(pdev->dev.of_node, 0, NULL, NULL);
1374	if (!regaddr_p) {
1375		dev_err(dev, "Can't get phys address\n");
1376		ret = -EINVAL;
1377		goto err;
1378	}
1379
1380	host->phys_addr = be32_to_cpup(regaddr_p);
1381
1382	host->dma_chan = NULL;
1383	host->dma_desc = NULL;
1384
1385	host->dma_chan_rxtx = dma_request_chan(dev, "rx-tx");
1386	if (IS_ERR(host->dma_chan_rxtx)) {
1387		ret = PTR_ERR(host->dma_chan_rxtx);
1388		host->dma_chan_rxtx = NULL;
1389
1390		if (ret == -EPROBE_DEFER)
1391			goto err;
1392
1393		/* Ignore errors to fall back to PIO mode */
1394	}
1395
1396	host->irq = platform_get_irq(pdev, 0);
1397	if (host->irq < 0) {
1398		ret = host->irq;
1399		goto err;
1400	}
1401
1402	ret = mmc_of_parse(mmc);
1403	if (ret)
1404		goto err;
1405
1406	host->clk = devm_clk_get(dev, NULL);
1407	if (IS_ERR(host->clk)) {
1408		ret = dev_err_probe(dev, PTR_ERR(host->clk), "could not get clk\n");
1409		goto err;
1410	}
1411
1412	ret = clk_prepare_enable(host->clk);
1413	if (ret)
1414		goto err;
1415
1416	host->max_clk = clk_get_rate(host->clk);
1417
1418	ret = bcm2835_add_host(host);
1419	if (ret)
1420		goto err_clk;
1421
1422	platform_set_drvdata(pdev, host);
1423
1424	dev_dbg(dev, "%s -> OK\n", __func__);
1425
1426	return 0;
1427
1428err_clk:
1429	clk_disable_unprepare(host->clk);
1430err:
1431	dev_dbg(dev, "%s -> err %d\n", __func__, ret);
1432	if (host->dma_chan_rxtx)
1433		dma_release_channel(host->dma_chan_rxtx);
1434	mmc_free_host(mmc);
1435
1436	return ret;
1437}
1438
1439static void bcm2835_remove(struct platform_device *pdev)
1440{
1441	struct bcm2835_host *host = platform_get_drvdata(pdev);
1442	struct mmc_host *mmc = mmc_from_priv(host);
1443
1444	mmc_remove_host(mmc);
1445
1446	writel(SDVDD_POWER_OFF, host->ioaddr + SDVDD);
1447
1448	free_irq(host->irq, host);
1449
1450	cancel_work_sync(&host->dma_work);
1451	cancel_delayed_work_sync(&host->timeout_work);
1452
1453	clk_disable_unprepare(host->clk);
1454
1455	if (host->dma_chan_rxtx)
1456		dma_release_channel(host->dma_chan_rxtx);
1457
1458	mmc_free_host(mmc);
1459}
1460
1461static const struct of_device_id bcm2835_match[] = {
1462	{ .compatible = "brcm,bcm2835-sdhost" },
1463	{ }
1464};
1465MODULE_DEVICE_TABLE(of, bcm2835_match);
1466
1467static struct platform_driver bcm2835_driver = {
1468	.probe      = bcm2835_probe,
1469	.remove     = bcm2835_remove,
1470	.driver     = {
1471		.name		= "sdhost-bcm2835",
1472		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
1473		.of_match_table	= bcm2835_match,
1474	},
1475};
1476module_platform_driver(bcm2835_driver);
1477
1478MODULE_ALIAS("platform:sdhost-bcm2835");
1479MODULE_DESCRIPTION("BCM2835 SDHost driver");
1480MODULE_LICENSE("GPL v2");
1481MODULE_AUTHOR("Phil Elwell");