v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * linux/drivers/mmc/host/au1xmmc.c - AU1XX0 MMC driver
   4 *
   5 *  Copyright (c) 2005, Advanced Micro Devices, Inc.
   6 *
   7 *  Developed with help from the 2.4.30 MMC AU1XXX controller including
   8 *  the following copyright notices:
   9 *     Copyright (c) 2003-2004 Embedded Edge, LLC.
  10 *     Portions Copyright (C) 2002 Embedix, Inc
  11 *     Copyright 2002 Hewlett-Packard Company
  12
  13 *  2.6 version of this driver inspired by:
  14 *     (drivers/mmc/wbsd.c) Copyright (C) 2004-2005 Pierre Ossman,
  15 *     All Rights Reserved.
  16 *     (drivers/mmc/pxa.c) Copyright (C) 2003 Russell King,
  17 *     All Rights Reserved.
  18 *
  19
  20 */
  21
  22/* Why don't we use the SD controllers' carddetect feature?
  23 *
  24 * From the AU1100 MMC application guide:
  25 * If the Au1100-based design is intended to support both MultiMediaCards
  26 * and 1- or 4-data bit SecureDigital cards, then the solution is to
  27 * connect a weak (560KOhm) pull-up resistor to connector pin 1.
  28 * In doing so, a MMC card never enters SPI-mode communications,
  29 * but now the SecureDigital card-detect feature of CD/DAT3 is ineffective
  30 * (the low to high transition will not occur).
  31 */
  32
  33#include <linux/clk.h>
  34#include <linux/module.h>
  35#include <linux/init.h>
  36#include <linux/platform_device.h>
  37#include <linux/mm.h>
  38#include <linux/interrupt.h>
  39#include <linux/dma-mapping.h>
  40#include <linux/scatterlist.h>
  41#include <linux/highmem.h>
  42#include <linux/leds.h>
  43#include <linux/mmc/host.h>
  44#include <linux/slab.h>
  45
  46#include <asm/io.h>
  47#include <asm/mach-au1x00/au1000.h>
  48#include <asm/mach-au1x00/au1xxx_dbdma.h>
  49#include <asm/mach-au1x00/au1100_mmc.h>
  50
  51#define DRIVER_NAME "au1xxx-mmc"
  52
  53/* Set this to enable special debugging macros */
  54/* #define DEBUG */
  55
  56#ifdef DEBUG
  57#define DBG(fmt, idx, args...)	\
  58	pr_debug("au1xmmc(%d): DEBUG: " fmt, idx, ##args)
  59#else
  60#define DBG(fmt, idx, args...) do {} while (0)
  61#endif
  62
  63/* Hardware definitions */
  64#define AU1XMMC_DESCRIPTOR_COUNT 1
  65
  66/* max DMA seg size: 64KB on Au1100, 4MB on Au1200 */
  67#define AU1100_MMC_DESCRIPTOR_SIZE 0x0000ffff
  68#define AU1200_MMC_DESCRIPTOR_SIZE 0x003fffff
  69
  70#define AU1XMMC_OCR (MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \
  71		     MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \
  72		     MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36)
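/* The OCR mask above advertises 2.7-3.6 V operation
 * (MMC_VDD_27_28 through MMC_VDD_35_36) to the MMC core.
 */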
  73
  74/* This gives us a hard value for the stop command that we can write directly
  75 * to the command register.
  76 */
  77#define STOP_CMD	\
  78	(SD_CMD_RT_1B | SD_CMD_CT_7 | (0xC << SD_CMD_CI_SHIFT) | SD_CMD_GO)
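/* Decoded, this is opcode 12 (STOP_TRANSMISSION) in the command-index
 * field with an R1b response type, what appears to be the controller's
 * stop-transfer command type (SD_CMD_CT_7), and the GO bit set so the
 * command launches as soon as it is written to HOST_CMD().
 */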
  79
  80/* This is the set of interrupts that we configure by default. */
  81#define AU1XMMC_INTERRUPTS 				\
  82	(SD_CONFIG_SC | SD_CONFIG_DT | SD_CONFIG_RAT |	\
  83	 SD_CONFIG_CR | SD_CONFIG_I)
  84
   85/* The poll event (looking for insert/remove events) runs twice a second. */
  86#define AU1XMMC_DETECT_TIMEOUT (HZ/2)
  87
  88struct au1xmmc_host {
  89	struct mmc_host *mmc;
  90	struct mmc_request *mrq;
  91
  92	u32 flags;
  93	void __iomem *iobase;
  94	u32 clock;
  95	u32 bus_width;
  96	u32 power_mode;
  97
  98	int status;
  99
 100	struct {
 101		int len;
 102		int dir;
 103	} dma;
 104
 105	struct {
 106		int index;
 107		int offset;
 108		int len;
 109	} pio;
 110
 111	u32 tx_chan;
 112	u32 rx_chan;
 113
 114	int irq;
 115
 116	struct tasklet_struct finish_task;
 117	struct tasklet_struct data_task;
 118	struct au1xmmc_platform_data *platdata;
 119	struct platform_device *pdev;
 120	struct resource *ioarea;
 121	struct clk *clk;
 122};
 123
 124/* Status flags used by the host structure */
 125#define HOST_F_XMIT	0x0001
 126#define HOST_F_RECV	0x0002
 127#define HOST_F_DMA	0x0010
 128#define HOST_F_DBDMA	0x0020
 129#define HOST_F_ACTIVE	0x0100
 130#define HOST_F_STOP	0x1000
 131
 132#define HOST_S_IDLE	0x0001
 133#define HOST_S_CMD	0x0002
 134#define HOST_S_DATA	0x0003
 135#define HOST_S_STOP	0x0004
 136
 137/* Easy access macros */
 138#define HOST_STATUS(h)	((h)->iobase + SD_STATUS)
 139#define HOST_CONFIG(h)	((h)->iobase + SD_CONFIG)
 140#define HOST_ENABLE(h)	((h)->iobase + SD_ENABLE)
 141#define HOST_TXPORT(h)	((h)->iobase + SD_TXPORT)
 142#define HOST_RXPORT(h)	((h)->iobase + SD_RXPORT)
 143#define HOST_CMDARG(h)	((h)->iobase + SD_CMDARG)
 144#define HOST_BLKSIZE(h)	((h)->iobase + SD_BLKSIZE)
 145#define HOST_CMD(h)	((h)->iobase + SD_CMD)
 146#define HOST_CONFIG2(h)	((h)->iobase + SD_CONFIG2)
 147#define HOST_TIMEOUT(h)	((h)->iobase + SD_TIMEOUT)
 148#define HOST_DEBUG(h)	((h)->iobase + SD_DEBUG)
 149
 150#define DMA_CHANNEL(h)	\
 151	(((h)->flags & HOST_F_XMIT) ? (h)->tx_chan : (h)->rx_chan)
 152
 153static inline int has_dbdma(void)
 154{
 155	switch (alchemy_get_cputype()) {
 156	case ALCHEMY_CPU_AU1200:
 157	case ALCHEMY_CPU_AU1300:
 158		return 1;
 159	default:
 160		return 0;
 161	}
 162}
 163
 164static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask)
 165{
 166	u32 val = __raw_readl(HOST_CONFIG(host));
 167	val |= mask;
 168	__raw_writel(val, HOST_CONFIG(host));
 169	wmb(); /* drain writebuffer */
 170}
 171
 172static inline void FLUSH_FIFO(struct au1xmmc_host *host)
 173{
 174	u32 val = __raw_readl(HOST_CONFIG2(host));
 175
 176	__raw_writel(val | SD_CONFIG2_FF, HOST_CONFIG2(host));
 177	wmb(); /* drain writebuffer */
 178	mdelay(1);
 179
 180	/* SEND_STOP will turn off clock control - this re-enables it */
 181	val &= ~SD_CONFIG2_DF;
 182
 183	__raw_writel(val, HOST_CONFIG2(host));
 184	wmb(); /* drain writebuffer */
 185}
 186
 187static inline void IRQ_OFF(struct au1xmmc_host *host, u32 mask)
 188{
 189	u32 val = __raw_readl(HOST_CONFIG(host));
 190	val &= ~mask;
 191	__raw_writel(val, HOST_CONFIG(host));
 192	wmb(); /* drain writebuffer */
 193}
 194
 195static inline void SEND_STOP(struct au1xmmc_host *host)
 196{
 197	u32 config2;
 198
 199	WARN_ON(host->status != HOST_S_DATA);
 200	host->status = HOST_S_STOP;
 201
 202	config2 = __raw_readl(HOST_CONFIG2(host));
 203	__raw_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host));
 204	wmb(); /* drain writebuffer */
 205
 206	/* Send the stop command */
 207	__raw_writel(STOP_CMD, HOST_CMD(host));
 208	wmb(); /* drain writebuffer */
 209}
 210
 211static void au1xmmc_set_power(struct au1xmmc_host *host, int state)
 212{
 213	if (host->platdata && host->platdata->set_power)
 214		host->platdata->set_power(host->mmc, state);
 215}
 216
 217static int au1xmmc_card_inserted(struct mmc_host *mmc)
 218{
 219	struct au1xmmc_host *host = mmc_priv(mmc);
 220
 221	if (host->platdata && host->platdata->card_inserted)
 222		return !!host->platdata->card_inserted(host->mmc);
 223
 224	return -ENOSYS;
 225}
 226
 227static int au1xmmc_card_readonly(struct mmc_host *mmc)
 228{
 229	struct au1xmmc_host *host = mmc_priv(mmc);
 230
 231	if (host->platdata && host->platdata->card_readonly)
 232		return !!host->platdata->card_readonly(mmc);
 233
 234	return -ENOSYS;
 235}
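/*
 * Illustrative sketch (not part of this file): the platform-data hooks
 * used above come from struct au1xmmc_platform_data; a board file wires
 * them up roughly as below.  The board/function names and the GPIO
 * number are made up; only the field names match what this driver
 * dereferences (see au1100_mmc.h for the exact prototypes).
 *
 *	static int myboard_card_inserted(void *mmc_host)
 *	{
 *		return !gpio_get_value(215);	// active-low detect switch
 *	}
 *
 *	static struct au1xmmc_platform_data myboard_mmc_pd = {
 *		.card_inserted	= myboard_card_inserted,
 *	};
 *
 * Boards without a usable detect line leave .cd_setup NULL, in which
 * case au1xmmc_probe() falls back to MMC_CAP_NEEDS_POLL.
 */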
 236
 237static void au1xmmc_finish_request(struct au1xmmc_host *host)
 238{
 239	struct mmc_request *mrq = host->mrq;
 240
 241	host->mrq = NULL;
 242	host->flags &= HOST_F_ACTIVE | HOST_F_DMA;
 243
 244	host->dma.len = 0;
 245	host->dma.dir = 0;
 246
 247	host->pio.index  = 0;
 248	host->pio.offset = 0;
 249	host->pio.len = 0;
 250
 251	host->status = HOST_S_IDLE;
 252
 253	mmc_request_done(host->mmc, mrq);
 254}
 255
 256static void au1xmmc_tasklet_finish(struct tasklet_struct *t)
 257{
 258	struct au1xmmc_host *host = from_tasklet(host, t, finish_task);
 259	au1xmmc_finish_request(host);
 260}
 261
 262static int au1xmmc_send_command(struct au1xmmc_host *host,
 263				struct mmc_command *cmd, struct mmc_data *data)
 264{
 265	u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);
 266
 267	switch (mmc_resp_type(cmd)) {
 268	case MMC_RSP_NONE:
 269		break;
 270	case MMC_RSP_R1:
 271		mmccmd |= SD_CMD_RT_1;
 272		break;
 273	case MMC_RSP_R1B:
 274		mmccmd |= SD_CMD_RT_1B;
 275		break;
 276	case MMC_RSP_R2:
 277		mmccmd |= SD_CMD_RT_2;
 278		break;
 279	case MMC_RSP_R3:
 280		mmccmd |= SD_CMD_RT_3;
 281		break;
 282	default:
 283		pr_info("au1xmmc: unhandled response type %02x\n",
 284			mmc_resp_type(cmd));
 285		return -EINVAL;
 286	}
 287
 288	if (data) {
 289		if (data->flags & MMC_DATA_READ) {
 290			if (data->blocks > 1)
 291				mmccmd |= SD_CMD_CT_4;
 292			else
 293				mmccmd |= SD_CMD_CT_2;
 294		} else if (data->flags & MMC_DATA_WRITE) {
 295			if (data->blocks > 1)
 296				mmccmd |= SD_CMD_CT_3;
 297			else
 298				mmccmd |= SD_CMD_CT_1;
 299		}
 300	}
 301
 302	__raw_writel(cmd->arg, HOST_CMDARG(host));
 303	wmb(); /* drain writebuffer */
 304
 305	__raw_writel((mmccmd | SD_CMD_GO), HOST_CMD(host));
 306	wmb(); /* drain writebuffer */
 307
 308	/* Wait for the command to go on the line */
 309	while (__raw_readl(HOST_CMD(host)) & SD_CMD_GO)
 310		/* nop */;
 311
 312	return 0;
 313}
 314
 315static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
 316{
 317	struct mmc_request *mrq = host->mrq;
 318	struct mmc_data *data;
 319	u32 crc;
 320
 321	WARN_ON((host->status != HOST_S_DATA) && (host->status != HOST_S_STOP));
 322
 323	if (host->mrq == NULL)
 324		return;
 325
 326	data = mrq->cmd->data;
 327
 328	if (status == 0)
 329		status = __raw_readl(HOST_STATUS(host));
 330
 331	/* The transaction is really over when the SD_STATUS_DB bit is clear */
 332	while ((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB))
 333		status = __raw_readl(HOST_STATUS(host));
 334
 335	data->error = 0;
 336	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir);
 337
 338        /* Process any errors */
 339	crc = (status & (SD_STATUS_WC | SD_STATUS_RC));
 340	if (host->flags & HOST_F_XMIT)
 341		crc |= ((status & 0x07) == 0x02) ? 0 : 1;
 342
 343	if (crc)
 344		data->error = -EILSEQ;
 345
 346	/* Clear the CRC bits */
 347	__raw_writel(SD_STATUS_WC | SD_STATUS_RC, HOST_STATUS(host));
 348
 349	data->bytes_xfered = 0;
 350
 351	if (!data->error) {
 352		if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) {
 
 353			u32 chan = DMA_CHANNEL(host);
 354
 355			chan_tab_t *c = *((chan_tab_t **)chan);
 356			au1x_dma_chan_t *cp = c->chan_ptr;
 357			data->bytes_xfered = cp->ddma_bytecnt;
 
 358		} else
 359			data->bytes_xfered =
 360				(data->blocks * data->blksz) - host->pio.len;
 361	}
 362
 363	au1xmmc_finish_request(host);
 364}
 365
 366static void au1xmmc_tasklet_data(struct tasklet_struct *t)
 367{
 368	struct au1xmmc_host *host = from_tasklet(host, t, data_task);
 369
 370	u32 status = __raw_readl(HOST_STATUS(host));
 371	au1xmmc_data_complete(host, status);
 372}
 373
 374#define AU1XMMC_MAX_TRANSFER 8
 375
 376static void au1xmmc_send_pio(struct au1xmmc_host *host)
 377{
 378	struct mmc_data *data;
 379	int sg_len, max, count;
 380	unsigned char *sg_ptr, val;
 381	u32 status;
 382	struct scatterlist *sg;
 383
 384	data = host->mrq->data;
 385
 386	if (!(host->flags & HOST_F_XMIT))
 387		return;
 388
 389	/* This is the pointer to the data buffer */
 390	sg = &data->sg[host->pio.index];
 391	sg_ptr = kmap_local_page(sg_page(sg)) + sg->offset + host->pio.offset;
 392
 393	/* This is the space left inside the buffer */
 394	sg_len = data->sg[host->pio.index].length - host->pio.offset;
 395
 396	/* Check if we need less than the size of the sg_buffer */
 397	max = (sg_len > host->pio.len) ? host->pio.len : sg_len;
 398	if (max > AU1XMMC_MAX_TRANSFER)
 399		max = AU1XMMC_MAX_TRANSFER;
 400
 401	for (count = 0; count < max; count++) {
 402		status = __raw_readl(HOST_STATUS(host));
 403
 404		if (!(status & SD_STATUS_TH))
 405			break;
 406
 407		val = sg_ptr[count];
 408
 409		__raw_writel((unsigned long)val, HOST_TXPORT(host));
 410		wmb(); /* drain writebuffer */
 411	}
 412	kunmap_local(sg_ptr);
 413
 414	host->pio.len -= count;
 415	host->pio.offset += count;
 416
 417	if (count == sg_len) {
 418		host->pio.index++;
 419		host->pio.offset = 0;
 420	}
 421
 422	if (host->pio.len == 0) {
 423		IRQ_OFF(host, SD_CONFIG_TH);
 424
 425		if (host->flags & HOST_F_STOP)
 426			SEND_STOP(host);
 427
 428		tasklet_schedule(&host->data_task);
 429	}
 430}
 431
 432static void au1xmmc_receive_pio(struct au1xmmc_host *host)
 433{
 434	struct mmc_data *data;
 435	int max, count, sg_len = 0;
 436	unsigned char *sg_ptr = NULL;
 437	u32 status, val;
 438	struct scatterlist *sg;
 439
 440	data = host->mrq->data;
 441
 442	if (!(host->flags & HOST_F_RECV))
 443		return;
 444
 445	max = host->pio.len;
 446
 447	if (host->pio.index < host->dma.len) {
 448		sg = &data->sg[host->pio.index];
 449		sg_ptr = kmap_local_page(sg_page(sg)) + sg->offset + host->pio.offset;
 450
 451		/* This is the space left inside the buffer */
 452		sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;
 453
 454		/* Check if we need less than the size of the sg_buffer */
 455		if (sg_len < max)
 456			max = sg_len;
 457	}
 458
 459	if (max > AU1XMMC_MAX_TRANSFER)
 460		max = AU1XMMC_MAX_TRANSFER;
 461
 462	for (count = 0; count < max; count++) {
 463		status = __raw_readl(HOST_STATUS(host));
 464
 465		if (!(status & SD_STATUS_NE))
 466			break;
 467
 468		if (status & SD_STATUS_RC) {
 469			DBG("RX CRC Error [%d + %d].\n", host->pdev->id,
 470					host->pio.len, count);
 471			break;
 472		}
 473
 474		if (status & SD_STATUS_RO) {
 475			DBG("RX Overrun [%d + %d]\n", host->pdev->id,
 476					host->pio.len, count);
 477			break;
 478		}
 479		else if (status & SD_STATUS_RU) {
 480			DBG("RX Underrun [%d + %d]\n", host->pdev->id,
 481					host->pio.len,	count);
 482			break;
 483		}
 484
 485		val = __raw_readl(HOST_RXPORT(host));
 486
 487		if (sg_ptr)
 488			sg_ptr[count] = (unsigned char)(val & 0xFF);
 489	}
 490	if (sg_ptr)
 491		kunmap_local(sg_ptr);
 492
 493	host->pio.len -= count;
 494	host->pio.offset += count;
 495
 496	if (sg_len && count == sg_len) {
 497		host->pio.index++;
 498		host->pio.offset = 0;
 499	}
 500
 501	if (host->pio.len == 0) {
 502		/* IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF); */
 503		IRQ_OFF(host, SD_CONFIG_NE);
 504
 505		if (host->flags & HOST_F_STOP)
 506			SEND_STOP(host);
 507
 508		tasklet_schedule(&host->data_task);
 509	}
 510}
 511
 512/* This is called when a command has been completed - grab the response
 513 * and check for errors.  Then start the data transfer if it is indicated.
 514 */
 515static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
 516{
 517	struct mmc_request *mrq = host->mrq;
 518	struct mmc_command *cmd;
 519	u32 r[4];
 520	int i, trans;
 521
 522	if (!host->mrq)
 523		return;
 524
 525	cmd = mrq->cmd;
 526	cmd->error = 0;
 527
 528	if (cmd->flags & MMC_RSP_PRESENT) {
 529		if (cmd->flags & MMC_RSP_136) {
 530			r[0] = __raw_readl(host->iobase + SD_RESP3);
 531			r[1] = __raw_readl(host->iobase + SD_RESP2);
 532			r[2] = __raw_readl(host->iobase + SD_RESP1);
 533			r[3] = __raw_readl(host->iobase + SD_RESP0);
 534
 535			/* The CRC is omitted from the response, so really
  536			 * we only got 120 bits, but the engine expects
 537			 * 128 bits, so we have to shift things up.
 538			 */
 539			for (i = 0; i < 4; i++) {
 540				cmd->resp[i] = (r[i] & 0x00FFFFFF) << 8;
 541				if (i != 3)
 542					cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24;
 543			}
 544		} else {
  545			/* Technically, we should be getting all 48 bits of
 546			 * the response (SD_RESP1 + SD_RESP2), but because
 547			 * our response omits the CRC, our data ends up
 548			 * being shifted 8 bits to the right.  In this case,
 549			 * that means that the OSR data starts at bit 31,
 550			 * so we can just read RESP0 and return that.
 551			 */
 552			cmd->resp[0] = __raw_readl(host->iobase + SD_RESP0);
 553		}
 554	}
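	/* Worked example of the long-response fixup above, with made-up
	 * register values: if r[0] = 0x00AABBCC and r[1] = 0xDDEEFF11,
	 * the loop produces resp[0] = (0x00AABBCC << 8) | 0xDD = 0xAABBCCDD,
	 * i.e. each word is shifted up a byte and the top byte of the
	 * following word is pulled in.
	 */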
 555
 556        /* Figure out errors */
 557	if (status & (SD_STATUS_SC | SD_STATUS_WC | SD_STATUS_RC))
 558		cmd->error = -EILSEQ;
 559
 560	trans = host->flags & (HOST_F_XMIT | HOST_F_RECV);
 561
 562	if (!trans || cmd->error) {
 563		IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF);
 564		tasklet_schedule(&host->finish_task);
 565		return;
 566	}
 567
 568	host->status = HOST_S_DATA;
 569
 570	if ((host->flags & (HOST_F_DMA | HOST_F_DBDMA))) {
 
 571		u32 channel = DMA_CHANNEL(host);
 572
 573		/* Start the DBDMA as soon as the buffer gets something in it */
 574
 575		if (host->flags & HOST_F_RECV) {
 576			u32 mask = SD_STATUS_DB | SD_STATUS_NE;
 577
 578			while((status & mask) != mask)
 579				status = __raw_readl(HOST_STATUS(host));
 580		}
 581
 582		au1xxx_dbdma_start(channel);
 
 583	}
 584}
 585
 586static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate)
 587{
 588	unsigned int pbus = clk_get_rate(host->clk);
 589	unsigned int divisor = ((pbus / rate) / 2) - 1;
 590	u32 config;
 591
 592	config = __raw_readl(HOST_CONFIG(host));
 593
 594	config &= ~(SD_CONFIG_DIV);
 595	config |= (divisor & SD_CONFIG_DIV) | SD_CONFIG_DE;
 596
 597	__raw_writel(config, HOST_CONFIG(host));
 598	wmb(); /* drain writebuffer */
 599}
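/* Illustrative numbers (not from a datasheet): with a 48 MHz peripheral
 * clock and a requested 400 kHz card clock, the formula above gives
 * divisor = ((48000000 / 400000) / 2) - 1 = 59, which corresponds to a
 * card clock of pbus / (2 * (divisor + 1)) = 400 kHz.
 */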
 600
 601static int au1xmmc_prepare_data(struct au1xmmc_host *host,
 602				struct mmc_data *data)
 603{
 604	int datalen = data->blocks * data->blksz;
 605
 606	if (data->flags & MMC_DATA_READ)
 607		host->flags |= HOST_F_RECV;
 608	else
 609		host->flags |= HOST_F_XMIT;
 610
 611	if (host->mrq->stop)
 612		host->flags |= HOST_F_STOP;
 613
 614	host->dma.dir = DMA_BIDIRECTIONAL;
 615
 616	host->dma.len = dma_map_sg(mmc_dev(host->mmc), data->sg,
 617				   data->sg_len, host->dma.dir);
 618
 619	if (host->dma.len == 0)
 620		return -ETIMEDOUT;
 621
 622	__raw_writel(data->blksz - 1, HOST_BLKSIZE(host));
 623
 624	if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) {
 
 625		int i;
 626		u32 channel = DMA_CHANNEL(host);
 627
 628		au1xxx_dbdma_stop(channel);
 629
 630		for (i = 0; i < host->dma.len; i++) {
 631			u32 ret = 0, flags = DDMA_FLAGS_NOIE;
 632			struct scatterlist *sg = &data->sg[i];
 633			int sg_len = sg->length;
 634
 635			int len = (datalen > sg_len) ? sg_len : datalen;
 636
 637			if (i == host->dma.len - 1)
 638				flags = DDMA_FLAGS_IE;
 639
 640			if (host->flags & HOST_F_XMIT) {
 641				ret = au1xxx_dbdma_put_source(channel,
 642					sg_phys(sg), len, flags);
 643			} else {
 644				ret = au1xxx_dbdma_put_dest(channel,
 645					sg_phys(sg), len, flags);
 646			}
 647
 648			if (!ret)
 649				goto dataerr;
 650
 651			datalen -= len;
 652		}
 
 653	} else {
 654		host->pio.index = 0;
 655		host->pio.offset = 0;
 656		host->pio.len = datalen;
 657
 658		if (host->flags & HOST_F_XMIT)
 659			IRQ_ON(host, SD_CONFIG_TH);
 660		else
 661			IRQ_ON(host, SD_CONFIG_NE);
 662			/* IRQ_ON(host, SD_CONFIG_RA | SD_CONFIG_RF); */
 663	}
 664
 665	return 0;
 666
 667dataerr:
 668	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
 669			host->dma.dir);
 670	return -ETIMEDOUT;
 671}
 672
 673/* This actually starts a command or data transaction */
 674static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq)
 675{
 676	struct au1xmmc_host *host = mmc_priv(mmc);
 677	int ret = 0;
 678
 679	WARN_ON(irqs_disabled());
 680	WARN_ON(host->status != HOST_S_IDLE);
 681
 682	host->mrq = mrq;
 683	host->status = HOST_S_CMD;
 684
 685	/* fail request immediately if no card is present */
 686	if (0 == au1xmmc_card_inserted(mmc)) {
 687		mrq->cmd->error = -ENOMEDIUM;
 688		au1xmmc_finish_request(host);
 689		return;
 690	}
 691
 692	if (mrq->data) {
 693		FLUSH_FIFO(host);
 694		ret = au1xmmc_prepare_data(host, mrq->data);
 695	}
 696
 697	if (!ret)
 698		ret = au1xmmc_send_command(host, mrq->cmd, mrq->data);
 699
 700	if (ret) {
 701		mrq->cmd->error = ret;
 702		au1xmmc_finish_request(host);
 703	}
 704}
 705
 706static void au1xmmc_reset_controller(struct au1xmmc_host *host)
 707{
 708	/* Apply the clock */
 709	__raw_writel(SD_ENABLE_CE, HOST_ENABLE(host));
 710	wmb(); /* drain writebuffer */
 711	mdelay(1);
 712
 713	__raw_writel(SD_ENABLE_R | SD_ENABLE_CE, HOST_ENABLE(host));
 714	wmb(); /* drain writebuffer */
 715	mdelay(5);
 716
 717	__raw_writel(~0, HOST_STATUS(host));
 718	wmb(); /* drain writebuffer */
 719
 720	__raw_writel(0, HOST_BLKSIZE(host));
 721	__raw_writel(0x001fffff, HOST_TIMEOUT(host));
 722	wmb(); /* drain writebuffer */
 723
 724	__raw_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
 725	wmb(); /* drain writebuffer */
 726
 727	__raw_writel(SD_CONFIG2_EN | SD_CONFIG2_FF, HOST_CONFIG2(host));
 728	wmb(); /* drain writebuffer */
 729	mdelay(1);
 730
 731	__raw_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
 732	wmb(); /* drain writebuffer */
 733
 734	/* Configure interrupts */
 735	__raw_writel(AU1XMMC_INTERRUPTS, HOST_CONFIG(host));
 736	wmb(); /* drain writebuffer */
 737}
 738
 739
 740static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 741{
 742	struct au1xmmc_host *host = mmc_priv(mmc);
 743	u32 config2;
 744
 745	if (ios->power_mode == MMC_POWER_OFF)
 746		au1xmmc_set_power(host, 0);
 747	else if (ios->power_mode == MMC_POWER_ON) {
 748		au1xmmc_set_power(host, 1);
 749	}
 750
 751	if (ios->clock && ios->clock != host->clock) {
 752		au1xmmc_set_clock(host, ios->clock);
 753		host->clock = ios->clock;
 754	}
 755
 756	config2 = __raw_readl(HOST_CONFIG2(host));
 757	switch (ios->bus_width) {
 758	case MMC_BUS_WIDTH_8:
 759		config2 |= SD_CONFIG2_BB;
 760		break;
 761	case MMC_BUS_WIDTH_4:
 762		config2 &= ~SD_CONFIG2_BB;
 763		config2 |= SD_CONFIG2_WB;
 764		break;
 765	case MMC_BUS_WIDTH_1:
 766		config2 &= ~(SD_CONFIG2_WB | SD_CONFIG2_BB);
 767		break;
 768	}
 769	__raw_writel(config2, HOST_CONFIG2(host));
 770	wmb(); /* drain writebuffer */
 771}
 772
 773#define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT)
 774#define STATUS_DATA_IN  (SD_STATUS_NE)
 775#define STATUS_DATA_OUT (SD_STATUS_TH)
 776
 777static irqreturn_t au1xmmc_irq(int irq, void *dev_id)
 778{
 779	struct au1xmmc_host *host = dev_id;
 780	u32 status;
 781
 782	status = __raw_readl(HOST_STATUS(host));
 783
 784	if (!(status & SD_STATUS_I))
 785		return IRQ_NONE;	/* not ours */
 786
 787	if (status & SD_STATUS_SI)	/* SDIO */
 788		mmc_signal_sdio_irq(host->mmc);
 789
 790	if (host->mrq && (status & STATUS_TIMEOUT)) {
 791		if (status & SD_STATUS_RAT)
 792			host->mrq->cmd->error = -ETIMEDOUT;
 793		else if (status & SD_STATUS_DT)
 794			host->mrq->data->error = -ETIMEDOUT;
 795
 796		/* In PIO mode, interrupts might still be enabled */
 797		IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH);
 798
 799		/* IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); */
 800		tasklet_schedule(&host->finish_task);
 801	}
 802#if 0
 803	else if (status & SD_STATUS_DD) {
 804		/* Sometimes we get a DD before a NE in PIO mode */
 805		if (!(host->flags & HOST_F_DMA) && (status & SD_STATUS_NE))
 806			au1xmmc_receive_pio(host);
 807		else {
 808			au1xmmc_data_complete(host, status);
 809			/* tasklet_schedule(&host->data_task); */
 810		}
 811	}
 812#endif
 813	else if (status & SD_STATUS_CR) {
 814		if (host->status == HOST_S_CMD)
 815			au1xmmc_cmd_complete(host, status);
 816
 817	} else if (!(host->flags & HOST_F_DMA)) {
 818		if ((host->flags & HOST_F_XMIT) && (status & STATUS_DATA_OUT))
 819			au1xmmc_send_pio(host);
 820		else if ((host->flags & HOST_F_RECV) && (status & STATUS_DATA_IN))
 821			au1xmmc_receive_pio(host);
 822
 823	} else if (status & 0x203F3C70) {
 824			DBG("Unhandled status %8.8x\n", host->pdev->id,
 825				status);
 826	}
 827
 828	__raw_writel(status, HOST_STATUS(host));
 829	wmb(); /* drain writebuffer */
 830
 831	return IRQ_HANDLED;
 832}
 833
 
 834/* 8bit memory DMA device */
 835static dbdev_tab_t au1xmmc_mem_dbdev = {
 836	.dev_id		= DSCR_CMD0_ALWAYS,
 837	.dev_flags	= DEV_FLAGS_ANYUSE,
 838	.dev_tsize	= 0,
 839	.dev_devwidth	= 8,
 840	.dev_physaddr	= 0x00000000,
 841	.dev_intlevel	= 0,
 842	.dev_intpolarity = 0,
 843};
 844static int memid;
 845
 846static void au1xmmc_dbdma_callback(int irq, void *dev_id)
 847{
 848	struct au1xmmc_host *host = (struct au1xmmc_host *)dev_id;
 849
 850	/* Avoid spurious interrupts */
 851	if (!host->mrq)
 852		return;
 853
 854	if (host->flags & HOST_F_STOP)
 855		SEND_STOP(host);
 856
 857	tasklet_schedule(&host->data_task);
 858}
 859
 860static int au1xmmc_dbdma_init(struct au1xmmc_host *host)
 861{
 862	struct resource *res;
 863	int txid, rxid;
 864
 865	res = platform_get_resource(host->pdev, IORESOURCE_DMA, 0);
 866	if (!res)
 867		return -ENODEV;
 868	txid = res->start;
 869
 870	res = platform_get_resource(host->pdev, IORESOURCE_DMA, 1);
 871	if (!res)
 872		return -ENODEV;
 873	rxid = res->start;
 874
 875	if (!memid)
 876		return -ENODEV;
 877
 878	host->tx_chan = au1xxx_dbdma_chan_alloc(memid, txid,
 879				au1xmmc_dbdma_callback, (void *)host);
 880	if (!host->tx_chan) {
 881		dev_err(&host->pdev->dev, "cannot allocate TX DMA\n");
 882		return -ENODEV;
 883	}
 884
 885	host->rx_chan = au1xxx_dbdma_chan_alloc(rxid, memid,
 886				au1xmmc_dbdma_callback, (void *)host);
 887	if (!host->rx_chan) {
 888		dev_err(&host->pdev->dev, "cannot allocate RX DMA\n");
 889		au1xxx_dbdma_chan_free(host->tx_chan);
 890		return -ENODEV;
 891	}
 892
 893	au1xxx_dbdma_set_devwidth(host->tx_chan, 8);
 894	au1xxx_dbdma_set_devwidth(host->rx_chan, 8);
 895
 896	au1xxx_dbdma_ring_alloc(host->tx_chan, AU1XMMC_DESCRIPTOR_COUNT);
 897	au1xxx_dbdma_ring_alloc(host->rx_chan, AU1XMMC_DESCRIPTOR_COUNT);
 898
 899	/* DBDMA is good to go */
 900	host->flags |= HOST_F_DMA | HOST_F_DBDMA;
 901
 902	return 0;
 903}
 904
 905static void au1xmmc_dbdma_shutdown(struct au1xmmc_host *host)
 906{
 907	if (host->flags & HOST_F_DMA) {
 908		host->flags &= ~HOST_F_DMA;
 909		au1xxx_dbdma_chan_free(host->tx_chan);
 910		au1xxx_dbdma_chan_free(host->rx_chan);
 911	}
 912}
 
 913
 914static void au1xmmc_enable_sdio_irq(struct mmc_host *mmc, int en)
 915{
 916	struct au1xmmc_host *host = mmc_priv(mmc);
 917
 918	if (en)
 919		IRQ_ON(host, SD_CONFIG_SI);
 920	else
 921		IRQ_OFF(host, SD_CONFIG_SI);
 922}
 923
 924static const struct mmc_host_ops au1xmmc_ops = {
 925	.request	= au1xmmc_request,
 926	.set_ios	= au1xmmc_set_ios,
 927	.get_ro		= au1xmmc_card_readonly,
 928	.get_cd		= au1xmmc_card_inserted,
 929	.enable_sdio_irq = au1xmmc_enable_sdio_irq,
 930};
 931
 932static int au1xmmc_probe(struct platform_device *pdev)
 933{
 934	struct mmc_host *mmc;
 935	struct au1xmmc_host *host;
 936	struct resource *r;
 937	int ret, iflag;
 938
 939	mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev);
 940	if (!mmc) {
 941		dev_err(&pdev->dev, "no memory for mmc_host\n");
 942		ret = -ENOMEM;
 943		goto out0;
 944	}
 945
 946	host = mmc_priv(mmc);
 947	host->mmc = mmc;
 948	host->platdata = pdev->dev.platform_data;
 949	host->pdev = pdev;
 950
 951	ret = -ENODEV;
 952	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 953	if (!r) {
 954		dev_err(&pdev->dev, "no mmio defined\n");
 955		goto out1;
 956	}
 957
 958	host->ioarea = request_mem_region(r->start, resource_size(r),
 959					   pdev->name);
 960	if (!host->ioarea) {
 961		dev_err(&pdev->dev, "mmio already in use\n");
 962		goto out1;
 963	}
 964
 965	host->iobase = ioremap(r->start, 0x3c);
 966	if (!host->iobase) {
 967		dev_err(&pdev->dev, "cannot remap mmio\n");
 968		goto out2;
 969	}
 970
 971	host->irq = platform_get_irq(pdev, 0);
 972	if (host->irq < 0) {
 973		ret = host->irq;
 974		goto out3;
 975	}
 976
 977	mmc->ops = &au1xmmc_ops;
 978
 979	mmc->f_min =   450000;
 980	mmc->f_max = 24000000;
 981
 982	mmc->max_blk_size = 2048;
 983	mmc->max_blk_count = 512;
 984
 985	mmc->ocr_avail = AU1XMMC_OCR;
 986	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
 987	mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT;
 988
 989	iflag = IRQF_SHARED;	/* Au1100/Au1200: one int for both ctrls */
 990
 991	switch (alchemy_get_cputype()) {
 992	case ALCHEMY_CPU_AU1100:
 993		mmc->max_seg_size = AU1100_MMC_DESCRIPTOR_SIZE;
 994		break;
 995	case ALCHEMY_CPU_AU1200:
 996		mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE;
 997		break;
 998	case ALCHEMY_CPU_AU1300:
 999		iflag = 0;	/* nothing is shared */
1000		mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE;
1001		mmc->f_max = 52000000;
1002		if (host->ioarea->start == AU1100_SD0_PHYS_ADDR)
1003			mmc->caps |= MMC_CAP_8_BIT_DATA;
1004		break;
1005	}
1006
1007	ret = request_irq(host->irq, au1xmmc_irq, iflag, DRIVER_NAME, host);
1008	if (ret) {
1009		dev_err(&pdev->dev, "cannot grab IRQ\n");
1010		goto out3;
1011	}
1012
1013	host->clk = clk_get(&pdev->dev, ALCHEMY_PERIPH_CLK);
1014	if (IS_ERR(host->clk)) {
1015		dev_err(&pdev->dev, "cannot find clock\n");
1016		ret = PTR_ERR(host->clk);
1017		goto out_irq;
1018	}
1019
1020	ret = clk_prepare_enable(host->clk);
1021	if (ret) {
1022		dev_err(&pdev->dev, "cannot enable clock\n");
1023		goto out_clk;
1024	}
1025
1026	host->status = HOST_S_IDLE;
1027
1028	/* board-specific carddetect setup, if any */
1029	if (host->platdata && host->platdata->cd_setup) {
1030		ret = host->platdata->cd_setup(mmc, 1);
1031		if (ret) {
1032			dev_warn(&pdev->dev, "board CD setup failed\n");
1033			mmc->caps |= MMC_CAP_NEEDS_POLL;
1034		}
1035	} else
1036		mmc->caps |= MMC_CAP_NEEDS_POLL;
1037
1038	/* platform may not be able to use all advertised caps */
1039	if (host->platdata)
1040		mmc->caps &= ~(host->platdata->mask_host_caps);
1041
1042	tasklet_setup(&host->data_task, au1xmmc_tasklet_data);
 
1043
1044	tasklet_setup(&host->finish_task, au1xmmc_tasklet_finish);
 
1045
1046	if (has_dbdma()) {
1047		ret = au1xmmc_dbdma_init(host);
1048		if (ret)
1049			pr_info(DRIVER_NAME ": DBDMA init failed; using PIO\n");
1050	}
1051
1052#ifdef CONFIG_LEDS_CLASS
1053	if (host->platdata && host->platdata->led) {
1054		struct led_classdev *led = host->platdata->led;
1055		led->name = mmc_hostname(mmc);
1056		led->brightness = LED_OFF;
1057		led->default_trigger = mmc_hostname(mmc);
1058		ret = led_classdev_register(mmc_dev(mmc), led);
1059		if (ret)
1060			goto out5;
1061	}
1062#endif
1063
1064	au1xmmc_reset_controller(host);
1065
1066	ret = mmc_add_host(mmc);
1067	if (ret) {
1068		dev_err(&pdev->dev, "cannot add mmc host\n");
1069		goto out6;
1070	}
1071
1072	platform_set_drvdata(pdev, host);
1073
1074	pr_info(DRIVER_NAME ": MMC Controller %d set up at %p"
1075		" (mode=%s)\n", pdev->id, host->iobase,
1076		host->flags & HOST_F_DMA ? "dma" : "pio");
1077
1078	return 0;	/* all ok */
1079
1080out6:
1081#ifdef CONFIG_LEDS_CLASS
1082	if (host->platdata && host->platdata->led)
1083		led_classdev_unregister(host->platdata->led);
1084out5:
1085#endif
1086	__raw_writel(0, HOST_ENABLE(host));
1087	__raw_writel(0, HOST_CONFIG(host));
1088	__raw_writel(0, HOST_CONFIG2(host));
1089	wmb(); /* drain writebuffer */
1090
1091	if (host->flags & HOST_F_DBDMA)
1092		au1xmmc_dbdma_shutdown(host);
 
1093
1094	tasklet_kill(&host->data_task);
1095	tasklet_kill(&host->finish_task);
1096
1097	if (host->platdata && host->platdata->cd_setup &&
1098	    !(mmc->caps & MMC_CAP_NEEDS_POLL))
1099		host->platdata->cd_setup(mmc, 0);
1100
1101	clk_disable_unprepare(host->clk);
1102out_clk:
1103	clk_put(host->clk);
1104out_irq:
1105	free_irq(host->irq, host);
1106out3:
1107	iounmap((void *)host->iobase);
1108out2:
1109	release_resource(host->ioarea);
1110	kfree(host->ioarea);
1111out1:
1112	mmc_free_host(mmc);
1113out0:
1114	return ret;
1115}
1116
1117static int au1xmmc_remove(struct platform_device *pdev)
1118{
1119	struct au1xmmc_host *host = platform_get_drvdata(pdev);
1120
1121	if (host) {
1122		mmc_remove_host(host->mmc);
1123
1124#ifdef CONFIG_LEDS_CLASS
1125		if (host->platdata && host->platdata->led)
1126			led_classdev_unregister(host->platdata->led);
1127#endif
1128
1129		if (host->platdata && host->platdata->cd_setup &&
1130		    !(host->mmc->caps & MMC_CAP_NEEDS_POLL))
1131			host->platdata->cd_setup(host->mmc, 0);
1132
1133		__raw_writel(0, HOST_ENABLE(host));
1134		__raw_writel(0, HOST_CONFIG(host));
1135		__raw_writel(0, HOST_CONFIG2(host));
1136		wmb(); /* drain writebuffer */
1137
1138		tasklet_kill(&host->data_task);
1139		tasklet_kill(&host->finish_task);
1140
1141		if (host->flags & HOST_F_DBDMA)
1142			au1xmmc_dbdma_shutdown(host);
1143
1144		au1xmmc_set_power(host, 0);
1145
1146		clk_disable_unprepare(host->clk);
1147		clk_put(host->clk);
1148
1149		free_irq(host->irq, host);
1150		iounmap((void *)host->iobase);
1151		release_resource(host->ioarea);
1152		kfree(host->ioarea);
1153
1154		mmc_free_host(host->mmc);
 
1155	}
1156	return 0;
1157}
1158
1159#ifdef CONFIG_PM
1160static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
1161{
1162	struct au1xmmc_host *host = platform_get_drvdata(pdev);
 
1163
1164	__raw_writel(0, HOST_CONFIG2(host));
1165	__raw_writel(0, HOST_CONFIG(host));
1166	__raw_writel(0xffffffff, HOST_STATUS(host));
1167	__raw_writel(0, HOST_ENABLE(host));
1168	wmb(); /* drain writebuffer */
1169
1170	return 0;
1171}
1172
1173static int au1xmmc_resume(struct platform_device *pdev)
1174{
1175	struct au1xmmc_host *host = platform_get_drvdata(pdev);
1176
1177	au1xmmc_reset_controller(host);
1178
1179	return 0;
1180}
1181#else
1182#define au1xmmc_suspend NULL
1183#define au1xmmc_resume NULL
1184#endif
1185
1186static struct platform_driver au1xmmc_driver = {
1187	.probe         = au1xmmc_probe,
1188	.remove        = au1xmmc_remove,
1189	.suspend       = au1xmmc_suspend,
1190	.resume        = au1xmmc_resume,
1191	.driver        = {
1192		.name  = DRIVER_NAME,
1193		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
1194	},
1195};
1196
1197static int __init au1xmmc_init(void)
1198{
1199	if (has_dbdma()) {
 1200		/* DSCR_CMD0_ALWAYS has a stride of 32 bits, but we need a
 1201		 * stride of 8 bits.  And since devices are shared, we need to
 1202		 * create our own to avoid freaking out other devices.
 1203		 */
1204		memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev);
1205		if (!memid)
1206			pr_err("au1xmmc: cannot add memory dbdma\n");
1207	}
1208	return platform_driver_register(&au1xmmc_driver);
1209}
1210
1211static void __exit au1xmmc_exit(void)
1212{
1213	if (has_dbdma() && memid)
 
1214		au1xxx_ddma_del_device(memid);
1215
1216	platform_driver_unregister(&au1xmmc_driver);
1217}
1218
1219module_init(au1xmmc_init);
1220module_exit(au1xmmc_exit);
1221
1222MODULE_AUTHOR("Advanced Micro Devices, Inc");
1223MODULE_DESCRIPTION("MMC/SD driver for the Alchemy Au1XXX");
1224MODULE_LICENSE("GPL");
1225MODULE_ALIAS("platform:au1xxx-mmc");
v3.1
 
   1/*
   2 * linux/drivers/mmc/host/au1xmmc.c - AU1XX0 MMC driver
   3 *
   4 *  Copyright (c) 2005, Advanced Micro Devices, Inc.
   5 *
   6 *  Developed with help from the 2.4.30 MMC AU1XXX controller including
   7 *  the following copyright notices:
   8 *     Copyright (c) 2003-2004 Embedded Edge, LLC.
   9 *     Portions Copyright (C) 2002 Embedix, Inc
  10 *     Copyright 2002 Hewlett-Packard Company
  11
  12 *  2.6 version of this driver inspired by:
  13 *     (drivers/mmc/wbsd.c) Copyright (C) 2004-2005 Pierre Ossman,
  14 *     All Rights Reserved.
  15 *     (drivers/mmc/pxa.c) Copyright (C) 2003 Russell King,
  16 *     All Rights Reserved.
  17 *
  18
  19 * This program is free software; you can redistribute it and/or modify
  20 * it under the terms of the GNU General Public License version 2 as
  21 * published by the Free Software Foundation.
  22 */
  23
  24/* Why don't we use the SD controllers' carddetect feature?
  25 *
  26 * From the AU1100 MMC application guide:
  27 * If the Au1100-based design is intended to support both MultiMediaCards
  28 * and 1- or 4-data bit SecureDigital cards, then the solution is to
  29 * connect a weak (560KOhm) pull-up resistor to connector pin 1.
  30 * In doing so, a MMC card never enters SPI-mode communications,
  31 * but now the SecureDigital card-detect feature of CD/DAT3 is ineffective
  32 * (the low to high transition will not occur).
  33 */
  34
 
  35#include <linux/module.h>
  36#include <linux/init.h>
  37#include <linux/platform_device.h>
  38#include <linux/mm.h>
  39#include <linux/interrupt.h>
  40#include <linux/dma-mapping.h>
  41#include <linux/scatterlist.h>
 
  42#include <linux/leds.h>
  43#include <linux/mmc/host.h>
  44#include <linux/slab.h>
  45
  46#include <asm/io.h>
  47#include <asm/mach-au1x00/au1000.h>
  48#include <asm/mach-au1x00/au1xxx_dbdma.h>
  49#include <asm/mach-au1x00/au1100_mmc.h>
  50
  51#define DRIVER_NAME "au1xxx-mmc"
  52
  53/* Set this to enable special debugging macros */
  54/* #define DEBUG */
  55
  56#ifdef DEBUG
  57#define DBG(fmt, idx, args...)	\
  58	printk(KERN_DEBUG "au1xmmc(%d): DEBUG: " fmt, idx, ##args)
  59#else
  60#define DBG(fmt, idx, args...) do {} while (0)
  61#endif
  62
  63/* Hardware definitions */
  64#define AU1XMMC_DESCRIPTOR_COUNT 1
  65
  66/* max DMA seg size: 64KB on Au1100, 4MB on Au1200 */
  67#ifdef CONFIG_SOC_AU1100
  68#define AU1XMMC_DESCRIPTOR_SIZE 0x0000ffff
  69#else	/* Au1200 */
  70#define AU1XMMC_DESCRIPTOR_SIZE 0x003fffff
  71#endif
  72
  73#define AU1XMMC_OCR (MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \
  74		     MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \
  75		     MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36)
  76
  77/* This gives us a hard value for the stop command that we can write directly
  78 * to the command register.
  79 */
  80#define STOP_CMD	\
  81	(SD_CMD_RT_1B | SD_CMD_CT_7 | (0xC << SD_CMD_CI_SHIFT) | SD_CMD_GO)
  82
  83/* This is the set of interrupts that we configure by default. */
  84#define AU1XMMC_INTERRUPTS 				\
  85	(SD_CONFIG_SC | SD_CONFIG_DT | SD_CONFIG_RAT |	\
  86	 SD_CONFIG_CR | SD_CONFIG_I)
  87
   88/* The poll event (looking for insert/remove events) runs twice a second. */
  89#define AU1XMMC_DETECT_TIMEOUT (HZ/2)
  90
  91struct au1xmmc_host {
  92	struct mmc_host *mmc;
  93	struct mmc_request *mrq;
  94
  95	u32 flags;
  96	u32 iobase;
  97	u32 clock;
  98	u32 bus_width;
  99	u32 power_mode;
 100
 101	int status;
 102
 103	struct {
 104		int len;
 105		int dir;
 106	} dma;
 107
 108	struct {
 109		int index;
 110		int offset;
 111		int len;
 112	} pio;
 113
 114	u32 tx_chan;
 115	u32 rx_chan;
 116
 117	int irq;
 118
 119	struct tasklet_struct finish_task;
 120	struct tasklet_struct data_task;
 121	struct au1xmmc_platform_data *platdata;
 122	struct platform_device *pdev;
 123	struct resource *ioarea;
 
 124};
 125
 126/* Status flags used by the host structure */
 127#define HOST_F_XMIT	0x0001
 128#define HOST_F_RECV	0x0002
 129#define HOST_F_DMA	0x0010
 
 130#define HOST_F_ACTIVE	0x0100
 131#define HOST_F_STOP	0x1000
 132
 133#define HOST_S_IDLE	0x0001
 134#define HOST_S_CMD	0x0002
 135#define HOST_S_DATA	0x0003
 136#define HOST_S_STOP	0x0004
 137
 138/* Easy access macros */
 139#define HOST_STATUS(h)	((h)->iobase + SD_STATUS)
 140#define HOST_CONFIG(h)	((h)->iobase + SD_CONFIG)
 141#define HOST_ENABLE(h)	((h)->iobase + SD_ENABLE)
 142#define HOST_TXPORT(h)	((h)->iobase + SD_TXPORT)
 143#define HOST_RXPORT(h)	((h)->iobase + SD_RXPORT)
 144#define HOST_CMDARG(h)	((h)->iobase + SD_CMDARG)
 145#define HOST_BLKSIZE(h)	((h)->iobase + SD_BLKSIZE)
 146#define HOST_CMD(h)	((h)->iobase + SD_CMD)
 147#define HOST_CONFIG2(h)	((h)->iobase + SD_CONFIG2)
 148#define HOST_TIMEOUT(h)	((h)->iobase + SD_TIMEOUT)
 149#define HOST_DEBUG(h)	((h)->iobase + SD_DEBUG)
 150
 151#define DMA_CHANNEL(h)	\
 152	(((h)->flags & HOST_F_XMIT) ? (h)->tx_chan : (h)->rx_chan)
 153
 154static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask)
 155{
 156	u32 val = au_readl(HOST_CONFIG(host));
 157	val |= mask;
 158	au_writel(val, HOST_CONFIG(host));
 159	au_sync();
 160}
 161
 162static inline void FLUSH_FIFO(struct au1xmmc_host *host)
 163{
 164	u32 val = au_readl(HOST_CONFIG2(host));
 165
 166	au_writel(val | SD_CONFIG2_FF, HOST_CONFIG2(host));
 167	au_sync_delay(1);
 
 168
 169	/* SEND_STOP will turn off clock control - this re-enables it */
 170	val &= ~SD_CONFIG2_DF;
 171
 172	au_writel(val, HOST_CONFIG2(host));
 173	au_sync();
 174}
 175
 176static inline void IRQ_OFF(struct au1xmmc_host *host, u32 mask)
 177{
 178	u32 val = au_readl(HOST_CONFIG(host));
 179	val &= ~mask;
 180	au_writel(val, HOST_CONFIG(host));
 181	au_sync();
 182}
 183
 184static inline void SEND_STOP(struct au1xmmc_host *host)
 185{
 186	u32 config2;
 187
 188	WARN_ON(host->status != HOST_S_DATA);
 189	host->status = HOST_S_STOP;
 190
 191	config2 = au_readl(HOST_CONFIG2(host));
 192	au_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host));
 193	au_sync();
 194
 195	/* Send the stop command */
 196	au_writel(STOP_CMD, HOST_CMD(host));
 
 197}
 198
 199static void au1xmmc_set_power(struct au1xmmc_host *host, int state)
 200{
 201	if (host->platdata && host->platdata->set_power)
 202		host->platdata->set_power(host->mmc, state);
 203}
 204
 205static int au1xmmc_card_inserted(struct mmc_host *mmc)
 206{
 207	struct au1xmmc_host *host = mmc_priv(mmc);
 208
 209	if (host->platdata && host->platdata->card_inserted)
 210		return !!host->platdata->card_inserted(host->mmc);
 211
 212	return -ENOSYS;
 213}
 214
 215static int au1xmmc_card_readonly(struct mmc_host *mmc)
 216{
 217	struct au1xmmc_host *host = mmc_priv(mmc);
 218
 219	if (host->platdata && host->platdata->card_readonly)
 220		return !!host->platdata->card_readonly(mmc);
 221
 222	return -ENOSYS;
 223}
 224
 225static void au1xmmc_finish_request(struct au1xmmc_host *host)
 226{
 227	struct mmc_request *mrq = host->mrq;
 228
 229	host->mrq = NULL;
 230	host->flags &= HOST_F_ACTIVE | HOST_F_DMA;
 231
 232	host->dma.len = 0;
 233	host->dma.dir = 0;
 234
 235	host->pio.index  = 0;
 236	host->pio.offset = 0;
 237	host->pio.len = 0;
 238
 239	host->status = HOST_S_IDLE;
 240
 241	mmc_request_done(host->mmc, mrq);
 242}
 243
 244static void au1xmmc_tasklet_finish(unsigned long param)
 245{
 246	struct au1xmmc_host *host = (struct au1xmmc_host *) param;
 247	au1xmmc_finish_request(host);
 248}
 249
 250static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
 251				struct mmc_command *cmd, struct mmc_data *data)
 252{
 253	u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);
 254
 255	switch (mmc_resp_type(cmd)) {
 256	case MMC_RSP_NONE:
 257		break;
 258	case MMC_RSP_R1:
 259		mmccmd |= SD_CMD_RT_1;
 260		break;
 261	case MMC_RSP_R1B:
 262		mmccmd |= SD_CMD_RT_1B;
 263		break;
 264	case MMC_RSP_R2:
 265		mmccmd |= SD_CMD_RT_2;
 266		break;
 267	case MMC_RSP_R3:
 268		mmccmd |= SD_CMD_RT_3;
 269		break;
 270	default:
 271		printk(KERN_INFO "au1xmmc: unhandled response type %02x\n",
 272			mmc_resp_type(cmd));
 273		return -EINVAL;
 274	}
 275
 276	if (data) {
 277		if (data->flags & MMC_DATA_READ) {
 278			if (data->blocks > 1)
 279				mmccmd |= SD_CMD_CT_4;
 280			else
 281				mmccmd |= SD_CMD_CT_2;
 282		} else if (data->flags & MMC_DATA_WRITE) {
 283			if (data->blocks > 1)
 284				mmccmd |= SD_CMD_CT_3;
 285			else
 286				mmccmd |= SD_CMD_CT_1;
 287		}
 288	}
 289
 290	au_writel(cmd->arg, HOST_CMDARG(host));
 291	au_sync();
 292
 293	if (wait)
 294		IRQ_OFF(host, SD_CONFIG_CR);
 295
 296	au_writel((mmccmd | SD_CMD_GO), HOST_CMD(host));
 297	au_sync();
 298
 299	/* Wait for the command to go on the line */
 300	while (au_readl(HOST_CMD(host)) & SD_CMD_GO)
 301		/* nop */;
 302
 303	/* Wait for the command to come back */
 304	if (wait) {
 305		u32 status = au_readl(HOST_STATUS(host));
 306
 307		while (!(status & SD_STATUS_CR))
 308			status = au_readl(HOST_STATUS(host));
 309
 310		/* Clear the CR status */
 311		au_writel(SD_STATUS_CR, HOST_STATUS(host));
 312
 313		IRQ_ON(host, SD_CONFIG_CR);
 314	}
 315
 316	return 0;
 317}
 318
 319static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
 320{
 321	struct mmc_request *mrq = host->mrq;
 322	struct mmc_data *data;
 323	u32 crc;
 324
 325	WARN_ON((host->status != HOST_S_DATA) && (host->status != HOST_S_STOP));
 326
 327	if (host->mrq == NULL)
 328		return;
 329
 330	data = mrq->cmd->data;
 331
 332	if (status == 0)
 333		status = au_readl(HOST_STATUS(host));
 334
 335	/* The transaction is really over when the SD_STATUS_DB bit is clear */
 336	while ((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB))
 337		status = au_readl(HOST_STATUS(host));
 338
 339	data->error = 0;
 340	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir);
 341
 342        /* Process any errors */
 343	crc = (status & (SD_STATUS_WC | SD_STATUS_RC));
 344	if (host->flags & HOST_F_XMIT)
 345		crc |= ((status & 0x07) == 0x02) ? 0 : 1;
 346
 347	if (crc)
 348		data->error = -EILSEQ;
 349
 350	/* Clear the CRC bits */
 351	au_writel(SD_STATUS_WC | SD_STATUS_RC, HOST_STATUS(host));
 352
 353	data->bytes_xfered = 0;
 354
 355	if (!data->error) {
 356		if (host->flags & HOST_F_DMA) {
 357#ifdef CONFIG_SOC_AU1200	/* DBDMA */
 358			u32 chan = DMA_CHANNEL(host);
 359
 360			chan_tab_t *c = *((chan_tab_t **)chan);
 361			au1x_dma_chan_t *cp = c->chan_ptr;
 362			data->bytes_xfered = cp->ddma_bytecnt;
 363#endif
 364		} else
 365			data->bytes_xfered =
 366				(data->blocks * data->blksz) - host->pio.len;
 367	}
 368
 369	au1xmmc_finish_request(host);
 370}
 371
 372static void au1xmmc_tasklet_data(unsigned long param)
 373{
 374	struct au1xmmc_host *host = (struct au1xmmc_host *)param;
 375
 376	u32 status = au_readl(HOST_STATUS(host));
 377	au1xmmc_data_complete(host, status);
 378}
 379
 380#define AU1XMMC_MAX_TRANSFER 8
 381
 382static void au1xmmc_send_pio(struct au1xmmc_host *host)
 383{
 384	struct mmc_data *data;
 385	int sg_len, max, count;
 386	unsigned char *sg_ptr, val;
 387	u32 status;
 388	struct scatterlist *sg;
 389
 390	data = host->mrq->data;
 391
 392	if (!(host->flags & HOST_F_XMIT))
 393		return;
 394
 395	/* This is the pointer to the data buffer */
 396	sg = &data->sg[host->pio.index];
 397	sg_ptr = sg_virt(sg) + host->pio.offset;
 398
 399	/* This is the space left inside the buffer */
 400	sg_len = data->sg[host->pio.index].length - host->pio.offset;
 401
 402	/* Check if we need less than the size of the sg_buffer */
 403	max = (sg_len > host->pio.len) ? host->pio.len : sg_len;
 404	if (max > AU1XMMC_MAX_TRANSFER)
 405		max = AU1XMMC_MAX_TRANSFER;
 406
 407	for (count = 0; count < max; count++) {
 408		status = au_readl(HOST_STATUS(host));
 409
 410		if (!(status & SD_STATUS_TH))
 411			break;
 412
 413		val = *sg_ptr++;
 414
 415		au_writel((unsigned long)val, HOST_TXPORT(host));
 416		au_sync();
 417	}
 
 418
 419	host->pio.len -= count;
 420	host->pio.offset += count;
 421
 422	if (count == sg_len) {
 423		host->pio.index++;
 424		host->pio.offset = 0;
 425	}
 426
 427	if (host->pio.len == 0) {
 428		IRQ_OFF(host, SD_CONFIG_TH);
 429
 430		if (host->flags & HOST_F_STOP)
 431			SEND_STOP(host);
 432
 433		tasklet_schedule(&host->data_task);
 434	}
 435}
 436
 437static void au1xmmc_receive_pio(struct au1xmmc_host *host)
 438{
 439	struct mmc_data *data;
 440	int max, count, sg_len = 0;
 441	unsigned char *sg_ptr = NULL;
 442	u32 status, val;
 443	struct scatterlist *sg;
 444
 445	data = host->mrq->data;
 446
 447	if (!(host->flags & HOST_F_RECV))
 448		return;
 449
 450	max = host->pio.len;
 451
 452	if (host->pio.index < host->dma.len) {
 453		sg = &data->sg[host->pio.index];
 454		sg_ptr = sg_virt(sg) + host->pio.offset;
 455
 456		/* This is the space left inside the buffer */
 457		sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;
 458
 459		/* Check if we need less than the size of the sg_buffer */
 460		if (sg_len < max)
 461			max = sg_len;
 462	}
 463
 464	if (max > AU1XMMC_MAX_TRANSFER)
 465		max = AU1XMMC_MAX_TRANSFER;
 466
 467	for (count = 0; count < max; count++) {
 468		status = au_readl(HOST_STATUS(host));
 469
 470		if (!(status & SD_STATUS_NE))
 471			break;
 472
 473		if (status & SD_STATUS_RC) {
 474			DBG("RX CRC Error [%d + %d].\n", host->pdev->id,
 475					host->pio.len, count);
 476			break;
 477		}
 478
 479		if (status & SD_STATUS_RO) {
 480			DBG("RX Overrun [%d + %d]\n", host->pdev->id,
 481					host->pio.len, count);
 482			break;
 483		}
 484		else if (status & SD_STATUS_RU) {
 485			DBG("RX Underrun [%d + %d]\n", host->pdev->id,
 486					host->pio.len,	count);
 487			break;
 488		}
 489
 490		val = au_readl(HOST_RXPORT(host));
 491
 492		if (sg_ptr)
 493			*sg_ptr++ = (unsigned char)(val & 0xFF);
 494	}
 495
 496	host->pio.len -= count;
 497	host->pio.offset += count;
 498
 499	if (sg_len && count == sg_len) {
 500		host->pio.index++;
 501		host->pio.offset = 0;
 502	}
 503
 504	if (host->pio.len == 0) {
 505		/* IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF); */
 506		IRQ_OFF(host, SD_CONFIG_NE);
 507
 508		if (host->flags & HOST_F_STOP)
 509			SEND_STOP(host);
 510
 511		tasklet_schedule(&host->data_task);
 512	}
 513}
 514
 515/* This is called when a command has been completed - grab the response
 516 * and check for errors.  Then start the data transfer if it is indicated.
 517 */
 518static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
 519{
 520	struct mmc_request *mrq = host->mrq;
 521	struct mmc_command *cmd;
 522	u32 r[4];
 523	int i, trans;
 524
 525	if (!host->mrq)
 526		return;
 527
 528	cmd = mrq->cmd;
 529	cmd->error = 0;
 530
 531	if (cmd->flags & MMC_RSP_PRESENT) {
 532		if (cmd->flags & MMC_RSP_136) {
 533			r[0] = au_readl(host->iobase + SD_RESP3);
 534			r[1] = au_readl(host->iobase + SD_RESP2);
 535			r[2] = au_readl(host->iobase + SD_RESP1);
 536			r[3] = au_readl(host->iobase + SD_RESP0);
 537
 538			/* The CRC is omitted from the response, so really
  539			 * we only got 120 bits, but the engine expects
 540			 * 128 bits, so we have to shift things up.
 541			 */
 542			for (i = 0; i < 4; i++) {
 543				cmd->resp[i] = (r[i] & 0x00FFFFFF) << 8;
 544				if (i != 3)
 545					cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24;
 546			}
 547		} else {
  548			/* Technically, we should be getting all 48 bits of
 549			 * the response (SD_RESP1 + SD_RESP2), but because
 550			 * our response omits the CRC, our data ends up
 551			 * being shifted 8 bits to the right.  In this case,
 552			 * that means that the OSR data starts at bit 31,
 553			 * so we can just read RESP0 and return that.
 554			 */
 555			cmd->resp[0] = au_readl(host->iobase + SD_RESP0);
 556		}
 557	}
 558
 559        /* Figure out errors */
 560	if (status & (SD_STATUS_SC | SD_STATUS_WC | SD_STATUS_RC))
 561		cmd->error = -EILSEQ;
 562
 563	trans = host->flags & (HOST_F_XMIT | HOST_F_RECV);
 564
 565	if (!trans || cmd->error) {
 566		IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF);
 567		tasklet_schedule(&host->finish_task);
 568		return;
 569	}
 570
 571	host->status = HOST_S_DATA;
 572
 573	if (host->flags & HOST_F_DMA) {
 574#ifdef CONFIG_SOC_AU1200	/* DBDMA */
 575		u32 channel = DMA_CHANNEL(host);
 576
 577		/* Start the DMA as soon as the buffer gets something in it */
 578
 579		if (host->flags & HOST_F_RECV) {
 580			u32 mask = SD_STATUS_DB | SD_STATUS_NE;
 581
 582			while((status & mask) != mask)
 583				status = au_readl(HOST_STATUS(host));
 584		}
 585
 586		au1xxx_dbdma_start(channel);
 587#endif
 588	}
 589}
 590
 591static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate)
 592{
 593	unsigned int pbus = get_au1x00_speed();
 594	unsigned int divisor;
 595	u32 config;
 596
 597	/* From databook:
 598	 * divisor = ((((cpuclock / sbus_divisor) / 2) / mmcclock) / 2) - 1
 599	 */
 600	pbus /= ((au_readl(SYS_POWERCTRL) & 0x3) + 2);
 601	pbus /= 2;
 602	divisor = ((pbus / rate) / 2) - 1;
 603
 604	config = au_readl(HOST_CONFIG(host));
 605
 606	config &= ~(SD_CONFIG_DIV);
 607	config |= (divisor & SD_CONFIG_DIV) | SD_CONFIG_DE;
 608
 609	au_writel(config, HOST_CONFIG(host));
 610	au_sync();
 611}
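/* Illustrative numbers only: with a 396 MHz CPU clock and an sbus
 * divisor of 3 (SYS_POWERCTRL low bits = 1), pbus works out to
 * 396 MHz / 3 / 2 = 66 MHz, so a 330 kHz request yields
 * divisor = ((66000000 / 330000) / 2) - 1 = 99.
 */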
 612
 613static int au1xmmc_prepare_data(struct au1xmmc_host *host,
 614				struct mmc_data *data)
 615{
 616	int datalen = data->blocks * data->blksz;
 617
 618	if (data->flags & MMC_DATA_READ)
 619		host->flags |= HOST_F_RECV;
 620	else
 621		host->flags |= HOST_F_XMIT;
 622
 623	if (host->mrq->stop)
 624		host->flags |= HOST_F_STOP;
 625
 626	host->dma.dir = DMA_BIDIRECTIONAL;
 627
 628	host->dma.len = dma_map_sg(mmc_dev(host->mmc), data->sg,
 629				   data->sg_len, host->dma.dir);
 630
 631	if (host->dma.len == 0)
 632		return -ETIMEDOUT;
 633
 634	au_writel(data->blksz - 1, HOST_BLKSIZE(host));
 635
 636	if (host->flags & HOST_F_DMA) {
 637#ifdef CONFIG_SOC_AU1200	/* DBDMA */
 638		int i;
 639		u32 channel = DMA_CHANNEL(host);
 640
 641		au1xxx_dbdma_stop(channel);
 642
 643		for (i = 0; i < host->dma.len; i++) {
 644			u32 ret = 0, flags = DDMA_FLAGS_NOIE;
 645			struct scatterlist *sg = &data->sg[i];
 646			int sg_len = sg->length;
 647
 648			int len = (datalen > sg_len) ? sg_len : datalen;
 649
 650			if (i == host->dma.len - 1)
 651				flags = DDMA_FLAGS_IE;
 652
 653			if (host->flags & HOST_F_XMIT) {
 654				ret = au1xxx_dbdma_put_source(channel,
 655					sg_phys(sg), len, flags);
 656			} else {
 657				ret = au1xxx_dbdma_put_dest(channel,
 658					sg_phys(sg), len, flags);
 659			}
 660
 661			if (!ret)
 662				goto dataerr;
 663
 664			datalen -= len;
 665		}
 666#endif
 667	} else {
 668		host->pio.index = 0;
 669		host->pio.offset = 0;
 670		host->pio.len = datalen;
 671
 672		if (host->flags & HOST_F_XMIT)
 673			IRQ_ON(host, SD_CONFIG_TH);
 674		else
 675			IRQ_ON(host, SD_CONFIG_NE);
 676			/* IRQ_ON(host, SD_CONFIG_RA | SD_CONFIG_RF); */
 677	}
 678
 679	return 0;
 680
 681dataerr:
 682	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
 683			host->dma.dir);
 684	return -ETIMEDOUT;
 685}
 686
 687/* This actually starts a command or data transaction */
 688static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq)
 689{
 690	struct au1xmmc_host *host = mmc_priv(mmc);
 691	int ret = 0;
 692
 693	WARN_ON(irqs_disabled());
 694	WARN_ON(host->status != HOST_S_IDLE);
 695
 696	host->mrq = mrq;
 697	host->status = HOST_S_CMD;
 698
 699	/* fail request immediately if no card is present */
 700	if (0 == au1xmmc_card_inserted(mmc)) {
 701		mrq->cmd->error = -ENOMEDIUM;
 702		au1xmmc_finish_request(host);
 703		return;
 704	}
 705
 706	if (mrq->data) {
 707		FLUSH_FIFO(host);
 708		ret = au1xmmc_prepare_data(host, mrq->data);
 709	}
 710
 711	if (!ret)
 712		ret = au1xmmc_send_command(host, 0, mrq->cmd, mrq->data);
 713
 714	if (ret) {
 715		mrq->cmd->error = ret;
 716		au1xmmc_finish_request(host);
 717	}
 718}
 719
 720static void au1xmmc_reset_controller(struct au1xmmc_host *host)
 721{
 722	/* Apply the clock */
 723	au_writel(SD_ENABLE_CE, HOST_ENABLE(host));
 724        au_sync_delay(1);
 725
 726	au_writel(SD_ENABLE_R | SD_ENABLE_CE, HOST_ENABLE(host));
 727	au_sync_delay(5);
 728
 729	au_writel(~0, HOST_STATUS(host));
 730	au_sync();
 731
 732	au_writel(0, HOST_BLKSIZE(host));
 733	au_writel(0x001fffff, HOST_TIMEOUT(host));
 734	au_sync();
 735
 736	au_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
 737        au_sync();
 738
 739	au_writel(SD_CONFIG2_EN | SD_CONFIG2_FF, HOST_CONFIG2(host));
 740	au_sync_delay(1);
 741
 742	au_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
 743	au_sync();
 744
 745	/* Configure interrupts */
 746	au_writel(AU1XMMC_INTERRUPTS, HOST_CONFIG(host));
 747	au_sync();
 748}
 749
 750
 751static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 752{
 753	struct au1xmmc_host *host = mmc_priv(mmc);
 754	u32 config2;
 755
 756	if (ios->power_mode == MMC_POWER_OFF)
 757		au1xmmc_set_power(host, 0);
 758	else if (ios->power_mode == MMC_POWER_ON) {
 759		au1xmmc_set_power(host, 1);
 760	}
 761
 762	if (ios->clock && ios->clock != host->clock) {
 763		au1xmmc_set_clock(host, ios->clock);
 764		host->clock = ios->clock;
 765	}
 766
 767	config2 = au_readl(HOST_CONFIG2(host));
 768	switch (ios->bus_width) {
 769	case MMC_BUS_WIDTH_4:
 770		config2 |= SD_CONFIG2_WB;
 771		break;
 772	case MMC_BUS_WIDTH_1:
 773		config2 &= ~SD_CONFIG2_WB;
 774		break;
 775	}
 776	au_writel(config2, HOST_CONFIG2(host));
 777	au_sync();
 778}
 779
 780#define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT)
 781#define STATUS_DATA_IN  (SD_STATUS_NE)
 782#define STATUS_DATA_OUT (SD_STATUS_TH)
 783
 784static irqreturn_t au1xmmc_irq(int irq, void *dev_id)
 785{
 786	struct au1xmmc_host *host = dev_id;
 787	u32 status;
 788
 789	status = au_readl(HOST_STATUS(host));
 790
 791	if (!(status & SD_STATUS_I))
 792		return IRQ_NONE;	/* not ours */
 793
 794	if (status & SD_STATUS_SI)	/* SDIO */
 795		mmc_signal_sdio_irq(host->mmc);
 796
 797	if (host->mrq && (status & STATUS_TIMEOUT)) {
 798		if (status & SD_STATUS_RAT)
 799			host->mrq->cmd->error = -ETIMEDOUT;
 800		else if (status & SD_STATUS_DT)
 801			host->mrq->data->error = -ETIMEDOUT;
 802
 803		/* In PIO mode, interrupts might still be enabled */
 804		IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH);
 805
 806		/* IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF); */
 807		tasklet_schedule(&host->finish_task);
 808	}
 809#if 0
 810	else if (status & SD_STATUS_DD) {
 811		/* Sometimes we get a DD before a NE in PIO mode */
 812		if (!(host->flags & HOST_F_DMA) && (status & SD_STATUS_NE))
 813			au1xmmc_receive_pio(host);
 814		else {
 815			au1xmmc_data_complete(host, status);
 816			/* tasklet_schedule(&host->data_task); */
 817		}
 818	}
 819#endif
 820	else if (status & SD_STATUS_CR) {
 821		if (host->status == HOST_S_CMD)
 822			au1xmmc_cmd_complete(host, status);
 823
 824	} else if (!(host->flags & HOST_F_DMA)) {
 825		if ((host->flags & HOST_F_XMIT) && (status & STATUS_DATA_OUT))
 826			au1xmmc_send_pio(host);
 827		else if ((host->flags & HOST_F_RECV) && (status & STATUS_DATA_IN))
 828			au1xmmc_receive_pio(host);
 829
 830	} else if (status & 0x203F3C70) {
 831		DBG("Unhandled status %8.8x\n", host->pdev->id,
 832			status);
 833	}
 834
 835	au_writel(status, HOST_STATUS(host));
 836	au_sync();
 837
 838	return IRQ_HANDLED;
 839}
 840
 841#ifdef CONFIG_SOC_AU1200
 842/* 8bit memory DMA device */
 843static dbdev_tab_t au1xmmc_mem_dbdev = {
 844	.dev_id		= DSCR_CMD0_ALWAYS,
 845	.dev_flags	= DEV_FLAGS_ANYUSE,
 846	.dev_tsize	= 0,
 847	.dev_devwidth	= 8,
 848	.dev_physaddr	= 0x00000000,
 849	.dev_intlevel	= 0,
 850	.dev_intpolarity = 0,
 851};
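/* DBDMA device id of the 8-bit memory target above; set up in au1xmmc_init() */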
 852static int memid;
 853
 854static void au1xmmc_dbdma_callback(int irq, void *dev_id)
 855{
 856	struct au1xmmc_host *host = (struct au1xmmc_host *)dev_id;
 857
 858	/* Avoid spurious interrupts */
 859	if (!host->mrq)
 860		return;
 861
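	/* The DMA side of the transfer is complete: issue the deferred stop
	 * command if one is pending, then let the data tasklet finish up.
	 */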
 862	if (host->flags & HOST_F_STOP)
 863		SEND_STOP(host);
 864
 865	tasklet_schedule(&host->data_task);
 866}
 867
 868static int au1xmmc_dbdma_init(struct au1xmmc_host *host)
 869{
 870	struct resource *res;
 871	int txid, rxid;
 872
 873	res = platform_get_resource(host->pdev, IORESOURCE_DMA, 0);
 874	if (!res)
 875		return -ENODEV;
 876	txid = res->start;
 877
 878	res = platform_get_resource(host->pdev, IORESOURCE_DMA, 1);
 879	if (!res)
 880		return -ENODEV;
 881	rxid = res->start;
 882
 883	if (!memid)
 884		return -ENODEV;
 885
 886	host->tx_chan = au1xxx_dbdma_chan_alloc(memid, txid,
 887				au1xmmc_dbdma_callback, (void *)host);
 888	if (!host->tx_chan) {
 889		dev_err(&host->pdev->dev, "cannot allocate TX DMA\n");
 890		return -ENODEV;
 891	}
 892
 893	host->rx_chan = au1xxx_dbdma_chan_alloc(rxid, memid,
 894				au1xmmc_dbdma_callback, (void *)host);
 895	if (!host->rx_chan) {
 896		dev_err(&host->pdev->dev, "cannot allocate RX DMA\n");
 897		au1xxx_dbdma_chan_free(host->tx_chan);
 898		return -ENODEV;
 899	}
 900
 901	au1xxx_dbdma_set_devwidth(host->tx_chan, 8);
 902	au1xxx_dbdma_set_devwidth(host->rx_chan, 8);
 903
 904	au1xxx_dbdma_ring_alloc(host->tx_chan, AU1XMMC_DESCRIPTOR_COUNT);
 905	au1xxx_dbdma_ring_alloc(host->rx_chan, AU1XMMC_DESCRIPTOR_COUNT);
 906
 907	/* DBDMA is good to go */
 908	host->flags |= HOST_F_DMA;
 909
 910	return 0;
 911}
 912
 913static void au1xmmc_dbdma_shutdown(struct au1xmmc_host *host)
 914{
 915	if (host->flags & HOST_F_DMA) {
 916		host->flags &= ~HOST_F_DMA;
 917		au1xxx_dbdma_chan_free(host->tx_chan);
 918		au1xxx_dbdma_chan_free(host->rx_chan);
 919	}
 920}
 921#endif
 922
 923static void au1xmmc_enable_sdio_irq(struct mmc_host *mmc, int en)
 924{
 925	struct au1xmmc_host *host = mmc_priv(mmc);
 926
 927	if (en)
 928		IRQ_ON(host, SD_CONFIG_SI);
 929	else
 930		IRQ_OFF(host, SD_CONFIG_SI);
 931}
 932
 933static const struct mmc_host_ops au1xmmc_ops = {
 934	.request	= au1xmmc_request,
 935	.set_ios	= au1xmmc_set_ios,
 936	.get_ro		= au1xmmc_card_readonly,
 937	.get_cd		= au1xmmc_card_inserted,
 938	.enable_sdio_irq = au1xmmc_enable_sdio_irq,
 939};
 940
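/*
 * Example (sketch only, not part of this driver): a board file typically
 * registers the controller as a platform device and may attach
 * struct au1xmmc_platform_data (see au1100_mmc.h) to wire up
 * board-specific card detection, an activity LED and capability masking:
 *
 *	static struct au1xmmc_platform_data board_mmc_pd = {
 *		.cd_setup	= board_mmc_cd_setup,	// hypothetical board helper
 *		.mask_host_caps	= MMC_CAP_SDIO_IRQ,
 *	};
 *
 *	platform_device_add_data(pdev, &board_mmc_pd, sizeof(board_mmc_pd));
 *
 * The helper name above is illustrative; only the fields this driver
 * dereferences (cd_setup, led, mask_host_caps) matter here.
 */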
 941static int __devinit au1xmmc_probe(struct platform_device *pdev)
 942{
 943	struct mmc_host *mmc;
 944	struct au1xmmc_host *host;
 945	struct resource *r;
 946	int ret;
 947
 948	mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev);
 949	if (!mmc) {
 950		dev_err(&pdev->dev, "no memory for mmc_host\n");
 951		ret = -ENOMEM;
 952		goto out0;
 953	}
 954
 955	host = mmc_priv(mmc);
 956	host->mmc = mmc;
 957	host->platdata = pdev->dev.platform_data;
 958	host->pdev = pdev;
 959
 960	ret = -ENODEV;
 961	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 962	if (!r) {
 963		dev_err(&pdev->dev, "no mmio defined\n");
 964		goto out1;
 965	}
 966
 967	host->ioarea = request_mem_region(r->start, resource_size(r),
 968					   pdev->name);
 969	if (!host->ioarea) {
 970		dev_err(&pdev->dev, "mmio already in use\n");
 971		goto out1;
 972	}
 973
 974	host->iobase = ioremap(r->start, 0x3c);
 975	if (!host->iobase) {
 976		dev_err(&pdev->dev, "cannot remap mmio\n");
 977		goto out2;
 978	}
 979
 980	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 981	if (!r) {
 982		dev_err(&pdev->dev, "no IRQ defined\n");
 983		goto out3;
 984	}
 985
 986	host->irq = r->start;
 987	/* IRQ is shared among both SD controllers */
 988	ret = request_irq(host->irq, au1xmmc_irq, IRQF_SHARED,
 989			  DRIVER_NAME, host);
 990	if (ret) {
 991		dev_err(&pdev->dev, "cannot grab IRQ\n");
 992		goto out3;
 993	}
 994
 995	mmc->ops = &au1xmmc_ops;
 996
 997	mmc->f_min =   450000;
 998	mmc->f_max = 24000000;
 999
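	/* The DBDMA rings hold a single descriptor, so a request is limited
	 * to one segment; the conservative Au1100 descriptor size (64KB) is
	 * used here, which also fits within the larger Au1200 limit.
	 */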
1000	mmc->max_seg_size = AU1100_MMC_DESCRIPTOR_SIZE;
1001	mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT;
1002
1003	mmc->max_blk_size = 2048;
1004	mmc->max_blk_count = 512;
1005
1006	mmc->ocr_avail = AU1XMMC_OCR;
1007	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
1008
1009	host->status = HOST_S_IDLE;
1010
1011	/* board-specific carddetect setup, if any */
1012	if (host->platdata && host->platdata->cd_setup) {
1013		ret = host->platdata->cd_setup(mmc, 1);
1014		if (ret) {
1015			dev_warn(&pdev->dev, "board CD setup failed\n");
1016			mmc->caps |= MMC_CAP_NEEDS_POLL;
1017		}
1018	} else
1019		mmc->caps |= MMC_CAP_NEEDS_POLL;
1020
1021	/* platform may not be able to use all advertised caps */
1022	if (host->platdata)
1023		mmc->caps &= ~(host->platdata->mask_host_caps);
1024
1025	tasklet_init(&host->data_task, au1xmmc_tasklet_data,
1026			(unsigned long)host);
1027
1028	tasklet_init(&host->finish_task, au1xmmc_tasklet_finish,
1029			(unsigned long)host);
1030
1031#ifdef CONFIG_SOC_AU1200
1032	ret = au1xmmc_dbdma_init(host);
1033	if (ret)
1034		printk(KERN_INFO DRIVER_NAME ": DBDMA init failed; using PIO\n");
1035#endif
1036
1037#ifdef CONFIG_LEDS_CLASS
1038	if (host->platdata && host->platdata->led) {
1039		struct led_classdev *led = host->platdata->led;
1040		led->name = mmc_hostname(mmc);
1041		led->brightness = LED_OFF;
1042		led->default_trigger = mmc_hostname(mmc);
1043		ret = led_classdev_register(mmc_dev(mmc), led);
1044		if (ret)
1045			goto out5;
1046	}
1047#endif
1048
1049	au1xmmc_reset_controller(host);
1050
1051	ret = mmc_add_host(mmc);
1052	if (ret) {
1053		dev_err(&pdev->dev, "cannot add mmc host\n");
1054		goto out6;
1055	}
1056
1057	platform_set_drvdata(pdev, host);
1058
1059	printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %p"
1060		" (mode=%s)\n", pdev->id, host->iobase,
1061		host->flags & HOST_F_DMA ? "dma" : "pio");
1062
1063	return 0;	/* all ok */
1064
1065out6:
1066#ifdef CONFIG_LEDS_CLASS
1067	if (host->platdata && host->platdata->led)
1068		led_classdev_unregister(host->platdata->led);
1069out5:
1070#endif
1071	au_writel(0, HOST_ENABLE(host));
1072	au_writel(0, HOST_CONFIG(host));
1073	au_writel(0, HOST_CONFIG2(host));
1074	au_sync();
1075
1076#ifdef CONFIG_SOC_AU1200
1077	au1xmmc_dbdma_shutdown(host);
1078#endif
1079
1080	tasklet_kill(&host->data_task);
1081	tasklet_kill(&host->finish_task);
1082
1083	if (host->platdata && host->platdata->cd_setup &&
1084	    !(mmc->caps & MMC_CAP_NEEDS_POLL))
1085		host->platdata->cd_setup(mmc, 0);
1086
1087	free_irq(host->irq, host);
1088out3:
1089	iounmap(host->iobase);
1090out2:
1091	release_resource(host->ioarea);
1092	kfree(host->ioarea);
1093out1:
1094	mmc_free_host(mmc);
1095out0:
1096	return ret;
1097}
1098
1099static int __devexit au1xmmc_remove(struct platform_device *pdev)
1100{
1101	struct au1xmmc_host *host = platform_get_drvdata(pdev);
1102
1103	if (host) {
1104		mmc_remove_host(host->mmc);
1105
1106#ifdef CONFIG_LEDS_CLASS
1107		if (host->platdata && host->platdata->led)
1108			led_classdev_unregister(host->platdata->led);
1109#endif
1110
1111		if (host->platdata && host->platdata->cd_setup &&
1112		    !(host->mmc->caps & MMC_CAP_NEEDS_POLL))
1113			host->platdata->cd_setup(host->mmc, 0);
1114
1115		au_writel(0, HOST_ENABLE(host));
1116		au_writel(0, HOST_CONFIG(host));
1117		au_writel(0, HOST_CONFIG2(host));
1118		au_sync();
1119
1120		tasklet_kill(&host->data_task);
1121		tasklet_kill(&host->finish_task);
1122
1123#ifdef CONFIG_SOC_AU1200
1124		au1xmmc_dbdma_shutdown(host);
1125#endif
1126		au1xmmc_set_power(host, 0);
1127
1128		free_irq(host->irq, host);
1129		iounmap(host->iobase);
1130		release_resource(host->ioarea);
1131		kfree(host->ioarea);
1132
1133		mmc_free_host(host->mmc);
1134		platform_set_drvdata(pdev, NULL);
1135	}
1136	return 0;
1137}
1138
1139#ifdef CONFIG_PM
1140static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
1141{
1142	struct au1xmmc_host *host = platform_get_drvdata(pdev);
1143	int ret;
1144
1145	ret = mmc_suspend_host(host->mmc);
1146	if (ret)
1147		return ret;
1148
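	/* Mask interrupts, ack pending status and disable the block before
	 * sleeping; au1xmmc_resume() restores state via
	 * au1xmmc_reset_controller().
	 */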
1149	au_writel(0, HOST_CONFIG2(host));
1150	au_writel(0, HOST_CONFIG(host));
1151	au_writel(0xffffffff, HOST_STATUS(host));
1152	au_writel(0, HOST_ENABLE(host));
1153	au_sync();
1154
1155	return 0;
1156}
1157
1158static int au1xmmc_resume(struct platform_device *pdev)
1159{
1160	struct au1xmmc_host *host = platform_get_drvdata(pdev);
1161
1162	au1xmmc_reset_controller(host);
1163
1164	return mmc_resume_host(host->mmc);
1165}
1166#else
1167#define au1xmmc_suspend NULL
1168#define au1xmmc_resume NULL
1169#endif
1170
1171static struct platform_driver au1xmmc_driver = {
1172	.probe         = au1xmmc_probe,
1173	.remove        = au1xmmc_remove,
1174	.suspend       = au1xmmc_suspend,
1175	.resume        = au1xmmc_resume,
1176	.driver        = {
1177		.name  = DRIVER_NAME,
1178		.owner = THIS_MODULE,
1179	},
1180};
1181
1182static int __init au1xmmc_init(void)
1183{
1184#ifdef CONFIG_SOC_AU1200
1185	/* DSCR_CMD0_ALWAYS has a stride of 32 bits, but we need a stride
1186	 * of 8 bits.  And since devices are shared, we need to create
1187	 * our own to avoid freaking out other devices.
1188	 */
1189	memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev);
1190	if (!memid)
1191		printk(KERN_ERR "au1xmmc: cannot add memory dbdma dev\n");
1192#endif
1193	return platform_driver_register(&au1xmmc_driver);
1194}
1195
1196static void __exit au1xmmc_exit(void)
1197{
1198#ifdef CONFIG_SOC_AU1200
1199	if (memid)
1200		au1xxx_ddma_del_device(memid);
1201#endif
1202	platform_driver_unregister(&au1xmmc_driver);
1203}
1204
1205module_init(au1xmmc_init);
1206module_exit(au1xmmc_exit);
1207
1208MODULE_AUTHOR("Advanced Micro Devices, Inc");
1209MODULE_DESCRIPTION("MMC/SD driver for the Alchemy Au1XXX");
1210MODULE_LICENSE("GPL");
1211MODULE_ALIAS("platform:au1xxx-mmc");