/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/swiotlb.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define SDHCI_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
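
/*
 * Illustrative note (not in the original source): for a host registered
 * as "mmc0", DBG("clock %uHz\n", clock) expands to
 * pr_debug("mmc0: sdhci: clock %uHz\n", clock), so every debug and
 * register-dump line is tagged with the host instance it belongs to.
 */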

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr:  0x%08x | Version:  0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size:  0x%08x | Blk cnt:  0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument:  0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present:   0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power:     0x%08x | Blk gap:  0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up:   0x%08x | Clock:    0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout:   0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab:  0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("AC12 err:  0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_ACMD12_ERR),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps:      0x%08x | Caps_1:   0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd:       0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]:   0x%08x | Resp[1]:  0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]:   0x%08x | Resp[3]:  0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (ktime_after(ktime_get(), timeout)) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);

static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many settings */
		host->preset_enabled = false;
	}
}

static void sdhci_set_default_irqs(struct sdhci_host *host)
{
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_set_default_irqs(host);

	host->cqe_on = false;

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	/* Bounce write requests to the bounce buffer */
	if (host->bounce_buffer) {
		unsigned int length = data->blksz * data->blocks;

		if (length > host->bounce_buffer_size) {
			pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
			       mmc_hostname(host->mmc), length,
			       host->bounce_buffer_size);
			return -EIO;
		}
		if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
			/* Copy the data to the bounce buffer */
			sg_copy_to_buffer(data->sg, data->sg_len,
					  host->bounce_buffer,
					  length);
		}
		/* Switch ownership to the DMA */
		dma_sync_single_for_device(host->mmc->parent,
					   host->bounce_addr,
					   host->bounce_buffer_size,
					   mmc_get_dma_dir(data));
		/* Just a dummy value */
		sg_count = 1;
	} else {
		/* Just access the data directly from memory */
		sg_count = dma_map_sg(mmc_dev(host->mmc),
				      data->sg, data->sg_len,
				      mmc_get_dma_dir(data));
	}

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}
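
/*
 * Clarifying note (added, not in the original source): data->host_cookie
 * tracks who mapped the buffers. When the mmc core pre-maps a request via
 * the host's pre_req hook, the cookie is COOKIE_PRE_MAPPED and the mapping
 * is simply reused above; otherwise sdhci maps on demand with COOKIE_MAPPED
 * and unmaps again when the request completes.
 */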

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}
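
/*
 * Illustrative sketch (not in the original source): a 32-bit ADMA2
 * descriptor is { le16 cmd; le16 len; le32 addr; }. For a single
 * aligned 512-byte segment, the table built below looks roughly like:
 *
 *	desc[0] = { .cmd = ADMA2_TRAN_VALID,    .len = 512, .addr = dma_addr };
 *	desc[1] = { .cmd = ADMA2_NOP_END_VALID, .len = 0,   .addr = 0 };
 *
 * i.e. one "tran" entry per segment plus a terminating "nop, end" entry
 * (or the END attribute set on the last entry for hosts with the
 * NO_ENDATTR_IN_NOPDESC quirk).
 */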

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

static void sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
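		/*
		 * Worked example (illustrative): for a segment starting at
		 * bus address 0x1002, offset = (4 - 2) & 3 = 2, so the
		 * first two bytes go through the pre-allocated align buffer
		 * and the main descriptor starts at 0x1004 with len - 2.
		 */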
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		if (len) {
			/* tran, valid */
			sdhci_adma_write_desc(host, desc, addr, len,
					      ADMA2_TRAN_VALID);
			desc += host->desc_sz;
		}

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}

static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

static u32 sdhci_sdma_address(struct sdhci_host *host)
{
	if (host->bounce_buffer)
		return host->bounce_addr;
	else
		return sg_dma_address(host->data->sg);
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE.  The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->busy_timeout * 1000;
	else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz.  target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz.  Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
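	/*
	 * Worked example (illustrative): with host->timeout_clk = 50000
	 * (kHz), the base timeout is (1 << 13) * 1000 / 50000 ~= 163 us.
	 * For a 100 ms target, doubling ten times gives ~167 ms >= 100 ms,
	 * so count = 10 is what ends up in the timeout control register.
	 */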
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("Too large timeout 0x%x requested for CMD%d!\n",
		    count, cmd->opcode);
		count = 0xE;
	}

	return count;
}

static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;

	if (sdhci_data_line_cmd(cmd))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);

			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				sdhci_writel(host,
					     (u64)host->adma_addr >> 32,
					     SDHCI_ADMA_ADDRESS_HI);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_writel(host, sdhci_sdma_address(host),
				     SDHCI_DMA_ADDRESS);
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
		     SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
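	/*
	 * Encoding note (illustrative, not in the original source): the
	 * block size register packs the SDMA buffer boundary into bits
	 * 14:12 and the block size into bits 11:0, so e.g.
	 * SDHCI_MAKE_BLKSZ(7, 512) == 0x7200 selects a 512 KiB boundary
	 * with 512-byte blocks.
	 */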
}

static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
		/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (sdhci_auto_cmd12(host, cmd->mrq) &&
		    (cmd->opcode != SD_IO_RW_EXTENDED))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}

static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
				(mrq->data->stop && mrq->data->stop->error))) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);

	tasklet_schedule(&host->finish_tasklet);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	__sdhci_finish_mrq(host, mrq);
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !data->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			if (!host->cmd || host->cmd == data_cmd)
				sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		/*
		 * 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is upper layer's
		 * responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			sdhci_send_command(host, data->stop);
		}
	} else {
		sdhci_finish_mrq(host, data->mrq);
	}
}

static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}

void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			sdhci_finish_mrq(host, cmd->mrq);
			return;
		}
		timeout--;
		mdelay(1);
	}

	timeout = jiffies;
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	host->cmd = cmd;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
	}

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		sdhci_finish_mrq(host, cmd->mrq);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;
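
	/*
	 * Illustrative example (not in the original source): a single
	 * block read, CMD17 with an R1 response, ends up with
	 * flags = SDHCI_CMD_RESP_SHORT | SDHCI_CMD_CRC | SDHCI_CMD_INDEX |
	 * SDHCI_CMD_DATA, so SDHCI_MAKE_CMD(17, flags) writes 0x113A to
	 * the command register below.
	 */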

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);

static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, reg;

	for (i = 0; i < 4; i++) {
		reg = SDHCI_RESPONSE + (3 - i) * 4;
		cmd->resp[i] = sdhci_readl(host, reg);
	}

	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
		return;

	/* CRC is stripped so we need to do some shifting */
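	/*
	 * Clarifying note (not in the original source): the controller
	 * strips the CRC byte, so register bits [119:0] hold response
	 * bits [127:8]. Shifting each word left by 8 and pulling in the
	 * top byte of the next word restores the bit positions the mmc
	 * core expects, leaving the low byte of resp[3] zero where the
	 * CRC used to be.
	 */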
	for (i = 0; i < 4; i++) {
		cmd->resp[i] <<= 8;
		if (i != 3)
			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
	}
}

static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			sdhci_read_rsp_136(host, cmd);
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		sdhci_send_command(host, cmd->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			sdhci_finish_mrq(host, cmd->mrq);
	}
}

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}
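
	/*
	 * Worked example (illustrative): with a 200 MHz base clock and a
	 * 50 MHz request on a v3.00 host without a clock multiplier, the
	 * loop picks the smallest even divisor, div = 4, so SDCLK runs at
	 * 200 / 4 = 50 MHz and div >> 1 = 2 is what gets encoded into the
	 * clock control register below.
	 */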

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);

void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
	ktime_t timeout;

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (ktime_after(ktime_get(), timeout)) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_clk);

void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
	sdhci_enable_clk(host, clk);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);

static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}

void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
			   unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
		/*
		 * Without a regulator, SDHCI does not support 2.0v
		 * so we only get here if the driver deliberately
		 * added the 2.0v range to ocr_avail. Map it to 1.8v
		 * for the purpose of turning on the power.
		 */
		case MMC_VDD_20_21:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10 ms delay before they
		 * can apply the clock after applying power.
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);

void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	if (IS_ERR(host->mmc->supply.vmmc))
		sdhci_set_power_noreg(host, mode, vdd);
	else
		sdhci_set_power_reg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power);

/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	sdhci_led_activate(host);

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (sdhci_auto_cmd12(host, mrq)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		mrq->cmd->error = -ENOMEDIUM;
		sdhci_finish_mrq(host, mrq);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);

void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);

void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u8 ctrl;

	if (ios->power_mode == MMC_POWER_UNDEFINED)
		return;

	if (host->flags & SDHCI_DEVICE_DEAD) {
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	if (host->ops->set_power)
		host->ops->set_power(host, ios->power_mode, ios->vdd);
	else
		sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
		if (ios->timing == MMC_TIMING_SD_HS ||
		     ios->timing == MMC_TIMING_MMC_HS ||
		     ios->timing == MMC_TIMING_MMC_HS400 ||
		     ios->timing == MMC_TIMING_MMC_HS200 ||
		     ios->timing == MMC_TIMING_MMC_DDR52 ||
		     ios->timing == MMC_TIMING_UHS_SDR50 ||
		     ios->timing == MMC_TIMING_UHS_SDR104 ||
		     ios->timing == MMC_TIMING_UHS_DDR50 ||
		     ios->timing == MMC_TIMING_UHS_SDR25)
			ctrl |= SDHCI_CTRL_HISPD;
		else
			ctrl &= ~SDHCI_CTRL_HISPD;
	}

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
}
EXPORT_SYMBOL_GPL(sdhci_set_ios);

static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int gpio_cd = mmc_gpio_get_cd(mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (!mmc_card_is_removable(host->mmc))
		return 1;

	/*
	 * Try slot gpio detect; if defined, it takes precedence
	 * over the built-in controller functionality.
	 */
1814	if (gpio_cd >= 0)
1815		return !!gpio_cd;
1816
1817	/* If polling, assume that the card is always present. */
1818	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
1819		return 1;
1820
1821	/* Host native card detect */
1822	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
1823}
1824
 
 
 
 
 
 
 
 
 
 
 
1825static int sdhci_check_ro(struct sdhci_host *host)
1826{
1827	unsigned long flags;
1828	int is_readonly;
1829
1830	spin_lock_irqsave(&host->lock, flags);
1831
1832	if (host->flags & SDHCI_DEVICE_DEAD)
1833		is_readonly = 0;
1834	else if (host->ops->get_ro)
1835		is_readonly = host->ops->get_ro(host);
1836	else
1837		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
1838				& SDHCI_WRITE_PROTECT);
1839
1840	spin_unlock_irqrestore(&host->lock, flags);
1841
1842	/* This quirk needs to be replaced by a callback-function later */
1843	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
1844		!is_readonly : is_readonly;
1845}
1846
1847#define SAMPLE_COUNT	5
1848
1849static int sdhci_get_ro(struct mmc_host *mmc)
1850{
1851	struct sdhci_host *host = mmc_priv(mmc);
1852	int i, ro_count;
1853
1854	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
1855		return sdhci_check_ro(host);
1856
1857	ro_count = 0;
1858	for (i = 0; i < SAMPLE_COUNT; i++) {
1859		if (sdhci_check_ro(host)) {
1860			if (++ro_count > SAMPLE_COUNT / 2)
1861				return 1;
1862		}
1863		msleep(30);
1864	}
1865	return 0;
1866}
1867
1868static void sdhci_hw_reset(struct mmc_host *mmc)
1869{
1870	struct sdhci_host *host = mmc_priv(mmc);
1871
1872	if (host->ops && host->ops->hw_reset)
1873		host->ops->hw_reset(host);
1874}
1875
1876static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
1877{
1878	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
1879		if (enable)
1880			host->ier |= SDHCI_INT_CARD_INT;
1881		else
1882			host->ier &= ~SDHCI_INT_CARD_INT;
1883
1884		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1885		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1886		mmiowb();
1887	}
1888}
1889
1890void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1891{
1892	struct sdhci_host *host = mmc_priv(mmc);
1893	unsigned long flags;
1894
1895	if (enable)
1896		pm_runtime_get_noresume(host->mmc->parent);
1897
1898	spin_lock_irqsave(&host->lock, flags);
1899	if (enable)
1900		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
1901	else
1902		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
1903
1904	sdhci_enable_sdio_irq_nolock(host, enable);
1905	spin_unlock_irqrestore(&host->lock, flags);
1906
1907	if (!enable)
1908		pm_runtime_put_noidle(host->mmc->parent);
1909}
1910EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
1911
1912int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
1913				      struct mmc_ios *ios)
1914{
1915	struct sdhci_host *host = mmc_priv(mmc);
1916	u16 ctrl;
1917	int ret;
1918
1919	/*
1920	 * Signal Voltage Switching is only applicable for Host Controllers
1921	 * v3.00 and above.
1922	 */
1923	if (host->version < SDHCI_SPEC_300)
1924		return 0;
1925
1926	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1927
1928	switch (ios->signal_voltage) {
1929	case MMC_SIGNAL_VOLTAGE_330:
1930		if (!(host->flags & SDHCI_SIGNALING_330))
1931			return -EINVAL;
1932		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1933		ctrl &= ~SDHCI_CTRL_VDD_180;
1934		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1935
1936		if (!IS_ERR(mmc->supply.vqmmc)) {
1937			ret = mmc_regulator_set_vqmmc(mmc, ios);
1938			if (ret) {
1939				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
1940					mmc_hostname(mmc));
1941				return -EIO;
1942			}
1943		}
1944		/* Wait for 5ms */
1945		usleep_range(5000, 5500);
1946
1947		/* 3.3V regulator output should be stable within 5 ms */
1948		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1949		if (!(ctrl & SDHCI_CTRL_VDD_180))
1950			return 0;
1951
1952		pr_warn("%s: 3.3V regulator output did not become stable\n",
1953			mmc_hostname(mmc));
1954
1955		return -EAGAIN;
1956	case MMC_SIGNAL_VOLTAGE_180:
1957		if (!(host->flags & SDHCI_SIGNALING_180))
1958			return -EINVAL;
1959		if (!IS_ERR(mmc->supply.vqmmc)) {
1960			ret = mmc_regulator_set_vqmmc(mmc, ios);
1961			if (ret) {
1962				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
1963					mmc_hostname(mmc));
1964				return -EIO;
1965			}
1966		}
1967
1968		/*
1969		 * Enable 1.8V Signal Enable in the Host Control2
1970		 * register
1971		 */
1972		ctrl |= SDHCI_CTRL_VDD_180;
1973		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1974
1975		/* Some controllers need to do more when switching */
1976		if (host->ops->voltage_switch)
1977			host->ops->voltage_switch(host);
1978
1979		/* 1.8V regulator output should be stable within 5 ms */
1980		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1981		if (ctrl & SDHCI_CTRL_VDD_180)
1982			return 0;
1983
1984		pr_warn("%s: 1.8V regulator output did not become stable\n",
1985			mmc_hostname(mmc));
1986
1987		return -EAGAIN;
1988	case MMC_SIGNAL_VOLTAGE_120:
1989		if (!(host->flags & SDHCI_SIGNALING_120))
1990			return -EINVAL;
1991		if (!IS_ERR(mmc->supply.vqmmc)) {
1992			ret = mmc_regulator_set_vqmmc(mmc, ios);
1993			if (ret) {
1994				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
1995					mmc_hostname(mmc));
1996				return -EIO;
1997			}
1998		}
1999		return 0;
2000	default:
2001		/* No signal voltage switch required */
2002		return 0;
2003	}
2004}
2005EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2006
2007static int sdhci_card_busy(struct mmc_host *mmc)
2008{
2009	struct sdhci_host *host = mmc_priv(mmc);
2010	u32 present_state;
2011
2012	/* A busy card holds DAT[0] low, so check whether DAT[0] reads 0 */
2013	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2014
2015	return !(present_state & SDHCI_DATA_0_LVL_MASK);
2016}
2017
2018static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2019{
2020	struct sdhci_host *host = mmc_priv(mmc);
2021	unsigned long flags;
2022
2023	spin_lock_irqsave(&host->lock, flags);
2024	host->flags |= SDHCI_HS400_TUNING;
2025	spin_unlock_irqrestore(&host->lock, flags);
2026
2027	return 0;
2028}
2029
2030static void sdhci_start_tuning(struct sdhci_host *host)
2031{
2032	u16 ctrl;
2033
2034	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2035	ctrl |= SDHCI_CTRL_EXEC_TUNING;
2036	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2037		ctrl |= SDHCI_CTRL_TUNED_CLK;
2038	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2039
2040	/*
2041	 * As per the Host Controller spec v3.00, tuning command
2042	 * generates Buffer Read Ready interrupt, so enable that.
2043	 *
2044	 * Note: The spec clearly says that when tuning sequence
2045	 * is being performed, the controller does not generate
2046	 * interrupts other than Buffer Read Ready interrupt. But
2047	 * to make sure we don't hit a controller bug, we _only_
2048	 * enable Buffer Read Ready interrupt here.
2049	 */
2050	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2051	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2052}
2053
2054static void sdhci_end_tuning(struct sdhci_host *host)
2055{
2056	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2057	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2058}
2059
2060static void sdhci_reset_tuning(struct sdhci_host *host)
2061{
2062	u16 ctrl;
2063
2064	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2065	ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2066	ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2067	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2068}
2069
2070static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2071{
2072	sdhci_reset_tuning(host);
2073
2074	sdhci_do_reset(host, SDHCI_RESET_CMD);
2075	sdhci_do_reset(host, SDHCI_RESET_DATA);
2076
2077	sdhci_end_tuning(host);
2078
2079	mmc_abort_tuning(host->mmc, opcode);
2080}
2081
2082/*
2083 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit: the
2084 * SDHCI tuning command has no data payload (or rather the hardware handles it
2085 * automatically), so mmc_send_tuning() would return -EIO. Also the tuning
2086 * command's interrupt setup is different from other commands and there is no
2087 * timeout interrupt, so special handling is needed.
2088 */
2089static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2090{
2091	struct mmc_host *mmc = host->mmc;
2092	struct mmc_command cmd = {};
2093	struct mmc_request mrq = {};
2094	unsigned long flags;
2095	u32 b = host->sdma_boundary;
2096
2097	spin_lock_irqsave(&host->lock, flags);
2098
2099	cmd.opcode = opcode;
2100	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2101	cmd.mrq = &mrq;
2102
2103	mrq.cmd = &cmd;
2104	/*
2105	 * In response to CMD19, the card sends a 64-byte tuning
2106	 * block, so set the block size to 64, except for CMD21 on
2107	 * an 8-bit bus, where the tuning block is 128 bytes.
2108	 */
2109	if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2110	    mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2111		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2112	else
2113		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
2114
2115	/*
2116	 * The tuning block is sent by the card to the host controller.
2117	 * So we set the TRNS_READ bit in the Transfer Mode register.
2118	 * This also takes care of setting DMA Enable and Multi Block
2119	 * Select in the same register to 0.
2120	 */
2121	sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2122
2123	sdhci_send_command(host, &cmd);
2124
2125	host->cmd = NULL;
2126
2127	sdhci_del_timer(host, &mrq);
2128
2129	host->tuning_done = 0;
2130
2131	mmiowb();
2132	spin_unlock_irqrestore(&host->lock, flags);
2133
2134	/* Wait for Buffer Read Ready interrupt */
2135	wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2136			   msecs_to_jiffies(50));
2137
2138}
2139
2140static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2141{
2142	int i;
2143
2144	/*
2145	 * Issue the tuning opcode repeatedly until Execute Tuning reads 0 or
2146	 * the number of loops reaches MAX_TUNING_LOOP (40).
2147	 */
2148	for (i = 0; i < MAX_TUNING_LOOP; i++) {
2149		u16 ctrl;
2150
2151		sdhci_send_tuning(host, opcode);
2152
2153		if (!host->tuning_done) {
2154			pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
2155				mmc_hostname(host->mmc));
2156			sdhci_abort_tuning(host, opcode);
2157			return;
2158		}
2159
2160		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2161		if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2162			if (ctrl & SDHCI_CTRL_TUNED_CLK)
2163				return; /* Success! */
2164			break;
2165		}
2166
2167		/* Spec does not require a delay between tuning cycles */
2168		if (host->tuning_delay > 0)
2169			mdelay(host->tuning_delay);
2170	}
2171
2172	pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2173		mmc_hostname(host->mmc));
2174	sdhci_reset_tuning(host);
2175}
2176
2177int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2178{
2179	struct sdhci_host *host = mmc_priv(mmc);
2180	int err = 0;
2181	unsigned int tuning_count = 0;
2182	bool hs400_tuning;
2183
2184	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2185
2186	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2187		tuning_count = host->tuning_count;
2188
2189	/*
2190	 * The Host Controller needs tuning in SDR104 and DDR50 modes,
2191	 * and in SDR50 mode when "Use Tuning for SDR50" is set in the
2192	 * Capabilities register.
2193	 * If the Host Controller supports the HS200 mode then the
2194	 * tuning function has to be executed as well.
2195	 */
2196	switch (host->timing) {
2197	/* HS400 tuning is done in HS200 mode */
2198	case MMC_TIMING_MMC_HS400:
2199		err = -EINVAL;
2200		goto out;
2201
2202	case MMC_TIMING_MMC_HS200:
2203		/*
2204		 * Periodic re-tuning for HS400 is not expected to be needed, so
2205		 * disable it here.
2206		 */
2207		if (hs400_tuning)
2208			tuning_count = 0;
2209		break;
2210
2211	case MMC_TIMING_UHS_SDR104:
2212	case MMC_TIMING_UHS_DDR50:
2213		break;
2214
2215	case MMC_TIMING_UHS_SDR50:
2216		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2217			break;
2218		/* FALLTHROUGH */
2219
2220	default:
2221		goto out;
2222	}
2223
2224	if (host->ops->platform_execute_tuning) {
2225		err = host->ops->platform_execute_tuning(host, opcode);
2226		goto out;
2227	}
2228
2229	host->mmc->retune_period = tuning_count;
2230
2231	if (host->tuning_delay < 0)
2232		host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2233
2234	sdhci_start_tuning(host);
2235
2236	__sdhci_execute_tuning(host, opcode);
2237
2238	sdhci_end_tuning(host);
2239out:
2240	host->flags &= ~SDHCI_HS400_TUNING;
2241
2242	return err;
2243}
2244EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
2245
2246static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2247{
2248	/* Host Controller v3.00 defines preset value registers */
2249	if (host->version < SDHCI_SPEC_300)
2250		return;
2251
2252	/*
2253	 * Only touch the Preset Value Enable bit when the requested
2254	 * state differs from the current one; otherwise bail out.
2255	 */
2256	if (host->preset_enabled != enable) {
2257		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2258
2259		if (enable)
2260			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2261		else
2262			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2263
2264		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2265
2266		if (enable)
2267			host->flags |= SDHCI_PV_ENABLED;
2268		else
2269			host->flags &= ~SDHCI_PV_ENABLED;
2270
2271		host->preset_enabled = enable;
2272	}
2273}
2274
2275static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2276				int err)
2277{
2278	struct sdhci_host *host = mmc_priv(mmc);
2279	struct mmc_data *data = mrq->data;
2280
2281	if (data->host_cookie != COOKIE_UNMAPPED)
2282		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2283			     mmc_get_dma_dir(data));
2284
2285	data->host_cookie = COOKIE_UNMAPPED;
2286}
2287
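/*
 * mrq->data->host_cookie tracks ownership of the DMA mapping for a
 * request: COOKIE_UNMAPPED (no mapping), COOKIE_PRE_MAPPED (mapped in
 * the pre_req hook below) or COOKIE_MAPPED (mapped in the request
 * path). sdhci_post_req() above unmaps whatever is still mapped.
 */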
2288static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2289{
2290	struct sdhci_host *host = mmc_priv(mmc);
2291
2292	mrq->data->host_cookie = COOKIE_UNMAPPED;
2293
2294	/*
2295	 * No pre-mapping in the pre hook if we're using the bounce
2296	 * buffer; that would need two bounce buffers, since one buffer
2297	 * may still be in flight when this hook is called.
2298	 */
2299	if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
2300		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2301}
2302
2303static inline bool sdhci_has_requests(struct sdhci_host *host)
2304{
2305	return host->cmd || host->data_cmd;
2306}
2307
2308static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2309{
2310	if (host->data_cmd) {
2311		host->data_cmd->error = err;
2312		sdhci_finish_mrq(host, host->data_cmd->mrq);
2313	}
2314
2315	if (host->cmd) {
2316		host->cmd->error = err;
2317		sdhci_finish_mrq(host, host->cmd->mrq);
2318	}
2319}
2320
2321static void sdhci_card_event(struct mmc_host *mmc)
2322{
2323	struct sdhci_host *host = mmc_priv(mmc);
2324	unsigned long flags;
2325	int present;
2326
2327	/* First check if client has provided their own card event */
2328	if (host->ops->card_event)
2329		host->ops->card_event(host);
2330
2331	present = mmc->ops->get_cd(mmc);
2332
2333	spin_lock_irqsave(&host->lock, flags);
2334
2335	/* Check sdhci_has_requests() first in case we are runtime suspended */
2336	if (sdhci_has_requests(host) && !present) {
2337		pr_err("%s: Card removed during transfer!\n",
2338			mmc_hostname(host->mmc));
2339		pr_err("%s: Resetting controller.\n",
2340			mmc_hostname(host->mmc));
2341
2342		sdhci_do_reset(host, SDHCI_RESET_CMD);
2343		sdhci_do_reset(host, SDHCI_RESET_DATA);
2344
2345		sdhci_error_out_mrqs(host, -ENOMEDIUM);
2346	}
2347
2348	spin_unlock_irqrestore(&host->lock, flags);
2349}
2350
2351static const struct mmc_host_ops sdhci_ops = {
2352	.request	= sdhci_request,
2353	.post_req	= sdhci_post_req,
2354	.pre_req	= sdhci_pre_req,
2355	.set_ios	= sdhci_set_ios,
2356	.get_cd		= sdhci_get_cd,
2357	.get_ro		= sdhci_get_ro,
2358	.hw_reset	= sdhci_hw_reset,
2359	.enable_sdio_irq = sdhci_enable_sdio_irq,
2360	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
2361	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
2362	.execute_tuning			= sdhci_execute_tuning,
2363	.card_event			= sdhci_card_event,
2364	.card_busy	= sdhci_card_busy,
2365};
2366
2367/*****************************************************************************\
2368 *                                                                           *
2369 * Tasklets                                                                  *
2370 *                                                                           *
2371\*****************************************************************************/
2372
2373static bool sdhci_request_done(struct sdhci_host *host)
2374{
2375	unsigned long flags;
2376	struct mmc_request *mrq;
2377	int i;
2378
2379	spin_lock_irqsave(&host->lock, flags);
2380
2381	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
2382		mrq = host->mrqs_done[i];
2383		if (mrq)
2384			break;
2385	}
2386
2387	if (!mrq) {
2388		spin_unlock_irqrestore(&host->lock, flags);
2389		return true;
2390	}
2391
2392	sdhci_del_timer(host, mrq);
2393
2394	/*
2395	 * Always unmap the data buffers if they were mapped by
2396	 * sdhci_prepare_data() whenever we finish with a request.
2397	 * This avoids leaking DMA mappings on error.
2398	 */
2399	if (host->flags & SDHCI_REQ_USE_DMA) {
2400		struct mmc_data *data = mrq->data;
2401
2402		if (data && data->host_cookie == COOKIE_MAPPED) {
2403			if (host->bounce_buffer) {
2404				/*
2405				 * On reads, copy the bounced data into the
2406				 * sglist
2407				 */
2408				if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
2409					unsigned int length = data->bytes_xfered;
2410
2411					if (length > host->bounce_buffer_size) {
2412						pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
2413						       mmc_hostname(host->mmc),
2414						       host->bounce_buffer_size,
2415						       data->bytes_xfered);
2416						/* Cap it down and continue */
2417						length = host->bounce_buffer_size;
2418					}
2419					dma_sync_single_for_cpu(
2420						host->mmc->parent,
2421						host->bounce_addr,
2422						host->bounce_buffer_size,
2423						DMA_FROM_DEVICE);
2424					sg_copy_from_buffer(data->sg,
2425						data->sg_len,
2426						host->bounce_buffer,
2427						length);
2428				} else {
2429					/* No copying, just switch ownership */
2430					dma_sync_single_for_cpu(
2431						host->mmc->parent,
2432						host->bounce_addr,
2433						host->bounce_buffer_size,
2434						mmc_get_dma_dir(data));
2435				}
2436			} else {
2437				/* Unmap the raw data */
2438				dma_unmap_sg(mmc_dev(host->mmc), data->sg,
2439					     data->sg_len,
2440					     mmc_get_dma_dir(data));
2441			}
2442			data->host_cookie = COOKIE_UNMAPPED;
2443		}
2444	}
2445
2446	/*
2447	 * The controller needs a reset of internal state machines
2448	 * upon error conditions.
2449	 */
2450	if (sdhci_needs_reset(host, mrq)) {
2451		/*
2452		 * Do not finish until command and data lines are available for
2453		 * reset. Note there can only be one other mrq, so it cannot
2454		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
2455		 * would both be null.
2456		 */
2457		if (host->cmd || host->data_cmd) {
2458			spin_unlock_irqrestore(&host->lock, flags);
2459			return true;
2460		}
2461
2462		/* Some controllers need this kick or reset won't work here */
2463		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2464			/* This is to force an update */
2465			host->ops->set_clock(host, host->clock);
2466
2467		/* Spec says we should do both at the same time, but Ricoh
2468		   controllers do not like that. */
2469		sdhci_do_reset(host, SDHCI_RESET_CMD);
2470		sdhci_do_reset(host, SDHCI_RESET_DATA);
2471
2472		host->pending_reset = false;
2473	}
2474
2475	if (!sdhci_has_requests(host))
2476		sdhci_led_deactivate(host);
2477
2478	host->mrqs_done[i] = NULL;
2479
2480	mmiowb();
2481	spin_unlock_irqrestore(&host->lock, flags);
2482
2483	mmc_request_done(host->mmc, mrq);
2484
2485	return false;
2486}
2487
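/*
 * Tasklet entry point: drain host->mrqs_done[] one request at a time
 * until sdhci_request_done() reports nothing left to do (it returns
 * true when the array is empty or a needed reset is still pending).
 */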
2488static void sdhci_tasklet_finish(unsigned long param)
2489{
2490	struct sdhci_host *host = (struct sdhci_host *)param;
2491
2492	while (!sdhci_request_done(host))
2493		;
2494}
2495
2496static void sdhci_timeout_timer(struct timer_list *t)
2497{
2498	struct sdhci_host *host;
2499	unsigned long flags;
2500
2501	host = from_timer(host, t, timer);
2502
2503	spin_lock_irqsave(&host->lock, flags);
2504
2505	if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
2506		pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
2507		       mmc_hostname(host->mmc));
2508		sdhci_dumpregs(host);
2509
2510		host->cmd->error = -ETIMEDOUT;
2511		sdhci_finish_mrq(host, host->cmd->mrq);
2512	}
2513
2514	mmiowb();
2515	spin_unlock_irqrestore(&host->lock, flags);
2516}
2517
2518static void sdhci_timeout_data_timer(struct timer_list *t)
2519{
2520	struct sdhci_host *host;
2521	unsigned long flags;
2522
2523	host = from_timer(host, t, data_timer);
2524
2525	spin_lock_irqsave(&host->lock, flags);
2526
2527	if (host->data || host->data_cmd ||
2528	    (host->cmd && sdhci_data_line_cmd(host->cmd))) {
2529		pr_err("%s: Timeout waiting for hardware interrupt.\n",
2530		       mmc_hostname(host->mmc));
2531		sdhci_dumpregs(host);
2532
2533		if (host->data) {
2534			host->data->error = -ETIMEDOUT;
2535			sdhci_finish_data(host);
2536		} else if (host->data_cmd) {
2537			host->data_cmd->error = -ETIMEDOUT;
2538			sdhci_finish_mrq(host, host->data_cmd->mrq);
2539		} else {
2540			host->cmd->error = -ETIMEDOUT;
2541			sdhci_finish_mrq(host, host->cmd->mrq);
2542		}
2543	}
2544
2545	mmiowb();
2546	spin_unlock_irqrestore(&host->lock, flags);
2547}
2548
2549/*****************************************************************************\
2550 *                                                                           *
2551 * Interrupt handling                                                        *
2552 *                                                                           *
2553\*****************************************************************************/
2554
2555static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
2556{
2557	if (!host->cmd) {
2558		/*
2559		 * SDHCI recovers from errors by resetting the cmd and data
2560		 * circuits.  Until that is done, there very well might be more
2561		 * interrupts, so ignore them in that case.
2562		 */
2563		if (host->pending_reset)
2564			return;
2565		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
2566		       mmc_hostname(host->mmc), (unsigned)intmask);
2567		sdhci_dumpregs(host);
2568		return;
2569	}
2570
2571	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
2572		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
2573		if (intmask & SDHCI_INT_TIMEOUT)
2574			host->cmd->error = -ETIMEDOUT;
2575		else
2576			host->cmd->error = -EILSEQ;
2577
2578		/*
2579		 * If this command initiates a data phase and a response
2580		 * CRC error is signalled, the card can start transferring
2581		 * data - the card may have received the command without
2582		 * error.  We must not terminate the mmc_request early.
2583		 *
2584		 * If the card did not receive the command or returned an
2585		 * error which prevented it sending data, the data phase
2586		 * will time out.
2587		 */
2588		if (host->cmd->data &&
2589		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
2590		     SDHCI_INT_CRC) {
2591			host->cmd = NULL;
2592			return;
2593		}
2594
2595		sdhci_finish_mrq(host, host->cmd->mrq);
2596		return;
2597	}
2598
2599	if (intmask & SDHCI_INT_RESPONSE)
2600		sdhci_finish_command(host);
2601}
2602
2603static void sdhci_adma_show_error(struct sdhci_host *host)
2604{
2605	void *desc = host->adma_table;
2606
2607	sdhci_dumpregs(host);
2608
2609	while (true) {
2610		struct sdhci_adma2_64_desc *dma_desc = desc;
2611
2612		if (host->flags & SDHCI_USE_64_BIT_DMA)
2613			DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2614			    desc, le32_to_cpu(dma_desc->addr_hi),
2615			    le32_to_cpu(dma_desc->addr_lo),
2616			    le16_to_cpu(dma_desc->len),
2617			    le16_to_cpu(dma_desc->cmd));
2618		else
2619			DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2620			    desc, le32_to_cpu(dma_desc->addr_lo),
2621			    le16_to_cpu(dma_desc->len),
2622			    le16_to_cpu(dma_desc->cmd));
2623
2624		desc += host->desc_sz;
2625
2626		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2627			break;
2628	}
2629}
2630
2631static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2632{
2633	u32 command;
2634
2635	/* CMD19 generates _only_ Buffer Read Ready interrupt */
2636	if (intmask & SDHCI_INT_DATA_AVAIL) {
2637		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
2638		if (command == MMC_SEND_TUNING_BLOCK ||
2639		    command == MMC_SEND_TUNING_BLOCK_HS200) {
2640			host->tuning_done = 1;
2641			wake_up(&host->buf_ready_int);
2642			return;
2643		}
2644	}
2645
2646	if (!host->data) {
2647		struct mmc_command *data_cmd = host->data_cmd;
2648
2649		/*
2650		 * The "data complete" interrupt is also used to
2651		 * indicate that a busy state has ended. See comment
2652		 * above in sdhci_cmd_irq().
2653		 */
2654		if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
2655			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2656				host->data_cmd = NULL;
2657				data_cmd->error = -ETIMEDOUT;
2658				sdhci_finish_mrq(host, data_cmd->mrq);
2659				return;
2660			}
2661			if (intmask & SDHCI_INT_DATA_END) {
2662				host->data_cmd = NULL;
2663				/*
2664				 * Some cards handle busy-end interrupt
2665				 * before the command completed, so make
2666				 * sure we do things in the proper order.
2667				 */
2668				if (host->cmd == data_cmd)
2669					return;
2670
2671				sdhci_finish_mrq(host, data_cmd->mrq);
2672				return;
2673			}
2674		}
2675
2676		/*
2677		 * SDHCI recovers from errors by resetting the cmd and data
2678		 * circuits. Until that is done, there very well might be more
2679		 * interrupts, so ignore them in that case.
2680		 */
2681		if (host->pending_reset)
2682			return;
2683
2684		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
2685		       mmc_hostname(host->mmc), (unsigned)intmask);
2686		sdhci_dumpregs(host);
2687
2688		return;
2689	}
2690
2691	if (intmask & SDHCI_INT_DATA_TIMEOUT)
2692		host->data->error = -ETIMEDOUT;
2693	else if (intmask & SDHCI_INT_DATA_END_BIT)
2694		host->data->error = -EILSEQ;
2695	else if ((intmask & SDHCI_INT_DATA_CRC) &&
2696		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2697			!= MMC_BUS_TEST_R)
2698		host->data->error = -EILSEQ;
2699	else if (intmask & SDHCI_INT_ADMA_ERROR) {
2700		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2701		sdhci_adma_show_error(host);
2702		host->data->error = -EIO;
2703		if (host->ops->adma_workaround)
2704			host->ops->adma_workaround(host, intmask);
2705	}
2706
2707	if (host->data->error)
2708		sdhci_finish_data(host);
2709	else {
2710		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2711			sdhci_transfer_pio(host);
2712
2713		/*
2714		 * We currently don't do anything fancy with DMA
2715		 * boundaries, but as we can't disable the feature
2716		 * we need to at least restart the transfer.
2717		 *
2718		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
2719		 * should return a valid address to continue from, but as
2720		 * some controllers are faulty, don't trust them.
2721		 */
2722		if (intmask & SDHCI_INT_DMA_END) {
2723			u32 dmastart, dmanow;
2724
2725			dmastart = sdhci_sdma_address(host);
2726			dmanow = dmastart + host->data->bytes_xfered;
2727			/*
2728			 * Force update to the next 512 KiB (default
2729			 * boundary size) DMA block boundary.
			 */
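			/* e.g. dmastart 0x10000000 + 0x86000 bytes transferred resumes at 0x10100000 */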
2730			dmanow = (dmanow &
2731				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
2732				SDHCI_DEFAULT_BOUNDARY_SIZE;
2733			host->data->bytes_xfered = dmanow - dmastart;
2734			DBG("DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
2735			    dmastart, host->data->bytes_xfered, dmanow);
2736			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
2737		}
2738
2739		if (intmask & SDHCI_INT_DATA_END) {
2740			if (host->cmd == host->data_cmd) {
2741				/*
2742				 * Data managed to finish before the
2743				 * command completed. Make sure we do
2744				 * things in the proper order.
2745				 */
2746				host->data_early = 1;
2747			} else {
2748				sdhci_finish_data(host);
2749			}
2750		}
2751	}
2752}
2753
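/*
 * Hard interrupt handler. It runs under host->lock with interrupts
 * disabled; work that may sleep (card-detect handling, SDIO interrupt
 * dispatch) is deferred to sdhci_thread_irq() by accumulating bits in
 * host->thread_isr and returning IRQ_WAKE_THREAD.
 */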
2754static irqreturn_t sdhci_irq(int irq, void *dev_id)
2755{
2756	irqreturn_t result = IRQ_NONE;
2757	struct sdhci_host *host = dev_id;
2758	u32 intmask, mask, unexpected = 0;
2759	int max_loops = 16;
2760
2761	spin_lock(&host->lock);
2762
2763	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
2764		spin_unlock(&host->lock);
2765		return IRQ_NONE;
2766	}
2767
2768	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2769	if (!intmask || intmask == 0xffffffff) {
2770		result = IRQ_NONE;
2771		goto out;
2772	}
2773
2774	do {
2775		DBG("IRQ status 0x%08x\n", intmask);
2776
2777		if (host->ops->irq) {
2778			intmask = host->ops->irq(host, intmask);
2779			if (!intmask)
2780				goto cont;
2781		}
2782
2783		/* Clear selected interrupts. */
2784		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2785				  SDHCI_INT_BUS_POWER);
2786		sdhci_writel(host, mask, SDHCI_INT_STATUS);
2787
2788		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2789			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
2790				      SDHCI_CARD_PRESENT;
2791
2792			/*
2793			 * There is an observation on i.MX eSDHC: the
2794			 * INSERT bit is immediately set again when it gets
2795			 * cleared if a card is inserted. We have to mask
2796			 * the irq to prevent an interrupt storm which
2797			 * would freeze the system. The REMOVE bit gets
2798			 * the same treatment.
2799			 *
2800			 * More testing is needed here to ensure it works
2801			 * for other platforms though.
2802			 */
2803			host->ier &= ~(SDHCI_INT_CARD_INSERT |
2804				       SDHCI_INT_CARD_REMOVE);
2805			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
2806					       SDHCI_INT_CARD_INSERT;
2807			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2808			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2809
2810			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
2811				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
2812
2813			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
2814						       SDHCI_INT_CARD_REMOVE);
2815			result = IRQ_WAKE_THREAD;
2816		}
2817
2818		if (intmask & SDHCI_INT_CMD_MASK)
2819			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
2820
2821		if (intmask & SDHCI_INT_DATA_MASK)
2822			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
2823
2824		if (intmask & SDHCI_INT_BUS_POWER)
2825			pr_err("%s: Card is consuming too much power!\n",
2826				mmc_hostname(host->mmc));
2827
2828		if (intmask & SDHCI_INT_RETUNE)
2829			mmc_retune_needed(host->mmc);
2830
2831		if ((intmask & SDHCI_INT_CARD_INT) &&
2832		    (host->ier & SDHCI_INT_CARD_INT)) {
2833			sdhci_enable_sdio_irq_nolock(host, false);
2834			host->thread_isr |= SDHCI_INT_CARD_INT;
2835			result = IRQ_WAKE_THREAD;
2836		}
2837
2838		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
2839			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2840			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
2841			     SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
2842
2843		if (intmask) {
2844			unexpected |= intmask;
2845			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
2846		}
2847cont:
2848		if (result == IRQ_NONE)
2849			result = IRQ_HANDLED;
2850
2851		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2852	} while (intmask && --max_loops);
2853out:
2854	spin_unlock(&host->lock);
2855
2856	if (unexpected) {
2857		pr_err("%s: Unexpected interrupt 0x%08x.\n",
2858			   mmc_hostname(host->mmc), unexpected);
2859		sdhci_dumpregs(host);
2860	}
2861
2862	return result;
2863}
2864
2865static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
2866{
2867	struct sdhci_host *host = dev_id;
2868	unsigned long flags;
2869	u32 isr;
2870
2871	spin_lock_irqsave(&host->lock, flags);
2872	isr = host->thread_isr;
2873	host->thread_isr = 0;
2874	spin_unlock_irqrestore(&host->lock, flags);
2875
2876	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2877		struct mmc_host *mmc = host->mmc;
2878
2879		mmc->ops->card_event(mmc);
2880		mmc_detect_change(mmc, msecs_to_jiffies(200));
2881	}
2882
2883	if (isr & SDHCI_INT_CARD_INT) {
2884		sdio_run_irqs(host->mmc);
2885
2886		spin_lock_irqsave(&host->lock, flags);
2887		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2888			sdhci_enable_sdio_irq_nolock(host, true);
2889		spin_unlock_irqrestore(&host->lock, flags);
2890	}
2891
2892	return isr ? IRQ_HANDLED : IRQ_NONE;
2893}
2894
2895/*****************************************************************************\
2896 *                                                                           *
2897 * Suspend/resume                                                            *
2898 *                                                                           *
2899\*****************************************************************************/
2900
2901#ifdef CONFIG_PM
2902
2903static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
2904{
2905	return mmc_card_is_removable(host->mmc) &&
2906	       !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
2907	       !mmc_can_gpio_cd(host->mmc);
2908}
2909
2910/*
2911 * To enable wakeup events, the corresponding events have to be enabled in
2912 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
2913 * Table' in the SD Host Controller Standard Specification.
2914 * It is useless to restore SDHCI_INT_ENABLE state in
2915 * sdhci_disable_irq_wakeups() since it will be set by
2916 * sdhci_enable_card_detection() or sdhci_init().
2917 */
2918static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
2919{
2920	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
2921		  SDHCI_WAKE_ON_INT;
2922	u32 irq_val = 0;
2923	u8 wake_val = 0;
2924	u8 val;
2925
2926	if (sdhci_cd_irq_can_wakeup(host)) {
2927		wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
2928		irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
2929	}
2930
2931	if (mmc_card_wake_sdio_irq(host->mmc)) {
2932		wake_val |= SDHCI_WAKE_ON_INT;
2933		irq_val |= SDHCI_INT_CARD_INT;
2934	}
2935
2936	if (!irq_val)
2937		return false;
2938
2939	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2940	val &= ~mask;
2941	val |= wake_val;
2942	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2943
2944	sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
2945
2946	host->irq_wake_enabled = !enable_irq_wake(host->irq);
2947
2948	return host->irq_wake_enabled;
2949}
2950
2951static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
2952{
2953	u8 val;
2954	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2955			| SDHCI_WAKE_ON_INT;
2956
2957	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2958	val &= ~mask;
2959	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2960
2961	disable_irq_wake(host->irq);
2962
2963	host->irq_wake_enabled = false;
2964}
2965
2966int sdhci_suspend_host(struct sdhci_host *host)
2967{
2968	sdhci_disable_card_detection(host);
2969
2970	mmc_retune_timer_stop(host->mmc);
2971
2972	if (!device_may_wakeup(mmc_dev(host->mmc)) ||
2973	    !sdhci_enable_irq_wakeups(host)) {
2974		host->ier = 0;
2975		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
2976		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2977		free_irq(host->irq, host);
2978	}
2979
2980	return 0;
2981}
2982
2983EXPORT_SYMBOL_GPL(sdhci_suspend_host);
2984
2985int sdhci_resume_host(struct sdhci_host *host)
2986{
2987	struct mmc_host *mmc = host->mmc;
2988	int ret = 0;
2989
2990	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2991		if (host->ops->enable_dma)
2992			host->ops->enable_dma(host);
2993	}
2994
2995	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
2996	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
2997		/* Card keeps power but host controller does not */
2998		sdhci_init(host, 0);
2999		host->pwr = 0;
3000		host->clock = 0;
3001		mmc->ops->set_ios(mmc, &mmc->ios);
3002	} else {
3003		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
3004		mmiowb();
3005	}
3006
3007	if (host->irq_wake_enabled) {
3008		sdhci_disable_irq_wakeups(host);
3009	} else {
3010		ret = request_threaded_irq(host->irq, sdhci_irq,
3011					   sdhci_thread_irq, IRQF_SHARED,
3012					   mmc_hostname(host->mmc), host);
3013		if (ret)
3014			return ret;
3015	}
3016
3017	sdhci_enable_card_detection(host);
3018
3019	return ret;
3020}
3021
3022EXPORT_SYMBOL_GPL(sdhci_resume_host);
3023
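/*
 * Runtime suspend: mask every interrupt except the SDIO card interrupt,
 * wait for any in-flight hard irq to finish, then mark the host
 * runtime-suspended so sdhci_irq() backs off (unless the SDIO irq is
 * still enabled).
 */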
3024int sdhci_runtime_suspend_host(struct sdhci_host *host)
3025{
3026	unsigned long flags;
3027
3028	mmc_retune_timer_stop(host->mmc);
3029
3030	spin_lock_irqsave(&host->lock, flags);
3031	host->ier &= SDHCI_INT_CARD_INT;
3032	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3033	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3034	spin_unlock_irqrestore(&host->lock, flags);
3035
3036	synchronize_hardirq(host->irq);
3037
3038	spin_lock_irqsave(&host->lock, flags);
3039	host->runtime_suspended = true;
3040	spin_unlock_irqrestore(&host->lock, flags);
3041
3042	return 0;
3043}
3044EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3045
3046int sdhci_runtime_resume_host(struct sdhci_host *host)
3047{
3048	struct mmc_host *mmc = host->mmc;
3049	unsigned long flags;
3050	int host_flags = host->flags;
3051
3052	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3053		if (host->ops->enable_dma)
3054			host->ops->enable_dma(host);
3055	}
3056
3057	sdhci_init(host, 0);
3058
3059	if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3060	    mmc->ios.power_mode != MMC_POWER_OFF) {
3061		/* Force clock and power re-program */
3062		host->pwr = 0;
3063		host->clock = 0;
3064		mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3065		mmc->ops->set_ios(mmc, &mmc->ios);
3066
3067		if ((host_flags & SDHCI_PV_ENABLED) &&
3068		    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3069			spin_lock_irqsave(&host->lock, flags);
3070			sdhci_enable_preset_value(host, true);
3071			spin_unlock_irqrestore(&host->lock, flags);
3072		}
3073
3074		if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3075		    mmc->ops->hs400_enhanced_strobe)
3076			mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3077	}
3078
3079	spin_lock_irqsave(&host->lock, flags);
3080
3081	host->runtime_suspended = false;
3082
3083	/* Enable SDIO IRQ */
3084	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
3085		sdhci_enable_sdio_irq_nolock(host, true);
3086
3087	/* Enable Card Detection */
3088	sdhci_enable_card_detection(host);
3089
3090	spin_unlock_irqrestore(&host->lock, flags);
3091
3092	return 0;
3093}
3094EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
3095
3096#endif /* CONFIG_PM */
3097
3098/*****************************************************************************\
3099 *                                                                           *
3100 * Command Queue Engine (CQE) helpers                                        *
3101 *                                                                           *
3102\*****************************************************************************/
3103
3104void sdhci_cqe_enable(struct mmc_host *mmc)
3105{
3106	struct sdhci_host *host = mmc_priv(mmc);
3107	unsigned long flags;
3108	u8 ctrl;
3109
3110	spin_lock_irqsave(&host->lock, flags);
3111
3112	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3113	ctrl &= ~SDHCI_CTRL_DMA_MASK;
3114	if (host->flags & SDHCI_USE_64_BIT_DMA)
3115		ctrl |= SDHCI_CTRL_ADMA64;
3116	else
3117		ctrl |= SDHCI_CTRL_ADMA32;
3118	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3119
3120	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
3121		     SDHCI_BLOCK_SIZE);
3122
3123	/* Set maximum timeout: 0xE selects the largest counter, TMCLK * 2^27 */
3124	sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL);
3125
3126	host->ier = host->cqe_ier;
3127
3128	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3129	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3130
3131	host->cqe_on = true;
3132
3133	pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3134		 mmc_hostname(mmc), host->ier,
3135		 sdhci_readl(host, SDHCI_INT_STATUS));
3136
3137	mmiowb();
3138	spin_unlock_irqrestore(&host->lock, flags);
3139}
3140EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3141
3142void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3143{
3144	struct sdhci_host *host = mmc_priv(mmc);
3145	unsigned long flags;
3146
3147	spin_lock_irqsave(&host->lock, flags);
3148
3149	sdhci_set_default_irqs(host);
3150
3151	host->cqe_on = false;
3152
3153	if (recovery) {
3154		sdhci_do_reset(host, SDHCI_RESET_CMD);
3155		sdhci_do_reset(host, SDHCI_RESET_DATA);
3156	}
3157
3158	pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3159		 mmc_hostname(mmc), host->ier,
3160		 sdhci_readl(host, SDHCI_INT_STATUS));
3161
3162	mmiowb();
3163	spin_unlock_irqrestore(&host->lock, flags);
3164}
3165EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3166
3167bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3168		   int *data_error)
3169{
3170	u32 mask;
3171
3172	if (!host->cqe_on)
3173		return false;
3174
3175	if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
3176		*cmd_error = -EILSEQ;
3177	else if (intmask & SDHCI_INT_TIMEOUT)
3178		*cmd_error = -ETIMEDOUT;
3179	else
3180		*cmd_error = 0;
3181
3182	if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
3183		*data_error = -EILSEQ;
3184	else if (intmask & SDHCI_INT_DATA_TIMEOUT)
3185		*data_error = -ETIMEDOUT;
3186	else if (intmask & SDHCI_INT_ADMA_ERROR)
3187		*data_error = -EIO;
3188	else
3189		*data_error = 0;
3190
3191	/* Clear selected interrupts. */
3192	mask = intmask & host->cqe_ier;
3193	sdhci_writel(host, mask, SDHCI_INT_STATUS);
3194
3195	if (intmask & SDHCI_INT_BUS_POWER)
3196		pr_err("%s: Card is consuming too much power!\n",
3197		       mmc_hostname(host->mmc));
3198
3199	intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3200	if (intmask) {
3201		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3202		pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3203		       mmc_hostname(host->mmc), intmask);
3204		sdhci_dumpregs(host);
3205	}
3206
3207	return true;
3208}
3209EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
3210
3211/*****************************************************************************\
3212 *                                                                           *
3213 * Device allocation/registration                                            *
3214 *                                                                           *
3215\*****************************************************************************/
3216
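/*
 * Allocate an mmc_host with an embedded sdhci_host plus priv_size bytes
 * of driver-private data (reachable via sdhci_priv()). The default ops
 * are copied into host->mmc_host_ops, so a glue driver may override
 * individual callbacks before calling sdhci_add_host().
 */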
3217struct sdhci_host *sdhci_alloc_host(struct device *dev,
3218	size_t priv_size)
3219{
3220	struct mmc_host *mmc;
3221	struct sdhci_host *host;
3222
3223	WARN_ON(dev == NULL);
3224
3225	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3226	if (!mmc)
3227		return ERR_PTR(-ENOMEM);
3228
3229	host = mmc_priv(mmc);
3230	host->mmc = mmc;
3231	host->mmc_host_ops = sdhci_ops;
3232	mmc->ops = &host->mmc_host_ops;
3233
3234	host->flags = SDHCI_SIGNALING_330;
3235
3236	host->cqe_ier     = SDHCI_CQE_INT_MASK;
3237	host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
3238
3239	host->tuning_delay = -1;
3240
3241	host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
3242
3243	return host;
3244}
3245
3246EXPORT_SYMBOL_GPL(sdhci_alloc_host);
3247
3248static int sdhci_set_dma_mask(struct sdhci_host *host)
3249{
3250	struct mmc_host *mmc = host->mmc;
3251	struct device *dev = mmc_dev(mmc);
3252	int ret = -EINVAL;
3253
3254	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
3255		host->flags &= ~SDHCI_USE_64_BIT_DMA;
3256
3257	/* Try 64-bit mask if hardware is capable of it */
3258	if (host->flags & SDHCI_USE_64_BIT_DMA) {
3259		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3260		if (ret) {
3261			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
3262				mmc_hostname(mmc));
3263			host->flags &= ~SDHCI_USE_64_BIT_DMA;
3264		}
3265	}
3266
3267	/* 32-bit mask as default & fallback */
3268	if (ret) {
3269		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3270		if (ret)
3271			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
3272				mmc_hostname(mmc));
3273	}
3274
3275	return ret;
3276}
3277
3278void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
3279{
3280	u16 v;
3281	u64 dt_caps_mask = 0;
3282	u64 dt_caps = 0;
3283
3284	if (host->read_caps)
3285		return;
3286
3287	host->read_caps = true;
3288
3289	if (debug_quirks)
3290		host->quirks = debug_quirks;
3291
3292	if (debug_quirks2)
3293		host->quirks2 = debug_quirks2;
3294
3295	sdhci_do_reset(host, SDHCI_RESET_ALL);
3296
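	/*
	 * The optional "sdhci-caps-mask" and "sdhci-caps" DT properties
	 * first clear and then set bits in the 64-bit concatenation of
	 * CAPABILITIES_1:CAPABILITIES, letting a board correct a controller
	 * that advertises modes it cannot actually run.
	 */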
3297	of_property_read_u64(mmc_dev(host->mmc)->of_node,
3298			     "sdhci-caps-mask", &dt_caps_mask);
3299	of_property_read_u64(mmc_dev(host->mmc)->of_node,
3300			     "sdhci-caps", &dt_caps);
3301
3302	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
3303	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
3304
3305	if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
3306		return;
3307
3308	if (caps) {
3309		host->caps = *caps;
3310	} else {
3311		host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
3312		host->caps &= ~lower_32_bits(dt_caps_mask);
3313		host->caps |= lower_32_bits(dt_caps);
3314	}
3315
3316	if (host->version < SDHCI_SPEC_300)
3317		return;
3318
3319	if (caps1) {
3320		host->caps1 = *caps1;
3321	} else {
3322		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
3323		host->caps1 &= ~upper_32_bits(dt_caps_mask);
3324		host->caps1 |= upper_32_bits(dt_caps);
3325	}
3326}
3327EXPORT_SYMBOL_GPL(__sdhci_read_caps);
3328
3329static int sdhci_allocate_bounce_buffer(struct sdhci_host *host)
3330{
3331	struct mmc_host *mmc = host->mmc;
3332	unsigned int max_blocks;
3333	unsigned int bounce_size;
3334	int ret;
3335
3336	/*
3337	 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
3338	 * has diminishing returns, this is probably because SD/MMC
3339	 * cards are usually optimized to handle this size of requests.
3340	 */
3341	bounce_size = SZ_64K;
3342	/*
3343	 * Adjust downwards to the maximum request size if that is
3344	 * smaller than our buffer, else cap the maximum request
3345	 * size to the buffer size (the limits are applied below).
3346	 */
3347	if (mmc->max_req_size < bounce_size)
3348		bounce_size = mmc->max_req_size;
3349	max_blocks = bounce_size / 512;
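	/* e.g. the default 64 KiB buffer allows 128 512-byte blocks */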
3350
3351	/*
3352	 * When we only support one segment, we can get significant
3353	 * speedups with the help of a bounce buffer that groups
3354	 * scattered reads/writes together.
3355	 */
3356	host->bounce_buffer = devm_kmalloc(mmc->parent,
3357					   bounce_size,
3358					   GFP_KERNEL);
3359	if (!host->bounce_buffer) {
3360		pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
3361		       mmc_hostname(mmc),
3362		       bounce_size);
3363		/*
3364		 * Exiting with zero here makes sure we proceed with
3365		 * mmc->max_segs == 1.
3366		 */
3367		return 0;
3368	}
3369
3370	host->bounce_addr = dma_map_single(mmc->parent,
3371					   host->bounce_buffer,
3372					   bounce_size,
3373					   DMA_BIDIRECTIONAL);
3374	ret = dma_mapping_error(mmc->parent, host->bounce_addr);
3375	if (ret)
3376		/* Again fall back to max_segs == 1 */
3377		return 0;
3378	host->bounce_buffer_size = bounce_size;
3379
3380	/* Lie about this since we're bouncing */
3381	mmc->max_segs = max_blocks;
3382	mmc->max_seg_size = bounce_size;
3383	mmc->max_req_size = bounce_size;
3384
3385	pr_info("%s: bounce up to %u segments into one, max segment size %u bytes\n",
3386		mmc_hostname(mmc), max_blocks, bounce_size);
3387
3388	return 0;
3389}
3390
3391int sdhci_setup_host(struct sdhci_host *host)
3392{
3393	struct mmc_host *mmc;
3394	u32 max_current_caps;
3395	unsigned int ocr_avail;
3396	unsigned int override_timeout_clk;
3397	u32 max_clk;
3398	int ret;
3399
3400	WARN_ON(host == NULL);
3401	if (host == NULL)
3402		return -EINVAL;
3403
3404	mmc = host->mmc;
3405
3406	/*
3407	 * If there are external regulators, get them. Note this must be done
3408	 * early before resetting the host and reading the capabilities so that
3409	 * the host can take the appropriate action if regulators are not
3410	 * available.
3411	 */
3412	ret = mmc_regulator_get_supply(mmc);
3413	if (ret)
3414		return ret;
3415
3416	DBG("Version:   0x%08x | Present:  0x%08x\n",
3417	    sdhci_readw(host, SDHCI_HOST_VERSION),
3418	    sdhci_readl(host, SDHCI_PRESENT_STATE));
3419	DBG("Caps:      0x%08x | Caps_1:   0x%08x\n",
3420	    sdhci_readl(host, SDHCI_CAPABILITIES),
3421	    sdhci_readl(host, SDHCI_CAPABILITIES_1));
3422
3423	sdhci_read_caps(host);
3424
3425	override_timeout_clk = host->timeout_clk;
3426
3427	if (host->version > SDHCI_SPEC_300) {
3428		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
3429		       mmc_hostname(mmc), host->version);
3430	}
3431
3432	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
3433		host->flags |= SDHCI_USE_SDMA;
3434	else if (!(host->caps & SDHCI_CAN_DO_SDMA))
3435		DBG("Controller doesn't have SDMA capability\n");
3436	else
3437		host->flags |= SDHCI_USE_SDMA;
3438
3439	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
3440		(host->flags & SDHCI_USE_SDMA)) {
3441		DBG("Disabling DMA as it is marked broken\n");
3442		host->flags &= ~SDHCI_USE_SDMA;
3443	}
3444
3445	if ((host->version >= SDHCI_SPEC_200) &&
3446		(host->caps & SDHCI_CAN_DO_ADMA2))
3447		host->flags |= SDHCI_USE_ADMA;
3448
3449	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
3450		(host->flags & SDHCI_USE_ADMA)) {
3451		DBG("Disabling ADMA as it is marked broken\n");
3452		host->flags &= ~SDHCI_USE_ADMA;
3453	}
3454
3455	/*
3456	 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
3457	 * and *must* do 64-bit DMA.  A driver has the opportunity to change
3458	 * that during the first call to ->enable_dma().  Similarly
3459	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
3460	 * implement.
3461	 */
3462	if (host->caps & SDHCI_CAN_64BIT)
3463		host->flags |= SDHCI_USE_64_BIT_DMA;
3464
3465	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3466		ret = sdhci_set_dma_mask(host);
3467
3468		if (!ret && host->ops->enable_dma)
3469			ret = host->ops->enable_dma(host);
3470
3471		if (ret) {
3472			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
3473				mmc_hostname(mmc));
3474			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
3475
3476			ret = 0;
3477		}
3478	}
3479
3480	/* SDMA does not support 64-bit DMA */
3481	if (host->flags & SDHCI_USE_64_BIT_DMA)
3482		host->flags &= ~SDHCI_USE_SDMA;
3483
3484	if (host->flags & SDHCI_USE_ADMA) {
3485		dma_addr_t dma;
3486		void *buf;
3487
3488		/*
3489		 * The DMA descriptor table size is calculated as the maximum
3490		 * number of segments times 2, to allow for an alignment
3491		 * descriptor for each segment, plus 1 for a nop end descriptor,
3492		 * all multiplied by the descriptor size.
3493		 */
3494		if (host->flags & SDHCI_USE_64_BIT_DMA) {
3495			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3496					      SDHCI_ADMA2_64_DESC_SZ;
3497			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
3498		} else {
3499			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3500					      SDHCI_ADMA2_32_DESC_SZ;
3501			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
3502		}
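		/*
		 * e.g. with the default SDHCI_MAX_SEGS of 128, the 64-bit
		 * layout needs (128 * 2 + 1) * 12 = 3084 descriptor bytes.
		 */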
3503
3504		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
3505		buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
3506					 host->adma_table_sz, &dma, GFP_KERNEL);
3507		if (!buf) {
3508			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
3509				mmc_hostname(mmc));
3510			host->flags &= ~SDHCI_USE_ADMA;
3511		} else if ((dma + host->align_buffer_sz) &
3512			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
3513			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
3514				mmc_hostname(mmc));
3515			host->flags &= ~SDHCI_USE_ADMA;
3516			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3517					  host->adma_table_sz, buf, dma);
3518		} else {
3519			host->align_buffer = buf;
3520			host->align_addr = dma;
3521
3522			host->adma_table = buf + host->align_buffer_sz;
3523			host->adma_addr = dma + host->align_buffer_sz;
3524		}
3525	}
3526
3527	/*
3528	 * If we use DMA, then it's up to the caller to set the DMA
3529	 * mask, but PIO does not need the hw shim so we set a new
3530	 * mask here in that case.
3531	 */
3532	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
3533		host->dma_mask = DMA_BIT_MASK(64);
3534		mmc_dev(mmc)->dma_mask = &host->dma_mask;
3535	}
3536
3537	if (host->version >= SDHCI_SPEC_300)
3538		host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
3539			>> SDHCI_CLOCK_BASE_SHIFT;
3540	else
3541		host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
3542			>> SDHCI_CLOCK_BASE_SHIFT;
3543
3544	host->max_clk *= 1000000;
3545	if (host->max_clk == 0 || host->quirks &
3546			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
3547		if (!host->ops->get_max_clock) {
3548			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
3549			       mmc_hostname(mmc));
3550			ret = -ENODEV;
3551			goto undma;
3552		}
3553		host->max_clk = host->ops->get_max_clock(host);
3554	}
3555
3556	/*
3557	 * In case of Host Controller v3.00, find out whether clock
3558	 * multiplier is supported.
3559	 */
3560	host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
3561			SDHCI_CLOCK_MUL_SHIFT;
3562
3563	/*
3564	 * If the value in Clock Multiplier is 0, programmable clock
3565	 * mode is not supported; otherwise the actual clock multiplier
3566	 * is one more than the Clock Multiplier field, e.g. a field
3567	 * value of 9 means the base clock can be multiplied by 10.
3568	 */
3569	if (host->clk_mul)
3570		host->clk_mul += 1;
3571
3572	/*
3573	 * Set host parameters.
3574	 */
3575	max_clk = host->max_clk;
3576
3577	if (host->ops->get_min_clock)
3578		mmc->f_min = host->ops->get_min_clock(host);
3579	else if (host->version >= SDHCI_SPEC_300) {
3580		if (host->clk_mul) {
3581			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
3582			max_clk = host->max_clk * host->clk_mul;
3583		} else
3584			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3585	} else
3586		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
3587
3588	if (!mmc->f_max || mmc->f_max > max_clk)
3589		mmc->f_max = max_clk;
3590
3591	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3592		host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
3593					SDHCI_TIMEOUT_CLK_SHIFT;
3594
3595		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
3596			host->timeout_clk *= 1000;
3597
3598		if (host->timeout_clk == 0) {
3599			if (!host->ops->get_timeout_clock) {
3600				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
3601					mmc_hostname(mmc));
3602				ret = -ENODEV;
3603				goto undma;
3604			}
3605
3606			host->timeout_clk =
3607				DIV_ROUND_UP(host->ops->get_timeout_clock(host),
3608					     1000);
3609		}
3610
3611		if (override_timeout_clk)
3612			host->timeout_clk = override_timeout_clk;
3613
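		/*
		 * host->timeout_clk is in kHz at this point, so dividing the
		 * maximum timeout count (2^27 ticks by default) by it gives
		 * mmc->max_busy_timeout in milliseconds.
		 */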
3614		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
3615			host->ops->get_max_timeout_count(host) : 1 << 27;
3616		mmc->max_busy_timeout /= host->timeout_clk;
3617	}
3618
3619	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
3620	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
3621
3622	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
3623		host->flags |= SDHCI_AUTO_CMD12;
3624
3625	/* Auto-CMD23 stuff only works in ADMA or PIO. */
3626	if ((host->version >= SDHCI_SPEC_300) &&
3627	    ((host->flags & SDHCI_USE_ADMA) ||
3628	     !(host->flags & SDHCI_USE_SDMA)) &&
3629	     !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
3630		host->flags |= SDHCI_AUTO_CMD23;
3631		DBG("Auto-CMD23 available\n");
3632	} else {
3633		DBG("Auto-CMD23 unavailable\n");
3634	}
3635
3636	/*
3637	 * A controller may support 8-bit width, but the board itself
3638	 * might not have the pins brought out.  Boards that support
3639	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
3640	 * their platform code before calling sdhci_add_host(), and we
3641	 * won't assume 8-bit width for hosts without that CAP.
3642	 */
3643	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
3644		mmc->caps |= MMC_CAP_4_BIT_DATA;
3645
3646	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
3647		mmc->caps &= ~MMC_CAP_CMD23;
3648
3649	if (host->caps & SDHCI_CAN_DO_HISPD)
3650		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3651
3652	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3653	    mmc_card_is_removable(mmc) &&
3654	    mmc_gpio_get_cd(host->mmc) < 0)
3655		mmc->caps |= MMC_CAP_NEEDS_POLL;
3656
3657	/* If the vqmmc regulator can't do 1.8V signalling, there's no UHS */
3658	if (!IS_ERR(mmc->supply.vqmmc)) {
3659		ret = regulator_enable(mmc->supply.vqmmc);
3660		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
3661						    1950000))
3662			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
3663					 SDHCI_SUPPORT_SDR50 |
3664					 SDHCI_SUPPORT_DDR50);
3665		if (ret) {
3666			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
3667				mmc_hostname(mmc), ret);
3668			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
3669		}
3670	}
3671
3672	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
3673		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3674				 SDHCI_SUPPORT_DDR50);
3675	}
3676
3677	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
3678	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3679			   SDHCI_SUPPORT_DDR50))
3680		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
3681
3682	/* SDR104 support also implies SDR50 support */
3683	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
3684		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3685		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
3686		 * field can be promoted to support HS200.
3687		 */
3688		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
3689			mmc->caps2 |= MMC_CAP2_HS200;
3690	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
3691		mmc->caps |= MMC_CAP_UHS_SDR50;
3692	}
3693
3694	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
3695	    (host->caps1 & SDHCI_SUPPORT_HS400))
3696		mmc->caps2 |= MMC_CAP2_HS400;
3697
3698	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
3699	    (IS_ERR(mmc->supply.vqmmc) ||
3700	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
3701					     1300000)))
3702		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
3703
3704	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
3705	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
3706		mmc->caps |= MMC_CAP_UHS_DDR50;
3707
3708	/* Does the host need tuning for SDR50? */
3709	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
3710		host->flags |= SDHCI_SDR50_NEEDS_TUNING;
3711
3712	/* Driver Type(s) (A, C, D) supported by the host */
3713	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
3714		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
3715	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
3716		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
3717	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
3718		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
3719
3720	/* Initial value for re-tuning timer count */
3721	host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
3722			     SDHCI_RETUNING_TIMER_COUNT_SHIFT;
3723
3724	/*
3725	 * If the Re-tuning Timer is not disabled, the actual re-tuning
3726	 * interval is 2 ^ (n - 1) seconds.
3727	 */
3728	if (host->tuning_count)
3729		host->tuning_count = 1 << (host->tuning_count - 1);
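	/*
	 * Worked example (editor's illustration): a raw caps1 field of
	 * n = 3 gives host->tuning_count = 1 << (3 - 1) = 4, i.e. re-tuning
	 * about every 4 seconds; n = 0 leaves the re-tuning timer disabled.
	 */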
3730
3731	/* Re-tuning mode supported by the Host Controller */
3732	host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
3733			     SDHCI_RETUNING_MODE_SHIFT;
3734
3735	ocr_avail = 0;
3736
3737	/*
3738	 * According to SD Host Controller spec v3.00, if the Host System
3739	 * can supply more than 150mA, the Host Driver should set XPC to 1. Also
3740	 * the value is meaningful only if Voltage Support in the Capabilities
3741	 * register is set. The actual current value is 4 times the register
3742	 * value.
3743	 */
3744	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
3745	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
3746		int curr = regulator_get_current_limit(mmc->supply.vmmc);
3747		if (curr > 0) {
3748
3749			/* convert to SDHCI_MAX_CURRENT format */
3750			curr = curr / 1000;  /* convert to mA */
3751			curr = curr / SDHCI_MAX_CURRENT_MULTIPLIER;
3752
3753			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
3754			max_current_caps =
3755				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
3756				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
3757				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
3758		}
3759	}
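	/*
	 * Worked example (editor's illustration, hypothetical regulator):
	 * regulator_get_current_limit() returning 800000 uA becomes 800 mA,
	 * then 800 / SDHCI_MAX_CURRENT_MULTIPLIER (4) = 200, below
	 * SDHCI_MAX_CURRENT_LIMIT, and is replicated into the 3.3V, 3.0V
	 * and 1.8V fields of max_current_caps.
	 */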
3760
3761	if (host->caps & SDHCI_CAN_VDD_330) {
3762		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
3763
3764		mmc->max_current_330 = ((max_current_caps &
3765				   SDHCI_MAX_CURRENT_330_MASK) >>
3766				   SDHCI_MAX_CURRENT_330_SHIFT) *
3767				   SDHCI_MAX_CURRENT_MULTIPLIER;
3768	}
3769	if (host->caps & SDHCI_CAN_VDD_300) {
3770		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
3771
3772		mmc->max_current_300 = ((max_current_caps &
3773				   SDHCI_MAX_CURRENT_300_MASK) >>
3774				   SDHCI_MAX_CURRENT_300_SHIFT) *
3775				   SDHCI_MAX_CURRENT_MULTIPLIER;
3776	}
3777	if (host->caps & SDHCI_CAN_VDD_180) {
3778		ocr_avail |= MMC_VDD_165_195;
3779
3780		mmc->max_current_180 = ((max_current_caps &
3781				   SDHCI_MAX_CURRENT_180_MASK) >>
3782				   SDHCI_MAX_CURRENT_180_SHIFT) *
3783				   SDHCI_MAX_CURRENT_MULTIPLIER;
3784	}
3785
3786	/* If OCR set by host, use it instead. */
3787	if (host->ocr_mask)
3788		ocr_avail = host->ocr_mask;
3789
3790	/* If OCR set by external regulators, give it highest prio. */
3791	if (mmc->ocr_avail)
3792		ocr_avail = mmc->ocr_avail;
3793
3794	mmc->ocr_avail = ocr_avail;
3795	mmc->ocr_avail_sdio = ocr_avail;
3796	if (host->ocr_avail_sdio)
3797		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
3798	mmc->ocr_avail_sd = ocr_avail;
3799	if (host->ocr_avail_sd)
3800		mmc->ocr_avail_sd &= host->ocr_avail_sd;
3801	else /* normal SD controllers don't support 1.8V */
3802		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
3803	mmc->ocr_avail_mmc = ocr_avail;
3804	if (host->ocr_avail_mmc)
3805		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
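	/*
	 * Worked example (editor's illustration): a controller advertising
	 * only SDHCI_CAN_VDD_330 yields ocr_avail = MMC_VDD_32_33 |
	 * MMC_VDD_33_34; had it also set SDHCI_CAN_VDD_180 with
	 * host->ocr_avail_sd left at zero, the else branch above would
	 * strip MMC_VDD_165_195 from mmc->ocr_avail_sd.
	 */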
3806
3807	if (mmc->ocr_avail == 0) {
3808		pr_err("%s: Hardware doesn't report any supported voltages.\n",
3809		       mmc_hostname(mmc));
3810		ret = -ENODEV;
3811		goto unreg;
3812	}
3813
3814	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
3815			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
3816			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
3817	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
3818		host->flags |= SDHCI_SIGNALING_180;
3819
3820	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
3821		host->flags |= SDHCI_SIGNALING_120;
3822
3823	spin_lock_init(&host->lock);
3824
3825	/*
3826	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
3827	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but
3828	 * 512KiB is the smaller limit anyway.
3829	 */
3830	mmc->max_req_size = 524288;
3831
3832	/*
3833	 * Maximum number of segments. Depends on whether the hardware
3834	 * can do scatter/gather.
3835	 */
3836	if (host->flags & SDHCI_USE_ADMA) {
3837		mmc->max_segs = SDHCI_MAX_SEGS;
3838	} else if (host->flags & SDHCI_USE_SDMA) {
3839		mmc->max_segs = 1;
3840		if (swiotlb_max_segment()) {
3841			unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
3842						IO_TLB_SEGSIZE;
3843			mmc->max_req_size = min(mmc->max_req_size,
3844						max_req_size);
3845		}
3846	} else { /* PIO */
3847		mmc->max_segs = SDHCI_MAX_SEGS;
3848	}
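	/*
	 * Worked example (editor's illustration): with the usual swiotlb
	 * geometry of 2 KiB slabs (IO_TLB_SHIFT = 11) and 128 slabs per
	 * segment (IO_TLB_SEGSIZE), the SDMA request size is capped at
	 * (1 << 11) * 128 = 256 KiB, below the 512 KiB default above.
	 */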
3849
3850	/*
3851	 * Maximum segment size. Could be one segment with the maximum number
3852	 * of bytes. When doing hardware scatter/gather, each entry cannot
3853	 * be larger than 64 KiB though.
3854	 */
3855	if (host->flags & SDHCI_USE_ADMA) {
3856		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
3857			mmc->max_seg_size = 65535;
3858		else
3859			mmc->max_seg_size = 65536;
3860	} else {
3861		mmc->max_seg_size = mmc->max_req_size;
3862	}
3863
3864	/*
3865	 * Maximum block size. This varies from controller to controller and
3866	 * is specified in the capabilities register.
3867	 */
3868	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
3869		mmc->max_blk_size = 2;
3870	} else {
3871		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
3872				SDHCI_MAX_BLOCK_SHIFT;
3873		if (mmc->max_blk_size >= 3) {
3874			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
3875				mmc_hostname(mmc));
3876			mmc->max_blk_size = 0;
3877		}
3878	}
3879
3880	mmc->max_blk_size = 512 << mmc->max_blk_size;
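	/*
	 * Worked example (editor's illustration): a SDHCI_MAX_BLOCK_MASK
	 * field of 2 yields 512 << 2 = 2048 bytes, the same value the
	 * SDHCI_QUIRK_FORCE_BLK_SZ_2048 path above forces directly.
	 */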
3881
3882	/*
3883	 * Maximum block count.
3884	 */
3885	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
3886
3887	if (mmc->max_segs == 1) {
3888		/* This may alter mmc->*_blk_* parameters */
3889		ret = sdhci_allocate_bounce_buffer(host);
3890		if (ret)
3891			return ret;
3892	}
3893
3894	return 0;
3895
3896unreg:
3897	if (!IS_ERR(mmc->supply.vqmmc))
3898		regulator_disable(mmc->supply.vqmmc);
3899undma:
3900	if (host->align_buffer)
3901		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3902				  host->adma_table_sz, host->align_buffer,
3903				  host->align_addr);
3904	host->adma_table = NULL;
3905	host->align_buffer = NULL;
3906
3907	return ret;
3908}
3909EXPORT_SYMBOL_GPL(sdhci_setup_host);
3910
3911void sdhci_cleanup_host(struct sdhci_host *host)
3912{
3913	struct mmc_host *mmc = host->mmc;
3914
3915	if (!IS_ERR(mmc->supply.vqmmc))
3916		regulator_disable(mmc->supply.vqmmc);
3917
3918	if (host->align_buffer)
3919		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3920				  host->adma_table_sz, host->align_buffer,
3921				  host->align_addr);
3922	host->adma_table = NULL;
3923	host->align_buffer = NULL;
3924}
3925EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
3926
3927int __sdhci_add_host(struct sdhci_host *host)
3928{
3929	struct mmc_host *mmc = host->mmc;
3930	int ret;
3931
3932	/*
3933	 * Init the finish tasklet.
3934	 */
3935	tasklet_init(&host->finish_tasklet,
3936		sdhci_tasklet_finish, (unsigned long)host);
3937
3938	timer_setup(&host->timer, sdhci_timeout_timer, 0);
3939	timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
3940
3941	init_waitqueue_head(&host->buf_ready_int);
3942
3943	sdhci_init(host, 0);
3944
3945	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
3946				   IRQF_SHARED,	mmc_hostname(mmc), host);
3947	if (ret) {
3948		pr_err("%s: Failed to request IRQ %d: %d\n",
3949		       mmc_hostname(mmc), host->irq, ret);
3950		goto untasklet;
3951	}
3952
3953	ret = sdhci_led_register(host);
3954	if (ret) {
3955		pr_err("%s: Failed to register LED device: %d\n",
3956		       mmc_hostname(mmc), ret);
3957		goto unirq;
3958	}
3959
3960	mmiowb();
3961
3962	ret = mmc_add_host(mmc);
3963	if (ret)
3964		goto unled;
3965
3966	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
3967		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
3968		(host->flags & SDHCI_USE_ADMA) ?
3969		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
3970		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
3971
3972	sdhci_enable_card_detection(host);
3973
3974	return 0;
3975
3976unled:
3977	sdhci_led_unregister(host);
3978unirq:
3979	sdhci_do_reset(host, SDHCI_RESET_ALL);
3980	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3981	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3982	free_irq(host->irq, host);
3983untasklet:
3984	tasklet_kill(&host->finish_tasklet);
3985
3986	return ret;
3987}
3988EXPORT_SYMBOL_GPL(__sdhci_add_host);
3989
3990int sdhci_add_host(struct sdhci_host *host)
3991{
3992	int ret;
3993
3994	ret = sdhci_setup_host(host);
3995	if (ret)
3996		return ret;
3997
3998	ret = __sdhci_add_host(host);
3999	if (ret)
4000		goto cleanup;
4001
4002	return 0;
4003
4004cleanup:
4005	sdhci_cleanup_host(host);
4006
4007	return ret;
4008}
4009EXPORT_SYMBOL_GPL(sdhci_add_host);
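/*
 * Usage sketch (editor's illustration, not upstream code): a glue driver
 * that needs to patch capabilities between probing the hardware and
 * registering the host calls the two halves of sdhci_add_host() itself.
 * The example_* name is hypothetical.
 */
#if 0
static int example_probe_host(struct sdhci_host *host)
{
	int ret;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	/* e.g. a board with all eight data pins wired up */
	host->mmc->caps |= MMC_CAP_8_BIT_DATA;

	ret = __sdhci_add_host(host);
	if (ret)
		sdhci_cleanup_host(host);

	return ret;
}
#endif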
4010
4011void sdhci_remove_host(struct sdhci_host *host, int dead)
4012{
4013	struct mmc_host *mmc = host->mmc;
4014	unsigned long flags;
4015
4016	if (dead) {
4017		spin_lock_irqsave(&host->lock, flags);
4018
4019		host->flags |= SDHCI_DEVICE_DEAD;
4020
4021		if (sdhci_has_requests(host)) {
4022			pr_err("%s: Controller removed during transfer!\n",
4023				mmc_hostname(mmc));
4024			sdhci_error_out_mrqs(host, -ENOMEDIUM);
4025		}
4026
4027		spin_unlock_irqrestore(&host->lock, flags);
4028	}
4029
4030	sdhci_disable_card_detection(host);
4031
4032	mmc_remove_host(mmc);
4033
4034	sdhci_led_unregister(host);
4035
4036	if (!dead)
4037		sdhci_do_reset(host, SDHCI_RESET_ALL);
4038
4039	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4040	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4041	free_irq(host->irq, host);
4042
4043	del_timer_sync(&host->timer);
4044	del_timer_sync(&host->data_timer);
4045
4046	tasklet_kill(&host->finish_tasklet);
4047
4048	if (!IS_ERR(mmc->supply.vqmmc))
4049		regulator_disable(mmc->supply.vqmmc);
4050
4051	if (host->align_buffer)
4052		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4053				  host->adma_table_sz, host->align_buffer,
4054				  host->align_addr);
4055
4056	host->adma_table = NULL;
4057	host->align_buffer = NULL;
4058}
4059
4060EXPORT_SYMBOL_GPL(sdhci_remove_host);
4061
4062void sdhci_free_host(struct sdhci_host *host)
4063{
4064	mmc_free_host(host->mmc);
4065}
4066
4067EXPORT_SYMBOL_GPL(sdhci_free_host);
4068
4069/*****************************************************************************\
4070 *                                                                           *
4071 * Driver init/exit                                                          *
4072 *                                                                           *
4073\*****************************************************************************/
4074
4075static int __init sdhci_drv_init(void)
4076{
4077	pr_info(DRIVER_NAME
4078		": Secure Digital Host Controller Interface driver\n");
4079	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
4080
4081	return 0;
4082}
4083
4084static void __exit sdhci_drv_exit(void)
4085{
4086}
4087
4088module_init(sdhci_drv_init);
4089module_exit(sdhci_drv_exit);
4090
4091module_param(debug_quirks, uint, 0444);
4092module_param(debug_quirks2, uint, 0444);
4093
4094MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
4095MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
4096MODULE_LICENSE("GPL");
4097
4098MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
4099MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
v3.15
   1/*
   2 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
   3 *
   4 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or (at
   9 * your option) any later version.
  10 *
  11 * Thanks to the following companies for their support:
  12 *
  13 *     - JMicron (hardware and technical support)
  14 */
  15
  16#include <linux/delay.h>
 
  17#include <linux/highmem.h>
  18#include <linux/io.h>
  19#include <linux/module.h>
  20#include <linux/dma-mapping.h>
  21#include <linux/slab.h>
  22#include <linux/scatterlist.h>
 
 
  23#include <linux/regulator/consumer.h>
  24#include <linux/pm_runtime.h>
 
  25
  26#include <linux/leds.h>
  27
  28#include <linux/mmc/mmc.h>
  29#include <linux/mmc/host.h>
  30#include <linux/mmc/card.h>
 
  31#include <linux/mmc/slot-gpio.h>
  32
  33#include "sdhci.h"
  34
  35#define DRIVER_NAME "sdhci"
  36
  37#define DBG(f, x...) \
  38	pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
  39
  40#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
  41	defined(CONFIG_MMC_SDHCI_MODULE))
  42#define SDHCI_USE_LEDS_CLASS
  43#endif
  44
  45#define MAX_TUNING_LOOP 40
  46
  47static unsigned int debug_quirks = 0;
  48static unsigned int debug_quirks2;
  49
  50static void sdhci_finish_data(struct sdhci_host *);
  51
  52static void sdhci_finish_command(struct sdhci_host *);
  53static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
  54static void sdhci_tuning_timer(unsigned long data);
  55static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
  56
  57#ifdef CONFIG_PM_RUNTIME
  58static int sdhci_runtime_pm_get(struct sdhci_host *host);
  59static int sdhci_runtime_pm_put(struct sdhci_host *host);
  60static void sdhci_runtime_pm_bus_on(struct sdhci_host *host);
  61static void sdhci_runtime_pm_bus_off(struct sdhci_host *host);
  62#else
  63static inline int sdhci_runtime_pm_get(struct sdhci_host *host)
  64{
  65	return 0;
  66}
  67static inline int sdhci_runtime_pm_put(struct sdhci_host *host)
  68{
  69	return 0;
  70}
  71static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
  72{
  73}
  74static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
  75{
  76}
  77#endif
  78
  79static void sdhci_dumpregs(struct sdhci_host *host)
  80{
  81	pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
  82		mmc_hostname(host->mmc));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  83
  84	pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
  85		sdhci_readl(host, SDHCI_DMA_ADDRESS),
  86		sdhci_readw(host, SDHCI_HOST_VERSION));
  87	pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
  88		sdhci_readw(host, SDHCI_BLOCK_SIZE),
  89		sdhci_readw(host, SDHCI_BLOCK_COUNT));
  90	pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
  91		sdhci_readl(host, SDHCI_ARGUMENT),
  92		sdhci_readw(host, SDHCI_TRANSFER_MODE));
  93	pr_debug(DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
  94		sdhci_readl(host, SDHCI_PRESENT_STATE),
  95		sdhci_readb(host, SDHCI_HOST_CONTROL));
  96	pr_debug(DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
  97		sdhci_readb(host, SDHCI_POWER_CONTROL),
  98		sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
  99	pr_debug(DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
 100		sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
 101		sdhci_readw(host, SDHCI_CLOCK_CONTROL));
 102	pr_debug(DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
 103		sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
 104		sdhci_readl(host, SDHCI_INT_STATUS));
 105	pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
 106		sdhci_readl(host, SDHCI_INT_ENABLE),
 107		sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
 108	pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
 109		sdhci_readw(host, SDHCI_ACMD12_ERR),
 110		sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
 111	pr_debug(DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
 112		sdhci_readl(host, SDHCI_CAPABILITIES),
 113		sdhci_readl(host, SDHCI_CAPABILITIES_1));
 114	pr_debug(DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
 115		sdhci_readw(host, SDHCI_COMMAND),
 116		sdhci_readl(host, SDHCI_MAX_CURRENT));
 117	pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
 118		sdhci_readw(host, SDHCI_HOST_CONTROL2));
 119
 120	if (host->flags & SDHCI_USE_ADMA)
 121		pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
 122		       readl(host->ioaddr + SDHCI_ADMA_ERROR),
 123		       readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
 124
 125	pr_debug(DRIVER_NAME ": ===========================================\n");
 126}
 
 127
 128/*****************************************************************************\
 129 *                                                                           *
 130 * Low level functions                                                       *
 131 *                                                                           *
 132\*****************************************************************************/
 133
 134static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set)
 135{
 136	u32 ier;
 137
 138	ier = sdhci_readl(host, SDHCI_INT_ENABLE);
 139	ier &= ~clear;
 140	ier |= set;
 141	sdhci_writel(host, ier, SDHCI_INT_ENABLE);
 142	sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
 143}
 144
 145static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs)
 146{
 147	sdhci_clear_set_irqs(host, 0, irqs);
 148}
 149
 150static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs)
 151{
 152	sdhci_clear_set_irqs(host, irqs, 0);
 153}
 154
 155static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
 156{
 157	u32 present, irqs;
 158
 159	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
 160	    (host->mmc->caps & MMC_CAP_NONREMOVABLE))
 161		return;
 162
 163	present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
 164			      SDHCI_CARD_PRESENT;
 165	irqs = present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT;
 166
 167	if (enable)
 168		sdhci_unmask_irqs(host, irqs);
 169	else
 170		sdhci_mask_irqs(host, irqs);
 
 
 
 
 171}
 172
 173static void sdhci_enable_card_detection(struct sdhci_host *host)
 174{
 175	sdhci_set_card_detection(host, true);
 176}
 177
 178static void sdhci_disable_card_detection(struct sdhci_host *host)
 179{
 180	sdhci_set_card_detection(host, false);
 181}
 182
 183static void sdhci_reset(struct sdhci_host *host, u8 mask)
 184{
 185	unsigned long timeout;
 186	u32 uninitialized_var(ier);
 
 
 
 187
 188	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
 189		if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
 190			SDHCI_CARD_PRESENT))
 191			return;
 192	}
 
 
 193
 194	if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
 195		ier = sdhci_readl(host, SDHCI_INT_ENABLE);
 196
 197	if (host->ops->platform_reset_enter)
 198		host->ops->platform_reset_enter(host, mask);
 199
 200	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
 201
 202	if (mask & SDHCI_RESET_ALL) {
 203		host->clock = 0;
 204		/* Reset-all turns off SD Bus Power */
 205		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
 206			sdhci_runtime_pm_bus_off(host);
 207	}
 208
 209	/* Wait max 100 ms */
 210	timeout = 100;
 211
 212	/* hw clears the bit when it's done */
 213	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
 214		if (timeout == 0) {
 215			pr_err("%s: Reset 0x%x never completed.\n",
 216				mmc_hostname(host->mmc), (int)mask);
 217			sdhci_dumpregs(host);
 218			return;
 219		}
 220		timeout--;
 221		mdelay(1);
 
 
 
 
 
 
 
 
 
 
 222	}
 223
 224	if (host->ops->platform_reset_exit)
 225		host->ops->platform_reset_exit(host, mask);
 226
 227	if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
 228		sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier);
 
 
 
 229
 230	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
 231		if ((host->ops->enable_dma) && (mask & SDHCI_RESET_ALL))
 232			host->ops->enable_dma(host);
 233	}
 234}
 235
 236static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 237
 238static void sdhci_init(struct sdhci_host *host, int soft)
 239{
 
 
 240	if (soft)
 241		sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
 242	else
 243		sdhci_reset(host, SDHCI_RESET_ALL);
 
 
 244
 245	sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK,
 246		SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
 247		SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
 248		SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
 249		SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE);
 250
 251	if (soft) {
 252		/* force clock reconfiguration */
 253		host->clock = 0;
 254		sdhci_set_ios(host->mmc, &host->mmc->ios);
 255	}
 256}
 257
 258static void sdhci_reinit(struct sdhci_host *host)
 259{
 260	sdhci_init(host, 0);
 261	/*
 262	 * Retuning stuffs are affected by different cards inserted and only
 263	 * applicable to UHS-I cards. So reset these fields to their initial
 264	 * value when card is removed.
 265	 */
 266	if (host->flags & SDHCI_USING_RETUNING_TIMER) {
 267		host->flags &= ~SDHCI_USING_RETUNING_TIMER;
 268
 269		del_timer_sync(&host->tuning_timer);
 270		host->flags &= ~SDHCI_NEEDS_RETUNING;
 271		host->mmc->max_blk_count =
 272			(host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
 273	}
 274	sdhci_enable_card_detection(host);
 275}
 276
 277static void sdhci_activate_led(struct sdhci_host *host)
 278{
 279	u8 ctrl;
 280
 281	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
 282	ctrl |= SDHCI_CTRL_LED;
 283	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
 284}
 285
 286static void sdhci_deactivate_led(struct sdhci_host *host)
 287{
 288	u8 ctrl;
 289
 290	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
 291	ctrl &= ~SDHCI_CTRL_LED;
 292	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
 293}
 294
 295#ifdef SDHCI_USE_LEDS_CLASS
 296static void sdhci_led_control(struct led_classdev *led,
 297	enum led_brightness brightness)
 298{
 299	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
 300	unsigned long flags;
 301
 302	spin_lock_irqsave(&host->lock, flags);
 303
 304	if (host->runtime_suspended)
 305		goto out;
 306
 307	if (brightness == LED_OFF)
 308		sdhci_deactivate_led(host);
 309	else
 310		sdhci_activate_led(host);
 311out:
 312	spin_unlock_irqrestore(&host->lock, flags);
 313}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 314#endif
 315
 316/*****************************************************************************\
 317 *                                                                           *
 318 * Core functions                                                            *
 319 *                                                                           *
 320\*****************************************************************************/
 321
 322static void sdhci_read_block_pio(struct sdhci_host *host)
 323{
 324	unsigned long flags;
 325	size_t blksize, len, chunk;
 326	u32 uninitialized_var(scratch);
 327	u8 *buf;
 328
 329	DBG("PIO reading\n");
 330
 331	blksize = host->data->blksz;
 332	chunk = 0;
 333
 334	local_irq_save(flags);
 335
 336	while (blksize) {
 337		if (!sg_miter_next(&host->sg_miter))
 338			BUG();
 339
 340		len = min(host->sg_miter.length, blksize);
 341
 342		blksize -= len;
 343		host->sg_miter.consumed = len;
 344
 345		buf = host->sg_miter.addr;
 346
 347		while (len) {
 348			if (chunk == 0) {
 349				scratch = sdhci_readl(host, SDHCI_BUFFER);
 350				chunk = 4;
 351			}
 352
 353			*buf = scratch & 0xFF;
 354
 355			buf++;
 356			scratch >>= 8;
 357			chunk--;
 358			len--;
 359		}
 360	}
 361
 362	sg_miter_stop(&host->sg_miter);
 363
 364	local_irq_restore(flags);
 365}
 366
 367static void sdhci_write_block_pio(struct sdhci_host *host)
 368{
 369	unsigned long flags;
 370	size_t blksize, len, chunk;
 371	u32 scratch;
 372	u8 *buf;
 373
 374	DBG("PIO writing\n");
 375
 376	blksize = host->data->blksz;
 377	chunk = 0;
 378	scratch = 0;
 379
 380	local_irq_save(flags);
 381
 382	while (blksize) {
 383		if (!sg_miter_next(&host->sg_miter))
 384			BUG();
 385
 386		len = min(host->sg_miter.length, blksize);
 387
 388		blksize -= len;
 389		host->sg_miter.consumed = len;
 390
 391		buf = host->sg_miter.addr;
 392
 393		while (len) {
 394			scratch |= (u32)*buf << (chunk * 8);
 395
 396			buf++;
 397			chunk++;
 398			len--;
 399
 400			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
 401				sdhci_writel(host, scratch, SDHCI_BUFFER);
 402				chunk = 0;
 403				scratch = 0;
 404			}
 405		}
 406	}
 407
 408	sg_miter_stop(&host->sg_miter);
 409
 410	local_irq_restore(flags);
 411}
 412
 413static void sdhci_transfer_pio(struct sdhci_host *host)
 414{
 415	u32 mask;
 416
 417	BUG_ON(!host->data);
 418
 419	if (host->blocks == 0)
 420		return;
 421
 422	if (host->data->flags & MMC_DATA_READ)
 423		mask = SDHCI_DATA_AVAILABLE;
 424	else
 425		mask = SDHCI_SPACE_AVAILABLE;
 426
 427	/*
 428	 * Some controllers (JMicron JMB38x) mess up the buffer bits
 429	 * for transfers < 4 bytes. As long as it is just one block,
 430	 * we can ignore the bits.
 431	 */
 432	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
 433		(host->data->blocks == 1))
 434		mask = ~0;
 435
 436	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
 437		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
 438			udelay(100);
 439
 440		if (host->data->flags & MMC_DATA_READ)
 441			sdhci_read_block_pio(host);
 442		else
 443			sdhci_write_block_pio(host);
 444
 445		host->blocks--;
 446		if (host->blocks == 0)
 447			break;
 448	}
 449
 450	DBG("PIO transfer complete.\n");
 451}
 452
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 453static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
 454{
 455	local_irq_save(*flags);
 456	return kmap_atomic(sg_page(sg)) + sg->offset;
 457}
 458
 459static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
 460{
 461	kunmap_atomic(buffer);
 462	local_irq_restore(*flags);
 463}
 464
 465static void sdhci_set_adma_desc(u8 *desc, u32 addr, int len, unsigned cmd)
 
 466{
 467	__le32 *dataddr = (__le32 __force *)(desc + 4);
 468	__le16 *cmdlen = (__le16 __force *)desc;
 469
 470	/* SDHCI specification says ADMA descriptors should be 4 byte
 471	 * aligned, so using 16 or 32bit operations should be safe. */
 
 
 472
 473	cmdlen[0] = cpu_to_le16(cmd);
 474	cmdlen[1] = cpu_to_le16(len);
 475
 476	dataddr[0] = cpu_to_le32(addr);
 477}
 478
 479static int sdhci_adma_table_pre(struct sdhci_host *host,
 480	struct mmc_data *data)
 481{
 482	int direction;
 483
 484	u8 *desc;
 485	u8 *align;
 486	dma_addr_t addr;
 487	dma_addr_t align_addr;
 488	int len, offset;
 489
 
 
 
 490	struct scatterlist *sg;
 491	int i;
 
 
 492	char *buffer;
 493	unsigned long flags;
 494
 495	/*
 496	 * The spec does not specify endianness of descriptor table.
 497	 * We currently guess that it is LE.
 498	 */
 499
 500	if (data->flags & MMC_DATA_READ)
 501		direction = DMA_FROM_DEVICE;
 502	else
 503		direction = DMA_TO_DEVICE;
 504
 505	/*
 506	 * The ADMA descriptor table is mapped further down as we
 507	 * need to fill it with data first.
 508	 */
 509
 510	host->align_addr = dma_map_single(mmc_dev(host->mmc),
 511		host->align_buffer, 128 * 4, direction);
 512	if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
 513		goto fail;
 514	BUG_ON(host->align_addr & 0x3);
 515
 516	host->sg_count = dma_map_sg(mmc_dev(host->mmc),
 517		data->sg, data->sg_len, direction);
 518	if (host->sg_count == 0)
 519		goto unmap_align;
 520
 521	desc = host->adma_desc;
 522	align = host->align_buffer;
 523
 524	align_addr = host->align_addr;
 525
 526	for_each_sg(data->sg, sg, host->sg_count, i) {
 527		addr = sg_dma_address(sg);
 528		len = sg_dma_len(sg);
 529
 530		/*
 531		 * The SDHCI specification states that ADMA
 532		 * addresses must be 32-bit aligned. If they
 533		 * aren't, then we use a bounce buffer for
 534		 * the (up to three) bytes that screw up the
 535		 * alignment.
 536		 */
 537		offset = (4 - (addr & 0x3)) & 0x3;
 
 538		if (offset) {
 539			if (data->flags & MMC_DATA_WRITE) {
 540				buffer = sdhci_kmap_atomic(sg, &flags);
 541				WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
 542				memcpy(align, buffer, offset);
 543				sdhci_kunmap_atomic(buffer, &flags);
 544			}
 545
 546			/* tran, valid */
 547			sdhci_set_adma_desc(desc, align_addr, offset, 0x21);
 
 548
 549			BUG_ON(offset > 65536);
 550
 551			align += 4;
 552			align_addr += 4;
 553
 554			desc += 8;
 555
 556			addr += offset;
 557			len -= offset;
 558		}
 559
 560		BUG_ON(len > 65536);
 561
 562		/* tran, valid */
 563		sdhci_set_adma_desc(desc, addr, len, 0x21);
 564		desc += 8;
 
 
 
 565
 566		/*
 567		 * If this triggers then we have a calculation bug
 568		 * somewhere. :/
 569		 */
 570		WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
 571	}
 572
 573	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
 574		/*
 575		* Mark the last descriptor as the terminating descriptor
 576		*/
 577		if (desc != host->adma_desc) {
 578			desc -= 8;
 579			desc[0] |= 0x2; /* end */
 580		}
 581	} else {
 582		/*
 583		* Add a terminating entry.
 584		*/
 585
 586		/* nop, end, valid */
 587		sdhci_set_adma_desc(desc, 0, 0, 0x3);
 588	}
 589
 590	/*
 591	 * Resync align buffer as we might have changed it.
 592	 */
 593	if (data->flags & MMC_DATA_WRITE) {
 594		dma_sync_single_for_device(mmc_dev(host->mmc),
 595			host->align_addr, 128 * 4, direction);
 596	}
 597
 598	host->adma_addr = dma_map_single(mmc_dev(host->mmc),
 599		host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
 600	if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
 601		goto unmap_entries;
 602	BUG_ON(host->adma_addr & 0x3);
 603
 604	return 0;
 605
 606unmap_entries:
 607	dma_unmap_sg(mmc_dev(host->mmc), data->sg,
 608		data->sg_len, direction);
 609unmap_align:
 610	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
 611		128 * 4, direction);
 612fail:
 613	return -EINVAL;
 614}
 615
 616static void sdhci_adma_table_post(struct sdhci_host *host,
 617	struct mmc_data *data)
 618{
 619	int direction;
 620
 621	struct scatterlist *sg;
 622	int i, size;
 623	u8 *align;
 624	char *buffer;
 625	unsigned long flags;
 626
 627	if (data->flags & MMC_DATA_READ)
 628		direction = DMA_FROM_DEVICE;
 629	else
 630		direction = DMA_TO_DEVICE;
 631
 632	dma_unmap_single(mmc_dev(host->mmc), host->adma_addr,
 633		(128 * 2 + 1) * 4, DMA_TO_DEVICE);
 634
 635	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
 636		128 * 4, direction);
 637
 638	if (data->flags & MMC_DATA_READ) {
 639		dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
 640			data->sg_len, direction);
 641
 642		align = host->align_buffer;
 
 
 
 
 
 643
 644		for_each_sg(data->sg, sg, host->sg_count, i) {
 645			if (sg_dma_address(sg) & 0x3) {
 646				size = 4 - (sg_dma_address(sg) & 0x3);
 
 
 
 
 
 
 
 
 
 
 
 647
 648				buffer = sdhci_kmap_atomic(sg, &flags);
 649				WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
 650				memcpy(buffer, align, size);
 651				sdhci_kunmap_atomic(buffer, &flags);
 652
 653				align += 4;
 654			}
 655		}
 656	}
 
 657
 658	dma_unmap_sg(mmc_dev(host->mmc), data->sg,
 659		data->sg_len, direction);
 
 
 
 
 660}
 661
 662static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
 663{
 664	u8 count;
 665	struct mmc_data *data = cmd->data;
 666	unsigned target_timeout, current_timeout;
 667
 668	/*
 669	 * If the host controller provides us with an incorrect timeout
 670	 * value, just skip the check and use 0xE.  The hardware may take
 671	 * longer to time out, but that's much better than having a too-short
 672	 * timeout value.
 673	 */
 674	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
 675		return 0xE;
 676
 677	/* Unspecified timeout, assume max */
 678	if (!data && !cmd->busy_timeout)
 679		return 0xE;
 680
 681	/* timeout in us */
 682	if (!data)
 683		target_timeout = cmd->busy_timeout * 1000;
 684	else {
 685		target_timeout = data->timeout_ns / 1000;
 686		if (host->clock)
 687			target_timeout += data->timeout_clks / host->clock;
 
 
 
 
 
 
 
 
 
 
 
 688	}
 689
 690	/*
 691	 * Figure out needed cycles.
 692	 * We do this in steps in order to fit inside a 32 bit int.
 693	 * The first step is the minimum timeout, which will have a
 694	 * minimum resolution of 6 bits:
 695	 * (1) 2^13*1000 > 2^22,
 696	 * (2) host->timeout_clk < 2^16
 697	 *     =>
 698	 *     (1) / (2) > 2^6
 699	 */
 700	count = 0;
 701	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
 702	while (current_timeout < target_timeout) {
 703		count++;
 704		current_timeout <<= 1;
 705		if (count >= 0xF)
 706			break;
 707	}
 708
 709	if (count >= 0xF) {
 710		DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
 711		    mmc_hostname(host->mmc), count, cmd->opcode);
 712		count = 0xE;
 713	}
 714
 715	return count;
 716}
 717
 718static void sdhci_set_transfer_irqs(struct sdhci_host *host)
 719{
 720	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
 721	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
 722
 723	if (host->flags & SDHCI_REQ_USE_DMA)
 724		sdhci_clear_set_irqs(host, pio_irqs, dma_irqs);
 725	else
 726		sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
 
 
 
 727}
 728
 729static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
 730{
 731	u8 count;
 732	u8 ctrl;
 733	struct mmc_data *data = cmd->data;
 734	int ret;
 735
 736	WARN_ON(host->data);
 737
 738	if (data || (cmd->flags & MMC_RSP_BUSY)) {
 739		count = sdhci_calc_timeout(host, cmd);
 740		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
 741	}
 
 
 
 
 
 
 
 
 
 742
 743	if (!data)
 744		return;
 745
 
 
 746	/* Sanity checks */
 747	BUG_ON(data->blksz * data->blocks > 524288);
 748	BUG_ON(data->blksz > host->mmc->max_blk_size);
 749	BUG_ON(data->blocks > 65535);
 750
 751	host->data = data;
 752	host->data_early = 0;
 753	host->data->bytes_xfered = 0;
 754
 755	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
 
 
 
 
 756		host->flags |= SDHCI_REQ_USE_DMA;
 757
 758	/*
 759	 * FIXME: This doesn't account for merging when mapping the
 760	 * scatterlist.
 761	 */
 762	if (host->flags & SDHCI_REQ_USE_DMA) {
 763		int broken, i;
 764		struct scatterlist *sg;
 765
 766		broken = 0;
 767		if (host->flags & SDHCI_USE_ADMA) {
 768			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
 769				broken = 1;
 
 
 
 
 
 
 
 770		} else {
 771			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
 772				broken = 1;
 
 
 773		}
 774
 775		if (unlikely(broken)) {
 776			for_each_sg(data->sg, sg, data->sg_len, i) {
 777				if (sg->length & 0x3) {
 778					DBG("Reverting to PIO because of "
 779						"transfer size (%d)\n",
 780						sg->length);
 
 
 
 
 781					host->flags &= ~SDHCI_REQ_USE_DMA;
 782					break;
 783				}
 784			}
 785		}
 786	}
 787
 788	/*
 789	 * The assumption here being that alignment is the same after
 790	 * translation to device address space.
 791	 */
 792	if (host->flags & SDHCI_REQ_USE_DMA) {
 793		int broken, i;
 794		struct scatterlist *sg;
 795
 796		broken = 0;
 797		if (host->flags & SDHCI_USE_ADMA) {
 798			/*
 799			 * As we use 3 byte chunks to work around
 800			 * alignment problems, we need to check this
 801			 * quirk.
 802			 */
 803			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
 804				broken = 1;
 805		} else {
 806			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
 807				broken = 1;
 808		}
 809
 810		if (unlikely(broken)) {
 811			for_each_sg(data->sg, sg, data->sg_len, i) {
 812				if (sg->offset & 0x3) {
 813					DBG("Reverting to PIO because of "
 814						"bad alignment\n");
 815					host->flags &= ~SDHCI_REQ_USE_DMA;
 816					break;
 817				}
 818			}
 819		}
 820	}
 821
 822	if (host->flags & SDHCI_REQ_USE_DMA) {
 823		if (host->flags & SDHCI_USE_ADMA) {
 824			ret = sdhci_adma_table_pre(host, data);
 825			if (ret) {
 826				/*
 827				 * This only happens when someone fed
 828				 * us an invalid request.
 829				 */
 830				WARN_ON(1);
 831				host->flags &= ~SDHCI_REQ_USE_DMA;
 832			} else {
 833				sdhci_writel(host, host->adma_addr,
 834					SDHCI_ADMA_ADDRESS);
 835			}
 836		} else {
 837			int sg_cnt;
 838
 839			sg_cnt = dma_map_sg(mmc_dev(host->mmc),
 840					data->sg, data->sg_len,
 841					(data->flags & MMC_DATA_READ) ?
 842						DMA_FROM_DEVICE :
 843						DMA_TO_DEVICE);
 844			if (sg_cnt == 0) {
 845				/*
 846				 * This only happens when someone fed
 847				 * us an invalid request.
 848				 */
 849				WARN_ON(1);
 850				host->flags &= ~SDHCI_REQ_USE_DMA;
 851			} else {
 852				WARN_ON(sg_cnt != 1);
 853				sdhci_writel(host, sg_dma_address(data->sg),
 854					SDHCI_DMA_ADDRESS);
 855			}
 856		}
 857	}
 858
 859	/*
 860	 * Always adjust the DMA selection as some controllers
 861	 * (e.g. JMicron) can't do PIO properly when the selection
 862	 * is ADMA.
 863	 */
 864	if (host->version >= SDHCI_SPEC_200) {
 865		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
 866		ctrl &= ~SDHCI_CTRL_DMA_MASK;
 867		if ((host->flags & SDHCI_REQ_USE_DMA) &&
 868			(host->flags & SDHCI_USE_ADMA))
 869			ctrl |= SDHCI_CTRL_ADMA32;
 870		else
 
 
 
 871			ctrl |= SDHCI_CTRL_SDMA;
 
 872		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
 873	}
 874
 875	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
 876		int flags;
 877
 878		flags = SG_MITER_ATOMIC;
 879		if (host->data->flags & MMC_DATA_READ)
 880			flags |= SG_MITER_TO_SG;
 881		else
 882			flags |= SG_MITER_FROM_SG;
 883		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
 884		host->blocks = data->blocks;
 885	}
 886
 887	sdhci_set_transfer_irqs(host);
 888
 889	/* Set the DMA boundary value and block size */
 890	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
 891		data->blksz), SDHCI_BLOCK_SIZE);
 892	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
 893}
 894
 
 
 
 
 
 
 
 895static void sdhci_set_transfer_mode(struct sdhci_host *host,
 896	struct mmc_command *cmd)
 897{
 898	u16 mode;
 899	struct mmc_data *data = cmd->data;
 900
 901	if (data == NULL) {
 
 
 
 
 902		/* clear Auto CMD settings for no data CMDs */
 903		mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
 904		sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
 905				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
 
 906		return;
 907	}
 908
 909	WARN_ON(!host->data);
 910
 911	mode = SDHCI_TRNS_BLK_CNT_EN;
 
 
 912	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
 913		mode |= SDHCI_TRNS_MULTI;
 914		/*
 915		 * If we are sending CMD23, CMD12 never gets sent
 916		 * on successful completion (so no Auto-CMD12).
 917		 */
 918		if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12))
 
 919			mode |= SDHCI_TRNS_AUTO_CMD12;
 920		else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
 921			mode |= SDHCI_TRNS_AUTO_CMD23;
 922			sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
 923		}
 924	}
 925
 926	if (data->flags & MMC_DATA_READ)
 927		mode |= SDHCI_TRNS_READ;
 928	if (host->flags & SDHCI_REQ_USE_DMA)
 929		mode |= SDHCI_TRNS_DMA;
 930
 931	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
 932}
 933
 934static void sdhci_finish_data(struct sdhci_host *host)
 935{
 936	struct mmc_data *data;
 
 
 
 
 
 
 937
 938	BUG_ON(!host->data);
 
 
 939
 940	data = host->data;
 941	host->data = NULL;
 
 
 
 
 942
 943	if (host->flags & SDHCI_REQ_USE_DMA) {
 944		if (host->flags & SDHCI_USE_ADMA)
 945			sdhci_adma_table_post(host, data);
 946		else {
 947			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
 948				data->sg_len, (data->flags & MMC_DATA_READ) ?
 949					DMA_FROM_DEVICE : DMA_TO_DEVICE);
 950		}
 951	}
 952
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 953	/*
 954	 * The specification states that the block count register must
 955	 * be updated, but it does not specify at what point in the
 956	 * data flow. That makes the register entirely useless to read
 957	 * back so we have to assume that nothing made it to the card
 958	 * in the event of an error.
 959	 */
 960	if (data->error)
 961		data->bytes_xfered = 0;
 962	else
 963		data->bytes_xfered = data->blksz * data->blocks;
 964
 965	/*
 966	 * Need to send CMD12 if -
 967	 * a) open-ended multiblock transfer (no CMD23)
 968	 * b) error in multiblock transfer
 969	 */
 970	if (data->stop &&
 971	    (data->error ||
 972	     !host->mrq->sbc)) {
 973
 974		/*
 975		 * The controller needs a reset of internal state machines
 976		 * upon error conditions.
 977		 */
 978		if (data->error) {
 979			sdhci_reset(host, SDHCI_RESET_CMD);
 980			sdhci_reset(host, SDHCI_RESET_DATA);
 
 981		}
 982
 983		sdhci_send_command(host, data->stop);
 984	} else
 985		tasklet_schedule(&host->finish_tasklet);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 986}
 987
 988void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
 989{
 990	int flags;
 991	u32 mask;
 992	unsigned long timeout;
 993
 994	WARN_ON(host->cmd);
 995
 
 
 
 
 
 
 
 996	/* Wait max 10 ms */
 997	timeout = 10;
 998
 999	mask = SDHCI_CMD_INHIBIT;
1000	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
1001		mask |= SDHCI_DATA_INHIBIT;
1002
1003	/* We shouldn't wait for data inihibit for stop commands, even
1004	   though they might use busy signaling */
1005	if (host->mrq->data && (cmd == host->mrq->data->stop))
1006		mask &= ~SDHCI_DATA_INHIBIT;
1007
1008	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
1009		if (timeout == 0) {
1010			pr_err("%s: Controller never released "
1011				"inhibit bit(s).\n", mmc_hostname(host->mmc));
1012			sdhci_dumpregs(host);
1013			cmd->error = -EIO;
1014			tasklet_schedule(&host->finish_tasklet);
1015			return;
1016		}
1017		timeout--;
1018		mdelay(1);
1019	}
1020
1021	timeout = jiffies;
1022	if (!cmd->data && cmd->busy_timeout > 9000)
1023		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
1024	else
1025		timeout += 10 * HZ;
1026	mod_timer(&host->timer, timeout);
1027
1028	host->cmd = cmd;
 
 
 
 
1029
1030	sdhci_prepare_data(host, cmd);
1031
1032	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
1033
1034	sdhci_set_transfer_mode(host, cmd);
1035
1036	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
1037		pr_err("%s: Unsupported response type!\n",
1038			mmc_hostname(host->mmc));
1039		cmd->error = -EINVAL;
1040		tasklet_schedule(&host->finish_tasklet);
1041		return;
1042	}
1043
1044	if (!(cmd->flags & MMC_RSP_PRESENT))
1045		flags = SDHCI_CMD_RESP_NONE;
1046	else if (cmd->flags & MMC_RSP_136)
1047		flags = SDHCI_CMD_RESP_LONG;
1048	else if (cmd->flags & MMC_RSP_BUSY)
1049		flags = SDHCI_CMD_RESP_SHORT_BUSY;
1050	else
1051		flags = SDHCI_CMD_RESP_SHORT;
1052
1053	if (cmd->flags & MMC_RSP_CRC)
1054		flags |= SDHCI_CMD_CRC;
1055	if (cmd->flags & MMC_RSP_OPCODE)
1056		flags |= SDHCI_CMD_INDEX;
1057
1058	/* CMD19 is special in that the Data Present Select should be set */
1059	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
1060	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
1061		flags |= SDHCI_CMD_DATA;
1062
1063	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
1064}
1065EXPORT_SYMBOL_GPL(sdhci_send_command);
1066
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1067static void sdhci_finish_command(struct sdhci_host *host)
1068{
1069	int i;
1070
1071	BUG_ON(host->cmd == NULL);
1072
1073	if (host->cmd->flags & MMC_RSP_PRESENT) {
1074		if (host->cmd->flags & MMC_RSP_136) {
1075			/* CRC is stripped so we need to do some shifting. */
1076			for (i = 0;i < 4;i++) {
1077				host->cmd->resp[i] = sdhci_readl(host,
1078					SDHCI_RESPONSE + (3-i)*4) << 8;
1079				if (i != 3)
1080					host->cmd->resp[i] |=
1081						sdhci_readb(host,
1082						SDHCI_RESPONSE + (3-i)*4-1);
1083			}
1084		} else {
1085			host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
1086		}
1087	}
1088
1089	host->cmd->error = 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1090
1091	/* Finished CMD23, now send actual command. */
1092	if (host->cmd == host->mrq->sbc) {
1093		host->cmd = NULL;
1094		sdhci_send_command(host, host->mrq->cmd);
1095	} else {
1096
1097		/* Processed actual command. */
1098		if (host->data && host->data_early)
1099			sdhci_finish_data(host);
1100
1101		if (!host->cmd->data)
1102			tasklet_schedule(&host->finish_tasklet);
1103
1104		host->cmd = NULL;
1105	}
1106}
1107
1108static u16 sdhci_get_preset_value(struct sdhci_host *host)
1109{
1110	u16 ctrl, preset = 0;
1111
1112	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1113
1114	switch (ctrl & SDHCI_CTRL_UHS_MASK) {
1115	case SDHCI_CTRL_UHS_SDR12:
1116		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1117		break;
1118	case SDHCI_CTRL_UHS_SDR25:
1119		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1120		break;
1121	case SDHCI_CTRL_UHS_SDR50:
1122		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1123		break;
1124	case SDHCI_CTRL_UHS_SDR104:
 
1125		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1126		break;
1127	case SDHCI_CTRL_UHS_DDR50:
 
1128		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1129		break;
 
 
 
1130	default:
1131		pr_warn("%s: Invalid UHS-I mode selected\n",
1132			mmc_hostname(host->mmc));
1133		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1134		break;
1135	}
1136	return preset;
1137}
1138
1139static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
 
1140{
1141	int div = 0; /* Initialized for compiler warning */
1142	int real_div = div, clk_mul = 1;
1143	u16 clk = 0;
1144	unsigned long timeout;
1145
1146	if (clock && clock == host->clock)
1147		return;
1148
1149	host->mmc->actual_clock = 0;
1150
1151	if (host->ops->set_clock) {
1152		host->ops->set_clock(host, clock);
1153		if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
1154			return;
1155	}
1156
1157	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1158
1159	if (clock == 0)
1160		goto out;
1161
1162	if (host->version >= SDHCI_SPEC_300) {
1163		if (sdhci_readw(host, SDHCI_HOST_CONTROL2) &
1164			SDHCI_CTRL_PRESET_VAL_ENABLE) {
1165			u16 pre_val;
1166
1167			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1168			pre_val = sdhci_get_preset_value(host);
1169			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
1170				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
1171			if (host->clk_mul &&
1172				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
1173				clk = SDHCI_PROG_CLOCK_MODE;
1174				real_div = div + 1;
1175				clk_mul = host->clk_mul;
1176			} else {
1177				real_div = max_t(int, 1, div << 1);
1178			}
1179			goto clock_set;
1180		}
1181
1182		/*
1183		 * Check if the Host Controller supports Programmable Clock
1184		 * Mode.
1185		 */
1186		if (host->clk_mul) {
1187			for (div = 1; div <= 1024; div++) {
1188				if ((host->max_clk * host->clk_mul / div)
1189					<= clock)
1190					break;
1191			}
1192			/*
1193			 * Set Programmable Clock Mode in the Clock
1194			 * Control register.
1195			 */
1196			clk = SDHCI_PROG_CLOCK_MODE;
1197			real_div = div;
1198			clk_mul = host->clk_mul;
1199			div--;
1200		} else {
 
 
 
 
 
 
 
 
 
 
1201			/* Version 3.00 divisors must be a multiple of 2. */
1202			if (host->max_clk <= clock)
1203				div = 1;
1204			else {
1205				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1206				     div += 2) {
1207					if ((host->max_clk / div) <= clock)
1208						break;
1209				}
1210			}
1211			real_div = div;
1212			div >>= 1;
 
 
 
1213		}
1214	} else {
1215		/* Version 2.00 divisors must be a power of 2. */
1216		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1217			if ((host->max_clk / div) <= clock)
1218				break;
1219		}
1220		real_div = div;
1221		div >>= 1;
1222	}
1223
1224clock_set:
1225	if (real_div)
1226		host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
1227
1228	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1229	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1230		<< SDHCI_DIVIDER_HI_SHIFT;
 
 
 
 
 
 
 
 
 
1231	clk |= SDHCI_CLOCK_INT_EN;
1232	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1233
1234	/* Wait max 20 ms */
1235	timeout = 20;
1236	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
1237		& SDHCI_CLOCK_INT_STABLE)) {
1238		if (timeout == 0) {
1239			pr_err("%s: Internal clock never "
1240				"stabilised.\n", mmc_hostname(host->mmc));
1241			sdhci_dumpregs(host);
1242			return;
1243		}
1244		timeout--;
1245		mdelay(1);
1246	}
1247
1248	clk |= SDHCI_CLOCK_CARD_EN;
1249	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
 
 
1250
1251out:
1252	host->clock = clock;
 
 
 
 
 
 
 
 
 
 
 
1253}
 
1254
1255static inline void sdhci_update_clock(struct sdhci_host *host)
 
1256{
1257	unsigned int clock;
 
 
1258
1259	clock = host->clock;
1260	host->clock = 0;
1261	sdhci_set_clock(host, clock);
 
1262}
1263
1264static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
 
1265{
1266	u8 pwr = 0;
1267
1268	if (power != (unsigned short)-1) {
1269		switch (1 << power) {
1270		case MMC_VDD_165_195:
 
 
 
 
 
 
 
1271			pwr = SDHCI_POWER_180;
1272			break;
1273		case MMC_VDD_29_30:
1274		case MMC_VDD_30_31:
1275			pwr = SDHCI_POWER_300;
1276			break;
1277		case MMC_VDD_32_33:
1278		case MMC_VDD_33_34:
1279			pwr = SDHCI_POWER_330;
1280			break;
1281		default:
1282			BUG();
 
 
1283		}
1284	}
1285
1286	if (host->pwr == pwr)
1287		return -1;
1288
1289	host->pwr = pwr;
1290
1291	if (pwr == 0) {
1292		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1293		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1294			sdhci_runtime_pm_bus_off(host);
1295		return 0;
1296	}
 
 
 
 
 
 
 
 
 
 
 
 
 
1297
1298	/*
1299	 * Spec says that we should clear the power reg before setting
1300	 * a new value. Some controllers don't seem to like this though.
1301	 */
1302	if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
1303		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1304
1305	/*
1306	 * At least the Marvell CaFe chip gets confused if we set the voltage
1307	 * and set turn on power at the same time, so set the voltage first.
1308	 */
1309	if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
1310		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1311
1312	pwr |= SDHCI_POWER_ON;
 
1313
1314	sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
 
 
 
 
 
 
 
 
1315
1316	if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1317		sdhci_runtime_pm_bus_on(host);
1318
1319	/*
1320	 * Some controllers need an extra 10ms delay of 10ms before they
1321	 * can apply clock after applying power
1322	 */
1323	if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
1324		mdelay(10);
1325
1326	return power;
1327}
 
1328
1329/*****************************************************************************\
1330 *                                                                           *
1331 * MMC callbacks                                                             *
1332 *                                                                           *
1333\*****************************************************************************/
1334
1335static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1336{
1337	struct sdhci_host *host;
1338	int present;
1339	unsigned long flags;
1340	u32 tuning_opcode;
1341
1342	host = mmc_priv(mmc);
1343
1344	sdhci_runtime_pm_get(host);
 
1345
1346	spin_lock_irqsave(&host->lock, flags);
1347
1348	WARN_ON(host->mrq != NULL);
1349
1350#ifndef SDHCI_USE_LEDS_CLASS
1351	sdhci_activate_led(host);
1352#endif
1353
1354	/*
1355	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
1356	 * requests if Auto-CMD12 is enabled.
1357	 */
1358	if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
1359		if (mrq->stop) {
1360			mrq->data->stop = NULL;
1361			mrq->stop = NULL;
1362		}
1363	}
1364
1365	host->mrq = mrq;
1366
1367	/*
1368	 * Firstly check card presence from cd-gpio.  The return could
1369	 * be one of the following possibilities:
1370	 *     negative: cd-gpio is not available
1371	 *     zero: cd-gpio is used, and card is removed
1372	 *     one: cd-gpio is used, and card is present
1373	 */
1374	present = mmc_gpio_get_cd(host->mmc);
1375	if (present < 0) {
1376		/* If polling, assume that the card is always present. */
1377		if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
1378			present = 1;
1379		else
1380			present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
1381					SDHCI_CARD_PRESENT;
1382	}
1383
1384	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1385		host->mrq->cmd->error = -ENOMEDIUM;
1386		tasklet_schedule(&host->finish_tasklet);
1387	} else {
1388		u32 present_state;
1389
1390		present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
1391		/*
1392		 * Check if the re-tuning timer has already expired and there
1393		 * is no on-going data transfer. If so, we need to execute
1394		 * tuning procedure before sending command.
1395		 */
1396		if ((host->flags & SDHCI_NEEDS_RETUNING) &&
1397		    !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
1398			if (mmc->card) {
1399				/* eMMC uses cmd21 but sd and sdio use cmd19 */
1400				tuning_opcode =
1401					mmc->card->type == MMC_TYPE_MMC ?
1402					MMC_SEND_TUNING_BLOCK_HS200 :
1403					MMC_SEND_TUNING_BLOCK;
1404
1405				/* Here we need to set the host->mrq to NULL,
1406				 * in case the pending finish_tasklet
1407				 * finishes it incorrectly.
1408				 */
1409				host->mrq = NULL;
1410
1411				spin_unlock_irqrestore(&host->lock, flags);
1412				sdhci_execute_tuning(mmc, tuning_opcode);
1413				spin_lock_irqsave(&host->lock, flags);
1414
1415				/* Restore original mmc_request structure */
1416				host->mrq = mrq;
1417			}
1418		}
1419
1420		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
1421			sdhci_send_command(host, mrq->sbc);
1422		else
1423			sdhci_send_command(host, mrq->cmd);
1424	}
1425
1426	mmiowb();
1427	spin_unlock_irqrestore(&host->lock, flags);
1428}
1429
1430static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
1431{
1432	unsigned long flags;
1433	int vdd_bit = -1;
1434	u8 ctrl;
1435
1436	spin_lock_irqsave(&host->lock, flags);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1437
1438	if (host->flags & SDHCI_DEVICE_DEAD) {
1439		spin_unlock_irqrestore(&host->lock, flags);
1440		if (host->vmmc && ios->power_mode == MMC_POWER_OFF)
1441			mmc_regulator_set_ocr(host->mmc, host->vmmc, 0);
1442		return;
1443	}
1444
1445	/*
1446	 * Reset the chip on each power off.
1447	 * Should clear out any weird states.
1448	 */
1449	if (ios->power_mode == MMC_POWER_OFF) {
1450		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
1451		sdhci_reinit(host);
1452	}
1453
1454	if (host->version >= SDHCI_SPEC_300 &&
1455		(ios->power_mode == MMC_POWER_UP) &&
1456		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
1457		sdhci_enable_preset_value(host, false);
1458
1459	sdhci_set_clock(host, ios->clock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1460
1461	if (ios->power_mode == MMC_POWER_OFF)
1462		vdd_bit = sdhci_set_power(host, -1);
1463	else
1464		vdd_bit = sdhci_set_power(host, ios->vdd);
1465
1466	if (host->vmmc && vdd_bit != -1) {
1467		spin_unlock_irqrestore(&host->lock, flags);
1468		mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit);
1469		spin_lock_irqsave(&host->lock, flags);
1470	}
1471
1472	if (host->ops->platform_send_init_74_clocks)
1473		host->ops->platform_send_init_74_clocks(host, ios->power_mode);
1474
1475	/*
1476	 * If your platform has 8-bit width support but is not a v3 controller,
1477	 * or if it requires special setup code, you should implement that in
1478	 * platform_bus_width().
1479	 */
1480	if (host->ops->platform_bus_width) {
1481		host->ops->platform_bus_width(host, ios->bus_width);
1482	} else {
1483		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1484		if (ios->bus_width == MMC_BUS_WIDTH_8) {
1485			ctrl &= ~SDHCI_CTRL_4BITBUS;
1486			if (host->version >= SDHCI_SPEC_300)
1487				ctrl |= SDHCI_CTRL_8BITBUS;
1488		} else {
1489			if (host->version >= SDHCI_SPEC_300)
1490				ctrl &= ~SDHCI_CTRL_8BITBUS;
1491			if (ios->bus_width == MMC_BUS_WIDTH_4)
1492				ctrl |= SDHCI_CTRL_4BITBUS;
1493			else
1494				ctrl &= ~SDHCI_CTRL_4BITBUS;
1495		}
1496		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1497	}
1498
1499	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1500
1501	if ((ios->timing == MMC_TIMING_SD_HS ||
1502	     ios->timing == MMC_TIMING_MMC_HS)
1503	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
1504		ctrl |= SDHCI_CTRL_HISPD;
1505	else
1506		ctrl &= ~SDHCI_CTRL_HISPD;
1507
1508	if (host->version >= SDHCI_SPEC_300) {
1509		u16 clk, ctrl_2;
1510
1511		/* In case of UHS-I modes, set High Speed Enable */
1512		if ((ios->timing == MMC_TIMING_MMC_HS200) ||
1513		    (ios->timing == MMC_TIMING_UHS_SDR50) ||
1514		    (ios->timing == MMC_TIMING_UHS_SDR104) ||
1515		    (ios->timing == MMC_TIMING_UHS_DDR50) ||
1516		    (ios->timing == MMC_TIMING_UHS_SDR25))
1517			ctrl |= SDHCI_CTRL_HISPD;
1518
1519		ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1520		if (!(ctrl_2 & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
1521			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1522			/*
1523			 * We only need to set Driver Strength if the
1524			 * preset value enable is not set.
1525			 */
1526			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
1527			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
1528				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
1529			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
1530				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
1531
1532			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1533		} else {
1534			/*
1535			 * According to SDHC Spec v3.00, if the Preset Value
1536			 * Enable in the Host Control 2 register is set, we
1537			 * need to reset SD Clock Enable before changing High
1538			 * Speed Enable to avoid generating clock glitches.
1539			 */
1540
1541			/* Reset SD Clock Enable */
1542			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1543			clk &= ~SDHCI_CLOCK_CARD_EN;
1544			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1545
1546			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1547
1548			/* Re-enable SD Clock */
1549			sdhci_update_clock(host);
1550		}
1551
1552
1553		/* Reset SD Clock Enable */
1554		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1555		clk &= ~SDHCI_CLOCK_CARD_EN;
1556		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1557
1558		if (host->ops->set_uhs_signaling)
1559			host->ops->set_uhs_signaling(host, ios->timing);
1560		else {
1561			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1562			/* Select Bus Speed Mode for host */
1563			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1564			if ((ios->timing == MMC_TIMING_MMC_HS200) ||
1565			    (ios->timing == MMC_TIMING_UHS_SDR104))
1566				ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1567			else if (ios->timing == MMC_TIMING_UHS_SDR12)
1568				ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1569			else if (ios->timing == MMC_TIMING_UHS_SDR25)
1570				ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
1571			else if (ios->timing == MMC_TIMING_UHS_SDR50)
1572				ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
1573			else if (ios->timing == MMC_TIMING_UHS_DDR50)
1574				ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1575			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1576		}
1577
1578		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
1579				((ios->timing == MMC_TIMING_UHS_SDR12) ||
1580				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
1581				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
1582				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
1583				 (ios->timing == MMC_TIMING_UHS_DDR50))) {
1584			u16 preset;
1585
1586			sdhci_enable_preset_value(host, true);
1587			preset = sdhci_get_preset_value(host);
1588			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
1589				>> SDHCI_PRESET_DRV_SHIFT;
1590		}
1591
1592		/* Re-enable SD Clock */
1593		sdhci_update_clock(host);
1594	} else
1595		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1596
1597	/*
1598	 * Some (ENE) controllers go apeshit on some ios operations,
1599	 * signalling timeout and CRC errors even on CMD0. Resetting
1600	 * it on each ios seems to solve the problem.
1601	 */
1602	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
1603		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1604
1605	mmiowb();
1606	spin_unlock_irqrestore(&host->lock, flags);
1607}
1608
1609static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1610{
1611	struct sdhci_host *host = mmc_priv(mmc);
1612
1613	sdhci_runtime_pm_get(host);
1614	sdhci_do_set_ios(host, ios);
1615	sdhci_runtime_pm_put(host);
1616}
1617
1618static int sdhci_do_get_cd(struct sdhci_host *host)
1619{
1620	int gpio_cd = mmc_gpio_get_cd(host->mmc);
1621
1622	if (host->flags & SDHCI_DEVICE_DEAD)
1623		return 0;
1624
1625	/* If polling/nonremovable, assume that the card is always present. */
1626	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
1627	    (host->mmc->caps & MMC_CAP_NONREMOVABLE))
1628		return 1;
1629
1630	/* Try slot gpio detect */
1631	if (!IS_ERR_VALUE(gpio_cd))
1632		return !!gpio_cd;
1633
1634	/* Host native card detect */
1635	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
1636}
1637
1638static int sdhci_get_cd(struct mmc_host *mmc)
1639{
1640	struct sdhci_host *host = mmc_priv(mmc);
1641	int ret;
1642
1643	sdhci_runtime_pm_get(host);
1644	ret = sdhci_do_get_cd(host);
1645	sdhci_runtime_pm_put(host);
1646	return ret;
1647}
1648
1649static int sdhci_check_ro(struct sdhci_host *host)
1650{
1651	unsigned long flags;
1652	int is_readonly;
1653
1654	spin_lock_irqsave(&host->lock, flags);
1655
1656	if (host->flags & SDHCI_DEVICE_DEAD)
1657		is_readonly = 0;
1658	else if (host->ops->get_ro)
1659		is_readonly = host->ops->get_ro(host);
1660	else
1661		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
1662				& SDHCI_WRITE_PROTECT);
1663
1664	spin_unlock_irqrestore(&host->lock, flags);
1665
1666	/* This quirk needs to be replaced by a callback function later */
1667	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
1668		!is_readonly : is_readonly;
1669}
1670
1671#define SAMPLE_COUNT	5
1672
1673static int sdhci_do_get_ro(struct sdhci_host *host)
1674{
1675	int i, ro_count;
1676
1677	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
1678		return sdhci_check_ro(host);
1679
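	/*
	 * Debounce an unstable write-protect switch: sample it
	 * SAMPLE_COUNT times, 30 ms apart, and report read-only only
	 * when a strict majority of the samples agree.
	 */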
1680	ro_count = 0;
1681	for (i = 0; i < SAMPLE_COUNT; i++) {
1682		if (sdhci_check_ro(host)) {
1683			if (++ro_count > SAMPLE_COUNT / 2)
1684				return 1;
1685		}
1686		msleep(30);
1687	}
1688	return 0;
1689}
1690
1691static void sdhci_hw_reset(struct mmc_host *mmc)
1692{
1693	struct sdhci_host *host = mmc_priv(mmc);
1694
1695	if (host->ops && host->ops->hw_reset)
1696		host->ops->hw_reset(host);
1697}
1698
1699static int sdhci_get_ro(struct mmc_host *mmc)
1700{
1701	struct sdhci_host *host = mmc_priv(mmc);
1702	int ret;
1703
1704	sdhci_runtime_pm_get(host);
1705	ret = sdhci_do_get_ro(host);
1706	sdhci_runtime_pm_put(host);
1707	return ret;
1708}
1709
1710static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
1711{
1712	if (host->flags & SDHCI_DEVICE_DEAD)
1713		goto out;
1714
1715	if (enable)
1716		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
1717	else
1718		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
1719
1720	/* SDIO IRQ will be enabled as appropriate in runtime resume */
1721	if (host->runtime_suspended)
1722		goto out;
1723
1724	if (enable)
1725		sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT);
1726	else
1727		sdhci_mask_irqs(host, SDHCI_INT_CARD_INT);
1728out:
1729	mmiowb();
1730}
1731
1732static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1733{
1734	struct sdhci_host *host = mmc_priv(mmc);
1735	unsigned long flags;
1736
1737	spin_lock_irqsave(&host->lock, flags);
1738	sdhci_enable_sdio_irq_nolock(host, enable);
1739	spin_unlock_irqrestore(&host->lock, flags);
1740}
1741
1742static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
1743						struct mmc_ios *ios)
1744{
1745	u16 ctrl;
1746	int ret;
1747
1748	/*
1749	 * Signal Voltage Switching is only applicable for Host Controllers
1750	 * v3.00 and above.
1751	 */
1752	if (host->version < SDHCI_SPEC_300)
1753		return 0;
1754
1755	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1756
1757	switch (ios->signal_voltage) {
1758	case MMC_SIGNAL_VOLTAGE_330:
1759		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1760		ctrl &= ~SDHCI_CTRL_VDD_180;
1761		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1762
1763		if (host->vqmmc) {
1764			ret = regulator_set_voltage(host->vqmmc, 2700000, 3600000);
1765			if (ret) {
1766				pr_warning("%s: Switching to 3.3V signalling voltage "
1767						"failed\n", mmc_hostname(host->mmc));
1768				return -EIO;
1769			}
1770		}
1771		/* Wait for 5ms */
1772		usleep_range(5000, 5500);
1773
1774		/* 3.3V regulator output should be stable within 5 ms */
1775		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1776		if (!(ctrl & SDHCI_CTRL_VDD_180))
1777			return 0;
1778
1779		pr_warning("%s: 3.3V regulator output did not become stable\n",
1780				mmc_hostname(host->mmc));
1781
1782		return -EAGAIN;
1783	case MMC_SIGNAL_VOLTAGE_180:
1784		if (host->vqmmc) {
1785			ret = regulator_set_voltage(host->vqmmc,
1786					1700000, 1950000);
1787			if (ret) {
1788				pr_warning("%s: Switching to 1.8V signalling voltage "
1789						"failed\n", mmc_hostname(host->mmc));
1790				return -EIO;
1791			}
1792		}
1793
1794		/*
1795		 * Enable 1.8V Signal Enable in the Host Control2
1796		 * register
1797		 */
1798		ctrl |= SDHCI_CTRL_VDD_180;
1799		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1800
1801		/* Wait for 5ms */
1802		usleep_range(5000, 5500);
1803
1804		/* 1.8V regulator output should be stable within 5 ms */
1805		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1806		if (ctrl & SDHCI_CTRL_VDD_180)
1807			return 0;
1808
1809		pr_warning("%s: 1.8V regulator output did not become stable\n",
1810				mmc_hostname(host->mmc));
1811
1812		return -EAGAIN;
1813	case MMC_SIGNAL_VOLTAGE_120:
1814		if (host->vqmmc) {
1815			ret = regulator_set_voltage(host->vqmmc, 1100000, 1300000);
1816			if (ret) {
1817				pr_warning("%s: Switching to 1.2V signalling voltage "
1818						"failed\n", mmc_hostname(host->mmc));
1819				return -EIO;
1820			}
1821		}
1822		return 0;
1823	default:
1824		/* No signal voltage switch required */
1825		return 0;
1826	}
1827}
1828
1829static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
1830	struct mmc_ios *ios)
1831{
1832	struct sdhci_host *host = mmc_priv(mmc);
1833	int err;
1834
1835	if (host->version < SDHCI_SPEC_300)
1836		return 0;
1837	sdhci_runtime_pm_get(host);
1838	err = sdhci_do_start_signal_voltage_switch(host, ios);
1839	sdhci_runtime_pm_put(host);
1840	return err;
1841}
1842
1843static int sdhci_card_busy(struct mmc_host *mmc)
1844{
1845	struct sdhci_host *host = mmc_priv(mmc);
1846	u32 present_state;
1847
1848	sdhci_runtime_pm_get(host);
1849	/* Check whether DAT[3:0] is 0000 */
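	/*
	 * A card signals busy (e.g. after a voltage switch) by holding
	 * its DAT lines low, so an all-zero data level field means busy.
	 */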
1850	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
1851	sdhci_runtime_pm_put(host);
1852
1853	return !(present_state & SDHCI_DATA_LVL_MASK);
1854}
1855
1856static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1857{
1858	struct sdhci_host *host;
1859	u16 ctrl;
1860	u32 ier;
1861	int tuning_loop_counter = MAX_TUNING_LOOP;
1862	unsigned long timeout;
1863	int err = 0;
1864	bool requires_tuning_nonuhs = false;
1865	unsigned long flags;
1866
1867	host = mmc_priv(mmc);
1868
1869	sdhci_runtime_pm_get(host);
1870	spin_lock_irqsave(&host->lock, flags);
1871
1872	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1873
1874	/*
1875	 * The Host Controller needs tuning only in case of SDR104 mode
1876	 * and for SDR50 mode when Use Tuning for SDR50 is set in the
1877	 * Capabilities register.
1878	 * If the Host Controller supports the HS200 mode then the
1879	 * tuning function has to be executed.
1880	 */
1881	if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
1882	    (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
1883	     host->flags & SDHCI_SDR104_NEEDS_TUNING))
1884		requires_tuning_nonuhs = true;
1885
1886	if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
1887	    requires_tuning_nonuhs)
1888		ctrl |= SDHCI_CTRL_EXEC_TUNING;
1889	else {
1890		spin_unlock_irqrestore(&host->lock, flags);
1891		sdhci_runtime_pm_put(host);
1892		return 0;
1893	}
1894
1895	if (host->ops->platform_execute_tuning) {
1896		spin_unlock_irqrestore(&host->lock, flags);
1897		err = host->ops->platform_execute_tuning(host, opcode);
1898		sdhci_runtime_pm_put(host);
1899		return err;
1900	}
1901
1902	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1903
1904	/*
1905	 * As per the Host Controller spec v3.00, tuning command
1906	 * generates Buffer Read Ready interrupt, so enable that.
1907	 *
1908	 * Note: The spec clearly says that when tuning sequence
1909	 * is being performed, the controller does not generate
1910	 * interrupts other than Buffer Read Ready interrupt. But
1911	 * to make sure we don't hit a controller bug, we _only_
1912	 * enable Buffer Read Ready interrupt here.
1913	 */
1914	ier = sdhci_readl(host, SDHCI_INT_ENABLE);
1915	sdhci_clear_set_irqs(host, ier, SDHCI_INT_DATA_AVAIL);
1916
1917	/*
1918	 * Issue CMD19 repeatedly until Execute Tuning is cleared, the loop
1919	 * count reaches 40, or a timeout of 150ms occurs.
1920	 */
1921	timeout = 150;
1922	do {
1923		struct mmc_command cmd = {0};
1924		struct mmc_request mrq = {NULL};
1925
1926		if (!tuning_loop_counter || !timeout)
1927			break;
1928
1929		cmd.opcode = opcode;
1930		cmd.arg = 0;
1931		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
1932		cmd.retries = 0;
1933		cmd.data = NULL;
1934		cmd.error = 0;
1935
1936		mrq.cmd = &cmd;
1937		host->mrq = &mrq;
1938
1939		/*
1940		 * In response to CMD19, the card sends 64 bytes of tuning
1941		 * block to the Host Controller. So we set the block size
1942		 * to 64 here.
1943		 */
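		/*
		 * For CMD21 (HS200) the tuning block grows to 128 bytes on
		 * an 8-bit bus, hence the split below.  Note that the first
		 * argument of SDHCI_MAKE_BLKSZ() is the SDMA buffer boundary
		 * field; 7 selects the largest (512 KiB) boundary.
		 */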
1944		if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1945			if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
1946				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
1947					     SDHCI_BLOCK_SIZE);
1948			else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
1949				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
1950					     SDHCI_BLOCK_SIZE);
1951		} else {
1952			sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
1953				     SDHCI_BLOCK_SIZE);
1954		}
1955
1956		/*
1957		 * The tuning block is sent by the card to the host controller.
1958		 * So we set the TRNS_READ bit in the Transfer Mode register.
1959		 * This also takes care of setting DMA Enable and Multi Block
1960		 * Select in the same register to 0.
1961		 */
1962		sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
1963
1964		sdhci_send_command(host, &cmd);
1965
1966		host->cmd = NULL;
1967		host->mrq = NULL;
1968
1969		spin_unlock_irqrestore(&host->lock, flags);
1970		/* Wait for Buffer Read Ready interrupt */
1971		wait_event_interruptible_timeout(host->buf_ready_int,
1972					(host->tuning_done == 1),
1973					msecs_to_jiffies(50));
1974		spin_lock_irqsave(&host->lock, flags);
1975
1976		if (!host->tuning_done) {
1977			pr_info(DRIVER_NAME ": Timeout waiting for "
1978				"Buffer Read Ready interrupt during tuning "
1979				"procedure, falling back to fixed sampling "
1980				"clock\n");
1981			ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1982			ctrl &= ~SDHCI_CTRL_TUNED_CLK;
1983			ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
1984			sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1985
1986			err = -EIO;
1987			goto out;
1988		}
1989
1990		host->tuning_done = 0;
1991
1992		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1993		tuning_loop_counter--;
1994		timeout--;
1995		mdelay(1);
1996	} while (ctrl & SDHCI_CTRL_EXEC_TUNING);
1997
1998	/*
1999	 * The Host Driver has exhausted the maximum number of loops allowed,
2000	 * so use fixed sampling frequency.
2001	 */
2002	if (!tuning_loop_counter || !timeout) {
2003		ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2004		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2005		err = -EIO;
2006	} else {
2007		if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
2008			pr_info(DRIVER_NAME ": Tuning procedure"
2009				" failed, falling back to fixed sampling"
2010				" clock\n");
2011			err = -EIO;
2012		}
2013	}
2014
2015out:
2016	/*
2017	 * If this is the very first time we are here, we start the retuning
2018	 * timer. SDHCI_NEEDS_RETUNING is not yet set only on that first
2019	 * pass, so we check this condition before actually starting the
2020	 * timer.
2021	 */
2022	if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count &&
2023	    (host->tuning_mode == SDHCI_TUNING_MODE_1)) {
2024		host->flags |= SDHCI_USING_RETUNING_TIMER;
2025		mod_timer(&host->tuning_timer, jiffies +
2026			host->tuning_count * HZ);
2027		/* Tuning mode 1 limits the maximum data length to 4MB */
2028		mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size;
2029	} else if (host->flags & SDHCI_USING_RETUNING_TIMER) {
2030		host->flags &= ~SDHCI_NEEDS_RETUNING;
2031		/* Reload the new initial value for timer */
2032		mod_timer(&host->tuning_timer, jiffies +
2033			  host->tuning_count * HZ);
2034	}
2035
2036	/*
2037	 * In case tuning fails, host controllers which support re-tuning can
2038	 * try tuning again at a later time, when the re-tuning timer expires.
2039	 * So for these controllers, we return 0. Since there might be other
2040	 * controllers who do not have this capability, we return error for
2041	 * them. SDHCI_USING_RETUNING_TIMER means the host is currently using
2042	 * a retuning timer to do the retuning for the card.
2043	 */
2044	if (err && (host->flags & SDHCI_USING_RETUNING_TIMER))
2045		err = 0;
2046
2047	sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier);
2048	spin_unlock_irqrestore(&host->lock, flags);
2049	sdhci_runtime_pm_put(host);
2050
2051	return err;
2052}
2053
2054
2055static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2056{
2057	u16 ctrl;
2058
2059	/* Host Controller v3.00 defines preset value registers */
2060	if (host->version < SDHCI_SPEC_300)
2061		return;
2062
2063	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2064
2065	/*
2066	 * We only enable or disable Preset Value if it is not already in
2067	 * the requested state. Otherwise, we bail out.
2068	 */
2069	if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
2070		ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2071		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2072		host->flags |= SDHCI_PV_ENABLED;
2073	} else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
2074		ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2075		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2076		host->flags &= ~SDHCI_PV_ENABLED;
2077	}
2078}
2079
2080static void sdhci_card_event(struct mmc_host *mmc)
2081{
2082	struct sdhci_host *host = mmc_priv(mmc);
2083	unsigned long flags;
2084
2085	/* First check if client has provided their own card event */
2086	if (host->ops->card_event)
2087		host->ops->card_event(host);
2088
2089	spin_lock_irqsave(&host->lock, flags);
2090
2091	/* Check host->mrq first in case we are runtime suspended */
2092	if (host->mrq && !sdhci_do_get_cd(host)) {
2093		pr_err("%s: Card removed during transfer!\n",
2094			mmc_hostname(host->mmc));
2095		pr_err("%s: Resetting controller.\n",
2096			mmc_hostname(host->mmc));
2097
2098		sdhci_reset(host, SDHCI_RESET_CMD);
2099		sdhci_reset(host, SDHCI_RESET_DATA);
2100
2101		host->mrq->cmd->error = -ENOMEDIUM;
2102		tasklet_schedule(&host->finish_tasklet);
2103	}
2104
2105	spin_unlock_irqrestore(&host->lock, flags);
2106}
2107
2108static const struct mmc_host_ops sdhci_ops = {
2109	.request	= sdhci_request,
2110	.set_ios	= sdhci_set_ios,
2111	.get_cd		= sdhci_get_cd,
2112	.get_ro		= sdhci_get_ro,
2113	.hw_reset	= sdhci_hw_reset,
2114	.enable_sdio_irq = sdhci_enable_sdio_irq,
2115	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
2116	.execute_tuning			= sdhci_execute_tuning,
2117	.card_event			= sdhci_card_event,
2118	.card_busy	= sdhci_card_busy,
2119};
2120
2121/*****************************************************************************\
2122 *                                                                           *
2123 * Tasklets                                                                  *
2124 *                                                                           *
2125\*****************************************************************************/
2126
2127static void sdhci_tasklet_card(unsigned long param)
2128{
2129	struct sdhci_host *host = (struct sdhci_host*)param;
2130
2131	sdhci_card_event(host->mmc);
2132
2133	mmc_detect_change(host->mmc, msecs_to_jiffies(200));
2134}
2135
2136static void sdhci_tasklet_finish(unsigned long param)
2137{
2138	struct sdhci_host *host;
2139	unsigned long flags;
2140	struct mmc_request *mrq;
2141
2142	host = (struct sdhci_host*)param;
2143
2144	spin_lock_irqsave(&host->lock, flags);
2145
2146	/*
2147	 * If this tasklet gets rescheduled while running, it will
2148	 * be run again afterwards but without any active request.
2149	 */
2150	if (!host->mrq) {
2151		spin_unlock_irqrestore(&host->lock, flags);
2152		return;
2153	}
2154
2155	del_timer(&host->timer);
2156
2157	mrq = host->mrq;
2158
2159	/*
2160	 * The controller needs a reset of internal state machines
2161	 * upon error conditions.
2162	 */
2163	if (!(host->flags & SDHCI_DEVICE_DEAD) &&
2164	    ((mrq->cmd && mrq->cmd->error) ||
2165		 (mrq->data && (mrq->data->error ||
2166		  (mrq->data->stop && mrq->data->stop->error))) ||
2167		   (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
2168
2169		/* Some controllers need this kick or reset won't work here */
2170		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2171			/* This is to force an update */
2172			sdhci_update_clock(host);
2173
2174		/* Spec says we should do both at the same time, but Ricoh
2175		   controllers do not like that. */
2176		sdhci_reset(host, SDHCI_RESET_CMD);
2177		sdhci_reset(host, SDHCI_RESET_DATA);
2178	}
2179
2180	host->mrq = NULL;
2181	host->cmd = NULL;
2182	host->data = NULL;
2183
2184#ifndef SDHCI_USE_LEDS_CLASS
2185	sdhci_deactivate_led(host);
2186#endif
2187
2188	mmiowb();
2189	spin_unlock_irqrestore(&host->lock, flags);
2190
2191	mmc_request_done(host->mmc, mrq);
2192	sdhci_runtime_pm_put(host);
2193}
2194
2195static void sdhci_timeout_timer(unsigned long data)
2196{
2197	struct sdhci_host *host;
2198	unsigned long flags;
2199
2200	host = (struct sdhci_host*)data;
2201
2202	spin_lock_irqsave(&host->lock, flags);
2203
2204	if (host->mrq) {
2205		pr_err("%s: Timeout waiting for hardware "
2206			"interrupt.\n", mmc_hostname(host->mmc));
2207		sdhci_dumpregs(host);
2208
2209		if (host->data) {
2210			host->data->error = -ETIMEDOUT;
2211			sdhci_finish_data(host);
2212		} else {
2213			if (host->cmd)
2214				host->cmd->error = -ETIMEDOUT;
2215			else
2216				host->mrq->cmd->error = -ETIMEDOUT;
2217
2218			tasklet_schedule(&host->finish_tasklet);
2219		}
2220	}
2221
2222	mmiowb();
2223	spin_unlock_irqrestore(&host->lock, flags);
2224}
2225
2226static void sdhci_tuning_timer(unsigned long data)
2227{
2228	struct sdhci_host *host;
2229	unsigned long flags;
2230
2231	host = (struct sdhci_host *)data;
2232
2233	spin_lock_irqsave(&host->lock, flags);
2234
2235	host->flags |= SDHCI_NEEDS_RETUNING;
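	/*
	 * The flag is consumed in the request path, which runs the tuning
	 * procedure before sending the next command once no data transfer
	 * is in progress (see the SDHCI_NEEDS_RETUNING check there).
	 */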
2236
2237	spin_unlock_irqrestore(&host->lock, flags);
2238}
2239
2240/*****************************************************************************\
2241 *                                                                           *
2242 * Interrupt handling                                                        *
2243 *                                                                           *
2244\*****************************************************************************/
2245
2246static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
2247{
2248	BUG_ON(intmask == 0);
2249
2250	if (!host->cmd) {
2251		pr_err("%s: Got command interrupt 0x%08x even "
2252			"though no command operation was in progress.\n",
2253			mmc_hostname(host->mmc), (unsigned)intmask);
2254		sdhci_dumpregs(host);
2255		return;
2256	}
2257
2258	if (intmask & SDHCI_INT_TIMEOUT)
2259		host->cmd->error = -ETIMEDOUT;
2260	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
2261			SDHCI_INT_INDEX))
2262		host->cmd->error = -EILSEQ;
2263
2264	if (host->cmd->error) {
2265		tasklet_schedule(&host->finish_tasklet);
2266		return;
2267	}
2268
2269	/*
2270	 * The host can send an interrupt when the busy state has
2271	 * ended, allowing us to wait without wasting CPU cycles.
2272	 * Unfortunately this is overloaded on the "data complete"
2273	 * interrupt, so we need to take some care when handling
2274	 * it.
2275	 *
2276	 * Note: The 1.0 specification is a bit ambiguous about this
2277	 *       feature so there might be some problems with older
2278	 *       controllers.
2279	 */
2280	if (host->cmd->flags & MMC_RSP_BUSY) {
2281		if (host->cmd->data)
2282			DBG("Cannot wait for busy signal when also "
2283				"doing a data transfer");
2284		else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
2285			return;
2286
2287		/* The controller does not support the end-of-busy IRQ,
2288		 * fall through and take the SDHCI_INT_RESPONSE */
2289	}
2290
2291	if (intmask & SDHCI_INT_RESPONSE)
2292		sdhci_finish_command(host);
2293}
2294
2295#ifdef CONFIG_MMC_DEBUG
2296static void sdhci_show_adma_error(struct sdhci_host *host)
2297{
2298	const char *name = mmc_hostname(host->mmc);
2299	u8 *desc = host->adma_desc;
2300	__le32 *dma;
2301	__le16 *len;
2302	u8 attr;
2303
2304	sdhci_dumpregs(host);
2305
2306	while (true) {
2307		dma = (__le32 *)(desc + 4);
2308		len = (__le16 *)(desc + 2);
2309		attr = *desc;
2310
2311		DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2312		    name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr);
2313
2314		desc += 8;
2315
2316		if (attr & 2)
2317			break;
2318	}
2319}
2320#else
2321static void sdhci_show_adma_error(struct sdhci_host *host) { }
2322#endif
2323
2324static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2325{
2326	u32 command;
2327	BUG_ON(intmask == 0);
2328
2329	/* CMD19 generates _only_ Buffer Read Ready interrupt */
2330	if (intmask & SDHCI_INT_DATA_AVAIL) {
2331		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
2332		if (command == MMC_SEND_TUNING_BLOCK ||
2333		    command == MMC_SEND_TUNING_BLOCK_HS200) {
2334			host->tuning_done = 1;
2335			wake_up(&host->buf_ready_int);
2336			return;
2337		}
2338	}
2339
2340	if (!host->data) {
2341		/*
2342		 * The "data complete" interrupt is also used to
2343		 * indicate that a busy state has ended. See comment
2344		 * above in sdhci_cmd_irq().
2345		 */
2346		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
2347			if (intmask & SDHCI_INT_DATA_END) {
2348				sdhci_finish_command(host);
2349				return;
2350			}
2351		}
2352
2353		pr_err("%s: Got data interrupt 0x%08x even "
2354			"though no data operation was in progress.\n",
2355			mmc_hostname(host->mmc), (unsigned)intmask);
2356		sdhci_dumpregs(host);
2357
2358		return;
2359	}
2360
2361	if (intmask & SDHCI_INT_DATA_TIMEOUT)
2362		host->data->error = -ETIMEDOUT;
2363	else if (intmask & SDHCI_INT_DATA_END_BIT)
2364		host->data->error = -EILSEQ;
2365	else if ((intmask & SDHCI_INT_DATA_CRC) &&
2366		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2367			!= MMC_BUS_TEST_R)
2368		host->data->error = -EILSEQ;
2369	else if (intmask & SDHCI_INT_ADMA_ERROR) {
2370		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2371		sdhci_show_adma_error(host);
2372		host->data->error = -EIO;
2373		if (host->ops->adma_workaround)
2374			host->ops->adma_workaround(host, intmask);
2375	}
2376
2377	if (host->data->error)
2378		sdhci_finish_data(host);
2379	else {
2380		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2381			sdhci_transfer_pio(host);
2382
2383		/*
2384		 * We currently don't do anything fancy with DMA
2385		 * boundaries, but as we can't disable the feature
2386		 * we need to at least restart the transfer.
2387		 *
2388		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
2389		 * should return a valid address to continue from, but as
2390		 * some controllers are faulty, don't trust them.
2391		 */
2392		if (intmask & SDHCI_INT_DMA_END) {
2393			u32 dmastart, dmanow;
2394			dmastart = sg_dma_address(host->data->sg);
2395			dmanow = dmastart + host->data->bytes_xfered;
2396			/*
2397			 * Force update to the next DMA block boundary.
2398			 */
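			/*
			 * Worked example (values hypothetical): with the
			 * default 512 KiB boundary, dmanow = 0x01234567 is
			 * rounded to (0x01234567 & ~0x7ffff) + 0x80000 =
			 * 0x01280000, the start of the next boundary.
			 */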
2399			dmanow = (dmanow &
2400				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
2401				SDHCI_DEFAULT_BOUNDARY_SIZE;
2402			host->data->bytes_xfered = dmanow - dmastart;
2403			DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
2404				" next 0x%08x\n",
2405				mmc_hostname(host->mmc), dmastart,
2406				host->data->bytes_xfered, dmanow);
2407			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
2408		}
2409
2410		if (intmask & SDHCI_INT_DATA_END) {
2411			if (host->cmd) {
2412				/*
2413				 * Data managed to finish before the
2414				 * command completed. Make sure we do
2415				 * things in the proper order.
2416				 */
2417				host->data_early = 1;
2418			} else {
2419				sdhci_finish_data(host);
2420			}
2421		}
2422	}
2423}
2424
2425static irqreturn_t sdhci_irq(int irq, void *dev_id)
2426{
2427	irqreturn_t result;
2428	struct sdhci_host *host = dev_id;
2429	u32 intmask, unexpected = 0;
2430	int cardint = 0, max_loops = 16;
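	/*
	 * max_loops bounds the "again" loop below so that a controller
	 * which keeps raising status bits cannot livelock the handler.
	 */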
2431
2432	spin_lock(&host->lock);
2433
2434	if (host->runtime_suspended) {
2435		spin_unlock(&host->lock);
2436		return IRQ_NONE;
2437	}
2438
2439	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2440
2441	if (!intmask || intmask == 0xffffffff) {
2442		result = IRQ_NONE;
2443		goto out;
2444	}
2445
2446again:
2447	DBG("*** %s got interrupt: 0x%08x\n",
2448		mmc_hostname(host->mmc), intmask);
2449
2450	if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2451		u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
2452			      SDHCI_CARD_PRESENT;
2453
2454		/*
2455		 * There is an observation on i.mx esdhc: the INSERT bit will
2456		 * be immediately set again when it gets cleared, if a card is
2457		 * inserted.  We have to mask the irq to prevent an interrupt
2458		 * storm which would freeze the system.  The REMOVE bit gets
2459		 * the same treatment.
2460		 *
2461		 * More testing is needed here to ensure it works for other
2462		 * platforms though.
2463		 */
2464		sdhci_mask_irqs(host, present ? SDHCI_INT_CARD_INSERT :
2465						SDHCI_INT_CARD_REMOVE);
2466		sdhci_unmask_irqs(host, present ? SDHCI_INT_CARD_REMOVE :
2467						  SDHCI_INT_CARD_INSERT);
2468
2469		sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
2470			     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
2471		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
2472		tasklet_schedule(&host->card_tasklet);
2473	}
2474
2475	if (intmask & SDHCI_INT_CMD_MASK) {
2476		sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
2477			SDHCI_INT_STATUS);
2478		sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
2479	}
2480
2481	if (intmask & SDHCI_INT_DATA_MASK) {
2482		sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK,
2483			SDHCI_INT_STATUS);
2484		sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
2485	}
2486
2487	intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
2488
2489	intmask &= ~SDHCI_INT_ERROR;
2490
2491	if (intmask & SDHCI_INT_BUS_POWER) {
2492		pr_err("%s: Card is consuming too much power!\n",
2493			mmc_hostname(host->mmc));
2494		sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS);
2495	}
2496
2497	intmask &= ~SDHCI_INT_BUS_POWER;
2498
2499	if (intmask & SDHCI_INT_CARD_INT)
2500		cardint = 1;
2501
2502	intmask &= ~SDHCI_INT_CARD_INT;
2503
2504	if (intmask) {
2505		unexpected |= intmask;
2506		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
2507	}
2508
2509	result = IRQ_HANDLED;
2510
2511	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2512
2513	/*
2514	 * If we know we'll call the driver to signal SDIO IRQ, disregard
2515	 * further indications of Card Interrupt in the status to avoid a
2516	 * needless loop.
2517	 */
2518	if (cardint)
2519		intmask &= ~SDHCI_INT_CARD_INT;
2520	if (intmask && --max_loops)
2521		goto again;
2522out:
2523	spin_unlock(&host->lock);
2524
2525	if (unexpected) {
2526		pr_err("%s: Unexpected interrupt 0x%08x.\n",
2527			   mmc_hostname(host->mmc), unexpected);
2528		sdhci_dumpregs(host);
2529	}
2530	/*
2531	 * We have to delay this as it calls back into the driver.
2532	 */
2533	if (cardint)
2534		mmc_signal_sdio_irq(host->mmc);
2535
2536	return result;
2537}
2538
2539/*****************************************************************************\
2540 *                                                                           *
2541 * Suspend/resume                                                            *
2542 *                                                                           *
2543\*****************************************************************************/
2544
2545#ifdef CONFIG_PM
2546void sdhci_enable_irq_wakeups(struct sdhci_host *host)
2547{
2548	u8 val;
2549	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2550			| SDHCI_WAKE_ON_INT;
2551
2552	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2553	val |= mask;
2554	/* Avoid spurious wakeups */
2555	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2556		val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
2557	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2558}
2559EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
2560
2561void sdhci_disable_irq_wakeups(struct sdhci_host *host)
2562{
2563	u8 val;
2564	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2565			| SDHCI_WAKE_ON_INT;
2566
2567	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2568	val &= ~mask;
2569	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2570}
2571EXPORT_SYMBOL_GPL(sdhci_disable_irq_wakeups);
2572
2573int sdhci_suspend_host(struct sdhci_host *host)
2574{
2575	if (host->ops->platform_suspend)
2576		host->ops->platform_suspend(host);
2577
2578	sdhci_disable_card_detection(host);
2579
2580	/* Disable tuning since we are suspending */
2581	if (host->flags & SDHCI_USING_RETUNING_TIMER) {
2582		del_timer_sync(&host->tuning_timer);
2583		host->flags &= ~SDHCI_NEEDS_RETUNING;
2584	}
2585
2586	if (!device_may_wakeup(mmc_dev(host->mmc))) {
2587		sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
2588		free_irq(host->irq, host);
2589	} else {
2590		sdhci_enable_irq_wakeups(host);
2591		enable_irq_wake(host->irq);
2592	}
2593	return 0;
2594}
2595
2596EXPORT_SYMBOL_GPL(sdhci_suspend_host);
2597
2598int sdhci_resume_host(struct sdhci_host *host)
2599{
2600	int ret = 0;
2601
2602	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2603		if (host->ops->enable_dma)
2604			host->ops->enable_dma(host);
2605	}
2606
2607	if (!device_may_wakeup(mmc_dev(host->mmc))) {
2608		ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
2609				  mmc_hostname(host->mmc), host);
2610		if (ret)
2611			return ret;
2612	} else {
2613		sdhci_disable_irq_wakeups(host);
2614		disable_irq_wake(host->irq);
2615	}
2616
2617	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
2618	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
2619		/* Card keeps power but host controller does not */
2620		sdhci_init(host, 0);
2621		host->pwr = 0;
2622		host->clock = 0;
2623		sdhci_do_set_ios(host, &host->mmc->ios);
2624	} else {
2625		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
2626		mmiowb();
2627	}
2628
2629	sdhci_enable_card_detection(host);
2630
2631	if (host->ops->platform_resume)
2632		host->ops->platform_resume(host);
2633
2634	/* Set the re-tuning expiration flag */
2635	if (host->flags & SDHCI_USING_RETUNING_TIMER)
2636		host->flags |= SDHCI_NEEDS_RETUNING;
2637
2638	return ret;
2639}
2640
2641EXPORT_SYMBOL_GPL(sdhci_resume_host);
2642#endif /* CONFIG_PM */
2643
2644#ifdef CONFIG_PM_RUNTIME
2645
2646static int sdhci_runtime_pm_get(struct sdhci_host *host)
2647{
2648	return pm_runtime_get_sync(host->mmc->parent);
2649}
2650
2651static int sdhci_runtime_pm_put(struct sdhci_host *host)
2652{
2653	pm_runtime_mark_last_busy(host->mmc->parent);
2654	return pm_runtime_put_autosuspend(host->mmc->parent);
2655}
2656
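/*
 * Hold a no-resume runtime PM reference while bus power is on, so that
 * the host is not runtime suspended underneath a powered card; the
 * matching put happens in sdhci_runtime_pm_bus_off().
 */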
2657static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
2658{
2659	if (host->runtime_suspended || host->bus_on)
2660		return;
2661	host->bus_on = true;
2662	pm_runtime_get_noresume(host->mmc->parent);
2663}
2664
2665static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
2666{
2667	if (host->runtime_suspended || !host->bus_on)
2668		return;
2669	host->bus_on = false;
2670	pm_runtime_put_noidle(host->mmc->parent);
2671}
2672
2673int sdhci_runtime_suspend_host(struct sdhci_host *host)
2674{
2675	unsigned long flags;
2676	int ret = 0;
2677
2678	/* Disable tuning since we are suspending */
2679	if (host->flags & SDHCI_USING_RETUNING_TIMER) {
2680		del_timer_sync(&host->tuning_timer);
2681		host->flags &= ~SDHCI_NEEDS_RETUNING;
2682	}
2683
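	/*
	 * Mask all interrupts and wait out any handler still running before
	 * marking the host runtime suspended, so that sdhci_irq() never
	 * sees a half-suspended controller.
	 */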
2684	spin_lock_irqsave(&host->lock, flags);
2685	sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
2686	spin_unlock_irqrestore(&host->lock, flags);
2687
2688	synchronize_irq(host->irq);
2689
2690	spin_lock_irqsave(&host->lock, flags);
2691	host->runtime_suspended = true;
2692	spin_unlock_irqrestore(&host->lock, flags);
2693
2694	return ret;
2695}
2696EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
2697
2698int sdhci_runtime_resume_host(struct sdhci_host *host)
2699{
2700	unsigned long flags;
2701	int ret = 0, host_flags = host->flags;
2702
2703	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2704		if (host->ops->enable_dma)
2705			host->ops->enable_dma(host);
2706	}
2707
2708	sdhci_init(host, 0);
2709
2710	/* Force clock and power re-program */
2711	host->pwr = 0;
2712	host->clock = 0;
2713	sdhci_do_set_ios(host, &host->mmc->ios);
2714
2715	sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
2716	if ((host_flags & SDHCI_PV_ENABLED) &&
2717		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
2718		spin_lock_irqsave(&host->lock, flags);
2719		sdhci_enable_preset_value(host, true);
2720		spin_unlock_irqrestore(&host->lock, flags);
2721	}
2722
2723	/* Set the re-tuning expiration flag */
2724	if (host->flags & SDHCI_USING_RETUNING_TIMER)
2725		host->flags |= SDHCI_NEEDS_RETUNING;
2726
2727	spin_lock_irqsave(&host->lock, flags);
2728
2729	host->runtime_suspended = false;
2730
2731	/* Enable SDIO IRQ */
2732	if ((host->flags & SDHCI_SDIO_IRQ_ENABLED))
2733		sdhci_enable_sdio_irq_nolock(host, true);
2734
2735	/* Enable Card Detection */
2736	sdhci_enable_card_detection(host);
2737
2738	spin_unlock_irqrestore(&host->lock, flags);
2739
2740	return ret;
2741}
2742EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
2743
2744#endif
2745
2746/*****************************************************************************\
2747 *                                                                           *
2748 * Device allocation/registration                                            *
2749 *                                                                           *
2750\*****************************************************************************/
2751
2752struct sdhci_host *sdhci_alloc_host(struct device *dev,
2753	size_t priv_size)
2754{
2755	struct mmc_host *mmc;
2756	struct sdhci_host *host;
2757
2758	WARN_ON(dev == NULL);
2759
2760	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
2761	if (!mmc)
2762		return ERR_PTR(-ENOMEM);
2763
2764	host = mmc_priv(mmc);
2765	host->mmc = mmc;
2766
2767	return host;
2768}
2769
2770EXPORT_SYMBOL_GPL(sdhci_alloc_host);
2771
2772int sdhci_add_host(struct sdhci_host *host)
2773{
2774	struct mmc_host *mmc;
2775	u32 caps[2] = {0, 0};
2776	u32 max_current_caps;
2777	unsigned int ocr_avail;
2778	int ret;
2779
2780	WARN_ON(host == NULL);
2781	if (host == NULL)
2782		return -EINVAL;
2783
2784	mmc = host->mmc;
2785
2786	if (debug_quirks)
2787		host->quirks = debug_quirks;
2788	if (debug_quirks2)
2789		host->quirks2 = debug_quirks2;
2790
2791	sdhci_reset(host, SDHCI_RESET_ALL);
2792
2793	host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
2794	host->version = (host->version & SDHCI_SPEC_VER_MASK)
2795				>> SDHCI_SPEC_VER_SHIFT;
2796	if (host->version > SDHCI_SPEC_300) {
2797		pr_err("%s: Unknown controller version (%d). "
2798			"You may experience problems.\n", mmc_hostname(mmc),
2799			host->version);
2800	}
2801
2802	caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
2803		sdhci_readl(host, SDHCI_CAPABILITIES);
2804
2805	if (host->version >= SDHCI_SPEC_300)
2806		caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
2807			host->caps1 :
2808			sdhci_readl(host, SDHCI_CAPABILITIES_1);
2809
2810	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
2811		host->flags |= SDHCI_USE_SDMA;
2812	else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
2813		DBG("Controller doesn't have SDMA capability\n");
2814	else
2815		host->flags |= SDHCI_USE_SDMA;
2816
2817	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
2818		(host->flags & SDHCI_USE_SDMA)) {
2819		DBG("Disabling DMA as it is marked broken\n");
2820		host->flags &= ~SDHCI_USE_SDMA;
2821	}
2822
2823	if ((host->version >= SDHCI_SPEC_200) &&
2824		(caps[0] & SDHCI_CAN_DO_ADMA2))
2825		host->flags |= SDHCI_USE_ADMA;
2826
2827	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
2828		(host->flags & SDHCI_USE_ADMA)) {
2829		DBG("Disabling ADMA as it is marked broken\n");
2830		host->flags &= ~SDHCI_USE_ADMA;
2831	}
2832
2833	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
2834		if (host->ops->enable_dma) {
2835			if (host->ops->enable_dma(host)) {
2836				pr_warning("%s: No suitable DMA "
2837					"available. Falling back to PIO.\n",
2838					mmc_hostname(mmc));
2839				host->flags &=
2840					~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
2841			}
2842		}
2843	}
2844
2845	if (host->flags & SDHCI_USE_ADMA) {
2846		/*
2847		 * We need to allocate descriptors for all sg entries
2848		 * (128) and potentially one alignment transfer for
2849		 * each of those entries.
2850		 */
2851		host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL);
2852		host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);
2853		if (!host->adma_desc || !host->align_buffer) {
2854			kfree(host->adma_desc);
2855			kfree(host->align_buffer);
2856			pr_warning("%s: Unable to allocate ADMA "
2857				"buffers. Falling back to standard DMA.\n",
2858				mmc_hostname(mmc));
2859			host->flags &= ~SDHCI_USE_ADMA;
2860		}
2861	}
2862
2863	/*
2864	 * If we use DMA, then it's up to the caller to set the DMA
2865	 * mask, but PIO does not need the hw shim so we set a new
2866	 * mask here in that case.
2867	 */
2868	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
2869		host->dma_mask = DMA_BIT_MASK(64);
2870		mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
2871	}
2872
2873	if (host->version >= SDHCI_SPEC_300)
2874		host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
2875			>> SDHCI_CLOCK_BASE_SHIFT;
2876	else
2877		host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
2878			>> SDHCI_CLOCK_BASE_SHIFT;
2879
2880	host->max_clk *= 1000000;
2881	if (host->max_clk == 0 || host->quirks &
2882			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
2883		if (!host->ops->get_max_clock) {
2884			pr_err("%s: Hardware doesn't specify base clock "
2885			       "frequency.\n", mmc_hostname(mmc));
2886			return -ENODEV;
2887		}
2888		host->max_clk = host->ops->get_max_clock(host);
2889	}
2890
2891	/*
2892	 * In case of Host Controller v3.00, find out whether clock
2893	 * multiplier is supported.
2894	 */
2895	host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
2896			SDHCI_CLOCK_MUL_SHIFT;
2897
2898	/*
2899	 * If the value of Clock Multiplier is 0, programmable clock mode
2900	 * is not supported; otherwise the actual clock multiplier is one
2901	 * more than the value of Clock Multiplier in the Capabilities
2902	 * Register.
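	 *
	 * For example, a Clock Multiplier field of 3 yields an actual
	 * multiplier of 4, so f_max below can become four times the base
	 * clock.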
2903	 */
2904	if (host->clk_mul)
2905		host->clk_mul += 1;
2906
2907	/*
2908	 * Set host parameters.
2909	 */
2910	mmc->ops = &sdhci_ops;
2911	mmc->f_max = host->max_clk;
2912	if (host->ops->get_min_clock)
2913		mmc->f_min = host->ops->get_min_clock(host);
2914	else if (host->version >= SDHCI_SPEC_300) {
2915		if (host->clk_mul) {
2916			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
2917			mmc->f_max = host->max_clk * host->clk_mul;
2918		} else
2919			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
2920	} else
2921		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
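	/*
	 * The divisor limits come from the spec: v3.00 has a 10-bit divider
	 * (2046 as encoded here), earlier versions an 8-bit one (256), and
	 * programmable clock mode divides the multiplied clock by at most
	 * 1024, which is where the 1024 above comes from.
	 */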
2922
2923	host->timeout_clk =
2924		(caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
2925	if (host->timeout_clk == 0) {
2926		if (host->ops->get_timeout_clock) {
2927			host->timeout_clk = host->ops->get_timeout_clock(host);
2928		} else if (!(host->quirks &
2929				SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
2930			pr_err("%s: Hardware doesn't specify timeout clock "
2931			       "frequency.\n", mmc_hostname(mmc));
2932			return -ENODEV;
2933		}
2934	}
2935	if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
2936		host->timeout_clk *= 1000;
2937
2938	if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
2939		host->timeout_clk = mmc->f_max / 1000;
2940
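	/*
	 * The data timeout counter maxes out at 2^27 timeout-clock cycles;
	 * with timeout_clk in kHz the division below yields the longest
	 * supported busy timeout in milliseconds.
	 */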
2941	mmc->max_busy_timeout = (1 << 27) / host->timeout_clk;
2942
2943	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
2944
2945	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
2946		host->flags |= SDHCI_AUTO_CMD12;
2947
2948	/* Auto-CMD23 stuff only works in ADMA or PIO. */
2949	if ((host->version >= SDHCI_SPEC_300) &&
2950	    ((host->flags & SDHCI_USE_ADMA) ||
2951	     !(host->flags & SDHCI_USE_SDMA))) {
2952		host->flags |= SDHCI_AUTO_CMD23;
2953		DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
2954	} else {
2955		DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
2956	}
2957
2958	/*
2959	 * A controller may support 8-bit width, but the board itself
2960	 * might not have the pins brought out.  Boards that support
2961	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
2962	 * their platform code before calling sdhci_add_host(), and we
2963	 * won't assume 8-bit width for hosts without that CAP.
2964	 */
2965	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
2966		mmc->caps |= MMC_CAP_4_BIT_DATA;
2967
2968	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
2969		mmc->caps &= ~MMC_CAP_CMD23;
2970
2971	if (caps[0] & SDHCI_CAN_DO_HISPD)
2972		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2973
2974	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
2975	    !(host->mmc->caps & MMC_CAP_NONREMOVABLE))
2976		mmc->caps |= MMC_CAP_NEEDS_POLL;
2977
2978	/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
2979	host->vqmmc = regulator_get_optional(mmc_dev(mmc), "vqmmc");
2980	if (IS_ERR_OR_NULL(host->vqmmc)) {
2981		if (PTR_ERR(host->vqmmc) < 0) {
2982			pr_info("%s: no vqmmc regulator found\n",
2983				mmc_hostname(mmc));
2984			host->vqmmc = NULL;
2985		}
2986	} else {
2987		ret = regulator_enable(host->vqmmc);
2988		if (!regulator_is_supported_voltage(host->vqmmc, 1700000,
2989			1950000))
2990			caps[1] &= ~(SDHCI_SUPPORT_SDR104 |
2991					SDHCI_SUPPORT_SDR50 |
2992					SDHCI_SUPPORT_DDR50);
2993		if (ret) {
2994			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
2995				mmc_hostname(mmc), ret);
2996			host->vqmmc = NULL;
2997		}
2998	}
2999
3000	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)
3001		caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3002		       SDHCI_SUPPORT_DDR50);
3003
3004	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
3005	if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3006		       SDHCI_SUPPORT_DDR50))
3007		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
3008
3009	/* SDR104 support also implies SDR50 support */
3010	if (caps[1] & SDHCI_SUPPORT_SDR104) {
3011		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3012		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
3013		 * field can be promoted to support HS200.
3014		 */
3015		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
3016			mmc->caps2 |= MMC_CAP2_HS200;
3017	} else if (caps[1] & SDHCI_SUPPORT_SDR50)
3018		mmc->caps |= MMC_CAP_UHS_SDR50;
3019
3020	if ((caps[1] & SDHCI_SUPPORT_DDR50) &&
3021		!(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
3022		mmc->caps |= MMC_CAP_UHS_DDR50;
3023
3024	/* Does the host need tuning for SDR50? */
3025	if (caps[1] & SDHCI_USE_SDR50_TUNING)
3026		host->flags |= SDHCI_SDR50_NEEDS_TUNING;
3027
3028	/* Does the host need tuning for SDR104 / HS200? */
3029	if (mmc->caps2 & MMC_CAP2_HS200)
3030		host->flags |= SDHCI_SDR104_NEEDS_TUNING;
3031
3032	/* Driver Type(s) (A, C, D) supported by the host */
3033	if (caps[1] & SDHCI_DRIVER_TYPE_A)
3034		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
3035	if (caps[1] & SDHCI_DRIVER_TYPE_C)
3036		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
3037	if (caps[1] & SDHCI_DRIVER_TYPE_D)
3038		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
3039
3040	/* Initial value for re-tuning timer count */
3041	host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
3042			      SDHCI_RETUNING_TIMER_COUNT_SHIFT;
3043
3044	/*
3045	 * If the Re-tuning Timer is not disabled, the actual re-tuning
3046	 * interval will be 2 ^ (n - 1) seconds.
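	 *
	 * For example, a register value of 4 gives 1 << 3 = 8, so the
	 * re-tuning timer fires every 8 seconds.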
3047	 */
3048	if (host->tuning_count)
3049		host->tuning_count = 1 << (host->tuning_count - 1);
3050
3051	/* Re-tuning mode supported by the Host Controller */
3052	host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
3053			     SDHCI_RETUNING_MODE_SHIFT;
3054
3055	ocr_avail = 0;
3056
3057	host->vmmc = regulator_get_optional(mmc_dev(mmc), "vmmc");
3058	if (IS_ERR_OR_NULL(host->vmmc)) {
3059		if (PTR_ERR(host->vmmc) < 0) {
3060			pr_info("%s: no vmmc regulator found\n",
3061				mmc_hostname(mmc));
3062			host->vmmc = NULL;
3063		}
3064	}
3065
3066#ifdef CONFIG_REGULATOR
3067	/*
3068	 * Voltage range check makes sense only if regulator reports
3069	 * any voltage value.
3070	 */
3071	if (host->vmmc && regulator_get_voltage(host->vmmc) > 0) {
3072		ret = regulator_is_supported_voltage(host->vmmc, 2700000,
3073			3600000);
3074		if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_330)))
3075			caps[0] &= ~SDHCI_CAN_VDD_330;
3076		if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_300)))
3077			caps[0] &= ~SDHCI_CAN_VDD_300;
3078		ret = regulator_is_supported_voltage(host->vmmc, 1700000,
3079			1950000);
3080		if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_180)))
3081			caps[0] &= ~SDHCI_CAN_VDD_180;
3082	}
3083#endif /* CONFIG_REGULATOR */
3084
3085	/*
3086	 * According to SD Host Controller spec v3.00, if the Host System
3087	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
3088	 * the value is meaningful only if Voltage Support in the Capabilities
3089	 * register is set. The actual current value is 4 times the register
3090	 * value.
3091	 */
3092	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
3093	if (!max_current_caps && host->vmmc) {
3094		u32 curr = regulator_get_current_limit(host->vmmc);
3095		if (curr > 0) {
3096
3097			/* convert to SDHCI_MAX_CURRENT format */
3098			curr = curr/1000;  /* convert to mA */
3099			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
3100
3101			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
3102			max_current_caps =
3103				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
3104				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
3105				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
3106		}
3107	}
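	/*
	 * Worked example (figures hypothetical): a 400000 uA regulator
	 * limit becomes 400 mA, i.e. 400 / 4 = 100 units of 4 mA, and that
	 * value is replicated into the 3.3 V, 3.0 V and 1.8 V fields above.
	 */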
3108
3109	if (caps[0] & SDHCI_CAN_VDD_330) {
3110		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
3111
3112		mmc->max_current_330 = ((max_current_caps &
3113				   SDHCI_MAX_CURRENT_330_MASK) >>
3114				   SDHCI_MAX_CURRENT_330_SHIFT) *
3115				   SDHCI_MAX_CURRENT_MULTIPLIER;
3116	}
3117	if (caps[0] & SDHCI_CAN_VDD_300) {
3118		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
3119
3120		mmc->max_current_300 = ((max_current_caps &
3121				   SDHCI_MAX_CURRENT_300_MASK) >>
3122				   SDHCI_MAX_CURRENT_300_SHIFT) *
3123				   SDHCI_MAX_CURRENT_MULTIPLIER;
3124	}
3125	if (caps[0] & SDHCI_CAN_VDD_180) {
3126		ocr_avail |= MMC_VDD_165_195;
3127
3128		mmc->max_current_180 = ((max_current_caps &
3129				   SDHCI_MAX_CURRENT_180_MASK) >>
3130				   SDHCI_MAX_CURRENT_180_SHIFT) *
3131				   SDHCI_MAX_CURRENT_MULTIPLIER;
3132	}
3133
3134	if (host->ocr_mask)
3135		ocr_avail = host->ocr_mask;
3136
3137	mmc->ocr_avail = ocr_avail;
3138	mmc->ocr_avail_sdio = ocr_avail;
3139	if (host->ocr_avail_sdio)
3140		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
3141	mmc->ocr_avail_sd = ocr_avail;
3142	if (host->ocr_avail_sd)
3143		mmc->ocr_avail_sd &= host->ocr_avail_sd;
3144	else /* normal SD controllers don't support 1.8V */
3145		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
3146	mmc->ocr_avail_mmc = ocr_avail;
3147	if (host->ocr_avail_mmc)
3148		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
3149
3150	if (mmc->ocr_avail == 0) {
3151		pr_err("%s: Hardware doesn't report any "
3152			"supported voltages.\n", mmc_hostname(mmc));
3153		return -ENODEV;
3154	}
3155
3156	spin_lock_init(&host->lock);
3157
3158	/*
3159	 * Maximum number of segments. Depends on if the hardware
3160	 * can do scatter/gather or not.
3161	 */
3162	if (host->flags & SDHCI_USE_ADMA)
3163		mmc->max_segs = 128;
3164	else if (host->flags & SDHCI_USE_SDMA)
3165		mmc->max_segs = 1;
3166	else /* PIO */
3167		mmc->max_segs = 128;
3168
3169	/*
3170	 * Maximum number of sectors in one transfer. Limited by DMA boundary
3171	 * size (512KiB).
3172	 */
3173	mmc->max_req_size = 524288;
3174
3175	/*
3176	 * Maximum segment size. Could be one segment with the maximum number
3177	 * of bytes. When doing hardware scatter/gather, each entry cannot
3178	 * be larger than 64 KiB though.
3179	 */
3180	if (host->flags & SDHCI_USE_ADMA) {
3181		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
3182			mmc->max_seg_size = 65535;
3183		else
3184			mmc->max_seg_size = 65536;
3185	} else {
3186		mmc->max_seg_size = mmc->max_req_size;
3187	}
3188
3189	/*
3190	 * Maximum block size. This varies from controller to controller and
3191	 * is specified in the capabilities register.
3192	 */
3193	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
3194		mmc->max_blk_size = 2;
3195	} else {
3196		mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
3197				SDHCI_MAX_BLOCK_SHIFT;
3198		if (mmc->max_blk_size >= 3) {
3199			pr_warning("%s: Invalid maximum block size, "
3200				"assuming 512 bytes\n", mmc_hostname(mmc));
3201			mmc->max_blk_size = 0;
3202		}
3203	}
3204
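	/*
	 * The capability field encodes 512 << n, i.e. 0 -> 512, 1 -> 1024
	 * and 2 -> 2048 bytes; the quirk above forces the 2048-byte case.
	 */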
3205	mmc->max_blk_size = 512 << mmc->max_blk_size;
3206
3207	/*
3208	 * Maximum block count.
3209	 */
3210	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
3211
3212	/*
3213	 * Init tasklets.
3214	 */
3215	tasklet_init(&host->card_tasklet,
3216		sdhci_tasklet_card, (unsigned long)host);
3217	tasklet_init(&host->finish_tasklet,
3218		sdhci_tasklet_finish, (unsigned long)host);
3219
3220	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
3221
3222	if (host->version >= SDHCI_SPEC_300) {
3223		init_waitqueue_head(&host->buf_ready_int);
3224
3225		/* Initialize re-tuning timer */
3226		init_timer(&host->tuning_timer);
3227		host->tuning_timer.data = (unsigned long)host;
3228		host->tuning_timer.function = sdhci_tuning_timer;
3229	}
3230
3231	sdhci_init(host, 0);
3232
3233	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
3234		mmc_hostname(mmc), host);
3235	if (ret) {
3236		pr_err("%s: Failed to request IRQ %d: %d\n",
3237		       mmc_hostname(mmc), host->irq, ret);
3238		goto untasklet;
3239	}
3240
3241#ifdef CONFIG_MMC_DEBUG
3242	sdhci_dumpregs(host);
3243#endif

#ifdef SDHCI_USE_LEDS_CLASS
	snprintf(host->led_name, sizeof(host->led_name),
		"%s::", mmc_hostname(mmc));
	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	ret = led_classdev_register(mmc_dev(mmc), &host->led);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto reset;
	}
#endif

	mmiowb();

	mmc_add_host(mmc);

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ? "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

#ifdef SDHCI_USE_LEDS_CLASS
reset:
	sdhci_reset(host, SDHCI_RESET_ALL);
	sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
	free_irq(host->irq, host);
#endif
untasklet:
	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);

	return ret;
}

EXPORT_SYMBOL_GPL(sdhci_add_host);
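
/*
 * Illustrative sketch (not taken from this file) of how a platform glue
 * driver typically registers a controller; the ops table, private struct
 * and probe context here are hypothetical:
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(struct my_priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	host->hw_name = "my-sdhci";
 *	host->ops = &my_sdhci_ops;
 *	host->irq = platform_get_irq(pdev, 0);
 *	host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
 *	ret = sdhci_add_host(host);
 *	if (ret)
 *		sdhci_free_host(host);
 */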
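/*
 * sdhci_remove_host - unregister and shut down a host
 * @host: host to remove
 * @dead: non-zero when the hardware is no longer reachable (e.g. surprise
 *	  removal); register access is then skipped and any in-flight
 *	  request is completed with -ENOMEDIUM.
 */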
void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (host->mrq) {
			pr_err("%s: Controller removed during transfer!\n",
			       mmc_hostname(host->mmc));

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(host->mmc);

#ifdef SDHCI_USE_LEDS_CLASS
	led_classdev_unregister(&host->led);
#endif

	if (!dead)
		sdhci_reset(host, SDHCI_RESET_ALL);

	sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);

	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);

	if (host->vmmc) {
		regulator_disable(host->vmmc);
		regulator_put(host->vmmc);
	}

	if (host->vqmmc) {
		regulator_disable(host->vqmmc);
		regulator_put(host->vqmmc);
	}

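	/*
	 * All IRQ, timer and tasklet activity has been quiesced above, so
	 * the ADMA descriptor and alignment buffers can be freed safely.
	 */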
	kfree(host->adma_desc);
	kfree(host->align_buffer);

	host->adma_desc = NULL;
	host->align_buffer = NULL;
}

EXPORT_SYMBOL_GPL(sdhci_remove_host);
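
/*
 * Illustrative removal-path sketch (not taken from this file): glue
 * drivers normally pair the two calls as
 *
 *	sdhci_remove_host(host, 0);
 *	sdhci_free_host(host);
 *
 * and pass a non-zero "dead" argument only when the device has already
 * disappeared.
 */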

void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}

EXPORT_SYMBOL_GPL(sdhci_free_host);

/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
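
/*
 * Example (illustrative): the quirk masks can be forced at module load
 * time, e.g. when testing a suspected controller erratum:
 *
 *	modprobe sdhci debug_quirks=0x00000001
 */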