   1/*
   2 *  linux/drivers/mmc/core/core.c
   3 *
   4 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
   5 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
   6 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
   7 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as
  11 * published by the Free Software Foundation.
  12 */
  13#include <linux/module.h>
  14#include <linux/init.h>
  15#include <linux/interrupt.h>
  16#include <linux/completion.h>
  17#include <linux/device.h>
  18#include <linux/delay.h>
  19#include <linux/pagemap.h>
  20#include <linux/err.h>
  21#include <linux/leds.h>
  22#include <linux/scatterlist.h>
  23#include <linux/log2.h>
  24#include <linux/regulator/consumer.h>
  25#include <linux/pm_runtime.h>
  26#include <linux/suspend.h>
  27
  28#include <linux/mmc/card.h>
  29#include <linux/mmc/host.h>
  30#include <linux/mmc/mmc.h>
  31#include <linux/mmc/sd.h>
  32
  33#include "core.h"
  34#include "bus.h"
  35#include "host.h"
  36#include "sdio_bus.h"
  37
  38#include "mmc_ops.h"
  39#include "sd_ops.h"
  40#include "sdio_ops.h"
  41
  42static struct workqueue_struct *workqueue;
  43
  44/*
  45 * Enabling software CRCs on the data blocks can be a significant (30%)
  46 * performance cost, and for other reasons may not always be desired.
   47 * So we allow it to be disabled.
  48 */
  49int use_spi_crc = 1;
  50module_param(use_spi_crc, bool, 0);
  51
  52/*
  53 * We normally treat cards as removed during suspend if they are not
  54 * known to be on a non-removable bus, to avoid the risk of writing
  55 * back data to a different card after resume.  Allow this to be
  56 * overridden if necessary.
  57 */
  58#ifdef CONFIG_MMC_UNSAFE_RESUME
  59int mmc_assume_removable;
  60#else
  61int mmc_assume_removable = 1;
  62#endif
  63EXPORT_SYMBOL(mmc_assume_removable);
  64module_param_named(removable, mmc_assume_removable, bool, 0644);
  65MODULE_PARM_DESC(
  66	removable,
  67	"MMC/SD cards are removable and may be removed during suspend");
  68
  69/*
  70 * Internal function. Schedule delayed work in the MMC work queue.
  71 */
  72static int mmc_schedule_delayed_work(struct delayed_work *work,
  73				     unsigned long delay)
  74{
  75	return queue_delayed_work(workqueue, work, delay);
  76}
  77
  78/*
  79 * Internal function. Flush all scheduled work from the MMC work queue.
  80 */
  81static void mmc_flush_scheduled_work(void)
  82{
  83	flush_workqueue(workqueue);
  84}
  85
  86/**
  87 *	mmc_request_done - finish processing an MMC request
  88 *	@host: MMC host which completed request
   89 *	@mrq: MMC request which has completed
  90 *
  91 *	MMC drivers should call this function when they have completed
  92 *	their processing of a request.
  93 */
  94void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
  95{
  96	struct mmc_command *cmd = mrq->cmd;
  97	int err = cmd->error;
  98
  99	if (err && cmd->retries && mmc_host_is_spi(host)) {
 100		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
 101			cmd->retries = 0;
 102	}
 103
 104	if (err && cmd->retries) {
 105		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
 106			mmc_hostname(host), cmd->opcode, err);
 107
 108		cmd->retries--;
 109		cmd->error = 0;
 110		host->ops->request(host, mrq);
 111	} else {
 112		led_trigger_event(host->led, LED_OFF);
 113
 114		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
 115			mmc_hostname(host), cmd->opcode, err,
 116			cmd->resp[0], cmd->resp[1],
 117			cmd->resp[2], cmd->resp[3]);
 118
 119		if (mrq->data) {
 120			pr_debug("%s:     %d bytes transferred: %d\n",
 121				mmc_hostname(host),
 122				mrq->data->bytes_xfered, mrq->data->error);
 123		}
 124
 125		if (mrq->stop) {
 126			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
 127				mmc_hostname(host), mrq->stop->opcode,
 128				mrq->stop->error,
 129				mrq->stop->resp[0], mrq->stop->resp[1],
 130				mrq->stop->resp[2], mrq->stop->resp[3]);
 131		}
 132
 133		if (mrq->done)
 134			mrq->done(mrq);
 135
 136		mmc_host_clk_release(host);
 137	}
 138}
 139
 140EXPORT_SYMBOL(mmc_request_done);
 141
 142static void
 143mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 144{
 145#ifdef CONFIG_MMC_DEBUG
 146	unsigned int i, sz;
 147	struct scatterlist *sg;
 148#endif
 149
 150	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
 151		 mmc_hostname(host), mrq->cmd->opcode,
 152		 mrq->cmd->arg, mrq->cmd->flags);
 153
 154	if (mrq->data) {
 155		pr_debug("%s:     blksz %d blocks %d flags %08x "
 156			"tsac %d ms nsac %d\n",
 157			mmc_hostname(host), mrq->data->blksz,
 158			mrq->data->blocks, mrq->data->flags,
 159			mrq->data->timeout_ns / 1000000,
 160			mrq->data->timeout_clks);
 161	}
 162
 163	if (mrq->stop) {
 164		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
 165			 mmc_hostname(host), mrq->stop->opcode,
 166			 mrq->stop->arg, mrq->stop->flags);
 167	}
 168
 169	WARN_ON(!host->claimed);
 170
 171	mrq->cmd->error = 0;
 172	mrq->cmd->mrq = mrq;
 173	if (mrq->data) {
 174		BUG_ON(mrq->data->blksz > host->max_blk_size);
 175		BUG_ON(mrq->data->blocks > host->max_blk_count);
 176		BUG_ON(mrq->data->blocks * mrq->data->blksz >
 177			host->max_req_size);
 178
 179#ifdef CONFIG_MMC_DEBUG
 180		sz = 0;
 181		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
 182			sz += sg->length;
 183		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
 184#endif
 185
 186		mrq->cmd->data = mrq->data;
 187		mrq->data->error = 0;
 188		mrq->data->mrq = mrq;
 189		if (mrq->stop) {
 190			mrq->data->stop = mrq->stop;
 191			mrq->stop->error = 0;
 192			mrq->stop->mrq = mrq;
 193		}
 194	}
 195	mmc_host_clk_hold(host);
 196	led_trigger_event(host->led, LED_FULL);
 197	host->ops->request(host, mrq);
 198}
 199
 200static void mmc_wait_done(struct mmc_request *mrq)
 201{
 202	complete(&mrq->completion);
 203}
 204
 205static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
 206{
 207	init_completion(&mrq->completion);
 208	mrq->done = mmc_wait_done;
 209	mmc_start_request(host, mrq);
 210}
 211
 212static void mmc_wait_for_req_done(struct mmc_host *host,
 213				  struct mmc_request *mrq)
 214{
 215	wait_for_completion(&mrq->completion);
 216}
 217
 218/**
 219 *	mmc_pre_req - Prepare for a new request
 220 *	@host: MMC host to prepare command
 221 *	@mrq: MMC request to prepare for
  222 *	@is_first_req: true if there is no previously started request
  223 *                     that may run in parallel to this call, otherwise false
 224 *
  225 *	mmc_pre_req() is called prior to mmc_start_req() to let the
 226 *	host prepare for the new request. Preparation of a request may be
 227 *	performed while another request is running on the host.
 228 */
 229static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
 230		 bool is_first_req)
 231{
 232	if (host->ops->pre_req)
 233		host->ops->pre_req(host, mrq, is_first_req);
 234}
 235
 236/**
 237 *	mmc_post_req - Post process a completed request
 238 *	@host: MMC host to post process command
 239 *	@mrq: MMC request to post process for
 240 *	@err: Error, if non zero, clean up any resources made in pre_req
 241 *
 242 *	Let the host post process a completed request. Post processing of
  243 *	a request may be performed while another request is running.
 244 */
 245static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
 246			 int err)
 247{
 248	if (host->ops->post_req)
 249		host->ops->post_req(host, mrq, err);
 250}
 251
 252/**
 253 *	mmc_start_req - start a non-blocking request
 254 *	@host: MMC host to start command
 255 *	@areq: async request to start
 256 *	@error: out parameter returns 0 for success, otherwise non zero
 257 *
 258 *	Start a new MMC custom command request for a host.
  259 *	If there is an ongoing async request, wait for completion
  260 *	of that request, then start the new one and return.
 261 *	Does not wait for the new request to complete.
 262 *
 263 *      Returns the completed request, NULL in case of none completed.
  264 *	Wait for an ongoing request (previously started) to complete and
 265 *	return the completed request. If there is no ongoing request, NULL
 266 *	is returned without waiting. NULL is not an error condition.
 267 */
 268struct mmc_async_req *mmc_start_req(struct mmc_host *host,
 269				    struct mmc_async_req *areq, int *error)
 270{
 271	int err = 0;
 272	struct mmc_async_req *data = host->areq;
 273
 274	/* Prepare a new request */
 275	if (areq)
 276		mmc_pre_req(host, areq->mrq, !host->areq);
 277
 278	if (host->areq) {
 279		mmc_wait_for_req_done(host, host->areq->mrq);
 280		err = host->areq->err_check(host->card, host->areq);
 281		if (err) {
 282			mmc_post_req(host, host->areq->mrq, 0);
 283			if (areq)
 284				mmc_post_req(host, areq->mrq, -EINVAL);
 285
 286			host->areq = NULL;
 287			goto out;
 288		}
 289	}
 290
 291	if (areq)
 292		__mmc_start_req(host, areq->mrq);
 293
 294	if (host->areq)
 295		mmc_post_req(host, host->areq->mrq, 0);
 296
 297	host->areq = areq;
 298 out:
 299	if (error)
 300		*error = err;
 301	return data;
 302}
 303EXPORT_SYMBOL(mmc_start_req);
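/*
 * Illustrative sketch (hypothetical caller, not taken from this file): how
 * a card driver might pipeline requests with mmc_start_req().  "my_mrq",
 * "my_check" and handle_error() are assumed names; only the
 * mmc_start_req() semantics come from the code above.
 *
 *	struct mmc_async_req my_areq;
 *	struct mmc_async_req *prev;
 *	int error;
 *
 *	my_areq.mrq = &my_mrq;		// a fully prepared struct mmc_request
 *	my_areq.err_check = my_check;	// driver-supplied status check
 *
 *	// Starts my_areq and hands back the previously started request (or
 *	// NULL), after waiting for it and running its err_check callback.
 *	prev = mmc_start_req(host, &my_areq, &error);
 *	if (error)
 *		handle_error(prev);	// the previous request failed;
 *					// my_areq was not started
 */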
 304
 305/**
 306 *	mmc_wait_for_req - start a request and wait for completion
 307 *	@host: MMC host to start command
 308 *	@mrq: MMC request to start
 309 *
 310 *	Start a new MMC custom command request for a host, and wait
 311 *	for the command to complete. Does not attempt to parse the
 312 *	response.
 313 */
 314void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
 315{
 316	__mmc_start_req(host, mrq);
 317	mmc_wait_for_req_done(host, mrq);
 318}
 319EXPORT_SYMBOL(mmc_wait_for_req);
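/*
 * Illustrative sketch (hypothetical caller): a single-block read built by
 * hand and run synchronously with mmc_wait_for_req().  The field names
 * match struct mmc_request, struct mmc_command and struct mmc_data as used
 * elsewhere in this file; "sg", "sg_len" and "sector" are assumed to have
 * been prepared by the caller, and the host must already be claimed.
 *
 *	struct mmc_request mrq = {0};
 *	struct mmc_command cmd = {0};
 *	struct mmc_data data = {0};
 *	int err;
 *
 *	cmd.opcode = MMC_READ_SINGLE_BLOCK;
 *	cmd.arg = sector;	// sector for block-addressed cards, byte address otherwise
 *	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 *
 *	data.blksz = 512;
 *	data.blocks = 1;
 *	data.flags = MMC_DATA_READ;
 *	data.sg = sg;
 *	data.sg_len = sg_len;
 *	mmc_set_data_timeout(&data, card);
 *
 *	mrq.cmd = &cmd;
 *	mrq.data = &data;
 *
 *	mmc_wait_for_req(card->host, &mrq);
 *	err = cmd.error ? cmd.error : data.error;
 */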
 320
 321/**
 322 *	mmc_wait_for_cmd - start a command and wait for completion
 323 *	@host: MMC host to start command
 324 *	@cmd: MMC command to start
 325 *	@retries: maximum number of retries
 326 *
 327 *	Start a new MMC command for a host, and wait for the command
 328 *	to complete.  Return any error that occurred while the command
 329 *	was executing.  Do not attempt to parse the response.
 330 */
 331int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
 332{
 333	struct mmc_request mrq = {0};
 334
 335	WARN_ON(!host->claimed);
 336
 337	memset(cmd->resp, 0, sizeof(cmd->resp));
 338	cmd->retries = retries;
 339
 340	mrq.cmd = cmd;
 341	cmd->data = NULL;
 342
 343	mmc_wait_for_req(host, &mrq);
 344
 345	return cmd->error;
 346}
 347
 348EXPORT_SYMBOL(mmc_wait_for_cmd);
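/*
 * Illustrative sketch: issuing a bare CMD13 (SEND_STATUS) with
 * mmc_wait_for_cmd(), mirroring the pattern mmc_do_erase() uses further
 * down in this file.  The host must already be claimed.
 *
 *	struct mmc_command cmd = {0};
 *	u32 status;
 *	int err;
 *
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *
 *	err = mmc_wait_for_cmd(card->host, &cmd, 3);	// up to 3 retries
 *	if (!err)
 *		status = cmd.resp[0];
 */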
 349
 350/**
 351 *	mmc_set_data_timeout - set the timeout for a data command
 352 *	@data: data phase for command
 353 *	@card: the MMC card associated with the data transfer
 354 *
 355 *	Computes the data timeout parameters according to the
 356 *	correct algorithm given the card type.
 357 */
 358void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
 359{
 360	unsigned int mult;
 361
 362	/*
 363	 * SDIO cards only define an upper 1 s limit on access.
 364	 */
 365	if (mmc_card_sdio(card)) {
 366		data->timeout_ns = 1000000000;
 367		data->timeout_clks = 0;
 368		return;
 369	}
 370
 371	/*
 372	 * SD cards use a 100 multiplier rather than 10
 373	 */
 374	mult = mmc_card_sd(card) ? 100 : 10;
 375
 376	/*
 377	 * Scale up the multiplier (and therefore the timeout) by
 378	 * the r2w factor for writes.
 379	 */
 380	if (data->flags & MMC_DATA_WRITE)
 381		mult <<= card->csd.r2w_factor;
 382
 383	data->timeout_ns = card->csd.tacc_ns * mult;
 384	data->timeout_clks = card->csd.tacc_clks * mult;
 385
 386	/*
 387	 * SD cards also have an upper limit on the timeout.
 388	 */
 389	if (mmc_card_sd(card)) {
 390		unsigned int timeout_us, limit_us;
 391
 392		timeout_us = data->timeout_ns / 1000;
 393		if (mmc_host_clk_rate(card->host))
 394			timeout_us += data->timeout_clks * 1000 /
 395				(mmc_host_clk_rate(card->host) / 1000);
 396
 397		if (data->flags & MMC_DATA_WRITE)
 398			/*
 399			 * The limit is really 250 ms, but that is
 400			 * insufficient for some crappy cards.
 401			 */
 402			limit_us = 300000;
 403		else
 404			limit_us = 100000;
 405
 406		/*
 407		 * SDHC cards always use these fixed values.
 408		 */
 409		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
 410			data->timeout_ns = limit_us * 1000;
 411			data->timeout_clks = 0;
 412		}
 413	}
 414	/*
 415	 * Some cards need very high timeouts if driven in SPI mode.
 416	 * The worst observed timeout was 900ms after writing a
 417	 * continuous stream of data until the internal logic
 418	 * overflowed.
 419	 */
 420	if (mmc_host_is_spi(card->host)) {
 421		if (data->flags & MMC_DATA_WRITE) {
 422			if (data->timeout_ns < 1000000000)
 423				data->timeout_ns = 1000000000;	/* 1s */
 424		} else {
 425			if (data->timeout_ns < 100000000)
 426				data->timeout_ns =  100000000;	/* 100ms */
 427		}
 428	}
 429}
 430EXPORT_SYMBOL(mmc_set_data_timeout);
 431
 432/**
 433 *	mmc_align_data_size - pads a transfer size to a more optimal value
 434 *	@card: the MMC card associated with the data transfer
 435 *	@sz: original transfer size
 436 *
 437 *	Pads the original data size with a number of extra bytes in
 438 *	order to avoid controller bugs and/or performance hits
 439 *	(e.g. some controllers revert to PIO for certain sizes).
 440 *
 441 *	Returns the improved size, which might be unmodified.
 442 *
 443 *	Note that this function is only relevant when issuing a
 444 *	single scatter gather entry.
 445 */
 446unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
 447{
 448	/*
 449	 * FIXME: We don't have a system for the controller to tell
 450	 * the core about its problems yet, so for now we just 32-bit
 451	 * align the size.
 452	 */
 453	sz = ((sz + 3) / 4) * 4;
 454
 455	return sz;
 456}
 457EXPORT_SYMBOL(mmc_align_data_size);
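/*
 * Illustrative sketch (hypothetical caller): an SDIO function driver
 * padding an odd-sized single-entry transfer before allocating its
 * bounce buffer; "len" and "buf" are assumed names.
 *
 *	len = mmc_align_data_size(card, len);	// e.g. 13 becomes 16
 *	buf = kmalloc(len, GFP_KERNEL);
 */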
 458
 459/**
 460 *	mmc_host_enable - enable a host.
 461 *	@host: mmc host to enable
 462 *
 463 *	Hosts that support power saving can use the 'enable' and 'disable'
 464 *	methods to exit and enter power saving states. For more information
 465 *	see comments for struct mmc_host_ops.
 466 */
 467int mmc_host_enable(struct mmc_host *host)
 468{
 469	if (!(host->caps & MMC_CAP_DISABLE))
 470		return 0;
 471
 472	if (host->en_dis_recurs)
 473		return 0;
 474
 475	if (host->nesting_cnt++)
 476		return 0;
 477
 478	cancel_delayed_work_sync(&host->disable);
 479
 480	if (host->enabled)
 481		return 0;
 482
 483	if (host->ops->enable) {
 484		int err;
 485
 486		host->en_dis_recurs = 1;
 487		err = host->ops->enable(host);
 488		host->en_dis_recurs = 0;
 489
 490		if (err) {
 491			pr_debug("%s: enable error %d\n",
 492				 mmc_hostname(host), err);
 493			return err;
 494		}
 495	}
 496	host->enabled = 1;
 497	return 0;
 498}
 499EXPORT_SYMBOL(mmc_host_enable);
 500
 501static int mmc_host_do_disable(struct mmc_host *host, int lazy)
 502{
 503	if (host->ops->disable) {
 504		int err;
 505
 506		host->en_dis_recurs = 1;
 507		err = host->ops->disable(host, lazy);
 508		host->en_dis_recurs = 0;
 509
 510		if (err < 0) {
 511			pr_debug("%s: disable error %d\n",
 512				 mmc_hostname(host), err);
 513			return err;
 514		}
 515		if (err > 0) {
 516			unsigned long delay = msecs_to_jiffies(err);
 517
 518			mmc_schedule_delayed_work(&host->disable, delay);
 519		}
 520	}
 521	host->enabled = 0;
 522	return 0;
 523}
 524
 525/**
 526 *	mmc_host_disable - disable a host.
 527 *	@host: mmc host to disable
 528 *
 529 *	Hosts that support power saving can use the 'enable' and 'disable'
 530 *	methods to exit and enter power saving states. For more information
 531 *	see comments for struct mmc_host_ops.
 532 */
 533int mmc_host_disable(struct mmc_host *host)
 534{
 535	int err;
 536
 537	if (!(host->caps & MMC_CAP_DISABLE))
 538		return 0;
 539
 540	if (host->en_dis_recurs)
 541		return 0;
 542
 543	if (--host->nesting_cnt)
 544		return 0;
 545
 546	if (!host->enabled)
 547		return 0;
 548
 549	err = mmc_host_do_disable(host, 0);
 550	return err;
 551}
 552EXPORT_SYMBOL(mmc_host_disable);
 553
 554/**
 555 *	__mmc_claim_host - exclusively claim a host
 556 *	@host: mmc host to claim
 557 *	@abort: whether or not the operation should be aborted
 558 *
 559 *	Claim a host for a set of operations.  If @abort is non null and
  560 *	dereferences to a non-zero value then this will return prematurely with
 561 *	that non-zero value without acquiring the lock.  Returns zero
 562 *	with the lock held otherwise.
 563 */
 564int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
 565{
 566	DECLARE_WAITQUEUE(wait, current);
 567	unsigned long flags;
 568	int stop;
 569
 570	might_sleep();
 571
 572	add_wait_queue(&host->wq, &wait);
 573	spin_lock_irqsave(&host->lock, flags);
 574	while (1) {
 575		set_current_state(TASK_UNINTERRUPTIBLE);
 576		stop = abort ? atomic_read(abort) : 0;
 577		if (stop || !host->claimed || host->claimer == current)
 578			break;
 579		spin_unlock_irqrestore(&host->lock, flags);
 580		schedule();
 581		spin_lock_irqsave(&host->lock, flags);
 582	}
 583	set_current_state(TASK_RUNNING);
 584	if (!stop) {
 585		host->claimed = 1;
 586		host->claimer = current;
 587		host->claim_cnt += 1;
 588	} else
 589		wake_up(&host->wq);
 590	spin_unlock_irqrestore(&host->lock, flags);
 591	remove_wait_queue(&host->wq, &wait);
 592	if (!stop)
 593		mmc_host_enable(host);
 594	return stop;
 595}
 596
 597EXPORT_SYMBOL(__mmc_claim_host);
 598
 599/**
 600 *	mmc_try_claim_host - try exclusively to claim a host
 601 *	@host: mmc host to claim
 602 *
 603 *	Returns %1 if the host is claimed, %0 otherwise.
 604 */
 605int mmc_try_claim_host(struct mmc_host *host)
 606{
 607	int claimed_host = 0;
 608	unsigned long flags;
 609
 610	spin_lock_irqsave(&host->lock, flags);
 611	if (!host->claimed || host->claimer == current) {
 612		host->claimed = 1;
 613		host->claimer = current;
 614		host->claim_cnt += 1;
 615		claimed_host = 1;
 616	}
 617	spin_unlock_irqrestore(&host->lock, flags);
 618	return claimed_host;
 619}
 620EXPORT_SYMBOL(mmc_try_claim_host);
 621
 622/**
 623 *	mmc_do_release_host - release a claimed host
 624 *	@host: mmc host to release
 625 *
 626 *	If you successfully claimed a host, this function will
 627 *	release it again.
 628 */
 629void mmc_do_release_host(struct mmc_host *host)
 630{
 631	unsigned long flags;
 632
 633	spin_lock_irqsave(&host->lock, flags);
 634	if (--host->claim_cnt) {
 635		/* Release for nested claim */
 636		spin_unlock_irqrestore(&host->lock, flags);
 637	} else {
 638		host->claimed = 0;
 639		host->claimer = NULL;
 640		spin_unlock_irqrestore(&host->lock, flags);
 641		wake_up(&host->wq);
 642	}
 643}
 644EXPORT_SYMBOL(mmc_do_release_host);
 645
 646void mmc_host_deeper_disable(struct work_struct *work)
 647{
 648	struct mmc_host *host =
 649		container_of(work, struct mmc_host, disable.work);
 650
 651	/* If the host is claimed then we do not want to disable it anymore */
 652	if (!mmc_try_claim_host(host))
 653		return;
 654	mmc_host_do_disable(host, 1);
 655	mmc_do_release_host(host);
 656}
 657
 658/**
 659 *	mmc_host_lazy_disable - lazily disable a host.
 660 *	@host: mmc host to disable
 661 *
 662 *	Hosts that support power saving can use the 'enable' and 'disable'
 663 *	methods to exit and enter power saving states. For more information
 664 *	see comments for struct mmc_host_ops.
 665 */
 666int mmc_host_lazy_disable(struct mmc_host *host)
 667{
 668	if (!(host->caps & MMC_CAP_DISABLE))
 669		return 0;
 670
 671	if (host->en_dis_recurs)
 672		return 0;
 673
 674	if (--host->nesting_cnt)
 675		return 0;
 676
 677	if (!host->enabled)
 678		return 0;
 679
 680	if (host->disable_delay) {
 681		mmc_schedule_delayed_work(&host->disable,
 682				msecs_to_jiffies(host->disable_delay));
 683		return 0;
 684	} else
 685		return mmc_host_do_disable(host, 1);
 686}
 687EXPORT_SYMBOL(mmc_host_lazy_disable);
 688
 689/**
 690 *	mmc_release_host - release a host
 691 *	@host: mmc host to release
 692 *
 693 *	Release a MMC host, allowing others to claim the host
 694 *	for their operations.
 695 */
 696void mmc_release_host(struct mmc_host *host)
 697{
 698	WARN_ON(!host->claimed);
 699
 700	mmc_host_lazy_disable(host);
 701
 702	mmc_do_release_host(host);
 703}
 704
 705EXPORT_SYMBOL(mmc_release_host);
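/*
 * Illustrative sketch: the claim/release pattern callers of the command
 * helpers above are expected to follow.  mmc_claim_host() is the simple
 * wrapper around __mmc_claim_host() with a NULL abort pointer.
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 *	mmc_release_host(card->host);
 */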
 706
 707/*
 708 * Internal function that does the actual ios call to the host driver,
 709 * optionally printing some debug output.
 710 */
 711static inline void mmc_set_ios(struct mmc_host *host)
 712{
 713	struct mmc_ios *ios = &host->ios;
 714
 715	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
 716		"width %u timing %u\n",
 717		 mmc_hostname(host), ios->clock, ios->bus_mode,
 718		 ios->power_mode, ios->chip_select, ios->vdd,
 719		 ios->bus_width, ios->timing);
 720
 721	if (ios->clock > 0)
 722		mmc_set_ungated(host);
 723	host->ops->set_ios(host, ios);
 724}
 725
 726/*
 727 * Control chip select pin on a host.
 728 */
 729void mmc_set_chip_select(struct mmc_host *host, int mode)
 730{
 731	mmc_host_clk_hold(host);
 732	host->ios.chip_select = mode;
 733	mmc_set_ios(host);
 734	mmc_host_clk_release(host);
 735}
 736
 737/*
 738 * Sets the host clock to the highest possible frequency that
  739 * does not exceed "hz".
 740 */
 741static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
 742{
 743	WARN_ON(hz < host->f_min);
 744
 745	if (hz > host->f_max)
 746		hz = host->f_max;
 747
 748	host->ios.clock = hz;
 749	mmc_set_ios(host);
 750}
 751
 752void mmc_set_clock(struct mmc_host *host, unsigned int hz)
 753{
 754	mmc_host_clk_hold(host);
 755	__mmc_set_clock(host, hz);
 756	mmc_host_clk_release(host);
 757}
 758
 759#ifdef CONFIG_MMC_CLKGATE
 760/*
 761 * This gates the clock by setting it to 0 Hz.
 762 */
 763void mmc_gate_clock(struct mmc_host *host)
 764{
 765	unsigned long flags;
 766
 767	spin_lock_irqsave(&host->clk_lock, flags);
 768	host->clk_old = host->ios.clock;
 769	host->ios.clock = 0;
 770	host->clk_gated = true;
 771	spin_unlock_irqrestore(&host->clk_lock, flags);
 772	mmc_set_ios(host);
 773}
 774
 775/*
 776 * This restores the clock from gating by using the cached
 777 * clock value.
 778 */
 779void mmc_ungate_clock(struct mmc_host *host)
 780{
 781	/*
 782	 * We should previously have gated the clock, so the clock shall
 783	 * be 0 here! The clock may however be 0 during initialization,
 784	 * when some request operations are performed before setting
 785	 * the frequency. When ungate is requested in that situation
 786	 * we just ignore the call.
 787	 */
 788	if (host->clk_old) {
 789		BUG_ON(host->ios.clock);
 790		/* This call will also set host->clk_gated to false */
 791		__mmc_set_clock(host, host->clk_old);
 792	}
 793}
 794
 795void mmc_set_ungated(struct mmc_host *host)
 796{
 797	unsigned long flags;
 798
 799	/*
 800	 * We've been given a new frequency while the clock is gated,
 801	 * so make sure we regard this as ungating it.
 802	 */
 803	spin_lock_irqsave(&host->clk_lock, flags);
 804	host->clk_gated = false;
 805	spin_unlock_irqrestore(&host->clk_lock, flags);
 806}
 807
 808#else
 809void mmc_set_ungated(struct mmc_host *host)
 810{
 811}
 812#endif
 813
 814/*
 815 * Change the bus mode (open drain/push-pull) of a host.
 816 */
 817void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
 818{
 819	mmc_host_clk_hold(host);
 820	host->ios.bus_mode = mode;
 821	mmc_set_ios(host);
 822	mmc_host_clk_release(host);
 823}
 824
 825/*
 826 * Change data bus width of a host.
 827 */
 828void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
 829{
 830	mmc_host_clk_hold(host);
 831	host->ios.bus_width = width;
 832	mmc_set_ios(host);
 833	mmc_host_clk_release(host);
 834}
 835
 836/**
 837 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 838 * @vdd:	voltage (mV)
 839 * @low_bits:	prefer low bits in boundary cases
 840 *
 841 * This function returns the OCR bit number according to the provided @vdd
 842 * value. If conversion is not possible a negative errno value returned.
 843 *
 844 * Depending on the @low_bits flag the function prefers low or high OCR bits
 845 * on boundary voltages. For example,
 846 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 847 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 848 *
  849 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
 850 */
 851static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
 852{
 853	const int max_bit = ilog2(MMC_VDD_35_36);
 854	int bit;
 855
 856	if (vdd < 1650 || vdd > 3600)
 857		return -EINVAL;
 858
 859	if (vdd >= 1650 && vdd <= 1950)
 860		return ilog2(MMC_VDD_165_195);
 861
 862	if (low_bits)
 863		vdd -= 1;
 864
 865	/* Base 2000 mV, step 100 mV, bit's base 8. */
 866	bit = (vdd - 2000) / 100 + 8;
 867	if (bit > max_bit)
 868		return max_bit;
 869	return bit;
 870}
 871
 872/**
 873 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 874 * @vdd_min:	minimum voltage value (mV)
 875 * @vdd_max:	maximum voltage value (mV)
 876 *
 877 * This function returns the OCR mask bits according to the provided @vdd_min
 878 * and @vdd_max values. If conversion is not possible the function returns 0.
 879 *
 880 * Notes wrt boundary cases:
 881 * This function sets the OCR bits for all boundary voltages, for example
 882 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 883 * MMC_VDD_34_35 mask.
 884 */
 885u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
 886{
 887	u32 mask = 0;
 888
 889	if (vdd_max < vdd_min)
 890		return 0;
 891
 892	/* Prefer high bits for the boundary vdd_max values. */
 893	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
 894	if (vdd_max < 0)
 895		return 0;
 896
 897	/* Prefer low bits for the boundary vdd_min values. */
 898	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
 899	if (vdd_min < 0)
 900		return 0;
 901
 902	/* Fill the mask, from max bit to min bit. */
 903	while (vdd_max >= vdd_min)
 904		mask |= 1 << vdd_max--;
 905
 906	return mask;
 907}
 908EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
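/*
 * Illustrative sketch: a host driver with a fixed 3.3V supply converting
 * that range into ocr_avail at probe time, using the boundary example
 * documented above.
 *
 *	mmc->ocr_avail = mmc_vddrange_to_ocrmask(3300, 3400);
 *	// == MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35
 */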
 909
 910#ifdef CONFIG_REGULATOR
 911
 912/**
 913 * mmc_regulator_get_ocrmask - return mask of supported voltages
 914 * @supply: regulator to use
 915 *
 916 * This returns either a negative errno, or a mask of voltages that
 917 * can be provided to MMC/SD/SDIO devices using the specified voltage
 918 * regulator.  This would normally be called before registering the
 919 * MMC host adapter.
 920 */
 921int mmc_regulator_get_ocrmask(struct regulator *supply)
 922{
 923	int			result = 0;
 924	int			count;
 925	int			i;
 926
 927	count = regulator_count_voltages(supply);
 928	if (count < 0)
 929		return count;
 930
 931	for (i = 0; i < count; i++) {
 932		int		vdd_uV;
 933		int		vdd_mV;
 934
 935		vdd_uV = regulator_list_voltage(supply, i);
 936		if (vdd_uV <= 0)
 937			continue;
 938
 939		vdd_mV = vdd_uV / 1000;
 940		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
 941	}
 942
 943	return result;
 944}
 945EXPORT_SYMBOL(mmc_regulator_get_ocrmask);
 946
 947/**
 948 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 949 * @mmc: the host to regulate
 950 * @supply: regulator to use
 951 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 952 *
 953 * Returns zero on success, else negative errno.
 954 *
 955 * MMC host drivers may use this to enable or disable a regulator using
 956 * a particular supply voltage.  This would normally be called from the
 957 * set_ios() method.
 958 */
 959int mmc_regulator_set_ocr(struct mmc_host *mmc,
 960			struct regulator *supply,
 961			unsigned short vdd_bit)
 962{
 963	int			result = 0;
 964	int			min_uV, max_uV;
 965
 966	if (vdd_bit) {
 967		int		tmp;
 968		int		voltage;
 969
 970		/* REVISIT mmc_vddrange_to_ocrmask() may have set some
 971		 * bits this regulator doesn't quite support ... don't
 972		 * be too picky, most cards and regulators are OK with
 973		 * a 0.1V range goof (it's a small error percentage).
 974		 */
 975		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
 976		if (tmp == 0) {
 977			min_uV = 1650 * 1000;
 978			max_uV = 1950 * 1000;
 979		} else {
 980			min_uV = 1900 * 1000 + tmp * 100 * 1000;
 981			max_uV = min_uV + 100 * 1000;
 982		}
 983
 984		/* avoid needless changes to this voltage; the regulator
 985		 * might not allow this operation
 986		 */
 987		voltage = regulator_get_voltage(supply);
 988		if (voltage < 0)
 989			result = voltage;
 990		else if (voltage < min_uV || voltage > max_uV)
 991			result = regulator_set_voltage(supply, min_uV, max_uV);
 992		else
 993			result = 0;
 994
 995		if (result == 0 && !mmc->regulator_enabled) {
 996			result = regulator_enable(supply);
 997			if (!result)
 998				mmc->regulator_enabled = true;
 999		}
1000	} else if (mmc->regulator_enabled) {
1001		result = regulator_disable(supply);
1002		if (result == 0)
1003			mmc->regulator_enabled = false;
1004	}
1005
1006	if (result)
1007		dev_err(mmc_dev(mmc),
1008			"could not set regulator OCR (%d)\n", result);
1009	return result;
1010}
1011EXPORT_SYMBOL(mmc_regulator_set_ocr);
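/*
 * Illustrative sketch (hypothetical driver fields): using the regulator
 * helpers from a host driver.  "host->vcc" is assumed to be a
 * struct regulator obtained with regulator_get() at probe time.
 *
 *	// at probe, before mmc_add_host():
 *	mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
 *
 *	// from the driver's .set_ios() method:
 *	mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);  // vdd == 0 powers off
 */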
1012
1013#endif /* CONFIG_REGULATOR */
1014
1015/*
1016 * Mask off any voltages we don't support and select
1017 * the lowest voltage
1018 */
1019u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1020{
1021	int bit;
1022
1023	ocr &= host->ocr_avail;
1024
1025	bit = ffs(ocr);
1026	if (bit) {
1027		bit -= 1;
1028
1029		ocr &= 3 << bit;
1030
1031		mmc_host_clk_hold(host);
1032		host->ios.vdd = bit;
1033		mmc_set_ios(host);
1034		mmc_host_clk_release(host);
1035	} else {
1036		pr_warning("%s: host doesn't support card's voltages\n",
1037				mmc_hostname(host));
1038		ocr = 0;
1039	}
1040
1041	return ocr;
1042}
1043
1044int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
1045{
1046	struct mmc_command cmd = {0};
1047	int err = 0;
1048
1049	BUG_ON(!host);
1050
1051	/*
1052	 * Send CMD11 only if the request is to switch the card to
1053	 * 1.8V signalling.
1054	 */
1055	if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
1056		cmd.opcode = SD_SWITCH_VOLTAGE;
1057		cmd.arg = 0;
1058		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1059
1060		err = mmc_wait_for_cmd(host, &cmd, 0);
1061		if (err)
1062			return err;
1063
1064		if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
1065			return -EIO;
1066	}
1067
1068	host->ios.signal_voltage = signal_voltage;
1069
1070	if (host->ops->start_signal_voltage_switch)
1071		err = host->ops->start_signal_voltage_switch(host, &host->ios);
1072
1073	return err;
1074}
1075
1076/*
1077 * Select timing parameters for host.
1078 */
1079void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1080{
1081	mmc_host_clk_hold(host);
1082	host->ios.timing = timing;
1083	mmc_set_ios(host);
1084	mmc_host_clk_release(host);
1085}
1086
1087/*
1088 * Select appropriate driver type for host.
1089 */
1090void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1091{
1092	mmc_host_clk_hold(host);
1093	host->ios.drv_type = drv_type;
1094	mmc_set_ios(host);
1095	mmc_host_clk_release(host);
1096}
1097
1098/*
1099 * Apply power to the MMC stack.  This is a two-stage process.
1100 * First, we enable power to the card without the clock running.
1101 * We then wait a bit for the power to stabilise.  Finally,
1102 * enable the bus drivers and clock to the card.
1103 *
 1104 * We must _NOT_ enable the clock prior to power stabilising.
1105 *
1106 * If a host does all the power sequencing itself, ignore the
1107 * initial MMC_POWER_UP stage.
1108 */
1109static void mmc_power_up(struct mmc_host *host)
1110{
1111	int bit;
1112
1113	mmc_host_clk_hold(host);
1114
1115	/* If ocr is set, we use it */
1116	if (host->ocr)
1117		bit = ffs(host->ocr) - 1;
1118	else
1119		bit = fls(host->ocr_avail) - 1;
1120
1121	host->ios.vdd = bit;
1122	if (mmc_host_is_spi(host)) {
1123		host->ios.chip_select = MMC_CS_HIGH;
1124		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1125	} else {
1126		host->ios.chip_select = MMC_CS_DONTCARE;
1127		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
1128	}
1129	host->ios.power_mode = MMC_POWER_UP;
1130	host->ios.bus_width = MMC_BUS_WIDTH_1;
1131	host->ios.timing = MMC_TIMING_LEGACY;
1132	mmc_set_ios(host);
1133
1134	/*
1135	 * This delay should be sufficient to allow the power supply
1136	 * to reach the minimum voltage.
1137	 */
1138	mmc_delay(10);
1139
1140	host->ios.clock = host->f_init;
1141
1142	host->ios.power_mode = MMC_POWER_ON;
1143	mmc_set_ios(host);
1144
1145	/*
 1146	 * This delay must be at least 74 clock cycles, or 1 ms, or the
1147	 * time required to reach a stable voltage.
1148	 */
1149	mmc_delay(10);
1150
1151	mmc_host_clk_release(host);
1152}
1153
1154static void mmc_power_off(struct mmc_host *host)
1155{
1156	mmc_host_clk_hold(host);
1157
1158	host->ios.clock = 0;
1159	host->ios.vdd = 0;
1160
1161	/*
1162	 * Reset ocr mask to be the highest possible voltage supported for
1163	 * this mmc host. This value will be used at next power up.
1164	 */
1165	host->ocr = 1 << (fls(host->ocr_avail) - 1);
1166
1167	if (!mmc_host_is_spi(host)) {
1168		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
1169		host->ios.chip_select = MMC_CS_DONTCARE;
1170	}
1171	host->ios.power_mode = MMC_POWER_OFF;
1172	host->ios.bus_width = MMC_BUS_WIDTH_1;
1173	host->ios.timing = MMC_TIMING_LEGACY;
1174	mmc_set_ios(host);
1175
1176	mmc_host_clk_release(host);
1177}
1178
1179/*
1180 * Cleanup when the last reference to the bus operator is dropped.
1181 */
1182static void __mmc_release_bus(struct mmc_host *host)
1183{
1184	BUG_ON(!host);
1185	BUG_ON(host->bus_refs);
1186	BUG_ON(!host->bus_dead);
1187
1188	host->bus_ops = NULL;
1189}
1190
1191/*
1192 * Increase reference count of bus operator
1193 */
1194static inline void mmc_bus_get(struct mmc_host *host)
1195{
1196	unsigned long flags;
1197
1198	spin_lock_irqsave(&host->lock, flags);
1199	host->bus_refs++;
1200	spin_unlock_irqrestore(&host->lock, flags);
1201}
1202
1203/*
1204 * Decrease reference count of bus operator and free it if
1205 * it is the last reference.
1206 */
1207static inline void mmc_bus_put(struct mmc_host *host)
1208{
1209	unsigned long flags;
1210
1211	spin_lock_irqsave(&host->lock, flags);
1212	host->bus_refs--;
1213	if ((host->bus_refs == 0) && host->bus_ops)
1214		__mmc_release_bus(host);
1215	spin_unlock_irqrestore(&host->lock, flags);
1216}
1217
1218/*
1219 * Assign a mmc bus handler to a host. Only one bus handler may control a
1220 * host at any given time.
1221 */
1222void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1223{
1224	unsigned long flags;
1225
1226	BUG_ON(!host);
1227	BUG_ON(!ops);
1228
1229	WARN_ON(!host->claimed);
1230
1231	spin_lock_irqsave(&host->lock, flags);
1232
1233	BUG_ON(host->bus_ops);
1234	BUG_ON(host->bus_refs);
1235
1236	host->bus_ops = ops;
1237	host->bus_refs = 1;
1238	host->bus_dead = 0;
1239
1240	spin_unlock_irqrestore(&host->lock, flags);
1241}
1242
1243/*
1244 * Remove the current bus handler from a host. Assumes that there are
1245 * no interesting cards left, so the bus is powered down.
1246 */
1247void mmc_detach_bus(struct mmc_host *host)
1248{
1249	unsigned long flags;
1250
1251	BUG_ON(!host);
1252
1253	WARN_ON(!host->claimed);
1254	WARN_ON(!host->bus_ops);
1255
1256	spin_lock_irqsave(&host->lock, flags);
1257
1258	host->bus_dead = 1;
1259
1260	spin_unlock_irqrestore(&host->lock, flags);
1261
1262	mmc_power_off(host);
1263
1264	mmc_bus_put(host);
1265}
1266
1267/**
1268 *	mmc_detect_change - process change of state on a MMC socket
1269 *	@host: host which changed state.
1270 *	@delay: optional delay to wait before detection (jiffies)
1271 *
1272 *	MMC drivers should call this when they detect a card has been
1273 *	inserted or removed. The MMC layer will confirm that any
1274 *	present card is still functional, and initialize any newly
1275 *	inserted.
1276 */
1277void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1278{
1279#ifdef CONFIG_MMC_DEBUG
1280	unsigned long flags;
1281	spin_lock_irqsave(&host->lock, flags);
1282	WARN_ON(host->removed);
1283	spin_unlock_irqrestore(&host->lock, flags);
1284#endif
1285
1286	mmc_schedule_delayed_work(&host->detect, delay);
1287}
1288
1289EXPORT_SYMBOL(mmc_detect_change);
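/*
 * Illustrative sketch (hypothetical handler): a host driver's card-detect
 * interrupt reporting an insertion/removal event with a debounce delay.
 *
 *	static irqreturn_t my_cd_irq(int irq, void *dev_id)
 *	{
 *		struct mmc_host *mmc = dev_id;
 *
 *		mmc_detect_change(mmc, msecs_to_jiffies(200));
 *		return IRQ_HANDLED;
 *	}
 */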
1290
1291void mmc_init_erase(struct mmc_card *card)
1292{
1293	unsigned int sz;
1294
1295	if (is_power_of_2(card->erase_size))
1296		card->erase_shift = ffs(card->erase_size) - 1;
1297	else
1298		card->erase_shift = 0;
1299
1300	/*
1301	 * It is possible to erase an arbitrarily large area of an SD or MMC
1302	 * card.  That is not desirable because it can take a long time
1303	 * (minutes) potentially delaying more important I/O, and also the
 1304	 * timeout calculations become increasingly over-estimated.
1305	 * Consequently, 'pref_erase' is defined as a guide to limit erases
1306	 * to that size and alignment.
1307	 *
1308	 * For SD cards that define Allocation Unit size, limit erases to one
1309	 * Allocation Unit at a time.  For MMC cards that define High Capacity
1310	 * Erase Size, whether it is switched on or not, limit to that size.
1311	 * Otherwise just have a stab at a good value.  For modern cards it
1312	 * will end up being 4MiB.  Note that if the value is too small, it
1313	 * can end up taking longer to erase.
1314	 */
1315	if (mmc_card_sd(card) && card->ssr.au) {
1316		card->pref_erase = card->ssr.au;
1317		card->erase_shift = ffs(card->ssr.au) - 1;
1318	} else if (card->ext_csd.hc_erase_size) {
1319		card->pref_erase = card->ext_csd.hc_erase_size;
1320	} else {
1321		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
1322		if (sz < 128)
1323			card->pref_erase = 512 * 1024 / 512;
1324		else if (sz < 512)
1325			card->pref_erase = 1024 * 1024 / 512;
1326		else if (sz < 1024)
1327			card->pref_erase = 2 * 1024 * 1024 / 512;
1328		else
1329			card->pref_erase = 4 * 1024 * 1024 / 512;
1330		if (card->pref_erase < card->erase_size)
1331			card->pref_erase = card->erase_size;
1332		else {
1333			sz = card->pref_erase % card->erase_size;
1334			if (sz)
1335				card->pref_erase += card->erase_size - sz;
1336		}
1337	}
1338}
1339
1340static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1341				          unsigned int arg, unsigned int qty)
1342{
1343	unsigned int erase_timeout;
1344
1345	if (card->ext_csd.erase_group_def & 1) {
1346		/* High Capacity Erase Group Size uses HC timeouts */
1347		if (arg == MMC_TRIM_ARG)
1348			erase_timeout = card->ext_csd.trim_timeout;
1349		else
1350			erase_timeout = card->ext_csd.hc_erase_timeout;
1351	} else {
1352		/* CSD Erase Group Size uses write timeout */
1353		unsigned int mult = (10 << card->csd.r2w_factor);
1354		unsigned int timeout_clks = card->csd.tacc_clks * mult;
1355		unsigned int timeout_us;
1356
1357		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
1358		if (card->csd.tacc_ns < 1000000)
1359			timeout_us = (card->csd.tacc_ns * mult) / 1000;
1360		else
1361			timeout_us = (card->csd.tacc_ns / 1000) * mult;
1362
1363		/*
1364		 * ios.clock is only a target.  The real clock rate might be
1365		 * less but not that much less, so fudge it by multiplying by 2.
1366		 */
1367		timeout_clks <<= 1;
1368		timeout_us += (timeout_clks * 1000) /
1369			      (mmc_host_clk_rate(card->host) / 1000);
1370
1371		erase_timeout = timeout_us / 1000;
1372
1373		/*
1374		 * Theoretically, the calculation could underflow so round up
1375		 * to 1ms in that case.
1376		 */
1377		if (!erase_timeout)
1378			erase_timeout = 1;
1379	}
1380
1381	/* Multiplier for secure operations */
1382	if (arg & MMC_SECURE_ARGS) {
1383		if (arg == MMC_SECURE_ERASE_ARG)
1384			erase_timeout *= card->ext_csd.sec_erase_mult;
1385		else
1386			erase_timeout *= card->ext_csd.sec_trim_mult;
1387	}
1388
1389	erase_timeout *= qty;
1390
1391	/*
1392	 * Ensure at least a 1 second timeout for SPI as per
1393	 * 'mmc_set_data_timeout()'
1394	 */
1395	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
1396		erase_timeout = 1000;
1397
1398	return erase_timeout;
1399}
1400
1401static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
1402					 unsigned int arg,
1403					 unsigned int qty)
1404{
1405	unsigned int erase_timeout;
1406
1407	if (card->ssr.erase_timeout) {
1408		/* Erase timeout specified in SD Status Register (SSR) */
1409		erase_timeout = card->ssr.erase_timeout * qty +
1410				card->ssr.erase_offset;
1411	} else {
1412		/*
1413		 * Erase timeout not specified in SD Status Register (SSR) so
1414		 * use 250ms per write block.
1415		 */
1416		erase_timeout = 250 * qty;
1417	}
1418
1419	/* Must not be less than 1 second */
1420	if (erase_timeout < 1000)
1421		erase_timeout = 1000;
1422
1423	return erase_timeout;
1424}
1425
1426static unsigned int mmc_erase_timeout(struct mmc_card *card,
1427				      unsigned int arg,
1428				      unsigned int qty)
1429{
1430	if (mmc_card_sd(card))
1431		return mmc_sd_erase_timeout(card, arg, qty);
1432	else
1433		return mmc_mmc_erase_timeout(card, arg, qty);
1434}
1435
1436static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1437			unsigned int to, unsigned int arg)
1438{
1439	struct mmc_command cmd = {0};
1440	unsigned int qty = 0;
1441	int err;
1442
1443	/*
1444	 * qty is used to calculate the erase timeout which depends on how many
1445	 * erase groups (or allocation units in SD terminology) are affected.
1446	 * We count erasing part of an erase group as one erase group.
1447	 * For SD, the allocation units are always a power of 2.  For MMC, the
 1448	 * erase group size is almost certainly also a power of 2, but the JEDEC
 1449	 * standard does not seem to insist on that, so we fall back to
1450	 * division in that case.  SD may not specify an allocation unit size,
1451	 * in which case the timeout is based on the number of write blocks.
1452	 *
1453	 * Note that the timeout for secure trim 2 will only be correct if the
1454	 * number of erase groups specified is the same as the total of all
1455	 * preceding secure trim 1 commands.  Since the power may have been
1456	 * lost since the secure trim 1 commands occurred, it is generally
1457	 * impossible to calculate the secure trim 2 timeout correctly.
1458	 */
1459	if (card->erase_shift)
1460		qty += ((to >> card->erase_shift) -
1461			(from >> card->erase_shift)) + 1;
1462	else if (mmc_card_sd(card))
1463		qty += to - from + 1;
1464	else
1465		qty += ((to / card->erase_size) -
1466			(from / card->erase_size)) + 1;
1467
1468	if (!mmc_card_blockaddr(card)) {
1469		from <<= 9;
1470		to <<= 9;
1471	}
1472
1473	if (mmc_card_sd(card))
1474		cmd.opcode = SD_ERASE_WR_BLK_START;
1475	else
1476		cmd.opcode = MMC_ERASE_GROUP_START;
1477	cmd.arg = from;
1478	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1479	err = mmc_wait_for_cmd(card->host, &cmd, 0);
1480	if (err) {
1481		printk(KERN_ERR "mmc_erase: group start error %d, "
1482		       "status %#x\n", err, cmd.resp[0]);
1483		err = -EINVAL;
1484		goto out;
1485	}
1486
1487	memset(&cmd, 0, sizeof(struct mmc_command));
1488	if (mmc_card_sd(card))
1489		cmd.opcode = SD_ERASE_WR_BLK_END;
1490	else
1491		cmd.opcode = MMC_ERASE_GROUP_END;
1492	cmd.arg = to;
1493	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1494	err = mmc_wait_for_cmd(card->host, &cmd, 0);
1495	if (err) {
1496		printk(KERN_ERR "mmc_erase: group end error %d, status %#x\n",
1497		       err, cmd.resp[0]);
1498		err = -EINVAL;
1499		goto out;
1500	}
1501
1502	memset(&cmd, 0, sizeof(struct mmc_command));
1503	cmd.opcode = MMC_ERASE;
1504	cmd.arg = arg;
1505	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1506	cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
1507	err = mmc_wait_for_cmd(card->host, &cmd, 0);
1508	if (err) {
1509		printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n",
1510		       err, cmd.resp[0]);
1511		err = -EIO;
1512		goto out;
1513	}
1514
1515	if (mmc_host_is_spi(card->host))
1516		goto out;
1517
1518	do {
1519		memset(&cmd, 0, sizeof(struct mmc_command));
1520		cmd.opcode = MMC_SEND_STATUS;
1521		cmd.arg = card->rca << 16;
1522		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1523		/* Do not retry else we can't see errors */
1524		err = mmc_wait_for_cmd(card->host, &cmd, 0);
1525		if (err || (cmd.resp[0] & 0xFDF92000)) {
1526			printk(KERN_ERR "error %d requesting status %#x\n",
1527				err, cmd.resp[0]);
1528			err = -EIO;
1529			goto out;
1530		}
1531	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
1532		 R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG);
1533out:
1534	return err;
1535}
1536
1537/**
1538 * mmc_erase - erase sectors.
1539 * @card: card to erase
1540 * @from: first sector to erase
1541 * @nr: number of sectors to erase
1542 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
1543 *
1544 * Caller must claim host before calling this function.
1545 */
1546int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
1547	      unsigned int arg)
1548{
1549	unsigned int rem, to = from + nr;
1550
1551	if (!(card->host->caps & MMC_CAP_ERASE) ||
1552	    !(card->csd.cmdclass & CCC_ERASE))
1553		return -EOPNOTSUPP;
1554
1555	if (!card->erase_size)
1556		return -EOPNOTSUPP;
1557
1558	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
1559		return -EOPNOTSUPP;
1560
1561	if ((arg & MMC_SECURE_ARGS) &&
1562	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
1563		return -EOPNOTSUPP;
1564
1565	if ((arg & MMC_TRIM_ARGS) &&
1566	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
1567		return -EOPNOTSUPP;
1568
1569	if (arg == MMC_SECURE_ERASE_ARG) {
1570		if (from % card->erase_size || nr % card->erase_size)
1571			return -EINVAL;
1572	}
1573
1574	if (arg == MMC_ERASE_ARG) {
1575		rem = from % card->erase_size;
1576		if (rem) {
1577			rem = card->erase_size - rem;
1578			from += rem;
1579			if (nr > rem)
1580				nr -= rem;
1581			else
1582				return 0;
1583		}
1584		rem = nr % card->erase_size;
1585		if (rem)
1586			nr -= rem;
1587	}
1588
1589	if (nr == 0)
1590		return 0;
1591
1592	to = from + nr;
1593
1594	if (to <= from)
1595		return -EINVAL;
1596
1597	/* 'from' and 'to' are inclusive */
1598	to -= 1;
1599
1600	return mmc_do_erase(card, from, to, arg);
1601}
1602EXPORT_SYMBOL(mmc_erase);
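/*
 * Illustrative sketch (hypothetical caller): discarding a sector range the
 * way a block driver might, preferring a trim when the card supports it
 * (see mmc_can_trim() below).  "from" and "nr" are assumed to be sectors.
 *
 *	unsigned int arg = mmc_can_trim(card) ? MMC_TRIM_ARG : MMC_ERASE_ARG;
 *	int err;
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_erase(card, from, nr, arg);
 *	mmc_release_host(card->host);
 */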
1603
1604int mmc_can_erase(struct mmc_card *card)
1605{
1606	if ((card->host->caps & MMC_CAP_ERASE) &&
1607	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
1608		return 1;
1609	return 0;
1610}
1611EXPORT_SYMBOL(mmc_can_erase);
1612
1613int mmc_can_trim(struct mmc_card *card)
1614{
1615	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
1616		return 1;
1617	return 0;
1618}
1619EXPORT_SYMBOL(mmc_can_trim);
1620
1621int mmc_can_secure_erase_trim(struct mmc_card *card)
1622{
1623	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
1624		return 1;
1625	return 0;
1626}
1627EXPORT_SYMBOL(mmc_can_secure_erase_trim);
1628
1629int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
1630			    unsigned int nr)
1631{
1632	if (!card->erase_size)
1633		return 0;
1634	if (from % card->erase_size || nr % card->erase_size)
1635		return 0;
1636	return 1;
1637}
1638EXPORT_SYMBOL(mmc_erase_group_aligned);
1639
1640static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
1641					    unsigned int arg)
1642{
1643	struct mmc_host *host = card->host;
1644	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
1645	unsigned int last_timeout = 0;
1646
1647	if (card->erase_shift)
1648		max_qty = UINT_MAX >> card->erase_shift;
1649	else if (mmc_card_sd(card))
1650		max_qty = UINT_MAX;
1651	else
1652		max_qty = UINT_MAX / card->erase_size;
1653
1654	/* Find the largest qty with an OK timeout */
1655	do {
1656		y = 0;
1657		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
1658			timeout = mmc_erase_timeout(card, arg, qty + x);
1659			if (timeout > host->max_discard_to)
1660				break;
1661			if (timeout < last_timeout)
1662				break;
1663			last_timeout = timeout;
1664			y = x;
1665		}
1666		qty += y;
1667	} while (y);
1668
1669	if (!qty)
1670		return 0;
1671
1672	if (qty == 1)
1673		return 1;
1674
1675	/* Convert qty to sectors */
1676	if (card->erase_shift)
1677		max_discard = --qty << card->erase_shift;
1678	else if (mmc_card_sd(card))
1679		max_discard = qty;
1680	else
1681		max_discard = --qty * card->erase_size;
1682
1683	return max_discard;
1684}
1685
1686unsigned int mmc_calc_max_discard(struct mmc_card *card)
1687{
1688	struct mmc_host *host = card->host;
1689	unsigned int max_discard, max_trim;
1690
1691	if (!host->max_discard_to)
1692		return UINT_MAX;
1693
1694	/*
1695	 * Without erase_group_def set, MMC erase timeout depends on clock
 1696	 * frequency, which can change.  In that case, the best choice is
1697	 * just the preferred erase size.
1698	 */
1699	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
1700		return card->pref_erase;
1701
1702	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
1703	if (mmc_can_trim(card)) {
1704		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
1705		if (max_trim < max_discard)
1706			max_discard = max_trim;
1707	} else if (max_discard < card->erase_size) {
1708		max_discard = 0;
1709	}
1710	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
1711		 mmc_hostname(host), max_discard, host->max_discard_to);
1712	return max_discard;
1713}
1714EXPORT_SYMBOL(mmc_calc_max_discard);
1715
1716int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
1717{
1718	struct mmc_command cmd = {0};
1719
1720	if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
1721		return 0;
1722
1723	cmd.opcode = MMC_SET_BLOCKLEN;
1724	cmd.arg = blocklen;
1725	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1726	return mmc_wait_for_cmd(card->host, &cmd, 5);
1727}
1728EXPORT_SYMBOL(mmc_set_blocklen);
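/*
 * Illustrative sketch: selecting 512-byte blocks before block I/O.  For
 * block-addressed (high-capacity) and DDR cards mmc_set_blocklen() above
 * is a no-op, so callers can invoke it unconditionally.
 *
 *	err = mmc_set_blocklen(card, 512);
 *	if (err)
 *		return err;
 */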
1729
1730static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
1731{
1732	host->f_init = freq;
1733
1734#ifdef CONFIG_MMC_DEBUG
1735	pr_info("%s: %s: trying to init card at %u Hz\n",
1736		mmc_hostname(host), __func__, host->f_init);
1737#endif
1738	mmc_power_up(host);
1739
1740	/*
1741	 * sdio_reset sends CMD52 to reset card.  Since we do not know
1742	 * if the card is being re-initialized, just send it.  CMD52
1743	 * should be ignored by SD/eMMC cards.
1744	 */
1745	sdio_reset(host);
1746	mmc_go_idle(host);
1747
1748	mmc_send_if_cond(host, host->ocr_avail);
1749
1750	/* Order's important: probe SDIO, then SD, then MMC */
1751	if (!mmc_attach_sdio(host))
1752		return 0;
1753	if (!mmc_attach_sd(host))
1754		return 0;
1755	if (!mmc_attach_mmc(host))
1756		return 0;
1757
1758	mmc_power_off(host);
1759	return -EIO;
1760}
1761
1762void mmc_rescan(struct work_struct *work)
1763{
1764	static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
1765	struct mmc_host *host =
1766		container_of(work, struct mmc_host, detect.work);
1767	int i;
1768
1769	if (host->rescan_disable)
1770		return;
1771
1772	mmc_bus_get(host);
1773
1774	/*
1775	 * if there is a _removable_ card registered, check whether it is
1776	 * still present
1777	 */
1778	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
1779	    && !(host->caps & MMC_CAP_NONREMOVABLE))
1780		host->bus_ops->detect(host);
1781
1782	/*
1783	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
1784	 * the card is no longer present.
1785	 */
1786	mmc_bus_put(host);
1787	mmc_bus_get(host);
1788
1789	/* if there still is a card present, stop here */
1790	if (host->bus_ops != NULL) {
1791		mmc_bus_put(host);
1792		goto out;
1793	}
1794
1795	/*
1796	 * Only we can add a new handler, so it's safe to
1797	 * release the lock here.
1798	 */
1799	mmc_bus_put(host);
1800
1801	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
1802		goto out;
1803
1804	mmc_claim_host(host);
1805	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
1806		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
1807			break;
1808		if (freqs[i] <= host->f_min)
1809			break;
1810	}
1811	mmc_release_host(host);
1812
1813 out:
1814	if (host->caps & MMC_CAP_NEEDS_POLL)
1815		mmc_schedule_delayed_work(&host->detect, HZ);
1816}
1817
1818void mmc_start_host(struct mmc_host *host)
1819{
1820	mmc_power_off(host);
1821	mmc_detect_change(host, 0);
1822}
1823
1824void mmc_stop_host(struct mmc_host *host)
1825{
1826#ifdef CONFIG_MMC_DEBUG
1827	unsigned long flags;
1828	spin_lock_irqsave(&host->lock, flags);
1829	host->removed = 1;
1830	spin_unlock_irqrestore(&host->lock, flags);
1831#endif
1832
1833	if (host->caps & MMC_CAP_DISABLE)
1834		cancel_delayed_work(&host->disable);
1835	cancel_delayed_work_sync(&host->detect);
1836	mmc_flush_scheduled_work();
1837
1838	/* clear pm flags now and let card drivers set them as needed */
1839	host->pm_flags = 0;
1840
1841	mmc_bus_get(host);
1842	if (host->bus_ops && !host->bus_dead) {
1843		if (host->bus_ops->remove)
1844			host->bus_ops->remove(host);
1845
1846		mmc_claim_host(host);
1847		mmc_detach_bus(host);
1848		mmc_release_host(host);
1849		mmc_bus_put(host);
1850		return;
1851	}
1852	mmc_bus_put(host);
1853
1854	BUG_ON(host->card);
1855
1856	mmc_power_off(host);
1857}
1858
1859int mmc_power_save_host(struct mmc_host *host)
1860{
1861	int ret = 0;
1862
1863#ifdef CONFIG_MMC_DEBUG
1864	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
1865#endif
1866
1867	mmc_bus_get(host);
1868
1869	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
1870		mmc_bus_put(host);
1871		return -EINVAL;
1872	}
1873
1874	if (host->bus_ops->power_save)
1875		ret = host->bus_ops->power_save(host);
1876
1877	mmc_bus_put(host);
1878
1879	mmc_power_off(host);
1880
1881	return ret;
1882}
1883EXPORT_SYMBOL(mmc_power_save_host);
1884
1885int mmc_power_restore_host(struct mmc_host *host)
1886{
1887	int ret;
1888
1889#ifdef CONFIG_MMC_DEBUG
1890	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
1891#endif
1892
1893	mmc_bus_get(host);
1894
1895	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
1896		mmc_bus_put(host);
1897		return -EINVAL;
1898	}
1899
1900	mmc_power_up(host);
1901	ret = host->bus_ops->power_restore(host);
1902
1903	mmc_bus_put(host);
1904
1905	return ret;
1906}
1907EXPORT_SYMBOL(mmc_power_restore_host);
1908
1909int mmc_card_awake(struct mmc_host *host)
1910{
1911	int err = -ENOSYS;
1912
1913	mmc_bus_get(host);
1914
1915	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
1916		err = host->bus_ops->awake(host);
1917
1918	mmc_bus_put(host);
1919
1920	return err;
1921}
1922EXPORT_SYMBOL(mmc_card_awake);
1923
1924int mmc_card_sleep(struct mmc_host *host)
1925{
1926	int err = -ENOSYS;
1927
1928	mmc_bus_get(host);
1929
1930	if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
1931		err = host->bus_ops->sleep(host);
1932
1933	mmc_bus_put(host);
1934
1935	return err;
1936}
1937EXPORT_SYMBOL(mmc_card_sleep);
1938
1939int mmc_card_can_sleep(struct mmc_host *host)
1940{
1941	struct mmc_card *card = host->card;
1942
1943	if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
1944		return 1;
1945	return 0;
1946}
1947EXPORT_SYMBOL(mmc_card_can_sleep);
1948
1949#ifdef CONFIG_PM
1950
1951/**
1952 *	mmc_suspend_host - suspend a host
1953 *	@host: mmc host
1954 */
1955int mmc_suspend_host(struct mmc_host *host)
1956{
1957	int err = 0;
1958
1959	if (host->caps & MMC_CAP_DISABLE)
1960		cancel_delayed_work(&host->disable);
1961	cancel_delayed_work(&host->detect);
1962	mmc_flush_scheduled_work();
1963
1964	mmc_bus_get(host);
1965	if (host->bus_ops && !host->bus_dead) {
1966		if (host->bus_ops->suspend)
1967			err = host->bus_ops->suspend(host);
1968		if (err == -ENOSYS || !host->bus_ops->resume) {
1969			/*
1970			 * We simply "remove" the card in this case.
1971			 * It will be redetected on resume.
1972			 */
1973			if (host->bus_ops->remove)
1974				host->bus_ops->remove(host);
1975			mmc_claim_host(host);
1976			mmc_detach_bus(host);
1977			mmc_release_host(host);
1978			host->pm_flags = 0;
1979			err = 0;
1980		}
1981	}
1982	mmc_bus_put(host);
1983
1984	if (!err && !mmc_card_keep_power(host))
1985		mmc_power_off(host);
1986
1987	return err;
1988}
1989
1990EXPORT_SYMBOL(mmc_suspend_host);
1991
1992/**
1993 *	mmc_resume_host - resume a previously suspended host
1994 *	@host: mmc host
1995 */
1996int mmc_resume_host(struct mmc_host *host)
1997{
1998	int err = 0;
1999
2000	mmc_bus_get(host);
2001	if (host->bus_ops && !host->bus_dead) {
2002		if (!mmc_card_keep_power(host)) {
2003			mmc_power_up(host);
2004			mmc_select_voltage(host, host->ocr);
2005			/*
2006			 * Tell runtime PM core we just powered up the card,
2007			 * since it still believes the card is powered off.
2008			 * Note that currently runtime PM is only enabled
2009			 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
2010			 */
2011			if (mmc_card_sdio(host->card) &&
2012			    (host->caps & MMC_CAP_POWER_OFF_CARD)) {
2013				pm_runtime_disable(&host->card->dev);
2014				pm_runtime_set_active(&host->card->dev);
2015				pm_runtime_enable(&host->card->dev);
2016			}
2017		}
2018		BUG_ON(!host->bus_ops->resume);
2019		err = host->bus_ops->resume(host);
2020		if (err) {
2021			printk(KERN_WARNING "%s: error %d during resume "
2022					    "(card was removed?)\n",
2023					    mmc_hostname(host), err);
2024			err = 0;
2025		}
2026	}
2027	host->pm_flags &= ~MMC_PM_KEEP_POWER;
2028	mmc_bus_put(host);
2029
2030	return err;
2031}
2032EXPORT_SYMBOL(mmc_resume_host);
2033
 
2034/* Do the card removal on suspend if the card is assumed removable.
2035 * Do that in the pm notifier while userspace isn't yet frozen, so we
2036 * will still be able to sync the card.
2037 */
2038int mmc_pm_notify(struct notifier_block *notify_block,
2039					unsigned long mode, void *unused)
2040{
2041	struct mmc_host *host = container_of(
2042		notify_block, struct mmc_host, pm_notify);
2043	unsigned long flags;
2044
2045
2046	switch (mode) {
2047	case PM_HIBERNATION_PREPARE:
2048	case PM_SUSPEND_PREPARE:
2049
2050		spin_lock_irqsave(&host->lock, flags);
2051		host->rescan_disable = 1;
2052		spin_unlock_irqrestore(&host->lock, flags);
2053		cancel_delayed_work_sync(&host->detect);
2054
2055		if (!host->bus_ops || host->bus_ops->suspend)
2056			break;
2057
2058		mmc_claim_host(host);
2059
2060		if (host->bus_ops->remove)
2061			host->bus_ops->remove(host);
 
2062
 
 
 
2063		mmc_detach_bus(host);
 
2064		mmc_release_host(host);
2065		host->pm_flags = 0;
2066		break;
2067
2068	case PM_POST_SUSPEND:
2069	case PM_POST_HIBERNATION:
2070	case PM_POST_RESTORE:
2071
2072		spin_lock_irqsave(&host->lock, flags);
2073		host->rescan_disable = 0;
2074		spin_unlock_irqrestore(&host->lock, flags);
2075		mmc_detect_change(host, 0);
2076
2077	}
2078
2079	return 0;
2080}
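
/*
 * Illustrative sketch (not part of the original file): mmc_pm_notify() is
 * not called directly.  When the host is allocated and added, host.c hooks
 * it into the PM notifier chain roughly like this:
 *
 *	host->pm_notify.notifier_call = mmc_pm_notify;
 *	...
 *	register_pm_notifier(&host->pm_notify);
 */
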
2081#endif
2082
2083static int __init mmc_init(void)
2084{
2085	int ret;
2086
2087	workqueue = alloc_ordered_workqueue("kmmcd", 0);
2088	if (!workqueue)
2089		return -ENOMEM;
2090
2091	ret = mmc_register_bus();
2092	if (ret)
2093		goto destroy_workqueue;
2094
2095	ret = mmc_register_host_class();
2096	if (ret)
2097		goto unregister_bus;
2098
2099	ret = sdio_register_bus();
2100	if (ret)
2101		goto unregister_host_class;
2102
2103	return 0;
2104
2105unregister_host_class:
2106	mmc_unregister_host_class();
2107unregister_bus:
2108	mmc_unregister_bus();
2109destroy_workqueue:
2110	destroy_workqueue(workqueue);
2111
2112	return ret;
2113}
2114
2115static void __exit mmc_exit(void)
2116{
2117	sdio_unregister_bus();
2118	mmc_unregister_host_class();
2119	mmc_unregister_bus();
2120	destroy_workqueue(workqueue);
2121}
2122
2123subsys_initcall(mmc_init);
2124module_exit(mmc_exit);
2125
2126MODULE_LICENSE("GPL");
v4.10.11
   1/*
   2 *  linux/drivers/mmc/core/core.c
   3 *
   4 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
   5 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
   6 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
   7 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as
  11 * published by the Free Software Foundation.
  12 */
  13#include <linux/module.h>
  14#include <linux/init.h>
  15#include <linux/interrupt.h>
  16#include <linux/completion.h>
  17#include <linux/device.h>
  18#include <linux/delay.h>
  19#include <linux/pagemap.h>
  20#include <linux/err.h>
  21#include <linux/leds.h>
  22#include <linux/scatterlist.h>
  23#include <linux/log2.h>
  24#include <linux/regulator/consumer.h>
  25#include <linux/pm_runtime.h>
  26#include <linux/pm_wakeup.h>
  27#include <linux/suspend.h>
  28#include <linux/fault-inject.h>
  29#include <linux/random.h>
  30#include <linux/slab.h>
  31#include <linux/of.h>
  32
  33#include <linux/mmc/card.h>
  34#include <linux/mmc/host.h>
  35#include <linux/mmc/mmc.h>
  36#include <linux/mmc/sd.h>
  37#include <linux/mmc/slot-gpio.h>
  38
  39#define CREATE_TRACE_POINTS
  40#include <trace/events/mmc.h>
  41
  42#include "core.h"
  43#include "bus.h"
  44#include "host.h"
  45#include "sdio_bus.h"
  46#include "pwrseq.h"
  47
  48#include "mmc_ops.h"
  49#include "sd_ops.h"
  50#include "sdio_ops.h"
  51
  52/* If the device is not responding */
  53#define MMC_CORE_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */
  54
  55/*
  56 * Background operations can take a long time, depending on the housekeeping
  57 * operations the card has to perform.
  58 */
  59#define MMC_BKOPS_MAX_TIMEOUT	(4 * 60 * 1000) /* max time to wait in ms */
  60
  61/* The max erase timeout, used when host->max_busy_timeout isn't specified */
  62#define MMC_ERASE_TIMEOUT_MS	(60 * 1000) /* 60 s */
  63
  64static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
  65
  66/*
  67 * Enabling software CRCs on the data blocks can be a significant (30%)
  68 * performance cost, and for other reasons may not always be desired.
   69 * So we allow it to be disabled.
  70 */
  71bool use_spi_crc = 1;
  72module_param(use_spi_crc, bool, 0);
  73
  74static int mmc_schedule_delayed_work(struct delayed_work *work,
  75				     unsigned long delay)
  76{
  77	/*
   78	 * We use the system_freezable_wq for two reasons.
   79	 * First, it allows several work items (not the same work item) to be
  80	 * executed simultaneously. Second, the queue becomes frozen when
  81	 * userspace becomes frozen during system PM.
  82	 */
  83	return queue_delayed_work(system_freezable_wq, work, delay);
  84}
  85
  86#ifdef CONFIG_FAIL_MMC_REQUEST
  87
  88/*
  89 * Internal function. Inject random data errors.
  90 * If mmc_data is NULL no errors are injected.
  91 */
  92static void mmc_should_fail_request(struct mmc_host *host,
  93				    struct mmc_request *mrq)
  94{
  95	struct mmc_command *cmd = mrq->cmd;
  96	struct mmc_data *data = mrq->data;
  97	static const int data_errors[] = {
  98		-ETIMEDOUT,
  99		-EILSEQ,
 100		-EIO,
 101	};
 102
 103	if (!data)
 104		return;
 105
 106	if (cmd->error || data->error ||
 107	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
 108		return;
 109
 110	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
 111	data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
 112}
 113
 114#else /* CONFIG_FAIL_MMC_REQUEST */
 115
 116static inline void mmc_should_fail_request(struct mmc_host *host,
 117					   struct mmc_request *mrq)
 118{
 119}
 120
 121#endif /* CONFIG_FAIL_MMC_REQUEST */
 122
 123static inline void mmc_complete_cmd(struct mmc_request *mrq)
 124{
 125	if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
 126		complete_all(&mrq->cmd_completion);
 127}
 128
 129void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
 130{
 131	if (!mrq->cap_cmd_during_tfr)
 132		return;
 133
 134	mmc_complete_cmd(mrq);
 135
 136	pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
 137		 mmc_hostname(host), mrq->cmd->opcode);
 138}
 139EXPORT_SYMBOL(mmc_command_done);
 140
 141/**
 142 *	mmc_request_done - finish processing an MMC request
 143 *	@host: MMC host which completed request
  144 *	@mrq: MMC request which completed
 145 *
 146 *	MMC drivers should call this function when they have completed
 147 *	their processing of a request.
 148 */
 149void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
 150{
 151	struct mmc_command *cmd = mrq->cmd;
 152	int err = cmd->error;
 153
 154	/* Flag re-tuning needed on CRC errors */
 155	if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
 156	    cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
 157	    (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
 158	    (mrq->data && mrq->data->error == -EILSEQ) ||
 159	    (mrq->stop && mrq->stop->error == -EILSEQ)))
 160		mmc_retune_needed(host);
 161
 162	if (err && cmd->retries && mmc_host_is_spi(host)) {
 163		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
 164			cmd->retries = 0;
 165	}
 166
 167	if (host->ongoing_mrq == mrq)
 168		host->ongoing_mrq = NULL;
 
 169
 170	mmc_complete_cmd(mrq);
 171
 172	trace_mmc_request_done(host, mrq);
 173
 174	if (err && cmd->retries && !mmc_card_removed(host->card)) {
 175		/*
 176		 * Request starter must handle retries - see
 177		 * mmc_wait_for_req_done().
 178		 */
 179		if (mrq->done)
 180			mrq->done(mrq);
 181	} else {
 182		mmc_should_fail_request(host, mrq);
 183
 184		if (!host->ongoing_mrq)
 185			led_trigger_event(host->led, LED_OFF);
 186
 187		if (mrq->sbc) {
 188			pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
 189				mmc_hostname(host), mrq->sbc->opcode,
 190				mrq->sbc->error,
 191				mrq->sbc->resp[0], mrq->sbc->resp[1],
 192				mrq->sbc->resp[2], mrq->sbc->resp[3]);
 193		}
 194
 195		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
 196			mmc_hostname(host), cmd->opcode, err,
 197			cmd->resp[0], cmd->resp[1],
 198			cmd->resp[2], cmd->resp[3]);
 199
 200		if (mrq->data) {
 201			pr_debug("%s:     %d bytes transferred: %d\n",
 202				mmc_hostname(host),
 203				mrq->data->bytes_xfered, mrq->data->error);
 204		}
 205
 206		if (mrq->stop) {
 207			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
 208				mmc_hostname(host), mrq->stop->opcode,
 209				mrq->stop->error,
 210				mrq->stop->resp[0], mrq->stop->resp[1],
 211				mrq->stop->resp[2], mrq->stop->resp[3]);
 212		}
 213
 214		if (mrq->done)
 215			mrq->done(mrq);
 
 
 216	}
 217}
 218
 219EXPORT_SYMBOL(mmc_request_done);
 220
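/*
 * Illustrative sketch (not part of the original file): a host controller
 * driver usually calls mmc_request_done() from its interrupt handler once
 * the hardware has finished with the command and data.  The "foo" driver,
 * its fields and helpers below are hypothetical.
 *
 *	static irqreturn_t foo_mmc_irq(int irq, void *dev_id)
 *	{
 *		struct foo_mmc_host *fh = dev_id;
 *		struct mmc_request *mrq = fh->mrq;
 *
 *		foo_mmc_read_response(fh, mrq->cmd);	// fills cmd->resp[]
 *		mrq->cmd->error = foo_mmc_check_errors(fh);
 *		fh->mrq = NULL;
 *		mmc_request_done(fh->mmc, mrq);		// hand back to the core
 *		return IRQ_HANDLED;
 *	}
 */
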
 221static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 222{
 223	int err;
 224
 225	/* Assumes host controller has been runtime resumed by mmc_claim_host */
 226	err = mmc_retune(host);
 227	if (err) {
 228		mrq->cmd->error = err;
 229		mmc_request_done(host, mrq);
 230		return;
 231	}
 232
 233	/*
 234	 * For sdio rw commands we must wait for card busy otherwise some
 235	 * sdio devices won't work properly.
 236	 */
 237	if (mmc_is_io_op(mrq->cmd->opcode) && host->ops->card_busy) {
  238		int tries = 500; /* Wait approx 500ms at maximum */
 239
 240		while (host->ops->card_busy(host) && --tries)
 241			mmc_delay(1);
 242
 243		if (tries == 0) {
 244			mrq->cmd->error = -EBUSY;
 245			mmc_request_done(host, mrq);
 246			return;
 247		}
 248	}
 249
 250	if (mrq->cap_cmd_during_tfr) {
 251		host->ongoing_mrq = mrq;
 252		/*
  253		 * Retry path could come through here without having waited on
 254		 * cmd_completion, so ensure it is reinitialised.
 255		 */
 256		reinit_completion(&mrq->cmd_completion);
 257	}
 258
 259	trace_mmc_request_start(host, mrq);
 260
 261	host->ops->request(host, mrq);
 262}
 263
 264static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 265{
 266#ifdef CONFIG_MMC_DEBUG
 267	unsigned int i, sz;
 268	struct scatterlist *sg;
 269#endif
 270	mmc_retune_hold(host);
 271
 272	if (mmc_card_removed(host->card))
 273		return -ENOMEDIUM;
 274
 275	if (mrq->sbc) {
 276		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
 277			 mmc_hostname(host), mrq->sbc->opcode,
 278			 mrq->sbc->arg, mrq->sbc->flags);
 279	}
 280
 281	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
 282		 mmc_hostname(host), mrq->cmd->opcode,
 283		 mrq->cmd->arg, mrq->cmd->flags);
 284
 285	if (mrq->data) {
 286		pr_debug("%s:     blksz %d blocks %d flags %08x "
 287			"tsac %d ms nsac %d\n",
 288			mmc_hostname(host), mrq->data->blksz,
 289			mrq->data->blocks, mrq->data->flags,
 290			mrq->data->timeout_ns / 1000000,
 291			mrq->data->timeout_clks);
 292	}
 293
 294	if (mrq->stop) {
 295		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
 296			 mmc_hostname(host), mrq->stop->opcode,
 297			 mrq->stop->arg, mrq->stop->flags);
 298	}
 299
 300	WARN_ON(!host->claimed);
 301
 302	mrq->cmd->error = 0;
 303	mrq->cmd->mrq = mrq;
 304	if (mrq->sbc) {
 305		mrq->sbc->error = 0;
 306		mrq->sbc->mrq = mrq;
 307	}
 308	if (mrq->data) {
 309		if (mrq->data->blksz > host->max_blk_size ||
 310		    mrq->data->blocks > host->max_blk_count ||
 311		    mrq->data->blocks * mrq->data->blksz > host->max_req_size)
 312			return -EINVAL;
 
 313#ifdef CONFIG_MMC_DEBUG
 314		sz = 0;
 315		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
 316			sz += sg->length;
 317		if (sz != mrq->data->blocks * mrq->data->blksz)
 318			return -EINVAL;
 319#endif
 320
 321		mrq->cmd->data = mrq->data;
 322		mrq->data->error = 0;
 323		mrq->data->mrq = mrq;
 324		if (mrq->stop) {
 325			mrq->data->stop = mrq->stop;
 326			mrq->stop->error = 0;
 327			mrq->stop->mrq = mrq;
 328		}
 329	}
 
 330	led_trigger_event(host->led, LED_FULL);
 331	__mmc_start_request(host, mrq);
 332
 333	return 0;
 334}
 335
 336/**
 337 *	mmc_start_bkops - start BKOPS for supported cards
 338 *	@card: MMC card to start BKOPS
  339 *	@from_exception: A flag to indicate if this function was
 340 *			 called due to an exception raised by the card
 341 *
 342 *	Start background operations whenever requested.
  343 *	When the urgent BKOPS bit is set in an R1 command response
 344 *	then background operations should be started immediately.
 345*/
 346void mmc_start_bkops(struct mmc_card *card, bool from_exception)
 347{
 348	int err;
 349	int timeout;
 350	bool use_busy_signal;
 351
 352	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
 353		return;
 354
 355	err = mmc_read_bkops_status(card);
 356	if (err) {
 357		pr_err("%s: Failed to read bkops status: %d\n",
 358		       mmc_hostname(card->host), err);
 359		return;
 360	}
 361
 362	if (!card->ext_csd.raw_bkops_status)
 363		return;
 364
 365	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
 366	    from_exception)
 367		return;
 368
 369	mmc_claim_host(card->host);
 370	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
 371		timeout = MMC_BKOPS_MAX_TIMEOUT;
 372		use_busy_signal = true;
 373	} else {
 374		timeout = 0;
 375		use_busy_signal = false;
 376	}
 377
 378	mmc_retune_hold(card->host);
 379
 380	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 381			EXT_CSD_BKOPS_START, 1, timeout, 0,
 382			use_busy_signal, true, false);
 383	if (err) {
 384		pr_warn("%s: Error %d starting bkops\n",
 385			mmc_hostname(card->host), err);
 386		mmc_retune_release(card->host);
 387		goto out;
 388	}
 389
 390	/*
 391	 * For urgent bkops status (LEVEL_2 and more)
  392	 * bkops is executed synchronously, otherwise
 393	 * the operation is in progress
 394	 */
 395	if (!use_busy_signal)
 396		mmc_card_set_doing_bkops(card);
 397	else
 398		mmc_retune_release(card->host);
 399out:
 400	mmc_release_host(card->host);
 401}
 402EXPORT_SYMBOL(mmc_start_bkops);
 403
 404/*
 405 * mmc_wait_data_done() - done callback for data request
 406 * @mrq: done data request
 407 *
 408 * Wakes up mmc context, passed as a callback to host controller driver
 409 */
 410static void mmc_wait_data_done(struct mmc_request *mrq)
 411{
 412	struct mmc_context_info *context_info = &mrq->host->context_info;
 413
 414	context_info->is_done_rcv = true;
 415	wake_up_interruptible(&context_info->wait);
 416}
 417
 418static void mmc_wait_done(struct mmc_request *mrq)
 419{
 420	complete(&mrq->completion);
 421}
 422
 423static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
 424{
 425	struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);
 426
 427	/*
 428	 * If there is an ongoing transfer, wait for the command line to become
 429	 * available.
 430	 */
 431	if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
 432		wait_for_completion(&ongoing_mrq->cmd_completion);
 433}
 434
 435/*
  436 * __mmc_start_data_req() - starts data request
 437 * @host: MMC host to start the request
 438 * @mrq: data request to start
 439 *
 440 * Sets the done callback to be called when request is completed by the card.
 441 * Starts data mmc request execution
 442 * If an ongoing transfer is already in progress, wait for the command line
 443 * to become available before sending another command.
 444 */
 445static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
 446{
 447	int err;
 448
 449	mmc_wait_ongoing_tfr_cmd(host);
 450
 451	mrq->done = mmc_wait_data_done;
 452	mrq->host = host;
 453
 454	init_completion(&mrq->cmd_completion);
 455
 456	err = mmc_start_request(host, mrq);
 457	if (err) {
 458		mrq->cmd->error = err;
 459		mmc_complete_cmd(mrq);
 460		mmc_wait_data_done(mrq);
 461	}
 462
 463	return err;
 464}
 465
 466static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
 467{
 468	int err;
 469
 470	mmc_wait_ongoing_tfr_cmd(host);
 471
 472	init_completion(&mrq->completion);
 473	mrq->done = mmc_wait_done;
 474
 475	init_completion(&mrq->cmd_completion);
 476
 477	err = mmc_start_request(host, mrq);
 478	if (err) {
 479		mrq->cmd->error = err;
 480		mmc_complete_cmd(mrq);
 481		complete(&mrq->completion);
 482	}
 483
 484	return err;
 485}
 486
 487/*
 488 * mmc_wait_for_data_req_done() - wait for request completed
 489 * @host: MMC host to prepare the command.
 490 * @mrq: MMC request to wait for
 491 *
  492 * Blocks the MMC context until the host controller acks the end of the data
  493 * request execution or a new request notification arrives from the block layer.
 494 * Handles command retries.
 495 *
 496 * Returns enum mmc_blk_status after checking errors.
 497 */
 498static enum mmc_blk_status mmc_wait_for_data_req_done(struct mmc_host *host,
 499						      struct mmc_request *mrq)
 500{
 501	struct mmc_command *cmd;
 502	struct mmc_context_info *context_info = &host->context_info;
 503	enum mmc_blk_status status;
 504
 505	while (1) {
 506		wait_event_interruptible(context_info->wait,
 507				(context_info->is_done_rcv ||
 508				 context_info->is_new_req));
 509
 510		if (context_info->is_done_rcv) {
 511			context_info->is_done_rcv = false;
 512			cmd = mrq->cmd;
 513
 514			if (!cmd->error || !cmd->retries ||
 515			    mmc_card_removed(host->card)) {
 516				status = host->areq->err_check(host->card,
 517							       host->areq);
 518				break; /* return status */
 519			} else {
 520				mmc_retune_recheck(host);
 521				pr_info("%s: req failed (CMD%u): %d, retrying...\n",
 522					mmc_hostname(host),
 523					cmd->opcode, cmd->error);
 524				cmd->retries--;
 525				cmd->error = 0;
 526				__mmc_start_request(host, mrq);
 527				continue; /* wait for done/new event again */
 528			}
 529		}
 530
 531		return MMC_BLK_NEW_REQUEST;
 532	}
 533	mmc_retune_release(host);
 534	return status;
 535}
 536
 537void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
 538{
 539	struct mmc_command *cmd;
 540
 541	while (1) {
 542		wait_for_completion(&mrq->completion);
 543
 544		cmd = mrq->cmd;
 545
 546		/*
 547		 * If host has timed out waiting for the sanitize
 548		 * to complete, card might be still in programming state
 549		 * so let's try to bring the card out of programming
 550		 * state.
 551		 */
 552		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
 553			if (!mmc_interrupt_hpi(host->card)) {
 554				pr_warn("%s: %s: Interrupted sanitize\n",
 555					mmc_hostname(host), __func__);
 556				cmd->error = 0;
 557				break;
 558			} else {
 559				pr_err("%s: %s: Failed to interrupt sanitize\n",
 560				       mmc_hostname(host), __func__);
 561			}
 562		}
 563		if (!cmd->error || !cmd->retries ||
 564		    mmc_card_removed(host->card))
 565			break;
 566
 567		mmc_retune_recheck(host);
 568
 569		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
 570			 mmc_hostname(host), cmd->opcode, cmd->error);
 571		cmd->retries--;
 572		cmd->error = 0;
 573		__mmc_start_request(host, mrq);
 574	}
 575
 576	mmc_retune_release(host);
 577}
 578EXPORT_SYMBOL(mmc_wait_for_req_done);
 579
 580/**
 581 *	mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
 582 *	@host: MMC host
 583 *	@mrq: MMC request
 584 *
 585 *	mmc_is_req_done() is used with requests that have
 586 *	mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
 587 *	starting a request and before waiting for it to complete. That is,
 588 *	either in between calls to mmc_start_req(), or after mmc_wait_for_req()
 589 *	and before mmc_wait_for_req_done(). If it is called at other times the
 590 *	result is not meaningful.
 591 */
 592bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
 593{
 594	if (host->areq)
 595		return host->context_info.is_done_rcv;
 596	else
 597		return completion_done(&mrq->completion);
 598}
 599EXPORT_SYMBOL(mmc_is_req_done);
 600
 601/**
 602 *	mmc_pre_req - Prepare for a new request
 603 *	@host: MMC host to prepare command
 604 *	@mrq: MMC request to prepare for
 
 
 605 *
  606 *	mmc_pre_req() is called prior to mmc_start_req() to let the
 607 *	host prepare for the new request. Preparation of a request may be
 608 *	performed while another request is running on the host.
 609 */
 610static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq)
 
 611{
 612	if (host->ops->pre_req)
 613		host->ops->pre_req(host, mrq);
 614}
 615
 616/**
 617 *	mmc_post_req - Post process a completed request
 618 *	@host: MMC host to post process command
 619 *	@mrq: MMC request to post process for
 620 *	@err: Error, if non zero, clean up any resources made in pre_req
 621 *
 622 *	Let the host post process a completed request. Post processing of
  623 *	a request may be performed while another request is running.
 624 */
 625static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
 626			 int err)
 627{
 628	if (host->ops->post_req)
 629		host->ops->post_req(host, mrq, err);
 630}
 631
 632/**
 633 *	mmc_start_req - start a non-blocking request
 634 *	@host: MMC host to start command
 635 *	@areq: async request to start
 636 *	@error: out parameter returns 0 for success, otherwise non zero
 637 *
 638 *	Start a new MMC custom command request for a host.
  639 *	If there is an ongoing async request, wait for completion
 640 *	of that request and start the new one and return.
 641 *	Does not wait for the new request to complete.
 642 *
 643 *      Returns the completed request, NULL in case of none completed.
  644 *	Wait for an ongoing request (previously started) to complete and
 645 *	return the completed request. If there is no ongoing request, NULL
 646 *	is returned without waiting. NULL is not an error condition.
 647 */
 648struct mmc_async_req *mmc_start_req(struct mmc_host *host,
 649				    struct mmc_async_req *areq,
 650				    enum mmc_blk_status *ret_stat)
 651{
 652	enum mmc_blk_status status = MMC_BLK_SUCCESS;
 653	int start_err = 0;
 654	struct mmc_async_req *data = host->areq;
 655
 656	/* Prepare a new request */
 657	if (areq)
 658		mmc_pre_req(host, areq->mrq);
 659
 660	if (host->areq) {
 661		status = mmc_wait_for_data_req_done(host, host->areq->mrq);
 662		if (status == MMC_BLK_NEW_REQUEST) {
 663			if (ret_stat)
 664				*ret_stat = status;
 665			/*
 666			 * The previous request was not completed,
 667			 * nothing to return
 668			 */
 669			return NULL;
 670		}
 671		/*
 672		 * Check BKOPS urgency for each R1 response
 673		 */
 674		if (host->card && mmc_card_mmc(host->card) &&
 675		    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
 676		     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
 677		    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
 678
 679			/* Cancel the prepared request */
 680			if (areq)
 681				mmc_post_req(host, areq->mrq, -EINVAL);
 682
 683			mmc_start_bkops(host->card, true);
 684
 685			/* prepare the request again */
 686			if (areq)
 687				mmc_pre_req(host, areq->mrq);
 688		}
 689	}
 690
 691	if (status == MMC_BLK_SUCCESS && areq)
 692		start_err = __mmc_start_data_req(host, areq->mrq);
 693
 694	if (host->areq)
 695		mmc_post_req(host, host->areq->mrq, 0);
 696
 697	 /* Cancel a prepared request if it was not started. */
 698	if ((status != MMC_BLK_SUCCESS || start_err) && areq)
 699		mmc_post_req(host, areq->mrq, -EINVAL);
 700
 701	if (status != MMC_BLK_SUCCESS)
 702		host->areq = NULL;
 703	else
 704		host->areq = areq;
 705
 706	if (ret_stat)
 707		*ret_stat = status;
 708	return data;
 709}
 710EXPORT_SYMBOL(mmc_start_req);
 711
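/*
 * Illustrative sketch (not part of the original file): the pipelined pattern
 * a caller such as the block driver follows.  Handing in the next prepared
 * request returns the previously issued one once it completes;
 * "finish_previous" is a hypothetical helper.
 *
 *	struct mmc_async_req *prev;
 *	enum mmc_blk_status status;
 *
 *	prev = mmc_start_req(host, next_areq, &status);
 *	if (prev)
 *		finish_previous(prev, status);
 *
 * Passing NULL instead of next_areq simply reaps the outstanding request.
 */
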
 712/**
 713 *	mmc_wait_for_req - start a request and wait for completion
 714 *	@host: MMC host to start command
 715 *	@mrq: MMC request to start
 716 *
 717 *	Start a new MMC custom command request for a host, and wait
 718 *	for the command to complete. In the case of 'cap_cmd_during_tfr'
 719 *	requests, the transfer is ongoing and the caller can issue further
 720 *	commands that do not use the data lines, and then wait by calling
 721 *	mmc_wait_for_req_done().
 722 *	Does not attempt to parse the response.
 723 */
 724void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
 725{
 726	__mmc_start_req(host, mrq);
 727
 728	if (!mrq->cap_cmd_during_tfr)
 729		mmc_wait_for_req_done(host, mrq);
 730}
 731EXPORT_SYMBOL(mmc_wait_for_req);
 732
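/*
 * Illustrative sketch (not part of the original file): a synchronous
 * single-block read built on mmc_wait_for_req(), similar in spirit to
 * mmc_test.c.  "card", "blk_addr", "buf" (>= 512 bytes) and "err" are
 * assumed to be provided by the caller.
 *
 *	struct mmc_request mrq = {NULL};
 *	struct mmc_command cmd = {0};
 *	struct mmc_data data = {0};
 *	struct scatterlist sg;
 *
 *	cmd.opcode = MMC_READ_SINGLE_BLOCK;
 *	cmd.arg = blk_addr;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 *
 *	data.blksz = 512;
 *	data.blocks = 1;
 *	data.flags = MMC_DATA_READ;
 *	data.sg = &sg;
 *	data.sg_len = 1;
 *	sg_init_one(&sg, buf, 512);
 *	mmc_set_data_timeout(&data, card);
 *
 *	mrq.cmd = &cmd;
 *	mrq.data = &data;
 *
 *	mmc_claim_host(card->host);
 *	mmc_wait_for_req(card->host, &mrq);
 *	mmc_release_host(card->host);
 *	err = cmd.error ? cmd.error : data.error;
 */
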
 733/**
  734 *	mmc_interrupt_hpi - Issue a High Priority Interrupt
  735 *	@card: the MMC card associated with the HPI transfer
  736 *
  737 *	Issue a High Priority Interrupt and poll the card status
  738 *	until it is out of the programming state.
 739 */
 740int mmc_interrupt_hpi(struct mmc_card *card)
 741{
 742	int err;
 743	u32 status;
 744	unsigned long prg_wait;
 745
 746	if (!card->ext_csd.hpi_en) {
 747		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
 748		return 1;
 749	}
 750
 751	mmc_claim_host(card->host);
 752	err = mmc_send_status(card, &status);
 753	if (err) {
 754		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
 755		goto out;
 756	}
 757
 758	switch (R1_CURRENT_STATE(status)) {
 759	case R1_STATE_IDLE:
 760	case R1_STATE_READY:
 761	case R1_STATE_STBY:
 762	case R1_STATE_TRAN:
 763		/*
 764		 * In idle and transfer states, HPI is not needed and the caller
 765		 * can issue the next intended command immediately
 766		 */
 767		goto out;
 768	case R1_STATE_PRG:
 769		break;
 770	default:
 771		/* In all other states, it's illegal to issue HPI */
 772		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
 773			mmc_hostname(card->host), R1_CURRENT_STATE(status));
 774		err = -EINVAL;
 775		goto out;
 776	}
 777
 778	err = mmc_send_hpi_cmd(card, &status);
 779	if (err)
 780		goto out;
 781
 782	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
 783	do {
 784		err = mmc_send_status(card, &status);
 785
 786		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
 787			break;
 788		if (time_after(jiffies, prg_wait))
 789			err = -ETIMEDOUT;
 790	} while (!err);
 791
 792out:
 793	mmc_release_host(card->host);
 794	return err;
 795}
 796EXPORT_SYMBOL(mmc_interrupt_hpi);
 797
 798/**
 799 *	mmc_wait_for_cmd - start a command and wait for completion
 800 *	@host: MMC host to start command
 801 *	@cmd: MMC command to start
 802 *	@retries: maximum number of retries
 803 *
 804 *	Start a new MMC command for a host, and wait for the command
 805 *	to complete.  Return any error that occurred while the command
 806 *	was executing.  Do not attempt to parse the response.
 807 */
 808int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
 809{
 810	struct mmc_request mrq = {NULL};
 811
 812	WARN_ON(!host->claimed);
 813
 814	memset(cmd->resp, 0, sizeof(cmd->resp));
 815	cmd->retries = retries;
 816
 817	mrq.cmd = cmd;
 818	cmd->data = NULL;
 819
 820	mmc_wait_for_req(host, &mrq);
 821
 822	return cmd->error;
 823}
 824
 825EXPORT_SYMBOL(mmc_wait_for_cmd);
 826
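/*
 * Illustrative sketch (not part of the original file): sending a bare CMD13
 * (SEND_STATUS) on a non-SPI host with mmc_wait_for_cmd(); this is
 * essentially what mmc_send_status() in mmc_ops.c does.
 *
 *	struct mmc_command cmd = {0};
 *
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
 *	mmc_release_host(card->host);
 *	if (!err)
 *		status = cmd.resp[0];
 */
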
 827/**
 828 *	mmc_stop_bkops - stop ongoing BKOPS
 829 *	@card: MMC card to check BKOPS
 830 *
 831 *	Send HPI command to stop ongoing background operations to
 832 *	allow rapid servicing of foreground operations, e.g. read/
 833 *	writes. Wait until the card comes out of the programming state
 834 *	to avoid errors in servicing read/write requests.
 835 */
 836int mmc_stop_bkops(struct mmc_card *card)
 837{
 838	int err = 0;
 839
 840	err = mmc_interrupt_hpi(card);
 841
 842	/*
 843	 * If err is EINVAL, we can't issue an HPI.
 844	 * It should complete the BKOPS.
 845	 */
 846	if (!err || (err == -EINVAL)) {
 847		mmc_card_clr_doing_bkops(card);
 848		mmc_retune_release(card->host);
 849		err = 0;
 850	}
 851
 852	return err;
 853}
 854EXPORT_SYMBOL(mmc_stop_bkops);
 855
 856int mmc_read_bkops_status(struct mmc_card *card)
 857{
 858	int err;
 859	u8 *ext_csd;
 860
 861	mmc_claim_host(card->host);
 862	err = mmc_get_ext_csd(card, &ext_csd);
 863	mmc_release_host(card->host);
 864	if (err)
 865		return err;
 866
 867	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
 868	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
 869	kfree(ext_csd);
 870	return 0;
 871}
 872EXPORT_SYMBOL(mmc_read_bkops_status);
 873
 874/**
 875 *	mmc_set_data_timeout - set the timeout for a data command
 876 *	@data: data phase for command
 877 *	@card: the MMC card associated with the data transfer
 878 *
 879 *	Computes the data timeout parameters according to the
 880 *	correct algorithm given the card type.
 881 */
 882void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
 883{
 884	unsigned int mult;
 885
 886	/*
 887	 * SDIO cards only define an upper 1 s limit on access.
 888	 */
 889	if (mmc_card_sdio(card)) {
 890		data->timeout_ns = 1000000000;
 891		data->timeout_clks = 0;
 892		return;
 893	}
 894
 895	/*
 896	 * SD cards use a 100 multiplier rather than 10
 897	 */
 898	mult = mmc_card_sd(card) ? 100 : 10;
 899
 900	/*
 901	 * Scale up the multiplier (and therefore the timeout) by
 902	 * the r2w factor for writes.
 903	 */
 904	if (data->flags & MMC_DATA_WRITE)
 905		mult <<= card->csd.r2w_factor;
 906
 907	data->timeout_ns = card->csd.tacc_ns * mult;
 908	data->timeout_clks = card->csd.tacc_clks * mult;
 909
 910	/*
 911	 * SD cards also have an upper limit on the timeout.
 912	 */
 913	if (mmc_card_sd(card)) {
 914		unsigned int timeout_us, limit_us;
 915
 916		timeout_us = data->timeout_ns / 1000;
 917		if (card->host->ios.clock)
 918			timeout_us += data->timeout_clks * 1000 /
 919				(card->host->ios.clock / 1000);
 920
 921		if (data->flags & MMC_DATA_WRITE)
 922			/*
  923			 * The MMC spec says "It is strongly recommended
 924			 * for hosts to implement more than 500ms
 925			 * timeout value even if the card indicates
 926			 * the 250ms maximum busy length."  Even the
 927			 * previous value of 300ms is known to be
 928			 * insufficient for some cards.
 929			 */
 930			limit_us = 3000000;
 931		else
 932			limit_us = 100000;
 933
 934		/*
 935		 * SDHC cards always use these fixed values.
 936		 */
 937		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
 938			data->timeout_ns = limit_us * 1000;
 939			data->timeout_clks = 0;
 940		}
 941
 942		/* assign limit value if invalid */
 943		if (timeout_us == 0)
 944			data->timeout_ns = limit_us * 1000;
 945	}
 946
 947	/*
 948	 * Some cards require longer data read timeout than indicated in CSD.
 949	 * Address this by setting the read timeout to a "reasonably high"
 950	 * value. For the cards tested, 600ms has proven enough. If necessary,
 951	 * this value can be increased if other problematic cards require this.
 952	 */
 953	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
 954		data->timeout_ns = 600000000;
 955		data->timeout_clks = 0;
 956	}
 957
 958	/*
 959	 * Some cards need very high timeouts if driven in SPI mode.
 960	 * The worst observed timeout was 900ms after writing a
 961	 * continuous stream of data until the internal logic
 962	 * overflowed.
 963	 */
 964	if (mmc_host_is_spi(card->host)) {
 965		if (data->flags & MMC_DATA_WRITE) {
 966			if (data->timeout_ns < 1000000000)
 967				data->timeout_ns = 1000000000;	/* 1s */
 968		} else {
 969			if (data->timeout_ns < 100000000)
 970				data->timeout_ns =  100000000;	/* 100ms */
 971		}
 972	}
 973}
 974EXPORT_SYMBOL(mmc_set_data_timeout);
 975
 976/**
 977 *	mmc_align_data_size - pads a transfer size to a more optimal value
 978 *	@card: the MMC card associated with the data transfer
 979 *	@sz: original transfer size
 980 *
 981 *	Pads the original data size with a number of extra bytes in
 982 *	order to avoid controller bugs and/or performance hits
 983 *	(e.g. some controllers revert to PIO for certain sizes).
 984 *
 985 *	Returns the improved size, which might be unmodified.
 986 *
 987 *	Note that this function is only relevant when issuing a
 988 *	single scatter gather entry.
 989 */
 990unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
 991{
 992	/*
 993	 * FIXME: We don't have a system for the controller to tell
 994	 * the core about its problems yet, so for now we just 32-bit
 995	 * align the size.
 996	 */
 997	sz = ((sz + 3) / 4) * 4;
 998
 999	return sz;
1000}
1001EXPORT_SYMBOL(mmc_align_data_size);
1002
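/*
 * Illustrative example (not part of the original file): with the current
 * 32-bit alignment rule an awkward size is simply rounded up, e.g.
 *
 *	mmc_align_data_size(card, 13);	// returns 16
 *	mmc_align_data_size(card, 16);	// returns 16, already aligned
 */
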
1003/**
1004 *	__mmc_claim_host - exclusively claim a host
1005 *	@host: mmc host to claim
1006 *	@abort: whether or not the operation should be aborted
1007 *
1008 *	Claim a host for a set of operations.  If @abort is non-NULL and
1009 *	dereferences to a non-zero value then this will return prematurely with
1010 *	that non-zero value without acquiring the lock.  Returns zero
1011 *	with the lock held otherwise.
1012 */
1013int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
1014{
1015	DECLARE_WAITQUEUE(wait, current);
1016	unsigned long flags;
1017	int stop;
1018	bool pm = false;
1019
1020	might_sleep();
1021
1022	add_wait_queue(&host->wq, &wait);
1023	spin_lock_irqsave(&host->lock, flags);
1024	while (1) {
1025		set_current_state(TASK_UNINTERRUPTIBLE);
1026		stop = abort ? atomic_read(abort) : 0;
1027		if (stop || !host->claimed || host->claimer == current)
1028			break;
1029		spin_unlock_irqrestore(&host->lock, flags);
1030		schedule();
1031		spin_lock_irqsave(&host->lock, flags);
1032	}
1033	set_current_state(TASK_RUNNING);
1034	if (!stop) {
1035		host->claimed = 1;
1036		host->claimer = current;
1037		host->claim_cnt += 1;
1038		if (host->claim_cnt == 1)
1039			pm = true;
1040	} else
1041		wake_up(&host->wq);
1042	spin_unlock_irqrestore(&host->lock, flags);
1043	remove_wait_queue(&host->wq, &wait);
1044
1045	if (pm)
1046		pm_runtime_get_sync(mmc_dev(host));
1047
1048	return stop;
1049}
1050EXPORT_SYMBOL(__mmc_claim_host);
1051
1052/**
1053 *	mmc_release_host - release a host
1054 *	@host: mmc host to release
1055 *
1056 *	Release an MMC host, allowing others to claim the host
1057 *	for their operations.
1058 */
1059void mmc_release_host(struct mmc_host *host)
1060{
1061	unsigned long flags;
1062
1063	WARN_ON(!host->claimed);
1064
1065	spin_lock_irqsave(&host->lock, flags);
1066	if (--host->claim_cnt) {
1067		/* Release for nested claim */
1068		spin_unlock_irqrestore(&host->lock, flags);
1069	} else {
1070		host->claimed = 0;
1071		host->claimer = NULL;
1072		spin_unlock_irqrestore(&host->lock, flags);
1073		wake_up(&host->wq);
1074		pm_runtime_mark_last_busy(mmc_dev(host));
1075		pm_runtime_put_autosuspend(mmc_dev(host));
1076	}
1077}
1078EXPORT_SYMBOL(mmc_release_host);
1079
1080/*
1081 * This is a helper function, which fetches a runtime pm reference for the
1082 * card device and also claims the host.
1083 */
1084void mmc_get_card(struct mmc_card *card)
1085{
1086	pm_runtime_get_sync(&card->dev);
1087	mmc_claim_host(card->host);
1088}
1089EXPORT_SYMBOL(mmc_get_card);
1090
1091/*
1092 * This is a helper function, which releases the host and drops the runtime
1093 * pm reference for the card device.
 
 
 
1094 */
1095void mmc_put_card(struct mmc_card *card)
1096{
1097	mmc_release_host(card->host);
1098	pm_runtime_mark_last_busy(&card->dev);
1099	pm_runtime_put_autosuspend(&card->dev);
 
 
1100}
1101EXPORT_SYMBOL(mmc_put_card);
 
1102
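/*
 * Illustrative sketch (not part of the original file): the bus handlers'
 * detect() paths bracket their polling like this, keeping the card runtime
 * resumed and the host claimed for the duration:
 *
 *	mmc_get_card(host->card);
 *	err = _mmc_detect_card_removed(host);	// e.g. polls with CMD13
 *	mmc_put_card(host->card);
 */
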
1103/*
1104 * Internal function that does the actual ios call to the host driver,
1105 * optionally printing some debug output.
1106 */
1107static inline void mmc_set_ios(struct mmc_host *host)
1108{
1109	struct mmc_ios *ios = &host->ios;
1110
1111	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
1112		"width %u timing %u\n",
1113		 mmc_hostname(host), ios->clock, ios->bus_mode,
1114		 ios->power_mode, ios->chip_select, ios->vdd,
1115		 1 << ios->bus_width, ios->timing);
1116
 
 
1117	host->ops->set_ios(host, ios);
1118}
1119
1120/*
1121 * Control chip select pin on a host.
1122 */
1123void mmc_set_chip_select(struct mmc_host *host, int mode)
1124{
 
1125	host->ios.chip_select = mode;
1126	mmc_set_ios(host);
 
1127}
1128
1129/*
1130 * Sets the host clock to the highest possible frequency that
1131 * is below "hz".
1132 */
1133void mmc_set_clock(struct mmc_host *host, unsigned int hz)
1134{
1135	WARN_ON(hz && hz < host->f_min);
1136
1137	if (hz > host->f_max)
1138		hz = host->f_max;
1139
1140	host->ios.clock = hz;
1141	mmc_set_ios(host);
1142}
1143
1144int mmc_execute_tuning(struct mmc_card *card)
1145{
1146	struct mmc_host *host = card->host;
1147	u32 opcode;
1148	int err;
1149
1150	if (!host->ops->execute_tuning)
1151		return 0;
1152
1153	if (mmc_card_mmc(card))
1154		opcode = MMC_SEND_TUNING_BLOCK_HS200;
1155	else
1156		opcode = MMC_SEND_TUNING_BLOCK;
1157
1158	err = host->ops->execute_tuning(host, opcode);
 
 
1159
1160	if (err)
1161		pr_err("%s: tuning execution failed: %d\n",
1162			mmc_hostname(host), err);
1163	else
1164		mmc_retune_enable(host);
 
 
 
1165
1166	return err;
 
 
1167}
 
1168
1169/*
1170 * Change the bus mode (open drain/push-pull) of a host.
1171 */
1172void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
1173{
 
1174	host->ios.bus_mode = mode;
1175	mmc_set_ios(host);
 
1176}
1177
1178/*
1179 * Change data bus width of a host.
1180 */
1181void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
1182{
 
1183	host->ios.bus_width = width;
1184	mmc_set_ios(host);
1185}
1186
1187/*
1188 * Set initial state after a power cycle or a hw_reset.
1189 */
1190void mmc_set_initial_state(struct mmc_host *host)
1191{
1192	mmc_retune_disable(host);
1193
1194	if (mmc_host_is_spi(host))
1195		host->ios.chip_select = MMC_CS_HIGH;
1196	else
1197		host->ios.chip_select = MMC_CS_DONTCARE;
1198	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1199	host->ios.bus_width = MMC_BUS_WIDTH_1;
1200	host->ios.timing = MMC_TIMING_LEGACY;
1201	host->ios.drv_type = 0;
1202	host->ios.enhanced_strobe = false;
1203
1204	/*
1205	 * Make sure we are in non-enhanced strobe mode before we
1206	 * actually enable it in ext_csd.
1207	 */
1208	if ((host->caps2 & MMC_CAP2_HS400_ES) &&
1209	     host->ops->hs400_enhanced_strobe)
1210		host->ops->hs400_enhanced_strobe(host, &host->ios);
1211
1212	mmc_set_ios(host);
1213}
1214
1215/**
1216 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
1217 * @vdd:	voltage (mV)
1218 * @low_bits:	prefer low bits in boundary cases
1219 *
1220 * This function returns the OCR bit number according to the provided @vdd
1221 * value. If conversion is not possible a negative errno value is returned.
1222 *
1223 * Depending on the @low_bits flag the function prefers low or high OCR bits
1224 * on boundary voltages. For example,
1225 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
1226 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
1227 *
1228 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
1229 */
1230static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
1231{
1232	const int max_bit = ilog2(MMC_VDD_35_36);
1233	int bit;
1234
1235	if (vdd < 1650 || vdd > 3600)
1236		return -EINVAL;
1237
1238	if (vdd >= 1650 && vdd <= 1950)
1239		return ilog2(MMC_VDD_165_195);
1240
1241	if (low_bits)
1242		vdd -= 1;
1243
1244	/* Base 2000 mV, step 100 mV, bit's base 8. */
1245	bit = (vdd - 2000) / 100 + 8;
1246	if (bit > max_bit)
1247		return max_bit;
1248	return bit;
1249}
1250
1251/**
1252 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
1253 * @vdd_min:	minimum voltage value (mV)
1254 * @vdd_max:	maximum voltage value (mV)
1255 *
1256 * This function returns the OCR mask bits according to the provided @vdd_min
1257 * and @vdd_max values. If conversion is not possible the function returns 0.
1258 *
1259 * Notes wrt boundary cases:
1260 * This function sets the OCR bits for all boundary voltages, for example
1261 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
1262 * MMC_VDD_34_35 mask.
1263 */
1264u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
1265{
1266	u32 mask = 0;
1267
1268	if (vdd_max < vdd_min)
1269		return 0;
1270
1271	/* Prefer high bits for the boundary vdd_max values. */
1272	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
1273	if (vdd_max < 0)
1274		return 0;
1275
1276	/* Prefer low bits for the boundary vdd_min values. */
1277	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
1278	if (vdd_min < 0)
1279		return 0;
1280
1281	/* Fill the mask, from max bit to min bit. */
1282	while (vdd_max >= vdd_min)
1283		mask |= 1 << vdd_max--;
1284
1285	return mask;
1286}
1287EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
1288
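/*
 * Illustrative example (not part of the original file): a host driver with a
 * fixed 3.3-3.4V supply could publish its OCR mask as
 *
 *	host->ocr_avail = mmc_vddrange_to_ocrmask(3300, 3400);
 *	// == MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35
 */
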
1289#ifdef CONFIG_OF
1290
1291/**
1292 * mmc_of_parse_voltage - return mask of supported voltages
1293 * @np: The device node to be parsed.
1294 * @mask: mask of voltages available for MMC/SD/SDIO
1295 *
1296 * Parse the "voltage-ranges" DT property, returning zero if it is not
1297 * found, negative errno if the voltage-range specification is invalid,
1298 * or one if the voltage-range is specified and successfully parsed.
1299 */
1300int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
1301{
1302	const u32 *voltage_ranges;
1303	int num_ranges, i;
1304
1305	voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
1306	num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
1307	if (!voltage_ranges) {
1308		pr_debug("%s: voltage-ranges unspecified\n", np->full_name);
1309		return 0;
1310	}
1311	if (!num_ranges) {
1312		pr_err("%s: voltage-ranges empty\n", np->full_name);
1313		return -EINVAL;
1314	}
1315
1316	for (i = 0; i < num_ranges; i++) {
1317		const int j = i * 2;
1318		u32 ocr_mask;
1319
1320		ocr_mask = mmc_vddrange_to_ocrmask(
1321				be32_to_cpu(voltage_ranges[j]),
1322				be32_to_cpu(voltage_ranges[j + 1]));
1323		if (!ocr_mask) {
1324			pr_err("%s: voltage-range #%d is invalid\n",
1325				np->full_name, i);
1326			return -EINVAL;
1327		}
1328		*mask |= ocr_mask;
1329	}
1330
1331	return 1;
1332}
1333EXPORT_SYMBOL(mmc_of_parse_voltage);
1334
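/*
 * Illustrative sketch (not part of the original file): a controller node
 * carrying, for instance,
 *
 *	voltage-ranges = <3300 3300>, <1800 1800>;
 *
 * can be parsed from a driver's probe() ("pdev" is hypothetical) with:
 *
 *	u32 ocr_mask = 0;
 *
 *	ret = mmc_of_parse_voltage(pdev->dev.of_node, &ocr_mask);
 *	if (ret > 0)
 *		mmc->ocr_avail = ocr_mask;
 */
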
1335#endif /* CONFIG_OF */
1336
1337static int mmc_of_get_func_num(struct device_node *node)
1338{
1339	u32 reg;
1340	int ret;
1341
1342	ret = of_property_read_u32(node, "reg", &reg);
1343	if (ret < 0)
1344		return ret;
1345
1346	return reg;
1347}
1348
1349struct device_node *mmc_of_find_child_device(struct mmc_host *host,
1350		unsigned func_num)
1351{
1352	struct device_node *node;
1353
1354	if (!host->parent || !host->parent->of_node)
1355		return NULL;
1356
1357	for_each_child_of_node(host->parent->of_node, node) {
1358		if (mmc_of_get_func_num(node) == func_num)
1359			return node;
1360	}
1361
1362	return NULL;
1363}
1364
1365#ifdef CONFIG_REGULATOR
1366
1367/**
1368 * mmc_ocrbitnum_to_vdd - Convert a OCR bit number to its voltage
1369 * @vdd_bit:	OCR bit number
1370 * @min_uV:	minimum voltage value (µV)
1371 * @max_uV:	maximum voltage value (µV)
1372 *
1373 * This function returns the voltage range according to the provided OCR
1374 * bit number. If conversion is not possible a negative errno value is returned.
1375 */
1376static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
1377{
1378	int		tmp;
1379
1380	if (!vdd_bit)
1381		return -EINVAL;
1382
1383	/*
1384	 * REVISIT mmc_vddrange_to_ocrmask() may have set some
1385	 * bits this regulator doesn't quite support ... don't
1386	 * be too picky, most cards and regulators are OK with
1387	 * a 0.1V range goof (it's a small error percentage).
1388	 */
1389	tmp = vdd_bit - ilog2(MMC_VDD_165_195);
1390	if (tmp == 0) {
1391		*min_uV = 1650 * 1000;
1392		*max_uV = 1950 * 1000;
1393	} else {
1394		*min_uV = 1900 * 1000 + tmp * 100 * 1000;
1395		*max_uV = *min_uV + 100 * 1000;
1396	}
1397
1398	return 0;
1399}
1400
1401/**
1402 * mmc_regulator_get_ocrmask - return mask of supported voltages
1403 * @supply: regulator to use
1404 *
1405 * This returns either a negative errno, or a mask of voltages that
1406 * can be provided to MMC/SD/SDIO devices using the specified voltage
1407 * regulator.  This would normally be called before registering the
1408 * MMC host adapter.
1409 */
1410int mmc_regulator_get_ocrmask(struct regulator *supply)
1411{
1412	int			result = 0;
1413	int			count;
1414	int			i;
1415	int			vdd_uV;
1416	int			vdd_mV;
1417
1418	count = regulator_count_voltages(supply);
1419	if (count < 0)
1420		return count;
1421
1422	for (i = 0; i < count; i++) {
 
 
 
1423		vdd_uV = regulator_list_voltage(supply, i);
1424		if (vdd_uV <= 0)
1425			continue;
1426
1427		vdd_mV = vdd_uV / 1000;
1428		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1429	}
1430
1431	if (!result) {
1432		vdd_uV = regulator_get_voltage(supply);
1433		if (vdd_uV <= 0)
1434			return vdd_uV;
1435
1436		vdd_mV = vdd_uV / 1000;
1437		result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1438	}
1439
1440	return result;
1441}
1442EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
1443
1444/**
1445 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
1446 * @mmc: the host to regulate
1447 * @supply: regulator to use
1448 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
1449 *
1450 * Returns zero on success, else negative errno.
1451 *
1452 * MMC host drivers may use this to enable or disable a regulator using
1453 * a particular supply voltage.  This would normally be called from the
1454 * set_ios() method.
1455 */
1456int mmc_regulator_set_ocr(struct mmc_host *mmc,
1457			struct regulator *supply,
1458			unsigned short vdd_bit)
1459{
1460	int			result = 0;
1461	int			min_uV, max_uV;
1462
1463	if (vdd_bit) {
1464		mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);
1465
1466		result = regulator_set_voltage(supply, min_uV, max_uV);
1467		if (result == 0 && !mmc->regulator_enabled) {
1468			result = regulator_enable(supply);
1469			if (!result)
1470				mmc->regulator_enabled = true;
1471		}
1472	} else if (mmc->regulator_enabled) {
1473		result = regulator_disable(supply);
1474		if (result == 0)
1475			mmc->regulator_enabled = false;
1476	}
1477
1478	if (result)
1479		dev_err(mmc_dev(mmc),
1480			"could not set regulator OCR (%d)\n", result);
1481	return result;
1482}
1483EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
1484
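/*
 * Illustrative sketch (not part of the original file): a host driver's
 * set_ios() handler ("foo" is hypothetical) driving vmmc from ios->vdd,
 * turning the supply off when the power mode goes to MMC_POWER_OFF:
 *
 *	static void foo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 *	{
 *		if (!IS_ERR(mmc->supply.vmmc))
 *			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
 *					      ios->power_mode == MMC_POWER_OFF ?
 *					      0 : ios->vdd);
 *		// ... program clock, bus width, timing, etc.
 *	}
 */
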
1485static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
1486						  int min_uV, int target_uV,
1487						  int max_uV)
1488{
1489	/*
1490	 * Check if supported first to avoid errors since we may try several
1491	 * signal levels during power up and don't want to show errors.
1492	 */
1493	if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
1494		return -EINVAL;
1495
1496	return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
1497					     max_uV);
1498}
1499
1500/**
1501 * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
1502 *
1503 * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
1504 * That will match the behavior of old boards where VQMMC and VMMC were supplied
1505 * by the same supply.  The Bus Operating conditions for 3.3V signaling in the
1506 * SD card spec also define VQMMC in terms of VMMC.
1507 * If this is not possible we'll try the full 2.7-3.6V of the spec.
1508 *
1509 * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
1510 * requested voltage.  This is definitely a good idea for UHS where there's a
1511 * separate regulator on the card that's trying to make 1.8V and it's best if
1512 * we match.
1513 *
1514 * This function is expected to be used by a controller's
1515 * start_signal_voltage_switch() function.
1516 */
1517int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
1518{
1519	struct device *dev = mmc_dev(mmc);
1520	int ret, volt, min_uV, max_uV;
1521
1522	/* If no vqmmc supply then we can't change the voltage */
1523	if (IS_ERR(mmc->supply.vqmmc))
1524		return -EINVAL;
1525
1526	switch (ios->signal_voltage) {
1527	case MMC_SIGNAL_VOLTAGE_120:
1528		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1529						1100000, 1200000, 1300000);
1530	case MMC_SIGNAL_VOLTAGE_180:
1531		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1532						1700000, 1800000, 1950000);
1533	case MMC_SIGNAL_VOLTAGE_330:
1534		ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
1535		if (ret < 0)
1536			return ret;
1537
1538		dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
1539			__func__, volt, max_uV);
1540
1541		min_uV = max(volt - 300000, 2700000);
1542		max_uV = min(max_uV + 200000, 3600000);
1543
1544		/*
1545		 * Due to a limitation in the current implementation of
1546		 * regulator_set_voltage_triplet() which is taking the lowest
1547		 * voltage possible if below the target, search for a suitable
1548		 * voltage in two steps and try to stay close to vmmc
1549		 * with a 0.3V tolerance at first.
1550		 */
1551		if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1552						min_uV, volt, max_uV))
1553			return 0;
1554
1555		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1556						2700000, volt, 3600000);
1557	default:
1558		return -EINVAL;
1559	}
1560}
1561EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
1562
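/*
 * Illustrative sketch (not part of the original file): a controller's
 * start_signal_voltage_switch() handler ("foo" is hypothetical) can simply
 * forward the request when a vqmmc regulator is present:
 *
 *	static int foo_signal_voltage_switch(struct mmc_host *mmc,
 *					     struct mmc_ios *ios)
 *	{
 *		if (!IS_ERR(mmc->supply.vqmmc))
 *			return mmc_regulator_set_vqmmc(mmc, ios);
 *		return 0;
 *	}
 */
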
1563#endif /* CONFIG_REGULATOR */
1564
1565int mmc_regulator_get_supply(struct mmc_host *mmc)
1566{
1567	struct device *dev = mmc_dev(mmc);
1568	int ret;
1569
1570	mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
1571	mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
1572
1573	if (IS_ERR(mmc->supply.vmmc)) {
1574		if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
1575			return -EPROBE_DEFER;
1576		dev_dbg(dev, "No vmmc regulator found\n");
1577	} else {
1578		ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
1579		if (ret > 0)
1580			mmc->ocr_avail = ret;
1581		else
1582			dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
1583	}
1584
1585	if (IS_ERR(mmc->supply.vqmmc)) {
1586		if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
1587			return -EPROBE_DEFER;
1588		dev_dbg(dev, "No vqmmc regulator found\n");
1589	}
1590
1591	return 0;
1592}
1593EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
1594
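/*
 * Illustrative sketch (not part of the original file): a host driver probe()
 * typically calls this and falls back to a fixed OCR mask when no vmmc
 * regulator is described:
 *
 *	ret = mmc_regulator_get_supply(mmc);
 *	if (ret)
 *		return ret;		// -EPROBE_DEFER propagates
 *	if (!mmc->ocr_avail)
 *		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
 */
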
1595/*
1596 * Mask off any voltages we don't support and select
1597 * the lowest voltage
1598 */
1599u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1600{
1601	int bit;
1602
1603	/*
1604	 * Sanity check the voltages that the card claims to
1605	 * support.
1606	 */
1607	if (ocr & 0x7F) {
1608		dev_warn(mmc_dev(host),
1609		"card claims to support voltages below defined range\n");
1610		ocr &= ~0x7F;
1611	}
1612
1613	ocr &= host->ocr_avail;
1614	if (!ocr) {
1615		dev_warn(mmc_dev(host), "no support for card's volts\n");
1616		return 0;
1617	}
1618
1619	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
1620		bit = ffs(ocr) - 1;
1621		ocr &= 3 << bit;
1622		mmc_power_cycle(host, ocr);
1623	} else {
1624		bit = fls(ocr) - 1;
1625		ocr &= 3 << bit;
1626		if (bit != host->ios.vdd)
1627			dev_warn(mmc_dev(host), "exceeding card's volts\n");
1628	}
1629
1630	return ocr;
1631}
1632
1633int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
1634{
 
1635	int err = 0;
1636	int old_signal_voltage = host->ios.signal_voltage;
1637
1638	host->ios.signal_voltage = signal_voltage;
1639	if (host->ops->start_signal_voltage_switch)
1640		err = host->ops->start_signal_voltage_switch(host, &host->ios);
1641
1642	if (err)
1643		host->ios.signal_voltage = old_signal_voltage;
1644
1645	return err;
1646
1647}
1648
1649int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
1650{
1651	struct mmc_command cmd = {0};
1652	int err = 0;
1653	u32 clock;
1654
1655	/*
1656	 * Send CMD11 only if the request is to switch the card to
1657	 * 1.8V signalling.
1658	 */
1659	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1660		return __mmc_set_signal_voltage(host, signal_voltage);
 
 
1661
1662	/*
1663	 * If we cannot switch voltages, return failure so the caller
1664	 * can continue without UHS mode
1665	 */
1666	if (!host->ops->start_signal_voltage_switch)
1667		return -EPERM;
1668	if (!host->ops->card_busy)
1669		pr_warn("%s: cannot verify signal voltage switch\n",
1670			mmc_hostname(host));
1671
1672	cmd.opcode = SD_SWITCH_VOLTAGE;
1673	cmd.arg = 0;
1674	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1675
1676	err = mmc_wait_for_cmd(host, &cmd, 0);
1677	if (err)
1678		return err;
1679
1680	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
1681		return -EIO;
1682
1683	/*
1684	 * The card should drive cmd and dat[0:3] low immediately
1685	 * after the response of cmd11, but wait 1 ms to be sure
1686	 */
1687	mmc_delay(1);
1688	if (host->ops->card_busy && !host->ops->card_busy(host)) {
1689		err = -EAGAIN;
1690		goto power_cycle;
1691	}
1692	/*
1693	 * During a signal voltage level switch, the clock must be gated
1694	 * for 5 ms according to the SD spec
1695	 */
1696	clock = host->ios.clock;
1697	host->ios.clock = 0;
1698	mmc_set_ios(host);
1699
1700	if (__mmc_set_signal_voltage(host, signal_voltage)) {
1701		/*
1702		 * Voltages may not have been switched, but we've already
1703		 * sent CMD11, so a power cycle is required anyway
1704		 */
1705		err = -EAGAIN;
1706		goto power_cycle;
1707	}
1708
1709	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
1710	mmc_delay(10);
1711	host->ios.clock = clock;
1712	mmc_set_ios(host);
1713
1714	/* Wait for at least 1 ms according to spec */
1715	mmc_delay(1);
1716
1717	/*
1718	 * Failure to switch is indicated by the card holding
1719	 * dat[0:3] low
1720	 */
1721	if (host->ops->card_busy && host->ops->card_busy(host))
1722		err = -EAGAIN;
1723
1724power_cycle:
1725	if (err) {
1726		pr_debug("%s: Signal voltage switch failed, "
1727			"power cycling card\n", mmc_hostname(host));
1728		mmc_power_cycle(host, ocr);
1729	}
1730
1731	return err;
1732}
1733
1734/*
1735 * Select timing parameters for host.
1736 */
1737void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1738{
1739	host->ios.timing = timing;
1740	mmc_set_ios(host);
1741}
1742
1743/*
1744 * Select appropriate driver type for host.
1745 */
1746void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1747{
1748	host->ios.drv_type = drv_type;
1749	mmc_set_ios(host);
1750}
1751
1752int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
1753			      int card_drv_type, int *drv_type)
1754{
1755	struct mmc_host *host = card->host;
1756	int host_drv_type = SD_DRIVER_TYPE_B;
1757
1758	*drv_type = 0;
1759
1760	if (!host->ops->select_drive_strength)
1761		return 0;
1762
1763	/* Use SD definition of driver strength for hosts */
1764	if (host->caps & MMC_CAP_DRIVER_TYPE_A)
1765		host_drv_type |= SD_DRIVER_TYPE_A;
1766
1767	if (host->caps & MMC_CAP_DRIVER_TYPE_C)
1768		host_drv_type |= SD_DRIVER_TYPE_C;
1769
1770	if (host->caps & MMC_CAP_DRIVER_TYPE_D)
1771		host_drv_type |= SD_DRIVER_TYPE_D;
1772
1773	/*
1774	 * The drive strength that the hardware can support
1775	 * depends on the board design.  Pass the appropriate
1776	 * information and let the hardware specific code
1777	 * return what is possible given the options
1778	 */
1779	return host->ops->select_drive_strength(card, max_dtr,
1780						host_drv_type,
1781						card_drv_type,
1782						drv_type);
1783}
1784
1785/*
1786 * Apply power to the MMC stack.  This is a two-stage process.
1787 * First, we enable power to the card without the clock running.
1788 * We then wait a bit for the power to stabilise.  Finally,
1789 * enable the bus drivers and clock to the card.
1790 *
1791 * We must _NOT_ enable the clock prior to power stabilising.
1792 *
1793 * If a host does all the power sequencing itself, ignore the
1794 * initial MMC_POWER_UP stage.
1795 */
1796void mmc_power_up(struct mmc_host *host, u32 ocr)
1797{
1798	if (host->ios.power_mode == MMC_POWER_ON)
1799		return;
1800
1801	mmc_pwrseq_pre_power_on(host);
1802
1803	host->ios.vdd = fls(ocr) - 1;
1804	host->ios.power_mode = MMC_POWER_UP;
1805	/* Set initial state and call mmc_set_ios */
1806	mmc_set_initial_state(host);
1807
1808	/* Try to set signal voltage to 3.3V but fall back to 1.8V or 1.2V */
1809	if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0)
1810		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
1811	else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0)
1812		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
1813	else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0)
1814		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
1815
1816	/*
1817	 * This delay should be sufficient to allow the power supply
1818	 * to reach the minimum voltage.
1819	 */
1820	mmc_delay(10);
1821
1822	mmc_pwrseq_post_power_on(host);
1823
1824	host->ios.clock = host->f_init;
1825
1826	host->ios.power_mode = MMC_POWER_ON;
1827	mmc_set_ios(host);
1828
1829	/*
1830	 * This delay must be at least 74 clock cycles, or 1 ms, or the
1831	 * time required to reach a stable voltage.
1832	 */
1833	mmc_delay(10);
1834}
1835
1836void mmc_power_off(struct mmc_host *host)
1837{
1838	if (host->ios.power_mode == MMC_POWER_OFF)
1839		return;
1840
1841	mmc_pwrseq_power_off(host);
1842
1843	host->ios.clock = 0;
1844	host->ios.vdd = 0;
1845
1846	host->ios.power_mode = MMC_POWER_OFF;
1847	/* Set initial state and call mmc_set_ios */
1848	mmc_set_initial_state(host);
1849
1850	/*
1851	 * Some configurations, such as the 802.11 SDIO card in the OLPC
1852	 * XO-1.5, require a short delay after poweroff before the card
1853	 * can be successfully turned on again.
1854	 */
1855	mmc_delay(1);
1856}
1857
1858void mmc_power_cycle(struct mmc_host *host, u32 ocr)
1859{
1860	mmc_power_off(host);
1861	/* Wait at least 1 ms according to SD spec */
1862	mmc_delay(1);
1863	mmc_power_up(host, ocr);
1864}
1865
1866/*
1867 * Cleanup when the last reference to the bus operator is dropped.
1868 */
1869static void __mmc_release_bus(struct mmc_host *host)
1870{
1871	WARN_ON(!host->bus_dead);
1872
1873	host->bus_ops = NULL;
1874}
1875
1876/*
1877 * Increase reference count of bus operator
1878 */
1879static inline void mmc_bus_get(struct mmc_host *host)
1880{
1881	unsigned long flags;
1882
1883	spin_lock_irqsave(&host->lock, flags);
1884	host->bus_refs++;
1885	spin_unlock_irqrestore(&host->lock, flags);
1886}
1887
1888/*
1889 * Decrease reference count of bus operator and free it if
1890 * it is the last reference.
1891 */
1892static inline void mmc_bus_put(struct mmc_host *host)
1893{
1894	unsigned long flags;
1895
1896	spin_lock_irqsave(&host->lock, flags);
1897	host->bus_refs--;
1898	if ((host->bus_refs == 0) && host->bus_ops)
1899		__mmc_release_bus(host);
1900	spin_unlock_irqrestore(&host->lock, flags);
1901}
1902
1903/*
1904 * Assign an mmc bus handler to a host. Only one bus handler may control a
1905 * host at any given time.
1906 */
1907void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1908{
1909	unsigned long flags;
1910
1911	WARN_ON(!host->claimed);
1912
1913	spin_lock_irqsave(&host->lock, flags);
1914
1915	WARN_ON(host->bus_ops);
1916	WARN_ON(host->bus_refs);
1917
1918	host->bus_ops = ops;
1919	host->bus_refs = 1;
1920	host->bus_dead = 0;
1921
1922	spin_unlock_irqrestore(&host->lock, flags);
1923}
1924
1925/*
1926 * Remove the current bus handler from a host.
1927 */
1928void mmc_detach_bus(struct mmc_host *host)
1929{
1930	unsigned long flags;
1931
1932	WARN_ON(!host->claimed);
1933	WARN_ON(!host->bus_ops);
1934
1935	spin_lock_irqsave(&host->lock, flags);
1936
1937	host->bus_dead = 1;
1938
1939	spin_unlock_irqrestore(&host->lock, flags);
1940
1941	mmc_bus_put(host);
1942}
1943
1944static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
1945				bool cd_irq)
1946{
1947#ifdef CONFIG_MMC_DEBUG
1948	unsigned long flags;
1949	spin_lock_irqsave(&host->lock, flags);
1950	WARN_ON(host->removed);
1951	spin_unlock_irqrestore(&host->lock, flags);
1952#endif
1953
1954	/*
1955	 * If the device is configured for wakeup, prevent a new sleep for
1956	 * 5 s to give user space time to consume the event.
1957	 */
1958	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
1959		device_can_wakeup(mmc_dev(host)))
1960		pm_wakeup_event(mmc_dev(host), 5000);
1961
1962	host->detect_change = 1;
1963	mmc_schedule_delayed_work(&host->detect, delay);
1964}
1965
1966/**
1967 *	mmc_detect_change - process change of state on a MMC socket
1968 *	@host: host which changed state.
1969 *	@delay: optional delay to wait before detection (jiffies)
1970 *
1971 *	MMC drivers should call this when they detect a card has been
1972 *	inserted or removed. The MMC layer will confirm that any
1973 *	present card is still functional, and initialize any newly
1974 *	inserted card.
1975 */
1976void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1977{
1978	_mmc_detect_change(host, delay, true);
1979}
1980EXPORT_SYMBOL(mmc_detect_change);
1981
1982void mmc_init_erase(struct mmc_card *card)
1983{
1984	unsigned int sz;
1985
1986	if (is_power_of_2(card->erase_size))
1987		card->erase_shift = ffs(card->erase_size) - 1;
1988	else
1989		card->erase_shift = 0;
1990
1991	/*
1992	 * It is possible to erase an arbitrarily large area of an SD or MMC
1993	 * card.  That is not desirable because it can take a long time
1994	 * (minutes) potentially delaying more important I/O, and also the
1995	 * timeout calculations become hugely over-estimated.
1996	 * Consequently, 'pref_erase' is defined as a guide to limit erases
1997	 * to that size and alignment.
1998	 *
1999	 * For SD cards that define Allocation Unit size, limit erases to one
2000	 * Allocation Unit at a time.
2001	 * For MMC, have a stab at a good value and for modern cards it will
2002	 * end up being 4MiB. Note that if the value is too small, it can end
2003	 * up taking longer to erase. Also note, erase_size is already set to
2004	 * High Capacity Erase Size if available when this function is called.
2005	 */
2006	if (mmc_card_sd(card) && card->ssr.au) {
2007		card->pref_erase = card->ssr.au;
2008		card->erase_shift = ffs(card->ssr.au) - 1;
2009	} else if (card->erase_size) {
2010		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
2011		if (sz < 128)
2012			card->pref_erase = 512 * 1024 / 512;
2013		else if (sz < 512)
2014			card->pref_erase = 1024 * 1024 / 512;
2015		else if (sz < 1024)
2016			card->pref_erase = 2 * 1024 * 1024 / 512;
2017		else
2018			card->pref_erase = 4 * 1024 * 1024 / 512;
2019		if (card->pref_erase < card->erase_size)
2020			card->pref_erase = card->erase_size;
2021		else {
2022			sz = card->pref_erase % card->erase_size;
2023			if (sz)
2024				card->pref_erase += card->erase_size - sz;
2025		}
2026	} else
2027		card->pref_erase = 0;
2028}
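/*
 * Illustrative worked example of the sizing above (editor's sketch with
 * assumed values, not part of the original code): a non-SD card whose CSD
 * works out to 8 GiB has capacity << (read_blkbits - 9) == 16777216
 * sectors, so sz = 16777216 >> 11 = 8192 MiB.  That is >= 1024, so
 * pref_erase starts at 4 MiB / 512 = 8192 sectors; with an erase_size of
 * 1024 sectors (512 KiB) it is already a multiple and stays at 8192.
 */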
2029
2030static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
2031				          unsigned int arg, unsigned int qty)
2032{
2033	unsigned int erase_timeout;
2034
2035	if (arg == MMC_DISCARD_ARG ||
2036	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
2037		erase_timeout = card->ext_csd.trim_timeout;
2038	} else if (card->ext_csd.erase_group_def & 1) {
2039		/* High Capacity Erase Group Size uses HC timeouts */
2040		if (arg == MMC_TRIM_ARG)
2041			erase_timeout = card->ext_csd.trim_timeout;
2042		else
2043			erase_timeout = card->ext_csd.hc_erase_timeout;
2044	} else {
2045		/* CSD Erase Group Size uses write timeout */
2046		unsigned int mult = (10 << card->csd.r2w_factor);
2047		unsigned int timeout_clks = card->csd.tacc_clks * mult;
2048		unsigned int timeout_us;
2049
2050		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
2051		if (card->csd.tacc_ns < 1000000)
2052			timeout_us = (card->csd.tacc_ns * mult) / 1000;
2053		else
2054			timeout_us = (card->csd.tacc_ns / 1000) * mult;
2055
2056		/*
2057		 * ios.clock is only a target.  The real clock rate might be
2058		 * less but not that much less, so fudge it by multiplying by 2.
2059		 */
2060		timeout_clks <<= 1;
2061		timeout_us += (timeout_clks * 1000) /
2062			      (card->host->ios.clock / 1000);
2063
2064		erase_timeout = timeout_us / 1000;
2065
2066		/*
2067		 * Theoretically, the calculation could underflow so round up
2068		 * to 1ms in that case.
2069		 */
2070		if (!erase_timeout)
2071			erase_timeout = 1;
2072	}
2073
2074	/* Multiplier for secure operations */
2075	if (arg & MMC_SECURE_ARGS) {
2076		if (arg == MMC_SECURE_ERASE_ARG)
2077			erase_timeout *= card->ext_csd.sec_erase_mult;
2078		else
2079			erase_timeout *= card->ext_csd.sec_trim_mult;
2080	}
2081
2082	erase_timeout *= qty;
2083
2084	/*
2085	 * Ensure at least a 1 second timeout for SPI as per
2086	 * 'mmc_set_data_timeout()'
2087	 */
2088	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
2089		erase_timeout = 1000;
2090
2091	return erase_timeout;
2092}
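/*
 * Illustrative worked example of the CSD-based branch above (editor's
 * sketch with assumed values, not part of the original code):
 * tacc_ns = 500000, tacc_clks = 100, r2w_factor = 2 and a 26 MHz clock
 * give mult = 10 << 2 = 40, timeout_us = (500000 * 40) / 1000 = 20000,
 * timeout_clks = 100 * 40 = 4000 doubled to 8000, which adds
 * (8000 * 1000) / 26000 = ~307 us, so the per-group erase_timeout is
 * about 20 ms before the secure multipliers and the qty scaling apply.
 */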
2093
2094static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
2095					 unsigned int arg,
2096					 unsigned int qty)
2097{
2098	unsigned int erase_timeout;
2099
2100	if (card->ssr.erase_timeout) {
2101		/* Erase timeout specified in SD Status Register (SSR) */
2102		erase_timeout = card->ssr.erase_timeout * qty +
2103				card->ssr.erase_offset;
2104	} else {
2105		/*
2106		 * Erase timeout not specified in SD Status Register (SSR) so
2107		 * use 250ms per write block.
2108		 */
2109		erase_timeout = 250 * qty;
2110	}
2111
2112	/* Must not be less than 1 second */
2113	if (erase_timeout < 1000)
2114		erase_timeout = 1000;
2115
2116	return erase_timeout;
2117}
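/*
 * Illustrative worked example (editor's sketch with assumed values, not
 * part of the original code): an SD card whose SSR reports 250 ms per AU
 * and a 50 ms erase_offset gives 250 * 4 + 50 = 1050 ms for qty = 4; a
 * card without SSR timing data falls back to 250 ms * qty, and either
 * result is raised to the 1 second minimum if it comes out lower.
 */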
2118
2119static unsigned int mmc_erase_timeout(struct mmc_card *card,
2120				      unsigned int arg,
2121				      unsigned int qty)
2122{
2123	if (mmc_card_sd(card))
2124		return mmc_sd_erase_timeout(card, arg, qty);
2125	else
2126		return mmc_mmc_erase_timeout(card, arg, qty);
2127}
2128
2129static int mmc_do_erase(struct mmc_card *card, unsigned int from,
2130			unsigned int to, unsigned int arg)
2131{
2132	struct mmc_command cmd = {0};
2133	unsigned int qty = 0, busy_timeout = 0;
2134	bool use_r1b_resp = false;
2135	unsigned long timeout;
2136	int err;
2137
2138	mmc_retune_hold(card->host);
2139
2140	/*
2141	 * qty is used to calculate the erase timeout which depends on how many
2142	 * erase groups (or allocation units in SD terminology) are affected.
2143	 * We count erasing part of an erase group as one erase group.
2144	 * For SD, the allocation units are always a power of 2.  For MMC, the
2145	 * erase group size is almost certainly also a power of 2, but the
2146	 * JEDEC standard does not seem to insist on that, so we fall back to
2147	 * division in that case.  SD may not specify an allocation unit size,
2148	 * in which case the timeout is based on the number of write blocks.
2149	 *
2150	 * Note that the timeout for secure trim 2 will only be correct if the
2151	 * number of erase groups specified is the same as the total of all
2152	 * preceding secure trim 1 commands.  Since the power may have been
2153	 * lost since the secure trim 1 commands occurred, it is generally
2154	 * impossible to calculate the secure trim 2 timeout correctly.
2155	 */
2156	if (card->erase_shift)
2157		qty += ((to >> card->erase_shift) -
2158			(from >> card->erase_shift)) + 1;
2159	else if (mmc_card_sd(card))
2160		qty += to - from + 1;
2161	else
2162		qty += ((to / card->erase_size) -
2163			(from / card->erase_size)) + 1;
2164
2165	if (!mmc_card_blockaddr(card)) {
2166		from <<= 9;
2167		to <<= 9;
2168	}
2169
2170	if (mmc_card_sd(card))
2171		cmd.opcode = SD_ERASE_WR_BLK_START;
2172	else
2173		cmd.opcode = MMC_ERASE_GROUP_START;
2174	cmd.arg = from;
2175	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2176	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2177	if (err) {
2178		pr_err("mmc_erase: group start error %d, "
2179		       "status %#x\n", err, cmd.resp[0]);
2180		err = -EIO;
2181		goto out;
2182	}
2183
2184	memset(&cmd, 0, sizeof(struct mmc_command));
2185	if (mmc_card_sd(card))
2186		cmd.opcode = SD_ERASE_WR_BLK_END;
2187	else
2188		cmd.opcode = MMC_ERASE_GROUP_END;
2189	cmd.arg = to;
2190	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2191	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2192	if (err) {
2193		pr_err("mmc_erase: group end error %d, status %#x\n",
2194		       err, cmd.resp[0]);
2195		err = -EIO;
2196		goto out;
2197	}
2198
2199	memset(&cmd, 0, sizeof(struct mmc_command));
2200	cmd.opcode = MMC_ERASE;
2201	cmd.arg = arg;
2202	busy_timeout = mmc_erase_timeout(card, arg, qty);
2203	/*
2204	 * If the host controller supports busy signalling and the timeout for
2205	 * the erase operation does not exceed the max_busy_timeout, we should
2206	 * use R1B response. Or we need to prevent the host from doing hw busy
2207	 * detection, which is done by converting to a R1 response instead.
2208	 */
2209	if (card->host->max_busy_timeout &&
2210	    busy_timeout > card->host->max_busy_timeout) {
2211		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2212	} else {
2213		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2214		cmd.busy_timeout = busy_timeout;
2215		use_r1b_resp = true;
2216	}
2217
2218	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2219	if (err) {
2220		pr_err("mmc_erase: erase error %d, status %#x\n",
2221		       err, cmd.resp[0]);
2222		err = -EIO;
2223		goto out;
2224	}
2225
2226	if (mmc_host_is_spi(card->host))
2227		goto out;
2228
2229	/*
2230	 * When R1B + MMC_CAP_WAIT_WHILE_BUSY is used, the polling
2231	 * shall be avoided.
2232	 */
2233	if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
2234		goto out;
2235
2236	timeout = jiffies + msecs_to_jiffies(busy_timeout);
2237	do {
2238		memset(&cmd, 0, sizeof(struct mmc_command));
2239		cmd.opcode = MMC_SEND_STATUS;
2240		cmd.arg = card->rca << 16;
2241		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
2242		/* Do not retry else we can't see errors */
2243		err = mmc_wait_for_cmd(card->host, &cmd, 0);
2244		if (err || (cmd.resp[0] & 0xFDF92000)) {
2245			pr_err("error %d requesting status %#x\n",
2246				err, cmd.resp[0]);
2247			err = -EIO;
2248			goto out;
2249		}
2250
2251		/* Timeout if the device never becomes ready for data and
2252		 * never leaves the program state.
2253		 */
2254		if (time_after(jiffies, timeout)) {
2255			pr_err("%s: Card stuck in programming state! %s\n",
2256				mmc_hostname(card->host), __func__);
2257			err =  -EIO;
2258			goto out;
2259		}
2260
2261	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
2262		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
2263out:
2264	mmc_retune_release(card->host);
2265	return err;
2266}
2267
2268static unsigned int mmc_align_erase_size(struct mmc_card *card,
2269					 unsigned int *from,
2270					 unsigned int *to,
2271					 unsigned int nr)
2272{
2273	unsigned int from_new = *from, nr_new = nr, rem;
2274
2275	/*
2276	 * When the 'card->erase_size' is power of 2, we can use round_up/down()
2277	 * to align the erase size efficiently.
2278	 */
2279	if (is_power_of_2(card->erase_size)) {
2280		unsigned int temp = from_new;
2281
2282		from_new = round_up(temp, card->erase_size);
2283		rem = from_new - temp;
2284
2285		if (nr_new > rem)
2286			nr_new -= rem;
2287		else
2288			return 0;
2289
2290		nr_new = round_down(nr_new, card->erase_size);
2291	} else {
2292		rem = from_new % card->erase_size;
2293		if (rem) {
2294			rem = card->erase_size - rem;
2295			from_new += rem;
2296			if (nr_new > rem)
2297				nr_new -= rem;
2298			else
2299				return 0;
2300		}
2301
2302		rem = nr_new % card->erase_size;
2303		if (rem)
2304			nr_new -= rem;
2305	}
2306
2307	if (nr_new == 0)
2308		return 0;
2309
2310	*to = from_new + nr_new;
2311	*from = from_new;
2312
2313	return nr_new;
2314}
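/*
 * Illustrative worked example of the alignment above (editor's sketch
 * with assumed values, not part of the original code): with a power-of-2
 * erase_size of 1024 sectors, *from = 1000 and nr = 5000, from_new is
 * rounded up to 1024 (rem = 24), nr_new becomes 4976 and is rounded down
 * to 4096, so the function reports the aligned range [1024, 5120) and
 * returns 4096, leaving the unaligned head and tail untouched.
 */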
2315
2316/**
2317 * mmc_erase - erase sectors.
2318 * @card: card to erase
2319 * @from: first sector to erase
2320 * @nr: number of sectors to erase
2321 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
2322 *
2323 * Caller must claim host before calling this function.
2324 */
2325int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
2326	      unsigned int arg)
2327{
2328	unsigned int rem, to = from + nr;
2329	int err;
2330
2331	if (!(card->host->caps & MMC_CAP_ERASE) ||
2332	    !(card->csd.cmdclass & CCC_ERASE))
2333		return -EOPNOTSUPP;
2334
2335	if (!card->erase_size)
2336		return -EOPNOTSUPP;
2337
2338	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
2339		return -EOPNOTSUPP;
2340
2341	if ((arg & MMC_SECURE_ARGS) &&
2342	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
2343		return -EOPNOTSUPP;
2344
2345	if ((arg & MMC_TRIM_ARGS) &&
2346	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
2347		return -EOPNOTSUPP;
2348
2349	if (arg == MMC_SECURE_ERASE_ARG) {
2350		if (from % card->erase_size || nr % card->erase_size)
2351			return -EINVAL;
2352	}
2353
2354	if (arg == MMC_ERASE_ARG)
2355		nr = mmc_align_erase_size(card, &from, &to, nr);
2356
2357	if (nr == 0)
2358		return 0;
2359
2360	if (to <= from)
2361		return -EINVAL;
2362
2363	/* 'from' and 'to' are inclusive */
2364	to -= 1;
2365
2366	/*
2367	 * Special case where only one erase-group fits in the timeout budget:
2368	 * If the region crosses an erase-group boundary in this particular
2369	 * case, we will be trimming more than one erase-group, which does not
2370	 * fit in the timeout budget of the controller, so we need to split it
2371	 * and call mmc_do_erase() twice if necessary. This special case is
2372	 * identified by the card->eg_boundary flag.
2373	 */
2374	rem = card->erase_size - (from % card->erase_size);
2375	if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
2376		err = mmc_do_erase(card, from, from + rem - 1, arg);
2377		from += rem;
2378		if ((err) || (to <= from))
2379			return err;
2380	}
2381
2382	return mmc_do_erase(card, from, to, arg);
2383}
2384EXPORT_SYMBOL(mmc_erase);
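#if 0
/*
 * Editor's usage sketch, illustrative only and not part of the original
 * code: a typical caller claims the host, picks the strongest erase
 * argument the card supports and erases a sector range.
 * "example_discard" is a made-up name; the helpers it calls are defined
 * in this file and declared in the mmc core headers.
 */
static int example_discard(struct mmc_card *card, unsigned int from,
			   unsigned int nr)
{
	unsigned int arg = mmc_can_trim(card) ? MMC_TRIM_ARG : MMC_ERASE_ARG;
	int err;

	if (!mmc_can_erase(card))
		return -EOPNOTSUPP;

	mmc_claim_host(card->host);
	err = mmc_erase(card, from, nr, arg);
	mmc_release_host(card->host);

	return err;
}
#endif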
2385
2386int mmc_can_erase(struct mmc_card *card)
2387{
2388	if ((card->host->caps & MMC_CAP_ERASE) &&
2389	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
2390		return 1;
2391	return 0;
2392}
2393EXPORT_SYMBOL(mmc_can_erase);
2394
2395int mmc_can_trim(struct mmc_card *card)
2396{
2397	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
2398	    (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
2399		return 1;
2400	return 0;
2401}
2402EXPORT_SYMBOL(mmc_can_trim);
2403
2404int mmc_can_discard(struct mmc_card *card)
2405{
2406	/*
2407	 * As there's no way to detect the discard support bit at v4.5,
2408	 * use the s/w feature support field.
2409	 */
2410	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
2411		return 1;
2412	return 0;
2413}
2414EXPORT_SYMBOL(mmc_can_discard);
2415
2416int mmc_can_sanitize(struct mmc_card *card)
2417{
2418	if (!mmc_can_trim(card) && !mmc_can_erase(card))
2419		return 0;
2420	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
2421		return 1;
2422	return 0;
2423}
2424EXPORT_SYMBOL(mmc_can_sanitize);
2425
2426int mmc_can_secure_erase_trim(struct mmc_card *card)
2427{
2428	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
2429	    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
2430		return 1;
2431	return 0;
2432}
2433EXPORT_SYMBOL(mmc_can_secure_erase_trim);
2434
2435int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
2436			    unsigned int nr)
2437{
2438	if (!card->erase_size)
2439		return 0;
2440	if (from % card->erase_size || nr % card->erase_size)
2441		return 0;
2442	return 1;
2443}
2444EXPORT_SYMBOL(mmc_erase_group_aligned);
2445
2446static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
2447					    unsigned int arg)
2448{
2449	struct mmc_host *host = card->host;
2450	unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
2451	unsigned int last_timeout = 0;
2452	unsigned int max_busy_timeout = host->max_busy_timeout ?
2453			host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;
2454
2455	if (card->erase_shift) {
2456		max_qty = UINT_MAX >> card->erase_shift;
2457		min_qty = card->pref_erase >> card->erase_shift;
2458	} else if (mmc_card_sd(card)) {
2459		max_qty = UINT_MAX;
2460		min_qty = card->pref_erase;
2461	} else {
2462		max_qty = UINT_MAX / card->erase_size;
2463		min_qty = card->pref_erase / card->erase_size;
2464	}
2465
2466	/*
2467	 * We should not use 'host->max_busy_timeout' as the only limit
2468	 * when deciding the max discard sectors. We should pick a balanced
2469	 * value that improves the erase speed without letting the timeout
2470	 * grow too long.
2471	 *
2472	 * Here we set 'card->pref_erase' as the minimal discard sectors no
2473	 * matter the size of 'host->max_busy_timeout', but if the
2474	 * 'host->max_busy_timeout' is large enough for more discard sectors,
2475	 * then we can continue to increase the max discard sectors until we
2476	 * get a balance value. In cases when the 'host->max_busy_timeout'
2477	 * isn't specified, use the default max erase timeout.
2478	 */
2479	do {
2480		y = 0;
2481		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
2482			timeout = mmc_erase_timeout(card, arg, qty + x);
2483
2484			if (qty + x > min_qty && timeout > max_busy_timeout)
2485				break;
2486
2487			if (timeout < last_timeout)
2488				break;
2489			last_timeout = timeout;
2490			y = x;
2491		}
2492		qty += y;
2493	} while (y);
2494
2495	if (!qty)
2496		return 0;
2497
2498	/*
2499	 * When specifying a sector range to trim, chances are we might cross
2500	 * an erase-group boundary even if the amount of sectors is less than
2501	 * one erase-group.
2502	 * If we can only fit one erase-group in the controller timeout budget,
2503	 * we have to care that erase-group boundaries are not crossed by a
2504	 * single trim operation. We flag that special case with "eg_boundary".
2505	 * In all other cases we can just decrement qty and pretend that we
2506	 * always touch (qty + 1) erase-groups as a simple optimization.
2507	 */
2508	if (qty == 1)
2509		card->eg_boundary = 1;
2510	else
2511		qty--;
2512
2513	/* Convert qty to sectors */
2514	if (card->erase_shift)
2515		max_discard = qty << card->erase_shift;
2516	else if (mmc_card_sd(card))
2517		max_discard = qty + 1;
2518	else
2519		max_discard = qty * card->erase_size;
2520
2521	return max_discard;
2522}
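/*
 * Illustrative worked example of the doubling search above (editor's
 * sketch with assumed values, not part of the original code): if each
 * erase group costs 300 ms, max_busy_timeout is 1000 ms and min_qty is 1,
 * the first pass accepts x = 1 and x = 2 (600 ms) but rejects x = 4
 * (1200 ms), the second pass accepts only x = 1 (900 ms) and the third
 * finds nothing, so qty ends at 3; it is then decremented to 2 (or
 * eg_boundary would be set had it ended at 1) before being converted to
 * sectors.
 */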
2523
2524unsigned int mmc_calc_max_discard(struct mmc_card *card)
2525{
2526	struct mmc_host *host = card->host;
2527	unsigned int max_discard, max_trim;
2528
2529	/*
2530	 * Without erase_group_def set, MMC erase timeout depends on clock
2531	 * frequency which can change.  In that case, the best choice is
2532	 * just the preferred erase size.
2533	 */
2534	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
2535		return card->pref_erase;
2536
2537	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
2538	if (mmc_can_trim(card)) {
2539		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
2540		if (max_trim < max_discard)
2541			max_discard = max_trim;
2542	} else if (max_discard < card->erase_size) {
2543		max_discard = 0;
2544	}
2545	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
2546		mmc_hostname(host), max_discard, host->max_busy_timeout ?
2547		host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
2548	return max_discard;
2549}
2550EXPORT_SYMBOL(mmc_calc_max_discard);
2551
2552int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
2553{
2554	struct mmc_command cmd = {0};
2555
2556	if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
2557	    mmc_card_hs400(card) || mmc_card_hs400es(card))
2558		return 0;
2559
2560	cmd.opcode = MMC_SET_BLOCKLEN;
2561	cmd.arg = blocklen;
2562	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2563	return mmc_wait_for_cmd(card->host, &cmd, 5);
2564}
2565EXPORT_SYMBOL(mmc_set_blocklen);
2566
2567int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
2568			bool is_rel_write)
2569{
2570	struct mmc_command cmd = {0};
2571
2572	cmd.opcode = MMC_SET_BLOCK_COUNT;
2573	cmd.arg = blockcount & 0x0000FFFF;
2574	if (is_rel_write)
2575		cmd.arg |= 1 << 31;
2576	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2577	return mmc_wait_for_cmd(card->host, &cmd, 5);
2578}
2579EXPORT_SYMBOL(mmc_set_blockcount);
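/*
 * Illustrative note on the CMD23 argument packing above (editor's sketch,
 * not part of the original code): the low 16 bits carry the block count
 * and bit 31 flags a reliable write, so mmc_set_blockcount(card, 8, true)
 * sends arg = 0x80000008.
 */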
2580
2581static void mmc_hw_reset_for_init(struct mmc_host *host)
2582{
2583	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
2584		return;
2585	host->ops->hw_reset(host);
2586}
2587
2588int mmc_hw_reset(struct mmc_host *host)
2589{
2590	int ret;
2591
2592	if (!host->card)
2593		return -EINVAL;
2594
2595	mmc_bus_get(host);
2596	if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
2597		mmc_bus_put(host);
2598		return -EOPNOTSUPP;
2599	}
2600
2601	ret = host->bus_ops->reset(host);
2602	mmc_bus_put(host);
2603
2604	if (ret)
2605		pr_warn("%s: tried to reset card, got error %d\n",
2606			mmc_hostname(host), ret);
2607
2608	return ret;
2609}
2610EXPORT_SYMBOL(mmc_hw_reset);
2611
2612static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
2613{
2614	host->f_init = freq;
2615
2616#ifdef CONFIG_MMC_DEBUG
2617	pr_info("%s: %s: trying to init card at %u Hz\n",
2618		mmc_hostname(host), __func__, host->f_init);
2619#endif
2620	mmc_power_up(host, host->ocr_avail);
2621
2622	/*
2623	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
2624	 * do a hardware reset if possible.
2625	 */
2626	mmc_hw_reset_for_init(host);
2627
2628	/*
2629	 * sdio_reset sends CMD52 to reset card.  Since we do not know
2630	 * if the card is being re-initialized, just send it.  CMD52
2631	 * should be ignored by SD/eMMC cards.
2632	 * Skip it if we already know that we do not support SDIO commands
2633	 */
2634	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2635		sdio_reset(host);
2636
2637	mmc_go_idle(host);
2638
2639	if (!(host->caps2 & MMC_CAP2_NO_SD))
2640		mmc_send_if_cond(host, host->ocr_avail);
2641
2642	/* Order's important: probe SDIO, then SD, then MMC */
2643	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2644		if (!mmc_attach_sdio(host))
2645			return 0;
2646
2647	if (!(host->caps2 & MMC_CAP2_NO_SD))
2648		if (!mmc_attach_sd(host))
2649			return 0;
2650
2651	if (!(host->caps2 & MMC_CAP2_NO_MMC))
2652		if (!mmc_attach_mmc(host))
2653			return 0;
2654
2655	mmc_power_off(host);
2656	return -EIO;
2657}
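/*
 * Illustrative note (editor's sketch, not part of the original code):
 * mmc_rescan() below walks the freqs[] table, conventionally 400, 300,
 * 200 and 100 kHz clamped to host->f_min, and retries this helper at each
 * step, so a card that fails to initialize at 400 kHz is reattempted at
 * progressively lower clock rates before the host is finally powered off.
 */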
2658
2659int _mmc_detect_card_removed(struct mmc_host *host)
2660{
2661	int ret;
2662
2663	if (!host->card || mmc_card_removed(host->card))
2664		return 1;
2665
2666	ret = host->bus_ops->alive(host);
2667
2668	/*
2669	 * Card detect status and alive check may be out of sync if card is
2670	 * removed slowly, when card detect switch changes while card/slot
2671	 * pads are still contacted in hardware (refer to "SD Card Mechanical
2672	 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
2673	 * detect work 200ms later for this case.
2674	 */
2675	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
2676		mmc_detect_change(host, msecs_to_jiffies(200));
2677		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
2678	}
2679
2680	if (ret) {
2681		mmc_card_set_removed(host->card);
2682		pr_debug("%s: card remove detected\n", mmc_hostname(host));
2683	}
2684
2685	return ret;
2686}
2687
2688int mmc_detect_card_removed(struct mmc_host *host)
2689{
2690	struct mmc_card *card = host->card;
2691	int ret;
2692
2693	WARN_ON(!host->claimed);
2694
2695	if (!card)
2696		return 1;
2697
2698	if (!mmc_card_is_removable(host))
2699		return 0;
2700
2701	ret = mmc_card_removed(card);
2702	/*
2703	 * The card will be considered unchanged unless we have been asked to
2704	 * detect a change or host requires polling to provide card detection.
2705	 */
2706	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
2707		return ret;
2708
2709	host->detect_change = 0;
2710	if (!ret) {
2711		ret = _mmc_detect_card_removed(host);
2712		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
2713			/*
2714			 * Schedule a detect work as soon as possible to let a
2715			 * rescan handle the card removal.
2716			 */
2717			cancel_delayed_work(&host->detect);
2718			_mmc_detect_change(host, 0, false);
2719		}
2720	}
2721
2722	return ret;
2723}
2724EXPORT_SYMBOL(mmc_detect_card_removed);
2725
2726void mmc_rescan(struct work_struct *work)
2727{
2728	struct mmc_host *host =
2729		container_of(work, struct mmc_host, detect.work);
2730	int i;
2731
2732	if (host->rescan_disable)
2733		return;
2734
2735	/* If there is a non-removable card registered, only scan once */
2736	if (!mmc_card_is_removable(host) && host->rescan_entered)
2737		return;
2738	host->rescan_entered = 1;
2739
2740	if (host->trigger_card_event && host->ops->card_event) {
2741		mmc_claim_host(host);
2742		host->ops->card_event(host);
2743		mmc_release_host(host);
2744		host->trigger_card_event = false;
2745	}
2746
2747	mmc_bus_get(host);
2748
2749	/*
2750	 * if there is a _removable_ card registered, check whether it is
2751	 * still present
2752	 */
2753	if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
2754		host->bus_ops->detect(host);
2755
2756	host->detect_change = 0;
2757
2758	/*
2759	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
2760	 * the card is no longer present.
2761	 */
2762	mmc_bus_put(host);
2763	mmc_bus_get(host);
2764
2765	/* if there still is a card present, stop here */
2766	if (host->bus_ops != NULL) {
2767		mmc_bus_put(host);
2768		goto out;
2769	}
2770
2771	/*
2772	 * Only we can add a new handler, so it's safe to
2773	 * release the lock here.
2774	 */
2775	mmc_bus_put(host);
2776
2777	mmc_claim_host(host);
2778	if (mmc_card_is_removable(host) && host->ops->get_cd &&
2779			host->ops->get_cd(host) == 0) {
2780		mmc_power_off(host);
2781		mmc_release_host(host);
2782		goto out;
2783	}
2784
2785	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2786		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
2787			break;
2788		if (freqs[i] <= host->f_min)
2789			break;
2790	}
2791	mmc_release_host(host);
2792
2793 out:
2794	if (host->caps & MMC_CAP_NEEDS_POLL)
2795		mmc_schedule_delayed_work(&host->detect, HZ);
2796}
2797
2798void mmc_start_host(struct mmc_host *host)
2799{
2800	host->f_init = max(freqs[0], host->f_min);
2801	host->rescan_disable = 0;
2802	host->ios.power_mode = MMC_POWER_UNDEFINED;
2803
2804	if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
2805		mmc_claim_host(host);
2806		mmc_power_up(host, host->ocr_avail);
2807		mmc_release_host(host);
2808	}
2809
2810	mmc_gpiod_request_cd_irq(host);
2811	_mmc_detect_change(host, 0, false);
2812}
2813
2814void mmc_stop_host(struct mmc_host *host)
2815{
2816#ifdef CONFIG_MMC_DEBUG
2817	unsigned long flags;
2818	spin_lock_irqsave(&host->lock, flags);
2819	host->removed = 1;
2820	spin_unlock_irqrestore(&host->lock, flags);
2821#endif
2822	if (host->slot.cd_irq >= 0)
2823		disable_irq(host->slot.cd_irq);
2824
2825	host->rescan_disable = 1;
2826	cancel_delayed_work_sync(&host->detect);
2827
2828	/* clear pm flags now and let card drivers set them as needed */
2829	host->pm_flags = 0;
2830
2831	mmc_bus_get(host);
2832	if (host->bus_ops && !host->bus_dead) {
2833		/* Calling bus_ops->remove() with a claimed host can deadlock */
2834		host->bus_ops->remove(host);
2835		mmc_claim_host(host);
2836		mmc_detach_bus(host);
2837		mmc_power_off(host);
2838		mmc_release_host(host);
2839		mmc_bus_put(host);
2840		return;
2841	}
2842	mmc_bus_put(host);
2843
2844	mmc_claim_host(host);
2845	mmc_power_off(host);
2846	mmc_release_host(host);
2847}
2848
2849int mmc_power_save_host(struct mmc_host *host)
2850{
2851	int ret = 0;
2852
2853#ifdef CONFIG_MMC_DEBUG
2854	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
2855#endif
2856
2857	mmc_bus_get(host);
2858
2859	if (!host->bus_ops || host->bus_dead) {
2860		mmc_bus_put(host);
2861		return -EINVAL;
2862	}
2863
2864	if (host->bus_ops->power_save)
2865		ret = host->bus_ops->power_save(host);
2866
2867	mmc_bus_put(host);
2868
2869	mmc_power_off(host);
2870
2871	return ret;
2872}
2873EXPORT_SYMBOL(mmc_power_save_host);
2874
2875int mmc_power_restore_host(struct mmc_host *host)
2876{
2877	int ret;
2878
2879#ifdef CONFIG_MMC_DEBUG
2880	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
2881#endif
2882
2883	mmc_bus_get(host);
2884
2885	if (!host->bus_ops || host->bus_dead) {
2886		mmc_bus_put(host);
2887		return -EINVAL;
2888	}
2889
2890	mmc_power_up(host, host->card->ocr);
2891	ret = host->bus_ops->power_restore(host);
2892
2893	mmc_bus_put(host);
2894
2895	return ret;
2896}
2897EXPORT_SYMBOL(mmc_power_restore_host);
2898
2899/*
2900 * Flush the cache to the non-volatile storage.
2901 */
2902int mmc_flush_cache(struct mmc_card *card)
2903{
2904	int err = 0;
2905
2906	if (mmc_card_mmc(card) &&
2907			(card->ext_csd.cache_size > 0) &&
2908			(card->ext_csd.cache_ctrl & 1)) {
2909		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2910				EXT_CSD_FLUSH_CACHE, 1, 0);
2911		if (err)
2912			pr_err("%s: cache flush error %d\n",
2913					mmc_hostname(card->host), err);
2914	}
2915
2916	return err;
2917}
2918EXPORT_SYMBOL(mmc_flush_cache);
2919
2920#ifdef CONFIG_PM_SLEEP
2921/* Do the card removal on suspend if the card is assumed removable.
2922 * Do that in a pm notifier while userspace isn't yet frozen, so we will
2923 * still be able to sync the card.
2924 */
2925static int mmc_pm_notify(struct notifier_block *notify_block,
2926			unsigned long mode, void *unused)
2927{
2928	struct mmc_host *host = container_of(
2929		notify_block, struct mmc_host, pm_notify);
2930	unsigned long flags;
2931	int err = 0;
2932
2933	switch (mode) {
2934	case PM_HIBERNATION_PREPARE:
2935	case PM_SUSPEND_PREPARE:
2936	case PM_RESTORE_PREPARE:
2937		spin_lock_irqsave(&host->lock, flags);
2938		host->rescan_disable = 1;
2939		spin_unlock_irqrestore(&host->lock, flags);
2940		cancel_delayed_work_sync(&host->detect);
2941
2942		if (!host->bus_ops)
2943			break;
2944
2945		/* Validate prerequisites for suspend */
2946		if (host->bus_ops->pre_suspend)
2947			err = host->bus_ops->pre_suspend(host);
2948		if (!err)
2949			break;
2950
2951		/* Calling bus_ops->remove() with a claimed host can deadlock */
2952		host->bus_ops->remove(host);
2953		mmc_claim_host(host);
2954		mmc_detach_bus(host);
2955		mmc_power_off(host);
2956		mmc_release_host(host);
2957		host->pm_flags = 0;
2958		break;
2959
2960	case PM_POST_SUSPEND:
2961	case PM_POST_HIBERNATION:
2962	case PM_POST_RESTORE:
2963
2964		spin_lock_irqsave(&host->lock, flags);
2965		host->rescan_disable = 0;
2966		spin_unlock_irqrestore(&host->lock, flags);
2967		_mmc_detect_change(host, 0, false);
2968
2969	}
2970
2971	return 0;
2972}
2973
2974void mmc_register_pm_notifier(struct mmc_host *host)
2975{
2976	host->pm_notify.notifier_call = mmc_pm_notify;
2977	register_pm_notifier(&host->pm_notify);
2978}
2979
2980void mmc_unregister_pm_notifier(struct mmc_host *host)
2981{
2982	unregister_pm_notifier(&host->pm_notify);
2983}
2984#endif
2985
2986/**
2987 * mmc_init_context_info() - init synchronization context
2988 * @host: mmc host
2989 *
2990 * Init struct context_info needed to implement asynchronous
2991 * request mechanism, used by the mmc core, the host driver and the
2992 * mmc request supplier.
2993 */
2994void mmc_init_context_info(struct mmc_host *host)
2995{
2996	host->context_info.is_new_req = false;
2997	host->context_info.is_done_rcv = false;
2998	host->context_info.is_waiting_last_req = false;
2999	init_waitqueue_head(&host->context_info.wait);
3000}
3001
3002static int __init mmc_init(void)
3003{
3004	int ret;
3005
3006	ret = mmc_register_bus();
3007	if (ret)
3008		return ret;
3009
3010	ret = mmc_register_host_class();
3011	if (ret)
3012		goto unregister_bus;
3013
3014	ret = sdio_register_bus();
3015	if (ret)
3016		goto unregister_host_class;
3017
3018	return 0;
3019
3020unregister_host_class:
3021	mmc_unregister_host_class();
3022unregister_bus:
3023	mmc_unregister_bus();
3024	return ret;
3025}
3026
3027static void __exit mmc_exit(void)
3028{
3029	sdio_unregister_bus();
3030	mmc_unregister_host_class();
3031	mmc_unregister_bus();
3032}
3033
3034subsys_initcall(mmc_init);
3035module_exit(mmc_exit);
3036
3037MODULE_LICENSE("GPL");