   1/*
   2 *  linux/drivers/mmc/core/core.c
   3 *
   4 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
   5 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
   6 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
   7 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as
  11 * published by the Free Software Foundation.
  12 */
  13#include <linux/module.h>
  14#include <linux/init.h>
  15#include <linux/interrupt.h>
  16#include <linux/completion.h>
  17#include <linux/device.h>
  18#include <linux/delay.h>
  19#include <linux/pagemap.h>
  20#include <linux/err.h>
  21#include <linux/leds.h>
  22#include <linux/scatterlist.h>
  23#include <linux/log2.h>
  24#include <linux/regulator/consumer.h>
  25#include <linux/pm_runtime.h>
  26#include <linux/pm_wakeup.h>
  27#include <linux/suspend.h>
  28#include <linux/fault-inject.h>
  29#include <linux/random.h>
  30#include <linux/slab.h>
  31#include <linux/of.h>
  32
  33#include <linux/mmc/card.h>
  34#include <linux/mmc/host.h>
  35#include <linux/mmc/mmc.h>
  36#include <linux/mmc/sd.h>
  37#include <linux/mmc/slot-gpio.h>
  38
  39#include "core.h"
  40#include "bus.h"
  41#include "host.h"
  42#include "sdio_bus.h"
  43#include "pwrseq.h"
  44
  45#include "mmc_ops.h"
  46#include "sd_ops.h"
  47#include "sdio_ops.h"
  48
  49/* If the device is not responding */
  50#define MMC_CORE_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */
  51
  52/*
  53 * Background operations can take a long time, depending on the housekeeping
  54 * operations the card has to perform.
  55 */
  56#define MMC_BKOPS_MAX_TIMEOUT	(4 * 60 * 1000) /* max time to wait in ms */
  57
  58static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
  59
  60/*
  61 * Enabling software CRCs on the data blocks can be a significant (30%)
  62 * performance cost, and for other reasons may not always be desired.
  63 * So we allow it to be disabled.
  64 */
  65bool use_spi_crc = 1;
  66module_param(use_spi_crc, bool, 0);
  67
  68static int mmc_schedule_delayed_work(struct delayed_work *work,
  69				     unsigned long delay)
  70{
  71	/*
  72	 * We use the system_freezable_wq for two reasons. First, it allows
  73	 * several work items (though not the same work item) to be executed
  74	 * simultaneously. Second, the queue becomes frozen when userspace
  75	 * becomes frozen during system PM.
  76	 */
  77	return queue_delayed_work(system_freezable_wq, work, delay);
  78}
  79
  80#ifdef CONFIG_FAIL_MMC_REQUEST
  81
  82/*
  83 * Internal function. Inject random data errors.
  84 * If mmc_data is NULL no errors are injected.
  85 */
  86static void mmc_should_fail_request(struct mmc_host *host,
  87				    struct mmc_request *mrq)
  88{
  89	struct mmc_command *cmd = mrq->cmd;
  90	struct mmc_data *data = mrq->data;
  91	static const int data_errors[] = {
  92		-ETIMEDOUT,
  93		-EILSEQ,
  94		-EIO,
  95	};
  96
  97	if (!data)
  98		return;
  99
 100	if (cmd->error || data->error ||
 101	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
 102		return;
 103
 104	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
 105	data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
 106}
 107
 108#else /* CONFIG_FAIL_MMC_REQUEST */
 109
 110static inline void mmc_should_fail_request(struct mmc_host *host,
 111					   struct mmc_request *mrq)
 112{
 113}
 114
 115#endif /* CONFIG_FAIL_MMC_REQUEST */
 116
 117/**
 118 *	mmc_request_done - finish processing an MMC request
 119 *	@host: MMC host which completed request
 120 *	@mrq: MMC request which completed
 121 *
 122 *	MMC drivers should call this function when they have completed
 123 *	their processing of a request.
 124 */
 125void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
 126{
 127	struct mmc_command *cmd = mrq->cmd;
 128	int err = cmd->error;
 129
 130	/* Flag re-tuning needed on CRC errors */
 131	if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
 132	    cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
 133	    (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
 134	    (mrq->data && mrq->data->error == -EILSEQ) ||
 135	    (mrq->stop && mrq->stop->error == -EILSEQ)))
 136		mmc_retune_needed(host);
 137
 138	if (err && cmd->retries && mmc_host_is_spi(host)) {
 139		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
 140			cmd->retries = 0;
 141	}
 142
 143	if (err && cmd->retries && !mmc_card_removed(host->card)) {
 144		/*
 145		 * Request starter must handle retries - see
 146		 * mmc_wait_for_req_done().
 147		 */
 148		if (mrq->done)
 149			mrq->done(mrq);
 150	} else {
 151		mmc_should_fail_request(host, mrq);
 152
 153		led_trigger_event(host->led, LED_OFF);
 154
 155		if (mrq->sbc) {
 156			pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
 157				mmc_hostname(host), mrq->sbc->opcode,
 158				mrq->sbc->error,
 159				mrq->sbc->resp[0], mrq->sbc->resp[1],
 160				mrq->sbc->resp[2], mrq->sbc->resp[3]);
 161		}
 162
 163		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
 164			mmc_hostname(host), cmd->opcode, err,
 165			cmd->resp[0], cmd->resp[1],
 166			cmd->resp[2], cmd->resp[3]);
 167
 168		if (mrq->data) {
 169			pr_debug("%s:     %d bytes transferred: %d\n",
 170				mmc_hostname(host),
 171				mrq->data->bytes_xfered, mrq->data->error);
 172		}
 173
 174		if (mrq->stop) {
 175			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
 176				mmc_hostname(host), mrq->stop->opcode,
 177				mrq->stop->error,
 178				mrq->stop->resp[0], mrq->stop->resp[1],
 179				mrq->stop->resp[2], mrq->stop->resp[3]);
 180		}
 181
 182		if (mrq->done)
 183			mrq->done(mrq);
 184	}
 185}
 186
 187EXPORT_SYMBOL(mmc_request_done);
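
    /*
     * Illustrative sketch (editor's example, not part of the original file):
     * a host controller driver typically calls mmc_request_done() from its
     * interrupt handler or completion path once the hardware has finished
     * the transfer.  The names my_host and my_host_complete are hypothetical.
     *
     *	static void my_host_complete(struct my_host *h, int error)
     *	{
     *		struct mmc_request *mrq = h->mrq;
     *
     *		h->mrq = NULL;
     *		mrq->cmd->error = error;
     *		if (mrq->data && !error)
     *			mrq->data->bytes_xfered =
     *				mrq->data->blksz * mrq->data->blocks;
     *		mmc_request_done(h->mmc, mrq);
     *	}
     */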
 188
 189static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 190{
 191	int err;
 192
 193	/* Assumes host controller has been runtime resumed by mmc_claim_host */
 194	err = mmc_retune(host);
 195	if (err) {
 196		mrq->cmd->error = err;
 197		mmc_request_done(host, mrq);
 198		return;
 199	}
 200
 201	/*
 202	 * For SDIO read/write commands we must wait for the card to stop
 203	 * signalling busy, otherwise some SDIO devices won't work properly.
 204	 */
 205	if (mmc_is_io_op(mrq->cmd->opcode) && host->ops->card_busy) {
 206		int tries = 500; /* Wait approx 500ms at maximum */
 207
 208		while (host->ops->card_busy(host) && --tries)
 209			mmc_delay(1);
 210
 211		if (tries == 0) {
 212			mrq->cmd->error = -EBUSY;
 213			mmc_request_done(host, mrq);
 214			return;
 215		}
 216	}
 217
 218	host->ops->request(host, mrq);
 219}
 220
 221static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 222{
 223#ifdef CONFIG_MMC_DEBUG
 224	unsigned int i, sz;
 225	struct scatterlist *sg;
 226#endif
 227	mmc_retune_hold(host);
 228
 229	if (mmc_card_removed(host->card))
 230		return -ENOMEDIUM;
 231
 232	if (mrq->sbc) {
 233		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
 234			 mmc_hostname(host), mrq->sbc->opcode,
 235			 mrq->sbc->arg, mrq->sbc->flags);
 236	}
 237
 238	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
 239		 mmc_hostname(host), mrq->cmd->opcode,
 240		 mrq->cmd->arg, mrq->cmd->flags);
 241
 242	if (mrq->data) {
 243		pr_debug("%s:     blksz %d blocks %d flags %08x "
 244			"tsac %d ms nsac %d\n",
 245			mmc_hostname(host), mrq->data->blksz,
 246			mrq->data->blocks, mrq->data->flags,
 247			mrq->data->timeout_ns / 1000000,
 248			mrq->data->timeout_clks);
 249	}
 250
 251	if (mrq->stop) {
 252		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
 253			 mmc_hostname(host), mrq->stop->opcode,
 254			 mrq->stop->arg, mrq->stop->flags);
 255	}
 256
 257	WARN_ON(!host->claimed);
 258
 259	mrq->cmd->error = 0;
 260	mrq->cmd->mrq = mrq;
 261	if (mrq->sbc) {
 262		mrq->sbc->error = 0;
 263		mrq->sbc->mrq = mrq;
 264	}
 265	if (mrq->data) {
 266		BUG_ON(mrq->data->blksz > host->max_blk_size);
 267		BUG_ON(mrq->data->blocks > host->max_blk_count);
 268		BUG_ON(mrq->data->blocks * mrq->data->blksz >
 269			host->max_req_size);
 270
 271#ifdef CONFIG_MMC_DEBUG
 272		sz = 0;
 273		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
 274			sz += sg->length;
 275		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
 276#endif
 277
 278		mrq->cmd->data = mrq->data;
 279		mrq->data->error = 0;
 280		mrq->data->mrq = mrq;
 281		if (mrq->stop) {
 282			mrq->data->stop = mrq->stop;
 283			mrq->stop->error = 0;
 284			mrq->stop->mrq = mrq;
 285		}
 286	}
 287	led_trigger_event(host->led, LED_FULL);
 288	__mmc_start_request(host, mrq);
 289
 290	return 0;
 291}
 292
 293/**
 294 *	mmc_start_bkops - start BKOPS for supported cards
 295 *	@card: MMC card to start BKOPS
 296 *	@from_exception: A flag to indicate if this function was
 297 *			 called due to an exception raised by the card
 298 *
 299 *	Start background operations whenever requested.
 300 *	When the urgent BKOPS bit is set in a R1 command response
 301 *	then background operations should be started immediately.
 302*/
 303void mmc_start_bkops(struct mmc_card *card, bool from_exception)
 304{
 305	int err;
 306	int timeout;
 307	bool use_busy_signal;
 308
 309	BUG_ON(!card);
 310
 311	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
 312		return;
 313
 314	err = mmc_read_bkops_status(card);
 315	if (err) {
 316		pr_err("%s: Failed to read bkops status: %d\n",
 317		       mmc_hostname(card->host), err);
 318		return;
 319	}
 320
 321	if (!card->ext_csd.raw_bkops_status)
 322		return;
 323
 324	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
 325	    from_exception)
 326		return;
 327
 328	mmc_claim_host(card->host);
 329	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
 330		timeout = MMC_BKOPS_MAX_TIMEOUT;
 331		use_busy_signal = true;
 332	} else {
 333		timeout = 0;
 334		use_busy_signal = false;
 335	}
 336
 337	mmc_retune_hold(card->host);
 338
 339	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 340			EXT_CSD_BKOPS_START, 1, timeout,
 341			use_busy_signal, true, false);
 342	if (err) {
 343		pr_warn("%s: Error %d starting bkops\n",
 344			mmc_hostname(card->host), err);
 345		mmc_retune_release(card->host);
 346		goto out;
 347	}
 348
 349	/*
 350	 * For urgent BKOPS status (LEVEL_2 and higher) BKOPS is
 351	 * executed synchronously; otherwise the operation is left
 352	 * running in the background.
 353	 */
 354	if (!use_busy_signal)
 355		mmc_card_set_doing_bkops(card);
 356	else
 357		mmc_retune_release(card->host);
 358out:
 359	mmc_release_host(card->host);
 360}
 361EXPORT_SYMBOL(mmc_start_bkops);
 362
 363/*
 364 * mmc_wait_data_done() - done callback for data request
 365 * @mrq: done data request
 366 *
 367 * Wakes up mmc context, passed as a callback to host controller driver
 368 */
 369static void mmc_wait_data_done(struct mmc_request *mrq)
 370{
 371	struct mmc_context_info *context_info = &mrq->host->context_info;
 372
 373	context_info->is_done_rcv = true;
 374	wake_up_interruptible(&context_info->wait);
 375}
 376
 377static void mmc_wait_done(struct mmc_request *mrq)
 378{
 379	complete(&mrq->completion);
 380}
 381
 382/*
 383 * __mmc_start_data_req() - starts data request
 384 * @host: MMC host to start the request
 385 * @mrq: data request to start
 386 *
 387 * Sets the done callback to be called when request is completed by the card.
 388 * Starts data mmc request execution
 389 */
 390static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
 391{
 392	int err;
 393
 394	mrq->done = mmc_wait_data_done;
 395	mrq->host = host;
 396
 397	err = mmc_start_request(host, mrq);
 398	if (err) {
 399		mrq->cmd->error = err;
 400		mmc_wait_data_done(mrq);
 401	}
 402
 403	return err;
 404}
 405
 406static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
 407{
 408	int err;
 409
 410	init_completion(&mrq->completion);
 411	mrq->done = mmc_wait_done;
 412
 413	err = mmc_start_request(host, mrq);
 414	if (err) {
 415		mrq->cmd->error = err;
 416		complete(&mrq->completion);
 417	}
 418
 419	return err;
 420}
 421
 422/*
 423 * mmc_wait_for_data_req_done() - wait for request completed
 424 * @host: MMC host to prepare the command.
 425 * @mrq: MMC request to wait for
 426 *
 427 * Blocks the MMC context until the host controller acks the end of the
 428 * data request execution, or a new request notification arrives from the
 429 * block layer. Handles command retries.
 430 *
 431 * Returns enum mmc_blk_status after checking errors.
 432 */
 433static int mmc_wait_for_data_req_done(struct mmc_host *host,
 434				      struct mmc_request *mrq,
 435				      struct mmc_async_req *next_req)
 436{
 437	struct mmc_command *cmd;
 438	struct mmc_context_info *context_info = &host->context_info;
 439	int err;
 440	unsigned long flags;
 441
 442	while (1) {
 443		wait_event_interruptible(context_info->wait,
 444				(context_info->is_done_rcv ||
 445				 context_info->is_new_req));
 446		spin_lock_irqsave(&context_info->lock, flags);
 447		context_info->is_waiting_last_req = false;
 448		spin_unlock_irqrestore(&context_info->lock, flags);
 449		if (context_info->is_done_rcv) {
 450			context_info->is_done_rcv = false;
 451			context_info->is_new_req = false;
 452			cmd = mrq->cmd;
 453
 454			if (!cmd->error || !cmd->retries ||
 455			    mmc_card_removed(host->card)) {
 456				err = host->areq->err_check(host->card,
 457							    host->areq);
 458				break; /* return err */
 459			} else {
 460				mmc_retune_recheck(host);
 461				pr_info("%s: req failed (CMD%u): %d, retrying...\n",
 462					mmc_hostname(host),
 463					cmd->opcode, cmd->error);
 464				cmd->retries--;
 465				cmd->error = 0;
 466				__mmc_start_request(host, mrq);
 467				continue; /* wait for done/new event again */
 468			}
 469		} else if (context_info->is_new_req) {
 470			context_info->is_new_req = false;
 471			if (!next_req)
 472				return MMC_BLK_NEW_REQUEST;
 473		}
 474	}
 475	mmc_retune_release(host);
 476	return err;
 477}
 478
 479static void mmc_wait_for_req_done(struct mmc_host *host,
 480				  struct mmc_request *mrq)
 481{
 482	struct mmc_command *cmd;
 483
 484	while (1) {
 485		wait_for_completion(&mrq->completion);
 486
 487		cmd = mrq->cmd;
 488
 489		/*
 490		 * If host has timed out waiting for the sanitize
 491		 * to complete, the card might still be in programming state
 492		 * so let's try to bring the card out of programming
 493		 * state.
 494		 */
 495		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
 496			if (!mmc_interrupt_hpi(host->card)) {
 497				pr_warn("%s: %s: Interrupted sanitize\n",
 498					mmc_hostname(host), __func__);
 499				cmd->error = 0;
 500				break;
 501			} else {
 502				pr_err("%s: %s: Failed to interrupt sanitize\n",
 503				       mmc_hostname(host), __func__);
 504			}
 505		}
 506		if (!cmd->error || !cmd->retries ||
 507		    mmc_card_removed(host->card))
 508			break;
 509
 510		mmc_retune_recheck(host);
 511
 512		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
 513			 mmc_hostname(host), cmd->opcode, cmd->error);
 514		cmd->retries--;
 515		cmd->error = 0;
 516		__mmc_start_request(host, mrq);
 517	}
 518
 519	mmc_retune_release(host);
 520}
 521
 522/**
 523 *	mmc_pre_req - Prepare for a new request
 524 *	@host: MMC host to prepare command
 525 *	@mrq: MMC request to prepare for
 526 *	@is_first_req: true if there is no previously started request
 527 *                     that may run in parallel to this call, otherwise false
 528 *
 529 *	mmc_pre_req() is called prior to mmc_start_req() to let
 530 *	host prepare for the new request. Preparation of a request may be
 531 *	performed while another request is running on the host.
 532 */
 533static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
 534		 bool is_first_req)
 535{
 536	if (host->ops->pre_req)
 537		host->ops->pre_req(host, mrq, is_first_req);
 538}
 539
 540/**
 541 *	mmc_post_req - Post process a completed request
 542 *	@host: MMC host to post process command
 543 *	@mrq: MMC request to post process for
 544 *	@err: Error, if non zero, clean up any resources made in pre_req
 545 *
 546 *	Let the host post process a completed request. Post processing of
 547 *	a request may be performed while another reuqest is running.
 548 */
 549static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
 550			 int err)
 551{
 552	if (host->ops->post_req)
 553		host->ops->post_req(host, mrq, err);
 554}
 555
 556/**
 557 *	mmc_start_req - start a non-blocking request
 558 *	@host: MMC host to start command
 559 *	@areq: async request to start
 560 *	@error: out parameter returns 0 for success, otherwise non zero
 561 *
 562 *	Start a new MMC custom command request for a host.
 563 *	If there is an ongoing async request, wait for completion
 564 *	of that request and start the new one and return.
 565 *	Does not wait for the new request to complete.
 566 *
 567 *      Returns the completed request, NULL in case of none completed.
 568 *	Wait for an ongoing request (previously started) to complete and
 569 *	return the completed request. If there is no ongoing request, NULL
 570 *	is returned without waiting. NULL is not an error condition.
 571 */
 572struct mmc_async_req *mmc_start_req(struct mmc_host *host,
 573				    struct mmc_async_req *areq, int *error)
 574{
 575	int err = 0;
 576	int start_err = 0;
 577	struct mmc_async_req *data = host->areq;
 578
 579	/* Prepare a new request */
 580	if (areq)
 581		mmc_pre_req(host, areq->mrq, !host->areq);
 582
 583	if (host->areq) {
 584		err = mmc_wait_for_data_req_done(host, host->areq->mrq,	areq);
 585		if (err == MMC_BLK_NEW_REQUEST) {
 586			if (error)
 587				*error = err;
 588			/*
 589			 * The previous request was not completed,
 590			 * nothing to return
 591			 */
 592			return NULL;
 593		}
 594		/*
 595		 * Check BKOPS urgency for each R1 response
 596		 */
 597		if (host->card && mmc_card_mmc(host->card) &&
 598		    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
 599		     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
 600		    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
 601
 602			/* Cancel the prepared request */
 603			if (areq)
 604				mmc_post_req(host, areq->mrq, -EINVAL);
 605
 606			mmc_start_bkops(host->card, true);
 607
 608			/* prepare the request again */
 609			if (areq)
 610				mmc_pre_req(host, areq->mrq, !host->areq);
 611		}
 612	}
 613
 614	if (!err && areq)
 615		start_err = __mmc_start_data_req(host, areq->mrq);
 616
 617	if (host->areq)
 618		mmc_post_req(host, host->areq->mrq, 0);
 619
 620	 /* Cancel a prepared request if it was not started. */
 621	if ((err || start_err) && areq)
 622		mmc_post_req(host, areq->mrq, -EINVAL);
 623
 624	if (err)
 625		host->areq = NULL;
 626	else
 627		host->areq = areq;
 628
 629	if (error)
 630		*error = err;
 631	return data;
 632}
 633EXPORT_SYMBOL(mmc_start_req);
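
    /*
     * Illustrative sketch (editor's example, not part of the original file) of
     * the asynchronous request pattern described above, in the style of a
     * block-driver caller; next_areq and handle_completed are hypothetical.
     *
     *	struct mmc_async_req *prev_areq;
     *	int status;
     *
     *	prev_areq = mmc_start_req(host, next_areq, &status);
     *	if (prev_areq)
     *		handle_completed(prev_areq, status);
     *
     * Later, passing a NULL areq waits for the last outstanding request:
     *
     *	mmc_start_req(host, NULL, &status);
     */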
 634
 635/**
 636 *	mmc_wait_for_req - start a request and wait for completion
 637 *	@host: MMC host to start command
 638 *	@mrq: MMC request to start
 639 *
 640 *	Start a new MMC custom command request for a host, and wait
 641 *	for the command to complete. Does not attempt to parse the
 642 *	response.
 643 */
 644void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
 645{
 646	__mmc_start_req(host, mrq);
 647	mmc_wait_for_req_done(host, mrq);
 648}
 649EXPORT_SYMBOL(mmc_wait_for_req);
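
    /*
     * Illustrative sketch (editor's example, not part of the original file):
     * issuing a synchronous single-block read with mmc_wait_for_req().  The
     * buffer "buf" and the address "blk_addr" are hypothetical.
     *
     *	struct mmc_request mrq = {NULL};
     *	struct mmc_command cmd = {0};
     *	struct mmc_data data = {0};
     *	struct scatterlist sg;
     *
     *	cmd.opcode = MMC_READ_SINGLE_BLOCK;
     *	cmd.arg = blk_addr;
     *	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
     *
     *	data.blksz = 512;
     *	data.blocks = 1;
     *	data.flags = MMC_DATA_READ;
     *	data.sg = &sg;
     *	data.sg_len = 1;
     *	sg_init_one(&sg, buf, 512);
     *	mmc_set_data_timeout(&data, card);
     *
     *	mrq.cmd = &cmd;
     *	mrq.data = &data;
     *
     *	mmc_claim_host(card->host);
     *	mmc_wait_for_req(card->host, &mrq);
     *	mmc_release_host(card->host);
     */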
 650
 651/**
 652 *	mmc_interrupt_hpi - issue a High Priority Interrupt (HPI)
 653 *	@card: the MMC card associated with the HPI transfer
 654 *
 655 *	Issue a High Priority Interrupt and poll the card status
 656 *	until it is out of the programming state.
 657 */
 658int mmc_interrupt_hpi(struct mmc_card *card)
 659{
 660	int err;
 661	u32 status;
 662	unsigned long prg_wait;
 663
 664	BUG_ON(!card);
 665
 666	if (!card->ext_csd.hpi_en) {
 667		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
 668		return 1;
 669	}
 670
 671	mmc_claim_host(card->host);
 672	err = mmc_send_status(card, &status);
 673	if (err) {
 674		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
 675		goto out;
 676	}
 677
 678	switch (R1_CURRENT_STATE(status)) {
 679	case R1_STATE_IDLE:
 680	case R1_STATE_READY:
 681	case R1_STATE_STBY:
 682	case R1_STATE_TRAN:
 683		/*
 684		 * In idle and transfer states, HPI is not needed and the caller
 685		 * can issue the next intended command immediately
 686		 */
 687		goto out;
 688	case R1_STATE_PRG:
 689		break;
 690	default:
 691		/* In all other states, it's illegal to issue HPI */
 692		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
 693			mmc_hostname(card->host), R1_CURRENT_STATE(status));
 694		err = -EINVAL;
 695		goto out;
 696	}
 697
 698	err = mmc_send_hpi_cmd(card, &status);
 699	if (err)
 700		goto out;
 701
 702	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
 703	do {
 704		err = mmc_send_status(card, &status);
 705
 706		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
 707			break;
 708		if (time_after(jiffies, prg_wait))
 709			err = -ETIMEDOUT;
 710	} while (!err);
 711
 712out:
 713	mmc_release_host(card->host);
 714	return err;
 715}
 716EXPORT_SYMBOL(mmc_interrupt_hpi);
 717
 718/**
 719 *	mmc_wait_for_cmd - start a command and wait for completion
 720 *	@host: MMC host to start command
 721 *	@cmd: MMC command to start
 722 *	@retries: maximum number of retries
 723 *
 724 *	Start a new MMC command for a host, and wait for the command
 725 *	to complete.  Return any error that occurred while the command
 726 *	was executing.  Do not attempt to parse the response.
 727 */
 728int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
 729{
 730	struct mmc_request mrq = {NULL};
 731
 732	WARN_ON(!host->claimed);
 733
 734	memset(cmd->resp, 0, sizeof(cmd->resp));
 735	cmd->retries = retries;
 736
 737	mrq.cmd = cmd;
 738	cmd->data = NULL;
 739
 740	mmc_wait_for_req(host, &mrq);
 741
 742	return cmd->error;
 743}
 744
 745EXPORT_SYMBOL(mmc_wait_for_cmd);
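
    /*
     * Illustrative sketch (editor's example, not part of the original file):
     * sending CMD13 (SEND_STATUS) with mmc_wait_for_cmd().  The host must
     * already be claimed by the caller.
     *
     *	struct mmc_command cmd = {0};
     *	u32 status;
     *	int err;
     *
     *	cmd.opcode = MMC_SEND_STATUS;
     *	cmd.arg = card->rca << 16;
     *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
     *
     *	err = mmc_wait_for_cmd(card->host, &cmd, 3);
     *	if (!err)
     *		status = cmd.resp[0];
     */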
 746
 747/**
 748 *	mmc_stop_bkops - stop ongoing BKOPS
 749 *	@card: MMC card to check BKOPS
 750 *
 751 *	Send HPI command to stop ongoing background operations to
 752 *	allow rapid servicing of foreground operations, e.g. read/
 753 *	writes. Wait until the card comes out of the programming state
 754 *	to avoid errors in servicing read/write requests.
 755 */
 756int mmc_stop_bkops(struct mmc_card *card)
 757{
 758	int err = 0;
 759
 760	BUG_ON(!card);
 761	err = mmc_interrupt_hpi(card);
 762
 763	/*
 764	 * If err is -EINVAL, an HPI cannot be issued; the card will
 765	 * complete the BKOPS on its own.
 766	 */
 767	if (!err || (err == -EINVAL)) {
 768		mmc_card_clr_doing_bkops(card);
 769		mmc_retune_release(card->host);
 770		err = 0;
 771	}
 772
 773	return err;
 774}
 775EXPORT_SYMBOL(mmc_stop_bkops);
 776
 777int mmc_read_bkops_status(struct mmc_card *card)
 778{
 779	int err;
 780	u8 *ext_csd;
 781
 782	mmc_claim_host(card->host);
 783	err = mmc_get_ext_csd(card, &ext_csd);
 784	mmc_release_host(card->host);
 785	if (err)
 786		return err;
 787
 788	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
 789	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
 790	kfree(ext_csd);
 791	return 0;
 792}
 793EXPORT_SYMBOL(mmc_read_bkops_status);
 794
 795/**
 796 *	mmc_set_data_timeout - set the timeout for a data command
 797 *	@data: data phase for command
 798 *	@card: the MMC card associated with the data transfer
 799 *
 800 *	Computes the data timeout parameters according to the
 801 *	correct algorithm given the card type.
 802 */
 803void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
 804{
 805	unsigned int mult;
 806
 807	/*
 808	 * SDIO cards only define an upper 1 s limit on access.
 809	 */
 810	if (mmc_card_sdio(card)) {
 811		data->timeout_ns = 1000000000;
 812		data->timeout_clks = 0;
 813		return;
 814	}
 815
 816	/*
 817	 * SD cards use a 100 multiplier rather than 10
 818	 */
 819	mult = mmc_card_sd(card) ? 100 : 10;
 820
 821	/*
 822	 * Scale up the multiplier (and therefore the timeout) by
 823	 * the r2w factor for writes.
 824	 */
 825	if (data->flags & MMC_DATA_WRITE)
 826		mult <<= card->csd.r2w_factor;
 827
 828	data->timeout_ns = card->csd.tacc_ns * mult;
 829	data->timeout_clks = card->csd.tacc_clks * mult;
 830
 831	/*
 832	 * SD cards also have an upper limit on the timeout.
 833	 */
 834	if (mmc_card_sd(card)) {
 835		unsigned int timeout_us, limit_us;
 836
 837		timeout_us = data->timeout_ns / 1000;
 838		if (card->host->ios.clock)
 839			timeout_us += data->timeout_clks * 1000 /
 840				(card->host->ios.clock / 1000);
 841
 842		if (data->flags & MMC_DATA_WRITE)
 843			/*
 844			 * The MMC spec says: "It is strongly recommended
 845			 * for hosts to implement more than 500ms
 846			 * timeout value even if the card indicates
 847			 * the 250ms maximum busy length."  Even the
 848			 * previous value of 300ms is known to be
 849			 * insufficient for some cards.
 850			 */
 851			limit_us = 3000000;
 852		else
 853			limit_us = 100000;
 854
 855		/*
 856		 * SDHC cards always use these fixed values.
 857		 */
 858		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
 859			data->timeout_ns = limit_us * 1000;
 860			data->timeout_clks = 0;
 861		}
 862
 863		/* assign limit value if invalid */
 864		if (timeout_us == 0)
 865			data->timeout_ns = limit_us * 1000;
 866	}
 867
 868	/*
 869	 * Some cards require longer data read timeout than indicated in CSD.
 870	 * Address this by setting the read timeout to a "reasonably high"
 871	 * value. For the cards tested, 300ms has proven enough. If necessary,
 872	 * this value can be increased if other problematic cards require this.
 873	 */
 874	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
 875		data->timeout_ns = 300000000;
 876		data->timeout_clks = 0;
 877	}
 878
 879	/*
 880	 * Some cards need very high timeouts if driven in SPI mode.
 881	 * The worst observed timeout was 900ms after writing a
 882	 * continuous stream of data until the internal logic
 883	 * overflowed.
 884	 */
 885	if (mmc_host_is_spi(card->host)) {
 886		if (data->flags & MMC_DATA_WRITE) {
 887			if (data->timeout_ns < 1000000000)
 888				data->timeout_ns = 1000000000;	/* 1s */
 889		} else {
 890			if (data->timeout_ns < 100000000)
 891				data->timeout_ns =  100000000;	/* 100ms */
 892		}
 893	}
 894}
 895EXPORT_SYMBOL(mmc_set_data_timeout);
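
    /*
     * Worked example (editor's note, illustrative numbers): for an SD card
     * write with tacc_ns = 1500000 (1.5 ms) and r2w_factor = 2, the
     * multiplier is 100 << 2 = 400, giving timeout_ns = 1.5 ms * 400 =
     * 600 ms.  That is below the 3000000 us write limit, so it is kept;
     * an SDHC (block-addressed) card would instead use the fixed
     * 3000000 us limit.
     */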
 896
 897/**
 898 *	mmc_align_data_size - pads a transfer size to a more optimal value
 899 *	@card: the MMC card associated with the data transfer
 900 *	@sz: original transfer size
 901 *
 902 *	Pads the original data size with a number of extra bytes in
 903 *	order to avoid controller bugs and/or performance hits
 904 *	(e.g. some controllers revert to PIO for certain sizes).
 905 *
 906 *	Returns the improved size, which might be unmodified.
 907 *
 908 *	Note that this function is only relevant when issuing a
 909 *	single scatter gather entry.
 910 */
 911unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
 912{
 913	/*
 914	 * FIXME: We don't have a system for the controller to tell
 915	 * the core about its problems yet, so for now we just 32-bit
 916	 * align the size.
 917	 */
 918	sz = ((sz + 3) / 4) * 4;
 919
 920	return sz;
 921}
 922EXPORT_SYMBOL(mmc_align_data_size);
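
    /*
     * Editor's example: with the current 32-bit alignment rule, a 13 byte
     * transfer is padded up to 16 bytes:
     *
     *	sz = mmc_align_data_size(card, 13);	(result: sz == 16)
     */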
 923
 924/**
 925 *	__mmc_claim_host - exclusively claim a host
 926 *	@host: mmc host to claim
 927 *	@abort: whether or not the operation should be aborted
 928 *
 929 *	Claim a host for a set of operations.  If @abort is non-NULL and
 930 *	dereferences to a non-zero value then this will return prematurely with
 931 *	that non-zero value without acquiring the lock.  Returns zero
 932 *	with the lock held otherwise.
 933 */
 934int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
 935{
 936	DECLARE_WAITQUEUE(wait, current);
 937	unsigned long flags;
 938	int stop;
 939	bool pm = false;
 940
 941	might_sleep();
 942
 943	add_wait_queue(&host->wq, &wait);
 944	spin_lock_irqsave(&host->lock, flags);
 945	while (1) {
 946		set_current_state(TASK_UNINTERRUPTIBLE);
 947		stop = abort ? atomic_read(abort) : 0;
 948		if (stop || !host->claimed || host->claimer == current)
 949			break;
 950		spin_unlock_irqrestore(&host->lock, flags);
 951		schedule();
 952		spin_lock_irqsave(&host->lock, flags);
 953	}
 954	set_current_state(TASK_RUNNING);
 955	if (!stop) {
 956		host->claimed = 1;
 957		host->claimer = current;
 958		host->claim_cnt += 1;
 959		if (host->claim_cnt == 1)
 960			pm = true;
 961	} else
 962		wake_up(&host->wq);
 963	spin_unlock_irqrestore(&host->lock, flags);
 964	remove_wait_queue(&host->wq, &wait);
 965
 966	if (pm)
 967		pm_runtime_get_sync(mmc_dev(host));
 968
 969	return stop;
 970}
 971EXPORT_SYMBOL(__mmc_claim_host);
 972
 973/**
 974 *	mmc_release_host - release a host
 975 *	@host: mmc host to release
 976 *
 977 *	Release an MMC host, allowing others to claim the host
 978 *	for their operations.
 979 */
 980void mmc_release_host(struct mmc_host *host)
 981{
 982	unsigned long flags;
 983
 984	WARN_ON(!host->claimed);
 985
 986	spin_lock_irqsave(&host->lock, flags);
 987	if (--host->claim_cnt) {
 988		/* Release for nested claim */
 989		spin_unlock_irqrestore(&host->lock, flags);
 990	} else {
 991		host->claimed = 0;
 992		host->claimer = NULL;
 993		spin_unlock_irqrestore(&host->lock, flags);
 994		wake_up(&host->wq);
 995		pm_runtime_mark_last_busy(mmc_dev(host));
 996		pm_runtime_put_autosuspend(mmc_dev(host));
 997	}
 998}
 999EXPORT_SYMBOL(mmc_release_host);
1000
1001/*
1002 * This is a helper function, which fetches a runtime pm reference for the
1003 * card device and also claims the host.
1004 */
1005void mmc_get_card(struct mmc_card *card)
1006{
1007	pm_runtime_get_sync(&card->dev);
1008	mmc_claim_host(card->host);
1009}
1010EXPORT_SYMBOL(mmc_get_card);
1011
1012/*
1013 * This is a helper function, which releases the host and drops the runtime
1014 * pm reference for the card device.
1015 */
1016void mmc_put_card(struct mmc_card *card)
1017{
1018	mmc_release_host(card->host);
1019	pm_runtime_mark_last_busy(&card->dev);
1020	pm_runtime_put_autosuspend(&card->dev);
1021}
1022EXPORT_SYMBOL(mmc_put_card);
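
    /*
     * Illustrative sketch (editor's example, not part of the original file):
     * the usual pattern for code that needs to talk to the card outside the
     * regular request path, e.g. a debugfs or ioctl handler; cmd is assumed
     * to have been set up by the caller beforehand.
     *
     *	mmc_get_card(card);
     *	err = mmc_wait_for_cmd(card->host, &cmd, 0);
     *	mmc_put_card(card);
     */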
1023
1024/*
1025 * Internal function that does the actual ios call to the host driver,
1026 * optionally printing some debug output.
1027 */
1028static inline void mmc_set_ios(struct mmc_host *host)
1029{
1030	struct mmc_ios *ios = &host->ios;
1031
1032	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
1033		"width %u timing %u\n",
1034		 mmc_hostname(host), ios->clock, ios->bus_mode,
1035		 ios->power_mode, ios->chip_select, ios->vdd,
1036		 1 << ios->bus_width, ios->timing);
1037
1038	host->ops->set_ios(host, ios);
1039}
1040
1041/*
1042 * Control chip select pin on a host.
1043 */
1044void mmc_set_chip_select(struct mmc_host *host, int mode)
1045{
1046	host->ios.chip_select = mode;
1047	mmc_set_ios(host);
1048}
1049
1050/*
1051 * Sets the host clock to the highest possible frequency that
1052 * is below "hz".
1053 */
1054void mmc_set_clock(struct mmc_host *host, unsigned int hz)
1055{
1056	WARN_ON(hz && hz < host->f_min);
1057
1058	if (hz > host->f_max)
1059		hz = host->f_max;
1060
1061	host->ios.clock = hz;
1062	mmc_set_ios(host);
1063}
1064
1065int mmc_execute_tuning(struct mmc_card *card)
1066{
1067	struct mmc_host *host = card->host;
1068	u32 opcode;
1069	int err;
1070
1071	if (!host->ops->execute_tuning)
1072		return 0;
1073
1074	if (mmc_card_mmc(card))
1075		opcode = MMC_SEND_TUNING_BLOCK_HS200;
1076	else
1077		opcode = MMC_SEND_TUNING_BLOCK;
1078
1079	err = host->ops->execute_tuning(host, opcode);
1080
1081	if (err)
1082		pr_err("%s: tuning execution failed: %d\n",
1083			mmc_hostname(host), err);
1084	else
1085		mmc_retune_enable(host);
1086
1087	return err;
1088}
1089
1090/*
1091 * Change the bus mode (open drain/push-pull) of a host.
1092 */
1093void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
1094{
1095	host->ios.bus_mode = mode;
1096	mmc_set_ios(host);
1097}
1098
1099/*
1100 * Change data bus width of a host.
1101 */
1102void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
1103{
1104	host->ios.bus_width = width;
1105	mmc_set_ios(host);
1106}
1107
1108/*
1109 * Set initial state after a power cycle or a hw_reset.
1110 */
1111void mmc_set_initial_state(struct mmc_host *host)
1112{
1113	mmc_retune_disable(host);
1114
1115	if (mmc_host_is_spi(host))
1116		host->ios.chip_select = MMC_CS_HIGH;
1117	else
1118		host->ios.chip_select = MMC_CS_DONTCARE;
1119	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1120	host->ios.bus_width = MMC_BUS_WIDTH_1;
1121	host->ios.timing = MMC_TIMING_LEGACY;
1122	host->ios.drv_type = 0;
1123
1124	mmc_set_ios(host);
1125}
1126
1127/**
1128 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
1129 * @vdd:	voltage (mV)
1130 * @low_bits:	prefer low bits in boundary cases
1131 *
1132 * This function returns the OCR bit number according to the provided @vdd
1133 * value. If conversion is not possible a negative errno value is returned.
1134 *
1135 * Depending on the @low_bits flag the function prefers low or high OCR bits
1136 * on boundary voltages. For example,
1137 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
1138 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
1139 *
1140 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
1141 */
1142static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
1143{
1144	const int max_bit = ilog2(MMC_VDD_35_36);
1145	int bit;
1146
1147	if (vdd < 1650 || vdd > 3600)
1148		return -EINVAL;
1149
1150	if (vdd >= 1650 && vdd <= 1950)
1151		return ilog2(MMC_VDD_165_195);
1152
1153	if (low_bits)
1154		vdd -= 1;
1155
1156	/* Base 2000 mV, step 100 mV, bit's base 8. */
1157	bit = (vdd - 2000) / 100 + 8;
1158	if (bit > max_bit)
1159		return max_bit;
1160	return bit;
1161}
1162
1163/**
1164 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
1165 * @vdd_min:	minimum voltage value (mV)
1166 * @vdd_max:	maximum voltage value (mV)
1167 *
1168 * This function returns the OCR mask bits according to the provided @vdd_min
1169 * and @vdd_max values. If conversion is not possible the function returns 0.
1170 *
1171 * Notes wrt boundary cases:
1172 * This function sets the OCR bits for all boundary voltages, for example
1173 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
1174 * MMC_VDD_34_35 mask.
1175 */
1176u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
1177{
1178	u32 mask = 0;
1179
1180	if (vdd_max < vdd_min)
1181		return 0;
1182
1183	/* Prefer high bits for the boundary vdd_max values. */
1184	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
1185	if (vdd_max < 0)
1186		return 0;
1187
1188	/* Prefer low bits for the boundary vdd_min values. */
1189	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
1190	if (vdd_min < 0)
1191		return 0;
1192
1193	/* Fill the mask, from max bit to min bit. */
1194	while (vdd_max >= vdd_min)
1195		mask |= 1 << vdd_max--;
1196
1197	return mask;
1198}
1199EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
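
    /*
     * Worked example (editor's note): a fixed 3.3 V supply expressed as the
     * range [3300:3300] sets both boundary bits:
     *
     *	ocr = mmc_vddrange_to_ocrmask(3300, 3300);
     *
     * which yields MMC_VDD_32_33 | MMC_VDD_33_34.
     */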
1200
1201#ifdef CONFIG_OF
1202
1203/**
1204 * mmc_of_parse_voltage - return mask of supported voltages
1205 * @np: The device node to be parsed.
1206 * @mask: mask of voltages available for MMC/SD/SDIO
1207 *
1208 * Parse the "voltage-ranges" DT property, returning zero if it is not
1209 * found, negative errno if the voltage-range specification is invalid,
1210 * or one if the voltage-range is specified and successfully parsed.
1211 */
1212int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
1213{
1214	const u32 *voltage_ranges;
1215	int num_ranges, i;
1216
1217	voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
1218	num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
1219	if (!voltage_ranges) {
1220		pr_debug("%s: voltage-ranges unspecified\n", np->full_name);
1221		return 0;
1222	}
1223	if (!num_ranges) {
1224		pr_err("%s: voltage-ranges empty\n", np->full_name);
1225		return -EINVAL;
1226	}
1227
1228	for (i = 0; i < num_ranges; i++) {
1229		const int j = i * 2;
1230		u32 ocr_mask;
1231
1232		ocr_mask = mmc_vddrange_to_ocrmask(
1233				be32_to_cpu(voltage_ranges[j]),
1234				be32_to_cpu(voltage_ranges[j + 1]));
1235		if (!ocr_mask) {
1236			pr_err("%s: voltage-range #%d is invalid\n",
1237				np->full_name, i);
1238			return -EINVAL;
1239		}
1240		*mask |= ocr_mask;
1241	}
1242
1243	return 1;
1244}
1245EXPORT_SYMBOL(mmc_of_parse_voltage);
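
    /*
     * Editor's example of the device tree property parsed above; values are
     * min/max pairs in millivolts (here fixed 1.8 V and 3.3 V rails):
     *
     *	voltage-ranges = <1800 1800 3300 3300>;
     *
     * A host driver would then typically call:
     *
     *	ret = mmc_of_parse_voltage(np, &host->ocr_avail);
     */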
1246
1247#endif /* CONFIG_OF */
1248
1249static int mmc_of_get_func_num(struct device_node *node)
1250{
1251	u32 reg;
1252	int ret;
1253
1254	ret = of_property_read_u32(node, "reg", &reg);
1255	if (ret < 0)
1256		return ret;
1257
1258	return reg;
1259}
1260
1261struct device_node *mmc_of_find_child_device(struct mmc_host *host,
1262		unsigned func_num)
1263{
1264	struct device_node *node;
1265
1266	if (!host->parent || !host->parent->of_node)
1267		return NULL;
1268
1269	for_each_child_of_node(host->parent->of_node, node) {
1270		if (mmc_of_get_func_num(node) == func_num)
1271			return node;
1272	}
1273
1274	return NULL;
1275}
1276
1277#ifdef CONFIG_REGULATOR
1278
1279/**
1280 * mmc_ocrbitnum_to_vdd - Convert a OCR bit number to its voltage
1281 * @vdd_bit:	OCR bit number
1282 * @min_uV:	minimum voltage value (uV)
1283 * @max_uV:	maximum voltage value (uV)
1284 *
1285 * This function returns the voltage range according to the provided OCR
1286 * bit number. If conversion is not possible a negative errno value is returned.
1287 */
1288static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
1289{
1290	int		tmp;
1291
1292	if (!vdd_bit)
1293		return -EINVAL;
1294
1295	/*
1296	 * REVISIT mmc_vddrange_to_ocrmask() may have set some
1297	 * bits this regulator doesn't quite support ... don't
1298	 * be too picky, most cards and regulators are OK with
1299	 * a 0.1V range goof (it's a small error percentage).
1300	 */
1301	tmp = vdd_bit - ilog2(MMC_VDD_165_195);
1302	if (tmp == 0) {
1303		*min_uV = 1650 * 1000;
1304		*max_uV = 1950 * 1000;
1305	} else {
1306		*min_uV = 1900 * 1000 + tmp * 100 * 1000;
1307		*max_uV = *min_uV + 100 * 1000;
1308	}
1309
1310	return 0;
1311}
1312
1313/**
1314 * mmc_regulator_get_ocrmask - return mask of supported voltages
1315 * @supply: regulator to use
1316 *
1317 * This returns either a negative errno, or a mask of voltages that
1318 * can be provided to MMC/SD/SDIO devices using the specified voltage
1319 * regulator.  This would normally be called before registering the
1320 * MMC host adapter.
1321 */
1322int mmc_regulator_get_ocrmask(struct regulator *supply)
1323{
1324	int			result = 0;
1325	int			count;
1326	int			i;
1327	int			vdd_uV;
1328	int			vdd_mV;
1329
1330	count = regulator_count_voltages(supply);
1331	if (count < 0)
1332		return count;
1333
1334	for (i = 0; i < count; i++) {
1335		vdd_uV = regulator_list_voltage(supply, i);
1336		if (vdd_uV <= 0)
1337			continue;
1338
1339		vdd_mV = vdd_uV / 1000;
1340		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1341	}
1342
1343	if (!result) {
1344		vdd_uV = regulator_get_voltage(supply);
1345		if (vdd_uV <= 0)
1346			return vdd_uV;
1347
1348		vdd_mV = vdd_uV / 1000;
1349		result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1350	}
1351
1352	return result;
1353}
1354EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
1355
1356/**
1357 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
1358 * @mmc: the host to regulate
1359 * @supply: regulator to use
1360 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
1361 *
1362 * Returns zero on success, else negative errno.
1363 *
1364 * MMC host drivers may use this to enable or disable a regulator using
1365 * a particular supply voltage.  This would normally be called from the
1366 * set_ios() method.
1367 */
1368int mmc_regulator_set_ocr(struct mmc_host *mmc,
1369			struct regulator *supply,
1370			unsigned short vdd_bit)
1371{
1372	int			result = 0;
1373	int			min_uV, max_uV;
1374
1375	if (vdd_bit) {
1376		mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);
1377
1378		result = regulator_set_voltage(supply, min_uV, max_uV);
1379		if (result == 0 && !mmc->regulator_enabled) {
1380			result = regulator_enable(supply);
1381			if (!result)
1382				mmc->regulator_enabled = true;
1383		}
1384	} else if (mmc->regulator_enabled) {
1385		result = regulator_disable(supply);
1386		if (result == 0)
1387			mmc->regulator_enabled = false;
1388	}
1389
1390	if (result)
1391		dev_err(mmc_dev(mmc),
1392			"could not set regulator OCR (%d)\n", result);
1393	return result;
1394}
1395EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
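
    /*
     * Illustrative sketch (editor's example, not part of the original file):
     * a host driver's set_ios() handler driving vmmc through this helper;
     * my_set_ios is hypothetical.
     *
     *	static void my_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
     *	{
     *		if (IS_ERR(mmc->supply.vmmc))
     *			return;
     *
     *		if (ios->power_mode == MMC_POWER_OFF)
     *			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
     *		else if (ios->power_mode == MMC_POWER_UP)
     *			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
     *	}
     */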
1396
1397static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
1398						  int min_uV, int target_uV,
1399						  int max_uV)
1400{
1401	/*
1402	 * Check if supported first to avoid errors since we may try several
1403	 * signal levels during power up and don't want to show errors.
1404	 */
1405	if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
1406		return -EINVAL;
1407
1408	return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
1409					     max_uV);
1410}
1411
1412/**
1413 * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
1414 *
1415 * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
1416 * That will match the behavior of old boards where VQMMC and VMMC were supplied
1417 * by the same supply.  The Bus Operating conditions for 3.3V signaling in the
1418 * SD card spec also define VQMMC in terms of VMMC.
1419 * If this is not possible we'll try the full 2.7-3.6V of the spec.
1420 *
1421 * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
1422 * requested voltage.  This is definitely a good idea for UHS where there's a
1423 * separate regulator on the card that's trying to make 1.8V and it's best if
1424 * we match.
1425 *
1426 * This function is expected to be used by a controller's
1427 * start_signal_voltage_switch() function.
1428 */
1429int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
1430{
1431	struct device *dev = mmc_dev(mmc);
1432	int ret, volt, min_uV, max_uV;
1433
1434	/* If no vqmmc supply then we can't change the voltage */
1435	if (IS_ERR(mmc->supply.vqmmc))
1436		return -EINVAL;
1437
1438	switch (ios->signal_voltage) {
1439	case MMC_SIGNAL_VOLTAGE_120:
1440		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1441						1100000, 1200000, 1300000);
1442	case MMC_SIGNAL_VOLTAGE_180:
1443		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1444						1700000, 1800000, 1950000);
1445	case MMC_SIGNAL_VOLTAGE_330:
1446		ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
1447		if (ret < 0)
1448			return ret;
1449
1450		dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
1451			__func__, volt, max_uV);
1452
1453		min_uV = max(volt - 300000, 2700000);
1454		max_uV = min(max_uV + 200000, 3600000);
1455
1456		/*
1457		 * Due to a limitation in the current implementation of
1458		 * regulator_set_voltage_triplet() which is taking the lowest
1459		 * voltage possible if below the target, search for a suitable
1460		 * voltage in two steps and try to stay close to vmmc
1461		 * with a 0.3V tolerance at first.
1462		 */
1463		if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1464						min_uV, volt, max_uV))
1465			return 0;
1466
1467		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1468						2700000, volt, 3600000);
1469	default:
1470		return -EINVAL;
1471	}
1472}
1473EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
1474
1475#endif /* CONFIG_REGULATOR */
1476
1477int mmc_regulator_get_supply(struct mmc_host *mmc)
1478{
1479	struct device *dev = mmc_dev(mmc);
1480	int ret;
1481
1482	mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
1483	mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
1484
1485	if (IS_ERR(mmc->supply.vmmc)) {
1486		if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
1487			return -EPROBE_DEFER;
1488		dev_dbg(dev, "No vmmc regulator found\n");
1489	} else {
1490		ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
1491		if (ret > 0)
1492			mmc->ocr_avail = ret;
1493		else
1494			dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
1495	}
1496
1497	if (IS_ERR(mmc->supply.vqmmc)) {
1498		if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
1499			return -EPROBE_DEFER;
1500		dev_dbg(dev, "No vqmmc regulator found\n");
1501	}
1502
1503	return 0;
1504}
1505EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
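
    /*
     * Illustrative sketch (editor's example, not part of the original file):
     * typical use from a host driver's probe(), before mmc_add_host(); a
     * non-zero return (-EPROBE_DEFER) is simply propagated.
     *
     *	ret = mmc_regulator_get_supply(mmc);
     *	if (ret)
     *		return ret;
     */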
1506
1507/*
1508 * Mask off any voltages we don't support and select
1509 * the lowest voltage
1510 */
1511u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1512{
1513	int bit;
1514
1515	/*
1516	 * Sanity check the voltages that the card claims to
1517	 * support.
1518	 */
1519	if (ocr & 0x7F) {
1520		dev_warn(mmc_dev(host),
1521		"card claims to support voltages below defined range\n");
1522		ocr &= ~0x7F;
1523	}
1524
1525	ocr &= host->ocr_avail;
1526	if (!ocr) {
1527		dev_warn(mmc_dev(host), "no support for card's volts\n");
1528		return 0;
1529	}
1530
1531	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
1532		bit = ffs(ocr) - 1;
1533		ocr &= 3 << bit;
1534		mmc_power_cycle(host, ocr);
1535	} else {
1536		bit = fls(ocr) - 1;
1537		ocr &= 3 << bit;
1538		if (bit != host->ios.vdd)
1539			dev_warn(mmc_dev(host), "exceeding card's volts\n");
1540	}
1541
1542	return ocr;
1543}
1544
1545int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
1546{
1547	int err = 0;
1548	int old_signal_voltage = host->ios.signal_voltage;
1549
1550	host->ios.signal_voltage = signal_voltage;
1551	if (host->ops->start_signal_voltage_switch)
1552		err = host->ops->start_signal_voltage_switch(host, &host->ios);
1553
1554	if (err)
1555		host->ios.signal_voltage = old_signal_voltage;
1556
1557	return err;
1558
1559}
1560
1561int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
1562{
1563	struct mmc_command cmd = {0};
1564	int err = 0;
1565	u32 clock;
1566
1567	BUG_ON(!host);
1568
1569	/*
1570	 * Send CMD11 only if the request is to switch the card to
1571	 * 1.8V signalling.
1572	 */
1573	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1574		return __mmc_set_signal_voltage(host, signal_voltage);
1575
1576	/*
1577	 * If we cannot switch voltages, return failure so the caller
1578	 * can continue without UHS mode
1579	 */
1580	if (!host->ops->start_signal_voltage_switch)
1581		return -EPERM;
1582	if (!host->ops->card_busy)
1583		pr_warn("%s: cannot verify signal voltage switch\n",
1584			mmc_hostname(host));
1585
1586	cmd.opcode = SD_SWITCH_VOLTAGE;
1587	cmd.arg = 0;
1588	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1589
1590	err = mmc_wait_for_cmd(host, &cmd, 0);
1591	if (err)
1592		return err;
1593
1594	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
1595		return -EIO;
1596
1597	/*
1598	 * The card should drive cmd and dat[0:3] low immediately
1599	 * after the response of cmd11, but wait 1 ms to be sure
1600	 */
1601	mmc_delay(1);
1602	if (host->ops->card_busy && !host->ops->card_busy(host)) {
1603		err = -EAGAIN;
1604		goto power_cycle;
1605	}
1606	/*
1607	 * During a signal voltage level switch, the clock must be gated
1608	 * for 5 ms according to the SD spec
1609	 */
1610	clock = host->ios.clock;
1611	host->ios.clock = 0;
1612	mmc_set_ios(host);
1613
1614	if (__mmc_set_signal_voltage(host, signal_voltage)) {
1615		/*
1616		 * Voltages may not have been switched, but we've already
1617		 * sent CMD11, so a power cycle is required anyway
1618		 */
1619		err = -EAGAIN;
1620		goto power_cycle;
1621	}
1622
1623	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
1624	mmc_delay(10);
1625	host->ios.clock = clock;
1626	mmc_set_ios(host);
1627
1628	/* Wait for at least 1 ms according to spec */
1629	mmc_delay(1);
1630
1631	/*
1632	 * Failure to switch is indicated by the card holding
1633	 * dat[0:3] low
1634	 */
1635	if (host->ops->card_busy && host->ops->card_busy(host))
1636		err = -EAGAIN;
1637
1638power_cycle:
1639	if (err) {
1640		pr_debug("%s: Signal voltage switch failed, "
1641			"power cycling card\n", mmc_hostname(host));
1642		mmc_power_cycle(host, ocr);
1643	}
1644
1645	return err;
1646}
1647
1648/*
1649 * Select timing parameters for host.
1650 */
1651void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1652{
1653	host->ios.timing = timing;
1654	mmc_set_ios(host);
1655}
1656
1657/*
1658 * Select appropriate driver type for host.
1659 */
1660void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1661{
1662	host->ios.drv_type = drv_type;
1663	mmc_set_ios(host);
1664}
1665
1666int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
1667			      int card_drv_type, int *drv_type)
1668{
1669	struct mmc_host *host = card->host;
1670	int host_drv_type = SD_DRIVER_TYPE_B;
1671
1672	*drv_type = 0;
1673
1674	if (!host->ops->select_drive_strength)
1675		return 0;
1676
1677	/* Use SD definition of driver strength for hosts */
1678	if (host->caps & MMC_CAP_DRIVER_TYPE_A)
1679		host_drv_type |= SD_DRIVER_TYPE_A;
1680
1681	if (host->caps & MMC_CAP_DRIVER_TYPE_C)
1682		host_drv_type |= SD_DRIVER_TYPE_C;
1683
1684	if (host->caps & MMC_CAP_DRIVER_TYPE_D)
1685		host_drv_type |= SD_DRIVER_TYPE_D;
1686
1687	/*
1688	 * The drive strength that the hardware can support
1689	 * depends on the board design.  Pass the appropriate
1690	 * information and let the hardware specific code
1691	 * return what is possible given the options
1692	 */
1693	return host->ops->select_drive_strength(card, max_dtr,
1694						host_drv_type,
1695						card_drv_type,
1696						drv_type);
1697}
1698
1699/*
1700 * Apply power to the MMC stack.  This is a two-stage process.
1701 * First, we enable power to the card without the clock running.
1702 * We then wait a bit for the power to stabilise.  Finally,
1703 * enable the bus drivers and clock to the card.
1704 *
1705 * We must _NOT_ enable the clock prior to power stabilising.
1706 *
1707 * If a host does all the power sequencing itself, ignore the
1708 * initial MMC_POWER_UP stage.
1709 */
1710void mmc_power_up(struct mmc_host *host, u32 ocr)
1711{
1712	if (host->ios.power_mode == MMC_POWER_ON)
1713		return;
1714
1715	mmc_pwrseq_pre_power_on(host);
1716
1717	host->ios.vdd = fls(ocr) - 1;
1718	host->ios.power_mode = MMC_POWER_UP;
1719	/* Set initial state and call mmc_set_ios */
1720	mmc_set_initial_state(host);
1721
1722	/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
1723	if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0)
1724		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
1725	else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0)
1726		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
1727	else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0)
1728		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
1729
1730	/*
1731	 * This delay should be sufficient to allow the power supply
1732	 * to reach the minimum voltage.
1733	 */
1734	mmc_delay(10);
1735
1736	mmc_pwrseq_post_power_on(host);
1737
1738	host->ios.clock = host->f_init;
1739
1740	host->ios.power_mode = MMC_POWER_ON;
1741	mmc_set_ios(host);
1742
1743	/*
1744	 * This delay must be at least 74 clock cycles, or 1 ms, or the
1745	 * time required to reach a stable voltage.
1746	 */
1747	mmc_delay(10);
1748}
1749
1750void mmc_power_off(struct mmc_host *host)
1751{
1752	if (host->ios.power_mode == MMC_POWER_OFF)
1753		return;
1754
1755	mmc_pwrseq_power_off(host);
1756
1757	host->ios.clock = 0;
1758	host->ios.vdd = 0;
1759
1760	host->ios.power_mode = MMC_POWER_OFF;
1761	/* Set initial state and call mmc_set_ios */
1762	mmc_set_initial_state(host);
1763
1764	/*
1765	 * Some configurations, such as the 802.11 SDIO card in the OLPC
1766	 * XO-1.5, require a short delay after poweroff before the card
1767	 * can be successfully turned on again.
1768	 */
1769	mmc_delay(1);
1770}
1771
1772void mmc_power_cycle(struct mmc_host *host, u32 ocr)
1773{
1774	mmc_power_off(host);
1775	/* Wait at least 1 ms according to SD spec */
1776	mmc_delay(1);
1777	mmc_power_up(host, ocr);
1778}
1779
1780/*
1781 * Cleanup when the last reference to the bus operator is dropped.
1782 */
1783static void __mmc_release_bus(struct mmc_host *host)
1784{
1785	BUG_ON(!host);
1786	BUG_ON(host->bus_refs);
1787	BUG_ON(!host->bus_dead);
1788
1789	host->bus_ops = NULL;
1790}
1791
1792/*
1793 * Increase reference count of bus operator
1794 */
1795static inline void mmc_bus_get(struct mmc_host *host)
1796{
1797	unsigned long flags;
1798
1799	spin_lock_irqsave(&host->lock, flags);
1800	host->bus_refs++;
1801	spin_unlock_irqrestore(&host->lock, flags);
1802}
1803
1804/*
1805 * Decrease reference count of bus operator and free it if
1806 * it is the last reference.
1807 */
1808static inline void mmc_bus_put(struct mmc_host *host)
1809{
1810	unsigned long flags;
1811
1812	spin_lock_irqsave(&host->lock, flags);
1813	host->bus_refs--;
1814	if ((host->bus_refs == 0) && host->bus_ops)
1815		__mmc_release_bus(host);
1816	spin_unlock_irqrestore(&host->lock, flags);
1817}
1818
1819/*
1820 * Assign a mmc bus handler to a host. Only one bus handler may control a
1821 * host at any given time.
1822 */
1823void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1824{
1825	unsigned long flags;
1826
1827	BUG_ON(!host);
1828	BUG_ON(!ops);
1829
1830	WARN_ON(!host->claimed);
1831
1832	spin_lock_irqsave(&host->lock, flags);
1833
1834	BUG_ON(host->bus_ops);
1835	BUG_ON(host->bus_refs);
1836
1837	host->bus_ops = ops;
1838	host->bus_refs = 1;
1839	host->bus_dead = 0;
1840
1841	spin_unlock_irqrestore(&host->lock, flags);
1842}
1843
1844/*
1845 * Remove the current bus handler from a host.
1846 */
1847void mmc_detach_bus(struct mmc_host *host)
1848{
1849	unsigned long flags;
1850
1851	BUG_ON(!host);
1852
1853	WARN_ON(!host->claimed);
1854	WARN_ON(!host->bus_ops);
1855
1856	spin_lock_irqsave(&host->lock, flags);
1857
1858	host->bus_dead = 1;
1859
1860	spin_unlock_irqrestore(&host->lock, flags);
1861
1862	mmc_bus_put(host);
1863}
1864
1865static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
1866				bool cd_irq)
1867{
1868#ifdef CONFIG_MMC_DEBUG
1869	unsigned long flags;
1870	spin_lock_irqsave(&host->lock, flags);
1871	WARN_ON(host->removed);
1872	spin_unlock_irqrestore(&host->lock, flags);
1873#endif
1874
1875	/*
1876	 * If the device is configured as wakeup, we prevent a new sleep for
1877	 * 5 s to give provision for user space to consume the event.
1878	 */
1879	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
1880		device_can_wakeup(mmc_dev(host)))
1881		pm_wakeup_event(mmc_dev(host), 5000);
1882
1883	host->detect_change = 1;
1884	mmc_schedule_delayed_work(&host->detect, delay);
1885}
1886
1887/**
1888 *	mmc_detect_change - process change of state on a MMC socket
1889 *	@host: host which changed state.
1890 *	@delay: optional delay to wait before detection (jiffies)
1891 *
1892 *	MMC drivers should call this when they detect a card has been
1893 *	inserted or removed. The MMC layer will confirm that any
1894 *	present card is still functional, and initialize any newly
1895 *	inserted.
1896 */
1897void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1898{
1899	_mmc_detect_change(host, delay, true);
1900}
1901EXPORT_SYMBOL(mmc_detect_change);
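
    /*
     * Illustrative sketch (editor's example, not part of the original file):
     * a board-specific card-detect interrupt handler; my_cd_irq and the
     * 200 ms debounce value are hypothetical.
     *
     *	static irqreturn_t my_cd_irq(int irq, void *dev_id)
     *	{
     *		struct mmc_host *host = dev_id;
     *
     *		mmc_detect_change(host, msecs_to_jiffies(200));
     *		return IRQ_HANDLED;
     *	}
     */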
1902
1903void mmc_init_erase(struct mmc_card *card)
1904{
1905	unsigned int sz;
1906
1907	if (is_power_of_2(card->erase_size))
1908		card->erase_shift = ffs(card->erase_size) - 1;
1909	else
1910		card->erase_shift = 0;
1911
1912	/*
1913	 * It is possible to erase an arbitrarily large area of an SD or MMC
1914	 * card.  That is not desirable because it can take a long time
1915	 * (minutes) potentially delaying more important I/O, and also the
1916	 * timeout calculations become increasingly over-estimated.
1917	 * Consequently, 'pref_erase' is defined as a guide to limit erases
1918	 * to that size and alignment.
1919	 *
1920	 * For SD cards that define Allocation Unit size, limit erases to one
1921	 * Allocation Unit at a time.  For MMC cards that define High Capacity
1922	 * Erase Size, whether it is switched on or not, limit to that size.
1923	 * Otherwise just have a stab at a good value.  For modern cards it
1924	 * will end up being 4MiB.  Note that if the value is too small, it
1925	 * can end up taking longer to erase.
1926	 */
1927	if (mmc_card_sd(card) && card->ssr.au) {
1928		card->pref_erase = card->ssr.au;
1929		card->erase_shift = ffs(card->ssr.au) - 1;
1930	} else if (card->ext_csd.hc_erase_size) {
1931		card->pref_erase = card->ext_csd.hc_erase_size;
1932	} else if (card->erase_size) {
1933		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
1934		if (sz < 128)
1935			card->pref_erase = 512 * 1024 / 512;
1936		else if (sz < 512)
1937			card->pref_erase = 1024 * 1024 / 512;
1938		else if (sz < 1024)
1939			card->pref_erase = 2 * 1024 * 1024 / 512;
1940		else
1941			card->pref_erase = 4 * 1024 * 1024 / 512;
1942		if (card->pref_erase < card->erase_size)
1943			card->pref_erase = card->erase_size;
1944		else {
1945			sz = card->pref_erase % card->erase_size;
1946			if (sz)
1947				card->pref_erase += card->erase_size - sz;
1948		}
1949	} else
1950		card->pref_erase = 0;
1951}
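
/*
 * Worked example (editor's illustration, hypothetical card geometry): for a
 * 2 GiB card with no SD AU and no HC erase size, sz = 2048 MiB >= 1024, so
 * pref_erase starts at 4 MiB = 8192 sectors.  If erase_size were 3072
 * sectors (not a power of 2), rem = 8192 % 3072 = 2048 and pref_erase is
 * rounded up by 3072 - 2048 = 1024 to 9216 sectors, i.e. a whole number of
 * erase groups.
 */
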
1952
1953static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1954				          unsigned int arg, unsigned int qty)
1955{
1956	unsigned int erase_timeout;
1957
1958	if (arg == MMC_DISCARD_ARG ||
1959	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
1960		erase_timeout = card->ext_csd.trim_timeout;
1961	} else if (card->ext_csd.erase_group_def & 1) {
1962		/* High Capacity Erase Group Size uses HC timeouts */
1963		if (arg == MMC_TRIM_ARG)
1964			erase_timeout = card->ext_csd.trim_timeout;
1965		else
1966			erase_timeout = card->ext_csd.hc_erase_timeout;
1967	} else {
1968		/* CSD Erase Group Size uses write timeout */
1969		unsigned int mult = (10 << card->csd.r2w_factor);
1970		unsigned int timeout_clks = card->csd.tacc_clks * mult;
1971		unsigned int timeout_us;
1972
1973		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
1974		if (card->csd.tacc_ns < 1000000)
1975			timeout_us = (card->csd.tacc_ns * mult) / 1000;
1976		else
1977			timeout_us = (card->csd.tacc_ns / 1000) * mult;
1978
1979		/*
1980		 * ios.clock is only a target.  The real clock rate might be
1981		 * less but not that much less, so fudge it by multiplying by 2.
1982		 */
1983		timeout_clks <<= 1;
1984		timeout_us += (timeout_clks * 1000) /
1985			      (card->host->ios.clock / 1000);
1986
1987		erase_timeout = timeout_us / 1000;
1988
1989		/*
1990		 * Theoretically, the calculation could underflow so round up
1991		 * to 1ms in that case.
1992		 */
1993		if (!erase_timeout)
1994			erase_timeout = 1;
1995	}
1996
1997	/* Multiplier for secure operations */
1998	if (arg & MMC_SECURE_ARGS) {
1999		if (arg == MMC_SECURE_ERASE_ARG)
2000			erase_timeout *= card->ext_csd.sec_erase_mult;
2001		else
2002			erase_timeout *= card->ext_csd.sec_trim_mult;
2003	}
2004
2005	erase_timeout *= qty;
2006
2007	/*
2008	 * Ensure at least a 1 second timeout for SPI as per
2009	 * 'mmc_set_data_timeout()'
2010	 */
2011	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
2012		erase_timeout = 1000;
2013
2014	return erase_timeout;
2015}
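
/*
 * Worked example for the CSD-based branch above (editor's illustration,
 * hypothetical CSD values): with tacc_ns = 500000, tacc_clks = 100,
 * r2w_factor = 4 and ios.clock = 25 MHz, mult = 10 << 4 = 160, so
 * timeout_us = (500000 * 160) / 1000 = 80000 and timeout_clks = 16000,
 * doubled to 32000, which adds (32000 * 1000) / 25000 = 1280 us.  That is
 * roughly 81 ms per erase group before the secure and qty multipliers.
 */
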
2016
2017static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
2018					 unsigned int arg,
2019					 unsigned int qty)
2020{
2021	unsigned int erase_timeout;
2022
2023	if (card->ssr.erase_timeout) {
2024		/* Erase timeout specified in SD Status Register (SSR) */
2025		erase_timeout = card->ssr.erase_timeout * qty +
2026				card->ssr.erase_offset;
2027	} else {
2028		/*
2029		 * Erase timeout not specified in SD Status Register (SSR) so
2030		 * use 250ms per write block.
2031		 */
2032		erase_timeout = 250 * qty;
2033	}
2034
2035	/* Must not be less than 1 second */
2036	if (erase_timeout < 1000)
2037		erase_timeout = 1000;
2038
2039	return erase_timeout;
2040}
2041
2042static unsigned int mmc_erase_timeout(struct mmc_card *card,
2043				      unsigned int arg,
2044				      unsigned int qty)
2045{
2046	if (mmc_card_sd(card))
2047		return mmc_sd_erase_timeout(card, arg, qty);
2048	else
2049		return mmc_mmc_erase_timeout(card, arg, qty);
2050}
2051
2052static int mmc_do_erase(struct mmc_card *card, unsigned int from,
2053			unsigned int to, unsigned int arg)
2054{
2055	struct mmc_command cmd = {0};
2056	unsigned int qty = 0;
2057	unsigned long timeout;
2058	int err;
2059
2060	mmc_retune_hold(card->host);
2061
2062	/*
2063	 * qty is used to calculate the erase timeout which depends on how many
2064	 * erase groups (or allocation units in SD terminology) are affected.
2065	 * We count erasing part of an erase group as one erase group.
2066	 * For SD, the allocation units are always a power of 2.  For MMC, the
2067	 * erase group size is almost certainly also a power of 2, but the JEDEC
2068	 * standard does not seem to insist on that, so we fall back to
2069	 * division in that case.  SD may not specify an allocation unit size,
2070	 * in which case the timeout is based on the number of write blocks.
2071	 *
2072	 * Note that the timeout for secure trim 2 will only be correct if the
2073	 * number of erase groups specified is the same as the total of all
2074	 * preceding secure trim 1 commands.  Since the power may have been
2075	 * lost since the secure trim 1 commands occurred, it is generally
2076	 * impossible to calculate the secure trim 2 timeout correctly.
2077	 */
2078	if (card->erase_shift)
2079		qty += ((to >> card->erase_shift) -
2080			(from >> card->erase_shift)) + 1;
2081	else if (mmc_card_sd(card))
2082		qty += to - from + 1;
2083	else
2084		qty += ((to / card->erase_size) -
2085			(from / card->erase_size)) + 1;
2086
2087	if (!mmc_card_blockaddr(card)) {
2088		from <<= 9;
2089		to <<= 9;
2090	}
2091
2092	if (mmc_card_sd(card))
2093		cmd.opcode = SD_ERASE_WR_BLK_START;
2094	else
2095		cmd.opcode = MMC_ERASE_GROUP_START;
2096	cmd.arg = from;
2097	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2098	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2099	if (err) {
2100		pr_err("mmc_erase: group start error %d, "
2101		       "status %#x\n", err, cmd.resp[0]);
2102		err = -EIO;
2103		goto out;
2104	}
2105
2106	memset(&cmd, 0, sizeof(struct mmc_command));
2107	if (mmc_card_sd(card))
2108		cmd.opcode = SD_ERASE_WR_BLK_END;
2109	else
2110		cmd.opcode = MMC_ERASE_GROUP_END;
2111	cmd.arg = to;
2112	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2113	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2114	if (err) {
2115		pr_err("mmc_erase: group end error %d, status %#x\n",
2116		       err, cmd.resp[0]);
2117		err = -EIO;
2118		goto out;
2119	}
2120
2121	memset(&cmd, 0, sizeof(struct mmc_command));
2122	cmd.opcode = MMC_ERASE;
2123	cmd.arg = arg;
2124	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2125	cmd.busy_timeout = mmc_erase_timeout(card, arg, qty);
2126	err = mmc_wait_for_cmd(card->host, &cmd, 0);
2127	if (err) {
2128		pr_err("mmc_erase: erase error %d, status %#x\n",
2129		       err, cmd.resp[0]);
2130		err = -EIO;
2131		goto out;
2132	}
2133
2134	if (mmc_host_is_spi(card->host))
2135		goto out;
2136
2137	timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
2138	do {
2139		memset(&cmd, 0, sizeof(struct mmc_command));
2140		cmd.opcode = MMC_SEND_STATUS;
2141		cmd.arg = card->rca << 16;
2142		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
2143		/* Do not retry else we can't see errors */
2144		err = mmc_wait_for_cmd(card->host, &cmd, 0);
2145		if (err || (cmd.resp[0] & 0xFDF92000)) {
2146			pr_err("error %d requesting status %#x\n",
2147				err, cmd.resp[0]);
2148			err = -EIO;
2149			goto out;
2150		}
2151
2152		/* Timeout if the device never becomes ready for data and
2153		 * never leaves the program state.
2154		 */
2155		if (time_after(jiffies, timeout)) {
2156			pr_err("%s: Card stuck in programming state! %s\n",
2157				mmc_hostname(card->host), __func__);
2158			err =  -EIO;
2159			goto out;
2160		}
2161
2162	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
2163		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
2164out:
2165	mmc_retune_release(card->host);
2166	return err;
2167}
2168
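
/*
 * Worked example for the qty calculation above (editor's illustration,
 * hypothetical values): with erase_shift = 13 (8192-sector, i.e. 4 MiB
 * erase groups), from = 10000 and to = 20000 give
 * qty = (20000 >> 13) - (10000 >> 13) + 1 = 2 - 1 + 1 = 2, because the
 * range touches two erase groups even though it spans far less than two
 * full groups.
 */
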
2169/**
2170 * mmc_erase - erase sectors.
2171 * @card: card to erase
2172 * @from: first sector to erase
2173 * @nr: number of sectors to erase
2174 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
2175 *
2176 * Caller must claim host before calling this function.
2177 */
2178int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
2179	      unsigned int arg)
2180{
2181	unsigned int rem, to = from + nr;
2182	int err;
2183
2184	if (!(card->host->caps & MMC_CAP_ERASE) ||
2185	    !(card->csd.cmdclass & CCC_ERASE))
2186		return -EOPNOTSUPP;
2187
2188	if (!card->erase_size)
2189		return -EOPNOTSUPP;
2190
2191	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
2192		return -EOPNOTSUPP;
2193
2194	if ((arg & MMC_SECURE_ARGS) &&
2195	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
2196		return -EOPNOTSUPP;
2197
2198	if ((arg & MMC_TRIM_ARGS) &&
2199	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
2200		return -EOPNOTSUPP;
2201
2202	if (arg == MMC_SECURE_ERASE_ARG) {
2203		if (from % card->erase_size || nr % card->erase_size)
2204			return -EINVAL;
2205	}
2206
2207	if (arg == MMC_ERASE_ARG) {
2208		rem = from % card->erase_size;
2209		if (rem) {
2210			rem = card->erase_size - rem;
2211			from += rem;
2212			if (nr > rem)
2213				nr -= rem;
2214			else
2215				return 0;
2216		}
2217		rem = nr % card->erase_size;
2218		if (rem)
2219			nr -= rem;
2220	}
2221
2222	if (nr == 0)
2223		return 0;
2224
2225	to = from + nr;
2226
2227	if (to <= from)
2228		return -EINVAL;
2229
2230	/* 'from' and 'to' are inclusive */
2231	to -= 1;
2232
2233	/*
2234	 * Special case where only one erase-group fits in the timeout budget:
2235	 * If the region crosses an erase-group boundary on this particular
2236	 * case, we will be trimming more than one erase-group, which does not
2237	 * fit in the timeout budget of the controller, so we need to split it
2238	 * and call mmc_do_erase() twice if necessary. This special case is
2239	 * identified by the card->eg_boundary flag.
2240	 */
2241	rem = card->erase_size - (from % card->erase_size);
2242	if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
2243		err = mmc_do_erase(card, from, from + rem - 1, arg);
2244		from += rem;
2245		if ((err) || (to <= from))
2246			return err;
2247	}
2248
2249	return mmc_do_erase(card, from, to, arg);
2250}
2251EXPORT_SYMBOL(mmc_erase);
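
/*
 * Worked example for the MMC_ERASE_ARG alignment above (editor's
 * illustration, hypothetical values): with erase_size = 2048 sectors,
 * from = 1000 and nr = 10000, the unaligned head is skipped
 * (rem = 2048 - 1000 % 2048 = 1048, so from becomes 2048 and nr 8952) and
 * the unaligned tail is dropped (8952 % 2048 = 760, so nr becomes 8192).
 * The erase then covers sectors 2048..10239, exactly four erase groups,
 * leaving the misaligned head and tail untouched.
 */
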
2252
2253int mmc_can_erase(struct mmc_card *card)
2254{
2255	if ((card->host->caps & MMC_CAP_ERASE) &&
2256	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
2257		return 1;
2258	return 0;
2259}
2260EXPORT_SYMBOL(mmc_can_erase);
2261
2262int mmc_can_trim(struct mmc_card *card)
2263{
2264	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
2265	    (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
2266		return 1;
2267	return 0;
2268}
2269EXPORT_SYMBOL(mmc_can_trim);
2270
2271int mmc_can_discard(struct mmc_card *card)
2272{
2273	/*
2274	 * As there's no way to detect the discard support bit at v4.5,
2275	 * use the s/w feature support field.
2276	 */
2277	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
2278		return 1;
2279	return 0;
2280}
2281EXPORT_SYMBOL(mmc_can_discard);
2282
2283int mmc_can_sanitize(struct mmc_card *card)
2284{
2285	if (!mmc_can_trim(card) && !mmc_can_erase(card))
2286		return 0;
2287	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
2288		return 1;
2289	return 0;
2290}
2291EXPORT_SYMBOL(mmc_can_sanitize);
2292
2293int mmc_can_secure_erase_trim(struct mmc_card *card)
2294{
2295	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
2296	    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
2297		return 1;
2298	return 0;
2299}
2300EXPORT_SYMBOL(mmc_can_secure_erase_trim);
2301
2302int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
2303			    unsigned int nr)
2304{
2305	if (!card->erase_size)
2306		return 0;
2307	if (from % card->erase_size || nr % card->erase_size)
2308		return 0;
2309	return 1;
2310}
2311EXPORT_SYMBOL(mmc_erase_group_aligned);
2312
2313static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
2314					    unsigned int arg)
2315{
2316	struct mmc_host *host = card->host;
2317	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
2318	unsigned int last_timeout = 0;
2319
2320	if (card->erase_shift)
2321		max_qty = UINT_MAX >> card->erase_shift;
2322	else if (mmc_card_sd(card))
2323		max_qty = UINT_MAX;
2324	else
2325		max_qty = UINT_MAX / card->erase_size;
2326
2327	/* Find the largest qty with an OK timeout */
2328	do {
2329		y = 0;
2330		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
2331			timeout = mmc_erase_timeout(card, arg, qty + x);
2332			if (timeout > host->max_busy_timeout)
2333				break;
2334			if (timeout < last_timeout)
2335				break;
2336			last_timeout = timeout;
2337			y = x;
2338		}
2339		qty += y;
2340	} while (y);
2341
2342	if (!qty)
2343		return 0;
2344
2345	/*
2346	 * When specifying a sector range to trim, chances are we might cross
2347	 * an erase-group boundary even if the amount of sectors is less than
2348	 * one erase-group.
2349	 * If we can only fit one erase-group in the controller timeout budget,
2350	 * we have to care that erase-group boundaries are not crossed by a
2351	 * single trim operation. We flag that special case with "eg_boundary".
2352	 * In all other cases we can just decrement qty and pretend that we
2353	 * always touch (qty + 1) erase-groups as a simple optimization.
2354	 */
2355	if (qty == 1)
2356		card->eg_boundary = 1;
2357	else
2358		qty--;
2359
2360	/* Convert qty to sectors */
2361	if (card->erase_shift)
2362		max_discard = qty << card->erase_shift;
2363	else if (mmc_card_sd(card))
2364		max_discard = qty + 1;
2365	else
2366		max_discard = qty * card->erase_size;
2367
2368	return max_discard;
2369}
2370
2371unsigned int mmc_calc_max_discard(struct mmc_card *card)
2372{
2373	struct mmc_host *host = card->host;
2374	unsigned int max_discard, max_trim;
2375
2376	if (!host->max_busy_timeout)
2377		return UINT_MAX;
2378
2379	/*
2380	 * Without erase_group_def set, MMC erase timeout depends on clock
2381	 * frequency which can change.  In that case, the best choice is
2382	 * just the preferred erase size.
2383	 */
2384	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
2385		return card->pref_erase;
2386
2387	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
2388	if (mmc_can_trim(card)) {
2389		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
2390		if (max_trim < max_discard)
2391			max_discard = max_trim;
2392	} else if (max_discard < card->erase_size) {
2393		max_discard = 0;
2394	}
2395	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
2396		 mmc_hostname(host), max_discard, host->max_busy_timeout);
2397	return max_discard;
2398}
2399EXPORT_SYMBOL(mmc_calc_max_discard);
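
/*
 * Illustrative sketch (not from this file): the MMC block driver uses the
 * value returned above to cap discard requests from the block core, roughly
 * as below.  Assumes <linux/blkdev.h>; the function name is hypothetical.
 */
static void example_setup_discard(struct request_queue *q, struct mmc_card *card)
{
	unsigned int max_discard = mmc_calc_max_discard(card);

	if (max_discard)
		blk_queue_max_discard_sectors(q, max_discard);
}
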
2400
2401int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
2402{
2403	struct mmc_command cmd = {0};
2404
2405	if (mmc_card_blockaddr(card) || mmc_card_ddr52(card))
2406		return 0;
2407
2408	cmd.opcode = MMC_SET_BLOCKLEN;
2409	cmd.arg = blocklen;
2410	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2411	return mmc_wait_for_cmd(card->host, &cmd, 5);
2412}
2413EXPORT_SYMBOL(mmc_set_blocklen);
2414
2415int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
2416			bool is_rel_write)
2417{
2418	struct mmc_command cmd = {0};
2419
2420	cmd.opcode = MMC_SET_BLOCK_COUNT;
2421	cmd.arg = blockcount & 0x0000FFFF;
2422	if (is_rel_write)
2423		cmd.arg |= 1 << 31;
2424	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2425	return mmc_wait_for_cmd(card->host, &cmd, 5);
2426}
2427EXPORT_SYMBOL(mmc_set_blockcount);
2428
2429static void mmc_hw_reset_for_init(struct mmc_host *host)
2430{
2431	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
2432		return;
2433	host->ops->hw_reset(host);
2434}
2435
2436int mmc_hw_reset(struct mmc_host *host)
2437{
2438	int ret;
2439
2440	if (!host->card)
2441		return -EINVAL;
2442
2443	mmc_bus_get(host);
2444	if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
2445		mmc_bus_put(host);
2446		return -EOPNOTSUPP;
2447	}
2448
2449	ret = host->bus_ops->reset(host);
2450	mmc_bus_put(host);
2451
2452	if (ret != -EOPNOTSUPP)
2453		pr_warn("%s: tried to reset card\n", mmc_hostname(host));
2454
2455	return ret;
2456}
2457EXPORT_SYMBOL(mmc_hw_reset);
2458
2459static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
2460{
2461	host->f_init = freq;
2462
2463#ifdef CONFIG_MMC_DEBUG
2464	pr_info("%s: %s: trying to init card at %u Hz\n",
2465		mmc_hostname(host), __func__, host->f_init);
2466#endif
2467	mmc_power_up(host, host->ocr_avail);
2468
2469	/*
2470	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
2471	 * do a hardware reset if possible.
2472	 */
2473	mmc_hw_reset_for_init(host);
2474
2475	/*
2476	 * sdio_reset sends CMD52 to reset card.  Since we do not know
2477	 * if the card is being re-initialized, just send it.  CMD52
2478	 * should be ignored by SD/eMMC cards.
2479	 * Skip it if we already know that we do not support SDIO commands
2480	 */
2481	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2482		sdio_reset(host);
2483
2484	mmc_go_idle(host);
2485
2486	mmc_send_if_cond(host, host->ocr_avail);
2487
2488	/* Order's important: probe SDIO, then SD, then MMC */
2489	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2490		if (!mmc_attach_sdio(host))
2491			return 0;
2492
2493	if (!mmc_attach_sd(host))
2494		return 0;
2495	if (!mmc_attach_mmc(host))
2496		return 0;
2497
2498	mmc_power_off(host);
2499	return -EIO;
2500}
2501
2502int _mmc_detect_card_removed(struct mmc_host *host)
2503{
2504	int ret;
2505
2506	if (!host->card || mmc_card_removed(host->card))
2507		return 1;
2508
2509	ret = host->bus_ops->alive(host);
2510
2511	/*
2512	 * Card detect status and alive check may be out of sync if card is
2513	 * removed slowly, when card detect switch changes while card/slot
2514	 * pads are still contacted in hardware (refer to "SD Card Mechanical
2515	 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
2516	 * detect work 200ms later for this case.
2517	 */
2518	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
2519		mmc_detect_change(host, msecs_to_jiffies(200));
2520		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
2521	}
2522
2523	if (ret) {
2524		mmc_card_set_removed(host->card);
2525		pr_debug("%s: card remove detected\n", mmc_hostname(host));
2526	}
2527
2528	return ret;
2529}
2530
2531int mmc_detect_card_removed(struct mmc_host *host)
2532{
2533	struct mmc_card *card = host->card;
2534	int ret;
2535
2536	WARN_ON(!host->claimed);
2537
2538	if (!card)
2539		return 1;
2540
2541	if (!mmc_card_is_removable(host))
2542		return 0;
2543
2544	ret = mmc_card_removed(card);
2545	/*
2546	 * The card will be considered unchanged unless we have been asked to
2547	 * detect a change or host requires polling to provide card detection.
2548	 */
2549	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
2550		return ret;
2551
2552	host->detect_change = 0;
2553	if (!ret) {
2554		ret = _mmc_detect_card_removed(host);
2555		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
2556			/*
2557			 * Schedule a detect work as soon as possible to let a
2558			 * rescan handle the card removal.
2559			 */
2560			cancel_delayed_work(&host->detect);
2561			_mmc_detect_change(host, 0, false);
2562		}
2563	}
2564
2565	return ret;
2566}
2567EXPORT_SYMBOL(mmc_detect_card_removed);
2568
2569void mmc_rescan(struct work_struct *work)
2570{
2571	struct mmc_host *host =
2572		container_of(work, struct mmc_host, detect.work);
2573	int i;
2574
2575	if (host->rescan_disable)
2576		return;
2577
2578	/* If there is a non-removable card registered, only scan once */
2579	if (!mmc_card_is_removable(host) && host->rescan_entered)
2580		return;
2581	host->rescan_entered = 1;
2582
2583	if (host->trigger_card_event && host->ops->card_event) {
2584		mmc_claim_host(host);
2585		host->ops->card_event(host);
2586		mmc_release_host(host);
2587		host->trigger_card_event = false;
2588	}
2589
2590	mmc_bus_get(host);
2591
2592	/*
2593	 * if there is a _removable_ card registered, check whether it is
2594	 * still present
2595	 */
2596	if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
2597		host->bus_ops->detect(host);
2598
2599	host->detect_change = 0;
2600
2601	/*
2602	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
2603	 * the card is no longer present.
2604	 */
2605	mmc_bus_put(host);
2606	mmc_bus_get(host);
2607
2608	/* if there still is a card present, stop here */
2609	if (host->bus_ops != NULL) {
2610		mmc_bus_put(host);
2611		goto out;
2612	}
2613
2614	/*
2615	 * Only we can add a new handler, so it's safe to
2616	 * release the lock here.
2617	 */
2618	mmc_bus_put(host);
2619
2620	mmc_claim_host(host);
2621	if (mmc_card_is_removable(host) && host->ops->get_cd &&
2622			host->ops->get_cd(host) == 0) {
2623		mmc_power_off(host);
2624		mmc_release_host(host);
2625		goto out;
2626	}
2627
2628	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2629		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
2630			break;
2631		if (freqs[i] <= host->f_min)
2632			break;
2633	}
2634	mmc_release_host(host);
2635
2636 out:
2637	if (host->caps & MMC_CAP_NEEDS_POLL)
2638		mmc_schedule_delayed_work(&host->detect, HZ);
2639}
2640
2641void mmc_start_host(struct mmc_host *host)
2642{
2643	host->f_init = max(freqs[0], host->f_min);
2644	host->rescan_disable = 0;
2645	host->ios.power_mode = MMC_POWER_UNDEFINED;
2646
2647	mmc_claim_host(host);
2648	if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
2649		mmc_power_off(host);
2650	else
2651		mmc_power_up(host, host->ocr_avail);
2652	mmc_release_host(host);
2653
2654	mmc_gpiod_request_cd_irq(host);
2655	_mmc_detect_change(host, 0, false);
2656}
2657
2658void mmc_stop_host(struct mmc_host *host)
2659{
2660#ifdef CONFIG_MMC_DEBUG
2661	unsigned long flags;
2662	spin_lock_irqsave(&host->lock, flags);
2663	host->removed = 1;
2664	spin_unlock_irqrestore(&host->lock, flags);
2665#endif
2666	if (host->slot.cd_irq >= 0)
2667		disable_irq(host->slot.cd_irq);
2668
2669	host->rescan_disable = 1;
2670	cancel_delayed_work_sync(&host->detect);
2671
2672	/* clear pm flags now and let card drivers set them as needed */
2673	host->pm_flags = 0;
2674
2675	mmc_bus_get(host);
2676	if (host->bus_ops && !host->bus_dead) {
2677		/* Calling bus_ops->remove() with a claimed host can deadlock */
2678		host->bus_ops->remove(host);
2679		mmc_claim_host(host);
2680		mmc_detach_bus(host);
2681		mmc_power_off(host);
2682		mmc_release_host(host);
2683		mmc_bus_put(host);
2684		return;
2685	}
2686	mmc_bus_put(host);
2687
2688	BUG_ON(host->card);
2689
2690	mmc_claim_host(host);
2691	mmc_power_off(host);
2692	mmc_release_host(host);
2693}
2694
2695int mmc_power_save_host(struct mmc_host *host)
2696{
2697	int ret = 0;
2698
2699#ifdef CONFIG_MMC_DEBUG
2700	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
2701#endif
2702
2703	mmc_bus_get(host);
2704
2705	if (!host->bus_ops || host->bus_dead) {
2706		mmc_bus_put(host);
2707		return -EINVAL;
2708	}
2709
2710	if (host->bus_ops->power_save)
2711		ret = host->bus_ops->power_save(host);
2712
2713	mmc_bus_put(host);
2714
2715	mmc_power_off(host);
2716
2717	return ret;
2718}
2719EXPORT_SYMBOL(mmc_power_save_host);
2720
2721int mmc_power_restore_host(struct mmc_host *host)
2722{
2723	int ret;
2724
2725#ifdef CONFIG_MMC_DEBUG
2726	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
2727#endif
2728
2729	mmc_bus_get(host);
2730
2731	if (!host->bus_ops || host->bus_dead) {
2732		mmc_bus_put(host);
2733		return -EINVAL;
2734	}
2735
2736	mmc_power_up(host, host->card->ocr);
2737	ret = host->bus_ops->power_restore(host);
2738
2739	mmc_bus_put(host);
2740
2741	return ret;
2742}
2743EXPORT_SYMBOL(mmc_power_restore_host);
2744
2745/*
2746 * Flush the cache to the non-volatile storage.
2747 */
2748int mmc_flush_cache(struct mmc_card *card)
2749{
2750	int err = 0;
2751
2752	if (mmc_card_mmc(card) &&
2753			(card->ext_csd.cache_size > 0) &&
2754			(card->ext_csd.cache_ctrl & 1)) {
2755		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2756				EXT_CSD_FLUSH_CACHE, 1, 0);
2757		if (err)
2758			pr_err("%s: cache flush error %d\n",
2759					mmc_hostname(card->host), err);
2760	}
2761
2762	return err;
2763}
2764EXPORT_SYMBOL(mmc_flush_cache);
2765
2766#ifdef CONFIG_PM_SLEEP
2767/* Do the card removal on suspend if the card is assumed removable.
2768 * Do that in the pm notifier while userspace isn't yet frozen, so we will
2769 * be able to sync the card.
2770 */
2771static int mmc_pm_notify(struct notifier_block *notify_block,
2772			unsigned long mode, void *unused)
2773{
2774	struct mmc_host *host = container_of(
2775		notify_block, struct mmc_host, pm_notify);
2776	unsigned long flags;
2777	int err = 0;
2778
2779	switch (mode) {
2780	case PM_HIBERNATION_PREPARE:
2781	case PM_SUSPEND_PREPARE:
2782	case PM_RESTORE_PREPARE:
2783		spin_lock_irqsave(&host->lock, flags);
2784		host->rescan_disable = 1;
2785		spin_unlock_irqrestore(&host->lock, flags);
2786		cancel_delayed_work_sync(&host->detect);
2787
2788		if (!host->bus_ops)
2789			break;
2790
2791		/* Validate prerequisites for suspend */
2792		if (host->bus_ops->pre_suspend)
2793			err = host->bus_ops->pre_suspend(host);
2794		if (!err)
2795			break;
2796
2797		/* Calling bus_ops->remove() with a claimed host can deadlock */
2798		host->bus_ops->remove(host);
2799		mmc_claim_host(host);
2800		mmc_detach_bus(host);
2801		mmc_power_off(host);
2802		mmc_release_host(host);
2803		host->pm_flags = 0;
2804		break;
2805
2806	case PM_POST_SUSPEND:
2807	case PM_POST_HIBERNATION:
2808	case PM_POST_RESTORE:
2809
2810		spin_lock_irqsave(&host->lock, flags);
2811		host->rescan_disable = 0;
2812		spin_unlock_irqrestore(&host->lock, flags);
2813		_mmc_detect_change(host, 0, false);
2814
2815	}
2816
2817	return 0;
2818}
2819
2820void mmc_register_pm_notifier(struct mmc_host *host)
2821{
2822	host->pm_notify.notifier_call = mmc_pm_notify;
2823	register_pm_notifier(&host->pm_notify);
2824}
2825
2826void mmc_unregister_pm_notifier(struct mmc_host *host)
2827{
2828	unregister_pm_notifier(&host->pm_notify);
2829}
2830#endif
2831
2832/**
2833 * mmc_init_context_info() - init synchronization context
2834 * @host: mmc host
2835 *
2836 * Init struct context_info needed to implement asynchronous
2837 * request mechanism, used by mmc core, host driver and mmc requests
2838 * supplier.
2839 */
2840void mmc_init_context_info(struct mmc_host *host)
2841{
2842	spin_lock_init(&host->context_info.lock);
2843	host->context_info.is_new_req = false;
2844	host->context_info.is_done_rcv = false;
2845	host->context_info.is_waiting_last_req = false;
2846	init_waitqueue_head(&host->context_info.wait);
2847}
2848
2849static int __init mmc_init(void)
2850{
2851	int ret;
2852
2853	ret = mmc_register_bus();
2854	if (ret)
2855		return ret;
2856
2857	ret = mmc_register_host_class();
2858	if (ret)
2859		goto unregister_bus;
2860
2861	ret = sdio_register_bus();
2862	if (ret)
2863		goto unregister_host_class;
2864
2865	return 0;
2866
2867unregister_host_class:
2868	mmc_unregister_host_class();
2869unregister_bus:
2870	mmc_unregister_bus();
2871	return ret;
2872}
2873
2874static void __exit mmc_exit(void)
2875{
2876	sdio_unregister_bus();
2877	mmc_unregister_host_class();
2878	mmc_unregister_bus();
2879}
2880
2881subsys_initcall(mmc_init);
2882module_exit(mmc_exit);
2883
2884MODULE_LICENSE("GPL");
v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/drivers/mmc/core/core.c
   4 *
   5 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
   6 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
   7 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
   8 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
   9 */
  10#include <linux/module.h>
  11#include <linux/init.h>
  12#include <linux/interrupt.h>
  13#include <linux/completion.h>
  14#include <linux/device.h>
  15#include <linux/delay.h>
  16#include <linux/pagemap.h>
  17#include <linux/err.h>
  18#include <linux/leds.h>
  19#include <linux/scatterlist.h>
  20#include <linux/log2.h>
  21#include <linux/pm_runtime.h>
  22#include <linux/pm_wakeup.h>
  23#include <linux/suspend.h>
  24#include <linux/fault-inject.h>
  25#include <linux/random.h>
  26#include <linux/slab.h>
  27#include <linux/of.h>
  28
  29#include <linux/mmc/card.h>
  30#include <linux/mmc/host.h>
  31#include <linux/mmc/mmc.h>
  32#include <linux/mmc/sd.h>
  33#include <linux/mmc/slot-gpio.h>
  34
  35#define CREATE_TRACE_POINTS
  36#include <trace/events/mmc.h>
  37
  38#include "core.h"
  39#include "card.h"
  40#include "bus.h"
  41#include "host.h"
  42#include "sdio_bus.h"
  43#include "pwrseq.h"
  44
  45#include "mmc_ops.h"
  46#include "sd_ops.h"
  47#include "sdio_ops.h"
  48
  49/* The max erase timeout, used when host->max_busy_timeout isn't specified */
  50#define MMC_ERASE_TIMEOUT_MS	(60 * 1000) /* 60 s */
  51#define SD_DISCARD_TIMEOUT_MS	(250)
  52
  53static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
  54
  55/*
  56 * Enabling software CRCs on the data blocks can be a significant (30%)
  57 * performance cost, and for other reasons may not always be desired.
  58 * So we allow it to be disabled.
  59 */
  60bool use_spi_crc = 1;
  61module_param(use_spi_crc, bool, 0);
  62
  63static int mmc_schedule_delayed_work(struct delayed_work *work,
  64				     unsigned long delay)
  65{
  66	/*
  67	 * We use the system_freezable_wq, because of two reasons.
  68	 * First, it allows several works (not the same work item) to be
  69	 * executed simultaneously. Second, the queue becomes frozen when
  70	 * userspace becomes frozen during system PM.
  71	 */
  72	return queue_delayed_work(system_freezable_wq, work, delay);
  73}
  74
  75#ifdef CONFIG_FAIL_MMC_REQUEST
  76
  77/*
  78 * Internal function. Inject random data errors.
  79 * If mmc_data is NULL no errors are injected.
  80 */
  81static void mmc_should_fail_request(struct mmc_host *host,
  82				    struct mmc_request *mrq)
  83{
  84	struct mmc_command *cmd = mrq->cmd;
  85	struct mmc_data *data = mrq->data;
  86	static const int data_errors[] = {
  87		-ETIMEDOUT,
  88		-EILSEQ,
  89		-EIO,
  90	};
  91
  92	if (!data)
  93		return;
  94
  95	if ((cmd && cmd->error) || data->error ||
  96	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
  97		return;
  98
  99	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
 100	data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
 101}
 102
 103#else /* CONFIG_FAIL_MMC_REQUEST */
 104
 105static inline void mmc_should_fail_request(struct mmc_host *host,
 106					   struct mmc_request *mrq)
 107{
 108}
 109
 110#endif /* CONFIG_FAIL_MMC_REQUEST */
 111
 112static inline void mmc_complete_cmd(struct mmc_request *mrq)
 113{
 114	if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
 115		complete_all(&mrq->cmd_completion);
 116}
 117
 118void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
 119{
 120	if (!mrq->cap_cmd_during_tfr)
 121		return;
 122
 123	mmc_complete_cmd(mrq);
 124
 125	pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
 126		 mmc_hostname(host), mrq->cmd->opcode);
 127}
 128EXPORT_SYMBOL(mmc_command_done);
 129
 130/**
 131 *	mmc_request_done - finish processing an MMC request
 132 *	@host: MMC host which completed request
 133 *	@mrq: MMC request which completed
 134 *
 135 *	MMC drivers should call this function when they have completed
 136 *	their processing of a request.
 137 */
 138void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
 139{
 140	struct mmc_command *cmd = mrq->cmd;
 141	int err = cmd->error;
 142
 143	/* Flag re-tuning needed on CRC errors */
 144	if (cmd->opcode != MMC_SEND_TUNING_BLOCK &&
 145	    cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200 &&
 146	    !host->retune_crc_disable &&
 147	    (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
 148	    (mrq->data && mrq->data->error == -EILSEQ) ||
 149	    (mrq->stop && mrq->stop->error == -EILSEQ)))
 150		mmc_retune_needed(host);
 151
 152	if (err && cmd->retries && mmc_host_is_spi(host)) {
 153		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
 154			cmd->retries = 0;
 155	}
 156
 157	if (host->ongoing_mrq == mrq)
 158		host->ongoing_mrq = NULL;
 159
 160	mmc_complete_cmd(mrq);
 161
 162	trace_mmc_request_done(host, mrq);
 163
 164	/*
 165	 * We list various conditions for the command to be considered
 166	 * properly done:
 167	 *
 168	 * - There was no error, OK fine then
 169	 * - We are not doing some kind of retry
 170	 * - The card was removed (...so just complete everything no matter
 171	 *   if there are errors or retries)
 172	 */
 173	if (!err || !cmd->retries || mmc_card_removed(host->card)) {
 174		mmc_should_fail_request(host, mrq);
 175
 176		if (!host->ongoing_mrq)
 177			led_trigger_event(host->led, LED_OFF);
 178
 179		if (mrq->sbc) {
 180			pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
 181				mmc_hostname(host), mrq->sbc->opcode,
 182				mrq->sbc->error,
 183				mrq->sbc->resp[0], mrq->sbc->resp[1],
 184				mrq->sbc->resp[2], mrq->sbc->resp[3]);
 185		}
 186
 187		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
 188			mmc_hostname(host), cmd->opcode, err,
 189			cmd->resp[0], cmd->resp[1],
 190			cmd->resp[2], cmd->resp[3]);
 191
 192		if (mrq->data) {
 193			pr_debug("%s:     %d bytes transferred: %d\n",
 194				mmc_hostname(host),
 195				mrq->data->bytes_xfered, mrq->data->error);
 196		}
 197
 198		if (mrq->stop) {
 199			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
 200				mmc_hostname(host), mrq->stop->opcode,
 201				mrq->stop->error,
 202				mrq->stop->resp[0], mrq->stop->resp[1],
 203				mrq->stop->resp[2], mrq->stop->resp[3]);
 204		}
 205	}
 206	/*
 207	 * Request starter must handle retries - see
 208	 * mmc_wait_for_req_done().
 209	 */
 210	if (mrq->done)
 211		mrq->done(mrq);
 212}
 213
 214EXPORT_SYMBOL(mmc_request_done);
 215
 216static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 217{
 218	int err;
 219
 220	/* Assumes host controller has been runtime resumed by mmc_claim_host */
 221	err = mmc_retune(host);
 222	if (err) {
 223		mrq->cmd->error = err;
 224		mmc_request_done(host, mrq);
 225		return;
 226	}
 227
 228	/*
 229	 * For sdio rw commands we must wait for card busy otherwise some
 230	 * sdio devices won't work properly.
 231	 * And bypass I/O abort, reset and bus suspend operations.
 232	 */
 233	if (sdio_is_io_busy(mrq->cmd->opcode, mrq->cmd->arg) &&
 234	    host->ops->card_busy) {
 235		int tries = 500; /* Wait approx 500ms at maximum */
 236
 237		while (host->ops->card_busy(host) && --tries)
 238			mmc_delay(1);
 239
 240		if (tries == 0) {
 241			mrq->cmd->error = -EBUSY;
 242			mmc_request_done(host, mrq);
 243			return;
 244		}
 245	}
 246
 247	if (mrq->cap_cmd_during_tfr) {
 248		host->ongoing_mrq = mrq;
 249		/*
 250		 * Retry path could come through here without having waited on
 251		 * cmd_completion, so ensure it is reinitialised.
 252		 */
 253		reinit_completion(&mrq->cmd_completion);
 254	}
 255
 256	trace_mmc_request_start(host, mrq);
 257
 258	if (host->cqe_on)
 259		host->cqe_ops->cqe_off(host);
 260
 261	host->ops->request(host, mrq);
 262}
 263
 264static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq,
 265			     bool cqe)
 266{
 267	if (mrq->sbc) {
 268		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
 269			 mmc_hostname(host), mrq->sbc->opcode,
 270			 mrq->sbc->arg, mrq->sbc->flags);
 271	}
 272
 273	if (mrq->cmd) {
 274		pr_debug("%s: starting %sCMD%u arg %08x flags %08x\n",
 275			 mmc_hostname(host), cqe ? "CQE direct " : "",
 276			 mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
 277	} else if (cqe) {
 278		pr_debug("%s: starting CQE transfer for tag %d blkaddr %u\n",
 279			 mmc_hostname(host), mrq->tag, mrq->data->blk_addr);
 280	}
 281
 282	if (mrq->data) {
 283		pr_debug("%s:     blksz %d blocks %d flags %08x "
 284			"tsac %d ms nsac %d\n",
 285			mmc_hostname(host), mrq->data->blksz,
 286			mrq->data->blocks, mrq->data->flags,
 287			mrq->data->timeout_ns / 1000000,
 288			mrq->data->timeout_clks);
 289	}
 290
 291	if (mrq->stop) {
 292		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
 293			 mmc_hostname(host), mrq->stop->opcode,
 294			 mrq->stop->arg, mrq->stop->flags);
 295	}
 296}
 297
 298static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
 299{
 300	unsigned int i, sz = 0;
 301	struct scatterlist *sg;
 302
 303	if (mrq->cmd) {
 304		mrq->cmd->error = 0;
 305		mrq->cmd->mrq = mrq;
 306		mrq->cmd->data = mrq->data;
 307	}
 308	if (mrq->sbc) {
 309		mrq->sbc->error = 0;
 310		mrq->sbc->mrq = mrq;
 311	}
 312	if (mrq->data) {
 313		if (mrq->data->blksz > host->max_blk_size ||
 314		    mrq->data->blocks > host->max_blk_count ||
 315		    mrq->data->blocks * mrq->data->blksz > host->max_req_size)
 316			return -EINVAL;
 317
 318		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
 319			sz += sg->length;
 320		if (sz != mrq->data->blocks * mrq->data->blksz)
 321			return -EINVAL;
 322
 323		mrq->data->error = 0;
 324		mrq->data->mrq = mrq;
 325		if (mrq->stop) {
 326			mrq->data->stop = mrq->stop;
 327			mrq->stop->error = 0;
 328			mrq->stop->mrq = mrq;
 329		}
 330	}
 331
 332	return 0;
 333}
 334
 335int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 336{
 337	int err;
 338
 339	init_completion(&mrq->cmd_completion);
 340
 341	mmc_retune_hold(host);
 342
 343	if (mmc_card_removed(host->card))
 344		return -ENOMEDIUM;
 345
 346	mmc_mrq_pr_debug(host, mrq, false);
 347
 348	WARN_ON(!host->claimed);
 349
 350	err = mmc_mrq_prep(host, mrq);
 351	if (err)
 352		return err;
 353
 354	led_trigger_event(host->led, LED_FULL);
 355	__mmc_start_request(host, mrq);
 356
 357	return 0;
 358}
 359EXPORT_SYMBOL(mmc_start_request);
 360
 361static void mmc_wait_done(struct mmc_request *mrq)
 362{
 363	complete(&mrq->completion);
 364}
 365
 366static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
 367{
 368	struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);
 369
 370	/*
 371	 * If there is an ongoing transfer, wait for the command line to become
 372	 * available.
 373	 */
 374	if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
 375		wait_for_completion(&ongoing_mrq->cmd_completion);
 376}
 377
 378static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
 379{
 380	int err;
 381
 382	mmc_wait_ongoing_tfr_cmd(host);
 383
 384	init_completion(&mrq->completion);
 385	mrq->done = mmc_wait_done;
 386
 387	err = mmc_start_request(host, mrq);
 388	if (err) {
 389		mrq->cmd->error = err;
 390		mmc_complete_cmd(mrq);
 391		complete(&mrq->completion);
 392	}
 393
 394	return err;
 395}
 396
 397void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
 398{
 399	struct mmc_command *cmd;
 400
 401	while (1) {
 402		wait_for_completion(&mrq->completion);
 403
 404		cmd = mrq->cmd;
 405
 406		/*
 407		 * If host has timed out waiting for the sanitize
 408		 * to complete, card might be still in programming state
 409		 * so let's try to bring the card out of programming
 410		 * state.
 411		 */
 412		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
 413			if (!mmc_interrupt_hpi(host->card)) {
 414				pr_warn("%s: %s: Interrupted sanitize\n",
 415					mmc_hostname(host), __func__);
 416				cmd->error = 0;
 417				break;
 418			} else {
 419				pr_err("%s: %s: Failed to interrupt sanitize\n",
 420				       mmc_hostname(host), __func__);
 421			}
 422		}
 423		if (!cmd->error || !cmd->retries ||
 424		    mmc_card_removed(host->card))
 425			break;
 426
 427		mmc_retune_recheck(host);
 428
 429		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
 430			 mmc_hostname(host), cmd->opcode, cmd->error);
 431		cmd->retries--;
 432		cmd->error = 0;
 433		__mmc_start_request(host, mrq);
 434	}
 435
 436	mmc_retune_release(host);
 437}
 438EXPORT_SYMBOL(mmc_wait_for_req_done);
 439
 440/*
 441 * mmc_cqe_start_req - Start a CQE request.
 442 * @host: MMC host to start the request
 443 * @mrq: request to start
 444 *
 445 * Start the request, re-tuning if needed and it is possible. Returns an error
 446 * code if the request fails to start or -EBUSY if CQE is busy.
 447 */
 448int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
 449{
 450	int err;
 451
 452	/*
 453	 * CQE cannot process re-tuning commands. Caller must hold retuning
 454	 * while CQE is in use.  Re-tuning can happen here only when CQE has no
 455	 * active requests i.e. this is the first.  Note, re-tuning will call
 456	 * ->cqe_off().
 457	 */
 458	err = mmc_retune(host);
 459	if (err)
 460		goto out_err;
 461
 462	mrq->host = host;
 463
 464	mmc_mrq_pr_debug(host, mrq, true);
 465
 466	err = mmc_mrq_prep(host, mrq);
 467	if (err)
 468		goto out_err;
 469
 470	err = host->cqe_ops->cqe_request(host, mrq);
 471	if (err)
 472		goto out_err;
 473
 474	trace_mmc_request_start(host, mrq);
 475
 476	return 0;
 477
 478out_err:
 479	if (mrq->cmd) {
 480		pr_debug("%s: failed to start CQE direct CMD%u, error %d\n",
 481			 mmc_hostname(host), mrq->cmd->opcode, err);
 482	} else {
 483		pr_debug("%s: failed to start CQE transfer for tag %d, error %d\n",
 484			 mmc_hostname(host), mrq->tag, err);
 485	}
 486	return err;
 487}
 488EXPORT_SYMBOL(mmc_cqe_start_req);
 489
 490/**
 491 *	mmc_cqe_request_done - CQE has finished processing an MMC request
 492 *	@host: MMC host which completed request
 493 *	@mrq: MMC request which completed
 494 *
 495 *	CQE drivers should call this function when they have completed
 496 *	their processing of a request.
 497 */
 498void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq)
 499{
 500	mmc_should_fail_request(host, mrq);
 501
 502	/* Flag re-tuning needed on CRC errors */
 503	if ((mrq->cmd && mrq->cmd->error == -EILSEQ) ||
 504	    (mrq->data && mrq->data->error == -EILSEQ))
 505		mmc_retune_needed(host);
 506
 507	trace_mmc_request_done(host, mrq);
 508
 509	if (mrq->cmd) {
 510		pr_debug("%s: CQE req done (direct CMD%u): %d\n",
 511			 mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error);
 512	} else {
 513		pr_debug("%s: CQE transfer done tag %d\n",
 514			 mmc_hostname(host), mrq->tag);
 515	}
 516
 517	if (mrq->data) {
 518		pr_debug("%s:     %d bytes transferred: %d\n",
 519			 mmc_hostname(host),
 520			 mrq->data->bytes_xfered, mrq->data->error);
 521	}
 522
 523	mrq->done(mrq);
 524}
 525EXPORT_SYMBOL(mmc_cqe_request_done);
 526
 527/**
 528 *	mmc_cqe_post_req - CQE post process of a completed MMC request
 529 *	@host: MMC host
 530 *	@mrq: MMC request to be processed
 531 */
 532void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq)
 533{
 534	if (host->cqe_ops->cqe_post_req)
 535		host->cqe_ops->cqe_post_req(host, mrq);
 536}
 537EXPORT_SYMBOL(mmc_cqe_post_req);
 538
 539/* Arbitrary 1 second timeout */
 540#define MMC_CQE_RECOVERY_TIMEOUT	1000
 541
 542/*
 543 * mmc_cqe_recovery - Recover from CQE errors.
 544 * @host: MMC host to recover
 545 *
 546 * Recovery consists of stopping CQE, stopping eMMC, discarding the queue
 547 * in eMMC, and discarding the queue in CQE. CQE must call
 548 * mmc_cqe_request_done() on all requests. An error is returned if the eMMC
 549 * fails to discard its queue.
 550 */
 551int mmc_cqe_recovery(struct mmc_host *host)
 552{
 553	struct mmc_command cmd;
 554	int err;
 555
 556	mmc_retune_hold_now(host);
 557
 558	/*
 559	 * Recovery is expected seldom, if at all, but it reduces performance,
 560	 * so make sure it is not completely silent.
 561	 */
 562	pr_warn("%s: running CQE recovery\n", mmc_hostname(host));
 563
 564	host->cqe_ops->cqe_recovery_start(host);
 565
 566	memset(&cmd, 0, sizeof(cmd));
 567	cmd.opcode       = MMC_STOP_TRANSMISSION,
 568	cmd.flags        = MMC_RSP_R1B | MMC_CMD_AC,
 569	cmd.flags       &= ~MMC_RSP_CRC; /* Ignore CRC */
 570	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT,
 571	mmc_wait_for_cmd(host, &cmd, 0);
 572
 573	memset(&cmd, 0, sizeof(cmd));
 574	cmd.opcode       = MMC_CMDQ_TASK_MGMT;
 575	cmd.arg          = 1; /* Discard entire queue */
 576	cmd.flags        = MMC_RSP_R1B | MMC_CMD_AC;
 577	cmd.flags       &= ~MMC_RSP_CRC; /* Ignore CRC */
 578	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT,
 579	err = mmc_wait_for_cmd(host, &cmd, 0);
 580
 581	host->cqe_ops->cqe_recovery_finish(host);
 582
 583	mmc_retune_release(host);
 584
 585	return err;
 586}
 587EXPORT_SYMBOL(mmc_cqe_recovery);
 588
 589/**
 590 *	mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
 591 *	@host: MMC host
 592 *	@mrq: MMC request
 593 *
 594 *	mmc_is_req_done() is used with requests that have
 595 *	mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
 596 *	starting a request and before waiting for it to complete. That is,
 597 *	either in between calls to mmc_start_req(), or after mmc_wait_for_req()
 598 *	and before mmc_wait_for_req_done(). If it is called at other times the
 599 *	result is not meaningful.
 600 */
 601bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
 602{
 603	return completion_done(&mrq->completion);
 604}
 605EXPORT_SYMBOL(mmc_is_req_done);
 606
 607/**
 608 *	mmc_wait_for_req - start a request and wait for completion
 609 *	@host: MMC host to start command
 610 *	@mrq: MMC request to start
 611 *
 612 *	Start a new MMC custom command request for a host, and wait
 613 *	for the command to complete. In the case of 'cap_cmd_during_tfr'
 614 *	requests, the transfer is ongoing and the caller can issue further
 615 *	commands that do not use the data lines, and then wait by calling
 616 *	mmc_wait_for_req_done().
 617 *	Does not attempt to parse the response.
 618 */
 619void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
 620{
 621	__mmc_start_req(host, mrq);
 622
 623	if (!mrq->cap_cmd_during_tfr)
 624		mmc_wait_for_req_done(host, mrq);
 625}
 626EXPORT_SYMBOL(mmc_wait_for_req);
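
/*
 * Illustrative sketch (not from this file): roughly how a caller builds a
 * single-block read and submits it with mmc_wait_for_req().  Assumes the
 * caller has already claimed the host; the function, buffer and block
 * address are hypothetical.
 */
static int example_read_block(struct mmc_card *card, u32 blk_addr, void *buf)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	cmd.opcode = MMC_READ_SINGLE_BLOCK;
	cmd.arg = blk_addr;		/* sector for block-addressed cards */
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 512;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, buf, 512);

	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	mmc_wait_for_req(card->host, &mrq);

	return cmd.error ? cmd.error : data.error;
}
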
 627
 628/**
 629 *	mmc_wait_for_cmd - start a command and wait for completion
 630 *	@host: MMC host to start command
 631 *	@cmd: MMC command to start
 632 *	@retries: maximum number of retries
 633 *
 634 *	Start a new MMC command for a host, and wait for the command
 635 *	to complete.  Return any error that occurred while the command
 636 *	was executing.  Do not attempt to parse the response.
 637 */
 638int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
 639{
 640	struct mmc_request mrq = {};
 641
 642	WARN_ON(!host->claimed);
 643
 644	memset(cmd->resp, 0, sizeof(cmd->resp));
 645	cmd->retries = retries;
 646
 647	mrq.cmd = cmd;
 648	cmd->data = NULL;
 649
 650	mmc_wait_for_req(host, &mrq);
 651
 652	return cmd->error;
 653}
 654
 655EXPORT_SYMBOL(mmc_wait_for_cmd);
 656
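
/*
 * Illustrative sketch (not from this file): a minimal mmc_wait_for_cmd()
 * user, sending CMD13 (SEND_STATUS) to read the card status register.
 * Assumes the caller has claimed the host; the function name is
 * hypothetical.
 */
static int example_send_status(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (!err)
		*status = cmd.resp[0];

	return err;
}
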
 657/**
 658 *	mmc_set_data_timeout - set the timeout for a data command
 659 *	@data: data phase for command
 660 *	@card: the MMC card associated with the data transfer
 661 *
 662 *	Computes the data timeout parameters according to the
 663 *	correct algorithm given the card type.
 664 */
 665void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
 666{
 667	unsigned int mult;
 668
 669	/*
 670	 * SDIO cards only define an upper 1 s limit on access.
 671	 */
 672	if (mmc_card_sdio(card)) {
 673		data->timeout_ns = 1000000000;
 674		data->timeout_clks = 0;
 675		return;
 676	}
 677
 678	/*
 679	 * SD cards use a 100 multiplier rather than 10
 680	 */
 681	mult = mmc_card_sd(card) ? 100 : 10;
 682
 683	/*
 684	 * Scale up the multiplier (and therefore the timeout) by
 685	 * the r2w factor for writes.
 686	 */
 687	if (data->flags & MMC_DATA_WRITE)
 688		mult <<= card->csd.r2w_factor;
 689
 690	data->timeout_ns = card->csd.taac_ns * mult;
 691	data->timeout_clks = card->csd.taac_clks * mult;
 692
 693	/*
 694	 * SD cards also have an upper limit on the timeout.
 695	 */
 696	if (mmc_card_sd(card)) {
 697		unsigned int timeout_us, limit_us;
 698
 699		timeout_us = data->timeout_ns / 1000;
 700		if (card->host->ios.clock)
 701			timeout_us += data->timeout_clks * 1000 /
 702				(card->host->ios.clock / 1000);
 703
 704		if (data->flags & MMC_DATA_WRITE)
 705			/*
 706			 * The MMC spec says "It is strongly recommended
 707			 * for hosts to implement more than 500ms
 708			 * timeout value even if the card indicates
 709			 * the 250ms maximum busy length."  Even the
 710			 * previous value of 300ms is known to be
 711			 * insufficient for some cards.
 712			 */
 713			limit_us = 3000000;
 714		else
 715			limit_us = 100000;
 716
 717		/*
 718		 * SDHC cards always use these fixed values.
 719		 */
 720		if (timeout_us > limit_us) {
 721			data->timeout_ns = limit_us * 1000;
 722			data->timeout_clks = 0;
 723		}
 724
 725		/* assign limit value if invalid */
 726		if (timeout_us == 0)
 727			data->timeout_ns = limit_us * 1000;
 728	}
 729
 730	/*
 731	 * Some cards require longer data read timeout than indicated in CSD.
 732	 * Address this by setting the read timeout to a "reasonably high"
 733	 * value. For the cards tested, 600ms has proven enough. If necessary,
 734	 * this value can be increased if other problematic cards require this.
 735	 */
 736	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
 737		data->timeout_ns = 600000000;
 738		data->timeout_clks = 0;
 739	}
 740
 741	/*
 742	 * Some cards need very high timeouts if driven in SPI mode.
 743	 * The worst observed timeout was 900ms after writing a
 744	 * continuous stream of data until the internal logic
 745	 * overflowed.
 746	 */
 747	if (mmc_host_is_spi(card->host)) {
 748		if (data->flags & MMC_DATA_WRITE) {
 749			if (data->timeout_ns < 1000000000)
 750				data->timeout_ns = 1000000000;	/* 1s */
 751		} else {
 752			if (data->timeout_ns < 100000000)
 753				data->timeout_ns =  100000000;	/* 100ms */
 754		}
 755	}
 756}
 757EXPORT_SYMBOL(mmc_set_data_timeout);
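
/*
 * Worked example (editor's illustration, hypothetical CSD values): for an SD
 * card with taac_ns = 1000000 (1 ms), taac_clks = 0 and r2w_factor = 2, a
 * write uses mult = 100 << 2 = 400, giving timeout_ns = 400 ms.  That is
 * 400000 us, below the 3000000 us write limit, so it is kept; a read with
 * the same CSD gets 100 ms, right at the 100000 us read limit.
 */
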
 758
 759/*
 760 * Allow claiming an already claimed host if the context is the same or there is
 761 * no context but the task is the same.
 762 */
 763static inline bool mmc_ctx_matches(struct mmc_host *host, struct mmc_ctx *ctx,
 764				   struct task_struct *task)
 765{
 766	return host->claimer == ctx ||
 767	       (!ctx && task && host->claimer->task == task);
 768}
 769
 770static inline void mmc_ctx_set_claimer(struct mmc_host *host,
 771				       struct mmc_ctx *ctx,
 772				       struct task_struct *task)
 773{
 774	if (!host->claimer) {
 775		if (ctx)
 776			host->claimer = ctx;
 777		else
 778			host->claimer = &host->default_ctx;
 779	}
 780	if (task)
 781		host->claimer->task = task;
 782}
 783
 784/**
 785 *	__mmc_claim_host - exclusively claim a host
 786 *	@host: mmc host to claim
 787 *	@ctx: context that claims the host or NULL in which case the default
 788 *	context will be used
 789 *	@abort: whether or not the operation should be aborted
 790 *
 791 *	Claim a host for a set of operations.  If @abort is non-NULL and
 792 *	dereferences to a non-zero value then this will return prematurely with
 793 *	that non-zero value without acquiring the lock.  Returns zero
 794 *	with the lock held otherwise.
 795 */
 796int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
 797		     atomic_t *abort)
 798{
 799	struct task_struct *task = ctx ? NULL : current;
 800	DECLARE_WAITQUEUE(wait, current);
 801	unsigned long flags;
 802	int stop;
 803	bool pm = false;
 804
 805	might_sleep();
 806
 807	add_wait_queue(&host->wq, &wait);
 808	spin_lock_irqsave(&host->lock, flags);
 809	while (1) {
 810		set_current_state(TASK_UNINTERRUPTIBLE);
 811		stop = abort ? atomic_read(abort) : 0;
 812		if (stop || !host->claimed || mmc_ctx_matches(host, ctx, task))
 813			break;
 814		spin_unlock_irqrestore(&host->lock, flags);
 815		schedule();
 816		spin_lock_irqsave(&host->lock, flags);
 817	}
 818	set_current_state(TASK_RUNNING);
 819	if (!stop) {
 820		host->claimed = 1;
 821		mmc_ctx_set_claimer(host, ctx, task);
 822		host->claim_cnt += 1;
 823		if (host->claim_cnt == 1)
 824			pm = true;
 825	} else
 826		wake_up(&host->wq);
 827	spin_unlock_irqrestore(&host->lock, flags);
 828	remove_wait_queue(&host->wq, &wait);
 829
 830	if (pm)
 831		pm_runtime_get_sync(mmc_dev(host));
 832
 833	return stop;
 834}
 835EXPORT_SYMBOL(__mmc_claim_host);
 836
 837/**
 838 *	mmc_release_host - release a host
 839 *	@host: mmc host to release
 840 *
 841 *	Release a MMC host, allowing others to claim the host
 842 *	for their operations.
 843 */
 844void mmc_release_host(struct mmc_host *host)
 845{
 846	unsigned long flags;
 847
 848	WARN_ON(!host->claimed);
 849
 850	spin_lock_irqsave(&host->lock, flags);
 851	if (--host->claim_cnt) {
 852		/* Release for nested claim */
 853		spin_unlock_irqrestore(&host->lock, flags);
 854	} else {
 855		host->claimed = 0;
 856		host->claimer->task = NULL;
 857		host->claimer = NULL;
 858		spin_unlock_irqrestore(&host->lock, flags);
 859		wake_up(&host->wq);
 860		pm_runtime_mark_last_busy(mmc_dev(host));
 861		if (host->caps & MMC_CAP_SYNC_RUNTIME_PM)
 862			pm_runtime_put_sync_suspend(mmc_dev(host));
 863		else
 864			pm_runtime_put_autosuspend(mmc_dev(host));
 865	}
 866}
 867EXPORT_SYMBOL(mmc_release_host);
 868
 869/*
 870 * This is a helper function, which fetches a runtime pm reference for the
 871 * card device and also claims the host.
 872 */
 873void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx)
 874{
 875	pm_runtime_get_sync(&card->dev);
 876	__mmc_claim_host(card->host, ctx, NULL);
 877}
 878EXPORT_SYMBOL(mmc_get_card);
 879
 880/*
 881 * This is a helper function, which releases the host and drops the runtime
 882 * pm reference for the card device.
 883 */
 884void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx)
 885{
 886	struct mmc_host *host = card->host;
 887
 888	WARN_ON(ctx && host->claimer != ctx);
 889
 890	mmc_release_host(host);
 891	pm_runtime_mark_last_busy(&card->dev);
 892	pm_runtime_put_autosuspend(&card->dev);
 893}
 894EXPORT_SYMBOL(mmc_put_card);
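
/*
 * Illustrative sketch (not from this file): card function drivers bracket
 * access to the card with mmc_get_card()/mmc_put_card(), which handle both
 * the runtime PM reference and the host claim.  The function is hypothetical.
 */
static void example_card_op(struct mmc_card *card)
{
	mmc_get_card(card, NULL);
	/* ... issue commands against card->host while it is claimed ... */
	mmc_put_card(card, NULL);
}
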
 895
 896/*
 897 * Internal function that does the actual ios call to the host driver,
 898 * optionally printing some debug output.
 899 */
 900static inline void mmc_set_ios(struct mmc_host *host)
 901{
 902	struct mmc_ios *ios = &host->ios;
 903
 904	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
 905		"width %u timing %u\n",
 906		 mmc_hostname(host), ios->clock, ios->bus_mode,
 907		 ios->power_mode, ios->chip_select, ios->vdd,
 908		 1 << ios->bus_width, ios->timing);
 909
 910	host->ops->set_ios(host, ios);
 911}
 912
 913/*
 914 * Control chip select pin on a host.
 915 */
 916void mmc_set_chip_select(struct mmc_host *host, int mode)
 917{
 918	host->ios.chip_select = mode;
 919	mmc_set_ios(host);
 920}
 921
 922/*
 923 * Sets the host clock to the highest possible frequency that
 924 * is below "hz".
 925 */
 926void mmc_set_clock(struct mmc_host *host, unsigned int hz)
 927{
 928	WARN_ON(hz && hz < host->f_min);
 929
 930	if (hz > host->f_max)
 931		hz = host->f_max;
 932
 933	host->ios.clock = hz;
 934	mmc_set_ios(host);
 935}
 936
 937int mmc_execute_tuning(struct mmc_card *card)
 938{
 939	struct mmc_host *host = card->host;
 940	u32 opcode;
 941	int err;
 942
 943	if (!host->ops->execute_tuning)
 944		return 0;
 945
 946	if (host->cqe_on)
 947		host->cqe_ops->cqe_off(host);
 948
 949	if (mmc_card_mmc(card))
 950		opcode = MMC_SEND_TUNING_BLOCK_HS200;
 951	else
 952		opcode = MMC_SEND_TUNING_BLOCK;
 953
 954	err = host->ops->execute_tuning(host, opcode);
 955
 956	if (err)
 957		pr_err("%s: tuning execution failed: %d\n",
 958			mmc_hostname(host), err);
 959	else
 960		mmc_retune_enable(host);
 961
 962	return err;
 963}
 964
 965/*
 966 * Change the bus mode (open drain/push-pull) of a host.
 967 */
 968void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
 969{
 970	host->ios.bus_mode = mode;
 971	mmc_set_ios(host);
 972}
 973
 974/*
 975 * Change data bus width of a host.
 976 */
 977void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
 978{
 979	host->ios.bus_width = width;
 980	mmc_set_ios(host);
 981}
 982
 983/*
 984 * Set initial state after a power cycle or a hw_reset.
 985 */
 986void mmc_set_initial_state(struct mmc_host *host)
 987{
 988	if (host->cqe_on)
 989		host->cqe_ops->cqe_off(host);
 990
 991	mmc_retune_disable(host);
 992
 993	if (mmc_host_is_spi(host))
 994		host->ios.chip_select = MMC_CS_HIGH;
 995	else
 996		host->ios.chip_select = MMC_CS_DONTCARE;
 997	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
 998	host->ios.bus_width = MMC_BUS_WIDTH_1;
 999	host->ios.timing = MMC_TIMING_LEGACY;
1000	host->ios.drv_type = 0;
1001	host->ios.enhanced_strobe = false;
1002
1003	/*
1004	 * Make sure we are in non-enhanced strobe mode before we
1005	 * actually enable it in ext_csd.
1006	 */
1007	if ((host->caps2 & MMC_CAP2_HS400_ES) &&
1008	     host->ops->hs400_enhanced_strobe)
1009		host->ops->hs400_enhanced_strobe(host, &host->ios);
1010
1011	mmc_set_ios(host);
1012}
1013
1014/**
1015 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
1016 * @vdd:	voltage (mV)
1017 * @low_bits:	prefer low bits in boundary cases
1018 *
1019 * This function returns the OCR bit number according to the provided @vdd
1020 * value. If conversion is not possible, a negative errno value is returned.
1021 *
1022 * Depending on the @low_bits flag the function prefers low or high OCR bits
1023 * on boundary voltages. For example,
1024 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
1025 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
1026 *
1027 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
1028 */
1029static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
1030{
1031	const int max_bit = ilog2(MMC_VDD_35_36);
1032	int bit;
1033
1034	if (vdd < 1650 || vdd > 3600)
1035		return -EINVAL;
1036
1037	if (vdd >= 1650 && vdd <= 1950)
1038		return ilog2(MMC_VDD_165_195);
1039
1040	if (low_bits)
1041		vdd -= 1;
1042
1043	/* Base 2000 mV, step 100 mV, bit's base 8. */
1044	bit = (vdd - 2000) / 100 + 8;
1045	if (bit > max_bit)
1046		return max_bit;
1047	return bit;
1048}
1049
1050/**
1051 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
1052 * @vdd_min:	minimum voltage value (mV)
1053 * @vdd_max:	maximum voltage value (mV)
1054 *
1055 * This function returns the OCR mask bits according to the provided @vdd_min
1056 * and @vdd_max values. If conversion is not possible the function returns 0.
1057 *
1058 * Notes wrt boundary cases:
1059 * This function sets the OCR bits for all boundary voltages, for example
1060 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
1061 * MMC_VDD_34_35 mask.
1062 */
1063u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
1064{
1065	u32 mask = 0;
1066
1067	if (vdd_max < vdd_min)
1068		return 0;
1069
1070	/* Prefer high bits for the boundary vdd_max values. */
1071	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
1072	if (vdd_max < 0)
1073		return 0;
1074
1075	/* Prefer low bits for the boundary vdd_min values. */
1076	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
1077	if (vdd_min < 0)
1078		return 0;
1079
1080	/* Fill the mask, from max bit to min bit. */
1081	while (vdd_max >= vdd_min)
1082		mask |= 1 << vdd_max--;
1083
1084	return mask;
1085}
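
/*
 * Illustrative sketch (hypothetical helper): building an OCR mask for the
 * boundary example given in the kernel-doc above. The voltage range is an
 * assumption, not taken from any particular board.
 */
static u32 __maybe_unused example_ocr_mask(void)
{
	/* [3300:3400] -> MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35 */
	return mmc_vddrange_to_ocrmask(3300, 3400);
}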
1086
1087static int mmc_of_get_func_num(struct device_node *node)
1088{
1089	u32 reg;
1090	int ret;
1091
1092	ret = of_property_read_u32(node, "reg", &reg);
1093	if (ret < 0)
1094		return ret;
1095
1096	return reg;
1097}
1098
1099struct device_node *mmc_of_find_child_device(struct mmc_host *host,
1100		unsigned func_num)
1101{
1102	struct device_node *node;
1103
1104	if (!host->parent || !host->parent->of_node)
1105		return NULL;
1106
1107	for_each_child_of_node(host->parent->of_node, node) {
1108		if (mmc_of_get_func_num(node) == func_num)
1109			return node;
1110	}
1111
1112	return NULL;
1113}
1114
1115/*
1116 * Mask off any voltages we don't support and select
1117 * the lowest voltage
1118 */
1119u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1120{
1121	int bit;
1122
1123	/*
1124	 * Sanity check the voltages that the card claims to
1125	 * support.
1126	 */
1127	if (ocr & 0x7F) {
1128		dev_warn(mmc_dev(host),
1129		"card claims to support voltages below defined range\n");
1130		ocr &= ~0x7F;
1131	}
1132
1133	ocr &= host->ocr_avail;
1134	if (!ocr) {
1135		dev_warn(mmc_dev(host), "no support for card's volts\n");
1136		return 0;
1137	}
1138
1139	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
1140		bit = ffs(ocr) - 1;
1141		ocr &= 3 << bit;
1142		mmc_power_cycle(host, ocr);
1143	} else {
1144		bit = fls(ocr) - 1;
1145		ocr &= 3 << bit;
1146		if (bit != host->ios.vdd)
1147			dev_warn(mmc_dev(host), "exceeding card's volts\n");
1148	}
1149
1150	return ocr;
1151}
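
/*
 * Illustrative sketch (hypothetical helper and OCR value): with a card OCR of
 * MMC_VDD_32_33 | MMC_VDD_33_34 and no MMC_CAP2_FULL_PWR_CYCLE, fls() picks
 * the highest set bit and "3 << bit" keeps that voltage window plus the one
 * above it, i.e. the highest supported range.
 */
static u32 __maybe_unused example_select_voltage(struct mmc_host *host)
{
	u32 card_ocr = MMC_VDD_32_33 | MMC_VDD_33_34;

	return mmc_select_voltage(host, card_ocr);
}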
1152
1153int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
1154{
1155	int err = 0;
1156	int old_signal_voltage = host->ios.signal_voltage;
1157
1158	host->ios.signal_voltage = signal_voltage;
1159	if (host->ops->start_signal_voltage_switch)
1160		err = host->ops->start_signal_voltage_switch(host, &host->ios);
1161
1162	if (err)
1163		host->ios.signal_voltage = old_signal_voltage;
1164
1165	return err;
1166
1167}
1168
1169void mmc_set_initial_signal_voltage(struct mmc_host *host)
1170{
1171	/* Try to set signal voltage to 3.3V but fall back to 1.8V or 1.2V */
1172	if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330))
1173		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
1174	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
1175		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
1176	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120))
1177		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
1178}
1179
1180int mmc_host_set_uhs_voltage(struct mmc_host *host)
1181{
1182	u32 clock;
1183
1184	/*
1185	 * During a signal voltage level switch, the clock must be gated
1186	 * for 5 ms according to the SD spec
1187	 */
1188	clock = host->ios.clock;
1189	host->ios.clock = 0;
1190	mmc_set_ios(host);
1191
1192	if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
1193		return -EAGAIN;
1194
1195	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
1196	mmc_delay(10);
1197	host->ios.clock = clock;
1198	mmc_set_ios(host);
1199
1200	return 0;
1201}
1202
1203int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
1204{
1205	struct mmc_command cmd = {};
1206	int err = 0;
1207
1208	/*
1209	 * If we cannot switch voltages, return failure so the caller
1210	 * can continue without UHS mode
1211	 */
1212	if (!host->ops->start_signal_voltage_switch)
1213		return -EPERM;
1214	if (!host->ops->card_busy)
1215		pr_warn("%s: cannot verify signal voltage switch\n",
1216			mmc_hostname(host));
1217
1218	cmd.opcode = SD_SWITCH_VOLTAGE;
1219	cmd.arg = 0;
1220	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1221
1222	err = mmc_wait_for_cmd(host, &cmd, 0);
1223	if (err)
1224		return err;
1225
1226	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
1227		return -EIO;
1228
1229	/*
1230	 * The card should drive cmd and dat[0:3] low immediately
1231	 * after the response of cmd11, but wait 1 ms to be sure
1232	 */
1233	mmc_delay(1);
1234	if (host->ops->card_busy && !host->ops->card_busy(host)) {
1235		err = -EAGAIN;
1236		goto power_cycle;
1237	}
1238
1239	if (mmc_host_set_uhs_voltage(host)) {
1240		/*
1241		 * Voltages may not have been switched, but we've already
1242		 * sent CMD11, so a power cycle is required anyway
1243		 */
1244		err = -EAGAIN;
1245		goto power_cycle;
1246	}
1247
1248	/* Wait for at least 1 ms according to spec */
1249	mmc_delay(1);
1250
1251	/*
1252	 * Failure to switch is indicated by the card holding
1253	 * dat[0:3] low
1254	 */
1255	if (host->ops->card_busy && host->ops->card_busy(host))
1256		err = -EAGAIN;
1257
1258power_cycle:
1259	if (err) {
1260		pr_debug("%s: Signal voltage switch failed, "
1261			"power cycling card\n", mmc_hostname(host));
1262		mmc_power_cycle(host, ocr);
1263	}
1264
1265	return err;
1266}
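
/*
 * Illustrative sketch (assumed error handling, not the actual attach code):
 * a caller typically treats -EAGAIN from mmc_set_uhs_voltage() as "stay at
 * 3.3V signalling", since the card has already been power cycled above.
 */
static int __maybe_unused example_try_uhs(struct mmc_host *host, u32 ocr)
{
	int err = mmc_set_uhs_voltage(host, ocr);

	if (err == -EAGAIN)
		return 0;	/* continue initialization without UHS modes */

	return err;
}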
1267
1268/*
1269 * Select timing parameters for host.
1270 */
1271void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1272{
1273	host->ios.timing = timing;
1274	mmc_set_ios(host);
1275}
1276
1277/*
1278 * Select appropriate driver type for host.
1279 */
1280void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1281{
1282	host->ios.drv_type = drv_type;
1283	mmc_set_ios(host);
1284}
1285
1286int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
1287			      int card_drv_type, int *drv_type)
1288{
1289	struct mmc_host *host = card->host;
1290	int host_drv_type = SD_DRIVER_TYPE_B;
1291
1292	*drv_type = 0;
1293
1294	if (!host->ops->select_drive_strength)
1295		return 0;
1296
1297	/* Use SD definition of driver strength for hosts */
1298	if (host->caps & MMC_CAP_DRIVER_TYPE_A)
1299		host_drv_type |= SD_DRIVER_TYPE_A;
1300
1301	if (host->caps & MMC_CAP_DRIVER_TYPE_C)
1302		host_drv_type |= SD_DRIVER_TYPE_C;
1303
1304	if (host->caps & MMC_CAP_DRIVER_TYPE_D)
1305		host_drv_type |= SD_DRIVER_TYPE_D;
1306
1307	/*
1308	 * The drive strength that the hardware can support
1309	 * depends on the board design.  Pass the appropriate
1310	 * information and let the hardware specific code
1311	 * return what is possible given the options
1312	 */
1313	return host->ops->select_drive_strength(card, max_dtr,
1314						host_drv_type,
1315						card_drv_type,
1316						drv_type);
1317}
1318
1319/*
1320 * Apply power to the MMC stack.  This is a two-stage process.
1321 * First, we enable power to the card without the clock running.
1322 * We then wait a bit for the power to stabilise.  Finally,
1323 * enable the bus drivers and clock to the card.
1324 *
1325 * We must _NOT_ enable the clock prior to power stablising.
1326 *
1327 * If a host does all the power sequencing itself, ignore the
1328 * initial MMC_POWER_UP stage.
1329 */
1330void mmc_power_up(struct mmc_host *host, u32 ocr)
1331{
1332	if (host->ios.power_mode == MMC_POWER_ON)
1333		return;
1334
1335	mmc_pwrseq_pre_power_on(host);
1336
1337	host->ios.vdd = fls(ocr) - 1;
1338	host->ios.power_mode = MMC_POWER_UP;
1339	/* Set initial state and call mmc_set_ios */
1340	mmc_set_initial_state(host);
1341
1342	mmc_set_initial_signal_voltage(host);
1343
1344	/*
1345	 * This delay should be sufficient to allow the power supply
1346	 * to reach the minimum voltage.
1347	 */
1348	mmc_delay(host->ios.power_delay_ms);
1349
1350	mmc_pwrseq_post_power_on(host);
1351
1352	host->ios.clock = host->f_init;
1353
1354	host->ios.power_mode = MMC_POWER_ON;
1355	mmc_set_ios(host);
1356
1357	/*
1358	 * This delay must be at least 74 clock cycles, or 1 ms, or the
1359	 * time required to reach a stable voltage.
1360	 */
1361	mmc_delay(host->ios.power_delay_ms);
1362}
1363
1364void mmc_power_off(struct mmc_host *host)
1365{
1366	if (host->ios.power_mode == MMC_POWER_OFF)
1367		return;
1368
1369	mmc_pwrseq_power_off(host);
1370
1371	host->ios.clock = 0;
1372	host->ios.vdd = 0;
1373
1374	host->ios.power_mode = MMC_POWER_OFF;
1375	/* Set initial state and call mmc_set_ios */
1376	mmc_set_initial_state(host);
1377
1378	/*
1379	 * Some configurations, such as the 802.11 SDIO card in the OLPC
1380	 * XO-1.5, require a short delay after poweroff before the card
1381	 * can be successfully turned on again.
1382	 */
1383	mmc_delay(1);
1384}
1385
1386void mmc_power_cycle(struct mmc_host *host, u32 ocr)
1387{
1388	mmc_power_off(host);
1389	/* Wait at least 1 ms according to SD spec */
1390	mmc_delay(1);
1391	mmc_power_up(host, ocr);
1392}
1393
1394/*
1395 * Cleanup when the last reference to the bus operator is dropped.
1396 */
1397static void __mmc_release_bus(struct mmc_host *host)
1398{
1399	WARN_ON(!host->bus_dead);
1400
1401	host->bus_ops = NULL;
1402}
1403
1404/*
1405 * Increase reference count of bus operator
1406 */
1407static inline void mmc_bus_get(struct mmc_host *host)
1408{
1409	unsigned long flags;
1410
1411	spin_lock_irqsave(&host->lock, flags);
1412	host->bus_refs++;
1413	spin_unlock_irqrestore(&host->lock, flags);
1414}
1415
1416/*
1417 * Decrease reference count of bus operator and free it if
1418 * it is the last reference.
1419 */
1420static inline void mmc_bus_put(struct mmc_host *host)
1421{
1422	unsigned long flags;
1423
1424	spin_lock_irqsave(&host->lock, flags);
1425	host->bus_refs--;
1426	if ((host->bus_refs == 0) && host->bus_ops)
1427		__mmc_release_bus(host);
1428	spin_unlock_irqrestore(&host->lock, flags);
1429}
1430
1431/*
1432 * Assign a mmc bus handler to a host. Only one bus handler may control a
1433 * host at any given time.
1434 */
1435void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1436{
1437	unsigned long flags;
1438
1439	WARN_ON(!host->claimed);
1440
1441	spin_lock_irqsave(&host->lock, flags);
1442
1443	WARN_ON(host->bus_ops);
1444	WARN_ON(host->bus_refs);
1445
1446	host->bus_ops = ops;
1447	host->bus_refs = 1;
1448	host->bus_dead = 0;
1449
1450	spin_unlock_irqrestore(&host->lock, flags);
1451}
1452
1453/*
1454 * Remove the current bus handler from a host.
1455 */
1456void mmc_detach_bus(struct mmc_host *host)
1457{
1458	unsigned long flags;
1459
1460	WARN_ON(!host->claimed);
1461	WARN_ON(!host->bus_ops);
1462
1463	spin_lock_irqsave(&host->lock, flags);
1464
1465	host->bus_dead = 1;
1466
1467	spin_unlock_irqrestore(&host->lock, flags);
1468
1469	mmc_bus_put(host);
1470}
1471
1472static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
1473				bool cd_irq)
1474{
1475	/*
1476	 * If the device is configured as wakeup, we prevent a new sleep for
1477	 * 5 s to give user space time to consume the event.
1478	 */
1479	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
1480		device_can_wakeup(mmc_dev(host)))
1481		pm_wakeup_event(mmc_dev(host), 5000);
1482
1483	host->detect_change = 1;
1484	mmc_schedule_delayed_work(&host->detect, delay);
1485}
1486
1487/**
1488 *	mmc_detect_change - process change of state on an MMC socket
1489 *	@host: host which changed state.
1490 *	@delay: optional delay to wait before detection (jiffies)
1491 *
1492 *	MMC drivers should call this when they detect a card has been
1493 *	inserted or removed. The MMC layer will confirm that any
1494 *	present card is still functional, and initialize any newly
1495 *	inserted.
1496 */
1497void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1498{
1499	_mmc_detect_change(host, delay, true);
1500}
1501EXPORT_SYMBOL(mmc_detect_change);
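
/*
 * Illustrative sketch (hypothetical host-driver code): a card-detect
 * interrupt handler usually just schedules a debounced rescan through
 * mmc_detect_change(); the 200 ms debounce value here is an assumption.
 */
static irqreturn_t __maybe_unused example_cd_irq(int irq, void *dev_id)
{
	struct mmc_host *host = dev_id;

	mmc_detect_change(host, msecs_to_jiffies(200));
	return IRQ_HANDLED;
}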
1502
1503void mmc_init_erase(struct mmc_card *card)
1504{
1505	unsigned int sz;
1506
1507	if (is_power_of_2(card->erase_size))
1508		card->erase_shift = ffs(card->erase_size) - 1;
1509	else
1510		card->erase_shift = 0;
1511
1512	/*
1513	 * It is possible to erase an arbitrarily large area of an SD or MMC
1514	 * card.  That is not desirable because it can take a long time
1515	 * (minutes) potentially delaying more important I/O, and also the
1516	 * timeout calculations become increasingly over-estimated.
1517	 * Consequently, 'pref_erase' is defined as a guide to limit erases
1518	 * to that size and alignment.
1519	 *
1520	 * For SD cards that define Allocation Unit size, limit erases to one
1521	 * Allocation Unit at a time.
1522	 * For MMC, have a stab at a good value and for modern cards it will
1523	 * end up being 4 MiB. Note that if the value is too small, it can end
1524	 * up taking longer to erase. Also note, erase_size is already set to
1525	 * High Capacity Erase Size if available when this function is called.
1526	 */
1527	if (mmc_card_sd(card) && card->ssr.au) {
1528		card->pref_erase = card->ssr.au;
1529		card->erase_shift = ffs(card->ssr.au) - 1;
1530	} else if (card->erase_size) {
1531		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
1532		if (sz < 128)
1533			card->pref_erase = 512 * 1024 / 512;
1534		else if (sz < 512)
1535			card->pref_erase = 1024 * 1024 / 512;
1536		else if (sz < 1024)
1537			card->pref_erase = 2 * 1024 * 1024 / 512;
1538		else
1539			card->pref_erase = 4 * 1024 * 1024 / 512;
1540		if (card->pref_erase < card->erase_size)
1541			card->pref_erase = card->erase_size;
1542		else {
1543			sz = card->pref_erase % card->erase_size;
1544			if (sz)
1545				card->pref_erase += card->erase_size - sz;
1546		}
1547	} else
1548		card->pref_erase = 0;
1549}
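
/*
 * Worked example (assumed geometry, for illustration only): for an SD card
 * without an AU size in the SSR and an 8 GiB capacity, sz above evaluates to
 * roughly 8192, so pref_erase starts at 4 MiB worth of sectors (8192) and is
 * then rounded up to a multiple of erase_size if needed.
 */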
1550
1551static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1552				          unsigned int arg, unsigned int qty)
1553{
1554	unsigned int erase_timeout;
1555
1556	if (arg == MMC_DISCARD_ARG ||
1557	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
1558		erase_timeout = card->ext_csd.trim_timeout;
1559	} else if (card->ext_csd.erase_group_def & 1) {
1560		/* High Capacity Erase Group Size uses HC timeouts */
1561		if (arg == MMC_TRIM_ARG)
1562			erase_timeout = card->ext_csd.trim_timeout;
1563		else
1564			erase_timeout = card->ext_csd.hc_erase_timeout;
1565	} else {
1566		/* CSD Erase Group Size uses write timeout */
1567		unsigned int mult = (10 << card->csd.r2w_factor);
1568		unsigned int timeout_clks = card->csd.taac_clks * mult;
1569		unsigned int timeout_us;
1570
1571		/* Avoid overflow: e.g. taac_ns=80000000 mult=1280 */
1572		if (card->csd.taac_ns < 1000000)
1573			timeout_us = (card->csd.taac_ns * mult) / 1000;
1574		else
1575			timeout_us = (card->csd.taac_ns / 1000) * mult;
1576
1577		/*
1578		 * ios.clock is only a target.  The real clock rate might be
1579		 * less but not that much less, so fudge it by multiplying by 2.
1580		 */
1581		timeout_clks <<= 1;
1582		timeout_us += (timeout_clks * 1000) /
1583			      (card->host->ios.clock / 1000);
1584
1585		erase_timeout = timeout_us / 1000;
1586
1587		/*
1588		 * Theoretically, the calculation could underflow so round up
1589		 * to 1ms in that case.
1590		 */
1591		if (!erase_timeout)
1592			erase_timeout = 1;
1593	}
1594
1595	/* Multiplier for secure operations */
1596	if (arg & MMC_SECURE_ARGS) {
1597		if (arg == MMC_SECURE_ERASE_ARG)
1598			erase_timeout *= card->ext_csd.sec_erase_mult;
1599		else
1600			erase_timeout *= card->ext_csd.sec_trim_mult;
1601	}
1602
1603	erase_timeout *= qty;
1604
1605	/*
1606	 * Ensure at least a 1 second timeout for SPI as per
1607	 * 'mmc_set_data_timeout()'
1608	 */
1609	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
1610		erase_timeout = 1000;
1611
1612	return erase_timeout;
1613}
1614
1615static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
1616					 unsigned int arg,
1617					 unsigned int qty)
1618{
1619	unsigned int erase_timeout;
1620
1621	/* For DISCARD, none of the calculation below applies;
1622	 * the busy timeout is 250 ms per discard command.
1623	 */
1624	if (arg == SD_DISCARD_ARG)
1625		return SD_DISCARD_TIMEOUT_MS;
1626
1627	if (card->ssr.erase_timeout) {
1628		/* Erase timeout specified in SD Status Register (SSR) */
1629		erase_timeout = card->ssr.erase_timeout * qty +
1630				card->ssr.erase_offset;
1631	} else {
1632		/*
1633		 * Erase timeout not specified in SD Status Register (SSR) so
1634		 * use 250ms per write block.
1635		 */
1636		erase_timeout = 250 * qty;
1637	}
1638
1639	/* Must not be less than 1 second */
1640	if (erase_timeout < 1000)
1641		erase_timeout = 1000;
1642
1643	return erase_timeout;
1644}
1645
1646static unsigned int mmc_erase_timeout(struct mmc_card *card,
1647				      unsigned int arg,
1648				      unsigned int qty)
1649{
1650	if (mmc_card_sd(card))
1651		return mmc_sd_erase_timeout(card, arg, qty);
1652	else
1653		return mmc_mmc_erase_timeout(card, arg, qty);
1654}
1655
1656static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1657			unsigned int to, unsigned int arg)
1658{
1659	struct mmc_command cmd = {};
1660	unsigned int qty = 0, busy_timeout = 0;
1661	bool use_r1b_resp = false;
1662	unsigned long timeout;
1663	int loop_udelay = 64, udelay_max = 32768;
1664	int err;
1665
1666	mmc_retune_hold(card->host);
1667
1668	/*
1669	 * qty is used to calculate the erase timeout which depends on how many
1670	 * erase groups (or allocation units in SD terminology) are affected.
1671	 * We count erasing part of an erase group as one erase group.
1672	 * For SD, the allocation units are always a power of 2.  For MMC, the
1673	 * erase group size is almost certainly also a power of 2, but the JEDEC
1674	 * standard does not seem to insist on that, so we fall back to
1675	 * division in that case.  SD may not specify an allocation unit size,
1676	 * in which case the timeout is based on the number of write blocks.
1677	 *
1678	 * Note that the timeout for secure trim 2 will only be correct if the
1679	 * number of erase groups specified is the same as the total of all
1680	 * preceding secure trim 1 commands.  Since the power may have been
1681	 * lost since the secure trim 1 commands occurred, it is generally
1682	 * impossible to calculate the secure trim 2 timeout correctly.
1683	 */
1684	if (card->erase_shift)
1685		qty += ((to >> card->erase_shift) -
1686			(from >> card->erase_shift)) + 1;
1687	else if (mmc_card_sd(card))
1688		qty += to - from + 1;
1689	else
1690		qty += ((to / card->erase_size) -
1691			(from / card->erase_size)) + 1;
1692
1693	if (!mmc_card_blockaddr(card)) {
1694		from <<= 9;
1695		to <<= 9;
1696	}
1697
1698	if (mmc_card_sd(card))
1699		cmd.opcode = SD_ERASE_WR_BLK_START;
1700	else
1701		cmd.opcode = MMC_ERASE_GROUP_START;
1702	cmd.arg = from;
1703	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1704	err = mmc_wait_for_cmd(card->host, &cmd, 0);
1705	if (err) {
1706		pr_err("mmc_erase: group start error %d, "
1707		       "status %#x\n", err, cmd.resp[0]);
1708		err = -EIO;
1709		goto out;
1710	}
1711
1712	memset(&cmd, 0, sizeof(struct mmc_command));
1713	if (mmc_card_sd(card))
1714		cmd.opcode = SD_ERASE_WR_BLK_END;
1715	else
1716		cmd.opcode = MMC_ERASE_GROUP_END;
1717	cmd.arg = to;
1718	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1719	err = mmc_wait_for_cmd(card->host, &cmd, 0);
1720	if (err) {
1721		pr_err("mmc_erase: group end error %d, status %#x\n",
1722		       err, cmd.resp[0]);
1723		err = -EIO;
1724		goto out;
1725	}
1726
1727	memset(&cmd, 0, sizeof(struct mmc_command));
1728	cmd.opcode = MMC_ERASE;
1729	cmd.arg = arg;
1730	busy_timeout = mmc_erase_timeout(card, arg, qty);
1731	/*
1732	 * If the host controller supports busy signalling and the timeout for
1733	 * the erase operation does not exceed the max_busy_timeout, we should
1734	 * use an R1B response. Otherwise we need to prevent the host from doing
1735	 * hw busy detection, which is done by converting to an R1 response instead.
1736	 */
1737	if (card->host->max_busy_timeout &&
1738	    busy_timeout > card->host->max_busy_timeout) {
1739		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1740	} else {
1741		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1742		cmd.busy_timeout = busy_timeout;
1743		use_r1b_resp = true;
1744	}
1745
1746	err = mmc_wait_for_cmd(card->host, &cmd, 0);
1747	if (err) {
1748		pr_err("mmc_erase: erase error %d, status %#x\n",
1749		       err, cmd.resp[0]);
1750		err = -EIO;
1751		goto out;
1752	}
1753
1754	if (mmc_host_is_spi(card->host))
1755		goto out;
1756
1757	/*
1758	 * When R1B + MMC_CAP_WAIT_WHILE_BUSY is used, the polling
1759	 * shall be avoided.
1760	 */
1761	if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
1762		goto out;
1763
1764	timeout = jiffies + msecs_to_jiffies(busy_timeout);
1765	do {
1766		memset(&cmd, 0, sizeof(struct mmc_command));
1767		cmd.opcode = MMC_SEND_STATUS;
1768		cmd.arg = card->rca << 16;
1769		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1770		/* Do not retry else we can't see errors */
1771		err = mmc_wait_for_cmd(card->host, &cmd, 0);
1772		if (err || R1_STATUS(cmd.resp[0])) {
1773			pr_err("error %d requesting status %#x\n",
1774				err, cmd.resp[0]);
1775			err = -EIO;
1776			goto out;
1777		}
1778
1779		/* Timeout if the device never becomes ready for data and
1780		 * never leaves the program state.
1781		 */
1782		if (time_after(jiffies, timeout)) {
1783			pr_err("%s: Card stuck in programming state! %s\n",
1784				mmc_hostname(card->host), __func__);
1785			err = -EIO;
1786			goto out;
1787		}
1788		if ((cmd.resp[0] & R1_READY_FOR_DATA) &&
1789		    R1_CURRENT_STATE(cmd.resp[0]) != R1_STATE_PRG)
1790			break;
1791
1792		usleep_range(loop_udelay, loop_udelay*2);
1793		if (loop_udelay < udelay_max)
1794			loop_udelay *= 2;
1795	} while (1);
1796
1797out:
1798	mmc_retune_release(card->host);
1799	return err;
1800}
1801
1802static unsigned int mmc_align_erase_size(struct mmc_card *card,
1803					 unsigned int *from,
1804					 unsigned int *to,
1805					 unsigned int nr)
1806{
1807	unsigned int from_new = *from, nr_new = nr, rem;
1808
1809	/*
1810	 * When the 'card->erase_size' is power of 2, we can use round_up/down()
1811	 * to align the erase size efficiently.
1812	 */
1813	if (is_power_of_2(card->erase_size)) {
1814		unsigned int temp = from_new;
1815
1816		from_new = round_up(temp, card->erase_size);
1817		rem = from_new - temp;
1818
1819		if (nr_new > rem)
1820			nr_new -= rem;
1821		else
1822			return 0;
1823
1824		nr_new = round_down(nr_new, card->erase_size);
1825	} else {
1826		rem = from_new % card->erase_size;
1827		if (rem) {
1828			rem = card->erase_size - rem;
1829			from_new += rem;
1830			if (nr_new > rem)
1831				nr_new -= rem;
1832			else
1833				return 0;
1834		}
1835
1836		rem = nr_new % card->erase_size;
1837		if (rem)
1838			nr_new -= rem;
1839	}
1840
1841	if (nr_new == 0)
1842		return 0;
1843
1844	*to = from_new + nr_new;
1845	*from = from_new;
1846
1847	return nr_new;
1848}
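
/*
 * Illustrative sketch (hypothetical helper, assumed numbers): with
 * erase_size = 1024 sectors, a request starting at sector 1000 for 3000
 * sectors is shrunk to the aligned range [1024, 3072), i.e. 2048 sectors.
 */
static unsigned int __maybe_unused example_align_erase(struct mmc_card *card)
{
	unsigned int from = 1000, to, nr = 3000;

	return mmc_align_erase_size(card, &from, &to, nr);
}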
1849
1850/**
1851 * mmc_erase - erase sectors.
1852 * @card: card to erase
1853 * @from: first sector to erase
1854 * @nr: number of sectors to erase
1855 * @arg: erase command argument
1856 *
1857 * Caller must claim host before calling this function.
1858 */
1859int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
1860	      unsigned int arg)
1861{
1862	unsigned int rem, to = from + nr;
1863	int err;
1864
1865	if (!(card->host->caps & MMC_CAP_ERASE) ||
1866	    !(card->csd.cmdclass & CCC_ERASE))
1867		return -EOPNOTSUPP;
1868
1869	if (!card->erase_size)
1870		return -EOPNOTSUPP;
1871
1872	if (mmc_card_sd(card) && arg != SD_ERASE_ARG && arg != SD_DISCARD_ARG)
1873		return -EOPNOTSUPP;
1874
1875	if (mmc_card_mmc(card) && (arg & MMC_SECURE_ARGS) &&
1876	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
1877		return -EOPNOTSUPP;
1878
1879	if (mmc_card_mmc(card) && (arg & MMC_TRIM_ARGS) &&
1880	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
1881		return -EOPNOTSUPP;
1882
1883	if (arg == MMC_SECURE_ERASE_ARG) {
1884		if (from % card->erase_size || nr % card->erase_size)
1885			return -EINVAL;
1886	}
1887
1888	if (arg == MMC_ERASE_ARG)
1889		nr = mmc_align_erase_size(card, &from, &to, nr);
1890
1891	if (nr == 0)
1892		return 0;
1893
1894	if (to <= from)
1895		return -EINVAL;
1896
1897	/* 'from' and 'to' are inclusive */
1898	to -= 1;
1899
1900	/*
1901	 * Special case where only one erase-group fits in the timeout budget:
1902	 * If the region crosses an erase-group boundary in this particular
1903	 * case, we will be trimming more than one erase-group, which does not
1904	 * fit in the timeout budget of the controller, so we need to split it
1905	 * and call mmc_do_erase() twice if necessary. This special case is
1906	 * identified by the card->eg_boundary flag.
1907	 */
1908	rem = card->erase_size - (from % card->erase_size);
1909	if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
1910		err = mmc_do_erase(card, from, from + rem - 1, arg);
1911		from += rem;
1912		if ((err) || (to <= from))
1913			return err;
1914	}
1915
1916	return mmc_do_erase(card, from, to, arg);
1917}
1918EXPORT_SYMBOL(mmc_erase);
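
/*
 * Illustrative sketch (hypothetical helper): per the kernel-doc above, the
 * host must be claimed by the caller, and the erase argument should match
 * the card type. The capability check and argument choice shown here are an
 * assumption, not mandated by this file.
 */
static int __maybe_unused example_erase_range(struct mmc_card *card,
					      unsigned int from,
					      unsigned int nr)
{
	unsigned int arg = mmc_card_sd(card) ? SD_ERASE_ARG : MMC_ERASE_ARG;
	int err;

	if (!mmc_can_erase(card))
		return -EOPNOTSUPP;

	mmc_claim_host(card->host);
	err = mmc_erase(card, from, nr, arg);
	mmc_release_host(card->host);

	return err;
}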
1919
1920int mmc_can_erase(struct mmc_card *card)
1921{
1922	if ((card->host->caps & MMC_CAP_ERASE) &&
1923	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
1924		return 1;
1925	return 0;
1926}
1927EXPORT_SYMBOL(mmc_can_erase);
1928
1929int mmc_can_trim(struct mmc_card *card)
1930{
1931	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
1932	    (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
1933		return 1;
1934	return 0;
1935}
1936EXPORT_SYMBOL(mmc_can_trim);
1937
1938int mmc_can_discard(struct mmc_card *card)
1939{
1940	/*
1941	 * As there's no way to detect the discard support bit at v4.5,
1942	 * use the s/w feature support field.
1943	 */
1944	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
1945		return 1;
1946	return 0;
1947}
1948EXPORT_SYMBOL(mmc_can_discard);
1949
1950int mmc_can_sanitize(struct mmc_card *card)
1951{
1952	if (!mmc_can_trim(card) && !mmc_can_erase(card))
1953		return 0;
1954	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
1955		return 1;
1956	return 0;
1957}
1958EXPORT_SYMBOL(mmc_can_sanitize);
1959
1960int mmc_can_secure_erase_trim(struct mmc_card *card)
1961{
1962	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
1963	    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
1964		return 1;
1965	return 0;
1966}
1967EXPORT_SYMBOL(mmc_can_secure_erase_trim);
1968
1969int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
1970			    unsigned int nr)
1971{
1972	if (!card->erase_size)
1973		return 0;
1974	if (from % card->erase_size || nr % card->erase_size)
1975		return 0;
1976	return 1;
1977}
1978EXPORT_SYMBOL(mmc_erase_group_aligned);
1979
1980static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
1981					    unsigned int arg)
1982{
1983	struct mmc_host *host = card->host;
1984	unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
1985	unsigned int last_timeout = 0;
1986	unsigned int max_busy_timeout = host->max_busy_timeout ?
1987			host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;
1988
1989	if (card->erase_shift) {
1990		max_qty = UINT_MAX >> card->erase_shift;
1991		min_qty = card->pref_erase >> card->erase_shift;
1992	} else if (mmc_card_sd(card)) {
1993		max_qty = UINT_MAX;
1994		min_qty = card->pref_erase;
1995	} else {
1996		max_qty = UINT_MAX / card->erase_size;
1997		min_qty = card->pref_erase / card->erase_size;
1998	}
1999
2000	/*
2001	 * We should not only use 'host->max_busy_timeout' as the limitation
2002	 * when deciding the max discard sectors. We should set a balance value
2003	 * to improve the erase speed, while not allowing too long a timeout at
2004	 * the same time.
2005	 *
2006	 * Here we set 'card->pref_erase' as the minimal discard sectors no
2007	 * matter what size of 'host->max_busy_timeout', but if the
2008	 * 'host->max_busy_timeout' is large enough for more discard sectors,
2009	 * then we can continue to increase the max discard sectors until we
2010	 * get a balance value. In cases when the 'host->max_busy_timeout'
2011	 * isn't specified, use the default max erase timeout.
2012	 */
2013	do {
2014		y = 0;
2015		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
2016			timeout = mmc_erase_timeout(card, arg, qty + x);
2017
2018			if (qty + x > min_qty && timeout > max_busy_timeout)
2019				break;
2020
2021			if (timeout < last_timeout)
2022				break;
2023			last_timeout = timeout;
2024			y = x;
2025		}
2026		qty += y;
2027	} while (y);
2028
2029	if (!qty)
2030		return 0;
2031
2032	/*
2033	 * When specifying a sector range to trim, chances are we might cross
2034	 * an erase-group boundary even if the amount of sectors is less than
2035	 * one erase-group.
2036	 * If we can only fit one erase-group in the controller timeout budget,
2037	 * we have to care that erase-group boundaries are not crossed by a
2038	 * single trim operation. We flag that special case with "eg_boundary".
2039	 * In all other cases we can just decrement qty and pretend that we
2040	 * always touch (qty + 1) erase-groups as a simple optimization.
2041	 */
2042	if (qty == 1)
2043		card->eg_boundary = 1;
2044	else
2045		qty--;
2046
2047	/* Convert qty to sectors */
2048	if (card->erase_shift)
2049		max_discard = qty << card->erase_shift;
2050	else if (mmc_card_sd(card))
2051		max_discard = qty + 1;
2052	else
2053		max_discard = qty * card->erase_size;
2054
2055	return max_discard;
2056}
2057
2058unsigned int mmc_calc_max_discard(struct mmc_card *card)
2059{
2060	struct mmc_host *host = card->host;
2061	unsigned int max_discard, max_trim;
2062
2063	/*
2064	 * Without erase_group_def set, MMC erase timeout depends on clock
2065	 * frequency, which can change.  In that case, the best choice is
2066	 * just the preferred erase size.
2067	 */
2068	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
2069		return card->pref_erase;
2070
2071	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
2072	if (mmc_can_trim(card)) {
2073		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
2074		if (max_trim < max_discard || max_discard == 0)
2075			max_discard = max_trim;
2076	} else if (max_discard < card->erase_size) {
2077		max_discard = 0;
2078	}
2079	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
2080		mmc_hostname(host), max_discard, host->max_busy_timeout ?
2081		host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
2082	return max_discard;
2083}
2084EXPORT_SYMBOL(mmc_calc_max_discard);
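
/*
 * Illustrative sketch (hypothetical block-driver snippet): a consumer such as
 * the MMC block driver typically feeds the result into the request queue's
 * discard limit. This would live in code that includes <linux/blkdev.h>; the
 * queue argument is an assumption here.
 */
static void __maybe_unused example_set_discard_limit(struct mmc_card *card,
						     struct request_queue *q)
{
	unsigned int max_discard = mmc_calc_max_discard(card);

	if (max_discard)
		blk_queue_max_discard_sectors(q, max_discard);
}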
2085
2086bool mmc_card_is_blockaddr(struct mmc_card *card)
2087{
2088	return card ? mmc_card_blockaddr(card) : false;
2089}
2090EXPORT_SYMBOL(mmc_card_is_blockaddr);
2091
2092int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
2093{
2094	struct mmc_command cmd = {};
2095
2096	if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
2097	    mmc_card_hs400(card) || mmc_card_hs400es(card))
2098		return 0;
2099
2100	cmd.opcode = MMC_SET_BLOCKLEN;
2101	cmd.arg = blocklen;
2102	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2103	return mmc_wait_for_cmd(card->host, &cmd, 5);
2104}
2105EXPORT_SYMBOL(mmc_set_blocklen);
2106
2107static void mmc_hw_reset_for_init(struct mmc_host *host)
2108{
2109	mmc_pwrseq_reset(host);
2110
2111	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
2112		return;
2113	host->ops->hw_reset(host);
2114}
2115
2116int mmc_hw_reset(struct mmc_host *host)
2117{
2118	int ret;
2119
2120	if (!host->card)
2121		return -EINVAL;
2122
2123	mmc_bus_get(host);
2124	if (!host->bus_ops || host->bus_dead || !host->bus_ops->hw_reset) {
2125		mmc_bus_put(host);
2126		return -EOPNOTSUPP;
2127	}
2128
2129	ret = host->bus_ops->hw_reset(host);
2130	mmc_bus_put(host);
2131
2132	if (ret)
2133		pr_warn("%s: tried to HW reset card, got error %d\n",
2134			mmc_hostname(host), ret);
2135
2136	return ret;
2137}
2138EXPORT_SYMBOL(mmc_hw_reset);
2139
2140int mmc_sw_reset(struct mmc_host *host)
2141{
2142	int ret;
2143
2144	if (!host->card)
2145		return -EINVAL;
2146
2147	mmc_bus_get(host);
2148	if (!host->bus_ops || host->bus_dead || !host->bus_ops->sw_reset) {
2149		mmc_bus_put(host);
2150		return -EOPNOTSUPP;
2151	}
2152
2153	ret = host->bus_ops->sw_reset(host);
2154	mmc_bus_put(host);
2155
2156	if (ret)
2157		pr_warn("%s: tried to SW reset card, got error %d\n",
2158			mmc_hostname(host), ret);
2159
2160	return ret;
2161}
2162EXPORT_SYMBOL(mmc_sw_reset);
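
/*
 * Illustrative sketch (assumed recovery policy): an error-recovery path might
 * first try a software reset of the card and escalate to a full hardware
 * reset. The escalation order is an assumption; both helpers are typically
 * invoked with the host claimed.
 */
static int __maybe_unused example_reset_card(struct mmc_host *host)
{
	int err = mmc_sw_reset(host);

	if (err)
		err = mmc_hw_reset(host);

	return err;
}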
2163
2164static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
2165{
2166	host->f_init = freq;
2167
2168	pr_debug("%s: %s: trying to init card at %u Hz\n",
2169		mmc_hostname(host), __func__, host->f_init);
2170
2171	mmc_power_up(host, host->ocr_avail);
2172
2173	/*
2174	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
2175	 * do a hardware reset if possible.
2176	 */
2177	mmc_hw_reset_for_init(host);
2178
2179	/*
2180	 * sdio_reset sends CMD52 to reset card.  Since we do not know
2181	 * if the card is being re-initialized, just send it.  CMD52
2182	 * should be ignored by SD/eMMC cards.
2183	 * Skip it if we already know that we do not support SDIO commands
2184	 */
2185	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2186		sdio_reset(host);
2187
2188	mmc_go_idle(host);
2189
2190	if (!(host->caps2 & MMC_CAP2_NO_SD))
2191		mmc_send_if_cond(host, host->ocr_avail);
2192
2193	/* Order's important: probe SDIO, then SD, then MMC */
2194	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2195		if (!mmc_attach_sdio(host))
2196			return 0;
2197
2198	if (!(host->caps2 & MMC_CAP2_NO_SD))
2199		if (!mmc_attach_sd(host))
2200			return 0;
2201
2202	if (!(host->caps2 & MMC_CAP2_NO_MMC))
2203		if (!mmc_attach_mmc(host))
2204			return 0;
2205
2206	mmc_power_off(host);
2207	return -EIO;
2208}
2209
2210int _mmc_detect_card_removed(struct mmc_host *host)
2211{
2212	int ret;
2213
2214	if (!host->card || mmc_card_removed(host->card))
2215		return 1;
2216
2217	ret = host->bus_ops->alive(host);
2218
2219	/*
2220	 * Card detect status and alive check may be out of sync if card is
2221	 * removed slowly, when card detect switch changes while card/slot
2222	 * pads are still contacted in hardware (refer to "SD Card Mechanical
2223	 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
2224	 * detect work 200ms later for this case.
2225	 */
2226	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
2227		mmc_detect_change(host, msecs_to_jiffies(200));
2228		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
2229	}
2230
2231	if (ret) {
2232		mmc_card_set_removed(host->card);
2233		pr_debug("%s: card remove detected\n", mmc_hostname(host));
2234	}
2235
2236	return ret;
2237}
2238
2239int mmc_detect_card_removed(struct mmc_host *host)
2240{
2241	struct mmc_card *card = host->card;
2242	int ret;
2243
2244	WARN_ON(!host->claimed);
2245
2246	if (!card)
2247		return 1;
2248
2249	if (!mmc_card_is_removable(host))
2250		return 0;
2251
2252	ret = mmc_card_removed(card);
2253	/*
2254	 * The card will be considered unchanged unless we have been asked to
2255	 * detect a change or host requires polling to provide card detection.
2256	 */
2257	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
2258		return ret;
2259
2260	host->detect_change = 0;
2261	if (!ret) {
2262		ret = _mmc_detect_card_removed(host);
2263		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
2264			/*
2265			 * Schedule a detect work as soon as possible to let a
2266			 * rescan handle the card removal.
2267			 */
2268			cancel_delayed_work(&host->detect);
2269			_mmc_detect_change(host, 0, false);
2270		}
2271	}
2272
2273	return ret;
2274}
2275EXPORT_SYMBOL(mmc_detect_card_removed);
2276
2277void mmc_rescan(struct work_struct *work)
2278{
2279	struct mmc_host *host =
2280		container_of(work, struct mmc_host, detect.work);
2281	int i;
2282
2283	if (host->rescan_disable)
2284		return;
2285
2286	/* If there is a non-removable card registered, only scan once */
2287	if (!mmc_card_is_removable(host) && host->rescan_entered)
2288		return;
2289	host->rescan_entered = 1;
2290
2291	if (host->trigger_card_event && host->ops->card_event) {
2292		mmc_claim_host(host);
2293		host->ops->card_event(host);
2294		mmc_release_host(host);
2295		host->trigger_card_event = false;
2296	}
2297
2298	mmc_bus_get(host);
2299
2300	/*
2301	 * if there is a _removable_ card registered, check whether it is
2302	 * still present
2303	 */
2304	if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
2305		host->bus_ops->detect(host);
2306
2307	host->detect_change = 0;
2308
2309	/*
2310	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
2311	 * the card is no longer present.
2312	 */
2313	mmc_bus_put(host);
2314	mmc_bus_get(host);
2315
2316	/* if there still is a card present, stop here */
2317	if (host->bus_ops != NULL) {
2318		mmc_bus_put(host);
2319		goto out;
2320	}
2321
2322	/*
2323	 * Only we can add a new handler, so it's safe to
2324	 * release the lock here.
2325	 */
2326	mmc_bus_put(host);
2327
2328	mmc_claim_host(host);
2329	if (mmc_card_is_removable(host) && host->ops->get_cd &&
2330			host->ops->get_cd(host) == 0) {
2331		mmc_power_off(host);
2332		mmc_release_host(host);
2333		goto out;
2334	}
2335
2336	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2337		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
2338			break;
2339		if (freqs[i] <= host->f_min)
2340			break;
2341	}
2342	mmc_release_host(host);
2343
2344 out:
2345	if (host->caps & MMC_CAP_NEEDS_POLL)
2346		mmc_schedule_delayed_work(&host->detect, HZ);
2347}
2348
2349void mmc_start_host(struct mmc_host *host)
2350{
2351	host->f_init = max(freqs[0], host->f_min);
2352	host->rescan_disable = 0;
2353	host->ios.power_mode = MMC_POWER_UNDEFINED;
2354
2355	if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
2356		mmc_claim_host(host);
2357		mmc_power_up(host, host->ocr_avail);
2358		mmc_release_host(host);
2359	}
2360
2361	mmc_gpiod_request_cd_irq(host);
2362	_mmc_detect_change(host, 0, false);
2363}
2364
2365void mmc_stop_host(struct mmc_host *host)
2366{
2367	if (host->slot.cd_irq >= 0) {
2368		mmc_gpio_set_cd_wake(host, false);
2369		disable_irq(host->slot.cd_irq);
2370	}
2371
2372	host->rescan_disable = 1;
2373	cancel_delayed_work_sync(&host->detect);
2374
2375	/* clear pm flags now and let card drivers set them as needed */
2376	host->pm_flags = 0;
2377
2378	mmc_bus_get(host);
2379	if (host->bus_ops && !host->bus_dead) {
2380		/* Calling bus_ops->remove() with a claimed host can deadlock */
2381		host->bus_ops->remove(host);
2382		mmc_claim_host(host);
2383		mmc_detach_bus(host);
2384		mmc_power_off(host);
2385		mmc_release_host(host);
2386		mmc_bus_put(host);
2387		return;
2388	}
2389	mmc_bus_put(host);
2390
2391	mmc_claim_host(host);
2392	mmc_power_off(host);
2393	mmc_release_host(host);
2394}
2395
2396#ifdef CONFIG_PM_SLEEP
2397/* Do the card removal on suspend if the card is assumed removable.
2398 * Do that in the PM notifier while userspace isn't yet frozen, so we
2399 * will be able to sync the card.
2400 */
2401static int mmc_pm_notify(struct notifier_block *notify_block,
2402			unsigned long mode, void *unused)
2403{
2404	struct mmc_host *host = container_of(
2405		notify_block, struct mmc_host, pm_notify);
2406	unsigned long flags;
2407	int err = 0;
2408
2409	switch (mode) {
2410	case PM_HIBERNATION_PREPARE:
2411	case PM_SUSPEND_PREPARE:
2412	case PM_RESTORE_PREPARE:
2413		spin_lock_irqsave(&host->lock, flags);
2414		host->rescan_disable = 1;
2415		spin_unlock_irqrestore(&host->lock, flags);
2416		cancel_delayed_work_sync(&host->detect);
2417
2418		if (!host->bus_ops)
2419			break;
2420
2421		/* Validate prerequisites for suspend */
2422		if (host->bus_ops->pre_suspend)
2423			err = host->bus_ops->pre_suspend(host);
2424		if (!err)
2425			break;
2426
2427		if (!mmc_card_is_removable(host)) {
2428			dev_warn(mmc_dev(host),
2429				 "pre_suspend failed for non-removable host: "
2430				 "%d\n", err);
2431			/* Avoid removing non-removable hosts */
2432			break;
2433		}
2434
2435		/* Calling bus_ops->remove() with a claimed host can deadlock */
2436		host->bus_ops->remove(host);
2437		mmc_claim_host(host);
2438		mmc_detach_bus(host);
2439		mmc_power_off(host);
2440		mmc_release_host(host);
2441		host->pm_flags = 0;
2442		break;
2443
2444	case PM_POST_SUSPEND:
2445	case PM_POST_HIBERNATION:
2446	case PM_POST_RESTORE:
2447
2448		spin_lock_irqsave(&host->lock, flags);
2449		host->rescan_disable = 0;
2450		spin_unlock_irqrestore(&host->lock, flags);
2451		_mmc_detect_change(host, 0, false);
2452
2453	}
2454
2455	return 0;
2456}
2457
2458void mmc_register_pm_notifier(struct mmc_host *host)
2459{
2460	host->pm_notify.notifier_call = mmc_pm_notify;
2461	register_pm_notifier(&host->pm_notify);
2462}
2463
2464void mmc_unregister_pm_notifier(struct mmc_host *host)
2465{
2466	unregister_pm_notifier(&host->pm_notify);
2467}
2468#endif
2469
2470static int __init mmc_init(void)
2471{
2472	int ret;
2473
2474	ret = mmc_register_bus();
2475	if (ret)
2476		return ret;
2477
2478	ret = mmc_register_host_class();
2479	if (ret)
2480		goto unregister_bus;
2481
2482	ret = sdio_register_bus();
2483	if (ret)
2484		goto unregister_host_class;
2485
2486	return 0;
2487
2488unregister_host_class:
2489	mmc_unregister_host_class();
2490unregister_bus:
2491	mmc_unregister_bus();
2492	return ret;
2493}
2494
2495static void __exit mmc_exit(void)
2496{
2497	sdio_unregister_bus();
2498	mmc_unregister_host_class();
2499	mmc_unregister_bus();
2500}
2501
2502subsys_initcall(mmc_init);
2503module_exit(mmc_exit);
2504
2505MODULE_LICENSE("GPL");