v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  linux/drivers/mmc/core/mmc_ops.c
   4 *
   5 *  Copyright 2006-2007 Pierre Ossman
   6 */
   7
   8#include <linux/slab.h>
   9#include <linux/export.h>
  10#include <linux/types.h>
  11#include <linux/scatterlist.h>
  12
  13#include <linux/mmc/host.h>
  14#include <linux/mmc/card.h>
  15#include <linux/mmc/mmc.h>
  16
  17#include "core.h"
  18#include "card.h"
  19#include "host.h"
  20#include "mmc_ops.h"
  21
  22#define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
  23#define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */
  24#define MMC_OP_COND_PERIOD_US		(4 * 1000) /* 4ms */
  25#define MMC_OP_COND_TIMEOUT_MS		1000 /* 1s */
  26
  27static const u8 tuning_blk_pattern_4bit[] = {
  28	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
  29	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
  30	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
  31	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
  32	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
  33	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
  34	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
  35	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
  36};
  37
  38static const u8 tuning_blk_pattern_8bit[] = {
  39	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
  40	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
  41	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
  42	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
  43	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
  44	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
  45	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
  46	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
  47	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
  48	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
  49	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
  50	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
  51	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
  52	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
  53	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
  54	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
  55};
  56
  57struct mmc_busy_data {
  58	struct mmc_card *card;
  59	bool retry_crc_err;
  60	enum mmc_busy_cmd busy_cmd;
  61};
  62
  63struct mmc_op_cond_busy_data {
  64	struct mmc_host *host;
  65	u32 ocr;
  66	struct mmc_command *cmd;
  67};
  68
  69int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
  70{
  71	int err;
  72	struct mmc_command cmd = {};
  73
  74	cmd.opcode = MMC_SEND_STATUS;
  75	if (!mmc_host_is_spi(card->host))
  76		cmd.arg = card->rca << 16;
  77	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
  78
  79	err = mmc_wait_for_cmd(card->host, &cmd, retries);
  80	if (err)
  81		return err;
  82
  83	/* NOTE: callers are required to understand the difference
  84	 * between "native" and SPI format status words!
  85	 */
  86	if (status)
  87		*status = cmd.resp[0];
  88
  89	return 0;
  90}
  91EXPORT_SYMBOL_GPL(__mmc_send_status);
  92
  93int mmc_send_status(struct mmc_card *card, u32 *status)
  94{
  95	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
  96}
  97EXPORT_SYMBOL_GPL(mmc_send_status);
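
A typical caller decodes the returned R1 status with the R1_CURRENT_STATE() macro from <linux/mmc/mmc.h>. A minimal sketch of such a caller (hypothetical, not part of this file):

	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN) {
		/* Card is back in the transfer state; safe to proceed. */
	}
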
  98
  99static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
 100{
 101	struct mmc_command cmd = {};
 102
 103	cmd.opcode = MMC_SELECT_CARD;
 104
 105	if (card) {
 106		cmd.arg = card->rca << 16;
 107		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 108	} else {
 109		cmd.arg = 0;
 110		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
 111	}
 112
 113	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
 114}
 115
 116int mmc_select_card(struct mmc_card *card)
 117{
 118
 119	return _mmc_select_card(card->host, card);
 120}
 121
 122int mmc_deselect_cards(struct mmc_host *host)
 123{
 124	return _mmc_select_card(host, NULL);
 125}
 126
 127/*
 128 * Write the value specified in the device tree or board code into the optional
 129 * 16-bit Driver Stage Register. This can be used to tune the rise/fall times
 130 * and drive strength of the DAT and CMD outputs. The actual meaning of a given
 131 * value is hardware dependent.
 132 * The presence of the DSR register can be determined from the CSD register,
 133 * bit 76.
 134 */
 135int mmc_set_dsr(struct mmc_host *host)
 136{
 137	struct mmc_command cmd = {};
 138
 139	cmd.opcode = MMC_SET_DSR;
 140
 141	cmd.arg = (host->dsr << 16) | 0xffff;
 142	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
 143
 144	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
 145}
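
The core only issues SET_DSR when the card advertises the register (CSD bit 76, cached as csd.dsr_imp) and a DSR value was actually supplied. A hedged sketch of that gating, as done during card initialization elsewhere in the core:

	/* Sketch: only program the DSR when both sides opted in. */
	if (card->csd.dsr_imp && host->dsr_req)
		mmc_set_dsr(host);
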
 146
 147int mmc_go_idle(struct mmc_host *host)
 148{
 149	int err;
 150	struct mmc_command cmd = {};
 151
 152	/*
 153	 * Non-SPI hosts need to prevent chipselect going active during
 154	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
 155	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
 156	 *
 157	 * SPI hosts ignore ios.chip_select; it's managed according to
 158	 * rules that must accommodate non-MMC slaves which this layer
 159	 * won't even know about.
 160	 */
 161	if (!mmc_host_is_spi(host)) {
 162		mmc_set_chip_select(host, MMC_CS_HIGH);
 163		mmc_delay(1);
 164	}
 165
 166	cmd.opcode = MMC_GO_IDLE_STATE;
 167	cmd.arg = 0;
 168	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
 169
 170	err = mmc_wait_for_cmd(host, &cmd, 0);
 171
 172	mmc_delay(1);
 173
 174	if (!mmc_host_is_spi(host)) {
 175		mmc_set_chip_select(host, MMC_CS_DONTCARE);
 176		mmc_delay(1);
 177	}
 178
 179	host->use_spi_crc = 0;
 180
 181	return err;
 182}
 183
 184static int __mmc_send_op_cond_cb(void *cb_data, bool *busy)
 185{
 186	struct mmc_op_cond_busy_data *data = cb_data;
 187	struct mmc_host *host = data->host;
 188	struct mmc_command *cmd = data->cmd;
 189	u32 ocr = data->ocr;
 190	int err = 0;
 191
 192	err = mmc_wait_for_cmd(host, cmd, 0);
 193	if (err)
 194		return err;
 195
 196	if (mmc_host_is_spi(host)) {
 197		if (!(cmd->resp[0] & R1_SPI_IDLE)) {
 198			*busy = false;
 199			return 0;
 200		}
 201	} else {
 202		if (cmd->resp[0] & MMC_CARD_BUSY) {
 203			*busy = false;
 204			return 0;
 205		}
 206	}
 207
 208	*busy = true;
 209
 210	/*
 211	 * According to eMMC specification v5.1 section 6.4.3, we
 212	 * should issue CMD1 repeatedly in the idle state until
 213	 * the eMMC is ready. Otherwise some eMMC devices seem to enter
 214	 * the inactive mode after mmc_init_card() issued CMD0 when
 215	 * the eMMC device is busy.
 216	 */
 217	if (!ocr && !mmc_host_is_spi(host))
 218		cmd->arg = cmd->resp[0] | BIT(30);
 219
 220	return 0;
 221}
 222
 223int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
 224{
 225	struct mmc_command cmd = {};
 226	int err = 0;
 227	struct mmc_op_cond_busy_data cb_data = {
 228		.host = host,
 229		.ocr = ocr,
 230		.cmd = &cmd
 231	};
 232
 233	cmd.opcode = MMC_SEND_OP_COND;
 234	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
 235	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
 236
 237	err = __mmc_poll_for_busy(host, MMC_OP_COND_PERIOD_US,
 238				  MMC_OP_COND_TIMEOUT_MS,
 239				  &__mmc_send_op_cond_cb, &cb_data);
 240	if (err)
 241		return err;
 242
 243	if (rocr && !mmc_host_is_spi(host))
 244		*rocr = cmd.resp[0];
 245
 246	return err;
 247}
 248
 249int mmc_set_relative_addr(struct mmc_card *card)
 250{
 251	struct mmc_command cmd = {};
 252
 253	cmd.opcode = MMC_SET_RELATIVE_ADDR;
 254	cmd.arg = card->rca << 16;
 255	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 256
 257	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
 258}
 259
 260static int
 261mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
 262{
 263	int err;
 264	struct mmc_command cmd = {};
 265
 266	cmd.opcode = opcode;
 267	cmd.arg = arg;
 268	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
 269
 270	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
 271	if (err)
 272		return err;
 273
 274	memcpy(cxd, cmd.resp, sizeof(u32) * 4);
 275
 276	return 0;
 277}
 278
 279/*
 280 * NOTE: the caller is required to pass a DMA-capable buffer for buf,
 281 * or an on-stack buffer (which adds some overhead in the callee).
 282 */
 283int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
 284		       u32 args, void *buf, unsigned len)
 285{
 286	struct mmc_request mrq = {};
 287	struct mmc_command cmd = {};
 288	struct mmc_data data = {};
 289	struct scatterlist sg;
 290
 291	mrq.cmd = &cmd;
 292	mrq.data = &data;
 293
 294	cmd.opcode = opcode;
 295	cmd.arg = args;
 296
 297	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
 298	 * rely on callers to never use this with "native" calls for reading
 299	 * CSD or CID.  Native versions of those commands use the R2 type,
 300	 * not R1 plus a data block.
 301	 */
 302	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 303
 304	data.blksz = len;
 305	data.blocks = 1;
 306	data.flags = MMC_DATA_READ;
 307	data.sg = &sg;
 308	data.sg_len = 1;
 309
 310	sg_init_one(&sg, buf, len);
 311
 312	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
 313		/*
 314		 * The spec states that CSD and CID accesses have a timeout
 315		 * of 64 clock cycles.
 316		 */
 317		data.timeout_ns = 0;
 318		data.timeout_clks = 64;
 319	} else
 320		mmc_set_data_timeout(&data, card);
 321
 322	mmc_wait_for_req(host, &mrq);
 323
 324	if (cmd.error)
 325		return cmd.error;
 326	if (data.error)
 327		return data.error;
 328
 329	return 0;
 330}
 331
 332static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
 333{
 334	int ret, i;
 335	__be32 *cxd_tmp;
 336
 337	cxd_tmp = kzalloc(16, GFP_KERNEL);
 338	if (!cxd_tmp)
 339		return -ENOMEM;
 340
 341	ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16);
 342	if (ret)
 343		goto err;
 344
 345	for (i = 0; i < 4; i++)
 346		cxd[i] = be32_to_cpu(cxd_tmp[i]);
 347
 348err:
 349	kfree(cxd_tmp);
 350	return ret;
 351}
 352
 353int mmc_send_csd(struct mmc_card *card, u32 *csd)
 354{
 355	if (mmc_host_is_spi(card->host))
 356		return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);
 357
 358	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
 359				MMC_SEND_CSD);
 360}
 361
 362int mmc_send_cid(struct mmc_host *host, u32 *cid)
 363{
 364	if (mmc_host_is_spi(host))
 365		return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);
 366
 367	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
 368}
 369
 370int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
 371{
 372	int err;
 373	u8 *ext_csd;
 374
 375	if (!card || !new_ext_csd)
 376		return -EINVAL;
 377
 378	if (!mmc_can_ext_csd(card))
 379		return -EOPNOTSUPP;
 380
 381	/*
 382	 * As the ext_csd is so large and mostly unused, we don't store the
 383	 * raw block in mmc_card.
 384	 */
 385	ext_csd = kzalloc(512, GFP_KERNEL);
 386	if (!ext_csd)
 387		return -ENOMEM;
 388
 389	err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0, ext_csd,
 390				512);
 391	if (err)
 392		kfree(ext_csd);
 393	else
 394		*new_ext_csd = ext_csd;
 395
 396	return err;
 397}
 398EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
 399
 400int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
 401{
 402	struct mmc_command cmd = {};
 403	int err;
 404
 405	cmd.opcode = MMC_SPI_READ_OCR;
 406	cmd.arg = highcap ? (1 << 30) : 0;
 407	cmd.flags = MMC_RSP_SPI_R3;
 408
 409	err = mmc_wait_for_cmd(host, &cmd, 0);
 410
 411	*ocrp = cmd.resp[1];
 412	return err;
 413}
 414
 415int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
 416{
 417	struct mmc_command cmd = {};
 418	int err;
 419
 420	cmd.opcode = MMC_SPI_CRC_ON_OFF;
 421	cmd.flags = MMC_RSP_SPI_R1;
 422	cmd.arg = use_crc;
 423
 424	err = mmc_wait_for_cmd(host, &cmd, 0);
 425	if (!err)
 426		host->use_spi_crc = use_crc;
 427	return err;
 428}
 429
 430static int mmc_switch_status_error(struct mmc_host *host, u32 status)
 431{
 432	if (mmc_host_is_spi(host)) {
 433		if (status & R1_SPI_ILLEGAL_COMMAND)
 434			return -EBADMSG;
 435	} else {
 436		if (R1_STATUS(status))
 437			pr_warn("%s: unexpected status %#x after switch\n",
 438				mmc_hostname(host), status);
 439		if (status & R1_SWITCH_ERROR)
 440			return -EBADMSG;
 441	}
 442	return 0;
 443}
 444
 445/* Caller must hold re-tuning */
 446int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
 447{
 448	u32 status;
 449	int err;
 450
 451	err = mmc_send_status(card, &status);
 452	if (!crc_err_fatal && err == -EILSEQ)
 453		return 0;
 454	if (err)
 455		return err;
 456
 457	return mmc_switch_status_error(card->host, status);
 458}
 459
 460static int mmc_busy_cb(void *cb_data, bool *busy)
 461{
 462	struct mmc_busy_data *data = cb_data;
 463	struct mmc_host *host = data->card->host;
 464	u32 status = 0;
 465	int err;
 466
 467	if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
 468		*busy = host->ops->card_busy(host);
 469		return 0;
 470	}
 471
 472	err = mmc_send_status(data->card, &status);
 473	if (data->retry_crc_err && err == -EILSEQ) {
 474		*busy = true;
 475		return 0;
 476	}
 477	if (err)
 478		return err;
 479
 480	switch (data->busy_cmd) {
 481	case MMC_BUSY_CMD6:
 482		err = mmc_switch_status_error(host, status);
 483		break;
 484	case MMC_BUSY_ERASE:
 485		err = R1_STATUS(status) ? -EIO : 0;
 486		break;
 487	case MMC_BUSY_HPI:
 488	case MMC_BUSY_EXTR_SINGLE:
 489	case MMC_BUSY_IO:
 490		break;
 491	default:
 492		err = -EINVAL;
 493	}
 494
 495	if (err)
 496		return err;
 497
 498	*busy = !mmc_ready_for_data(status);
 499	return 0;
 500}
 501
 502int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us,
 503			unsigned int timeout_ms,
 504			int (*busy_cb)(void *cb_data, bool *busy),
 505			void *cb_data)
 506{
 507	int err;
 508	unsigned long timeout;
 509	unsigned int udelay = period_us ? period_us : 32, udelay_max = 32768;
 510	bool expired = false;
 511	bool busy = false;
 512
 513	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
 514	do {
 515		/*
 516		 * Due to the possibility of being preempted while polling,
 517		 * check the expiration time first.
 518		 */
 519		expired = time_after(jiffies, timeout);
 520
 521		err = (*busy_cb)(cb_data, &busy);
 522		if (err)
 523			return err;
 524
 525		/* Timeout if the device still remains busy. */
 526		if (expired && busy) {
 527			pr_err("%s: Card stuck being busy! %s\n",
 528				mmc_hostname(host), __func__);
 529			return -ETIMEDOUT;
 530		}
 531
 532		/* Throttle the polling rate to avoid hogging the CPU. */
 533		if (busy) {
 534			usleep_range(udelay, udelay * 2);
 535			if (udelay < udelay_max)
 536				udelay *= 2;
 537		}
 538	} while (busy);
 539
 540	return 0;
 541}
 542EXPORT_SYMBOL_GPL(__mmc_poll_for_busy);
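
Code outside this file can supply its own busy callback. The contract is small: set *busy and return 0, or return a negative error to abort the poll. A hedged sketch with a hypothetical callback and caller:

struct my_busy_data {
	struct mmc_card *card;
};

/* Hypothetical callback: treat the PRG state as busy. */
static int my_busy_cb(void *cb_data, bool *busy)
{
	struct my_busy_data *data = cb_data;
	u32 status;
	int err;

	err = mmc_send_status(data->card, &status);
	if (err)
		return err;

	*busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
	return 0;
}

/* Caller sketch: start at a 1 ms poll period, give up after 500 ms. */
static int my_wait_ready(struct mmc_card *card)
{
	struct my_busy_data data = { .card = card };

	return __mmc_poll_for_busy(card->host, 1000, 500,
				   &my_busy_cb, &data);
}
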
 543
 544int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
 545		      bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
 546{
 547	struct mmc_host *host = card->host;
 548	struct mmc_busy_data cb_data;
 549
 550	cb_data.card = card;
 551	cb_data.retry_crc_err = retry_crc_err;
 552	cb_data.busy_cmd = busy_cmd;
 553
 554	return __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_busy_cb, &cb_data);
 555}
 556EXPORT_SYMBOL_GPL(mmc_poll_for_busy);
 557
 558bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
 559			  unsigned int timeout_ms)
 560{
 561	/*
 562	 * If the max_busy_timeout of the host is specified, make sure it's
 563	 * enough to fit the used timeout_ms. In case it's not, let's instruct
 564	 * the host to avoid HW busy detection, by converting to an R1 response
 565	 * instead of an R1B. Note, some hosts require R1B, which also means
 566	 * they are on their own when it comes to dealing with the busy timeout.
 567	 */
 568	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
 569	    (timeout_ms > host->max_busy_timeout)) {
 570		cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
 571		return false;
 572	}
 573
 574	cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
 575	cmd->busy_timeout = timeout_ms;
 576	return true;
 577}
 578EXPORT_SYMBOL_GPL(mmc_prepare_busy_cmd);
 579
 580/**
 581 *	__mmc_switch - modify EXT_CSD register
 582 *	@card: the MMC card associated with the data transfer
 583 *	@set: cmd set values
 584 *	@index: EXT_CSD register index
 585 *	@value: value to program into EXT_CSD register
 586 *	@timeout_ms: timeout (ms) for operation performed by register write,
 587 *                   timeout of zero implies maximum possible timeout
 588 *	@timing: new timing to change to
 589 *	@send_status: send status cmd to poll for busy
 590 *	@retry_crc_err: retry if CRC errors occur when polling with CMD13 for busy
 591 *	@retries: number of retries
 592 *
 593 *	Modifies the EXT_CSD register for selected card.
 594 */
 595int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
 596		unsigned int timeout_ms, unsigned char timing,
 597		bool send_status, bool retry_crc_err, unsigned int retries)
 598{
 599	struct mmc_host *host = card->host;
 600	int err;
 601	struct mmc_command cmd = {};
 602	bool use_r1b_resp;
 603	unsigned char old_timing = host->ios.timing;
 604
 605	mmc_retune_hold(host);
 606
 607	if (!timeout_ms) {
 608		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
 609			mmc_hostname(host));
 610		timeout_ms = card->ext_csd.generic_cmd6_time;
 611	}
 612
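	/*
	 * CMD6 argument layout per the eMMC spec: access mode in bits
	 * [25:24] (write byte here), EXT_CSD byte index in [23:16], the
	 * value to write in [15:8] and the command set in [2:0].
	 */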
 613	cmd.opcode = MMC_SWITCH;
 614	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
 615		  (index << 16) |
 616		  (value << 8) |
 617		  set;
 618	use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);
 619
 620	err = mmc_wait_for_cmd(host, &cmd, retries);
 621	if (err)
 622		goto out;
 623
 624	/* If SPI, or if HW busy detection was used above, we don't need to poll. */
 625	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
 626		mmc_host_is_spi(host))
 627		goto out_tim;
 628
 629	/*
 630	 * If the host doesn't support HW polling via the ->card_busy() ops and
 631	 * it's not allowed to poll by using CMD13, then we need to rely on
 632	 * waiting the stated timeout to be sufficient.
 633	 */
 634	if (!send_status && !host->ops->card_busy) {
 635		mmc_delay(timeout_ms);
 636		goto out_tim;
 637	}
 638
 639	/* Let's try to poll to find out when the command is completed. */
 640	err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
 641	if (err)
 642		goto out;
 643
 644out_tim:
 645	/* Switch to the new timing before checking the switch status. */
 646	if (timing)
 647		mmc_set_timing(host, timing);
 648
 649	if (send_status) {
 650		err = mmc_switch_status(card, true);
 651		if (err && timing)
 652			mmc_set_timing(host, old_timing);
 653	}
 654out:
 655	mmc_retune_release(host);
 656
 657	return err;
 658}
 659
 660int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
 661		unsigned int timeout_ms)
 662{
 663	return __mmc_switch(card, set, index, value, timeout_ms, 0,
 664			    true, false, MMC_CMD_RETRIES);
 665}
 666EXPORT_SYMBOL_GPL(mmc_switch);
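
mmc_switch() is the usual entry point for single-byte EXT_CSD updates. A hedged sketch of a typical call, mirroring how the core turns on the device's volatile cache (illustrative only):

	/* Sketch: enable the cache, bounded by the generic CMD6 timeout. */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CACHE_CTRL,
			 1, card->ext_csd.generic_cmd6_time);
	if (err)
		pr_warn("%s: enabling cache failed %d\n",
			mmc_hostname(card->host), err);
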
 667
 668int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
 669{
 670	struct mmc_request mrq = {};
 671	struct mmc_command cmd = {};
 672	struct mmc_data data = {};
 673	struct scatterlist sg;
 674	struct mmc_ios *ios = &host->ios;
 675	const u8 *tuning_block_pattern;
 676	int size, err = 0;
 677	u8 *data_buf;
 678
 679	if (ios->bus_width == MMC_BUS_WIDTH_8) {
 680		tuning_block_pattern = tuning_blk_pattern_8bit;
 681		size = sizeof(tuning_blk_pattern_8bit);
 682	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
 683		tuning_block_pattern = tuning_blk_pattern_4bit;
 684		size = sizeof(tuning_blk_pattern_4bit);
 685	} else
 686		return -EINVAL;
 687
 688	data_buf = kzalloc(size, GFP_KERNEL);
 689	if (!data_buf)
 690		return -ENOMEM;
 691
 692	mrq.cmd = &cmd;
 693	mrq.data = &data;
 694
 695	cmd.opcode = opcode;
 696	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 697
 698	data.blksz = size;
 699	data.blocks = 1;
 700	data.flags = MMC_DATA_READ;
 701
 702	/*
 703	 * According to the tuning specs, the tuning process
 704	 * normally takes fewer than 40 executions of CMD19,
 705	 * and the timeout value should be shorter than 150 ms.
 706	 */
 707	data.timeout_ns = 150 * NSEC_PER_MSEC;
 708
 709	data.sg = &sg;
 710	data.sg_len = 1;
 711	sg_init_one(&sg, data_buf, size);
 712
 713	mmc_wait_for_req(host, &mrq);
 714
 715	if (cmd_error)
 716		*cmd_error = cmd.error;
 717
 718	if (cmd.error) {
 719		err = cmd.error;
 720		goto out;
 721	}
 722
 723	if (data.error) {
 724		err = data.error;
 725		goto out;
 726	}
 727
 728	if (memcmp(data_buf, tuning_block_pattern, size))
 729		err = -EIO;
 730
 731out:
 732	kfree(data_buf);
 733	return err;
 734}
 735EXPORT_SYMBOL_GPL(mmc_send_tuning);
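
Host drivers usually call mmc_send_tuning() from their ->execute_tuning() hook, sweeping sample phases and keeping one where the pattern reads back intact. A condensed sketch; my_set_sample_phase() is a hypothetical controller-specific helper:

static int my_execute_tuning(struct mmc_host *host, u32 opcode)
{
	int phase, best = -1;

	for (phase = 0; phase < 16; phase++) {
		my_set_sample_phase(host, phase);	/* hypothetical */
		if (!mmc_send_tuning(host, opcode, NULL))
			best = phase;	/* tuning block matched */
	}

	if (best < 0)
		return -EIO;

	my_set_sample_phase(host, best);
	return 0;
}
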
 736
 737int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
 738{
 739	struct mmc_command cmd = {};
 740
 741	/*
 742	 * The eMMC specification states that CMD12 can be used to stop a tuning
 743	 * command, but the SD specification does not, so do nothing unless it is
 744	 * eMMC.
 745	 */
 746	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
 747		return 0;
 748
 749	cmd.opcode = MMC_STOP_TRANSMISSION;
 750	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
 751
 752	/*
 753	 * For drivers that override R1 to R1b, set an arbitrary timeout based
 754	 * on the tuning timeout i.e. 150ms.
 755	 */
 756	cmd.busy_timeout = 150;
 757
 758	return mmc_wait_for_cmd(host, &cmd, 0);
 759}
 760EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);
 761
 762static int
 763mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
 764		  u8 len)
 765{
 766	struct mmc_request mrq = {};
 767	struct mmc_command cmd = {};
 768	struct mmc_data data = {};
 769	struct scatterlist sg;
 770	u8 *data_buf;
 771	u8 *test_buf;
 772	int i, err;
 773	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
 774	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
 775
 776	/* dma onto stack is unsafe/nonportable, but callers to this
 777	 * routine normally provide temporary on-stack buffers ...
 778	 */
 779	data_buf = kmalloc(len, GFP_KERNEL);
 780	if (!data_buf)
 781		return -ENOMEM;
 782
 783	if (len == 8)
 784		test_buf = testdata_8bit;
 785	else if (len == 4)
 786		test_buf = testdata_4bit;
 787	else {
 788		pr_err("%s: Invalid bus_width %d\n",
 789		       mmc_hostname(host), len);
 790		kfree(data_buf);
 791		return -EINVAL;
 792	}
 793
 794	if (opcode == MMC_BUS_TEST_W)
 795		memcpy(data_buf, test_buf, len);
 796
 797	mrq.cmd = &cmd;
 798	mrq.data = &data;
 799	cmd.opcode = opcode;
 800	cmd.arg = 0;
 801
 802	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
 803	 * rely on callers to never use this with "native" calls for reading
 804	 * CSD or CID.  Native versions of those commands use the R2 type,
 805	 * not R1 plus a data block.
 806	 */
 807	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 808
 809	data.blksz = len;
 810	data.blocks = 1;
 811	if (opcode == MMC_BUS_TEST_R)
 812		data.flags = MMC_DATA_READ;
 813	else
 814		data.flags = MMC_DATA_WRITE;
 815
 816	data.sg = &sg;
 817	data.sg_len = 1;
 818	mmc_set_data_timeout(&data, card);
 819	sg_init_one(&sg, data_buf, len);
 820	mmc_wait_for_req(host, &mrq);
 821	err = 0;
 822	if (opcode == MMC_BUS_TEST_R) {
 823		for (i = 0; i < len / 4; i++)
 824			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
 825				err = -EIO;
 826				break;
 827			}
 828	}
 829	kfree(data_buf);
 830
 831	if (cmd.error)
 832		return cmd.error;
 833	if (data.error)
 834		return data.error;
 835
 836	return err;
 837}
 838
 839int mmc_bus_test(struct mmc_card *card, u8 bus_width)
 840{
 841	int width;
 842
 843	if (bus_width == MMC_BUS_WIDTH_8)
 844		width = 8;
 845	else if (bus_width == MMC_BUS_WIDTH_4)
 846		width = 4;
 847	else if (bus_width == MMC_BUS_WIDTH_1)
 848		return 0; /* no need for test */
 849	else
 850		return -EINVAL;
 851
 852	/*
 853	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
 854	 * is a problem.  This improves chances that the test will work.
 855	 */
 856	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
 857	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
 858}
 859
 860static int mmc_send_hpi_cmd(struct mmc_card *card)
 861{
 862	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
 863	struct mmc_host *host = card->host;
 864	bool use_r1b_resp = false;
 865	struct mmc_command cmd = {};
 866	int err;
 867
 868	cmd.opcode = card->ext_csd.hpi_cmd;
 869	cmd.arg = card->rca << 16 | 1;
 870	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 871
 872	if (cmd.opcode == MMC_STOP_TRANSMISSION)
 873		use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
 874						    busy_timeout_ms);
 875
 876	err = mmc_wait_for_cmd(host, &cmd, 0);
 877	if (err) {
 878		pr_warn("%s: HPI error %d. Command response %#x\n",
 879			mmc_hostname(host), err, cmd.resp[0]);
 880		return err;
 881	}
 882
 883	/* No need to poll when using HW busy detection. */
 884	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
 885		return 0;
 886
 887	/* Let's poll to find out when the HPI request completes. */
 888	return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
 889}
 890
 891/**
 892 *	mmc_interrupt_hpi - issue a High Priority Interrupt
 893 *	@card: the MMC card associated with the HPI transfer
 894 *
 895 *	Issue a High Priority Interrupt and poll the card status
 896 *	until the card leaves the programming state.
 897 */
 898static int mmc_interrupt_hpi(struct mmc_card *card)
 899{
 900	int err;
 901	u32 status;
 902
 903	if (!card->ext_csd.hpi_en) {
 904		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
 905		return 1;
 906	}
 907
 908	err = mmc_send_status(card, &status);
 909	if (err) {
 910		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
 911		goto out;
 912	}
 913
 914	switch (R1_CURRENT_STATE(status)) {
 915	case R1_STATE_IDLE:
 916	case R1_STATE_READY:
 917	case R1_STATE_STBY:
 918	case R1_STATE_TRAN:
 919		/*
 920		 * In idle and transfer states, HPI is not needed and the caller
 921		 * can issue the next intended command immediately
 922		 */
 923		goto out;
 924	case R1_STATE_PRG:
 925		break;
 926	default:
 927		/* In all other states, it's illegal to issue HPI */
 928		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
 929			mmc_hostname(card->host), R1_CURRENT_STATE(status));
 930		err = -EINVAL;
 931		goto out;
 932	}
 933
 934	err = mmc_send_hpi_cmd(card);
 935out:
 936	return err;
 937}
 938
 939int mmc_can_ext_csd(struct mmc_card *card)
 940{
 941	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
 942}
 943
 944static int mmc_read_bkops_status(struct mmc_card *card)
 945{
 946	int err;
 947	u8 *ext_csd;
 948
 949	err = mmc_get_ext_csd(card, &ext_csd);
 950	if (err)
 951		return err;
 952
 953	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
 954	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
 955	kfree(ext_csd);
 956	return 0;
 957}
 958
 959/**
 960 *	mmc_run_bkops - Run BKOPS for supported cards
 961 *	@card: MMC card to run BKOPS for
 962 *
 963 *	Run background operations synchronously for cards that have manual BKOPS
 964 *	enabled, in case the card reports an urgent BKOPS level.
 965*/
 966void mmc_run_bkops(struct mmc_card *card)
 967{
 968	int err;
 969
 970	if (!card->ext_csd.man_bkops_en)
 971		return;
 972
 973	err = mmc_read_bkops_status(card);
 974	if (err) {
 975		pr_err("%s: Failed to read bkops status: %d\n",
 976		       mmc_hostname(card->host), err);
 977		return;
 978	}
 979
 980	if (!card->ext_csd.raw_bkops_status ||
 981	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
 982		return;
 983
 984	mmc_retune_hold(card->host);
 985
 986	/*
 987	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
 988	 * synchronously. In the future, we may consider starting BKOPS for less
 989	 * urgent levels by using an asynchronous background task, when idle.
 990	 */
 991	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 992			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
 993	/*
 994	 * If the BKOPS timed out, the card is probably still busy in the
 995	 * R1_STATE_PRG. Rather than continue to wait, let's try to abort
 996	 * it with a HPI command to get back into R1_STATE_TRAN.
 997	 */
 998	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
 999		pr_warn("%s: BKOPS aborted\n", mmc_hostname(card->host));
1000	else if (err)
1001		pr_warn("%s: Error %d running bkops\n",
1002			mmc_hostname(card->host), err);
1003
1004	mmc_retune_release(card->host);
1005}
1006EXPORT_SYMBOL(mmc_run_bkops);
1007
1008static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
1009{
1010	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
1011	int err;
1012
1013	if (!card->ext_csd.cmdq_support)
1014		return -EOPNOTSUPP;
1015
1016	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
1017			 val, card->ext_csd.generic_cmd6_time);
1018	if (!err)
1019		card->ext_csd.cmdq_en = enable;
1020
1021	return err;
1022}
1023
1024int mmc_cmdq_enable(struct mmc_card *card)
1025{
1026	return mmc_cmdq_switch(card, true);
1027}
1028EXPORT_SYMBOL_GPL(mmc_cmdq_enable);
1029
1030int mmc_cmdq_disable(struct mmc_card *card)
1031{
1032	return mmc_cmdq_switch(card, false);
1033}
1034EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
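
Command queuing must be switched off around requests that cannot run under CQ (the block layer does this around RPMB access, for instance). A hedged sketch of the toggle pattern:

	/* Sketch: run a CQ-incompatible request with the queue disabled. */
	bool cmdq_was_en = card->ext_csd.cmdq_en;
	int err = 0;

	if (cmdq_was_en)
		err = mmc_cmdq_disable(card);
	if (err)
		return err;

	/* ... issue the non-CQ request here ... */

	if (cmdq_was_en)
		mmc_cmdq_enable(card);
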
1035
1036int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
1037{
1038	struct mmc_host *host = card->host;
1039	int err;
1040
1041	if (!mmc_can_sanitize(card)) {
1042		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
1043		return -EOPNOTSUPP;
1044	}
1045
1046	if (!timeout_ms)
1047		timeout_ms = MMC_SANITIZE_TIMEOUT_MS;
1048
1049	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));
1050
1051	mmc_retune_hold(host);
1052
1053	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
1054			   1, timeout_ms, 0, true, false, 0);
1055	if (err)
1056		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);
1057
1058	/*
1059	 * If the sanitize operation timed out, the card is probably still busy
1060	 * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
1061	 * it with a HPI command to get back into R1_STATE_TRAN.
1062	 */
1063	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
1064		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));
1065
1066	mmc_retune_release(host);
1067
1068	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
1069	return err;
1070}
1071EXPORT_SYMBOL_GPL(mmc_sanitize);
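
A hedged caller sketch; passing 0 falls back on the MMC_SANITIZE_TIMEOUT_MS ceiling defined at the top of the file:

	/* Sketch: sanitize with the default 240 s timeout. */
	err = mmc_sanitize(card, 0);
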
v4.17
 
   1/*
   2 *  linux/drivers/mmc/core/mmc_ops.c
   3 *
   4 *  Copyright 2006-2007 Pierre Ossman
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or (at
   9 * your option) any later version.
  10 */
  11
  12#include <linux/slab.h>
  13#include <linux/export.h>
  14#include <linux/types.h>
  15#include <linux/scatterlist.h>
  16
  17#include <linux/mmc/host.h>
  18#include <linux/mmc/card.h>
  19#include <linux/mmc/mmc.h>
  20
  21#include "core.h"
  22#include "card.h"
  23#include "host.h"
  24#include "mmc_ops.h"
  25
  26#define MMC_OPS_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */
  27
  28static const u8 tuning_blk_pattern_4bit[] = {
  29	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
  30	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
  31	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
  32	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
  33	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
  34	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
  35	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
  36	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
  37};
  38
  39static const u8 tuning_blk_pattern_8bit[] = {
  40	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
  41	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
  42	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
  43	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
  44	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
  45	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
  46	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
  47	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
  48	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
  49	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
  50	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
  51	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
  52	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
  53	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
  54	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
  55	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
  56};
  57
  58int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
  59{
  60	int err;
  61	struct mmc_command cmd = {};
  62
  63	cmd.opcode = MMC_SEND_STATUS;
  64	if (!mmc_host_is_spi(card->host))
  65		cmd.arg = card->rca << 16;
  66	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
  67
  68	err = mmc_wait_for_cmd(card->host, &cmd, retries);
  69	if (err)
  70		return err;
  71
  72	/* NOTE: callers are required to understand the difference
  73	 * between "native" and SPI format status words!
  74	 */
  75	if (status)
  76		*status = cmd.resp[0];
  77
  78	return 0;
  79}
  80EXPORT_SYMBOL_GPL(__mmc_send_status);
  81
  82int mmc_send_status(struct mmc_card *card, u32 *status)
  83{
  84	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
  85}
  86EXPORT_SYMBOL_GPL(mmc_send_status);
  87
  88static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
  89{
  90	struct mmc_command cmd = {};
  91
  92	cmd.opcode = MMC_SELECT_CARD;
  93
  94	if (card) {
  95		cmd.arg = card->rca << 16;
  96		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
  97	} else {
  98		cmd.arg = 0;
  99		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
 100	}
 101
 102	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
 103}
 104
 105int mmc_select_card(struct mmc_card *card)
 106{
 107
 108	return _mmc_select_card(card->host, card);
 109}
 110
 111int mmc_deselect_cards(struct mmc_host *host)
 112{
 113	return _mmc_select_card(host, NULL);
 114}
 115
 116/*
 117 * Write the value specified in the device tree or board code into the optional
 118 * 16-bit Driver Stage Register. This can be used to tune the rise/fall times
 119 * and drive strength of the DAT and CMD outputs. The actual meaning of a given
 120 * value is hardware dependent.
 121 * The presence of the DSR register can be determined from the CSD register,
 122 * bit 76.
 123 */
 124int mmc_set_dsr(struct mmc_host *host)
 125{
 126	struct mmc_command cmd = {};
 127
 128	cmd.opcode = MMC_SET_DSR;
 129
 130	cmd.arg = (host->dsr << 16) | 0xffff;
 131	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
 132
 133	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
 134}
 135
 136int mmc_go_idle(struct mmc_host *host)
 137{
 138	int err;
 139	struct mmc_command cmd = {};
 140
 141	/*
 142	 * Non-SPI hosts need to prevent chipselect going active during
 143	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
 144	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
 145	 *
 146	 * SPI hosts ignore ios.chip_select; it's managed according to
 147	 * rules that must accommodate non-MMC slaves which this layer
 148	 * won't even know about.
 149	 */
 150	if (!mmc_host_is_spi(host)) {
 151		mmc_set_chip_select(host, MMC_CS_HIGH);
 152		mmc_delay(1);
 153	}
 154
 155	cmd.opcode = MMC_GO_IDLE_STATE;
 156	cmd.arg = 0;
 157	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
 158
 159	err = mmc_wait_for_cmd(host, &cmd, 0);
 160
 161	mmc_delay(1);
 162
 163	if (!mmc_host_is_spi(host)) {
 164		mmc_set_chip_select(host, MMC_CS_DONTCARE);
 165		mmc_delay(1);
 166	}
 167
 168	host->use_spi_crc = 0;
 169
 170	return err;
 171}
 172
 173int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
 174{
 175	struct mmc_command cmd = {};
 176	int i, err = 0;
 177
 178	cmd.opcode = MMC_SEND_OP_COND;
 179	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
 180	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
 181
 182	for (i = 100; i; i--) {
 183		err = mmc_wait_for_cmd(host, &cmd, 0);
 184		if (err)
 185			break;
 186
 187		/* if we're just probing, do a single pass */
 188		if (ocr == 0)
 189			break;
 190
 191		/* otherwise wait until reset completes */
 192		if (mmc_host_is_spi(host)) {
 193			if (!(cmd.resp[0] & R1_SPI_IDLE))
 194				break;
 195		} else {
 196			if (cmd.resp[0] & MMC_CARD_BUSY)
 197				break;
 198		}
 199
 200		err = -ETIMEDOUT;
 201
 202		mmc_delay(10);
 203	}
 204
 205	if (rocr && !mmc_host_is_spi(host))
 206		*rocr = cmd.resp[0];
 207
 208	return err;
 209}
 210
 211int mmc_set_relative_addr(struct mmc_card *card)
 212{
 213	struct mmc_command cmd = {};
 214
 215	cmd.opcode = MMC_SET_RELATIVE_ADDR;
 216	cmd.arg = card->rca << 16;
 217	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 218
 219	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
 220}
 221
 222static int
 223mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
 224{
 225	int err;
 226	struct mmc_command cmd = {};
 227
 228	cmd.opcode = opcode;
 229	cmd.arg = arg;
 230	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
 231
 232	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
 233	if (err)
 234		return err;
 235
 236	memcpy(cxd, cmd.resp, sizeof(u32) * 4);
 237
 238	return 0;
 239}
 240
 241/*
 242 * NOTE: the caller is required to pass a DMA-capable buffer for buf,
 243 * or an on-stack buffer (which adds some overhead in the callee).
 244 */
 245static int
 246mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
 247		u32 opcode, void *buf, unsigned len)
 248{
 249	struct mmc_request mrq = {};
 250	struct mmc_command cmd = {};
 251	struct mmc_data data = {};
 252	struct scatterlist sg;
 253
 254	mrq.cmd = &cmd;
 255	mrq.data = &data;
 256
 257	cmd.opcode = opcode;
 258	cmd.arg = 0;
 259
 260	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
 261	 * rely on callers to never use this with "native" calls for reading
 262	 * CSD or CID.  Native versions of those commands use the R2 type,
 263	 * not R1 plus a data block.
 264	 */
 265	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 266
 267	data.blksz = len;
 268	data.blocks = 1;
 269	data.flags = MMC_DATA_READ;
 270	data.sg = &sg;
 271	data.sg_len = 1;
 272
 273	sg_init_one(&sg, buf, len);
 274
 275	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
 276		/*
 277		 * The spec states that CSD and CID accesses have a timeout
 278		 * of 64 clock cycles.
 279		 */
 280		data.timeout_ns = 0;
 281		data.timeout_clks = 64;
 282	} else
 283		mmc_set_data_timeout(&data, card);
 284
 285	mmc_wait_for_req(host, &mrq);
 286
 287	if (cmd.error)
 288		return cmd.error;
 289	if (data.error)
 290		return data.error;
 291
 292	return 0;
 293}
 294
 295static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
 296{
 297	int ret, i;
 298	__be32 *csd_tmp;
 299
 300	csd_tmp = kzalloc(16, GFP_KERNEL);
 301	if (!csd_tmp)
 302		return -ENOMEM;
 303
 304	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
 305	if (ret)
 306		goto err;
 307
 308	for (i = 0; i < 4; i++)
 309		csd[i] = be32_to_cpu(csd_tmp[i]);
 310
 311err:
 312	kfree(csd_tmp);
 313	return ret;
 314}
 315
 316int mmc_send_csd(struct mmc_card *card, u32 *csd)
 317{
 318	if (mmc_host_is_spi(card->host))
 319		return mmc_spi_send_csd(card, csd);
 320
 321	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
 322				MMC_SEND_CSD);
 323}
 324
 325static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
 326{
 327	int ret, i;
 328	__be32 *cid_tmp;
 329
 330	cid_tmp = kzalloc(16, GFP_KERNEL);
 331	if (!cid_tmp)
 332		return -ENOMEM;
 333
 334	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
 335	if (ret)
 336		goto err;
 337
 338	for (i = 0; i < 4; i++)
 339		cid[i] = be32_to_cpu(cid_tmp[i]);
 340
 341err:
 342	kfree(cid_tmp);
 343	return ret;
 344}
 345
 346int mmc_send_cid(struct mmc_host *host, u32 *cid)
 347{
 348	if (mmc_host_is_spi(host))
 349		return mmc_spi_send_cid(host, cid);
 350
 351	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
 352}
 353
 354int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
 355{
 356	int err;
 357	u8 *ext_csd;
 358
 359	if (!card || !new_ext_csd)
 360		return -EINVAL;
 361
 362	if (!mmc_can_ext_csd(card))
 363		return -EOPNOTSUPP;
 364
 365	/*
 366	 * As the ext_csd is so large and mostly unused, we don't store the
 367	 * raw block in mmc_card.
 368	 */
 369	ext_csd = kzalloc(512, GFP_KERNEL);
 370	if (!ext_csd)
 371		return -ENOMEM;
 372
 373	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
 374				512);
 375	if (err)
 376		kfree(ext_csd);
 377	else
 378		*new_ext_csd = ext_csd;
 379
 380	return err;
 381}
 382EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
 383
 384int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
 385{
 386	struct mmc_command cmd = {};
 387	int err;
 388
 389	cmd.opcode = MMC_SPI_READ_OCR;
 390	cmd.arg = highcap ? (1 << 30) : 0;
 391	cmd.flags = MMC_RSP_SPI_R3;
 392
 393	err = mmc_wait_for_cmd(host, &cmd, 0);
 394
 395	*ocrp = cmd.resp[1];
 396	return err;
 397}
 398
 399int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
 400{
 401	struct mmc_command cmd = {};
 402	int err;
 403
 404	cmd.opcode = MMC_SPI_CRC_ON_OFF;
 405	cmd.flags = MMC_RSP_SPI_R1;
 406	cmd.arg = use_crc;
 407
 408	err = mmc_wait_for_cmd(host, &cmd, 0);
 409	if (!err)
 410		host->use_spi_crc = use_crc;
 411	return err;
 412}
 413
 414static int mmc_switch_status_error(struct mmc_host *host, u32 status)
 415{
 416	if (mmc_host_is_spi(host)) {
 417		if (status & R1_SPI_ILLEGAL_COMMAND)
 418			return -EBADMSG;
 419	} else {
 420		if (status & 0xFDFFA000)
 421			pr_warn("%s: unexpected status %#x after switch\n",
 422				mmc_hostname(host), status);
 423		if (status & R1_SWITCH_ERROR)
 424			return -EBADMSG;
 425	}
 426	return 0;
 427}
 428
 429/* Caller must hold re-tuning */
 430int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
 431{
 432	u32 status;
 433	int err;
 434
 435	err = mmc_send_status(card, &status);
 436	if (!crc_err_fatal && err == -EILSEQ)
 437		return 0;
 438	if (err)
 439		return err;
 440
 441	return mmc_switch_status_error(card->host, status);
 442}
 443
 444int mmc_switch_status(struct mmc_card *card)
 445{
 446	return __mmc_switch_status(card, true);
 447}
 448
 449static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
 450			bool send_status, bool retry_crc_err)
 451{
 452	struct mmc_host *host = card->host;
 453	int err;
 454	unsigned long timeout;
 455	u32 status = 0;
 456	bool expired = false;
 457	bool busy = false;
 458
 459	/* We have an unspecified cmd timeout, use the fallback value. */
 460	if (!timeout_ms)
 461		timeout_ms = MMC_OPS_TIMEOUT_MS;
 462
 463	/*
 464	 * In cases when not allowed to poll by using CMD13 or because we aren't
 465	 * capable of polling by using ->card_busy(), then rely on waiting the
 466	 * stated timeout to be sufficient.
 467	 */
 468	if (!send_status && !host->ops->card_busy) {
 469		mmc_delay(timeout_ms);
 470		return 0;
 471	}
 472
 473	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
 474	do {
 475		/*
 476		 * Due to the possibility of being preempted while polling,
 477		 * check the expiration time first.
 478		 */
 479		expired = time_after(jiffies, timeout);
 480
 481		if (host->ops->card_busy) {
 482			busy = host->ops->card_busy(host);
 483		} else {
 484			err = mmc_send_status(card, &status);
 485			if (retry_crc_err && err == -EILSEQ) {
 486				busy = true;
 487			} else if (err) {
 488				return err;
 489			} else {
 490				err = mmc_switch_status_error(host, status);
 491				if (err)
 492					return err;
 493				busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
 494			}
 495		}
 496
 497		/* Timeout if the device still remains busy. */
 498		if (expired && busy) {
 499			pr_err("%s: Card stuck being busy! %s\n",
 500				mmc_hostname(host), __func__);
 501			return -ETIMEDOUT;
 502		}
 503	} while (busy);
 504
 505	return 0;
 506}
 507
 508/**
 509 *	__mmc_switch - modify EXT_CSD register
 510 *	@card: the MMC card associated with the data transfer
 511 *	@set: cmd set values
 512 *	@index: EXT_CSD register index
 513 *	@value: value to program into EXT_CSD register
 514 *	@timeout_ms: timeout (ms) for operation performed by register write,
 515 *                   timeout of zero implies maximum possible timeout
 516 *	@timing: new timing to change to
 517 *	@use_busy_signal: use the busy signal as response type
 518 *	@send_status: send status cmd to poll for busy
 519 *	@retry_crc_err: retry if CRC errors occur when polling with CMD13 for busy
 520 *
 521 *	Modifies the EXT_CSD register for selected card.
 522 */
 523int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
 524		unsigned int timeout_ms, unsigned char timing,
 525		bool use_busy_signal, bool send_status, bool retry_crc_err)
 526{
 527	struct mmc_host *host = card->host;
 528	int err;
 529	struct mmc_command cmd = {};
 530	bool use_r1b_resp = use_busy_signal;
 531	unsigned char old_timing = host->ios.timing;
 532
 533	mmc_retune_hold(host);
 534
 535	/*
 536	 * If the cmd timeout and the max_busy_timeout of the host are both
 537	 * specified, let's validate them. A failure means we need to prevent
 538	 * the host from doing hw busy detection, which is done by converting
 539	 * to an R1 response instead of an R1B.
 540	 */
 541	if (timeout_ms && host->max_busy_timeout &&
 542		(timeout_ms > host->max_busy_timeout))
 543		use_r1b_resp = false;
 544
 545	cmd.opcode = MMC_SWITCH;
 546	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
 547		  (index << 16) |
 548		  (value << 8) |
 549		  set;
 550	cmd.flags = MMC_CMD_AC;
 551	if (use_r1b_resp) {
 552		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
 553		/*
 554		 * A busy_timeout of zero means the host can decide to use
 555		 * whatever value it finds suitable.
 556		 */
 557		cmd.busy_timeout = timeout_ms;
 558	} else {
 559		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
 560	}
 561
 562	if (index == EXT_CSD_SANITIZE_START)
 563		cmd.sanitize_busy = true;
 564
 565	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
 566	if (err)
 567		goto out;
 568
 569	/* No need to check card status in case of unblocking command */
 570	if (!use_busy_signal)
 571		goto out;
 572
 573	/* If SPI, or if HW busy detection was used above, we don't need to poll. */
 574	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
 575		mmc_host_is_spi(host))
 576		goto out_tim;
 577
 578	/* Let's try to poll to find out when the command is completed. */
 579	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
 580	if (err)
 581		goto out;
 582
 583out_tim:
 584	/* Switch to the new timing before checking the switch status. */
 585	if (timing)
 586		mmc_set_timing(host, timing);
 587
 588	if (send_status) {
 589		err = mmc_switch_status(card);
 590		if (err && timing)
 591			mmc_set_timing(host, old_timing);
 592	}
 593out:
 594	mmc_retune_release(host);
 595
 596	return err;
 597}
 598
 599int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
 600		unsigned int timeout_ms)
 601{
 602	return __mmc_switch(card, set, index, value, timeout_ms, 0,
 603			true, true, false);
 604}
 605EXPORT_SYMBOL_GPL(mmc_switch);
 606
 607int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
 608{
 609	struct mmc_request mrq = {};
 610	struct mmc_command cmd = {};
 611	struct mmc_data data = {};
 612	struct scatterlist sg;
 613	struct mmc_ios *ios = &host->ios;
 614	const u8 *tuning_block_pattern;
 615	int size, err = 0;
 616	u8 *data_buf;
 617
 618	if (ios->bus_width == MMC_BUS_WIDTH_8) {
 619		tuning_block_pattern = tuning_blk_pattern_8bit;
 620		size = sizeof(tuning_blk_pattern_8bit);
 621	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
 622		tuning_block_pattern = tuning_blk_pattern_4bit;
 623		size = sizeof(tuning_blk_pattern_4bit);
 624	} else
 625		return -EINVAL;
 626
 627	data_buf = kzalloc(size, GFP_KERNEL);
 628	if (!data_buf)
 629		return -ENOMEM;
 630
 631	mrq.cmd = &cmd;
 632	mrq.data = &data;
 633
 634	cmd.opcode = opcode;
 635	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 636
 637	data.blksz = size;
 638	data.blocks = 1;
 639	data.flags = MMC_DATA_READ;
 640
 641	/*
 642	 * According to the tuning specs, the tuning process
 643	 * normally takes fewer than 40 executions of CMD19,
 644	 * and the timeout value should be shorter than 150 ms.
 645	 */
 646	data.timeout_ns = 150 * NSEC_PER_MSEC;
 647
 648	data.sg = &sg;
 649	data.sg_len = 1;
 650	sg_init_one(&sg, data_buf, size);
 651
 652	mmc_wait_for_req(host, &mrq);
 653
 654	if (cmd_error)
 655		*cmd_error = cmd.error;
 656
 657	if (cmd.error) {
 658		err = cmd.error;
 659		goto out;
 660	}
 661
 662	if (data.error) {
 663		err = data.error;
 664		goto out;
 665	}
 666
 667	if (memcmp(data_buf, tuning_block_pattern, size))
 668		err = -EIO;
 669
 670out:
 671	kfree(data_buf);
 672	return err;
 673}
 674EXPORT_SYMBOL_GPL(mmc_send_tuning);
 675
 676int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
 677{
 678	struct mmc_command cmd = {};
 679
 680	/*
 681	 * The eMMC specification states that CMD12 can be used to stop a tuning
 682	 * command, but the SD specification does not, so do nothing unless it is
 683	 * eMMC.
 684	 */
 685	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
 686		return 0;
 687
 688	cmd.opcode = MMC_STOP_TRANSMISSION;
 689	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
 690
 691	/*
 692	 * For drivers that override R1 to R1b, set an arbitrary timeout based
 693	 * on the tuning timeout i.e. 150ms.
 694	 */
 695	cmd.busy_timeout = 150;
 696
 697	return mmc_wait_for_cmd(host, &cmd, 0);
 698}
 699EXPORT_SYMBOL_GPL(mmc_abort_tuning);
 700
 701static int
 702mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
 703		  u8 len)
 704{
 705	struct mmc_request mrq = {};
 706	struct mmc_command cmd = {};
 707	struct mmc_data data = {};
 708	struct scatterlist sg;
 709	u8 *data_buf;
 710	u8 *test_buf;
 711	int i, err;
 712	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
 713	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
 714
 715	/* dma onto stack is unsafe/nonportable, but callers to this
 716	 * routine normally provide temporary on-stack buffers ...
 717	 */
 718	data_buf = kmalloc(len, GFP_KERNEL);
 719	if (!data_buf)
 720		return -ENOMEM;
 721
 722	if (len == 8)
 723		test_buf = testdata_8bit;
 724	else if (len == 4)
 725		test_buf = testdata_4bit;
 726	else {
 727		pr_err("%s: Invalid bus_width %d\n",
 728		       mmc_hostname(host), len);
 729		kfree(data_buf);
 730		return -EINVAL;
 731	}
 732
 733	if (opcode == MMC_BUS_TEST_W)
 734		memcpy(data_buf, test_buf, len);
 735
 736	mrq.cmd = &cmd;
 737	mrq.data = &data;
 738	cmd.opcode = opcode;
 739	cmd.arg = 0;
 740
 741	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
 742	 * rely on callers to never use this with "native" calls for reading
 743	 * CSD or CID.  Native versions of those commands use the R2 type,
 744	 * not R1 plus a data block.
 745	 */
 746	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 747
 748	data.blksz = len;
 749	data.blocks = 1;
 750	if (opcode == MMC_BUS_TEST_R)
 751		data.flags = MMC_DATA_READ;
 752	else
 753		data.flags = MMC_DATA_WRITE;
 754
 755	data.sg = &sg;
 756	data.sg_len = 1;
 757	mmc_set_data_timeout(&data, card);
 758	sg_init_one(&sg, data_buf, len);
 759	mmc_wait_for_req(host, &mrq);
 760	err = 0;
 761	if (opcode == MMC_BUS_TEST_R) {
 762		for (i = 0; i < len / 4; i++)
 763			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
 764				err = -EIO;
 765				break;
 766			}
 767	}
 768	kfree(data_buf);
 769
 770	if (cmd.error)
 771		return cmd.error;
 772	if (data.error)
 773		return data.error;
 774
 775	return err;
 776}
 777
 778int mmc_bus_test(struct mmc_card *card, u8 bus_width)
 779{
 780	int width;
 781
 782	if (bus_width == MMC_BUS_WIDTH_8)
 783		width = 8;
 784	else if (bus_width == MMC_BUS_WIDTH_4)
 785		width = 4;
 786	else if (bus_width == MMC_BUS_WIDTH_1)
 787		return 0; /* no need for test */
 788	else
 789		return -EINVAL;
 790
 791	/*
 792	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
 793	 * is a problem.  This improves chances that the test will work.
 794	 */
 795	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
 796	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
 797}
 798
 799static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
 800{
 801	struct mmc_command cmd = {};
 802	unsigned int opcode;
 803	int err;
 804
 805	if (!card->ext_csd.hpi) {
 806		pr_warn("%s: Card doesn't support HPI command\n",
 807			mmc_hostname(card->host));
 808		return -EINVAL;
 809	}
 810
 811	opcode = card->ext_csd.hpi_cmd;
 812	if (opcode == MMC_STOP_TRANSMISSION)
 813		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
 814	else if (opcode == MMC_SEND_STATUS)
 815		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 816
 817	cmd.opcode = opcode;
 818	cmd.arg = card->rca << 16 | 1;
 819
 820	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 821	if (err) {
 822		pr_warn("%s: error %d interrupting operation. "
 823			"HPI command response %#x\n", mmc_hostname(card->host),
 824			err, cmd.resp[0]);
 825		return err;
 826	}
 827	if (status)
 828		*status = cmd.resp[0];
 829
 830	return 0;
 831}
 832
 833/**
 834 *	mmc_interrupt_hpi - issue a High Priority Interrupt
 835 *	@card: the MMC card associated with the HPI transfer
 836 *
 837 *	Issue a High Priority Interrupt and poll the card status
 838 *	until the card leaves the programming state.
 839 */
 840int mmc_interrupt_hpi(struct mmc_card *card)
 841{
 842	int err;
 843	u32 status;
 844	unsigned long prg_wait;
 845
 846	if (!card->ext_csd.hpi_en) {
 847		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
 848		return 1;
 849	}
 850
 851	err = mmc_send_status(card, &status);
 852	if (err) {
 853		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
 854		goto out;
 855	}
 856
 857	switch (R1_CURRENT_STATE(status)) {
 858	case R1_STATE_IDLE:
 859	case R1_STATE_READY:
 860	case R1_STATE_STBY:
 861	case R1_STATE_TRAN:
 862		/*
 863		 * In idle and transfer states, HPI is not needed and the caller
 864		 * can issue the next intended command immediately
 865		 */
 866		goto out;
 867	case R1_STATE_PRG:
 868		break;
 869	default:
 870		/* In all other states, it's illegal to issue HPI */
 871		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
 872			mmc_hostname(card->host), R1_CURRENT_STATE(status));
 873		err = -EINVAL;
 874		goto out;
 875	}
 876
 877	err = mmc_send_hpi_cmd(card, &status);
 878	if (err)
 879		goto out;
 880
 881	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
 882	do {
 883		err = mmc_send_status(card, &status);
 884
 885		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
 886			break;
 887		if (time_after(jiffies, prg_wait))
 888			err = -ETIMEDOUT;
 889	} while (!err);
 890
 891out:
 892	return err;
 893}
 894
 895int mmc_can_ext_csd(struct mmc_card *card)
 896{
 897	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
 898}
 899
 900/**
 901 *	mmc_stop_bkops - stop ongoing BKOPS
 902 *	@card: MMC card to check BKOPS
 903 *
 904 *	Send HPI command to stop ongoing background operations to
 905 *	allow rapid servicing of foreground operations, e.g. read/
 906 *	writes. Wait until the card comes out of the programming state
 907 *	to avoid errors in servicing read/write requests.
 908 */
 909int mmc_stop_bkops(struct mmc_card *card)
 910{
 911	int err = 0;
 912
 913	err = mmc_interrupt_hpi(card);
 914
 915	/*
 916	 * If err is -EINVAL, the card can't accept an HPI; in that
 917	 * case it should complete the BKOPS on its own.
 918	 */
 919	if (!err || (err == -EINVAL)) {
 920		mmc_card_clr_doing_bkops(card);
 921		mmc_retune_release(card->host);
 922		err = 0;
 923	}
 924
 925	return err;
 926}
 927
 928static int mmc_read_bkops_status(struct mmc_card *card)
 929{
 930	int err;
 931	u8 *ext_csd;
 932
 933	err = mmc_get_ext_csd(card, &ext_csd);
 934	if (err)
 935		return err;
 936
 937	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
 938	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
 939	kfree(ext_csd);
 940	return 0;
 941}
 942
 943/**
 944 *	mmc_start_bkops - start BKOPS for supported cards
 945 *	@card: MMC card to start BKOPS
 946 *	@from_exception: A flag to indicate if this function was
 947 *			 called due to an exception raised by the card
 948 *
 949 *	Start background operations whenever requested.
 950 *	When the urgent BKOPS bit is set in an R1 command response
 951 *	then background operations should be started immediately.
 952*/
 953void mmc_start_bkops(struct mmc_card *card, bool from_exception)
 954{
 955	int err;
 956	int timeout;
 957	bool use_busy_signal;
 958
 959	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
 960		return;
 961
 962	err = mmc_read_bkops_status(card);
 963	if (err) {
 964		pr_err("%s: Failed to read bkops status: %d\n",
 965		       mmc_hostname(card->host), err);
 966		return;
 967	}
 968
 969	if (!card->ext_csd.raw_bkops_status)
 970		return;
 971
 972	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
 973	    from_exception)
 974		return;
 975
 976	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
 977		timeout = MMC_OPS_TIMEOUT_MS;
 978		use_busy_signal = true;
 979	} else {
 980		timeout = 0;
 981		use_busy_signal = false;
 982	}
 983
 984	mmc_retune_hold(card->host);
 985
 986	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 987			EXT_CSD_BKOPS_START, 1, timeout, 0,
 988			use_busy_signal, true, false);
 989	if (err) {
 990		pr_warn("%s: Error %d starting bkops\n",
 991			mmc_hostname(card->host), err);
 992		mmc_retune_release(card->host);
 993		return;
 994	}
 995
 996	/*
 997	 * For urgent BKOPS status (LEVEL_2 and higher),
 998	 * BKOPS is executed synchronously; otherwise
 999	 * the operation remains in progress.
1000	 */
1001	if (!use_busy_signal)
1002		mmc_card_set_doing_bkops(card);
1003	else
1004		mmc_retune_release(card->host);
1005}
1006EXPORT_SYMBOL(mmc_start_bkops);
1007
1008/*
1009 * Flush the cache to the non-volatile storage.
1010 */
1011int mmc_flush_cache(struct mmc_card *card)
1012{
1013	int err = 0;
1014
1015	if (mmc_card_mmc(card) &&
1016			(card->ext_csd.cache_size > 0) &&
1017			(card->ext_csd.cache_ctrl & 1)) {
1018		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1019				EXT_CSD_FLUSH_CACHE, 1, 0);
1020		if (err)
1021			pr_err("%s: cache flush error %d\n",
1022					mmc_hostname(card->host), err);
1023	}
1024
1025	return err;
1026}
1027EXPORT_SYMBOL(mmc_flush_cache);
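
In this version of the core the flush is issued from the suspend/shutdown paths before the card loses power. A hedged sketch of such a call site:

	/* Sketch: flush the volatile cache before cutting power. */
	err = mmc_flush_cache(card);
	if (err)
		return err;
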
1028
1029static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
1030{
1031	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
1032	int err;
1033
1034	if (!card->ext_csd.cmdq_support)
1035		return -EOPNOTSUPP;
1036
1037	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
1038			 val, card->ext_csd.generic_cmd6_time);
1039	if (!err)
1040		card->ext_csd.cmdq_en = enable;
1041
1042	return err;
1043}
1044
1045int mmc_cmdq_enable(struct mmc_card *card)
1046{
1047	return mmc_cmdq_switch(card, true);
1048}
1049EXPORT_SYMBOL_GPL(mmc_cmdq_enable);
1050
1051int mmc_cmdq_disable(struct mmc_card *card)
1052{
1053	return mmc_cmdq_switch(card, false);
1054}
1055EXPORT_SYMBOL_GPL(mmc_cmdq_disable);