v6.2
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  linux/drivers/mmc/core/mmc_ops.h
   4 *
   5 *  Copyright 2006-2007 Pierre Ossman
   6 */
   7
   8#include <linux/slab.h>
   9#include <linux/export.h>
  10#include <linux/types.h>
  11#include <linux/scatterlist.h>
  12
  13#include <linux/mmc/host.h>
  14#include <linux/mmc/card.h>
  15#include <linux/mmc/mmc.h>
  16
  17#include "core.h"
  18#include "card.h"
  19#include "host.h"
  20#include "mmc_ops.h"
  21
  22#define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
  23#define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */
  24#define MMC_OP_COND_PERIOD_US		(4 * 1000) /* 4ms */
  25#define MMC_OP_COND_TIMEOUT_MS		1000 /* 1s */
  26
  27static const u8 tuning_blk_pattern_4bit[] = {
  28	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
  29	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
  30	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
  31	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
  32	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
  33	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
  34	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
  35	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
  36};
  37
  38static const u8 tuning_blk_pattern_8bit[] = {
  39	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
  40	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
  41	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
  42	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
  43	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
  44	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
  45	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
  46	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
  47	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
  48	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
  49	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
  50	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
  51	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
  52	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
  53	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
  54	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
  55};
  56
  57struct mmc_busy_data {
  58	struct mmc_card *card;
  59	bool retry_crc_err;
  60	enum mmc_busy_cmd busy_cmd;
  61};
  62
  63struct mmc_op_cond_busy_data {
  64	struct mmc_host *host;
  65	u32 ocr;
  66	struct mmc_command *cmd;
  67};
  68
  69int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
  70{
  71	int err;
  72	struct mmc_command cmd = {};
  73
  74	cmd.opcode = MMC_SEND_STATUS;
  75	if (!mmc_host_is_spi(card->host))
  76		cmd.arg = card->rca << 16;
  77	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
  78
  79	err = mmc_wait_for_cmd(card->host, &cmd, retries);
  80	if (err)
  81		return err;
  82
  83	/* NOTE: callers are required to understand the difference
  84	 * between "native" and SPI format status words!
  85	 */
  86	if (status)
  87		*status = cmd.resp[0];
  88
  89	return 0;
  90}
  91EXPORT_SYMBOL_GPL(__mmc_send_status);
  92
  93int mmc_send_status(struct mmc_card *card, u32 *status)
  94{
  95	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
  96}
  97EXPORT_SYMBOL_GPL(mmc_send_status);
  98
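A minimal usage sketch: a caller that must wait for the card to leave the programming state can poll CMD13 via mmc_send_status() and decode the response with the R1 helpers from include/linux/mmc/mmc.h:

static int example_wait_for_tran(struct mmc_card *card)
{
	u32 status;
	int err;

	/* Sketch only: spin until the card leaves R1_STATE_PRG (no timeout). */
	do {
		err = mmc_send_status(card, &status);
		if (err)
			return err;
	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);

	return 0;
}
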
  99static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
 100{
 101	struct mmc_command cmd = {};
 102
 103	cmd.opcode = MMC_SELECT_CARD;
 104
 105	if (card) {
 106		cmd.arg = card->rca << 16;
 107		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 108	} else {
 109		cmd.arg = 0;
 110		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
 111	}
 112
 113	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
 114}
 115
 116int mmc_select_card(struct mmc_card *card)
 117{
 118
 119	return _mmc_select_card(card->host, card);
 120}
 121
 122int mmc_deselect_cards(struct mmc_host *host)
 123{
 124	return _mmc_select_card(host, NULL);
 125}
 126
 127/*
 128 * Write the value specified in the device tree or board code into the optional
 129 * 16-bit Driver Stage Register. This can be used to tune the rise/fall times
 130 * and the drive strength of the DAT and CMD outputs. The actual meaning of a
 131 * given value is hardware dependent.
 132 * The presence of the DSR register can be determined from the CSD register,
 133 * bit 76.
 134 */
 135int mmc_set_dsr(struct mmc_host *host)
 136{
 137	struct mmc_command cmd = {};
 138
 139	cmd.opcode = MMC_SET_DSR;
 140
 141	cmd.arg = (host->dsr << 16) | 0xffff;
 142	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
 143
 144	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
 145}
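
For context, the core only issues CMD4 when the card's CSD advertises DSR support and the platform supplied a value. A minimal sketch of that gate, mirroring what mmc_init_card() does (dsr_imp and dsr_req are the fields involved):

	/* Program the DSR only when both the card and the platform opt in. */
	if (card->csd.dsr_imp && host->dsr_req)
		mmc_set_dsr(host);
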
 146
 147int mmc_go_idle(struct mmc_host *host)
 148{
 149	int err;
 150	struct mmc_command cmd = {};
 151
 152	/*
 153	 * Non-SPI hosts need to prevent chipselect going active during
 154	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
 155	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
 156	 *
 157	 * SPI hosts ignore ios.chip_select; it's managed according to
 158	 * rules that must accommodate non-MMC slaves which this layer
 159	 * won't even know about.
 160	 */
 161	if (!mmc_host_is_spi(host)) {
 162		mmc_set_chip_select(host, MMC_CS_HIGH);
 163		mmc_delay(1);
 164	}
 165
 166	cmd.opcode = MMC_GO_IDLE_STATE;
 167	cmd.arg = 0;
 168	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
 169
 170	err = mmc_wait_for_cmd(host, &cmd, 0);
 171
 172	mmc_delay(1);
 173
 174	if (!mmc_host_is_spi(host)) {
 175		mmc_set_chip_select(host, MMC_CS_DONTCARE);
 176		mmc_delay(1);
 177	}
 178
 179	host->use_spi_crc = 0;
 180
 181	return err;
 182}
 183
 184static int __mmc_send_op_cond_cb(void *cb_data, bool *busy)
 185{
 186	struct mmc_op_cond_busy_data *data = cb_data;
 187	struct mmc_host *host = data->host;
 188	struct mmc_command *cmd = data->cmd;
 189	u32 ocr = data->ocr;
 190	int err = 0;
 191
 192	err = mmc_wait_for_cmd(host, cmd, 0);
 193	if (err)
 194		return err;
 195
 196	if (mmc_host_is_spi(host)) {
 197		if (!(cmd->resp[0] & R1_SPI_IDLE)) {
 198			*busy = false;
 199			return 0;
 200		}
 201	} else {
 202		if (cmd->resp[0] & MMC_CARD_BUSY) {
 203			*busy = false;
 204			return 0;
 205		}
 206	}
 207
 208	*busy = true;
 209
 210	/*
 211	 * According to eMMC specification v5.1 section 6.4.3, we
 212	 * should issue CMD1 repeatedly in the idle state until
 213	 * the eMMC is ready. Otherwise some eMMC devices seem to enter
 214	 * the inactive mode after mmc_init_card() issued CMD0 when
 215	 * the eMMC device is busy.
 216	 */
 217	if (!ocr && !mmc_host_is_spi(host))
 218		cmd->arg = cmd->resp[0] | BIT(30);
 219
 220	return 0;
 221}
 222
 223int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
 224{
 225	struct mmc_command cmd = {};
 226	int err = 0;
 227	struct mmc_op_cond_busy_data cb_data = {
 228		.host = host,
 229		.ocr = ocr,
 230		.cmd = &cmd
 231	};
 232
 233	cmd.opcode = MMC_SEND_OP_COND;
 234	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
 235	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
 236
 237	err = __mmc_poll_for_busy(host, MMC_OP_COND_PERIOD_US,
 238				  MMC_OP_COND_TIMEOUT_MS,
 239				  &__mmc_send_op_cond_cb, &cb_data);
 240	if (err)
 241		return err;
 242
 243	if (rocr && !mmc_host_is_spi(host))
 244		*rocr = cmd.resp[0];
 245
 246	return err;
 247}
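
A sketch of the attach-time sequence, loosely following mmc_attach_mmc(): reset with CMD0, probe the OCR with a zero argument, then request power-up against the negotiated voltage window (bit 30 asks for sector addressing):

	u32 ocr, rocr;
	int err;

	err = mmc_go_idle(host);			/* CMD0 */
	if (!err)
		err = mmc_send_op_cond(host, 0, &ocr);	/* CMD1: probe OCR */
	if (!err)					/* CMD1: power up */
		err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
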
 248
 249int mmc_set_relative_addr(struct mmc_card *card)
 250{
 251	struct mmc_command cmd = {};
 252
 253	cmd.opcode = MMC_SET_RELATIVE_ADDR;
 254	cmd.arg = card->rca << 16;
 255	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 256
 257	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
 258}
 259
 260static int
 261mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
 262{
 263	int err;
 264	struct mmc_command cmd = {};
 265
 266	cmd.opcode = opcode;
 267	cmd.arg = arg;
 268	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
 269
 270	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
 271	if (err)
 272		return err;
 273
 274	memcpy(cxd, cmd.resp, sizeof(u32) * 4);
 275
 276	return 0;
 277}
 278
 279/*
 280 * NOTE: void *buf, the caller is required to pass a DMA-capable buffer, or
 281 * an on-stack buffer (handled with some overhead in the callee).
 282 */
 283int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
 284		       u32 args, void *buf, unsigned len)
 285{
 286	struct mmc_request mrq = {};
 287	struct mmc_command cmd = {};
 288	struct mmc_data data = {};
 289	struct scatterlist sg;
 290
 291	mrq.cmd = &cmd;
 292	mrq.data = &data;
 293
 294	cmd.opcode = opcode;
 295	cmd.arg = args;
 296
 297	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
 298	 * rely on callers to never use this with "native" calls for reading
 299	 * CSD or CID.  Native versions of those commands use the R2 type,
 300	 * not R1 plus a data block.
 301	 */
 302	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 303
 304	data.blksz = len;
 305	data.blocks = 1;
 306	data.flags = MMC_DATA_READ;
 307	data.sg = &sg;
 308	data.sg_len = 1;
 309
 310	sg_init_one(&sg, buf, len);
 311
 312	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
 313		/*
 314		 * The spec states that CSD and CID accesses have a timeout
 315		 * of 64 clock cycles.
 316		 */
 317		data.timeout_ns = 0;
 318		data.timeout_clks = 64;
 319	} else
 320		mmc_set_data_timeout(&data, card);
 321
 322	mmc_wait_for_req(host, &mrq);
 323
 324	if (cmd.error)
 325		return cmd.error;
 326	if (data.error)
 327		return data.error;
 328
 329	return 0;
 330}
 331
 332static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
 333{
 334	int ret, i;
 335	__be32 *cxd_tmp;
 336
 337	cxd_tmp = kzalloc(16, GFP_KERNEL);
 338	if (!cxd_tmp)
 339		return -ENOMEM;
 340
 341	ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16);
 342	if (ret)
 343		goto err;
 344
 345	for (i = 0; i < 4; i++)
 346		cxd[i] = be32_to_cpu(cxd_tmp[i]);
 347
 348err:
 349	kfree(cxd_tmp);
 350	return ret;
 351}
 352
 353int mmc_send_csd(struct mmc_card *card, u32 *csd)
 354{
 355	if (mmc_host_is_spi(card->host))
 356		return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);
 357
 358	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
 359				MMC_SEND_CSD);
 360}
 361
 362int mmc_send_cid(struct mmc_host *host, u32 *cid)
 363{
 364	if (mmc_host_is_spi(host))
 365		return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);
 366
 367	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
 368}
 369
 370int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
 371{
 372	int err;
 373	u8 *ext_csd;
 374
 375	if (!card || !new_ext_csd)
 376		return -EINVAL;
 377
 378	if (!mmc_can_ext_csd(card))
 379		return -EOPNOTSUPP;
 380
 381	/*
 382	 * As the ext_csd is so large and mostly unused, we don't store the
 383	 * raw block in mmc_card.
 384	 */
 385	ext_csd = kzalloc(512, GFP_KERNEL);
 386	if (!ext_csd)
 387		return -ENOMEM;
 388
 389	err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0, ext_csd,
 390				512);
 391	if (err)
 392		kfree(ext_csd);
 393	else
 394		*new_ext_csd = ext_csd;
 395
 396	return err;
 397}
 398EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
 399
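A minimal usage sketch: the caller owns the returned 512-byte buffer and must free it (EXT_CSD_REV is one of the byte offsets from include/linux/mmc/mmc.h):

	u8 *ext_csd;
	int err;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	pr_info("%s: EXT_CSD rev %u\n", mmc_hostname(card->host),
		ext_csd[EXT_CSD_REV]);
	kfree(ext_csd);		/* the caller frees the raw block */
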
 400int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
 401{
 402	struct mmc_command cmd = {};
 403	int err;
 404
 405	cmd.opcode = MMC_SPI_READ_OCR;
 406	cmd.arg = highcap ? (1 << 30) : 0;
 407	cmd.flags = MMC_RSP_SPI_R3;
 408
 409	err = mmc_wait_for_cmd(host, &cmd, 0);
 410
 411	*ocrp = cmd.resp[1];
 412	return err;
 413}
 414
 415int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
 416{
 417	struct mmc_command cmd = {};
 418	int err;
 419
 420	cmd.opcode = MMC_SPI_CRC_ON_OFF;
 421	cmd.flags = MMC_RSP_SPI_R1;
 422	cmd.arg = use_crc;
 423
 424	err = mmc_wait_for_cmd(host, &cmd, 0);
 425	if (!err)
 426		host->use_spi_crc = use_crc;
 427	return err;
 428}
 429
 430static int mmc_switch_status_error(struct mmc_host *host, u32 status)
 431{
 432	if (mmc_host_is_spi(host)) {
 433		if (status & R1_SPI_ILLEGAL_COMMAND)
 434			return -EBADMSG;
 435	} else {
 436		if (R1_STATUS(status))
 437			pr_warn("%s: unexpected status %#x after switch\n",
 438				mmc_hostname(host), status);
 439		if (status & R1_SWITCH_ERROR)
 440			return -EBADMSG;
 441	}
 442	return 0;
 443}
 444
 445/* Caller must hold re-tuning */
 446int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
 447{
 448	u32 status;
 449	int err;
 450
 451	err = mmc_send_status(card, &status);
 452	if (!crc_err_fatal && err == -EILSEQ)
 453		return 0;
 454	if (err)
 455		return err;
 456
 457	return mmc_switch_status_error(card->host, status);
 458}
 459
 460static int mmc_busy_cb(void *cb_data, bool *busy)
 461{
 462	struct mmc_busy_data *data = cb_data;
 463	struct mmc_host *host = data->card->host;
 464	u32 status = 0;
 465	int err;
 466
 467	if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
 468		*busy = host->ops->card_busy(host);
 469		return 0;
 470	}
 471
 472	err = mmc_send_status(data->card, &status);
 473	if (data->retry_crc_err && err == -EILSEQ) {
 474		*busy = true;
 475		return 0;
 476	}
 477	if (err)
 478		return err;
 479
 480	switch (data->busy_cmd) {
 481	case MMC_BUSY_CMD6:
 482		err = mmc_switch_status_error(host, status);
 483		break;
 484	case MMC_BUSY_ERASE:
 485		err = R1_STATUS(status) ? -EIO : 0;
 486		break;
 487	case MMC_BUSY_HPI:
 488	case MMC_BUSY_EXTR_SINGLE:
 489	case MMC_BUSY_IO:
 490		break;
 491	default:
 492		err = -EINVAL;
 493	}
 494
 495	if (err)
 496		return err;
 497
 498	*busy = !mmc_ready_for_data(status);
 499	return 0;
 500}
 501
 502int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us,
 503			unsigned int timeout_ms,
 504			int (*busy_cb)(void *cb_data, bool *busy),
 505			void *cb_data)
 506{
 507	int err;
 508	unsigned long timeout;
 509	unsigned int udelay = period_us ? period_us : 32, udelay_max = 32768;
 510	bool expired = false;
 511	bool busy = false;
 512
 513	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
 514	do {
 515		/*
 516		 * Due to the possibility of being preempted while polling,
 517		 * check the expiration time first.
 518		 */
 519		expired = time_after(jiffies, timeout);
 520
 521		err = (*busy_cb)(cb_data, &busy);
 522		if (err)
 523			return err;
 524
 525		/* Timeout if the device still remains busy. */
 526		if (expired && busy) {
 527			pr_err("%s: Card stuck being busy! %s\n",
 528				mmc_hostname(host), __func__);
 529			return -ETIMEDOUT;
 530		}
 531
 532		/* Throttle the polling rate to avoid hogging the CPU. */
 533		if (busy) {
 534			usleep_range(udelay, udelay * 2);
 535			if (udelay < udelay_max)
 536				udelay *= 2;
 537		}
 538	} while (busy);
 539
 540	return 0;
 541}
 542EXPORT_SYMBOL_GPL(__mmc_poll_for_busy);
 543
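A sketch of a custom busy callback in the style of __mmc_send_op_cond_cb() above; my_dev and my_dev_is_busy() stand in for hypothetical driver glue:

static int my_busy_cb(void *cb_data, bool *busy)
{
	struct my_dev *dev = cb_data;	/* hypothetical private data */

	*busy = my_dev_is_busy(dev);	/* hypothetical busy readback */
	return 0;
}

	/* Start polling at ~1 ms (doubling), give up after 500 ms. */
	err = __mmc_poll_for_busy(host, 1000, 500, &my_busy_cb, dev);
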
 544int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
 545		      bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
 546{
 547	struct mmc_host *host = card->host;
 548	struct mmc_busy_data cb_data;
 549
 550	cb_data.card = card;
 551	cb_data.retry_crc_err = retry_crc_err;
 552	cb_data.busy_cmd = busy_cmd;
 553
 554	return __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_busy_cb, &cb_data);
 555}
 556EXPORT_SYMBOL_GPL(mmc_poll_for_busy);
 557
 558bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
 559			  unsigned int timeout_ms)
 560{
 561	/*
 562	 * If the max_busy_timeout of the host is specified, make sure it's
 563	 * large enough to fit the used timeout_ms. If it's not, let's instruct
 564	 * the host to avoid HW busy detection, by converting to an R1 response
 565	 * instead of an R1B. Note, some hosts require R1B, which also means
 566	 * they are on their own when it comes to dealing with the busy timeout.
 567	 */
 568	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
 569	    (timeout_ms > host->max_busy_timeout)) {
 570		cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
 571		return false;
 572	}
 573
 574	cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
 575	cmd->busy_timeout = timeout_ms;
 576	return true;
 577}
 578
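A sketch of the intended calling pattern, following how __mmc_switch() and mmc_send_hpi_cmd() below use the return value: fall back to SW polling whenever HW busy detection could not be used (timeout_ms is assumed to come from the relevant EXT_CSD field):

	struct mmc_command cmd = { .opcode = MMC_STOP_TRANSMISSION, };
	bool use_r1b_resp;
	int err;

	use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);
	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err && !(host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp))
		err = mmc_poll_for_busy(card, timeout_ms, false, MMC_BUSY_HPI);
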
 579/**
 580 *	__mmc_switch - modify EXT_CSD register
 581 *	@card: the MMC card associated with the data transfer
 582 *	@set: cmd set values
 583 *	@index: EXT_CSD register index
 584 *	@value: value to program into EXT_CSD register
 585 *	@timeout_ms: timeout (ms) for operation performed by register write,
 586 *                   timeout of zero implies maximum possible timeout
 587 *	@timing: new timing to change to
 588 *	@send_status: send status cmd to poll for busy
 589 *	@retry_crc_err: retry on CRC errors when polling with CMD13 for busy
 590 *	@retries: number of retries
 591 *
 592 *	Modifies the EXT_CSD register for selected card.
 593 */
 594int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
 595		unsigned int timeout_ms, unsigned char timing,
 596		bool send_status, bool retry_crc_err, unsigned int retries)
 597{
 598	struct mmc_host *host = card->host;
 599	int err;
 600	struct mmc_command cmd = {};
 601	bool use_r1b_resp;
 602	unsigned char old_timing = host->ios.timing;
 603
 604	mmc_retune_hold(host);
 605
 606	if (!timeout_ms) {
 607		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
 608			mmc_hostname(host));
 609		timeout_ms = card->ext_csd.generic_cmd6_time;
 610	}
 611
 612	cmd.opcode = MMC_SWITCH;
 613	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
 614		  (index << 16) |
 615		  (value << 8) |
 616		  set;
 617	use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);
 618
 619	err = mmc_wait_for_cmd(host, &cmd, retries);
 620	if (err)
 621		goto out;
 622
 623	/* If SPI, or if HW busy detection was used above, we don't need to poll. */
 624	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
 625		mmc_host_is_spi(host))
 626		goto out_tim;
 627
 628	/*
 629	 * If the host doesn't support HW polling via the ->card_busy() ops, and
 630	 * polling by using CMD13 isn't allowed, then we need to rely on waiting
 631	 * out the stated timeout.
 632	 */
 633	if (!send_status && !host->ops->card_busy) {
 634		mmc_delay(timeout_ms);
 635		goto out_tim;
 636	}
 637
 638	/* Let's try to poll to find out when the command is completed. */
 639	err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
 640	if (err)
 641		goto out;
 642
 643out_tim:
 644	/* Switch to the new timing before checking the switch status. */
 645	if (timing)
 646		mmc_set_timing(host, timing);
 647
 648	if (send_status) {
 649		err = mmc_switch_status(card, true);
 650		if (err && timing)
 651			mmc_set_timing(host, old_timing);
 652	}
 653out:
 654	mmc_retune_release(host);
 655
 656	return err;
 657}
 658
 659int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
 660		unsigned int timeout_ms)
 661{
 662	return __mmc_switch(card, set, index, value, timeout_ms, 0,
 663			    true, false, MMC_CMD_RETRIES);
 664}
 665EXPORT_SYMBOL_GPL(mmc_switch);
 666
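A typical single-byte write, much like the core enabling the card's cache with the generic CMD6 timeout:

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CACHE_CTRL,
			 1, card->ext_csd.generic_cmd6_time);
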
 667int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
 668{
 669	struct mmc_request mrq = {};
 670	struct mmc_command cmd = {};
 671	struct mmc_data data = {};
 672	struct scatterlist sg;
 673	struct mmc_ios *ios = &host->ios;
 674	const u8 *tuning_block_pattern;
 675	int size, err = 0;
 676	u8 *data_buf;
 677
 678	if (ios->bus_width == MMC_BUS_WIDTH_8) {
 679		tuning_block_pattern = tuning_blk_pattern_8bit;
 680		size = sizeof(tuning_blk_pattern_8bit);
 681	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
 682		tuning_block_pattern = tuning_blk_pattern_4bit;
 683		size = sizeof(tuning_blk_pattern_4bit);
 684	} else
 685		return -EINVAL;
 686
 687	data_buf = kzalloc(size, GFP_KERNEL);
 688	if (!data_buf)
 689		return -ENOMEM;
 690
 691	mrq.cmd = &cmd;
 692	mrq.data = &data;
 693
 694	cmd.opcode = opcode;
 695	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 696
 697	data.blksz = size;
 698	data.blocks = 1;
 699	data.flags = MMC_DATA_READ;
 700
 701	/*
 702	 * According to the tuning specs, the tuning process
 703	 * normally takes fewer than 40 executions of CMD19,
 704	 * and the timeout value should be shorter than 150 ms.
 705	 */
 706	data.timeout_ns = 150 * NSEC_PER_MSEC;
 707
 708	data.sg = &sg;
 709	data.sg_len = 1;
 710	sg_init_one(&sg, data_buf, size);
 711
 712	mmc_wait_for_req(host, &mrq);
 713
 714	if (cmd_error)
 715		*cmd_error = cmd.error;
 716
 717	if (cmd.error) {
 718		err = cmd.error;
 719		goto out;
 720	}
 721
 722	if (data.error) {
 723		err = data.error;
 724		goto out;
 725	}
 726
 727	if (memcmp(data_buf, tuning_block_pattern, size))
 728		err = -EIO;
 729
 730out:
 731	kfree(data_buf);
 732	return err;
 733}
 734EXPORT_SYMBOL_GPL(mmc_send_tuning);
 735
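A sketch of how a host driver's ->execute_tuning() callback typically wraps this helper; my_set_sample_phase() and MY_NUM_PHASES stand in for hypothetical controller specifics:

static int my_execute_tuning(struct mmc_host *host, u32 opcode)
{
	int phase, err;

	for (phase = 0; phase < MY_NUM_PHASES; phase++) {
		my_set_sample_phase(host, phase);	/* hypothetical */
		err = mmc_send_tuning(host, opcode, NULL);
		if (!err)
			return 0;	/* this sample phase reads cleanly */
	}

	return -EIO;	/* no working phase found */
}
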
 736int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
 737{
 738	struct mmc_command cmd = {};
 739
 740	/*
 741	 * eMMC specification specifies that CMD12 can be used to stop a tuning
 742	 * command, but SD specification does not, so do nothing unless it is
 743	 * eMMC.
 744	 */
 745	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
 746		return 0;
 747
 748	cmd.opcode = MMC_STOP_TRANSMISSION;
 749	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
 750
 751	/*
 752	 * For drivers that override R1 to R1b, set an arbitrary timeout based
 753	 * on the tuning timeout i.e. 150ms.
 754	 */
 755	cmd.busy_timeout = 150;
 756
 757	return mmc_wait_for_cmd(host, &cmd, 0);
 758}
 759EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);
 760
 761static int
 762mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
 763		  u8 len)
 764{
 765	struct mmc_request mrq = {};
 766	struct mmc_command cmd = {};
 767	struct mmc_data data = {};
 768	struct scatterlist sg;
 769	u8 *data_buf;
 770	u8 *test_buf;
 771	int i, err;
 772	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
 773	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
 774
 775	/* DMA onto the stack is unsafe/nonportable, but callers to this
 776	 * routine would normally provide temporary on-stack buffers, so
 777	 * bounce through a heap buffer instead. */
 778	data_buf = kmalloc(len, GFP_KERNEL);
 779	if (!data_buf)
 780		return -ENOMEM;
 781
 782	if (len == 8)
 783		test_buf = testdata_8bit;
 784	else if (len == 4)
 785		test_buf = testdata_4bit;
 786	else {
 787		pr_err("%s: Invalid bus_width %d\n",
 788		       mmc_hostname(host), len);
 789		kfree(data_buf);
 790		return -EINVAL;
 791	}
 792
 793	if (opcode == MMC_BUS_TEST_W)
 794		memcpy(data_buf, test_buf, len);
 795
 796	mrq.cmd = &cmd;
 797	mrq.data = &data;
 798	cmd.opcode = opcode;
 799	cmd.arg = 0;
 800
 801	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
 802	 * rely on callers to never use this with "native" calls for reading
 803	 * CSD or CID.  Native versions of those commands use the R2 type,
 804	 * not R1 plus a data block.
 805	 */
 806	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 807
 808	data.blksz = len;
 809	data.blocks = 1;
 810	if (opcode == MMC_BUS_TEST_R)
 811		data.flags = MMC_DATA_READ;
 812	else
 813		data.flags = MMC_DATA_WRITE;
 814
 815	data.sg = &sg;
 816	data.sg_len = 1;
 817	mmc_set_data_timeout(&data, card);
 818	sg_init_one(&sg, data_buf, len);
 819	mmc_wait_for_req(host, &mrq);
 820	err = 0;
 821	if (opcode == MMC_BUS_TEST_R) {
 822		for (i = 0; i < len / 4; i++)
 823			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
 824				err = -EIO;
 825				break;
 826			}
 827	}
 828	kfree(data_buf);
 829
 830	if (cmd.error)
 831		return cmd.error;
 832	if (data.error)
 833		return data.error;
 834
 835	return err;
 836}
 837
 838int mmc_bus_test(struct mmc_card *card, u8 bus_width)
 839{
 840	int width;
 841
 842	if (bus_width == MMC_BUS_WIDTH_8)
 843		width = 8;
 844	else if (bus_width == MMC_BUS_WIDTH_4)
 845		width = 4;
 846	else if (bus_width == MMC_BUS_WIDTH_1)
 847		return 0; /* no need for test */
 848	else
 849		return -EINVAL;
 850
 851	/*
 852	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
 853	 * is a problem.  This improves chances that the test will work.
 854	 */
 855	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
 856	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
 857}
 858
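A sketch of the caller's side, loosely following mmc_select_bus_width(): set a candidate width on the host, keep it only if the bus test passes:

	mmc_set_bus_width(card->host, MMC_BUS_WIDTH_8);
	err = mmc_bus_test(card, MMC_BUS_WIDTH_8);
	if (err)	/* fall back to a narrower bus on failure */
		mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
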
 859static int mmc_send_hpi_cmd(struct mmc_card *card)
 860{
 861	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
 862	struct mmc_host *host = card->host;
 863	bool use_r1b_resp = false;
 864	struct mmc_command cmd = {};
 865	int err;
 866
 867	cmd.opcode = card->ext_csd.hpi_cmd;
 868	cmd.arg = card->rca << 16 | 1;
 869	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 870
 871	if (cmd.opcode == MMC_STOP_TRANSMISSION)
 872		use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
 873						    busy_timeout_ms);
 874
 875	err = mmc_wait_for_cmd(host, &cmd, 0);
 876	if (err) {
 877		pr_warn("%s: HPI error %d. Command response %#x\n",
 878			mmc_hostname(host), err, cmd.resp[0]);
 879		return err;
 880	}
 881
 882	/* No need to poll when using HW busy detection. */
 883	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
 884		return 0;
 885
 886	/* Let's poll to find out when the HPI request completes. */
 887	return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
 888}
 889
 890/**
 891 *	mmc_interrupt_hpi - issue a High Priority Interrupt
 892 *	@card: the MMC card associated with the HPI transfer
 893 *
 894 *	Issues a High Priority Interrupt, and polls the card status
 895 *	until it is out of the prg-state.
 896 */
 897static int mmc_interrupt_hpi(struct mmc_card *card)
 898{
 899	int err;
 900	u32 status;
 901
 902	if (!card->ext_csd.hpi_en) {
 903		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
 904		return 1;
 905	}
 906
 907	err = mmc_send_status(card, &status);
 908	if (err) {
 909		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
 910		goto out;
 911	}
 912
 913	switch (R1_CURRENT_STATE(status)) {
 914	case R1_STATE_IDLE:
 915	case R1_STATE_READY:
 916	case R1_STATE_STBY:
 917	case R1_STATE_TRAN:
 918		/*
 919		 * In idle and transfer states, HPI is not needed and the caller
 920		 * can issue the next intended command immediately
 921		 */
 922		goto out;
 923	case R1_STATE_PRG:
 924		break;
 925	default:
 926		/* In all other states, it's illegal to issue HPI */
 927		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
 928			mmc_hostname(card->host), R1_CURRENT_STATE(status));
 929		err = -EINVAL;
 930		goto out;
 931	}
 932
 933	err = mmc_send_hpi_cmd(card);
 934out:
 935	return err;
 936}
 937
 938int mmc_can_ext_csd(struct mmc_card *card)
 939{
 940	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
 941}
 942
 943static int mmc_read_bkops_status(struct mmc_card *card)
 944{
 945	int err;
 946	u8 *ext_csd;
 947
 948	err = mmc_get_ext_csd(card, &ext_csd);
 949	if (err)
 950		return err;
 951
 952	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
 953	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
 954	kfree(ext_csd);
 955	return 0;
 956}
 957
 958/**
 959 *	mmc_run_bkops - Run BKOPS for supported cards
 960 *	@card: MMC card to run BKOPS for
 961 *
 962 *	Run background operations synchronously for cards having manual BKOPS
 963 *	enabled, in case the card reports an urgent BKOPS level.
 964 */
 965void mmc_run_bkops(struct mmc_card *card)
 966{
 967	int err;
 968
 969	if (!card->ext_csd.man_bkops_en)
 970		return;
 971
 972	err = mmc_read_bkops_status(card);
 973	if (err) {
 974		pr_err("%s: Failed to read bkops status: %d\n",
 975		       mmc_hostname(card->host), err);
 976		return;
 977	}
 978
 979	if (!card->ext_csd.raw_bkops_status ||
 980	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
 981		return;
 982
 983	mmc_retune_hold(card->host);
 984
 985	/*
 986	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
 987	 * synchronously. Going forward, we may consider starting BKOPS for less
 988	 * urgent levels by using an asynchronous background task, when idle.
 989	 */
 990	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 991			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
 992	/*
 993	 * If the BKOPS timed out, the card is probably still busy in the
 994	 * R1_STATE_PRG. Rather than continue to wait, let's try to abort
 995	 * it with a HPI command to get back into R1_STATE_TRAN.
 996	 */
 997	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
 998		pr_warn("%s: BKOPS aborted\n", mmc_hostname(card->host));
 999	else if (err)
1000		pr_warn("%s: Error %d running bkops\n",
1001			mmc_hostname(card->host), err);
1002
1003	mmc_retune_release(card->host);
1004}
1005EXPORT_SYMBOL(mmc_run_bkops);
1006
1007static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
1008{
1009	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
1010	int err;
1011
1012	if (!card->ext_csd.cmdq_support)
1013		return -EOPNOTSUPP;
1014
1015	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
1016			 val, card->ext_csd.generic_cmd6_time);
1017	if (!err)
1018		card->ext_csd.cmdq_en = enable;
1019
1020	return err;
1021}
1022
1023int mmc_cmdq_enable(struct mmc_card *card)
1024{
1025	return mmc_cmdq_switch(card, true);
1026}
1027EXPORT_SYMBOL_GPL(mmc_cmdq_enable);
1028
1029int mmc_cmdq_disable(struct mmc_card *card)
1030{
1031	return mmc_cmdq_switch(card, false);
1032}
1033EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
1034
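A usage sketch: CMD6 and various legacy commands are not allowed while command queueing is on, so callers bracket such sequences roughly like this (mirroring the approach in the mmc block driver):

	bool cmdq_was_en = false;
	int err = 0;

	if (card->ext_csd.cmdq_en) {
		err = mmc_cmdq_disable(card);
		if (err)
			return err;
		cmdq_was_en = true;
	}

	/* ... issue the non-CQE commands here ... */

	if (cmdq_was_en)
		err = mmc_cmdq_enable(card);
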
1035int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
1036{
1037	struct mmc_host *host = card->host;
1038	int err;
1039
1040	if (!mmc_can_sanitize(card)) {
1041		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
1042		return -EOPNOTSUPP;
1043	}
1044
1045	if (!timeout_ms)
1046		timeout_ms = MMC_SANITIZE_TIMEOUT_MS;
1047
1048	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));
1049
1050	mmc_retune_hold(host);
1051
1052	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
1053			   1, timeout_ms, 0, true, false, 0);
1054	if (err)
1055		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);
1056
1057	/*
1058	 * If the sanitize operation timed out, the card is probably still busy
1059	 * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
1060	 * it with a HPI command to get back into R1_STATE_TRAN.
1061	 */
1062	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
1063		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));
1064
1065	mmc_retune_release(host);
1066
1067	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
1068	return err;
1069}
1070EXPORT_SYMBOL_GPL(mmc_sanitize);
v3.1
 
  1/*
  2 *  linux/drivers/mmc/core/mmc_ops.h
  3 *
  4 *  Copyright 2006-2007 Pierre Ossman
  5 *
  6 * This program is free software; you can redistribute it and/or modify
  7 * it under the terms of the GNU General Public License as published by
  8 * the Free Software Foundation; either version 2 of the License, or (at
  9 * your option) any later version.
 10 */
 11
 12#include <linux/slab.h>
 13#include <linux/types.h>
 14#include <linux/scatterlist.h>
 15
 16#include <linux/mmc/host.h>
 17#include <linux/mmc/card.h>
 18#include <linux/mmc/mmc.h>
 19
 20#include "core.h"
 21#include "mmc_ops.h"
 22
 23static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
 24{
 25	int err;
 26	struct mmc_command cmd = {0};
 27
 28	BUG_ON(!host);
 29
 30	cmd.opcode = MMC_SELECT_CARD;
 31
 32	if (card) {
 33		cmd.arg = card->rca << 16;
 34		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 35	} else {
 36		cmd.arg = 0;
 37		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
 38	}
 39
 40	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
 41	if (err)
 42		return err;
 43
 44	return 0;
 45}
 46
 47int mmc_select_card(struct mmc_card *card)
 48{
 49	BUG_ON(!card);
 50
 51	return _mmc_select_card(card->host, card);
 52}
 53
 54int mmc_deselect_cards(struct mmc_host *host)
 55{
 56	return _mmc_select_card(host, NULL);
 57}
 58
 59int mmc_card_sleepawake(struct mmc_host *host, int sleep)
 60{
 61	struct mmc_command cmd = {0};
 62	struct mmc_card *card = host->card;
 63	int err;
 64
 65	if (sleep)
 66		mmc_deselect_cards(host);
 67
 68	cmd.opcode = MMC_SLEEP_AWAKE;
 69	cmd.arg = card->rca << 16;
 70	if (sleep)
 71		cmd.arg |= 1 << 15;
 72
 73	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
 74	err = mmc_wait_for_cmd(host, &cmd, 0);
 75	if (err)
 76		return err;
 77
 78	/*
  79	 * If the host does not wait while the card signals busy, then we
  80	 * will have to wait the sleep/awake timeout.  Note, we cannot use the
 81	 * SEND_STATUS command to poll the status because that command (and most
 82	 * others) is invalid while the card sleeps.
 83	 */
 84	if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
 85		mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000));
 86
 87	if (!sleep)
 88		err = mmc_select_card(card);
 89
 90	return err;
 91}
 92
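A sketch of the v3.1-era caller in mmc.c: suspend puts the card to sleep only when the EXT_CSD revision advertises sleep/awake support:

	/* mmc_can_sleep()-style gate, then CMD5 with the sleep bit set. */
	if (card && card->ext_csd.rev >= 3)
		err = mmc_card_sleepawake(host, 1);
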
 93int mmc_go_idle(struct mmc_host *host)
 94{
 95	int err;
 96	struct mmc_command cmd = {0};
 97
 98	/*
 99	 * Non-SPI hosts need to prevent chipselect going active during
100	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
101	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
102	 *
103	 * SPI hosts ignore ios.chip_select; it's managed according to
104	 * rules that must accommodate non-MMC slaves which this layer
105	 * won't even know about.
106	 */
107	if (!mmc_host_is_spi(host)) {
108		mmc_set_chip_select(host, MMC_CS_HIGH);
109		mmc_delay(1);
110	}
111
112	cmd.opcode = MMC_GO_IDLE_STATE;
113	cmd.arg = 0;
114	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
115
116	err = mmc_wait_for_cmd(host, &cmd, 0);
117
118	mmc_delay(1);
119
120	if (!mmc_host_is_spi(host)) {
121		mmc_set_chip_select(host, MMC_CS_DONTCARE);
122		mmc_delay(1);
123	}
124
125	host->use_spi_crc = 0;
126
127	return err;
128}
129
130int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
131{
132	struct mmc_command cmd = {0};
133	int i, err = 0;
134
135	BUG_ON(!host);
136
137	cmd.opcode = MMC_SEND_OP_COND;
138	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
139	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
140
141	for (i = 100; i; i--) {
142		err = mmc_wait_for_cmd(host, &cmd, 0);
143		if (err)
144			break;
145
146		/* if we're just probing, do a single pass */
147		if (ocr == 0)
148			break;
149
150		/* otherwise wait until reset completes */
151		if (mmc_host_is_spi(host)) {
152			if (!(cmd.resp[0] & R1_SPI_IDLE))
153				break;
154		} else {
155			if (cmd.resp[0] & MMC_CARD_BUSY)
156				break;
157		}
158
159		err = -ETIMEDOUT;
160
161		mmc_delay(10);
162	}
163
164	if (rocr && !mmc_host_is_spi(host))
165		*rocr = cmd.resp[0];
166
167	return err;
168}
169
170int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
171{
172	int err;
173	struct mmc_command cmd = {0};
174
175	BUG_ON(!host);
176	BUG_ON(!cid);
177
178	cmd.opcode = MMC_ALL_SEND_CID;
179	cmd.arg = 0;
180	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
181
182	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
183	if (err)
184		return err;
185
186	memcpy(cid, cmd.resp, sizeof(u32) * 4);
187
188	return 0;
189}
190
191int mmc_set_relative_addr(struct mmc_card *card)
192{
193	int err;
194	struct mmc_command cmd = {0};
195
196	BUG_ON(!card);
197	BUG_ON(!card->host);
198
199	cmd.opcode = MMC_SET_RELATIVE_ADDR;
200	cmd.arg = card->rca << 16;
201	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
202
203	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
204	if (err)
205		return err;
206
207	return 0;
208}
209
210static int
211mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
212{
213	int err;
214	struct mmc_command cmd = {0};
215
216	BUG_ON(!host);
217	BUG_ON(!cxd);
218
219	cmd.opcode = opcode;
220	cmd.arg = arg;
221	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
222
223	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
224	if (err)
225		return err;
226
227	memcpy(cxd, cmd.resp, sizeof(u32) * 4);
228
229	return 0;
230}
231
232static int
233mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
234		u32 opcode, void *buf, unsigned len)
235{
236	struct mmc_request mrq = {0};
237	struct mmc_command cmd = {0};
238	struct mmc_data data = {0};
239	struct scatterlist sg;
240	void *data_buf;
241
 242	/* DMA onto the stack is unsafe/nonportable, but callers to this
 243	 * routine would normally provide temporary on-stack buffers, so
 244	 * bounce through a heap buffer instead. */
245	data_buf = kmalloc(len, GFP_KERNEL);
246	if (data_buf == NULL)
247		return -ENOMEM;
248
249	mrq.cmd = &cmd;
250	mrq.data = &data;
251
252	cmd.opcode = opcode;
253	cmd.arg = 0;
254
255	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
256	 * rely on callers to never use this with "native" calls for reading
257	 * CSD or CID.  Native versions of those commands use the R2 type,
258	 * not R1 plus a data block.
259	 */
260	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
261
262	data.blksz = len;
263	data.blocks = 1;
264	data.flags = MMC_DATA_READ;
265	data.sg = &sg;
266	data.sg_len = 1;
267
268	sg_init_one(&sg, data_buf, len);
269
270	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
271		/*
272		 * The spec states that CSR and CID accesses have a timeout
273		 * of 64 clock cycles.
274		 */
275		data.timeout_ns = 0;
276		data.timeout_clks = 64;
277	} else
278		mmc_set_data_timeout(&data, card);
279
280	mmc_wait_for_req(host, &mrq);
281
282	memcpy(buf, data_buf, len);
283	kfree(data_buf);
284
285	if (cmd.error)
286		return cmd.error;
287	if (data.error)
288		return data.error;
289
290	return 0;
291}
292
293int mmc_send_csd(struct mmc_card *card, u32 *csd)
294{
295	int ret, i;
296
297	if (!mmc_host_is_spi(card->host))
298		return mmc_send_cxd_native(card->host, card->rca << 16,
299				csd, MMC_SEND_CSD);
300
301	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd, 16);
302	if (ret)
303		return ret;
304
 305	for (i = 0; i < 4; i++)
306		csd[i] = be32_to_cpu(csd[i]);
307
308	return 0;
309}
310
311int mmc_send_cid(struct mmc_host *host, u32 *cid)
312{
313	int ret, i;
314
315	if (!mmc_host_is_spi(host)) {
316		if (!host->card)
317			return -EINVAL;
318		return mmc_send_cxd_native(host, host->card->rca << 16,
319				cid, MMC_SEND_CID);
320	}
321
322	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid, 16);
323	if (ret)
324		return ret;
325
 326	for (i = 0; i < 4; i++)
327		cid[i] = be32_to_cpu(cid[i]);
328
329	return 0;
330}
331
332int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
333{
334	return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
335			ext_csd, 512);
336}
337
338int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
339{
340	struct mmc_command cmd = {0};
341	int err;
342
343	cmd.opcode = MMC_SPI_READ_OCR;
344	cmd.arg = highcap ? (1 << 30) : 0;
345	cmd.flags = MMC_RSP_SPI_R3;
346
347	err = mmc_wait_for_cmd(host, &cmd, 0);
348
349	*ocrp = cmd.resp[1];
350	return err;
351}
352
353int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
354{
355	struct mmc_command cmd = {0};
356	int err;
357
358	cmd.opcode = MMC_SPI_CRC_ON_OFF;
359	cmd.flags = MMC_RSP_SPI_R1;
360	cmd.arg = use_crc;
361
362	err = mmc_wait_for_cmd(host, &cmd, 0);
363	if (!err)
364		host->use_spi_crc = use_crc;
365	return err;
366}
367
368/**
369 *	mmc_switch - modify EXT_CSD register
370 *	@card: the MMC card associated with the data transfer
371 *	@set: cmd set values
372 *	@index: EXT_CSD register index
373 *	@value: value to program into EXT_CSD register
374 *	@timeout_ms: timeout (ms) for operation performed by register write,
375 *                   timeout of zero implies maximum possible timeout
376 *
377 *	Modifies the EXT_CSD register for selected card.
378 */
379int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
380	       unsigned int timeout_ms)
381{
382	int err;
383	struct mmc_command cmd = {0};
384	u32 status;
385
386	BUG_ON(!card);
387	BUG_ON(!card->host);
388
389	cmd.opcode = MMC_SWITCH;
390	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
391		  (index << 16) |
392		  (value << 8) |
393		  set;
394	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
395	cmd.cmd_timeout_ms = timeout_ms;
396
397	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
398	if (err)
399		return err;
400
401	/* Must check status to be sure of no errors */
402	do {
403		err = mmc_send_status(card, &status);
404		if (err)
405			return err;
406		if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
407			break;
408		if (mmc_host_is_spi(card->host))
409			break;
410	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
411
412	if (mmc_host_is_spi(card->host)) {
413		if (status & R1_SPI_ILLEGAL_COMMAND)
414			return -EBADMSG;
415	} else {
416		if (status & 0xFDFFA000)
417			printk(KERN_WARNING "%s: unexpected status %#x after "
418			       "switch", mmc_hostname(card->host), status);
419		if (status & R1_SWITCH_ERROR)
420			return -EBADMSG;
421	}
422
423	return 0;
424}
425EXPORT_SYMBOL_GPL(mmc_switch);
426
427int mmc_send_status(struct mmc_card *card, u32 *status)
428{
429	int err;
430	struct mmc_command cmd = {0};
431
432	BUG_ON(!card);
433	BUG_ON(!card->host);
434
435	cmd.opcode = MMC_SEND_STATUS;
436	if (!mmc_host_is_spi(card->host))
437		cmd.arg = card->rca << 16;
438	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
439
440	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
441	if (err)
442		return err;
443
444	/* NOTE: callers are required to understand the difference
445	 * between "native" and SPI format status words!
446	 */
447	if (status)
448		*status = cmd.resp[0];
449
450	return 0;
451}
452
453static int
454mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
455		  u8 len)
456{
457	struct mmc_request mrq = {0};
458	struct mmc_command cmd = {0};
459	struct mmc_data data = {0};
460	struct scatterlist sg;
461	u8 *data_buf;
462	u8 *test_buf;
463	int i, err;
464	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
465	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
466
 467	/* DMA onto the stack is unsafe/nonportable, but callers to this
 468	 * routine would normally provide temporary on-stack buffers, so
 469	 * bounce through a heap buffer instead. */
470	data_buf = kmalloc(len, GFP_KERNEL);
471	if (!data_buf)
472		return -ENOMEM;
473
474	if (len == 8)
475		test_buf = testdata_8bit;
476	else if (len == 4)
477		test_buf = testdata_4bit;
478	else {
479		printk(KERN_ERR "%s: Invalid bus_width %d\n",
480		       mmc_hostname(host), len);
481		kfree(data_buf);
482		return -EINVAL;
483	}
484
485	if (opcode == MMC_BUS_TEST_W)
486		memcpy(data_buf, test_buf, len);
487
488	mrq.cmd = &cmd;
489	mrq.data = &data;
490	cmd.opcode = opcode;
491	cmd.arg = 0;
492
493	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
494	 * rely on callers to never use this with "native" calls for reading
495	 * CSD or CID.  Native versions of those commands use the R2 type,
496	 * not R1 plus a data block.
497	 */
498	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
499
500	data.blksz = len;
501	data.blocks = 1;
502	if (opcode == MMC_BUS_TEST_R)
503		data.flags = MMC_DATA_READ;
504	else
505		data.flags = MMC_DATA_WRITE;
506
507	data.sg = &sg;
508	data.sg_len = 1;
509	sg_init_one(&sg, data_buf, len);
510	mmc_wait_for_req(host, &mrq);
511	err = 0;
512	if (opcode == MMC_BUS_TEST_R) {
513		for (i = 0; i < len / 4; i++)
514			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
515				err = -EIO;
516				break;
517			}
518	}
519	kfree(data_buf);
520
521	if (cmd.error)
522		return cmd.error;
523	if (data.error)
524		return data.error;
525
526	return err;
527}
528
529int mmc_bus_test(struct mmc_card *card, u8 bus_width)
530{
531	int err, width;
532
533	if (bus_width == MMC_BUS_WIDTH_8)
534		width = 8;
535	else if (bus_width == MMC_BUS_WIDTH_4)
536		width = 4;
537	else if (bus_width == MMC_BUS_WIDTH_1)
538		return 0; /* no need for test */
539	else
540		return -EINVAL;
541
542	/*
543	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
544	 * is a problem.  This improves chances that the test will work.
545	 */
546	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
547	err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
548	return err;
549}