v3.1

/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "mmc_ops.h"

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!host);

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	return 0;
}

int mmc_select_card(struct mmc_card *card)
{
	BUG_ON(!card);

	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

int mmc_card_sleepawake(struct mmc_host *host, int sleep)
{
	struct mmc_command cmd = {0};
	struct mmc_card *card = host->card;
	int err;

	if (sleep)
		mmc_deselect_cards(host);

	cmd.opcode = MMC_SLEEP_AWAKE;
	cmd.arg = card->rca << 16;
	if (sleep)
		cmd.arg |= 1 << 15;

	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		return err;

	/*
	 * If the host does not wait while the card signals busy, then we will
	 * have to wait the sleep/awake timeout.  Note, we cannot use the
	 * SEND_STATUS command to poll the status because that command (and
	 * most others) is invalid while the card sleeps.
	 * (ext_csd.sa_timeout is in units of 100ns, so dividing by 10000
	 * converts it to milliseconds.)
	 */
	if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
		mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000));

	if (!sleep)
		err = mmc_select_card(card);

	return err;
}

int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {0};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {0};
	int i, err = 0;

	BUG_ON(!host);

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* if we're just probing, do a single pass */
		if (ocr == 0)
			break;

		/* otherwise wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
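
/*
 * Illustrative sketch, not part of the original file: callers typically
 * issue mmc_send_op_cond() twice, first probing with ocr == 0 (a single
 * pass that just fetches the card's OCR), then negotiating with a real
 * voltage window until the card reports ready. The helper name below is
 * hypothetical.
 */
static int example_negotiate_ocr(struct mmc_host *host, u32 *rocr)
{
	u32 ocr;
	int err;

	/* Probing pass: ocr == 0 only asks the card for its OCR. */
	err = mmc_send_op_cond(host, 0, &ocr);
	if (err)
		return err;

	/* Full pass: request the voltage window and loop until not busy. */
	return mmc_send_op_cond(host, ocr, rocr);
}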

int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!host);
	BUG_ON(!cid);

	cmd.opcode = MMC_ALL_SEND_CID;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cid, cmd.resp, sizeof(u32) * 4);

	return 0;
}

int mmc_set_relative_addr(struct mmc_card *card)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!card);
	BUG_ON(!card->host);

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	return 0;
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!host);
	BUG_ON(!cxd);

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	void *data_buf;

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (data_buf == NULL)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, data_buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	memcpy(buf, data_buf, len);
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	int ret, i;

	if (!mmc_host_is_spi(card->host))
		return mmc_send_cxd_native(card->host, card->rca << 16,
				csd, MMC_SEND_CSD);

	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd, 16);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++)
		csd[i] = be32_to_cpu(csd[i]);

	return 0;
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	int ret, i;

	if (!mmc_host_is_spi(host)) {
		if (!host->card)
			return -EINVAL;
		return mmc_send_cxd_native(host, host->card->rca << 16,
				cid, MMC_SEND_CID);
	}

	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid, 16);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++)
		cid[i] = be32_to_cpu(cid[i]);

	return 0;
}

int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
			ext_csd, 512);
}

int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

/**
 *	mmc_switch - modify EXT_CSD register
 *	@card: the MMC card associated with the data transfer
 *	@set: cmd set values
 *	@index: EXT_CSD register index
 *	@value: value to program into EXT_CSD register
 *	@timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *
 *	Modifies the EXT_CSD register for selected card.
 */
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
	       unsigned int timeout_ms)
{
	int err;
	struct mmc_command cmd = {0};
	u32 status;

	BUG_ON(!card);
	BUG_ON(!card->host);

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	cmd.cmd_timeout_ms = timeout_ms;

	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	/* Must check status to be sure of no errors */
	do {
		err = mmc_send_status(card, &status);
		if (err)
			return err;
		if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
			break;
		if (mmc_host_is_spi(card->host))
			break;
	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);

	if (mmc_host_is_spi(card->host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (status & 0xFDFFA000)
			printk(KERN_WARNING "%s: unexpected status %#x after "
			       "switch\n", mmc_hostname(card->host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_switch);
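
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * writes a single EXT_CSD byte, e.g. requesting high-speed timing. The
 * EXT_CSD_* constants live in <linux/mmc/mmc.h>; generic_cmd6_time is the
 * CMD6 timeout parsed from the card's EXT_CSD.
 */
static int example_enable_hs_timing(struct mmc_card *card)
{
	return mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			  1, card->ext_csd.generic_cmd6_time);
}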

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!card);
	BUG_ON(!card->host);

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}

static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		printk(KERN_ERR "%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
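	/*
	 * On BUS_TEST_R the card returns the bit-wise inverse of the pattern
	 * previously written with BUS_TEST_W, so bytes carried on working
	 * data lines XOR with the test pattern to 0xff.
	 */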
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int err, width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
	 * is a problem.  This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
	return err;
}
v5.4
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_OPS_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */

static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
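
/*
 * Note added for illustration: host->dsr is normally populated by the host
 * layer from firmware, e.g. a device-tree "dsr" property. CMD4 carries the
 * 16-bit DSR value in bits [31:16] of the argument; the low 16 bits are
 * stuff bits, hence the "| 0xffff" above.
 */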

int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);

		/*
		 * According to eMMC specification v5.1 section 6.4.3, we
		 * should issue CMD1 repeatedly in the idle state until
		 * the eMMC is ready. Otherwise some eMMC devices seem to enter
		 * the inactive mode after mmc_init_card() issued CMD0 when
		 * the eMMC device is busy.
		 */
		if (!ocr && !mmc_host_is_spi(host))
			cmd.arg = cmd.resp[0] | BIT(30);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}

int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: the caller is required to use a DMA-capable buffer for @buf,
 * or an on-stack buffer (with some overhead in the callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
{
	int ret, i;
	__be32 *csd_tmp;

	csd_tmp = kzalloc(16, GFP_KERNEL);
	if (!csd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		csd[i] = be32_to_cpu(csd_tmp[i]);

err:
	kfree(csd_tmp);
	return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_csd(card, csd);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				MMC_SEND_CSD);
}

static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
{
	int ret, i;
	__be32 *cid_tmp;

	cid_tmp = kzalloc(16, GFP_KERNEL);
	if (!cid_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cid[i] = be32_to_cpu(cid_tmp[i]);

err:
	kfree(cid_tmp);
	return ret;
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cid(host, cid);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
				512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);

int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (R1_STATUS(status))
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

/* Caller must hold re-tuning */
int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

int mmc_switch_status(struct mmc_card *card)
{
	return __mmc_switch_status(card, true);
}

static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	u32 status = 0;
	bool expired = false;
	bool busy = false;

	/* We have an unspecified cmd timeout, use the fallback value. */
	if (!timeout_ms)
		timeout_ms = MMC_OPS_TIMEOUT_MS;

	/*
	 * When we aren't allowed to poll using CMD13 and the host isn't
	 * capable of polling via ->card_busy(), rely on waiting out the
	 * stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		return 0;
	}

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		if (host->ops->card_busy) {
			busy = host->ops->card_busy(host);
		} else {
			err = mmc_send_status(card, &status);
			if (retry_crc_err && err == -EILSEQ) {
				busy = true;
			} else if (err) {
				return err;
			} else {
				err = mmc_switch_status_error(host, status);
				if (err)
					return err;
				busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
			}
		}

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
	} while (busy);

	return 0;
}

/**
 *	__mmc_switch - modify EXT_CSD register
 *	@card: the MMC card associated with the data transfer
 *	@set: cmd set values
 *	@index: EXT_CSD register index
 *	@value: value to program into EXT_CSD register
 *	@timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *	@timing: new timing to change to
 *	@use_busy_signal: use the busy signal as response type
 *	@send_status: send status cmd to poll for busy
 *	@retry_crc_err: retry if a CRC error occurs while polling with CMD13 for busy
 *
 *	Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool use_busy_signal, bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp = use_busy_signal;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	/*
	 * If the cmd timeout and the max_busy_timeout of the host are both
	 * specified, let's validate them. A failure means we need to prevent
	 * the host from doing hw busy detection, which is done by converting
	 * to a R1 response instead of a R1B.
	 */
	if (timeout_ms && host->max_busy_timeout &&
		(timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		/*
		 * A busy_timeout of zero means the host can decide to use
		 * whatever value it finds suitable.
		 */
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		goto out;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		goto out;

	/* If SPI or we used HW busy detection above, then we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
		mmc_host_is_spi(host))
		goto out_tim;

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before checking switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			true, true, false);
}
EXPORT_SYMBOL_GPL(mmc_switch);

int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process normally takes
	 * fewer than 40 executions of CMD19, and the timeout value should be
	 * shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
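
/*
 * Illustrative sketch, not part of the original file: a host driver's
 * ->execute_tuning() callback typically sweeps its sample phases and keeps
 * one for which mmc_send_tuning() succeeds. EXAMPLE_NUM_PHASES and the
 * phase-setting helper below are hypothetical.
 */
static int example_execute_tuning(struct mmc_host *host, u32 opcode)
{
	int phase;

	for (phase = 0; phase < EXAMPLE_NUM_PHASES; phase++) {
		example_set_sample_phase(host, phase);	/* hypothetical */
		if (!mmc_send_tuning(host, opcode, NULL))
			return 0;	/* tuning block read back correctly */
	}

	return -EIO;	/* no working sample phase found */
}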

int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * The eMMC specification states that CMD12 can be used to stop a
	 * tuning command, but the SD specification does not, so do nothing
	 * unless it is eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_abort_tuning);

static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
	 * is a problem.  This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}

static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {};
	unsigned int opcode;
	int err;

	opcode = card->ext_csd.hpi_cmd;
	if (opcode == MMC_STOP_TRANSMISSION)
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	else if (opcode == MMC_SEND_STATUS)
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = opcode;
	cmd.arg = card->rca << 16 | 1;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_warn("%s: error %d interrupting operation. "
			"HPI command response %#x\n", mmc_hostname(card->host),
			err, cmd.resp[0]);
		return err;
	}
	if (status)
		*status = cmd.resp[0];

	return 0;
}

/**
 *	mmc_interrupt_hpi - Issue a High Priority Interrupt
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issues a High Priority Interrupt, then polls the card status
 *	until the card leaves the prg state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	return err;
}

int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}

/**
 *	mmc_run_bkops - Run BKOPS for supported cards
 *	@card: MMC card to run BKOPS for
 *
 *	Run background operations synchronously for cards having manual BKOPS
 *	enabled and in case it reports urgent BKOPS level.
 */
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
	 * synchronously. In the future, we may consider starting BKOPS for
	 * the less urgent levels from an asynchronous background task, when
	 * idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BKOPS_START, 1, MMC_OPS_TIMEOUT_MS);
	if (err)
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);

	mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_card_mmc(card) &&
			(card->ext_csd.cache_size > 0) &&
			(card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
					mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);
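
/*
 * Note added for illustration: the core invokes mmc_flush_cache() on the
 * suspend/shutdown paths, so cached data reaches non-volatile storage
 * before power is removed.
 */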

static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
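
/*
 * Note added for illustration: the block layer pairs these helpers around
 * operations that cannot run with command queueing enabled, such as
 * switching to the RPMB partition.
 */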