v3.5.6
  1/*
  2 *  linux/drivers/mmc/core/mmc_ops.h
  3 *
  4 *  Copyright 2006-2007 Pierre Ossman
  5 *
  6 * This program is free software; you can redistribute it and/or modify
  7 * it under the terms of the GNU General Public License as published by
  8 * the Free Software Foundation; either version 2 of the License, or (at
  9 * your option) any later version.
 10 */
 11
 12#include <linux/slab.h>
 13#include <linux/export.h>
 14#include <linux/types.h>
 15#include <linux/scatterlist.h>
 16
 17#include <linux/mmc/host.h>
 18#include <linux/mmc/card.h>
 19#include <linux/mmc/mmc.h>
 20
 21#include "core.h"
 22#include "mmc_ops.h"
 23
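/*
 * CMD7 (SELECT/DESELECT_CARD): with a card, the RCA goes in the upper 16
 * bits of the argument and an R1 response is expected; with a NULL card the
 * argument is 0, which deselects all cards and gets no response.
 */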
 24static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
 25{
 26	int err;
 27	struct mmc_command cmd = {0};
 28
 29	BUG_ON(!host);
 30
 31	cmd.opcode = MMC_SELECT_CARD;
 32
 33	if (card) {
 34		cmd.arg = card->rca << 16;
 35		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 36	} else {
 37		cmd.arg = 0;
 38		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
 39	}
 40
 41	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
 42	if (err)
 43		return err;
 44
 45	return 0;
 46}
 47
 48int mmc_select_card(struct mmc_card *card)
 49{
 50	BUG_ON(!card);
 51
 52	return _mmc_select_card(card->host, card);
 53}
 54
 55int mmc_deselect_cards(struct mmc_host *host)
 56{
 57	return _mmc_select_card(host, NULL);
 58}
 59
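/*
 * CMD5 (SLEEP_AWAKE): bit 15 of the argument selects sleep (1) or awake (0).
 * The card must be deselected before it can sleep and reselected afterwards,
 * which is why this helper pairs the command with CMD7.  The sa_timeout
 * conversion below assumes ext_csd.sa_timeout is stored in 100ns units.
 */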
 60int mmc_card_sleepawake(struct mmc_host *host, int sleep)
 61{
 62	struct mmc_command cmd = {0};
 63	struct mmc_card *card = host->card;
 64	int err;
 65
 66	if (sleep)
 67		mmc_deselect_cards(host);
 68
 69	cmd.opcode = MMC_SLEEP_AWAKE;
 70	cmd.arg = card->rca << 16;
 71	if (sleep)
 72		cmd.arg |= 1 << 15;
 73
 74	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
 75	err = mmc_wait_for_cmd(host, &cmd, 0);
 76	if (err)
 77		return err;
 78
 79	/*
 80	 * If the host does not wait while the card signals busy, then we will
 81	 * have to wait the sleep/awake timeout.  Note, we cannot use the
 82	 * SEND_STATUS command to poll the status because that command (and most
 83	 * others) is invalid while the card sleeps.
 84	 */
 85	if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
 86		mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000));
 87
 88	if (!sleep)
 89		err = mmc_select_card(card);
 90
 91	return err;
 92}
 93
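/*
 * CMD0 (GO_IDLE_STATE): software reset that returns the card to the idle
 * state.  In native mode there is no response; in SPI mode an R1 is read.
 */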
 94int mmc_go_idle(struct mmc_host *host)
 95{
 96	int err;
 97	struct mmc_command cmd = {0};
 98
 99	/*
100	 * Non-SPI hosts need to prevent chipselect going active during
101	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
102	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
103	 *
104	 * SPI hosts ignore ios.chip_select; it's managed according to
105	 * rules that must accommodate non-MMC slaves which this layer
106	 * won't even know about.
107	 */
108	if (!mmc_host_is_spi(host)) {
109		mmc_set_chip_select(host, MMC_CS_HIGH);
110		mmc_delay(1);
111	}
112
113	cmd.opcode = MMC_GO_IDLE_STATE;
114	cmd.arg = 0;
115	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
116
117	err = mmc_wait_for_cmd(host, &cmd, 0);
118
119	mmc_delay(1);
120
121	if (!mmc_host_is_spi(host)) {
122		mmc_set_chip_select(host, MMC_CS_DONTCARE);
123		mmc_delay(1);
124	}
125
126	host->use_spi_crc = 0;
127
128	return err;
129}
130
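/*
 * CMD1 (SEND_OP_COND): negotiates the operating voltage.  The loop below
 * polls roughly once every 10ms, up to 100 times, until the card reports it
 * has finished its power-up sequence (busy bit set in the native OCR, or the
 * SPI idle bit cleared).
 */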
131int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
132{
133	struct mmc_command cmd = {0};
134	int i, err = 0;
135
136	BUG_ON(!host);
137
138	cmd.opcode = MMC_SEND_OP_COND;
139	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
140	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
141
142	for (i = 100; i; i--) {
143		err = mmc_wait_for_cmd(host, &cmd, 0);
144		if (err)
145			break;
146
147		/* if we're just probing, do a single pass */
148		if (ocr == 0)
149			break;
150
151		/* otherwise wait until reset completes */
152		if (mmc_host_is_spi(host)) {
153			if (!(cmd.resp[0] & R1_SPI_IDLE))
154				break;
155		} else {
156			if (cmd.resp[0] & MMC_CARD_BUSY)
157				break;
158		}
159
160		err = -ETIMEDOUT;
161
162		mmc_delay(10);
163	}
164
165	if (rocr && !mmc_host_is_spi(host))
166		*rocr = cmd.resp[0];
167
168	return err;
169}
170
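/*
 * CMD2 (ALL_SEND_CID): broadcast command with an R2 (136-bit) response; the
 * four 32-bit response words are copied straight into the caller's cid[].
 */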
171int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
172{
173	int err;
174	struct mmc_command cmd = {0};
175
176	BUG_ON(!host);
177	BUG_ON(!cid);
178
179	cmd.opcode = MMC_ALL_SEND_CID;
180	cmd.arg = 0;
181	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
182
183	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
184	if (err)
185		return err;
186
187	memcpy(cid, cmd.resp, sizeof(u32) * 4);
188
189	return 0;
190}
191
192int mmc_set_relative_addr(struct mmc_card *card)
193{
194	int err;
195	struct mmc_command cmd = {0};
196
197	BUG_ON(!card);
198	BUG_ON(!card->host);
199
200	cmd.opcode = MMC_SET_RELATIVE_ADDR;
201	cmd.arg = card->rca << 16;
202	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
203
204	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
205	if (err)
206		return err;
207
208	return 0;
209}
210
211static int
212mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
213{
214	int err;
215	struct mmc_command cmd = {0};
216
217	BUG_ON(!host);
218	BUG_ON(!cxd);
219
220	cmd.opcode = opcode;
221	cmd.arg = arg;
222	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
223
224	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
225	if (err)
226		return err;
227
228	memcpy(cxd, cmd.resp, sizeof(u32) * 4);
229
230	return 0;
231}
232
233static int
234mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
235		u32 opcode, void *buf, unsigned len)
236{
237	struct mmc_request mrq = {NULL};
238	struct mmc_command cmd = {0};
239	struct mmc_data data = {0};
240	struct scatterlist sg;
241	void *data_buf;
242
243	/* dma onto stack is unsafe/nonportable, but callers to this
244	 * routine normally provide temporary on-stack buffers ...
245	 */
246	data_buf = kmalloc(len, GFP_KERNEL);
247	if (data_buf == NULL)
248		return -ENOMEM;
249
250	mrq.cmd = &cmd;
251	mrq.data = &data;
252
253	cmd.opcode = opcode;
254	cmd.arg = 0;
255
256	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
257	 * rely on callers to never use this with "native" calls for reading
258	 * CSD or CID.  Native versions of those commands use the R2 type,
259	 * not R1 plus a data block.
260	 */
261	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
262
263	data.blksz = len;
264	data.blocks = 1;
265	data.flags = MMC_DATA_READ;
266	data.sg = &sg;
267	data.sg_len = 1;
268
269	sg_init_one(&sg, data_buf, len);
270
271	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
272		/*
273		 * The spec states that CSD and CID accesses have a timeout
274		 * of 64 clock cycles.
275		 */
276		data.timeout_ns = 0;
277		data.timeout_clks = 64;
278	} else
279		mmc_set_data_timeout(&data, card);
280
281	mmc_wait_for_req(host, &mrq);
282
283	memcpy(buf, data_buf, len);
284	kfree(data_buf);
285
286	if (cmd.error)
287		return cmd.error;
288	if (data.error)
289		return data.error;
290
291	return 0;
292}
293
294int mmc_send_csd(struct mmc_card *card, u32 *csd)
295{
296	int ret, i;
297
298	if (!mmc_host_is_spi(card->host))
299		return mmc_send_cxd_native(card->host, card->rca << 16,
300				csd, MMC_SEND_CSD);
301
302	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd, 16);
303	if (ret)
304		return ret;
305
306	for (i = 0; i < 4; i++)
307		csd[i] = be32_to_cpu(csd[i]);
308
309	return 0;
310}
311
312int mmc_send_cid(struct mmc_host *host, u32 *cid)
313{
314	int ret, i;
315
316	if (!mmc_host_is_spi(host)) {
317		if (!host->card)
318			return -EINVAL;
319		return mmc_send_cxd_native(host, host->card->rca << 16,
320				cid, MMC_SEND_CID);
321	}
322
323	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid, 16);
324	if (ret)
325		return ret;
326
327	for (i = 0; i < 4; i++)
328		cid[i] = be32_to_cpu(cid[i]);
329
330	return 0;
331}
332
333int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
334{
335	return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
336			ext_csd, 512);
337}
338
339int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
340{
341	struct mmc_command cmd = {0};
342	int err;
343
344	cmd.opcode = MMC_SPI_READ_OCR;
345	cmd.arg = highcap ? (1 << 30) : 0;
346	cmd.flags = MMC_RSP_SPI_R3;
347
348	err = mmc_wait_for_cmd(host, &cmd, 0);
349
350	*ocrp = cmd.resp[1];
351	return err;
352}
353
354int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
355{
356	struct mmc_command cmd = {0};
357	int err;
358
359	cmd.opcode = MMC_SPI_CRC_ON_OFF;
360	cmd.flags = MMC_RSP_SPI_R1;
361	cmd.arg = use_crc;
362
363	err = mmc_wait_for_cmd(host, &cmd, 0);
364	if (!err)
365		host->use_spi_crc = use_crc;
366	return err;
367}
368
369/**
370 *	mmc_switch - modify EXT_CSD register
371 *	@card: the MMC card associated with the data transfer
372 *	@set: cmd set values
373 *	@index: EXT_CSD register index
374 *	@value: value to program into EXT_CSD register
375 *	@timeout_ms: timeout (ms) for operation performed by register write,
376 *                   timeout of zero implies maximum possible timeout
377 *
378 *	Modifies the EXT_CSD register for selected card.
379 */
380int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
381	       unsigned int timeout_ms)
382{
383	int err;
384	struct mmc_command cmd = {0};
385	u32 status;
386
387	BUG_ON(!card);
388	BUG_ON(!card->host);
389
390	cmd.opcode = MMC_SWITCH;
391	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
392		  (index << 16) |
393		  (value << 8) |
394		  set;
395	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
396	cmd.cmd_timeout_ms = timeout_ms;
397
398	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
399	if (err)
400		return err;
401
402	/* Must check status to be sure of no errors */
403	do {
404		err = mmc_send_status(card, &status);
405		if (err)
406			return err;
407		if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
408			break;
409		if (mmc_host_is_spi(card->host))
410			break;
411	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
412
413	if (mmc_host_is_spi(card->host)) {
414		if (status & R1_SPI_ILLEGAL_COMMAND)
415			return -EBADMSG;
416	} else {
417		if (status & 0xFDFFA000)
418			pr_warning("%s: unexpected status %#x after "
419			       "switch\n", mmc_hostname(card->host), status);
420		if (status & R1_SWITCH_ERROR)
421			return -EBADMSG;
422	}
423
424	return 0;
425}
426EXPORT_SYMBOL_GPL(mmc_switch);
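/*
 * Illustrative use (not part of the original file): the core enables eMMC
 * high-speed timing with a call along these lines, assuming
 * card->ext_csd.generic_cmd6_time holds the CMD6 timeout in ms:
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
 *			 1, card->ext_csd.generic_cmd6_time);
 */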
427
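/*
 * CMD13 (SEND_STATUS): returns the card's 32-bit status register (or the
 * SPI-format status), used by mmc_switch() above to poll for the end of
 * programming.
 */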
428int mmc_send_status(struct mmc_card *card, u32 *status)
429{
430	int err;
431	struct mmc_command cmd = {0};
432
433	BUG_ON(!card);
434	BUG_ON(!card->host);
435
436	cmd.opcode = MMC_SEND_STATUS;
437	if (!mmc_host_is_spi(card->host))
438		cmd.arg = card->rca << 16;
439	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
440
441	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
442	if (err)
443		return err;
444
445	/* NOTE: callers are required to understand the difference
446	 * between "native" and SPI format status words!
447	 */
448	if (status)
449		*status = cmd.resp[0];
450
451	return 0;
452}
453
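/*
 * Bus testing: BUS_TEST_W (CMD19) writes a known pattern and BUS_TEST_R
 * (CMD14) reads back what the card latched.  The card is expected to return
 * the bitwise inverse of the pattern, hence the XOR-with-0xff check below.
 */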
454static int
455mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
456		  u8 len)
457{
458	struct mmc_request mrq = {NULL};
459	struct mmc_command cmd = {0};
460	struct mmc_data data = {0};
461	struct scatterlist sg;
462	u8 *data_buf;
463	u8 *test_buf;
464	int i, err;
465	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
466	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
467
468	/* dma onto stack is unsafe/nonportable, but callers to this
469	 * routine normally provide temporary on-stack buffers ...
470	 */
471	data_buf = kmalloc(len, GFP_KERNEL);
472	if (!data_buf)
473		return -ENOMEM;
474
475	if (len == 8)
476		test_buf = testdata_8bit;
477	else if (len == 4)
478		test_buf = testdata_4bit;
479	else {
480		pr_err("%s: Invalid bus_width %d\n",
481		       mmc_hostname(host), len);
482		kfree(data_buf);
483		return -EINVAL;
484	}
485
486	if (opcode == MMC_BUS_TEST_W)
487		memcpy(data_buf, test_buf, len);
488
489	mrq.cmd = &cmd;
490	mrq.data = &data;
491	cmd.opcode = opcode;
492	cmd.arg = 0;
493
494	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
495	 * rely on callers to never use this with "native" calls for reading
496	 * CSD or CID.  Native versions of those commands use the R2 type,
497	 * not R1 plus a data block.
498	 */
499	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
500
501	data.blksz = len;
502	data.blocks = 1;
503	if (opcode == MMC_BUS_TEST_R)
504		data.flags = MMC_DATA_READ;
505	else
506		data.flags = MMC_DATA_WRITE;
507
508	data.sg = &sg;
509	data.sg_len = 1;
510	sg_init_one(&sg, data_buf, len);
511	mmc_wait_for_req(host, &mrq);
512	err = 0;
513	if (opcode == MMC_BUS_TEST_R) {
514		for (i = 0; i < len / 4; i++)
515			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
516				err = -EIO;
517				break;
518			}
519	}
520	kfree(data_buf);
521
522	if (cmd.error)
523		return cmd.error;
524	if (data.error)
525		return data.error;
526
527	return err;
528}
529
530int mmc_bus_test(struct mmc_card *card, u8 bus_width)
531{
532	int err, width;
533
534	if (bus_width == MMC_BUS_WIDTH_8)
535		width = 8;
536	else if (bus_width == MMC_BUS_WIDTH_4)
537		width = 4;
538	else if (bus_width == MMC_BUS_WIDTH_1)
539		return 0; /* no need for test */
540	else
541		return -EINVAL;
542
543	/*
544	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
545	 * is a problem.  This improves chances that the test will work.
546	 */
547	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
548	err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
549	return err;
550}
551
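/*
 * HPI (high priority interrupt): interrupts an ongoing foreground operation.
 * The command to use (CMD12 or CMD13) is advertised by the card in EXT_CSD;
 * bit 0 of the argument is the HPI flag, set alongside the RCA.
 */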
552int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
553{
554	struct mmc_command cmd = {0};
555	unsigned int opcode;
556	int err;
557
558	if (!card->ext_csd.hpi) {
559		pr_warning("%s: Card didn't support HPI command\n",
560			   mmc_hostname(card->host));
561		return -EINVAL;
562	}
563
564	opcode = card->ext_csd.hpi_cmd;
565	if (opcode == MMC_STOP_TRANSMISSION)
566		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
567	else if (opcode == MMC_SEND_STATUS)
568		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
569
570	cmd.opcode = opcode;
571	cmd.arg = card->rca << 16 | 1;
572	cmd.cmd_timeout_ms = card->ext_csd.out_of_int_time;
573
574	err = mmc_wait_for_cmd(card->host, &cmd, 0);
575	if (err) {
576		pr_warn("%s: error %d interrupting operation. "
577			"HPI command response %#x\n", mmc_hostname(card->host),
578			err, cmd.resp[0]);
579		return err;
580	}
581	if (status)
582		*status = cmd.resp[0];
583
584	return 0;
585}
v4.10.11
  1/*
  2 *  linux/drivers/mmc/core/mmc_ops.h
  3 *
  4 *  Copyright 2006-2007 Pierre Ossman
  5 *
  6 * This program is free software; you can redistribute it and/or modify
  7 * it under the terms of the GNU General Public License as published by
  8 * the Free Software Foundation; either version 2 of the License, or (at
  9 * your option) any later version.
 10 */
 11
 12#include <linux/slab.h>
 13#include <linux/export.h>
 14#include <linux/types.h>
 15#include <linux/scatterlist.h>
 16
 17#include <linux/mmc/host.h>
 18#include <linux/mmc/card.h>
 19#include <linux/mmc/mmc.h>
 20
 21#include "core.h"
 22#include "host.h"
 23#include "mmc_ops.h"
 24
 25#define MMC_OPS_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */
 26
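/*
 * Tuning block patterns defined by the SD/eMMC specifications; the data read
 * back by CMD19/CMD21 in mmc_send_tuning() below is compared against these.
 */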
 27static const u8 tuning_blk_pattern_4bit[] = {
 28	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
 29	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
 30	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
 31	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
 32	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
 33	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
 34	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
 35	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
 36};
 37
 38static const u8 tuning_blk_pattern_8bit[] = {
 39	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
 40	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
 41	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
 42	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
 43	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
 44	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
 45	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
 46	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
 47	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
 48	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
 49	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
 50	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
 51	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
 52	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
 53	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
 54	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
 55};
 56
 57int mmc_send_status(struct mmc_card *card, u32 *status)
 58{
 59	int err;
 60	struct mmc_command cmd = {0};
 61
 62	cmd.opcode = MMC_SEND_STATUS;
 63	if (!mmc_host_is_spi(card->host))
 64		cmd.arg = card->rca << 16;
 65	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
 66
 67	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
 68	if (err)
 69		return err;
 70
 71	/* NOTE: callers are required to understand the difference
 72	 * between "native" and SPI format status words!
 73	 */
 74	if (status)
 75		*status = cmd.resp[0];
 76
 77	return 0;
 78}
 79
 80static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
 81{
 82	struct mmc_command cmd = {0};
 83
 84	cmd.opcode = MMC_SELECT_CARD;
 85
 86	if (card) {
 87		cmd.arg = card->rca << 16;
 88		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 89	} else {
 90		cmd.arg = 0;
 91		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
 92	}
 93
 94	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
 95}
 96
 97int mmc_select_card(struct mmc_card *card)
 98{
 99
100	return _mmc_select_card(card->host, card);
101}
102
103int mmc_deselect_cards(struct mmc_host *host)
104{
105	return _mmc_select_card(host, NULL);
106}
107
108/*
109 * Write the value specified in the device tree or board code into the optional
110 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
111 * drive strength of the DAT and CMD outputs. The actual meaning of a given
112 * value is hardware dependent.
113 * The presence of the DSR register can be determined from the CSD register,
114 * bit 76.
115 */
116int mmc_set_dsr(struct mmc_host *host)
117{
118	struct mmc_command cmd = {0};
119
120	cmd.opcode = MMC_SET_DSR;
121
122	cmd.arg = (host->dsr << 16) | 0xffff;
123	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
124
125	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
126}
127
128int mmc_go_idle(struct mmc_host *host)
129{
130	int err;
131	struct mmc_command cmd = {0};
132
133	/*
134	 * Non-SPI hosts need to prevent chipselect going active during
135	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
136	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
137	 *
138	 * SPI hosts ignore ios.chip_select; it's managed according to
139	 * rules that must accommodate non-MMC slaves which this layer
140	 * won't even know about.
141	 */
142	if (!mmc_host_is_spi(host)) {
143		mmc_set_chip_select(host, MMC_CS_HIGH);
144		mmc_delay(1);
145	}
146
147	cmd.opcode = MMC_GO_IDLE_STATE;
148	cmd.arg = 0;
149	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
150
151	err = mmc_wait_for_cmd(host, &cmd, 0);
152
153	mmc_delay(1);
154
155	if (!mmc_host_is_spi(host)) {
156		mmc_set_chip_select(host, MMC_CS_DONTCARE);
157		mmc_delay(1);
158	}
159
160	host->use_spi_crc = 0;
161
162	return err;
163}
164
165int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
166{
167	struct mmc_command cmd = {0};
168	int i, err = 0;
169
170	cmd.opcode = MMC_SEND_OP_COND;
171	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
172	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
173
174	for (i = 100; i; i--) {
175		err = mmc_wait_for_cmd(host, &cmd, 0);
176		if (err)
177			break;
178
179		/* if we're just probing, do a single pass */
180		if (ocr == 0)
181			break;
182
183		/* otherwise wait until reset completes */
184		if (mmc_host_is_spi(host)) {
185			if (!(cmd.resp[0] & R1_SPI_IDLE))
186				break;
187		} else {
188			if (cmd.resp[0] & MMC_CARD_BUSY)
189				break;
190		}
191
192		err = -ETIMEDOUT;
193
194		mmc_delay(10);
195	}
196
197	if (rocr && !mmc_host_is_spi(host))
198		*rocr = cmd.resp[0];
199
200	return err;
201}
202
203int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
204{
205	int err;
206	struct mmc_command cmd = {0};
207
208	cmd.opcode = MMC_ALL_SEND_CID;
209	cmd.arg = 0;
210	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
211
212	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
213	if (err)
214		return err;
215
216	memcpy(cid, cmd.resp, sizeof(u32) * 4);
217
218	return 0;
219}
220
221int mmc_set_relative_addr(struct mmc_card *card)
222{
223	struct mmc_command cmd = {0};
224
225	cmd.opcode = MMC_SET_RELATIVE_ADDR;
226	cmd.arg = card->rca << 16;
227	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
228
229	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
230}
231
232static int
233mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
234{
235	int err;
236	struct mmc_command cmd = {0};
237
238	cmd.opcode = opcode;
239	cmd.arg = arg;
240	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
241
242	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
243	if (err)
244		return err;
245
246	memcpy(cxd, cmd.resp, sizeof(u32) * 4);
247
248	return 0;
249}
250
251/*
252 * NOTE: void *buf, caller for the buf is required to use DMA-capable
253 * buffer or on-stack buffer (with some overhead in callee).
254 */
255static int
256mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
257		u32 opcode, void *buf, unsigned len)
258{
259	struct mmc_request mrq = {NULL};
260	struct mmc_command cmd = {0};
261	struct mmc_data data = {0};
262	struct scatterlist sg;
263
264	mrq.cmd = &cmd;
265	mrq.data = &data;
266
267	cmd.opcode = opcode;
268	cmd.arg = 0;
269
270	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
271	 * rely on callers to never use this with "native" calls for reading
272	 * CSD or CID.  Native versions of those commands use the R2 type,
273	 * not R1 plus a data block.
274	 */
275	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
276
277	data.blksz = len;
278	data.blocks = 1;
279	data.flags = MMC_DATA_READ;
280	data.sg = &sg;
281	data.sg_len = 1;
282
283	sg_init_one(&sg, buf, len);
284
285	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
286		/*
287		 * The spec states that CSD and CID accesses have a timeout
288		 * of 64 clock cycles.
289		 */
290		data.timeout_ns = 0;
291		data.timeout_clks = 64;
292	} else
293		mmc_set_data_timeout(&data, card);
294
295	mmc_wait_for_req(host, &mrq);
296
297	if (cmd.error)
298		return cmd.error;
299	if (data.error)
300		return data.error;
301
302	return 0;
303}
304
305int mmc_send_csd(struct mmc_card *card, u32 *csd)
306{
307	int ret, i;
308	u32 *csd_tmp;
309
310	if (!mmc_host_is_spi(card->host))
311		return mmc_send_cxd_native(card->host, card->rca << 16,
312				csd, MMC_SEND_CSD);
313
314	csd_tmp = kzalloc(16, GFP_KERNEL);
315	if (!csd_tmp)
316		return -ENOMEM;
317
318	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
319	if (ret)
320		goto err;
321
322	for (i = 0; i < 4; i++)
323		csd[i] = be32_to_cpu(csd_tmp[i]);
324
325err:
326	kfree(csd_tmp);
327	return ret;
328}
329
330int mmc_send_cid(struct mmc_host *host, u32 *cid)
331{
332	int ret, i;
333	u32 *cid_tmp;
334
335	if (!mmc_host_is_spi(host)) {
336		if (!host->card)
337			return -EINVAL;
338		return mmc_send_cxd_native(host, host->card->rca << 16,
339				cid, MMC_SEND_CID);
340	}
341
342	cid_tmp = kzalloc(16, GFP_KERNEL);
343	if (!cid_tmp)
344		return -ENOMEM;
345
346	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
347	if (ret)
348		goto err;
349
350	for (i = 0; i < 4; i++)
351		cid[i] = be32_to_cpu(cid_tmp[i]);
352
353err:
354	kfree(cid_tmp);
355	return ret;
356}
357
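/*
 * Reads the 512-byte EXT_CSD register via CMD8 (MMC_SEND_EXT_CSD) into a
 * freshly allocated buffer.  On success the caller owns *new_ext_csd and is
 * responsible for freeing it with kfree().
 */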
358int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
359{
360	int err;
361	u8 *ext_csd;
362
363	if (!card || !new_ext_csd)
364		return -EINVAL;
365
366	if (!mmc_can_ext_csd(card))
367		return -EOPNOTSUPP;
368
369	/*
370	 * As the ext_csd is so large and mostly unused, we don't store the
371	 * raw block in mmc_card.
372	 */
373	ext_csd = kzalloc(512, GFP_KERNEL);
374	if (!ext_csd)
375		return -ENOMEM;
376
377	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
378				512);
379	if (err)
380		kfree(ext_csd);
381	else
382		*new_ext_csd = ext_csd;
383
384	return err;
385}
386EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
387
388int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
389{
390	struct mmc_command cmd = {0};
391	int err;
392
393	cmd.opcode = MMC_SPI_READ_OCR;
394	cmd.arg = highcap ? (1 << 30) : 0;
395	cmd.flags = MMC_RSP_SPI_R3;
396
397	err = mmc_wait_for_cmd(host, &cmd, 0);
398
399	*ocrp = cmd.resp[1];
400	return err;
401}
402
403int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
404{
405	struct mmc_command cmd = {0};
406	int err;
407
408	cmd.opcode = MMC_SPI_CRC_ON_OFF;
409	cmd.flags = MMC_RSP_SPI_R1;
410	cmd.arg = use_crc;
411
412	err = mmc_wait_for_cmd(host, &cmd, 0);
413	if (!err)
414		host->use_spi_crc = use_crc;
415	return err;
416}
417
418static int mmc_switch_status_error(struct mmc_host *host, u32 status)
419{
420	if (mmc_host_is_spi(host)) {
421		if (status & R1_SPI_ILLEGAL_COMMAND)
422			return -EBADMSG;
423	} else {
424		if (status & 0xFDFFA000)
425			pr_warn("%s: unexpected status %#x after switch\n",
426				mmc_hostname(host), status);
427		if (status & R1_SWITCH_ERROR)
428			return -EBADMSG;
429	}
430	return 0;
431}
432
433/* Caller must hold re-tuning */
434int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
435{
436	u32 status;
437	int err;
438
439	err = mmc_send_status(card, &status);
440	if (!crc_err_fatal && err == -EILSEQ)
441		return 0;
442	if (err)
443		return err;
444
445	return mmc_switch_status_error(card->host, status);
446}
447
448int mmc_switch_status(struct mmc_card *card)
449{
450	return __mmc_switch_status(card, true);
451}
452
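/*
 * Polls for the end of a busy period after a switch: prefer the host's
 * ->card_busy() callback when available, otherwise fall back to CMD13, and
 * give up with -ETIMEDOUT once the (possibly fallback) timeout expires.
 */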
453static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
454			bool send_status, bool retry_crc_err)
455{
456	struct mmc_host *host = card->host;
457	int err;
458	unsigned long timeout;
459	u32 status = 0;
460	bool expired = false;
461	bool busy = false;
462
463	/* We have an unspecified cmd timeout, use the fallback value. */
464	if (!timeout_ms)
465		timeout_ms = MMC_OPS_TIMEOUT_MS;
466
467	/*
468	 * When we aren't allowed to poll by using CMD13 and the host isn't
469	 * capable of polling by using ->card_busy(), rely on waiting the
470	 * stated timeout to be sufficient.
471	 */
472	if (!send_status && !host->ops->card_busy) {
473		mmc_delay(timeout_ms);
474		return 0;
475	}
476
477	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
478	do {
479		/*
480		 * Due to the possibility of being preempted while polling,
481		 * check the expiration time first.
482		 */
483		expired = time_after(jiffies, timeout);
484
485		if (host->ops->card_busy) {
486			busy = host->ops->card_busy(host);
487		} else {
488			err = mmc_send_status(card, &status);
489			if (retry_crc_err && err == -EILSEQ) {
490				busy = true;
491			} else if (err) {
492				return err;
493			} else {
494				err = mmc_switch_status_error(host, status);
495				if (err)
496					return err;
497				busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
498			}
499		}
500
501		/* Timeout if the device still remains busy. */
502		if (expired && busy) {
503			pr_err("%s: Card stuck being busy! %s\n",
504				mmc_hostname(host), __func__);
505			return -ETIMEDOUT;
506		}
507	} while (busy);
508
509	return 0;
510}
511
512/**
513 *	__mmc_switch - modify EXT_CSD register
514 *	@card: the MMC card associated with the data transfer
515 *	@set: cmd set values
516 *	@index: EXT_CSD register index
517 *	@value: value to program into EXT_CSD register
518 *	@timeout_ms: timeout (ms) for operation performed by register write,
519 *                   timeout of zero implies maximum possible timeout
520 *	@timing: new timing to change to
521 *	@use_busy_signal: use the busy signal as response type
522 *	@send_status: send status cmd to poll for busy
523 *	@retry_crc_err: retry if a CRC error occurs while polling with CMD13 for busy
524 *
525 *	Modifies the EXT_CSD register for selected card.
526 */
527int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
528		unsigned int timeout_ms, unsigned char timing,
529		unsigned int timeout_ms, unsigned char timing,
530		bool use_busy_signal, bool send_status, bool retry_crc_err)
530{
531	struct mmc_host *host = card->host;
532	int err;
533	struct mmc_command cmd = {0};
534	bool use_r1b_resp = use_busy_signal;
535	unsigned char old_timing = host->ios.timing;
536
537	mmc_retune_hold(host);
538
539	/*
540	 * If the cmd timeout and the max_busy_timeout of the host are both
541	 * specified, let's validate them. A failure means we need to prevent
542	 * the host from doing hw busy detection, which is done by converting
543	 * to a R1 response instead of a R1B.
544	 */
545	if (timeout_ms && host->max_busy_timeout &&
546		(timeout_ms > host->max_busy_timeout))
547		use_r1b_resp = false;
548
549	cmd.opcode = MMC_SWITCH;
550	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
551		  (index << 16) |
552		  (value << 8) |
553		  set;
554	cmd.flags = MMC_CMD_AC;
555	if (use_r1b_resp) {
556		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
557		/*
558		 * A busy_timeout of zero means the host can decide to use
559		 * whatever value it finds suitable.
560		 */
561		cmd.busy_timeout = timeout_ms;
562	} else {
563		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
564	}
565
566	if (index == EXT_CSD_SANITIZE_START)
567		cmd.sanitize_busy = true;
568
569	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
570	if (err)
571		goto out;
572
573	/* No need to check card status in case of unblocking command */
574	if (!use_busy_signal)
575		goto out;
576
577	/* If SPI, or if HW busy detection was used above, we don't need to poll. */
578	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
579		mmc_host_is_spi(host))
580		goto out_tim;
581
582	/* Let's try to poll to find out when the command is completed. */
583	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
584	if (err)
585		goto out;
586
587out_tim:
588	/* Switch to new timing before checking switch status. */
589	if (timing)
590		mmc_set_timing(host, timing);
591
592	if (send_status) {
593		err = mmc_switch_status(card);
594		if (err && timing)
595			mmc_set_timing(host, old_timing);
596	}
597out:
598	mmc_retune_release(host);
599
600	return err;
601}
602
603int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
604		unsigned int timeout_ms)
605{
606	return __mmc_switch(card, set, index, value, timeout_ms, 0,
607			true, true, false);
608}
609EXPORT_SYMBOL_GPL(mmc_switch);
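/*
 * Illustrative use (not part of the original file): a caller switching the
 * bus width might do something like the following, assuming
 * card->ext_csd.generic_cmd6_time holds the CMD6 timeout in ms:
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
 *			 EXT_CSD_BUS_WIDTH_4, card->ext_csd.generic_cmd6_time);
 */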
610
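/*
 * CMD19/CMD21 tuning: reads one tuning block using the bus width currently
 * configured in host->ios and compares it against the expected pattern.
 * Host drivers typically call this from their ->execute_tuning() callback.
 */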
611int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
612{
613	struct mmc_request mrq = {NULL};
614	struct mmc_command cmd = {0};
615	struct mmc_data data = {0};
616	struct scatterlist sg;
617	struct mmc_ios *ios = &host->ios;
618	const u8 *tuning_block_pattern;
619	int size, err = 0;
620	u8 *data_buf;
621
622	if (ios->bus_width == MMC_BUS_WIDTH_8) {
623		tuning_block_pattern = tuning_blk_pattern_8bit;
624		size = sizeof(tuning_blk_pattern_8bit);
625	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
626		tuning_block_pattern = tuning_blk_pattern_4bit;
627		size = sizeof(tuning_blk_pattern_4bit);
628	} else
629		return -EINVAL;
630
631	data_buf = kzalloc(size, GFP_KERNEL);
632	if (!data_buf)
633		return -ENOMEM;
634
635	mrq.cmd = &cmd;
636	mrq.data = &data;
637
638	cmd.opcode = opcode;
639	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
640
641	data.blksz = size;
642	data.blocks = 1;
643	data.flags = MMC_DATA_READ;
644
645	/*
646	 * According to the tuning specs, the tuning process
647	 * normally takes fewer than 40 executions of CMD19,
648	 * and the timeout value should be shorter than 150 ms.
649	 */
650	data.timeout_ns = 150 * NSEC_PER_MSEC;
651
652	data.sg = &sg;
653	data.sg_len = 1;
654	sg_init_one(&sg, data_buf, size);
655
656	mmc_wait_for_req(host, &mrq);
657
658	if (cmd_error)
659		*cmd_error = cmd.error;
660
661	if (cmd.error) {
662		err = cmd.error;
663		goto out;
664	}
665
666	if (data.error) {
667		err = data.error;
668		goto out;
669	}
670
671	if (memcmp(data_buf, tuning_block_pattern, size))
672		err = -EIO;
673
674out:
675	kfree(data_buf);
676	return err;
677}
678EXPORT_SYMBOL_GPL(mmc_send_tuning);
679
680int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
681{
682	struct mmc_command cmd = {0};
683
684	/*
685	 * eMMC specification specifies that CMD12 can be used to stop a tuning
686	 * command, but SD specification does not, so do nothing unless it is
687	 * eMMC.
688	 */
689	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
690		return 0;
691
692	cmd.opcode = MMC_STOP_TRANSMISSION;
693	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
694
695	/*
696	 * For drivers that override R1 to R1b, set an arbitrary timeout based
697	 * on the tuning timeout i.e. 150ms.
698	 */
699	cmd.busy_timeout = 150;
700
701	return mmc_wait_for_cmd(host, &cmd, 0);
702}
703EXPORT_SYMBOL_GPL(mmc_abort_tuning);
704
705static int
706mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
707		  u8 len)
708{
709	struct mmc_request mrq = {NULL};
710	struct mmc_command cmd = {0};
711	struct mmc_data data = {0};
712	struct scatterlist sg;
713	u8 *data_buf;
714	u8 *test_buf;
715	int i, err;
716	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
717	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
718
719	/* dma onto stack is unsafe/nonportable, but callers to this
720	 * routine normally provide temporary on-stack buffers ...
721	 */
722	data_buf = kmalloc(len, GFP_KERNEL);
723	if (!data_buf)
724		return -ENOMEM;
725
726	if (len == 8)
727		test_buf = testdata_8bit;
728	else if (len == 4)
729		test_buf = testdata_4bit;
730	else {
731		pr_err("%s: Invalid bus_width %d\n",
732		       mmc_hostname(host), len);
733		kfree(data_buf);
734		return -EINVAL;
735	}
736
737	if (opcode == MMC_BUS_TEST_W)
738		memcpy(data_buf, test_buf, len);
739
740	mrq.cmd = &cmd;
741	mrq.data = &data;
742	cmd.opcode = opcode;
743	cmd.arg = 0;
744
745	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
746	 * rely on callers to never use this with "native" calls for reading
747	 * CSD or CID.  Native versions of those commands use the R2 type,
748	 * not R1 plus a data block.
749	 */
750	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
751
752	data.blksz = len;
753	data.blocks = 1;
754	if (opcode == MMC_BUS_TEST_R)
755		data.flags = MMC_DATA_READ;
756	else
757		data.flags = MMC_DATA_WRITE;
758
759	data.sg = &sg;
760	data.sg_len = 1;
761	mmc_set_data_timeout(&data, card);
762	sg_init_one(&sg, data_buf, len);
763	mmc_wait_for_req(host, &mrq);
764	err = 0;
765	if (opcode == MMC_BUS_TEST_R) {
766		for (i = 0; i < len / 4; i++)
767			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
768				err = -EIO;
769				break;
770			}
771	}
772	kfree(data_buf);
773
774	if (cmd.error)
775		return cmd.error;
776	if (data.error)
777		return data.error;
778
779	return err;
780}
781
782int mmc_bus_test(struct mmc_card *card, u8 bus_width)
783{
784	int width;
785
786	if (bus_width == MMC_BUS_WIDTH_8)
787		width = 8;
788	else if (bus_width == MMC_BUS_WIDTH_4)
789		width = 4;
790	else if (bus_width == MMC_BUS_WIDTH_1)
791		return 0; /* no need for test */
792	else
793		return -EINVAL;
794
795	/*
796	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
797	 * is a problem.  This improves chances that the test will work.
798	 */
799	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
800	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
801}
802
803int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
804{
805	struct mmc_command cmd = {0};
806	unsigned int opcode;
807	int err;
808
809	if (!card->ext_csd.hpi) {
810		pr_warn("%s: Card didn't support HPI command\n",
811			mmc_hostname(card->host));
812		return -EINVAL;
813	}
814
815	opcode = card->ext_csd.hpi_cmd;
816	if (opcode == MMC_STOP_TRANSMISSION)
817		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
818	else if (opcode == MMC_SEND_STATUS)
819		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
820
821	cmd.opcode = opcode;
822	cmd.arg = card->rca << 16 | 1;
823
824	err = mmc_wait_for_cmd(card->host, &cmd, 0);
825	if (err) {
826		pr_warn("%s: error %d interrupting operation. "
827			"HPI command response %#x\n", mmc_hostname(card->host),
828			err, cmd.resp[0]);
829		return err;
830	}
831	if (status)
832		*status = cmd.resp[0];
833
834	return 0;
835}
836
837int mmc_can_ext_csd(struct mmc_card *card)
838{
839	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
840}