v3.15

/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "mmc_ops.h"

#define MMC_OPS_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */

static inline int __mmc_send_status(struct mmc_card *card, u32 *status,
				    bool ignore_crc)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!card);
	BUG_ON(!card->host);

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	if (ignore_crc)
		cmd.flags &= ~MMC_RSP_CRC;

	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, false);
}
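
/*
 * Example (editor's illustrative sketch, not part of this file): a caller
 * that must wait for the card to leave the programming state can poll
 * CMD13 via mmc_send_status(). The function name is hypothetical; only
 * mmc_send_status() and the R1 state macros come from the kernel.
 */
static int example_wait_for_tran(struct mmc_card *card)
{
	u32 status;
	int err;

	do {
		err = mmc_send_status(card, &status);
		if (err)
			return err;
		/* R1_CURRENT_STATE() extracts the 4-bit state field */
	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);

	return 0;
}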
 
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!host);

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	return 0;
}

int mmc_select_card(struct mmc_card *card)
{
	BUG_ON(!card);

	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {0};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {0};
	int i, err = 0;

	BUG_ON(!host);

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* if we're just probing, do a single pass */
		if (ocr == 0)
			break;

		/* otherwise wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
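
/*
 * Example (editor's illustrative sketch, not part of this file): a probe
 * path typically resets the card with CMD0 and then reads the OCR with a
 * single CMD1 pass, as the "just probing" branch above allows. The
 * function name is hypothetical.
 */
static int example_probe_ocr(struct mmc_host *host, u32 *ocr)
{
	int err;

	err = mmc_go_idle(host);	/* CMD0: back to idle state */
	if (err)
		return err;

	/* ocr == 0 means probe only: one CMD1 pass, no busy wait */
	return mmc_send_op_cond(host, 0, ocr);
}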

int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!host);
	BUG_ON(!cid);

	cmd.opcode = MMC_ALL_SEND_CID;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cid, cmd.resp, sizeof(u32) * 4);

	return 0;
}

int mmc_set_relative_addr(struct mmc_card *card)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!card);
	BUG_ON(!card->host);

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	return 0;
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!host);
	BUG_ON(!cxd);

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: the caller is required to pass either a DMA-capable buffer or an
 * on-stack buffer (handled with some overhead in the callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	void *data_buf;
	int is_on_stack;

	is_on_stack = object_is_on_stack(buf);
	if (is_on_stack) {
		/*
		 * dma onto stack is unsafe/nonportable, but callers to this
		 * routine normally provide temporary on-stack buffers ...
		 */
		data_buf = kmalloc(len, GFP_KERNEL);
		if (!data_buf)
			return -ENOMEM;
	} else
		data_buf = buf;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, data_buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (is_on_stack) {
		memcpy(buf, data_buf, len);
		kfree(data_buf);
	}

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	int ret, i;
	u32 *csd_tmp;

	if (!mmc_host_is_spi(card->host))
		return mmc_send_cxd_native(card->host, card->rca << 16,
				csd, MMC_SEND_CSD);

	csd_tmp = kmalloc(16, GFP_KERNEL);
	if (!csd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		csd[i] = be32_to_cpu(csd_tmp[i]);

err:
	kfree(csd_tmp);
	return ret;
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	int ret, i;
	u32 *cid_tmp;

	if (!mmc_host_is_spi(host)) {
		if (!host->card)
			return -EINVAL;
		return mmc_send_cxd_native(host, host->card->rca << 16,
				cid, MMC_SEND_CID);
	}

	cid_tmp = kmalloc(16, GFP_KERNEL);
	if (!cid_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cid[i] = be32_to_cpu(cid_tmp[i]);

err:
	kfree(cid_tmp);
	return ret;
}

int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
			ext_csd, 512);
}
EXPORT_SYMBOL_GPL(mmc_send_ext_csd);
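
/*
 * Example (editor's illustrative sketch, not part of this file): reading
 * EXT_CSD into a kmalloc'd buffer, i.e. the DMA-capable memory that the
 * comment above mmc_send_cxd_data() asks for. The function name is
 * hypothetical; EXT_CSD_REV is byte 192 of the register.
 */
static int example_read_ext_csd(struct mmc_card *card)
{
	u8 *ext_csd;
	int err;

	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_ext_csd(card, ext_csd);
	if (!err)
		pr_info("%s: EXT_CSD rev %u\n",
			mmc_hostname(card->host), ext_csd[EXT_CSD_REV]);

	kfree(ext_csd);
	return err;
}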

int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

/**
 *	__mmc_switch - modify EXT_CSD register
 *	@card: the MMC card associated with the data transfer
 *	@set: cmd set values
 *	@index: EXT_CSD register index
 *	@value: value to program into EXT_CSD register
 *	@timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *	@use_busy_signal: use the busy signal as response type
 *	@send_status: send status cmd to poll for busy
 *	@ignore_crc: ignore CRC errors when sending status cmd to poll for busy
 *
 *	Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, bool use_busy_signal, bool send_status,
		bool ignore_crc)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {0};
	unsigned long timeout;
	u32 status = 0;
	bool use_r1b_resp = use_busy_signal;

	/*
	 * If the cmd timeout and the max_busy_timeout of the host are both
	 * specified, let's validate them. A failure means we need to prevent
	 * the host from doing hw busy detection, which is done by converting
	 * to a R1 response instead of a R1B.
	 */
	if (timeout_ms && host->max_busy_timeout &&
		(timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		/*
		 * A busy_timeout of zero means the host can decide to use
		 * whatever value it finds suitable.
		 */
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		return 0;

	/*
	 * CRC errors shall only be ignored in cases where CMD13 is used to
	 * poll to detect busy completion.
	 */
	if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
		ignore_crc = false;

	/* We have an unspecified cmd timeout, use the fallback value. */
	if (!timeout_ms)
		timeout_ms = MMC_OPS_TIMEOUT_MS;

	/* Must check status to be sure of no errors. */
	timeout = jiffies + msecs_to_jiffies(timeout_ms);
	do {
		if (send_status) {
			err = __mmc_send_status(card, &status, ignore_crc);
			if (err)
				return err;
		}
		if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
			break;
		if (mmc_host_is_spi(host))
			break;

		/*
		 * If we are not allowed to issue a status command and the
		 * host doesn't support MMC_CAP_WAIT_WHILE_BUSY, we can only
		 * rely on waiting for the stated timeout to be sufficient.
		 */
		if (!send_status) {
			mmc_delay(timeout_ms);
			return 0;
		}

		/* Timeout if the device never leaves the program state. */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);

	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (status & 0xFDFFA000)
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_switch);

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, true, true,
				false);
}
EXPORT_SYMBOL_GPL(mmc_switch);
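
/*
 * Example (editor's illustrative sketch, not part of this file): a typical
 * CMD6 user switches the card to a 4-bit bus and then reprograms the host
 * to match. The function name is hypothetical; the EXT_CSD macros and
 * mmc_set_bus_width() are the kernel's own.
 */
static int example_set_bus_width_4(struct mmc_card *card)
{
	int err;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_4,
			 card->ext_csd.generic_cmd6_time);
	if (err)
		return err;

	mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
	return 0;
}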

static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int err, width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
	 * is a problem.  This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
	return err;
}
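
/*
 * Example (editor's illustrative sketch, not part of this file): bus
 * testing usually follows a bus-width switch, to verify all data lines
 * work before the new width is trusted. Builds on the hypothetical
 * example_set_bus_width_4() sketch above.
 */
static int example_try_wide_bus(struct mmc_card *card)
{
	int err;

	err = example_set_bus_width_4(card);
	if (err)
		return err;

	/* BUS_TEST_W then BUS_TEST_R; 0 means every lane checked out */
	return mmc_bus_test(card, MMC_BUS_WIDTH_4);
}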

int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	unsigned int opcode;
	int err;

	if (!card->ext_csd.hpi) {
		pr_warning("%s: Card doesn't support HPI command\n",
			   mmc_hostname(card->host));
		return -EINVAL;
	}

	opcode = card->ext_csd.hpi_cmd;
	if (opcode == MMC_STOP_TRANSMISSION)
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	else if (opcode == MMC_SEND_STATUS)
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = opcode;
	cmd.arg = card->rca << 16 | 1;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_warn("%s: error %d interrupting operation. "
			"HPI command response %#x\n", mmc_hostname(card->host),
			err, cmd.resp[0]);
		return err;
	}
	if (status)
		*status = cmd.resp[0];

	return 0;
}

v5.14.15
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
#define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */

static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

struct mmc_busy_data {
	struct mmc_card *card;
	bool retry_crc_err;
	enum mmc_busy_cmd busy_cmd;
};

int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
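
/*
 * Example (editor's illustrative sketch, not part of this file): the DSR
 * is only written when the host actually declared one (host->dsr_req) and
 * the card advertises DSR support in its CSD. The function name is
 * hypothetical.
 */
static void example_maybe_set_dsr(struct mmc_host *host,
				  struct mmc_card *card)
{
	if (card->csd.dsr_imp && host->dsr_req &&
	    mmc_set_dsr(host))
		pr_warn("%s: DSR write failed\n", mmc_hostname(host));
}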

int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);

		/*
		 * According to eMMC specification v5.1 section 6.4.3, we
		 * should issue CMD1 repeatedly in the idle state until
		 * the eMMC is ready. Otherwise some eMMC devices seem to enter
		 * the inactive mode after mmc_init_card() issued CMD0 when
		 * the eMMC device is busy.
		 */
		if (!ocr && !mmc_host_is_spi(host))
			cmd.arg = cmd.resp[0] | BIT(30);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}

int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: the caller is required to provide a DMA-capable buffer for buf;
 * this routine maps it for data transfer directly.
 */
int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
		       u32 args, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = args;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
{
	int ret, i;
	__be32 *cxd_tmp;

	cxd_tmp = kzalloc(16, GFP_KERNEL);
	if (!cxd_tmp)
		return -ENOMEM;

	ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cxd[i] = be32_to_cpu(cxd_tmp[i]);

err:
	kfree(cxd_tmp);
	return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				MMC_SEND_CSD);
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0, ext_csd,
				512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
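
/*
 * Example (editor's illustrative sketch, not part of this file):
 * mmc_get_ext_csd() hands back a freshly allocated 512-byte buffer that
 * the caller must kfree(). The function name is hypothetical.
 */
static int example_query_bkops_status(struct mmc_card *card, u8 *status)
{
	u8 *ext_csd;
	int err;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	*status = ext_csd[EXT_CSD_BKOPS_STATUS];
	kfree(ext_csd);
	return 0;
}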

int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (R1_STATUS(status))
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

/* Caller must hold re-tuning */
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

static int mmc_busy_cb(void *cb_data, bool *busy)
{
	struct mmc_busy_data *data = cb_data;
	struct mmc_host *host = data->card->host;
	u32 status = 0;
	int err;

	if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
		*busy = host->ops->card_busy(host);
		return 0;
	}

	err = mmc_send_status(data->card, &status);
	if (data->retry_crc_err && err == -EILSEQ) {
		*busy = true;
		return 0;
	}
	if (err)
		return err;

	switch (data->busy_cmd) {
	case MMC_BUSY_CMD6:
		err = mmc_switch_status_error(host, status);
		break;
	case MMC_BUSY_ERASE:
		err = R1_STATUS(status) ? -EIO : 0;
		break;
	case MMC_BUSY_HPI:
	case MMC_BUSY_EXTR_SINGLE:
	case MMC_BUSY_IO:
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	*busy = !mmc_ready_for_data(status);
	return 0;
}

int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			int (*busy_cb)(void *cb_data, bool *busy),
			void *cb_data)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	unsigned int udelay = 32, udelay_max = 32768;
	bool expired = false;
	bool busy = false;

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		err = (*busy_cb)(cb_data, &busy);
		if (err)
			return err;

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}

		/* Throttle the polling rate to avoid hogging the CPU. */
		if (busy) {
			usleep_range(udelay, udelay * 2);
			if (udelay < udelay_max)
				udelay *= 2;
		}
	} while (busy);

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_poll_for_busy);
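
/*
 * Example (editor's illustrative sketch, not part of this file):
 * __mmc_poll_for_busy() accepts an arbitrary callback, so a caller can
 * poll something other than CMD13 - here the host's ->card_busy() hook.
 * Both function names are hypothetical.
 */
static int example_busy_cb(void *cb_data, bool *busy)
{
	struct mmc_card *card = cb_data;

	*busy = card->host->ops->card_busy(card->host);
	return 0;
}

static int example_poll_hw_busy(struct mmc_card *card, unsigned int ms)
{
	if (!card->host->ops->card_busy)
		return -EOPNOTSUPP;

	return __mmc_poll_for_busy(card, ms, &example_busy_cb, card);
}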

int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
		      bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
{
	struct mmc_busy_data cb_data;

	cb_data.card = card;
	cb_data.retry_crc_err = retry_crc_err;
	cb_data.busy_cmd = busy_cmd;

	return __mmc_poll_for_busy(card, timeout_ms, &mmc_busy_cb, &cb_data);
}
EXPORT_SYMBOL_GPL(mmc_poll_for_busy);

bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
			  unsigned int timeout_ms)
{
	/*
	 * If the max_busy_timeout of the host is specified, make sure it's
	 * enough to fit the used timeout_ms. In case it's not, let's instruct
	 * the host to avoid HW busy detection, by converting to a R1 response
	 * instead of a R1B. Note, some hosts require R1B, which also means
	 * they are on their own when it comes to dealing with the busy timeout.
	 */
	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
	    (timeout_ms > host->max_busy_timeout)) {
		cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
		return false;
	}

	cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
	cmd->busy_timeout = timeout_ms;
	return true;
}

/**
 *	__mmc_switch - modify EXT_CSD register
 *	@card: the MMC card associated with the data transfer
 *	@set: cmd set values
 *	@index: EXT_CSD register index
 *	@value: value to program into EXT_CSD register
 *	@timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *	@timing: new timing to change to
 *	@send_status: send status cmd to poll for busy
 *	@retry_crc_err: retry on CRC errors when polling with CMD13 for busy
 *	@retries: number of retries
 *
 *	Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool send_status, bool retry_crc_err, unsigned int retries)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, retries);
	if (err)
		goto out;

	/* If SPI, or if HW busy detection was used above, we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
		mmc_host_is_spi(host))
		goto out_tim;

	/*
	 * If the host doesn't support HW polling via the ->card_busy() ops and
	 * we aren't allowed to poll by using CMD13, then we need to rely on
	 * waiting for the stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		goto out_tim;
	}

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before checking switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card, true);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			    true, false, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_switch);

int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process normally takes
	 * fewer than 40 executions of CMD19, and the timeout value should be
	 * shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
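
/*
 * Example (editor's illustrative sketch, not part of this file): a host
 * driver's ->execute_tuning() hook might sweep its sample phases and keep
 * the first one for which mmc_send_tuning() reads the pattern back
 * intact. example_set_sample_phase() is hypothetical hardware glue.
 */
static int example_execute_tuning(struct mmc_host *host, u32 opcode)
{
	int phase, err;

	for (phase = 0; phase < 16; phase++) {
		example_set_sample_phase(host, phase);	/* hypothetical */
		err = mmc_send_tuning(host, opcode, NULL);
		if (!err)
			return 0;	/* pattern matched at this phase */
	}

	return -EIO;
}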

int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * eMMC specification specifies that CMD12 can be used to stop a tuning
	 * command, but SD specification does not, so do nothing unless it is
	 * eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);

static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
	 * is a problem.  This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}

static int mmc_send_hpi_cmd(struct mmc_card *card)
{
	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
	struct mmc_host *host = card->host;
	bool use_r1b_resp = false;
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = card->ext_csd.hpi_cmd;
	cmd.arg = card->rca << 16 | 1;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	if (cmd.opcode == MMC_STOP_TRANSMISSION)
		use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
						    busy_timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err) {
		pr_warn("%s: HPI error %d. Command response %#x\n",
			mmc_hostname(host), err, cmd.resp[0]);
		return err;
	}

	/* No need to poll when using HW busy detection. */
	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
		return 0;

	/* Let's poll to find out when the HPI request completes. */
	return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
}

/**
 *	mmc_interrupt_hpi - issue a High Priority Interrupt
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issues a High Priority Interrupt, then checks card status until the
 *	card is out of the programming state.
 */
static int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card);
out:
	return err;
}

int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}

/**
 *	mmc_run_bkops - Run BKOPS for supported cards
 *	@card: MMC card to run BKOPS for
 *
 *	Run background operations synchronously for cards that have manual
 *	BKOPS enabled and report an urgent BKOPS level.
 */
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
	 * synchronously. Going forward, we may consider starting BKOPS for
	 * less urgent levels from an asynchronous background task, when idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
	if (err)
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);

	mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);

static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
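
/*
 * Example (editor's illustrative sketch, not part of this file): command
 * queuing has to be off while issuing non-queueable commands (e.g. RPMB
 * access), so callers bracket those with a disable/enable pair. The
 * function name is hypothetical.
 */
static int example_without_cmdq(struct mmc_card *card)
{
	bool was_on = card->ext_csd.cmdq_en;
	int err = 0;

	if (was_on) {
		err = mmc_cmdq_disable(card);
		if (err)
			return err;
	}

	/* ... issue non-queueable commands here ... */

	if (was_on)
		err = mmc_cmdq_enable(card);
	return err;
}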

int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
{
	struct mmc_host *host = card->host;
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
		return -EOPNOTSUPP;
	}

	if (!timeout_ms)
		timeout_ms = MMC_SANITIZE_TIMEOUT_MS;

	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));

	mmc_retune_hold(host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
			   1, timeout_ms, 0, true, false, 0);
	if (err)
		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);

	/*
	 * If the sanitize operation timed out, the card is probably still busy
	 * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));

	mmc_retune_release(host);

	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
	return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);
1033EXPORT_SYMBOL_GPL(mmc_sanitize);