v4.6

/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_OPS_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */

static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

static inline int __mmc_send_status(struct mmc_card *card, u32 *status,
				    bool ignore_crc)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!card);
	BUG_ON(!card->host);

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	if (ignore_crc)
		cmd.flags &= ~MMC_RSP_CRC;

	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, false);
}
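
/*
 * Example (editor's sketch, not part of the original file): a typical
 * caller issues CMD13 and decodes the current state from the R1 response;
 * R1_CURRENT_STATE() and R1_STATE_* come from <linux/mmc/mmc.h>:
 *
 *	u32 status;
 *	int err = mmc_send_status(card, &status);
 *
 *	if (!err && R1_CURRENT_STATE(status) == R1_STATE_PRG)
 *		pr_debug("card is still programming\n");
 */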
 
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {0};

	BUG_ON(!host);

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	BUG_ON(!card);

	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {0};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
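
/*
 * Example (editor's sketch): the core only issues CMD4 when the CSD
 * advertises a DSR (bit 76, reflected in csd.dsr_imp) and a value was
 * actually provided, roughly:
 *
 *	if (card->csd.dsr_imp && host->dsr_req)
 *		mmc_set_dsr(host);
 *
 * card->csd.dsr_imp and host->dsr_req are assumed here as filled in by
 * the core from the CSD and the device tree "dsr" property, respectively.
 */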

int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {0};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {0};
	int i, err = 0;

	BUG_ON(!host);

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* if we're just probing, do a single pass */
		if (ocr == 0)
			break;

		/* otherwise wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
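
/*
 * Example (editor's sketch): during attach the core first probes with
 * ocr == 0 to read back the card's OCR, then retries with the voltage
 * window it intends to use, roughly:
 *
 *	u32 ocr;
 *
 *	err = mmc_send_op_cond(host, 0, &ocr);
 *	...
 *	err = mmc_send_op_cond(host, ocr | (1 << 30), NULL);
 *
 * Bit 30 requests sector addressing; see mmc_attach_mmc() and
 * mmc_init_card() in mmc.c for the real sequence.
 */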

int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!host);
	BUG_ON(!cid);

	cmd.opcode = MMC_ALL_SEND_CID;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cid, cmd.resp, sizeof(u32) * 4);

	return 0;
}

int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {0};

	BUG_ON(!card);
	BUG_ON(!card->host);

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {0};

	BUG_ON(!host);
	BUG_ON(!cxd);

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: the caller is required to pass a DMA-capable buffer for buf, or
 * an on-stack buffer (with some overhead in the callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	int ret, i;
	u32 *csd_tmp;

	if (!mmc_host_is_spi(card->host))
		return mmc_send_cxd_native(card->host, card->rca << 16,
				csd, MMC_SEND_CSD);

	csd_tmp = kzalloc(16, GFP_KERNEL);
	if (!csd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		csd[i] = be32_to_cpu(csd_tmp[i]);

err:
	kfree(csd_tmp);
	return ret;
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	int ret, i;
	u32 *cid_tmp;

	if (!mmc_host_is_spi(host)) {
		if (!host->card)
			return -EINVAL;
		return mmc_send_cxd_native(host, host->card->rca << 16,
				cid, MMC_SEND_CID);
	}

	cid_tmp = kzalloc(16, GFP_KERNEL);
	if (!cid_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cid[i] = be32_to_cpu(cid_tmp[i]);

err:
	kfree(cid_tmp);
	return ret;
}

int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
				512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
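
/*
 * Example (editor's sketch): on success the caller owns the 512-byte
 * buffer and must free it; EXT_CSD_REV is one of the byte indices from
 * <linux/mmc/mmc.h>:
 *
 *	u8 *ext_csd;
 *
 *	err = mmc_get_ext_csd(card, &ext_csd);
 *	if (!err) {
 *		pr_info("EXT_CSD rev %u\n", ext_csd[EXT_CSD_REV]);
 *		kfree(ext_csd);
 *	}
 */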

int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (status & 0xFDFFA000)
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

/**
 *	__mmc_switch - modify EXT_CSD register
 *	@card: the MMC card associated with the data transfer
 *	@set: cmd set values
 *	@index: EXT_CSD register index
 *	@value: value to program into EXT_CSD register
 *	@timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *	@use_busy_signal: use the busy signal as response type
 *	@send_status: send status cmd to poll for busy
 *	@ignore_crc: ignore CRC errors when sending status cmd to poll for busy
 *
 *	Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, bool use_busy_signal, bool send_status,
		bool ignore_crc)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {0};
	unsigned long timeout;
	u32 status = 0;
	bool use_r1b_resp = use_busy_signal;
	bool expired = false;

	mmc_retune_hold(host);

	/*
	 * If the cmd timeout and the max_busy_timeout of the host are both
	 * specified, let's validate them. A failure means we need to prevent
	 * the host from doing hw busy detection, which is done by converting
	 * to a R1 response instead of a R1B.
	 */
	if (timeout_ms && host->max_busy_timeout &&
		(timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		/*
		 * A busy_timeout of zero means the host can decide to use
		 * whatever value it finds suitable.
		 */
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		goto out;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		goto out;

	/*
	 * CRC errors shall only be ignored in cases where CMD13 is used to
	 * poll to detect busy completion.
	 */
	if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
		ignore_crc = false;

	/* We have an unspecified cmd timeout, use the fallback value. */
	if (!timeout_ms)
		timeout_ms = MMC_OPS_TIMEOUT_MS;

	/* Must check status to be sure of no errors. */
	timeout = jiffies + msecs_to_jiffies(timeout_ms);
	do {
		if (send_status) {
			/*
			 * Due to the possibility of being preempted after
			 * sending the status command, check the expiration
			 * time first.
			 */
			expired = time_after(jiffies, timeout);
			err = __mmc_send_status(card, &status, ignore_crc);
			if (err)
				goto out;
		}
		if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
			break;
		if (mmc_host_is_spi(host))
			break;

		/*
		 * If we are not allowed to issue a status command and the
		 * host doesn't support MMC_CAP_WAIT_WHILE_BUSY, we can only
		 * rely on waiting for the stated timeout to be sufficient.
		 */
		if (!send_status) {
			mmc_delay(timeout_ms);
			goto out;
		}

		/* Timeout if the device never leaves the program state. */
		if (expired && R1_CURRENT_STATE(status) == R1_STATE_PRG) {
			pr_err("%s: Card stuck in programming state! %s\n",
				mmc_hostname(host), __func__);
			err = -ETIMEDOUT;
			goto out;
		}
	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);

	err = mmc_switch_status_error(host, status);
out:
	mmc_retune_release(host);

	return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, true, true,
				false);
}
EXPORT_SYMBOL_GPL(mmc_switch);
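
/*
 * Example (editor's sketch): a typical EXT_CSD write, here enabling
 * high-speed timing, looks like:
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
 *			 1, card->ext_csd.generic_cmd6_time);
 *
 * EXT_CSD_HS_TIMING and generic_cmd6_time come from the core headers;
 * see mmc_select_hs() in mmc.c for the real call site.
 */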

int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process is normally
	 * completed in fewer than 40 executions of CMD19, and the timeout
	 * value should be shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
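
/*
 * Example (editor's sketch): a host driver's ->execute_tuning() callback
 * typically steps its sample phase and calls this helper until the
 * pattern reads back intact, roughly:
 *
 *	for (phase = 0; phase < MAX_PHASES; phase++) {
 *		set_sample_phase(host, phase);	// driver-specific
 *		if (!mmc_send_tuning(mmc, opcode, NULL))
 *			break;			// this phase works
 *	}
 *
 * MAX_PHASES and set_sample_phase() are hypothetical, driver-specific
 * names used only for illustration.
 */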

static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
	 * is a problem.  This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}
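
/*
 * Example (editor's sketch): during init the core can probe widths from
 * widest to narrowest and keep the first one that passes, roughly:
 *
 *	static const unsigned widths[] = { MMC_BUS_WIDTH_8, MMC_BUS_WIDTH_4 };
 *
 *	for (i = 0; i < ARRAY_SIZE(widths); i++) {
 *		mmc_set_bus_width(card->host, widths[i]);
 *		if (!mmc_bus_test(card, widths[i]))
 *			break;
 *	}
 *
 * See mmc_select_bus_width() in mmc.c for the real logic, which also
 * switches the card side via EXT_CSD_BUS_WIDTH first.
 */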

int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	unsigned int opcode;
	int err;

	if (!card->ext_csd.hpi) {
		pr_warn("%s: Card doesn't support HPI command\n",
			mmc_hostname(card->host));
		return -EINVAL;
	}

	opcode = card->ext_csd.hpi_cmd;
	if (opcode == MMC_STOP_TRANSMISSION)
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	else if (opcode == MMC_SEND_STATUS)
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = opcode;
	cmd.arg = card->rca << 16 | 1;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_warn("%s: error %d interrupting operation. "
			"HPI command response %#x\n", mmc_hostname(card->host),
			err, cmd.resp[0]);
		return err;
	}
	if (status)
		*status = cmd.resp[0];

	return 0;
}

int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}
v6.13.7
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
#define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */
#define MMC_OP_COND_PERIOD_US		(4 * 1000) /* 4ms */
#define MMC_OP_COND_TIMEOUT_MS		1000 /* 1s */

static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

struct mmc_busy_data {
	struct mmc_card *card;
	bool retry_crc_err;
	enum mmc_busy_cmd busy_cmd;
};

struct mmc_op_cond_busy_data {
	struct mmc_host *host;
	u32 ocr;
	struct mmc_command *cmd;
};

int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int __mmc_go_idle(struct mmc_host *host)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	mmc_delay(1);

	return err;
}

int mmc_go_idle(struct mmc_host *host)
{
	int err;

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	err = __mmc_go_idle(host);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

static int __mmc_send_op_cond_cb(void *cb_data, bool *busy)
{
	struct mmc_op_cond_busy_data *data = cb_data;
	struct mmc_host *host = data->host;
	struct mmc_command *cmd = data->cmd;
	u32 ocr = data->ocr;
	int err = 0;

	err = mmc_wait_for_cmd(host, cmd, 0);
	if (err)
		return err;

	if (mmc_host_is_spi(host)) {
		if (!(cmd->resp[0] & R1_SPI_IDLE)) {
			*busy = false;
			return 0;
		}
	} else {
		if (cmd->resp[0] & MMC_CARD_BUSY) {
			*busy = false;
			return 0;
		}
	}

	*busy = true;

	/*
	 * According to eMMC specification v5.1 section 6.4.3, we
	 * should issue CMD1 repeatedly in the idle state until
	 * the eMMC is ready. Otherwise some eMMC devices seem to enter
	 * the inactive mode after mmc_init_card() issued CMD0 when
	 * the eMMC device is busy.
	 */
	if (!ocr && !mmc_host_is_spi(host))
		cmd->arg = cmd->resp[0] | BIT(30);

	return 0;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int err = 0;
	struct mmc_op_cond_busy_data cb_data = {
		.host = host,
		.ocr = ocr,
		.cmd = &cmd
	};

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	err = __mmc_poll_for_busy(host, MMC_OP_COND_PERIOD_US,
				  MMC_OP_COND_TIMEOUT_MS,
				  &__mmc_send_op_cond_cb, &cb_data);
	if (err)
		return err;

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}

int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: the caller is required to pass a DMA-capable buffer for buf, or
 * an on-stack buffer (with some overhead in the callee).
 */
int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
		       u32 args, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = args;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
{
	int ret, i;
	__be32 *cxd_tmp;

	cxd_tmp = kzalloc(16, GFP_KERNEL);
	if (!cxd_tmp)
		return -ENOMEM;

	ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cxd[i] = be32_to_cpu(cxd_tmp[i]);

err:
	kfree(cxd_tmp);
	return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				MMC_SEND_CSD);
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0, ext_csd,
				512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);

int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (R1_STATUS(status))
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

/* Caller must hold re-tuning */
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

static int mmc_busy_cb(void *cb_data, bool *busy)
{
	struct mmc_busy_data *data = cb_data;
	struct mmc_host *host = data->card->host;
	u32 status = 0;
	int err;

	if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
		*busy = host->ops->card_busy(host);
		return 0;
	}

	err = mmc_send_status(data->card, &status);
	if (data->retry_crc_err && err == -EILSEQ) {
		*busy = true;
		return 0;
	}
	if (err)
		return err;

	switch (data->busy_cmd) {
	case MMC_BUSY_CMD6:
		err = mmc_switch_status_error(host, status);
		break;
	case MMC_BUSY_ERASE:
		err = R1_STATUS(status) ? -EIO : 0;
		break;
	case MMC_BUSY_HPI:
	case MMC_BUSY_EXTR_SINGLE:
	case MMC_BUSY_IO:
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	*busy = !mmc_ready_for_data(status);
	return 0;
}

int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us,
			unsigned int timeout_ms,
			int (*busy_cb)(void *cb_data, bool *busy),
			void *cb_data)
{
	int err;
	unsigned long timeout;
	unsigned int udelay = period_us ? period_us : 32, udelay_max = 32768;
	bool expired = false;
	bool busy = false;

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		err = (*busy_cb)(cb_data, &busy);
		if (err)
			return err;

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}

		/* Throttle the polling rate to avoid hogging the CPU. */
		if (busy) {
			usleep_range(udelay, udelay * 2);
			if (udelay < udelay_max)
				udelay *= 2;
		}
	} while (busy);

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_poll_for_busy);
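
/*
 * Example (editor's sketch): a caller can supply its own completion
 * predicate; here a hypothetical callback that reads a controller
 * register via cb_data:
 *
 *	static int my_busy_cb(void *cb_data, bool *busy)
 *	{
 *		struct my_ctrl *ctrl = cb_data;
 *
 *		*busy = readl(ctrl->base + MY_STATUS) & MY_BUSY_BIT;
 *		return 0;
 *	}
 *
 *	err = __mmc_poll_for_busy(host, 0, timeout_ms, &my_busy_cb, ctrl);
 *
 * struct my_ctrl, MY_STATUS and MY_BUSY_BIT are made-up names used only
 * for illustration.
 */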

int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
		      bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
{
	struct mmc_host *host = card->host;
	struct mmc_busy_data cb_data;

	cb_data.card = card;
	cb_data.retry_crc_err = retry_crc_err;
	cb_data.busy_cmd = busy_cmd;

	return __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_busy_cb, &cb_data);
}
EXPORT_SYMBOL_GPL(mmc_poll_for_busy);

bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
			  unsigned int timeout_ms)
{
	/*
	 * If the max_busy_timeout of the host is specified, make sure it's
	 * enough to fit the used timeout_ms. In case it's not, let's instruct
	 * the host to avoid HW busy detection, by converting to a R1 response
	 * instead of a R1B. Note, some hosts require R1B, which also means
	 * they are on their own when it comes to dealing with the busy
	 * timeout.
	 */
	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
	    (timeout_ms > host->max_busy_timeout)) {
		cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
		return false;
	}

	cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
	cmd->busy_timeout = timeout_ms;
	return true;
}
EXPORT_SYMBOL_GPL(mmc_prepare_busy_cmd);
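
/*
 * Example (editor's sketch): a caller building its own R1B command can
 * let this helper pick between R1 and R1B and learn whether HW busy
 * detection applies:
 *
 *	struct mmc_command cmd = {};
 *	bool use_r1b_resp;
 *
 *	cmd.opcode = MMC_STOP_TRANSMISSION;
 *	use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);
 *
 * If it returns false, the caller should poll for completion itself,
 * e.g. with mmc_poll_for_busy().
 */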

/**
 *	__mmc_switch - modify EXT_CSD register
 *	@card: the MMC card associated with the data transfer
 *	@set: cmd set values
 *	@index: EXT_CSD register index
 *	@value: value to program into EXT_CSD register
 *	@timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 *	@timing: new timing to change to
 *	@send_status: send status cmd to poll for busy
 *	@retry_crc_err: retry on CRC errors when polling with CMD13 for busy
 *	@retries: number of retries
 *
 *	Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool send_status, bool retry_crc_err, unsigned int retries)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, retries);
	if (err)
		goto out;

	/* If SPI or HW busy detection was used above, we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
		mmc_host_is_spi(host))
		goto out_tim;

	/*
	 * If the host doesn't support HW polling via the ->card_busy() ops and
	 * it's not allowed to poll by using CMD13, then we need to rely on
	 * waiting for the stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		goto out_tim;
	}

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before checking switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card, true);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			    true, false, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_switch);

int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process is normally
	 * completed in fewer than 40 executions of CMD19, and the timeout
	 * value should be shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);

int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * eMMC specification specifies that CMD12 can be used to stop a tuning
	 * command, but SD specification does not, so do nothing unless it is
	 * eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);

static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
	 * is a problem.  This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}

static int mmc_send_hpi_cmd(struct mmc_card *card)
{
	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
	struct mmc_host *host = card->host;
	bool use_r1b_resp = false;
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = card->ext_csd.hpi_cmd;
	cmd.arg = card->rca << 16 | 1;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	if (cmd.opcode == MMC_STOP_TRANSMISSION)
		use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
						    busy_timeout_ms);

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err) {
		pr_warn("%s: HPI error %d. Command response %#x\n",
			mmc_hostname(host), err, cmd.resp[0]);
		return err;
	}

	/* No need to poll when using HW busy detection. */
	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
		return 0;

	/* Let's poll to find out when the HPI request completes. */
	return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
}

/**
 *	mmc_interrupt_hpi - issue a High Priority Interrupt
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issue a High Priority Interrupt, and check the card status
 *	until it is out of the prg-state.
 */
static int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card);
out:
	return err;
}

int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}

/**
 *	mmc_run_bkops - Run BKOPS for supported cards
 *	@card: MMC card to run BKOPS for
 *
 *	Run background operations synchronously for cards having manual BKOPS
 *	enabled and in case it reports urgent BKOPS level.
 */
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
	 * synchronously. Going forward, we may consider starting BKOPS for
	 * less urgent levels in an asynchronous background task, when idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
	/*
	 * If the BKOPS timed out, the card is probably still busy in the
	 * R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: BKOPS aborted\n", mmc_hostname(card->host));
	else if (err)
		pr_warn("%s: Error %d running bkops\n",
			mmc_hostname(card->host), err);

	mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);

static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
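
/*
 * Example (editor's sketch): command queuing must be off for commands
 * the queue engine cannot carry, so callers bracket such operations,
 * roughly:
 *
 *	if (card->ext_csd.cmdq_en) {
 *		err = mmc_cmdq_disable(card);
 *		...issue the non-queueable command...
 *		err = mmc_cmdq_enable(card);
 *	}
 *
 * The block driver does this kind of toggling, e.g. around partition
 * switching in block.c, with more careful error recovery than shown.
 */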

int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
{
	struct mmc_host *host = card->host;
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
		return -EOPNOTSUPP;
	}

	if (!timeout_ms)
		timeout_ms = MMC_SANITIZE_TIMEOUT_MS;

	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));

	mmc_retune_hold(host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
			   1, timeout_ms, 0, true, false, 0);
	if (err)
		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);

	/*
	 * If the sanitize operation timed out, the card is probably still busy
	 * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));

	mmc_retune_release(host);

	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
	return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);
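
/*
 * Example (editor's sketch): a caller such as the block driver's ioctl
 * path invokes this with a caller-supplied busy timeout; passing 0
 * selects the MMC_SANITIZE_TIMEOUT_MS fallback defined above:
 *
 *	err = mmc_sanitize(card, timeout_ms);
 *
 * timeout_ms here stands for whatever value the caller obtained (e.g.
 * from user space); the name is illustrative only.
 */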