v3.1
  1/*
  2 *  linux/drivers/mmc/core/mmc_ops.c
  3 *
  4 *  Copyright 2006-2007 Pierre Ossman
  5 *
  6 * This program is free software; you can redistribute it and/or modify
  7 * it under the terms of the GNU General Public License as published by
  8 * the Free Software Foundation; either version 2 of the License, or (at
  9 * your option) any later version.
 10 */
 11
  12#include <linux/slab.h>
  13#include <linux/types.h>
 14#include <linux/scatterlist.h>
 15
 16#include <linux/mmc/host.h>
 17#include <linux/mmc/card.h>
 18#include <linux/mmc/mmc.h>
 19
 20#include "core.h"
 21#include "mmc_ops.h"
 22
 23static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
 24{
 25	int err;
 26	struct mmc_command cmd = {0};
 27
 28	BUG_ON(!host);
 29
 30	cmd.opcode = MMC_SELECT_CARD;
 31
 32	if (card) {
 33		cmd.arg = card->rca << 16;
 34		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 35	} else {
 36		cmd.arg = 0;
 37		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
 38	}
 39
 40	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
 41	if (err)
 42		return err;
 43
 44	return 0;
 45}
 46
 47int mmc_select_card(struct mmc_card *card)
 48{
 49	BUG_ON(!card);
 50
 51	return _mmc_select_card(card->host, card);
 52}
 53
 54int mmc_deselect_cards(struct mmc_host *host)
 55{
 56	return _mmc_select_card(host, NULL);
 57}
 58
 59int mmc_card_sleepawake(struct mmc_host *host, int sleep)
 60{
 61	struct mmc_command cmd = {0};
 62	struct mmc_card *card = host->card;
 63	int err;
 64
 65	if (sleep)
 66		mmc_deselect_cards(host);
 67
 68	cmd.opcode = MMC_SLEEP_AWAKE;
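	/* CMD5 (SLEEP_AWAKE) argument: RCA in bits [31:16]; bit 15 set
	 * requests Sleep, cleared requests Awake. */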
 69	cmd.arg = card->rca << 16;
 70	if (sleep)
 71		cmd.arg |= 1 << 15;
 72
 73	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
 74	err = mmc_wait_for_cmd(host, &cmd, 0);
 75	if (err)
 76		return err;
 77
 78	/*
 79	 * If the host does not wait while the card signals busy, then we will
  80	 * have to wait the sleep/awake timeout.  Note, we cannot use the
 81	 * SEND_STATUS command to poll the status because that command (and most
 82	 * others) is invalid while the card sleeps.
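	 * (sa_timeout is held in units of 100 ns, so the DIV_ROUND_UP()
	 * below converts it to the millisecond delay that mmc_delay() takes.)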
 83	 */
 84	if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
 85		mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000));
 86
 87	if (!sleep)
 88		err = mmc_select_card(card);
 89
 90	return err;
 91}
 92
 93int mmc_go_idle(struct mmc_host *host)
 94{
 95	int err;
 96	struct mmc_command cmd = {0};
 97
 98	/*
 99	 * Non-SPI hosts need to prevent chipselect going active during
100	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
101	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
102	 *
103	 * SPI hosts ignore ios.chip_select; it's managed according to
104	 * rules that must accommodate non-MMC slaves which this layer
105	 * won't even know about.
106	 */
107	if (!mmc_host_is_spi(host)) {
108		mmc_set_chip_select(host, MMC_CS_HIGH);
109		mmc_delay(1);
110	}
111
112	cmd.opcode = MMC_GO_IDLE_STATE;
113	cmd.arg = 0;
114	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
115
116	err = mmc_wait_for_cmd(host, &cmd, 0);
117
118	mmc_delay(1);
119
120	if (!mmc_host_is_spi(host)) {
121		mmc_set_chip_select(host, MMC_CS_DONTCARE);
122		mmc_delay(1);
123	}
124
125	host->use_spi_crc = 0;
126
127	return err;
128}
129
130int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
131{
132	struct mmc_command cmd = {0};
133	int i, err = 0;
134
135	BUG_ON(!host);
136
137	cmd.opcode = MMC_SEND_OP_COND;
138	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
139	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
140
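	/* Poll up to 100 times with a 10 ms delay between attempts, i.e.
	 * roughly one second, for the card to finish its power-up sequence. */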
141	for (i = 100; i; i--) {
142		err = mmc_wait_for_cmd(host, &cmd, 0);
143		if (err)
144			break;
145
146		/* if we're just probing, do a single pass */
147		if (ocr == 0)
148			break;
149
150		/* otherwise wait until reset completes */
151		if (mmc_host_is_spi(host)) {
152			if (!(cmd.resp[0] & R1_SPI_IDLE))
153				break;
154		} else {
155			if (cmd.resp[0] & MMC_CARD_BUSY)
156				break;
157		}
158
159		err = -ETIMEDOUT;
160
161		mmc_delay(10);
162	}
163
164	if (rocr && !mmc_host_is_spi(host))
165		*rocr = cmd.resp[0];
166
167	return err;
168}
169
170int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
171{
172	int err;
173	struct mmc_command cmd = {0};
174
175	BUG_ON(!host);
176	BUG_ON(!cid);
177
178	cmd.opcode = MMC_ALL_SEND_CID;
179	cmd.arg = 0;
180	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
181
182	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
183	if (err)
184		return err;
185
186	memcpy(cid, cmd.resp, sizeof(u32) * 4);
187
188	return 0;
189}
190
191int mmc_set_relative_addr(struct mmc_card *card)
192{
193	int err;
194	struct mmc_command cmd = {0};
195
196	BUG_ON(!card);
197	BUG_ON(!card->host);
198
199	cmd.opcode = MMC_SET_RELATIVE_ADDR;
200	cmd.arg = card->rca << 16;
201	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
202
203	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
204	if (err)
205		return err;
206
207	return 0;
208}
209
210static int
211mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
212{
213	int err;
214	struct mmc_command cmd = {0};
215
216	BUG_ON(!host);
217	BUG_ON(!cxd);
218
219	cmd.opcode = opcode;
220	cmd.arg = arg;
221	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
222
223	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
224	if (err)
225		return err;
226
227	memcpy(cxd, cmd.resp, sizeof(u32) * 4);
228
229	return 0;
230}
231
232static int
233mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
234		u32 opcode, void *buf, unsigned len)
235{
236	struct mmc_request mrq = {0};
237	struct mmc_command cmd = {0};
238	struct mmc_data data = {0};
239	struct scatterlist sg;
240	void *data_buf;
241
242	/* dma onto stack is unsafe/nonportable, but callers to this
243	 * routine normally provide temporary on-stack buffers ...
244	 */
245	data_buf = kmalloc(len, GFP_KERNEL);
246	if (data_buf == NULL)
247		return -ENOMEM;
248
249	mrq.cmd = &cmd;
250	mrq.data = &data;
251
252	cmd.opcode = opcode;
253	cmd.arg = 0;
254
255	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
256	 * rely on callers to never use this with "native" calls for reading
257	 * CSD or CID.  Native versions of those commands use the R2 type,
258	 * not R1 plus a data block.
259	 */
260	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
261
262	data.blksz = len;
263	data.blocks = 1;
264	data.flags = MMC_DATA_READ;
265	data.sg = &sg;
266	data.sg_len = 1;
267
268	sg_init_one(&sg, data_buf, len);
269
270	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
271		/*
 272		 * The spec states that CSD and CID accesses have a timeout
273		 * of 64 clock cycles.
274		 */
275		data.timeout_ns = 0;
276		data.timeout_clks = 64;
277	} else
278		mmc_set_data_timeout(&data, card);
279
280	mmc_wait_for_req(host, &mrq);
281
282	memcpy(buf, data_buf, len);
283	kfree(data_buf);
284
285	if (cmd.error)
286		return cmd.error;
287	if (data.error)
288		return data.error;
289
290	return 0;
291}
292
293int mmc_send_csd(struct mmc_card *card, u32 *csd)
294{
295	int ret, i;
296
297	if (!mmc_host_is_spi(card->host))
298		return mmc_send_cxd_native(card->host, card->rca << 16,
299				csd, MMC_SEND_CSD);
300
301	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd, 16);
302	if (ret)
303		return ret;
304
305	for (i = 0;i < 4;i++)
306		csd[i] = be32_to_cpu(csd[i]);
307
308	return 0;
309}
310
311int mmc_send_cid(struct mmc_host *host, u32 *cid)
312{
313	int ret, i;
314
315	if (!mmc_host_is_spi(host)) {
316		if (!host->card)
317			return -EINVAL;
318		return mmc_send_cxd_native(host, host->card->rca << 16,
319				cid, MMC_SEND_CID);
320	}
321
322	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid, 16);
323	if (ret)
324		return ret;
325
326	for (i = 0;i < 4;i++)
327		cid[i] = be32_to_cpu(cid[i]);
328
329	return 0;
330}
331
332int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
333{
334	return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
335			ext_csd, 512);
336}
337
338int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
339{
340	struct mmc_command cmd = {0};
341	int err;
342
343	cmd.opcode = MMC_SPI_READ_OCR;
344	cmd.arg = highcap ? (1 << 30) : 0;
345	cmd.flags = MMC_RSP_SPI_R3;
346
347	err = mmc_wait_for_cmd(host, &cmd, 0);
348
349	*ocrp = cmd.resp[1];
350	return err;
351}
352
353int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
354{
355	struct mmc_command cmd = {0};
356	int err;
357
358	cmd.opcode = MMC_SPI_CRC_ON_OFF;
359	cmd.flags = MMC_RSP_SPI_R1;
360	cmd.arg = use_crc;
361
362	err = mmc_wait_for_cmd(host, &cmd, 0);
363	if (!err)
364		host->use_spi_crc = use_crc;
365	return err;
366}
367
368/**
369 *	mmc_switch - modify EXT_CSD register
370 *	@card: the MMC card associated with the data transfer
371 *	@set: cmd set values
372 *	@index: EXT_CSD register index
373 *	@value: value to program into EXT_CSD register
374 *	@timeout_ms: timeout (ms) for operation performed by register write,
375 *                   timeout of zero implies maximum possible timeout
376 *
377 *	Modifies the EXT_CSD register for selected card.
378 */
379int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
380	       unsigned int timeout_ms)
381{
382	int err;
383	struct mmc_command cmd = {0};
384	u32 status;
385
386	BUG_ON(!card);
387	BUG_ON(!card->host);
388
389	cmd.opcode = MMC_SWITCH;
390	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
391		  (index << 16) |
392		  (value << 8) |
393		  set;
394	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
395	cmd.cmd_timeout_ms = timeout_ms;
396
397	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
398	if (err)
399		return err;
400
401	/* Must check status to be sure of no errors */
402	do {
403		err = mmc_send_status(card, &status);
404		if (err)
405			return err;
406		if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
407			break;
408		if (mmc_host_is_spi(card->host))
409			break;
410	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
411
412	if (mmc_host_is_spi(card->host)) {
413		if (status & R1_SPI_ILLEGAL_COMMAND)
414			return -EBADMSG;
415	} else {
416		if (status & 0xFDFFA000)
417			printk(KERN_WARNING "%s: unexpected status %#x after "
 418			       "switch\n", mmc_hostname(card->host), status);
419		if (status & R1_SWITCH_ERROR)
420			return -EBADMSG;
421	}
422
423	return 0;
424}
425EXPORT_SYMBOL_GPL(mmc_switch);
426
427int mmc_send_status(struct mmc_card *card, u32 *status)
428{
429	int err;
430	struct mmc_command cmd = {0};
431
432	BUG_ON(!card);
433	BUG_ON(!card->host);
434
435	cmd.opcode = MMC_SEND_STATUS;
436	if (!mmc_host_is_spi(card->host))
437		cmd.arg = card->rca << 16;
438	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
439
440	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
441	if (err)
442		return err;
443
444	/* NOTE: callers are required to understand the difference
445	 * between "native" and SPI format status words!
446	 */
447	if (status)
448		*status = cmd.resp[0];
449
450	return 0;
451}
452
453static int
454mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
455		  u8 len)
456{
457	struct mmc_request mrq = {0};
458	struct mmc_command cmd = {0};
459	struct mmc_data data = {0};
460	struct scatterlist sg;
461	u8 *data_buf;
462	u8 *test_buf;
463	int i, err;
464	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
465	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
466
467	/* dma onto stack is unsafe/nonportable, but callers to this
468	 * routine normally provide temporary on-stack buffers ...
469	 */
470	data_buf = kmalloc(len, GFP_KERNEL);
471	if (!data_buf)
472		return -ENOMEM;
473
474	if (len == 8)
475		test_buf = testdata_8bit;
476	else if (len == 4)
477		test_buf = testdata_4bit;
478	else {
479		printk(KERN_ERR "%s: Invalid bus_width %d\n",
480		       mmc_hostname(host), len);
481		kfree(data_buf);
482		return -EINVAL;
483	}
484
485	if (opcode == MMC_BUS_TEST_W)
486		memcpy(data_buf, test_buf, len);
487
488	mrq.cmd = &cmd;
489	mrq.data = &data;
490	cmd.opcode = opcode;
491	cmd.arg = 0;
492
493	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
494	 * rely on callers to never use this with "native" calls for reading
495	 * CSD or CID.  Native versions of those commands use the R2 type,
496	 * not R1 plus a data block.
497	 */
498	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
499
500	data.blksz = len;
501	data.blocks = 1;
502	if (opcode == MMC_BUS_TEST_R)
503		data.flags = MMC_DATA_READ;
504	else
505		data.flags = MMC_DATA_WRITE;
506
507	data.sg = &sg;
508	data.sg_len = 1;
509	sg_init_one(&sg, data_buf, len);
510	mmc_wait_for_req(host, &mrq);
511	err = 0;
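	/* BUS_TEST_R returns the bitwise inverse of the pattern sent with
	 * BUS_TEST_W, so each byte XORed with the test pattern should read
	 * back as 0xff. */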
512	if (opcode == MMC_BUS_TEST_R) {
513		for (i = 0; i < len / 4; i++)
514			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
515				err = -EIO;
516				break;
517			}
518	}
519	kfree(data_buf);
520
521	if (cmd.error)
522		return cmd.error;
523	if (data.error)
524		return data.error;
525
526	return err;
527}
528
529int mmc_bus_test(struct mmc_card *card, u8 bus_width)
530{
531	int err, width;
532
533	if (bus_width == MMC_BUS_WIDTH_8)
534		width = 8;
535	else if (bus_width == MMC_BUS_WIDTH_4)
536		width = 4;
537	else if (bus_width == MMC_BUS_WIDTH_1)
538		return 0; /* no need for test */
539	else
540		return -EINVAL;
541
542	/*
543	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
544	 * is a problem.  This improves chances that the test will work.
545	 */
546	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
547	err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
 548	return err;
 549}
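
A usage note on the EXT_CSD write path above: mmc_switch() issues CMD6, then (unless the host is SPI or can wait on busy itself) polls CMD13 until the card leaves the PRG state, and maps R1_SWITCH_ERROR to -EBADMSG, so most callers need nothing beyond the single call. A minimal, hypothetical caller-side sketch follows; the function is illustrative only and not part of mmc_ops.c, the EXT_CSD_* macros come from <linux/mmc/mmc.h>, and a timeout of 0 means "maximum possible timeout" per the kernel-doc above.

/* Illustrative sketch -- not part of mmc_ops.c. */
static int example_set_bus_width_4bit(struct mmc_card *card)
{
	/* Program the BUS_WIDTH byte of EXT_CSD; mmc_switch() handles the
	 * busy polling and the R1_SWITCH_ERROR check internally. */
	return mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			  EXT_CSD_BUS_WIDTH_4, 0);
}
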
v3.5.6
  1/*
  2 *  linux/drivers/mmc/core/mmc_ops.c
  3 *
  4 *  Copyright 2006-2007 Pierre Ossman
  5 *
  6 * This program is free software; you can redistribute it and/or modify
  7 * it under the terms of the GNU General Public License as published by
  8 * the Free Software Foundation; either version 2 of the License, or (at
  9 * your option) any later version.
 10 */
 11
 12#include <linux/slab.h>
 13#include <linux/export.h>
 14#include <linux/types.h>
 15#include <linux/scatterlist.h>
 16
 17#include <linux/mmc/host.h>
 18#include <linux/mmc/card.h>
 19#include <linux/mmc/mmc.h>
 20
 21#include "core.h"
 22#include "mmc_ops.h"
 23
 24static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
 25{
 26	int err;
 27	struct mmc_command cmd = {0};
 28
 29	BUG_ON(!host);
 30
 31	cmd.opcode = MMC_SELECT_CARD;
 32
 33	if (card) {
 34		cmd.arg = card->rca << 16;
 35		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 36	} else {
 37		cmd.arg = 0;
 38		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
 39	}
 40
 41	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
 42	if (err)
 43		return err;
 44
 45	return 0;
 46}
 47
 48int mmc_select_card(struct mmc_card *card)
 49{
 50	BUG_ON(!card);
 51
 52	return _mmc_select_card(card->host, card);
 53}
 54
 55int mmc_deselect_cards(struct mmc_host *host)
 56{
 57	return _mmc_select_card(host, NULL);
 58}
 59
 60int mmc_card_sleepawake(struct mmc_host *host, int sleep)
 61{
 62	struct mmc_command cmd = {0};
 63	struct mmc_card *card = host->card;
 64	int err;
 65
 66	if (sleep)
 67		mmc_deselect_cards(host);
 68
 69	cmd.opcode = MMC_SLEEP_AWAKE;
 70	cmd.arg = card->rca << 16;
 71	if (sleep)
 72		cmd.arg |= 1 << 15;
 73
 74	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
 75	err = mmc_wait_for_cmd(host, &cmd, 0);
 76	if (err)
 77		return err;
 78
 79	/*
 80	 * If the host does not wait while the card signals busy, then we will
  81	 * have to wait the sleep/awake timeout.  Note, we cannot use the
 82	 * SEND_STATUS command to poll the status because that command (and most
 83	 * others) is invalid while the card sleeps.
 84	 */
 85	if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
 86		mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000));
 87
 88	if (!sleep)
 89		err = mmc_select_card(card);
 90
 91	return err;
 92}
 93
 94int mmc_go_idle(struct mmc_host *host)
 95{
 96	int err;
 97	struct mmc_command cmd = {0};
 98
 99	/*
100	 * Non-SPI hosts need to prevent chipselect going active during
101	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
102	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
103	 *
104	 * SPI hosts ignore ios.chip_select; it's managed according to
105	 * rules that must accommodate non-MMC slaves which this layer
106	 * won't even know about.
107	 */
108	if (!mmc_host_is_spi(host)) {
109		mmc_set_chip_select(host, MMC_CS_HIGH);
110		mmc_delay(1);
111	}
112
113	cmd.opcode = MMC_GO_IDLE_STATE;
114	cmd.arg = 0;
115	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
116
117	err = mmc_wait_for_cmd(host, &cmd, 0);
118
119	mmc_delay(1);
120
121	if (!mmc_host_is_spi(host)) {
122		mmc_set_chip_select(host, MMC_CS_DONTCARE);
123		mmc_delay(1);
124	}
125
126	host->use_spi_crc = 0;
127
128	return err;
129}
130
131int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
132{
133	struct mmc_command cmd = {0};
134	int i, err = 0;
135
136	BUG_ON(!host);
137
138	cmd.opcode = MMC_SEND_OP_COND;
139	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
140	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
141
142	for (i = 100; i; i--) {
143		err = mmc_wait_for_cmd(host, &cmd, 0);
144		if (err)
145			break;
146
147		/* if we're just probing, do a single pass */
148		if (ocr == 0)
149			break;
150
151		/* otherwise wait until reset completes */
152		if (mmc_host_is_spi(host)) {
153			if (!(cmd.resp[0] & R1_SPI_IDLE))
154				break;
155		} else {
156			if (cmd.resp[0] & MMC_CARD_BUSY)
157				break;
158		}
159
160		err = -ETIMEDOUT;
161
162		mmc_delay(10);
163	}
164
165	if (rocr && !mmc_host_is_spi(host))
166		*rocr = cmd.resp[0];
167
168	return err;
169}
170
171int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
172{
173	int err;
174	struct mmc_command cmd = {0};
175
176	BUG_ON(!host);
177	BUG_ON(!cid);
178
179	cmd.opcode = MMC_ALL_SEND_CID;
180	cmd.arg = 0;
181	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
182
183	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
184	if (err)
185		return err;
186
187	memcpy(cid, cmd.resp, sizeof(u32) * 4);
188
189	return 0;
190}
191
192int mmc_set_relative_addr(struct mmc_card *card)
193{
194	int err;
195	struct mmc_command cmd = {0};
196
197	BUG_ON(!card);
198	BUG_ON(!card->host);
199
200	cmd.opcode = MMC_SET_RELATIVE_ADDR;
201	cmd.arg = card->rca << 16;
202	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
203
204	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
205	if (err)
206		return err;
207
208	return 0;
209}
210
211static int
212mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
213{
214	int err;
215	struct mmc_command cmd = {0};
216
217	BUG_ON(!host);
218	BUG_ON(!cxd);
219
220	cmd.opcode = opcode;
221	cmd.arg = arg;
222	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
223
224	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
225	if (err)
226		return err;
227
228	memcpy(cxd, cmd.resp, sizeof(u32) * 4);
229
230	return 0;
231}
232
233static int
234mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
235		u32 opcode, void *buf, unsigned len)
236{
237	struct mmc_request mrq = {NULL};
238	struct mmc_command cmd = {0};
239	struct mmc_data data = {0};
240	struct scatterlist sg;
241	void *data_buf;
242
243	/* dma onto stack is unsafe/nonportable, but callers to this
244	 * routine normally provide temporary on-stack buffers ...
245	 */
246	data_buf = kmalloc(len, GFP_KERNEL);
247	if (data_buf == NULL)
248		return -ENOMEM;
249
250	mrq.cmd = &cmd;
251	mrq.data = &data;
252
253	cmd.opcode = opcode;
254	cmd.arg = 0;
255
256	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
257	 * rely on callers to never use this with "native" calls for reading
258	 * CSD or CID.  Native versions of those commands use the R2 type,
259	 * not R1 plus a data block.
260	 */
261	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
262
263	data.blksz = len;
264	data.blocks = 1;
265	data.flags = MMC_DATA_READ;
266	data.sg = &sg;
267	data.sg_len = 1;
268
269	sg_init_one(&sg, data_buf, len);
270
271	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
272		/*
 273		 * The spec states that CSD and CID accesses have a timeout
274		 * of 64 clock cycles.
275		 */
276		data.timeout_ns = 0;
277		data.timeout_clks = 64;
278	} else
279		mmc_set_data_timeout(&data, card);
280
281	mmc_wait_for_req(host, &mrq);
282
283	memcpy(buf, data_buf, len);
284	kfree(data_buf);
285
286	if (cmd.error)
287		return cmd.error;
288	if (data.error)
289		return data.error;
290
291	return 0;
292}
293
294int mmc_send_csd(struct mmc_card *card, u32 *csd)
295{
296	int ret, i;
297
298	if (!mmc_host_is_spi(card->host))
299		return mmc_send_cxd_native(card->host, card->rca << 16,
300				csd, MMC_SEND_CSD);
301
302	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd, 16);
303	if (ret)
304		return ret;
305
306	for (i = 0;i < 4;i++)
307		csd[i] = be32_to_cpu(csd[i]);
308
309	return 0;
310}
311
312int mmc_send_cid(struct mmc_host *host, u32 *cid)
313{
314	int ret, i;
315
316	if (!mmc_host_is_spi(host)) {
317		if (!host->card)
318			return -EINVAL;
319		return mmc_send_cxd_native(host, host->card->rca << 16,
320				cid, MMC_SEND_CID);
321	}
322
323	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid, 16);
324	if (ret)
325		return ret;
326
327	for (i = 0;i < 4;i++)
328		cid[i] = be32_to_cpu(cid[i]);
329
330	return 0;
331}
332
333int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
334{
335	return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
336			ext_csd, 512);
337}
338
339int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
340{
341	struct mmc_command cmd = {0};
342	int err;
343
344	cmd.opcode = MMC_SPI_READ_OCR;
345	cmd.arg = highcap ? (1 << 30) : 0;
346	cmd.flags = MMC_RSP_SPI_R3;
347
348	err = mmc_wait_for_cmd(host, &cmd, 0);
349
350	*ocrp = cmd.resp[1];
351	return err;
352}
353
354int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
355{
356	struct mmc_command cmd = {0};
357	int err;
358
359	cmd.opcode = MMC_SPI_CRC_ON_OFF;
360	cmd.flags = MMC_RSP_SPI_R1;
361	cmd.arg = use_crc;
362
363	err = mmc_wait_for_cmd(host, &cmd, 0);
364	if (!err)
365		host->use_spi_crc = use_crc;
366	return err;
367}
368
369/**
370 *	mmc_switch - modify EXT_CSD register
371 *	@card: the MMC card associated with the data transfer
372 *	@set: cmd set values
373 *	@index: EXT_CSD register index
374 *	@value: value to program into EXT_CSD register
375 *	@timeout_ms: timeout (ms) for operation performed by register write,
376 *                   timeout of zero implies maximum possible timeout
377 *
378 *	Modifies the EXT_CSD register for selected card.
379 */
380int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
381	       unsigned int timeout_ms)
382{
383	int err;
384	struct mmc_command cmd = {0};
385	u32 status;
386
387	BUG_ON(!card);
388	BUG_ON(!card->host);
389
390	cmd.opcode = MMC_SWITCH;
391	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
392		  (index << 16) |
393		  (value << 8) |
394		  set;
395	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
396	cmd.cmd_timeout_ms = timeout_ms;
397
398	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
399	if (err)
400		return err;
401
402	/* Must check status to be sure of no errors */
403	do {
404		err = mmc_send_status(card, &status);
405		if (err)
406			return err;
407		if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
408			break;
409		if (mmc_host_is_spi(card->host))
410			break;
411	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
412
413	if (mmc_host_is_spi(card->host)) {
414		if (status & R1_SPI_ILLEGAL_COMMAND)
415			return -EBADMSG;
416	} else {
417		if (status & 0xFDFFA000)
418			pr_warning("%s: unexpected status %#x after "
 419			       "switch\n", mmc_hostname(card->host), status);
420		if (status & R1_SWITCH_ERROR)
421			return -EBADMSG;
422	}
423
424	return 0;
425}
426EXPORT_SYMBOL_GPL(mmc_switch);
427
428int mmc_send_status(struct mmc_card *card, u32 *status)
429{
430	int err;
431	struct mmc_command cmd = {0};
432
433	BUG_ON(!card);
434	BUG_ON(!card->host);
435
436	cmd.opcode = MMC_SEND_STATUS;
437	if (!mmc_host_is_spi(card->host))
438		cmd.arg = card->rca << 16;
439	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
440
441	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
442	if (err)
443		return err;
444
445	/* NOTE: callers are required to understand the difference
446	 * between "native" and SPI format status words!
447	 */
448	if (status)
449		*status = cmd.resp[0];
450
451	return 0;
452}
453
454static int
455mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
456		  u8 len)
457{
458	struct mmc_request mrq = {NULL};
459	struct mmc_command cmd = {0};
460	struct mmc_data data = {0};
461	struct scatterlist sg;
462	u8 *data_buf;
463	u8 *test_buf;
464	int i, err;
465	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
466	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
467
468	/* dma onto stack is unsafe/nonportable, but callers to this
469	 * routine normally provide temporary on-stack buffers ...
470	 */
471	data_buf = kmalloc(len, GFP_KERNEL);
472	if (!data_buf)
473		return -ENOMEM;
474
475	if (len == 8)
476		test_buf = testdata_8bit;
477	else if (len == 4)
478		test_buf = testdata_4bit;
479	else {
480		pr_err("%s: Invalid bus_width %d\n",
481		       mmc_hostname(host), len);
482		kfree(data_buf);
483		return -EINVAL;
484	}
485
486	if (opcode == MMC_BUS_TEST_W)
487		memcpy(data_buf, test_buf, len);
488
489	mrq.cmd = &cmd;
490	mrq.data = &data;
491	cmd.opcode = opcode;
492	cmd.arg = 0;
493
494	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
495	 * rely on callers to never use this with "native" calls for reading
496	 * CSD or CID.  Native versions of those commands use the R2 type,
497	 * not R1 plus a data block.
498	 */
499	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
500
501	data.blksz = len;
502	data.blocks = 1;
503	if (opcode == MMC_BUS_TEST_R)
504		data.flags = MMC_DATA_READ;
505	else
506		data.flags = MMC_DATA_WRITE;
507
508	data.sg = &sg;
509	data.sg_len = 1;
510	sg_init_one(&sg, data_buf, len);
511	mmc_wait_for_req(host, &mrq);
512	err = 0;
513	if (opcode == MMC_BUS_TEST_R) {
514		for (i = 0; i < len / 4; i++)
515			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
516				err = -EIO;
517				break;
518			}
519	}
520	kfree(data_buf);
521
522	if (cmd.error)
523		return cmd.error;
524	if (data.error)
525		return data.error;
526
527	return err;
528}
529
530int mmc_bus_test(struct mmc_card *card, u8 bus_width)
531{
532	int err, width;
533
534	if (bus_width == MMC_BUS_WIDTH_8)
535		width = 8;
536	else if (bus_width == MMC_BUS_WIDTH_4)
537		width = 4;
538	else if (bus_width == MMC_BUS_WIDTH_1)
539		return 0; /* no need for test */
540	else
541		return -EINVAL;
542
543	/*
544	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
545	 * is a problem.  This improves chances that the test will work.
546	 */
547	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
548	err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
549	return err;
550}
551
552int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
553{
554	struct mmc_command cmd = {0};
555	unsigned int opcode;
556	int err;
557
558	if (!card->ext_csd.hpi) {
559		pr_warning("%s: Card didn't support HPI command\n",
560			   mmc_hostname(card->host));
561		return -EINVAL;
562	}
563
564	opcode = card->ext_csd.hpi_cmd;
565	if (opcode == MMC_STOP_TRANSMISSION)
566		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
567	else if (opcode == MMC_SEND_STATUS)
568		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
569
570	cmd.opcode = opcode;
571	cmd.arg = card->rca << 16 | 1;
572	cmd.cmd_timeout_ms = card->ext_csd.out_of_int_time;
573
574	err = mmc_wait_for_cmd(card->host, &cmd, 0);
575	if (err) {
576		pr_warn("%s: error %d interrupting operation. "
577			"HPI command response %#x\n", mmc_hostname(card->host),
578			err, cmd.resp[0]);
579		return err;
580	}
581	if (status)
582		*status = cmd.resp[0];
583
584	return 0;
585}
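
mmc_send_hpi_cmd() above is the low-level half of the HPI (high-priority interrupt) support added between these two versions; it is only meaningful while the card is busy programming. A rough, hypothetical caller sketch follows; it is illustrative only and not part of mmc_ops.c (in the tree the actual wrapper is mmc_interrupt_hpi() in core.c, which also claims the host around this sequence).

/* Illustrative sketch -- not part of mmc_ops.c. */
static int example_try_hpi(struct mmc_card *card)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (err)
		return err;

	/* Nothing to interrupt unless the card is in the programming state. */
	if (R1_CURRENT_STATE(status) != R1_STATE_PRG)
		return 0;

	return mmc_send_hpi_cmd(card, &status);
}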