// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/mmc/core/mmc_ops.c
 *
 * Copyright 2006-2007 Pierre Ossman
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_OPS_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */

static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);
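
/*
 * Usage sketch (hypothetical caller, not part of this file): CMD13 polling
 * is typically wrapped in a check that decodes the R1 state field, e.g.:
 *
 *	u32 status;
 *	int err = mmc_send_status(card, &status);
 *
 *	if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
 *		;	// card is back in transfer state, safe to proceed
 *
 * Note that on SPI hosts the response is an SPI-format status word, so the
 * R1_* macros above do not apply there.
 */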

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
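
/*
 * Sketch of the CMD4 argument layout, for illustration: the 16-bit DSR value
 * occupies bits [31:16] and the low 16 bits are stuff bits, so a DSR of
 * 0x0404 (a common default) would be sent as cmd.arg == 0x0404ffff.
 */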

int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode. Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);

		/*
		 * According to eMMC specification v5.1 section 6.4.3, we
		 * should issue CMD1 repeatedly in the idle state until
		 * the eMMC is ready. Otherwise some eMMC devices seem to enter
		 * the inactive mode after mmc_init_card() issued CMD0 when
		 * the eMMC device is busy.
		 */
		if (!ocr && !mmc_host_is_spi(host))
			cmd.arg = cmd.resp[0] | BIT(30);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
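
/*
 * Usage sketch (hypothetical caller): during attach, the core typically
 * probes first with a zero OCR to read the card's voltage window, then
 * repeats CMD1 with the negotiated OCR until the busy bit clears:
 *
 *	u32 ocr;
 *
 *	err = mmc_send_op_cond(host, 0, &ocr);			// probe
 *	// ... select a voltage window from ocr ...
 *	err = mmc_send_op_cond(host, ocr | BIT(30), NULL);
 *
 * BIT(30) requests sector-addressing (high capacity) support; this mirrors
 * what the loop above does internally for a zero-OCR probe.
 */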

int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: the caller is required to pass either a DMA-capable buffer or an
 * on-stack buffer for @buf (the latter with some overhead in the callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
{
	int ret, i;
	__be32 *csd_tmp;

	csd_tmp = kzalloc(16, GFP_KERNEL);
	if (!csd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		csd[i] = be32_to_cpu(csd_tmp[i]);

err:
	kfree(csd_tmp);
	return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_csd(card, csd);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				MMC_SEND_CSD);
}

static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
{
	int ret, i;
	__be32 *cid_tmp;

	cid_tmp = kzalloc(16, GFP_KERNEL);
	if (!cid_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cid[i] = be32_to_cpu(cid_tmp[i]);

err:
	kfree(cid_tmp);
	return ret;
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cid(host, cid);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
				512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
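
/*
 * Usage sketch (hypothetical caller): the returned 512-byte buffer is owned
 * by the caller and must be freed, e.g.:
 *
 *	u8 *ext_csd;
 *
 *	err = mmc_get_ext_csd(card, &ext_csd);
 *	if (!err) {
 *		pr_debug("EXT_CSD rev %u\n", ext_csd[EXT_CSD_REV]);
 *		kfree(ext_csd);
 *	}
 */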

int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (R1_STATUS(status))
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

/* Caller must hold re-tuning */
int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

int mmc_switch_status(struct mmc_card *card)
{
	return __mmc_switch_status(card, true);
}

static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	u32 status = 0;
	bool expired = false;
	bool busy = false;

	/* We have an unspecified cmd timeout, use the fallback value. */
	if (!timeout_ms)
		timeout_ms = MMC_OPS_TIMEOUT_MS;

	/*
	 * If we aren't allowed to poll with CMD13 and the host isn't capable
	 * of polling via ->card_busy(), all we can do is wait for the stated
	 * timeout and assume that is sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		return 0;
	}

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		if (host->ops->card_busy) {
			busy = host->ops->card_busy(host);
		} else {
			err = mmc_send_status(card, &status);
			if (retry_crc_err && err == -EILSEQ) {
				busy = true;
			} else if (err) {
				return err;
			} else {
				err = mmc_switch_status_error(host, status);
				if (err)
					return err;
				busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
			}
		}

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
	} while (busy);

	return 0;
}

/**
 * __mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 *		timeout of zero implies maximum possible timeout
 * @timing: new timing to change to
 * @use_busy_signal: use the busy signal as response type
 * @send_status: send status cmd to poll for busy
 * @retry_crc_err: retry if a CRC error occurs when polling with CMD13 for busy
 *
 * Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool use_busy_signal, bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp = use_busy_signal;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	/*
	 * If the cmd timeout and the max_busy_timeout of the host are both
	 * specified, let's validate them. A failure means we need to prevent
	 * the host from doing hw busy detection, which is done by converting
	 * to a R1 response instead of a R1B.
	 */
	if (timeout_ms && host->max_busy_timeout &&
		(timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		/*
		 * A busy_timeout of zero means the host can decide to use
		 * whatever value it finds suitable.
		 */
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		goto out;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		goto out;

	/* If SPI or HW busy detection was used above, we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
	    mmc_host_is_spi(host))
		goto out_tim;

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before checking switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			    true, true, false);
}
EXPORT_SYMBOL_GPL(mmc_switch);
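
/*
 * Usage sketch (hypothetical caller): a typical CMD6 write-byte access, here
 * switching the card to 4-bit bus width the way the core's bus-width
 * selection does:
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 *			 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_4,
 *			 card->ext_csd.generic_cmd6_time);
 *
 * The resulting CMD6 argument encodes the write-byte access mode (0x3) in
 * bits [25:24], the EXT_CSD index in [23:16] and the value in [15:8].
 */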

int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process normally takes
	 * fewer than 40 executions of CMD19, and the timeout value should
	 * be shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
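
/*
 * Usage sketch (hypothetical host driver, simplified): a host's
 * ->execute_tuning() callback typically steps through its sample-clock
 * phases and keeps one for which the tuning block reads back intact:
 *
 *	static int foo_execute_tuning(struct mmc_host *mmc, u32 opcode)
 *	{
 *		int phase;
 *
 *		for (phase = 0; phase < FOO_NUM_PHASES; phase++) {
 *			foo_set_sample_phase(mmc, phase);  // driver-specific
 *			if (!mmc_send_tuning(mmc, opcode, NULL))
 *				return 0;	// this phase works
 *		}
 *		return -EIO;
 *	}
 *
 * The foo_* names are made up; real drivers also widen and center the
 * passing window rather than stopping at the first good phase.
 */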

int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * The eMMC specification says that CMD12 can be used to stop a tuning
	 * command, but the SD specification does not, so do nothing unless it
	 * is eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_abort_tuning);

static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
	 * is a problem. This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}

static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {};
	unsigned int opcode;
	int err;

	opcode = card->ext_csd.hpi_cmd;
	if (opcode == MMC_STOP_TRANSMISSION)
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	else if (opcode == MMC_SEND_STATUS)
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = opcode;
	cmd.arg = card->rca << 16 | 1;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_warn("%s: error %d interrupting operation. "
			"HPI command response %#x\n", mmc_hostname(card->host),
			err, cmd.resp[0]);
		return err;
	}
	if (status)
		*status = cmd.resp[0];

	return 0;
}

/**
 * mmc_interrupt_hpi - Issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issue a High Priority Interrupt, then poll the card status
 * until it leaves the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			 mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	return err;
}

int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}

/**
 * mmc_run_bkops - Run BKOPS for supported cards
 * @card: MMC card to run BKOPS for
 *
 * Run background operations synchronously for cards having manual BKOPS
 * enabled, in case the card reports an urgent BKOPS level.
 */
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
	 * synchronously. Going forward, we may consider starting BKOPS for
	 * less urgent levels from an asynchronous background task, when idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_START, 1, MMC_OPS_TIMEOUT_MS);
	if (err)
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);

	mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0) &&
	    (card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
			       mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);
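
/*
 * Context note (an assumption about callers, not stated in this file): the
 * core typically invokes mmc_flush_cache() from the suspend/shutdown paths,
 * so that data held in the card's volatile cache reaches non-volatile
 * storage before power is removed; the CMD6 FLUSH_CACHE switch above is
 * what performs the flush.
 */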

static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
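
/*
 * Usage sketch (hypothetical caller): command queueing is disabled around
 * operations that cannot be queued (e.g. RPMB access), then re-enabled
 * afterwards:
 *
 *	if (card->ext_csd.cmdq_en) {
 *		err = mmc_cmdq_disable(card);
 *		if (err)
 *			return err;
 *	}
 *	// ... issue non-queued commands ...
 *	err = mmc_cmdq_enable(card);
 */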