1/*
2 * linux/drivers/mmc/core/core.c
3 *
4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
6 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
7 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/interrupt.h>
16#include <linux/completion.h>
17#include <linux/device.h>
18#include <linux/delay.h>
19#include <linux/pagemap.h>
20#include <linux/err.h>
21#include <linux/leds.h>
22#include <linux/scatterlist.h>
23#include <linux/log2.h>
24#include <linux/regulator/consumer.h>
25#include <linux/pm_runtime.h>
26#include <linux/pm_wakeup.h>
27#include <linux/suspend.h>
28#include <linux/fault-inject.h>
29#include <linux/random.h>
30#include <linux/slab.h>
31#include <linux/of.h>
32
33#include <linux/mmc/card.h>
34#include <linux/mmc/host.h>
35#include <linux/mmc/mmc.h>
36#include <linux/mmc/sd.h>
37#include <linux/mmc/slot-gpio.h>
38
39#include "core.h"
40#include "bus.h"
41#include "host.h"
42#include "sdio_bus.h"
43#include "pwrseq.h"
44
45#include "mmc_ops.h"
46#include "sd_ops.h"
47#include "sdio_ops.h"
48
49/* If the device is not responding */
50#define MMC_CORE_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
51
52/*
53 * Background operations can take a long time, depending on the housekeeping
54 * operations the card has to perform.
55 */
56#define MMC_BKOPS_MAX_TIMEOUT (4 * 60 * 1000) /* max time to wait in ms */
57
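/* Initialization clock rates (Hz), tried in descending order when probing a card. */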
58static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
59
/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
65bool use_spi_crc = 1;
66module_param(use_spi_crc, bool, 0);
67
68static int mmc_schedule_delayed_work(struct delayed_work *work,
69 unsigned long delay)
70{
	/*
	 * We use the system_freezable_wq for two reasons.
	 * First, it allows several works (not the same work item) to be
	 * executed simultaneously. Second, the queue becomes frozen when
	 * userspace becomes frozen during system PM.
	 */
77 return queue_delayed_work(system_freezable_wq, work, delay);
78}
79
80#ifdef CONFIG_FAIL_MMC_REQUEST
81
82/*
83 * Internal function. Inject random data errors.
84 * If mmc_data is NULL no errors are injected.
85 */
86static void mmc_should_fail_request(struct mmc_host *host,
87 struct mmc_request *mrq)
88{
89 struct mmc_command *cmd = mrq->cmd;
90 struct mmc_data *data = mrq->data;
91 static const int data_errors[] = {
92 -ETIMEDOUT,
93 -EILSEQ,
94 -EIO,
95 };
96
97 if (!data)
98 return;
99
100 if (cmd->error || data->error ||
101 !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
102 return;
103
104 data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
105 data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
106}
107
108#else /* CONFIG_FAIL_MMC_REQUEST */
109
110static inline void mmc_should_fail_request(struct mmc_host *host,
111 struct mmc_request *mrq)
112{
113}
114
115#endif /* CONFIG_FAIL_MMC_REQUEST */
116
/**
 * mmc_request_done - finish processing an MMC request
 * @host: MMC host which completed request
 * @mrq: MMC request which completed
 *
 * MMC drivers should call this function when they have completed
 * their processing of a request.
 */
125void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
126{
127 struct mmc_command *cmd = mrq->cmd;
128 int err = cmd->error;
129
130 /* Flag re-tuning needed on CRC errors */
131 if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
132 cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
133 (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
134 (mrq->data && mrq->data->error == -EILSEQ) ||
135 (mrq->stop && mrq->stop->error == -EILSEQ)))
136 mmc_retune_needed(host);
137
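	/* In SPI mode an illegal command cannot succeed on retry, so give up retrying. */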
138 if (err && cmd->retries && mmc_host_is_spi(host)) {
139 if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
140 cmd->retries = 0;
141 }
142
143 if (err && cmd->retries && !mmc_card_removed(host->card)) {
144 /*
145 * Request starter must handle retries - see
146 * mmc_wait_for_req_done().
147 */
148 if (mrq->done)
149 mrq->done(mrq);
150 } else {
151 mmc_should_fail_request(host, mrq);
152
153 led_trigger_event(host->led, LED_OFF);
154
155 if (mrq->sbc) {
156 pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
157 mmc_hostname(host), mrq->sbc->opcode,
158 mrq->sbc->error,
159 mrq->sbc->resp[0], mrq->sbc->resp[1],
160 mrq->sbc->resp[2], mrq->sbc->resp[3]);
161 }
162
163 pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
164 mmc_hostname(host), cmd->opcode, err,
165 cmd->resp[0], cmd->resp[1],
166 cmd->resp[2], cmd->resp[3]);
167
168 if (mrq->data) {
169 pr_debug("%s: %d bytes transferred: %d\n",
170 mmc_hostname(host),
171 mrq->data->bytes_xfered, mrq->data->error);
172 }
173
174 if (mrq->stop) {
175 pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
176 mmc_hostname(host), mrq->stop->opcode,
177 mrq->stop->error,
178 mrq->stop->resp[0], mrq->stop->resp[1],
179 mrq->stop->resp[2], mrq->stop->resp[3]);
180 }
181
182 if (mrq->done)
183 mrq->done(mrq);
184 }
185}
186
187EXPORT_SYMBOL(mmc_request_done);
188
189static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
190{
191 int err;
192
193 /* Assumes host controller has been runtime resumed by mmc_claim_host */
194 err = mmc_retune(host);
195 if (err) {
196 mrq->cmd->error = err;
197 mmc_request_done(host, mrq);
198 return;
199 }
200
	/*
	 * For SDIO R/W commands we must wait for card busy otherwise some
	 * SDIO devices won't work properly.
	 */
	if (mmc_is_io_op(mrq->cmd->opcode) && host->ops->card_busy) {
		int tries = 500; /* Wait approx 500ms at maximum */
207
208 while (host->ops->card_busy(host) && --tries)
209 mmc_delay(1);
210
211 if (tries == 0) {
212 mrq->cmd->error = -EBUSY;
213 mmc_request_done(host, mrq);
214 return;
215 }
216 }
217
218 host->ops->request(host, mrq);
219}
220
221static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
222{
223#ifdef CONFIG_MMC_DEBUG
224 unsigned int i, sz;
225 struct scatterlist *sg;
226#endif
227 mmc_retune_hold(host);
228
229 if (mmc_card_removed(host->card))
230 return -ENOMEDIUM;
231
232 if (mrq->sbc) {
233 pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
234 mmc_hostname(host), mrq->sbc->opcode,
235 mrq->sbc->arg, mrq->sbc->flags);
236 }
237
238 pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
239 mmc_hostname(host), mrq->cmd->opcode,
240 mrq->cmd->arg, mrq->cmd->flags);
241
242 if (mrq->data) {
243 pr_debug("%s: blksz %d blocks %d flags %08x "
244 "tsac %d ms nsac %d\n",
245 mmc_hostname(host), mrq->data->blksz,
246 mrq->data->blocks, mrq->data->flags,
247 mrq->data->timeout_ns / 1000000,
248 mrq->data->timeout_clks);
249 }
250
251 if (mrq->stop) {
252 pr_debug("%s: CMD%u arg %08x flags %08x\n",
253 mmc_hostname(host), mrq->stop->opcode,
254 mrq->stop->arg, mrq->stop->flags);
255 }
256
257 WARN_ON(!host->claimed);
258
259 mrq->cmd->error = 0;
260 mrq->cmd->mrq = mrq;
261 if (mrq->sbc) {
262 mrq->sbc->error = 0;
263 mrq->sbc->mrq = mrq;
264 }
265 if (mrq->data) {
266 BUG_ON(mrq->data->blksz > host->max_blk_size);
267 BUG_ON(mrq->data->blocks > host->max_blk_count);
268 BUG_ON(mrq->data->blocks * mrq->data->blksz >
269 host->max_req_size);
270
271#ifdef CONFIG_MMC_DEBUG
272 sz = 0;
273 for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
274 sz += sg->length;
275 BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
276#endif
277
278 mrq->cmd->data = mrq->data;
279 mrq->data->error = 0;
280 mrq->data->mrq = mrq;
281 if (mrq->stop) {
282 mrq->data->stop = mrq->stop;
283 mrq->stop->error = 0;
284 mrq->stop->mrq = mrq;
285 }
286 }
287 led_trigger_event(host->led, LED_FULL);
288 __mmc_start_request(host, mrq);
289
290 return 0;
291}
292
/**
 * mmc_start_bkops - start BKOPS for supported cards
 * @card: MMC card to start BKOPS
 * @from_exception: A flag to indicate if this function was
 *		    called due to an exception raised by the card
 *
 * Start background operations whenever requested.
 * When the urgent BKOPS bit is set in a R1 command response
 * then background operations should be started immediately.
 */
303void mmc_start_bkops(struct mmc_card *card, bool from_exception)
304{
305 int err;
306 int timeout;
307 bool use_busy_signal;
308
309 BUG_ON(!card);
310
311 if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
312 return;
313
314 err = mmc_read_bkops_status(card);
315 if (err) {
316 pr_err("%s: Failed to read bkops status: %d\n",
317 mmc_hostname(card->host), err);
318 return;
319 }
320
321 if (!card->ext_csd.raw_bkops_status)
322 return;
323
324 if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
325 from_exception)
326 return;
327
328 mmc_claim_host(card->host);
329 if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
330 timeout = MMC_BKOPS_MAX_TIMEOUT;
331 use_busy_signal = true;
332 } else {
333 timeout = 0;
334 use_busy_signal = false;
335 }
336
337 mmc_retune_hold(card->host);
338
339 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
340 EXT_CSD_BKOPS_START, 1, timeout,
341 use_busy_signal, true, false);
342 if (err) {
343 pr_warn("%s: Error %d starting bkops\n",
344 mmc_hostname(card->host), err);
345 mmc_retune_release(card->host);
346 goto out;
347 }
348
	/*
	 * For urgent BKOPS status (LEVEL_2 and higher) BKOPS is executed
	 * synchronously; otherwise the operation is left in progress and
	 * the card is flagged as doing BKOPS.
	 */
354 if (!use_busy_signal)
355 mmc_card_set_doing_bkops(card);
356 else
357 mmc_retune_release(card->host);
358out:
359 mmc_release_host(card->host);
360}
361EXPORT_SYMBOL(mmc_start_bkops);
362
363/*
364 * mmc_wait_data_done() - done callback for data request
365 * @mrq: done data request
366 *
367 * Wakes up mmc context, passed as a callback to host controller driver
368 */
369static void mmc_wait_data_done(struct mmc_request *mrq)
370{
371 struct mmc_context_info *context_info = &mrq->host->context_info;
372
373 context_info->is_done_rcv = true;
374 wake_up_interruptible(&context_info->wait);
375}
376
377static void mmc_wait_done(struct mmc_request *mrq)
378{
379 complete(&mrq->completion);
380}
381
382/*
383 *__mmc_start_data_req() - starts data request
384 * @host: MMC host to start the request
385 * @mrq: data request to start
386 *
387 * Sets the done callback to be called when request is completed by the card.
388 * Starts data mmc request execution
389 */
390static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
391{
392 int err;
393
394 mrq->done = mmc_wait_data_done;
395 mrq->host = host;
396
397 err = mmc_start_request(host, mrq);
398 if (err) {
399 mrq->cmd->error = err;
400 mmc_wait_data_done(mrq);
401 }
402
403 return err;
404}
405
406static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
407{
408 int err;
409
410 init_completion(&mrq->completion);
411 mrq->done = mmc_wait_done;
412
413 err = mmc_start_request(host, mrq);
414 if (err) {
415 mrq->cmd->error = err;
416 complete(&mrq->completion);
417 }
418
419 return err;
420}
421
/*
 * mmc_wait_for_data_req_done() - wait for request completed
 * @host: MMC host to prepare the command.
 * @mrq: MMC request to wait for
 *
 * Blocks the MMC context until the host controller acknowledges the end of
 * the data request execution, or until a new request notification arrives
 * from the block layer. Handles command retries.
 *
 * Returns enum mmc_blk_status after checking errors.
 */
433static int mmc_wait_for_data_req_done(struct mmc_host *host,
434 struct mmc_request *mrq,
435 struct mmc_async_req *next_req)
436{
437 struct mmc_command *cmd;
438 struct mmc_context_info *context_info = &host->context_info;
439 int err;
440 unsigned long flags;
441
442 while (1) {
443 wait_event_interruptible(context_info->wait,
444 (context_info->is_done_rcv ||
445 context_info->is_new_req));
446 spin_lock_irqsave(&context_info->lock, flags);
447 context_info->is_waiting_last_req = false;
448 spin_unlock_irqrestore(&context_info->lock, flags);
449 if (context_info->is_done_rcv) {
450 context_info->is_done_rcv = false;
451 context_info->is_new_req = false;
452 cmd = mrq->cmd;
453
454 if (!cmd->error || !cmd->retries ||
455 mmc_card_removed(host->card)) {
456 err = host->areq->err_check(host->card,
457 host->areq);
458 break; /* return err */
459 } else {
460 mmc_retune_recheck(host);
461 pr_info("%s: req failed (CMD%u): %d, retrying...\n",
462 mmc_hostname(host),
463 cmd->opcode, cmd->error);
464 cmd->retries--;
465 cmd->error = 0;
466 __mmc_start_request(host, mrq);
467 continue; /* wait for done/new event again */
468 }
469 } else if (context_info->is_new_req) {
470 context_info->is_new_req = false;
471 if (!next_req)
472 return MMC_BLK_NEW_REQUEST;
473 }
474 }
475 mmc_retune_release(host);
476 return err;
477}
478
479static void mmc_wait_for_req_done(struct mmc_host *host,
480 struct mmc_request *mrq)
481{
482 struct mmc_command *cmd;
483
484 while (1) {
485 wait_for_completion(&mrq->completion);
486
487 cmd = mrq->cmd;
488
		/*
		 * If the host has timed out waiting for the sanitize
		 * operation to complete, the card might still be in the
		 * programming state, so try to bring the card out of
		 * that state.
		 */
495 if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
496 if (!mmc_interrupt_hpi(host->card)) {
497 pr_warn("%s: %s: Interrupted sanitize\n",
498 mmc_hostname(host), __func__);
499 cmd->error = 0;
500 break;
501 } else {
502 pr_err("%s: %s: Failed to interrupt sanitize\n",
503 mmc_hostname(host), __func__);
504 }
505 }
506 if (!cmd->error || !cmd->retries ||
507 mmc_card_removed(host->card))
508 break;
509
510 mmc_retune_recheck(host);
511
512 pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
513 mmc_hostname(host), cmd->opcode, cmd->error);
514 cmd->retries--;
515 cmd->error = 0;
516 __mmc_start_request(host, mrq);
517 }
518
519 mmc_retune_release(host);
520}
521
/**
 * mmc_pre_req - Prepare for a new request
 * @host: MMC host to prepare command
 * @mrq: MMC request to prepare for
 * @is_first_req: true if there is no previously started request
 *                that may run in parallel to this call, otherwise false
 *
 * mmc_pre_req() is called prior to mmc_start_req() to let
 * the host prepare for the new request. Preparation of a request may be
 * performed while another request is running on the host.
 */
533static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
534 bool is_first_req)
535{
536 if (host->ops->pre_req)
537 host->ops->pre_req(host, mrq, is_first_req);
538}
539
/**
 * mmc_post_req - Post process a completed request
 * @host: MMC host to post process command
 * @mrq: MMC request to post process for
 * @err: Error, if non zero, clean up any resources made in pre_req
 *
 * Let the host post process a completed request. Post processing of
 * a request may be performed while another request is running.
 */
549static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
550 int err)
551{
552 if (host->ops->post_req)
553 host->ops->post_req(host, mrq, err);
554}
555
/**
 * mmc_start_req - start a non-blocking request
 * @host: MMC host to start command
 * @areq: async request to start
 * @error: out parameter returns 0 for success, otherwise non zero
 *
 * Start a new MMC custom command request for a host.
 * If there is an ongoing async request, wait for completion
 * of that request before starting the new one, then return.
 * Does not wait for the new request to complete.
 *
 * Wait for an ongoing request (previously started) to complete and
 * return the completed request. If there is no ongoing request, NULL
 * is returned without waiting. NULL is not an error condition.
 */
572struct mmc_async_req *mmc_start_req(struct mmc_host *host,
573 struct mmc_async_req *areq, int *error)
574{
575 int err = 0;
576 int start_err = 0;
577 struct mmc_async_req *data = host->areq;
578
579 /* Prepare a new request */
580 if (areq)
581 mmc_pre_req(host, areq->mrq, !host->areq);
582
583 if (host->areq) {
584 err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
585 if (err == MMC_BLK_NEW_REQUEST) {
586 if (error)
587 *error = err;
588 /*
589 * The previous request was not completed,
590 * nothing to return
591 */
592 return NULL;
593 }
594 /*
595 * Check BKOPS urgency for each R1 response
596 */
597 if (host->card && mmc_card_mmc(host->card) &&
598 ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
599 (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
600 (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
601
602 /* Cancel the prepared request */
603 if (areq)
604 mmc_post_req(host, areq->mrq, -EINVAL);
605
606 mmc_start_bkops(host->card, true);
607
608 /* prepare the request again */
609 if (areq)
610 mmc_pre_req(host, areq->mrq, !host->areq);
611 }
612 }
613
614 if (!err && areq)
615 start_err = __mmc_start_data_req(host, areq->mrq);
616
617 if (host->areq)
618 mmc_post_req(host, host->areq->mrq, 0);
619
620 /* Cancel a prepared request if it was not started. */
621 if ((err || start_err) && areq)
622 mmc_post_req(host, areq->mrq, -EINVAL);
623
624 if (err)
625 host->areq = NULL;
626 else
627 host->areq = areq;
628
629 if (error)
630 *error = err;
631 return data;
632}
633EXPORT_SYMBOL(mmc_start_req);
634
635/**
636 * mmc_wait_for_req - start a request and wait for completion
637 * @host: MMC host to start command
638 * @mrq: MMC request to start
639 *
640 * Start a new MMC custom command request for a host, and wait
641 * for the command to complete. Does not attempt to parse the
642 * response.
643 */
644void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
645{
646 __mmc_start_req(host, mrq);
647 mmc_wait_for_req_done(host, mrq);
648}
649EXPORT_SYMBOL(mmc_wait_for_req);
650
/**
 * mmc_interrupt_hpi - Issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issue a High Priority Interrupt, then poll the card status
 * until it is out of the programming state.
 */
658int mmc_interrupt_hpi(struct mmc_card *card)
659{
660 int err;
661 u32 status;
662 unsigned long prg_wait;
663
664 BUG_ON(!card);
665
666 if (!card->ext_csd.hpi_en) {
667 pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
668 return 1;
669 }
670
671 mmc_claim_host(card->host);
672 err = mmc_send_status(card, &status);
673 if (err) {
674 pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
675 goto out;
676 }
677
678 switch (R1_CURRENT_STATE(status)) {
679 case R1_STATE_IDLE:
680 case R1_STATE_READY:
681 case R1_STATE_STBY:
682 case R1_STATE_TRAN:
683 /*
684 * In idle and transfer states, HPI is not needed and the caller
685 * can issue the next intended command immediately
686 */
687 goto out;
688 case R1_STATE_PRG:
689 break;
690 default:
691 /* In all other states, it's illegal to issue HPI */
692 pr_debug("%s: HPI cannot be sent. Card state=%d\n",
693 mmc_hostname(card->host), R1_CURRENT_STATE(status));
694 err = -EINVAL;
695 goto out;
696 }
697
698 err = mmc_send_hpi_cmd(card, &status);
699 if (err)
700 goto out;
701
702 prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
703 do {
704 err = mmc_send_status(card, &status);
705
706 if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
707 break;
708 if (time_after(jiffies, prg_wait))
709 err = -ETIMEDOUT;
710 } while (!err);
711
712out:
713 mmc_release_host(card->host);
714 return err;
715}
716EXPORT_SYMBOL(mmc_interrupt_hpi);
717
718/**
719 * mmc_wait_for_cmd - start a command and wait for completion
720 * @host: MMC host to start command
721 * @cmd: MMC command to start
722 * @retries: maximum number of retries
723 *
724 * Start a new MMC command for a host, and wait for the command
725 * to complete. Return any error that occurred while the command
726 * was executing. Do not attempt to parse the response.
727 */
728int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
729{
730 struct mmc_request mrq = {NULL};
731
732 WARN_ON(!host->claimed);
733
734 memset(cmd->resp, 0, sizeof(cmd->resp));
735 cmd->retries = retries;
736
737 mrq.cmd = cmd;
738 cmd->data = NULL;
739
740 mmc_wait_for_req(host, &mrq);
741
742 return cmd->error;
743}
744
745EXPORT_SYMBOL(mmc_wait_for_cmd);
746
/**
 * mmc_stop_bkops - stop ongoing BKOPS
 * @card: MMC card to check BKOPS
 *
 * Send an HPI command to stop ongoing background operations to
 * allow rapid servicing of foreground operations, e.g. reads/
 * writes. Wait until the card comes out of the programming state
 * to avoid errors in servicing read/write requests.
 */
756int mmc_stop_bkops(struct mmc_card *card)
757{
758 int err = 0;
759
760 BUG_ON(!card);
761 err = mmc_interrupt_hpi(card);
762
	/*
	 * If err is -EINVAL, we can't issue an HPI, so assume the card
	 * will complete the BKOPS on its own.
	 */
767 if (!err || (err == -EINVAL)) {
768 mmc_card_clr_doing_bkops(card);
769 mmc_retune_release(card->host);
770 err = 0;
771 }
772
773 return err;
774}
775EXPORT_SYMBOL(mmc_stop_bkops);
776
777int mmc_read_bkops_status(struct mmc_card *card)
778{
779 int err;
780 u8 *ext_csd;
781
782 mmc_claim_host(card->host);
783 err = mmc_get_ext_csd(card, &ext_csd);
784 mmc_release_host(card->host);
785 if (err)
786 return err;
787
788 card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
789 card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
790 kfree(ext_csd);
791 return 0;
792}
793EXPORT_SYMBOL(mmc_read_bkops_status);
794
795/**
796 * mmc_set_data_timeout - set the timeout for a data command
797 * @data: data phase for command
798 * @card: the MMC card associated with the data transfer
799 *
800 * Computes the data timeout parameters according to the
801 * correct algorithm given the card type.
802 */
803void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
804{
805 unsigned int mult;
806
807 /*
808 * SDIO cards only define an upper 1 s limit on access.
809 */
810 if (mmc_card_sdio(card)) {
811 data->timeout_ns = 1000000000;
812 data->timeout_clks = 0;
813 return;
814 }
815
816 /*
817 * SD cards use a 100 multiplier rather than 10
818 */
819 mult = mmc_card_sd(card) ? 100 : 10;
820
821 /*
822 * Scale up the multiplier (and therefore the timeout) by
823 * the r2w factor for writes.
824 */
825 if (data->flags & MMC_DATA_WRITE)
826 mult <<= card->csd.r2w_factor;
827
828 data->timeout_ns = card->csd.tacc_ns * mult;
829 data->timeout_clks = card->csd.tacc_clks * mult;
830
831 /*
832 * SD cards also have an upper limit on the timeout.
833 */
834 if (mmc_card_sd(card)) {
835 unsigned int timeout_us, limit_us;
836
837 timeout_us = data->timeout_ns / 1000;
838 if (card->host->ios.clock)
839 timeout_us += data->timeout_clks * 1000 /
840 (card->host->ios.clock / 1000);
841
842 if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec says "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length." Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
851 limit_us = 3000000;
852 else
853 limit_us = 100000;
854
855 /*
856 * SDHC cards always use these fixed values.
857 */
858 if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
859 data->timeout_ns = limit_us * 1000;
860 data->timeout_clks = 0;
861 }
862
863 /* assign limit value if invalid */
864 if (timeout_us == 0)
865 data->timeout_ns = limit_us * 1000;
866 }
867
868 /*
869 * Some cards require longer data read timeout than indicated in CSD.
870 * Address this by setting the read timeout to a "reasonably high"
871 * value. For the cards tested, 300ms has proven enough. If necessary,
872 * this value can be increased if other problematic cards require this.
873 */
874 if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
875 data->timeout_ns = 300000000;
876 data->timeout_clks = 0;
877 }
878
879 /*
880 * Some cards need very high timeouts if driven in SPI mode.
881 * The worst observed timeout was 900ms after writing a
882 * continuous stream of data until the internal logic
883 * overflowed.
884 */
885 if (mmc_host_is_spi(card->host)) {
886 if (data->flags & MMC_DATA_WRITE) {
887 if (data->timeout_ns < 1000000000)
888 data->timeout_ns = 1000000000; /* 1s */
889 } else {
890 if (data->timeout_ns < 100000000)
891 data->timeout_ns = 100000000; /* 100ms */
892 }
893 }
894}
895EXPORT_SYMBOL(mmc_set_data_timeout);
896
897/**
898 * mmc_align_data_size - pads a transfer size to a more optimal value
899 * @card: the MMC card associated with the data transfer
900 * @sz: original transfer size
901 *
902 * Pads the original data size with a number of extra bytes in
903 * order to avoid controller bugs and/or performance hits
904 * (e.g. some controllers revert to PIO for certain sizes).
905 *
906 * Returns the improved size, which might be unmodified.
907 *
908 * Note that this function is only relevant when issuing a
909 * single scatter gather entry.
910 */
911unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
912{
913 /*
914 * FIXME: We don't have a system for the controller to tell
915 * the core about its problems yet, so for now we just 32-bit
916 * align the size.
917 */
918 sz = ((sz + 3) / 4) * 4;
919
920 return sz;
921}
922EXPORT_SYMBOL(mmc_align_data_size);
923
/**
 * __mmc_claim_host - exclusively claim a host
 * @host: mmc host to claim
 * @abort: whether or not the operation should be aborted
 *
 * Claim a host for a set of operations. If @abort is non-NULL and
 * dereferences to a non-zero value, this will return prematurely with
 * that non-zero value without acquiring the lock. Otherwise it returns
 * zero with the lock held.
 */
934int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
935{
936 DECLARE_WAITQUEUE(wait, current);
937 unsigned long flags;
938 int stop;
939 bool pm = false;
940
941 might_sleep();
942
943 add_wait_queue(&host->wq, &wait);
944 spin_lock_irqsave(&host->lock, flags);
945 while (1) {
946 set_current_state(TASK_UNINTERRUPTIBLE);
947 stop = abort ? atomic_read(abort) : 0;
948 if (stop || !host->claimed || host->claimer == current)
949 break;
950 spin_unlock_irqrestore(&host->lock, flags);
951 schedule();
952 spin_lock_irqsave(&host->lock, flags);
953 }
954 set_current_state(TASK_RUNNING);
955 if (!stop) {
956 host->claimed = 1;
957 host->claimer = current;
958 host->claim_cnt += 1;
959 if (host->claim_cnt == 1)
960 pm = true;
961 } else
962 wake_up(&host->wq);
963 spin_unlock_irqrestore(&host->lock, flags);
964 remove_wait_queue(&host->wq, &wait);
965
966 if (pm)
967 pm_runtime_get_sync(mmc_dev(host));
968
969 return stop;
970}
971EXPORT_SYMBOL(__mmc_claim_host);
972
973/**
974 * mmc_release_host - release a host
975 * @host: mmc host to release
976 *
977 * Release a MMC host, allowing others to claim the host
978 * for their operations.
979 */
980void mmc_release_host(struct mmc_host *host)
981{
982 unsigned long flags;
983
984 WARN_ON(!host->claimed);
985
986 spin_lock_irqsave(&host->lock, flags);
987 if (--host->claim_cnt) {
988 /* Release for nested claim */
989 spin_unlock_irqrestore(&host->lock, flags);
990 } else {
991 host->claimed = 0;
992 host->claimer = NULL;
993 spin_unlock_irqrestore(&host->lock, flags);
994 wake_up(&host->wq);
995 pm_runtime_mark_last_busy(mmc_dev(host));
996 pm_runtime_put_autosuspend(mmc_dev(host));
997 }
998}
999EXPORT_SYMBOL(mmc_release_host);
1000
1001/*
1002 * This is a helper function, which fetches a runtime pm reference for the
1003 * card device and also claims the host.
1004 */
1005void mmc_get_card(struct mmc_card *card)
1006{
1007 pm_runtime_get_sync(&card->dev);
1008 mmc_claim_host(card->host);
1009}
1010EXPORT_SYMBOL(mmc_get_card);
1011
1012/*
1013 * This is a helper function, which releases the host and drops the runtime
1014 * pm reference for the card device.
1015 */
1016void mmc_put_card(struct mmc_card *card)
1017{
1018 mmc_release_host(card->host);
1019 pm_runtime_mark_last_busy(&card->dev);
1020 pm_runtime_put_autosuspend(&card->dev);
1021}
1022EXPORT_SYMBOL(mmc_put_card);
1023
1024/*
1025 * Internal function that does the actual ios call to the host driver,
1026 * optionally printing some debug output.
1027 */
1028static inline void mmc_set_ios(struct mmc_host *host)
1029{
1030 struct mmc_ios *ios = &host->ios;
1031
1032 pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
1033 "width %u timing %u\n",
1034 mmc_hostname(host), ios->clock, ios->bus_mode,
1035 ios->power_mode, ios->chip_select, ios->vdd,
1036 1 << ios->bus_width, ios->timing);
1037
1038 host->ops->set_ios(host, ios);
1039}
1040
1041/*
1042 * Control chip select pin on a host.
1043 */
1044void mmc_set_chip_select(struct mmc_host *host, int mode)
1045{
1046 host->ios.chip_select = mode;
1047 mmc_set_ios(host);
1048}
1049
1050/*
1051 * Sets the host clock to the highest possible frequency that
1052 * is below "hz".
1053 */
1054void mmc_set_clock(struct mmc_host *host, unsigned int hz)
1055{
1056 WARN_ON(hz && hz < host->f_min);
1057
1058 if (hz > host->f_max)
1059 hz = host->f_max;
1060
1061 host->ios.clock = hz;
1062 mmc_set_ios(host);
1063}
1064
1065int mmc_execute_tuning(struct mmc_card *card)
1066{
1067 struct mmc_host *host = card->host;
1068 u32 opcode;
1069 int err;
1070
1071 if (!host->ops->execute_tuning)
1072 return 0;
1073
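	/* eMMC (HS200) tunes with CMD21; SD uses CMD19. */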
1074 if (mmc_card_mmc(card))
1075 opcode = MMC_SEND_TUNING_BLOCK_HS200;
1076 else
1077 opcode = MMC_SEND_TUNING_BLOCK;
1078
1079 err = host->ops->execute_tuning(host, opcode);
1080
1081 if (err)
1082 pr_err("%s: tuning execution failed: %d\n",
1083 mmc_hostname(host), err);
1084 else
1085 mmc_retune_enable(host);
1086
1087 return err;
1088}
1089
1090/*
1091 * Change the bus mode (open drain/push-pull) of a host.
1092 */
1093void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
1094{
1095 host->ios.bus_mode = mode;
1096 mmc_set_ios(host);
1097}
1098
1099/*
1100 * Change data bus width of a host.
1101 */
1102void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
1103{
1104 host->ios.bus_width = width;
1105 mmc_set_ios(host);
1106}
1107
1108/*
1109 * Set initial state after a power cycle or a hw_reset.
1110 */
1111void mmc_set_initial_state(struct mmc_host *host)
1112{
1113 mmc_retune_disable(host);
1114
1115 if (mmc_host_is_spi(host))
1116 host->ios.chip_select = MMC_CS_HIGH;
1117 else
1118 host->ios.chip_select = MMC_CS_DONTCARE;
1119 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1120 host->ios.bus_width = MMC_BUS_WIDTH_1;
1121 host->ios.timing = MMC_TIMING_LEGACY;
1122 host->ios.drv_type = 0;
1123
1124 mmc_set_ios(host);
1125}
1126
1127/**
1128 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
1129 * @vdd: voltage (mV)
1130 * @low_bits: prefer low bits in boundary cases
1131 *
1132 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible, a negative errno value is returned.
1134 *
1135 * Depending on the @low_bits flag the function prefers low or high OCR bits
1136 * on boundary voltages. For example,
1137 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
1138 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
1139 *
1140 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
1141 */
1142static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
1143{
1144 const int max_bit = ilog2(MMC_VDD_35_36);
1145 int bit;
1146
1147 if (vdd < 1650 || vdd > 3600)
1148 return -EINVAL;
1149
1150 if (vdd >= 1650 && vdd <= 1950)
1151 return ilog2(MMC_VDD_165_195);
1152
1153 if (low_bits)
1154 vdd -= 1;
1155
	/* Base 2000 mV, step 100 mV, starting at OCR bit 8. */
1157 bit = (vdd - 2000) / 100 + 8;
1158 if (bit > max_bit)
1159 return max_bit;
1160 return bit;
1161}
1162
1163/**
1164 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
1165 * @vdd_min: minimum voltage value (mV)
1166 * @vdd_max: maximum voltage value (mV)
1167 *
1168 * This function returns the OCR mask bits according to the provided @vdd_min
1169 * and @vdd_max values. If conversion is not possible the function returns 0.
1170 *
1171 * Notes wrt boundary cases:
1172 * This function sets the OCR bits for all boundary voltages, for example
1173 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
1174 * MMC_VDD_34_35 mask.
1175 */
1176u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
1177{
1178 u32 mask = 0;
1179
1180 if (vdd_max < vdd_min)
1181 return 0;
1182
1183 /* Prefer high bits for the boundary vdd_max values. */
1184 vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
1185 if (vdd_max < 0)
1186 return 0;
1187
1188 /* Prefer low bits for the boundary vdd_min values. */
1189 vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
1190 if (vdd_min < 0)
1191 return 0;
1192
1193 /* Fill the mask, from max bit to min bit. */
1194 while (vdd_max >= vdd_min)
1195 mask |= 1 << vdd_max--;
1196
1197 return mask;
1198}
1199EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
1200
1201#ifdef CONFIG_OF
1202
1203/**
1204 * mmc_of_parse_voltage - return mask of supported voltages
 * @np: The device node to be parsed.
1206 * @mask: mask of voltages available for MMC/SD/SDIO
1207 *
1208 * Parse the "voltage-ranges" DT property, returning zero if it is not
1209 * found, negative errno if the voltage-range specification is invalid,
1210 * or one if the voltage-range is specified and successfully parsed.
1211 */
1212int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
1213{
1214 const u32 *voltage_ranges;
1215 int num_ranges, i;
1216
1217 voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
1218 num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
1219 if (!voltage_ranges) {
1220 pr_debug("%s: voltage-ranges unspecified\n", np->full_name);
1221 return 0;
1222 }
1223 if (!num_ranges) {
1224 pr_err("%s: voltage-ranges empty\n", np->full_name);
1225 return -EINVAL;
1226 }
1227
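	/* Each range is a pair of cells: minimum and maximum voltage in mV. */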
1228 for (i = 0; i < num_ranges; i++) {
1229 const int j = i * 2;
1230 u32 ocr_mask;
1231
1232 ocr_mask = mmc_vddrange_to_ocrmask(
1233 be32_to_cpu(voltage_ranges[j]),
1234 be32_to_cpu(voltage_ranges[j + 1]));
1235 if (!ocr_mask) {
1236 pr_err("%s: voltage-range #%d is invalid\n",
1237 np->full_name, i);
1238 return -EINVAL;
1239 }
1240 *mask |= ocr_mask;
1241 }
1242
1243 return 1;
1244}
1245EXPORT_SYMBOL(mmc_of_parse_voltage);
1246
1247#endif /* CONFIG_OF */
1248
1249static int mmc_of_get_func_num(struct device_node *node)
1250{
1251 u32 reg;
1252 int ret;
1253
	ret = of_property_read_u32(node, "reg", &reg);
1255 if (ret < 0)
1256 return ret;
1257
1258 return reg;
1259}
1260
1261struct device_node *mmc_of_find_child_device(struct mmc_host *host,
1262 unsigned func_num)
1263{
1264 struct device_node *node;
1265
1266 if (!host->parent || !host->parent->of_node)
1267 return NULL;
1268
1269 for_each_child_of_node(host->parent->of_node, node) {
1270 if (mmc_of_get_func_num(node) == func_num)
1271 return node;
1272 }
1273
1274 return NULL;
1275}
1276
1277#ifdef CONFIG_REGULATOR
1278
/**
 * mmc_ocrbitnum_to_vdd - Convert an OCR bit number to its voltage
 * @vdd_bit: OCR bit number
 * @min_uV: minimum voltage value (uV)
 * @max_uV: maximum voltage value (uV)
 *
 * This function returns the voltage range according to the provided OCR
 * bit number. If conversion is not possible, a negative errno value is
 * returned.
 */
1288static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
1289{
1290 int tmp;
1291
1292 if (!vdd_bit)
1293 return -EINVAL;
1294
1295 /*
1296 * REVISIT mmc_vddrange_to_ocrmask() may have set some
1297 * bits this regulator doesn't quite support ... don't
1298 * be too picky, most cards and regulators are OK with
1299 * a 0.1V range goof (it's a small error percentage).
1300 */
1301 tmp = vdd_bit - ilog2(MMC_VDD_165_195);
1302 if (tmp == 0) {
1303 *min_uV = 1650 * 1000;
1304 *max_uV = 1950 * 1000;
1305 } else {
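		/* Bits above MMC_VDD_165_195 map to 100 mV bands starting at 2.0 V. */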
1306 *min_uV = 1900 * 1000 + tmp * 100 * 1000;
1307 *max_uV = *min_uV + 100 * 1000;
1308 }
1309
1310 return 0;
1311}
1312
1313/**
1314 * mmc_regulator_get_ocrmask - return mask of supported voltages
1315 * @supply: regulator to use
1316 *
1317 * This returns either a negative errno, or a mask of voltages that
1318 * can be provided to MMC/SD/SDIO devices using the specified voltage
1319 * regulator. This would normally be called before registering the
1320 * MMC host adapter.
1321 */
1322int mmc_regulator_get_ocrmask(struct regulator *supply)
1323{
1324 int result = 0;
1325 int count;
1326 int i;
1327 int vdd_uV;
1328 int vdd_mV;
1329
1330 count = regulator_count_voltages(supply);
1331 if (count < 0)
1332 return count;
1333
1334 for (i = 0; i < count; i++) {
1335 vdd_uV = regulator_list_voltage(supply, i);
1336 if (vdd_uV <= 0)
1337 continue;
1338
1339 vdd_mV = vdd_uV / 1000;
1340 result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1341 }
1342
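	/* If the voltage list gave us nothing, fall back to the regulator's current voltage. */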
1343 if (!result) {
1344 vdd_uV = regulator_get_voltage(supply);
1345 if (vdd_uV <= 0)
1346 return vdd_uV;
1347
1348 vdd_mV = vdd_uV / 1000;
1349 result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1350 }
1351
1352 return result;
1353}
1354EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
1355
1356/**
1357 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
1358 * @mmc: the host to regulate
1359 * @supply: regulator to use
1360 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
1361 *
1362 * Returns zero on success, else negative errno.
1363 *
1364 * MMC host drivers may use this to enable or disable a regulator using
1365 * a particular supply voltage. This would normally be called from the
1366 * set_ios() method.
1367 */
1368int mmc_regulator_set_ocr(struct mmc_host *mmc,
1369 struct regulator *supply,
1370 unsigned short vdd_bit)
1371{
1372 int result = 0;
1373 int min_uV, max_uV;
1374
1375 if (vdd_bit) {
1376 mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);
1377
1378 result = regulator_set_voltage(supply, min_uV, max_uV);
1379 if (result == 0 && !mmc->regulator_enabled) {
1380 result = regulator_enable(supply);
1381 if (!result)
1382 mmc->regulator_enabled = true;
1383 }
1384 } else if (mmc->regulator_enabled) {
1385 result = regulator_disable(supply);
1386 if (result == 0)
1387 mmc->regulator_enabled = false;
1388 }
1389
1390 if (result)
1391 dev_err(mmc_dev(mmc),
1392 "could not set regulator OCR (%d)\n", result);
1393 return result;
1394}
1395EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
1396
1397static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
1398 int min_uV, int target_uV,
1399 int max_uV)
1400{
1401 /*
1402 * Check if supported first to avoid errors since we may try several
1403 * signal levels during power up and don't want to show errors.
1404 */
1405 if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
1406 return -EINVAL;
1407
1408 return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
1409 max_uV);
1410}
1411
1412/**
1413 * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
1414 *
1415 * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
1416 * That will match the behavior of old boards where VQMMC and VMMC were supplied
1417 * by the same supply. The Bus Operating conditions for 3.3V signaling in the
1418 * SD card spec also define VQMMC in terms of VMMC.
1419 * If this is not possible we'll try the full 2.7-3.6V of the spec.
1420 *
1421 * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
1422 * requested voltage. This is definitely a good idea for UHS where there's a
1423 * separate regulator on the card that's trying to make 1.8V and it's best if
1424 * we match.
1425 *
1426 * This function is expected to be used by a controller's
1427 * start_signal_voltage_switch() function.
1428 */
1429int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
1430{
1431 struct device *dev = mmc_dev(mmc);
1432 int ret, volt, min_uV, max_uV;
1433
1434 /* If no vqmmc supply then we can't change the voltage */
1435 if (IS_ERR(mmc->supply.vqmmc))
1436 return -EINVAL;
1437
1438 switch (ios->signal_voltage) {
1439 case MMC_SIGNAL_VOLTAGE_120:
1440 return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1441 1100000, 1200000, 1300000);
1442 case MMC_SIGNAL_VOLTAGE_180:
1443 return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1444 1700000, 1800000, 1950000);
1445 case MMC_SIGNAL_VOLTAGE_330:
1446 ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
1447 if (ret < 0)
1448 return ret;
1449
1450 dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
1451 __func__, volt, max_uV);
1452
1453 min_uV = max(volt - 300000, 2700000);
1454 max_uV = min(max_uV + 200000, 3600000);
1455
1456 /*
1457 * Due to a limitation in the current implementation of
1458 * regulator_set_voltage_triplet() which is taking the lowest
1459 * voltage possible if below the target, search for a suitable
1460 * voltage in two steps and try to stay close to vmmc
1461 * with a 0.3V tolerance at first.
1462 */
1463 if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1464 min_uV, volt, max_uV))
1465 return 0;
1466
1467 return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
1468 2700000, volt, 3600000);
1469 default:
1470 return -EINVAL;
1471 }
1472}
1473EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
1474
1475#endif /* CONFIG_REGULATOR */
1476
1477int mmc_regulator_get_supply(struct mmc_host *mmc)
1478{
1479 struct device *dev = mmc_dev(mmc);
1480 int ret;
1481
1482 mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
1483 mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
1484
1485 if (IS_ERR(mmc->supply.vmmc)) {
1486 if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
1487 return -EPROBE_DEFER;
1488 dev_dbg(dev, "No vmmc regulator found\n");
1489 } else {
1490 ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
1491 if (ret > 0)
1492 mmc->ocr_avail = ret;
1493 else
1494 dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
1495 }
1496
1497 if (IS_ERR(mmc->supply.vqmmc)) {
1498 if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
1499 return -EPROBE_DEFER;
1500 dev_dbg(dev, "No vqmmc regulator found\n");
1501 }
1502
1503 return 0;
1504}
1505EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
1506
1507/*
1508 * Mask off any voltages we don't support and select
1509 * the lowest voltage
1510 */
1511u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1512{
1513 int bit;
1514
1515 /*
1516 * Sanity check the voltages that the card claims to
1517 * support.
1518 */
1519 if (ocr & 0x7F) {
1520 dev_warn(mmc_dev(host),
1521 "card claims to support voltages below defined range\n");
1522 ocr &= ~0x7F;
1523 }
1524
1525 ocr &= host->ocr_avail;
1526 if (!ocr) {
1527 dev_warn(mmc_dev(host), "no support for card's volts\n");
1528 return 0;
1529 }
1530
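	/*
	 * If the host can power cycle the card, prefer the lowest supported
	 * voltage; otherwise pick the highest one and warn if it does not
	 * match the voltage currently in use.
	 */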
1531 if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
1532 bit = ffs(ocr) - 1;
1533 ocr &= 3 << bit;
1534 mmc_power_cycle(host, ocr);
1535 } else {
1536 bit = fls(ocr) - 1;
1537 ocr &= 3 << bit;
1538 if (bit != host->ios.vdd)
1539 dev_warn(mmc_dev(host), "exceeding card's volts\n");
1540 }
1541
1542 return ocr;
1543}
1544
1545int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
1546{
1547 int err = 0;
1548 int old_signal_voltage = host->ios.signal_voltage;
1549
1550 host->ios.signal_voltage = signal_voltage;
1551 if (host->ops->start_signal_voltage_switch)
1552 err = host->ops->start_signal_voltage_switch(host, &host->ios);
1553
1554 if (err)
1555 host->ios.signal_voltage = old_signal_voltage;
1556
1557 return err;
1558
1559}
1560
1561int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
1562{
1563 struct mmc_command cmd = {0};
1564 int err = 0;
1565 u32 clock;
1566
1567 BUG_ON(!host);
1568
1569 /*
1570 * Send CMD11 only if the request is to switch the card to
1571 * 1.8V signalling.
1572 */
1573 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1574 return __mmc_set_signal_voltage(host, signal_voltage);
1575
1576 /*
1577 * If we cannot switch voltages, return failure so the caller
1578 * can continue without UHS mode
1579 */
1580 if (!host->ops->start_signal_voltage_switch)
1581 return -EPERM;
1582 if (!host->ops->card_busy)
1583 pr_warn("%s: cannot verify signal voltage switch\n",
1584 mmc_hostname(host));
1585
1586 cmd.opcode = SD_SWITCH_VOLTAGE;
1587 cmd.arg = 0;
1588 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1589
1590 err = mmc_wait_for_cmd(host, &cmd, 0);
1591 if (err)
1592 return err;
1593
1594 if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
1595 return -EIO;
1596
1597 /*
1598 * The card should drive cmd and dat[0:3] low immediately
1599 * after the response of cmd11, but wait 1 ms to be sure
1600 */
1601 mmc_delay(1);
1602 if (host->ops->card_busy && !host->ops->card_busy(host)) {
1603 err = -EAGAIN;
1604 goto power_cycle;
1605 }
1606 /*
1607 * During a signal voltage level switch, the clock must be gated
1608 * for 5 ms according to the SD spec
1609 */
1610 clock = host->ios.clock;
1611 host->ios.clock = 0;
1612 mmc_set_ios(host);
1613
1614 if (__mmc_set_signal_voltage(host, signal_voltage)) {
1615 /*
1616 * Voltages may not have been switched, but we've already
1617 * sent CMD11, so a power cycle is required anyway
1618 */
1619 err = -EAGAIN;
1620 goto power_cycle;
1621 }
1622
1623 /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
1624 mmc_delay(10);
1625 host->ios.clock = clock;
1626 mmc_set_ios(host);
1627
1628 /* Wait for at least 1 ms according to spec */
1629 mmc_delay(1);
1630
1631 /*
1632 * Failure to switch is indicated by the card holding
1633 * dat[0:3] low
1634 */
1635 if (host->ops->card_busy && host->ops->card_busy(host))
1636 err = -EAGAIN;
1637
1638power_cycle:
1639 if (err) {
1640 pr_debug("%s: Signal voltage switch failed, "
1641 "power cycling card\n", mmc_hostname(host));
1642 mmc_power_cycle(host, ocr);
1643 }
1644
1645 return err;
1646}
1647
1648/*
1649 * Select timing parameters for host.
1650 */
1651void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1652{
1653 host->ios.timing = timing;
1654 mmc_set_ios(host);
1655}
1656
1657/*
1658 * Select appropriate driver type for host.
1659 */
1660void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1661{
1662 host->ios.drv_type = drv_type;
1663 mmc_set_ios(host);
1664}
1665
1666int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
1667 int card_drv_type, int *drv_type)
1668{
1669 struct mmc_host *host = card->host;
1670 int host_drv_type = SD_DRIVER_TYPE_B;
1671
1672 *drv_type = 0;
1673
1674 if (!host->ops->select_drive_strength)
1675 return 0;
1676
1677 /* Use SD definition of driver strength for hosts */
1678 if (host->caps & MMC_CAP_DRIVER_TYPE_A)
1679 host_drv_type |= SD_DRIVER_TYPE_A;
1680
1681 if (host->caps & MMC_CAP_DRIVER_TYPE_C)
1682 host_drv_type |= SD_DRIVER_TYPE_C;
1683
1684 if (host->caps & MMC_CAP_DRIVER_TYPE_D)
1685 host_drv_type |= SD_DRIVER_TYPE_D;
1686
1687 /*
1688 * The drive strength that the hardware can support
1689 * depends on the board design. Pass the appropriate
1690 * information and let the hardware specific code
1691 * return what is possible given the options
1692 */
1693 return host->ops->select_drive_strength(card, max_dtr,
1694 host_drv_type,
1695 card_drv_type,
1696 drv_type);
1697}
1698
1699/*
1700 * Apply power to the MMC stack. This is a two-stage process.
1701 * First, we enable power to the card without the clock running.
1702 * We then wait a bit for the power to stabilise. Finally,
1703 * enable the bus drivers and clock to the card.
1704 *
 * We must _NOT_ enable the clock prior to power stabilising.
1706 *
1707 * If a host does all the power sequencing itself, ignore the
1708 * initial MMC_POWER_UP stage.
1709 */
1710void mmc_power_up(struct mmc_host *host, u32 ocr)
1711{
1712 if (host->ios.power_mode == MMC_POWER_ON)
1713 return;
1714
1715 mmc_pwrseq_pre_power_on(host);
1716
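	/* Select the highest voltage bit set in the OCR mask. */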
1717 host->ios.vdd = fls(ocr) - 1;
1718 host->ios.power_mode = MMC_POWER_UP;
1719 /* Set initial state and call mmc_set_ios */
1720 mmc_set_initial_state(host);
1721
1722 /* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
1723 if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0)
1724 dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
1725 else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0)
1726 dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
1727 else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0)
1728 dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
1729
1730 /*
1731 * This delay should be sufficient to allow the power supply
1732 * to reach the minimum voltage.
1733 */
1734 mmc_delay(10);
1735
1736 mmc_pwrseq_post_power_on(host);
1737
1738 host->ios.clock = host->f_init;
1739
1740 host->ios.power_mode = MMC_POWER_ON;
1741 mmc_set_ios(host);
1742
	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
1747 mmc_delay(10);
1748}
1749
1750void mmc_power_off(struct mmc_host *host)
1751{
1752 if (host->ios.power_mode == MMC_POWER_OFF)
1753 return;
1754
1755 mmc_pwrseq_power_off(host);
1756
1757 host->ios.clock = 0;
1758 host->ios.vdd = 0;
1759
1760 host->ios.power_mode = MMC_POWER_OFF;
1761 /* Set initial state and call mmc_set_ios */
1762 mmc_set_initial_state(host);
1763
1764 /*
1765 * Some configurations, such as the 802.11 SDIO card in the OLPC
1766 * XO-1.5, require a short delay after poweroff before the card
1767 * can be successfully turned on again.
1768 */
1769 mmc_delay(1);
1770}
1771
1772void mmc_power_cycle(struct mmc_host *host, u32 ocr)
1773{
1774 mmc_power_off(host);
1775 /* Wait at least 1 ms according to SD spec */
1776 mmc_delay(1);
1777 mmc_power_up(host, ocr);
1778}
1779
1780/*
1781 * Cleanup when the last reference to the bus operator is dropped.
1782 */
1783static void __mmc_release_bus(struct mmc_host *host)
1784{
1785 BUG_ON(!host);
1786 BUG_ON(host->bus_refs);
1787 BUG_ON(!host->bus_dead);
1788
1789 host->bus_ops = NULL;
1790}
1791
1792/*
1793 * Increase reference count of bus operator
1794 */
1795static inline void mmc_bus_get(struct mmc_host *host)
1796{
1797 unsigned long flags;
1798
1799 spin_lock_irqsave(&host->lock, flags);
1800 host->bus_refs++;
1801 spin_unlock_irqrestore(&host->lock, flags);
1802}
1803
1804/*
1805 * Decrease reference count of bus operator and free it if
1806 * it is the last reference.
1807 */
1808static inline void mmc_bus_put(struct mmc_host *host)
1809{
1810 unsigned long flags;
1811
1812 spin_lock_irqsave(&host->lock, flags);
1813 host->bus_refs--;
1814 if ((host->bus_refs == 0) && host->bus_ops)
1815 __mmc_release_bus(host);
1816 spin_unlock_irqrestore(&host->lock, flags);
1817}
1818
1819/*
1820 * Assign a mmc bus handler to a host. Only one bus handler may control a
1821 * host at any given time.
1822 */
1823void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1824{
1825 unsigned long flags;
1826
1827 BUG_ON(!host);
1828 BUG_ON(!ops);
1829
1830 WARN_ON(!host->claimed);
1831
1832 spin_lock_irqsave(&host->lock, flags);
1833
1834 BUG_ON(host->bus_ops);
1835 BUG_ON(host->bus_refs);
1836
1837 host->bus_ops = ops;
1838 host->bus_refs = 1;
1839 host->bus_dead = 0;
1840
1841 spin_unlock_irqrestore(&host->lock, flags);
1842}
1843
1844/*
1845 * Remove the current bus handler from a host.
1846 */
1847void mmc_detach_bus(struct mmc_host *host)
1848{
1849 unsigned long flags;
1850
1851 BUG_ON(!host);
1852
1853 WARN_ON(!host->claimed);
1854 WARN_ON(!host->bus_ops);
1855
1856 spin_lock_irqsave(&host->lock, flags);
1857
1858 host->bus_dead = 1;
1859
1860 spin_unlock_irqrestore(&host->lock, flags);
1861
1862 mmc_bus_put(host);
1863}
1864
1865static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
1866 bool cd_irq)
1867{
1868#ifdef CONFIG_MMC_DEBUG
1869 unsigned long flags;
1870 spin_lock_irqsave(&host->lock, flags);
1871 WARN_ON(host->removed);
1872 spin_unlock_irqrestore(&host->lock, flags);
1873#endif
1874
	/*
	 * If the device is configured as a wakeup source, prevent a new
	 * sleep for 5 s so that user space has a chance to consume the
	 * event.
	 */
1879 if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
1880 device_can_wakeup(mmc_dev(host)))
1881 pm_wakeup_event(mmc_dev(host), 5000);
1882
1883 host->detect_change = 1;
1884 mmc_schedule_delayed_work(&host->detect, delay);
1885}
1886
1887/**
1888 * mmc_detect_change - process change of state on a MMC socket
1889 * @host: host which changed state.
1890 * @delay: optional delay to wait before detection (jiffies)
1891 *
1892 * MMC drivers should call this when they detect a card has been
1893 * inserted or removed. The MMC layer will confirm that any
 * present card is still functional, and initialize any newly
 * inserted card.
1896 */
1897void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1898{
1899 _mmc_detect_change(host, delay, true);
1900}
1901EXPORT_SYMBOL(mmc_detect_change);
1902
1903void mmc_init_erase(struct mmc_card *card)
1904{
1905 unsigned int sz;
1906
1907 if (is_power_of_2(card->erase_size))
1908 card->erase_shift = ffs(card->erase_size) - 1;
1909 else
1910 card->erase_shift = 0;
1911
1912 /*
1913 * It is possible to erase an arbitrarily large area of an SD or MMC
1914 * card. That is not desirable because it can take a long time
1915 * (minutes) potentially delaying more important I/O, and also the
 * timeout calculations become increasingly over-estimated.
1917 * Consequently, 'pref_erase' is defined as a guide to limit erases
1918 * to that size and alignment.
1919 *
1920 * For SD cards that define Allocation Unit size, limit erases to one
1921 * Allocation Unit at a time. For MMC cards that define High Capacity
1922 * Erase Size, whether it is switched on or not, limit to that size.
1923 * Otherwise just have a stab at a good value. For modern cards it
1924 * will end up being 4MiB. Note that if the value is too small, it
1925 * can end up taking longer to erase.
1926 */
1927 if (mmc_card_sd(card) && card->ssr.au) {
1928 card->pref_erase = card->ssr.au;
1929 card->erase_shift = ffs(card->ssr.au) - 1;
1930 } else if (card->ext_csd.hc_erase_size) {
1931 card->pref_erase = card->ext_csd.hc_erase_size;
1932 } else if (card->erase_size) {
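		/* Card size in MiB: capacity (in read_blkbits blocks) -> 512-byte sectors -> MiB. */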
1933 sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
1934 if (sz < 128)
1935 card->pref_erase = 512 * 1024 / 512;
1936 else if (sz < 512)
1937 card->pref_erase = 1024 * 1024 / 512;
1938 else if (sz < 1024)
1939 card->pref_erase = 2 * 1024 * 1024 / 512;
1940 else
1941 card->pref_erase = 4 * 1024 * 1024 / 512;
1942 if (card->pref_erase < card->erase_size)
1943 card->pref_erase = card->erase_size;
1944 else {
1945 sz = card->pref_erase % card->erase_size;
1946 if (sz)
1947 card->pref_erase += card->erase_size - sz;
1948 }
1949 } else
1950 card->pref_erase = 0;
1951}
1952
1953static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1954 unsigned int arg, unsigned int qty)
1955{
1956 unsigned int erase_timeout;
1957
1958 if (arg == MMC_DISCARD_ARG ||
1959 (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
1960 erase_timeout = card->ext_csd.trim_timeout;
1961 } else if (card->ext_csd.erase_group_def & 1) {
1962 /* High Capacity Erase Group Size uses HC timeouts */
1963 if (arg == MMC_TRIM_ARG)
1964 erase_timeout = card->ext_csd.trim_timeout;
1965 else
1966 erase_timeout = card->ext_csd.hc_erase_timeout;
1967 } else {
1968 /* CSD Erase Group Size uses write timeout */
1969 unsigned int mult = (10 << card->csd.r2w_factor);
1970 unsigned int timeout_clks = card->csd.tacc_clks * mult;
1971 unsigned int timeout_us;
1972
1973 /* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
1974 if (card->csd.tacc_ns < 1000000)
1975 timeout_us = (card->csd.tacc_ns * mult) / 1000;
1976 else
1977 timeout_us = (card->csd.tacc_ns / 1000) * mult;
1978
1979 /*
1980 * ios.clock is only a target. The real clock rate might be
1981 * less but not that much less, so fudge it by multiplying by 2.
1982 */
1983 timeout_clks <<= 1;
1984 timeout_us += (timeout_clks * 1000) /
1985 (card->host->ios.clock / 1000);
1986
1987 erase_timeout = timeout_us / 1000;
1988
1989 /*
1990 * Theoretically, the calculation could underflow so round up
1991 * to 1ms in that case.
1992 */
1993 if (!erase_timeout)
1994 erase_timeout = 1;
1995 }
1996
1997 /* Multiplier for secure operations */
1998 if (arg & MMC_SECURE_ARGS) {
1999 if (arg == MMC_SECURE_ERASE_ARG)
2000 erase_timeout *= card->ext_csd.sec_erase_mult;
2001 else
2002 erase_timeout *= card->ext_csd.sec_trim_mult;
2003 }
2004
2005 erase_timeout *= qty;
2006
2007 /*
2008 * Ensure at least a 1 second timeout for SPI as per
2009 * 'mmc_set_data_timeout()'
2010 */
2011 if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
2012 erase_timeout = 1000;
2013
2014 return erase_timeout;
2015}
2016
2017static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
2018 unsigned int arg,
2019 unsigned int qty)
2020{
2021 unsigned int erase_timeout;
2022
2023 if (card->ssr.erase_timeout) {
2024 /* Erase timeout specified in SD Status Register (SSR) */
2025 erase_timeout = card->ssr.erase_timeout * qty +
2026 card->ssr.erase_offset;
2027 } else {
2028 /*
2029 * Erase timeout not specified in SD Status Register (SSR) so
2030 * use 250ms per write block.
2031 */
2032 erase_timeout = 250 * qty;
2033 }
2034
2035 /* Must not be less than 1 second */
2036 if (erase_timeout < 1000)
2037 erase_timeout = 1000;
2038
2039 return erase_timeout;
2040}
2041
2042static unsigned int mmc_erase_timeout(struct mmc_card *card,
2043 unsigned int arg,
2044 unsigned int qty)
2045{
2046 if (mmc_card_sd(card))
2047 return mmc_sd_erase_timeout(card, arg, qty);
2048 else
2049 return mmc_mmc_erase_timeout(card, arg, qty);
2050}
2051
2052static int mmc_do_erase(struct mmc_card *card, unsigned int from,
2053 unsigned int to, unsigned int arg)
2054{
2055 struct mmc_command cmd = {0};
2056 unsigned int qty = 0;
2057 unsigned long timeout;
2058 int err;
2059
2060 mmc_retune_hold(card->host);
2061
2062 /*
2063 * qty is used to calculate the erase timeout which depends on how many
2064 * erase groups (or allocation units in SD terminology) are affected.
2065 * We count erasing part of an erase group as one erase group.
2066 * For SD, the allocation units are always a power of 2. For MMC, the
	 * erase group size is almost certainly also a power of 2, but the
	 * JEDEC standard does not seem to insist on that, so we fall back to
2069 * division in that case. SD may not specify an allocation unit size,
2070 * in which case the timeout is based on the number of write blocks.
2071 *
2072 * Note that the timeout for secure trim 2 will only be correct if the
2073 * number of erase groups specified is the same as the total of all
2074 * preceding secure trim 1 commands. Since the power may have been
2075 * lost since the secure trim 1 commands occurred, it is generally
2076 * impossible to calculate the secure trim 2 timeout correctly.
2077 */
2078 if (card->erase_shift)
2079 qty += ((to >> card->erase_shift) -
2080 (from >> card->erase_shift)) + 1;
2081 else if (mmc_card_sd(card))
2082 qty += to - from + 1;
2083 else
2084 qty += ((to / card->erase_size) -
2085 (from / card->erase_size)) + 1;
2086
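	/*
	 * Byte-addressed (non block-addressed) cards expect byte offsets,
	 * so convert the sector addresses to bytes.
	 */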
2087 if (!mmc_card_blockaddr(card)) {
2088 from <<= 9;
2089 to <<= 9;
2090 }
2091
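	/*
	 * Erasing is a three-command sequence: set the first address
	 * (CMD32 for SD, CMD35 for MMC), set the last address (CMD33/CMD36),
	 * then issue the erase itself (CMD38) with the requested argument.
	 */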
2092 if (mmc_card_sd(card))
2093 cmd.opcode = SD_ERASE_WR_BLK_START;
2094 else
2095 cmd.opcode = MMC_ERASE_GROUP_START;
2096 cmd.arg = from;
2097 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2098 err = mmc_wait_for_cmd(card->host, &cmd, 0);
2099 if (err) {
		pr_err("mmc_erase: group start error %d, status %#x\n",
			err, cmd.resp[0]);
2102 err = -EIO;
2103 goto out;
2104 }
2105
2106 memset(&cmd, 0, sizeof(struct mmc_command));
2107 if (mmc_card_sd(card))
2108 cmd.opcode = SD_ERASE_WR_BLK_END;
2109 else
2110 cmd.opcode = MMC_ERASE_GROUP_END;
2111 cmd.arg = to;
2112 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2113 err = mmc_wait_for_cmd(card->host, &cmd, 0);
2114 if (err) {
2115 pr_err("mmc_erase: group end error %d, status %#x\n",
2116 err, cmd.resp[0]);
2117 err = -EIO;
2118 goto out;
2119 }
2120
2121 memset(&cmd, 0, sizeof(struct mmc_command));
2122 cmd.opcode = MMC_ERASE;
2123 cmd.arg = arg;
2124 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
2125 cmd.busy_timeout = mmc_erase_timeout(card, arg, qty);
2126 err = mmc_wait_for_cmd(card->host, &cmd, 0);
2127 if (err) {
2128 pr_err("mmc_erase: erase error %d, status %#x\n",
2129 err, cmd.resp[0]);
2130 err = -EIO;
2131 goto out;
2132 }
2133
2134 if (mmc_host_is_spi(card->host))
2135 goto out;
2136
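	/*
	 * For non-SPI hosts, poll CMD13 (SEND_STATUS) until the card
	 * reports it is ready for data and has left the programming
	 * state, bounded by MMC_CORE_TIMEOUT_MS.
	 */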
2137 timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
2138 do {
2139 memset(&cmd, 0, sizeof(struct mmc_command));
2140 cmd.opcode = MMC_SEND_STATUS;
2141 cmd.arg = card->rca << 16;
2142 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
2143 /* Do not retry else we can't see errors */
2144 err = mmc_wait_for_cmd(card->host, &cmd, 0);
2145 if (err || (cmd.resp[0] & 0xFDF92000)) {
2146 pr_err("error %d requesting status %#x\n",
2147 err, cmd.resp[0]);
2148 err = -EIO;
2149 goto out;
2150 }
2151
		/*
		 * Timeout if the device never becomes ready for data and
		 * never leaves the program state.
		 */
2155 if (time_after(jiffies, timeout)) {
2156 pr_err("%s: Card stuck in programming state! %s\n",
2157 mmc_hostname(card->host), __func__);
2158 err = -EIO;
2159 goto out;
2160 }
2161
2162 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
2163 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
2164out:
2165 mmc_retune_release(card->host);
2166 return err;
2167}
2168
2169/**
2170 * mmc_erase - erase sectors.
2171 * @card: card to erase
2172 * @from: first sector to erase
2173 * @nr: number of sectors to erase
2174 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
2175 *
2176 * Caller must claim host before calling this function.
2177 */
2178int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
2179 unsigned int arg)
2180{
2181 unsigned int rem, to = from + nr;
2182 int err;
2183
2184 if (!(card->host->caps & MMC_CAP_ERASE) ||
2185 !(card->csd.cmdclass & CCC_ERASE))
2186 return -EOPNOTSUPP;
2187
2188 if (!card->erase_size)
2189 return -EOPNOTSUPP;
2190
2191 if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
2192 return -EOPNOTSUPP;
2193
2194 if ((arg & MMC_SECURE_ARGS) &&
2195 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
2196 return -EOPNOTSUPP;
2197
2198 if ((arg & MMC_TRIM_ARGS) &&
2199 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
2200 return -EOPNOTSUPP;
2201
2202 if (arg == MMC_SECURE_ERASE_ARG) {
2203 if (from % card->erase_size || nr % card->erase_size)
2204 return -EINVAL;
2205 }
2206
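	/*
	 * For a plain erase, shrink the range to erase-group alignment:
	 * round 'from' up and trim 'nr' down so that only whole erase
	 * groups are erased. A request smaller than one erase group
	 * becomes a no-op.
	 */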
2207 if (arg == MMC_ERASE_ARG) {
2208 rem = from % card->erase_size;
2209 if (rem) {
2210 rem = card->erase_size - rem;
2211 from += rem;
2212 if (nr > rem)
2213 nr -= rem;
2214 else
2215 return 0;
2216 }
2217 rem = nr % card->erase_size;
2218 if (rem)
2219 nr -= rem;
2220 }
2221
2222 if (nr == 0)
2223 return 0;
2224
2225 to = from + nr;
2226
2227 if (to <= from)
2228 return -EINVAL;
2229
2230 /* 'from' and 'to' are inclusive */
2231 to -= 1;
2232
2233 /*
2234 * Special case where only one erase-group fits in the timeout budget:
	 * If the region crosses an erase-group boundary in this particular
	 * case, we will be trimming more than one erase-group, which does not
	 * fit in the timeout budget of the controller, so we need to split it
2238 * and call mmc_do_erase() twice if necessary. This special case is
2239 * identified by the card->eg_boundary flag.
2240 */
2241 rem = card->erase_size - (from % card->erase_size);
2242 if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
2243 err = mmc_do_erase(card, from, from + rem - 1, arg);
2244 from += rem;
2245 if ((err) || (to <= from))
2246 return err;
2247 }
2248
2249 return mmc_do_erase(card, from, to, arg);
2250}
2251EXPORT_SYMBOL(mmc_erase);
2252
2253int mmc_can_erase(struct mmc_card *card)
2254{
2255 if ((card->host->caps & MMC_CAP_ERASE) &&
2256 (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
2257 return 1;
2258 return 0;
2259}
2260EXPORT_SYMBOL(mmc_can_erase);
2261
2262int mmc_can_trim(struct mmc_card *card)
2263{
2264 if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
2265 (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
2266 return 1;
2267 return 0;
2268}
2269EXPORT_SYMBOL(mmc_can_trim);
2270
2271int mmc_can_discard(struct mmc_card *card)
2272{
2273 /*
	 * As there's no way to detect the discard support bit at v4.5,
	 * use the s/w feature support field.
2276 */
2277 if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
2278 return 1;
2279 return 0;
2280}
2281EXPORT_SYMBOL(mmc_can_discard);
2282
2283int mmc_can_sanitize(struct mmc_card *card)
2284{
2285 if (!mmc_can_trim(card) && !mmc_can_erase(card))
2286 return 0;
2287 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
2288 return 1;
2289 return 0;
2290}
2291EXPORT_SYMBOL(mmc_can_sanitize);
2292
2293int mmc_can_secure_erase_trim(struct mmc_card *card)
2294{
2295 if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
2296 !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
2297 return 1;
2298 return 0;
2299}
2300EXPORT_SYMBOL(mmc_can_secure_erase_trim);
2301
2302int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
2303 unsigned int nr)
2304{
2305 if (!card->erase_size)
2306 return 0;
2307 if (from % card->erase_size || nr % card->erase_size)
2308 return 0;
2309 return 1;
2310}
2311EXPORT_SYMBOL(mmc_erase_group_aligned);
2312
2313static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
2314 unsigned int arg)
2315{
2316 struct mmc_host *host = card->host;
2317 unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
2318 unsigned int last_timeout = 0;
2319
2320 if (card->erase_shift)
2321 max_qty = UINT_MAX >> card->erase_shift;
2322 else if (mmc_card_sd(card))
2323 max_qty = UINT_MAX;
2324 else
2325 max_qty = UINT_MAX / card->erase_size;
2326
2327 /* Find the largest qty with an OK timeout */
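	/*
	 * The search repeatedly doubles a step x, keeps the largest step
	 * whose timeout still fits within the host's max_busy_timeout,
	 * and accumulates those steps into qty until no further step
	 * fits. The doubling also stops if the computed timeout ever
	 * decreases, which likely indicates arithmetic overflow.
	 */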
2328 do {
2329 y = 0;
2330 for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
2331 timeout = mmc_erase_timeout(card, arg, qty + x);
2332 if (timeout > host->max_busy_timeout)
2333 break;
2334 if (timeout < last_timeout)
2335 break;
2336 last_timeout = timeout;
2337 y = x;
2338 }
2339 qty += y;
2340 } while (y);
2341
2342 if (!qty)
2343 return 0;
2344
2345 /*
2346 * When specifying a sector range to trim, chances are we might cross
	 * an erase-group boundary even if the number of sectors is less than
2348 * one erase-group.
2349 * If we can only fit one erase-group in the controller timeout budget,
	 * we have to make sure that erase-group boundaries are not crossed by a
2351 * single trim operation. We flag that special case with "eg_boundary".
2352 * In all other cases we can just decrement qty and pretend that we
2353 * always touch (qty + 1) erase-groups as a simple optimization.
2354 */
2355 if (qty == 1)
2356 card->eg_boundary = 1;
2357 else
2358 qty--;
2359
2360 /* Convert qty to sectors */
2361 if (card->erase_shift)
2362 max_discard = qty << card->erase_shift;
2363 else if (mmc_card_sd(card))
2364 max_discard = qty + 1;
2365 else
2366 max_discard = qty * card->erase_size;
2367
2368 return max_discard;
2369}
2370
2371unsigned int mmc_calc_max_discard(struct mmc_card *card)
2372{
2373 struct mmc_host *host = card->host;
2374 unsigned int max_discard, max_trim;
2375
2376 if (!host->max_busy_timeout)
2377 return UINT_MAX;
2378
2379 /*
2380 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequency, which can change. In that case, the best choice is
2382 * just the preferred erase size.
2383 */
2384 if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
2385 return card->pref_erase;
2386
2387 max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
2388 if (mmc_can_trim(card)) {
2389 max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
2390 if (max_trim < max_discard)
2391 max_discard = max_trim;
2392 } else if (max_discard < card->erase_size) {
2393 max_discard = 0;
2394 }
2395 pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
2396 mmc_hostname(host), max_discard, host->max_busy_timeout);
2397 return max_discard;
2398}
2399EXPORT_SYMBOL(mmc_calc_max_discard);
2400
2401int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
2402{
2403 struct mmc_command cmd = {0};
2404
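	/*
	 * Block-addressed cards and cards running in DDR52 mode use an
	 * effectively fixed 512-byte data block length, so CMD16 is not
	 * sent for them.
	 */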
2405 if (mmc_card_blockaddr(card) || mmc_card_ddr52(card))
2406 return 0;
2407
2408 cmd.opcode = MMC_SET_BLOCKLEN;
2409 cmd.arg = blocklen;
2410 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2411 return mmc_wait_for_cmd(card->host, &cmd, 5);
2412}
2413EXPORT_SYMBOL(mmc_set_blocklen);
2414
2415int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
2416 bool is_rel_write)
2417{
2418 struct mmc_command cmd = {0};
2419
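	/*
	 * CMD23: the low 16 bits of the argument carry the block count;
	 * bit 31 marks the transfer as a reliable write.
	 */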
2420 cmd.opcode = MMC_SET_BLOCK_COUNT;
2421 cmd.arg = blockcount & 0x0000FFFF;
2422 if (is_rel_write)
2423 cmd.arg |= 1 << 31;
2424 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2425 return mmc_wait_for_cmd(card->host, &cmd, 5);
2426}
2427EXPORT_SYMBOL(mmc_set_blockcount);
2428
2429static void mmc_hw_reset_for_init(struct mmc_host *host)
2430{
2431 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
2432 return;
2433 host->ops->hw_reset(host);
2434}
2435
2436int mmc_hw_reset(struct mmc_host *host)
2437{
2438 int ret;
2439
2440 if (!host->card)
2441 return -EINVAL;
2442
2443 mmc_bus_get(host);
2444 if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
2445 mmc_bus_put(host);
2446 return -EOPNOTSUPP;
2447 }
2448
2449 ret = host->bus_ops->reset(host);
2450 mmc_bus_put(host);
2451
2452 if (ret != -EOPNOTSUPP)
2453 pr_warn("%s: tried to reset card\n", mmc_hostname(host));
2454
2455 return ret;
2456}
2457EXPORT_SYMBOL(mmc_hw_reset);
2458
2459static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
2460{
2461 host->f_init = freq;
2462
2463#ifdef CONFIG_MMC_DEBUG
2464 pr_info("%s: %s: trying to init card at %u Hz\n",
2465 mmc_hostname(host), __func__, host->f_init);
2466#endif
2467 mmc_power_up(host, host->ocr_avail);
2468
2469 /*
2470 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
2471 * do a hardware reset if possible.
2472 */
2473 mmc_hw_reset_for_init(host);
2474
2475 /*
2476 * sdio_reset sends CMD52 to reset card. Since we do not know
2477 * if the card is being re-initialized, just send it. CMD52
2478 * should be ignored by SD/eMMC cards.
2479 * Skip it if we already know that we do not support SDIO commands
	 * Skip it if we already know that we do not support SDIO commands.
2481 if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2482 sdio_reset(host);
2483
2484 mmc_go_idle(host);
2485
2486 mmc_send_if_cond(host, host->ocr_avail);
2487
2488 /* Order's important: probe SDIO, then SD, then MMC */
2489 if (!(host->caps2 & MMC_CAP2_NO_SDIO))
2490 if (!mmc_attach_sdio(host))
2491 return 0;
2492
2493 if (!mmc_attach_sd(host))
2494 return 0;
2495 if (!mmc_attach_mmc(host))
2496 return 0;
2497
2498 mmc_power_off(host);
2499 return -EIO;
2500}
2501
2502int _mmc_detect_card_removed(struct mmc_host *host)
2503{
2504 int ret;
2505
2506 if (!host->card || mmc_card_removed(host->card))
2507 return 1;
2508
2509 ret = host->bus_ops->alive(host);
2510
2511 /*
2512 * Card detect status and alive check may be out of sync if card is
2513 * removed slowly, when card detect switch changes while card/slot
2514 * pads are still contacted in hardware (refer to "SD Card Mechanical
2515 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
2516 * detect work 200ms later for this case.
2517 */
2518 if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
2519 mmc_detect_change(host, msecs_to_jiffies(200));
2520 pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
2521 }
2522
2523 if (ret) {
2524 mmc_card_set_removed(host->card);
2525 pr_debug("%s: card remove detected\n", mmc_hostname(host));
2526 }
2527
2528 return ret;
2529}
2530
2531int mmc_detect_card_removed(struct mmc_host *host)
2532{
2533 struct mmc_card *card = host->card;
2534 int ret;
2535
2536 WARN_ON(!host->claimed);
2537
2538 if (!card)
2539 return 1;
2540
2541 if (!mmc_card_is_removable(host))
2542 return 0;
2543
2544 ret = mmc_card_removed(card);
2545 /*
2546 * The card will be considered unchanged unless we have been asked to
	 * detect a change or the host requires polling to provide card detection.
2548 */
2549 if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
2550 return ret;
2551
2552 host->detect_change = 0;
2553 if (!ret) {
2554 ret = _mmc_detect_card_removed(host);
2555 if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
2556 /*
2557 * Schedule a detect work as soon as possible to let a
2558 * rescan handle the card removal.
2559 */
2560 cancel_delayed_work(&host->detect);
2561 _mmc_detect_change(host, 0, false);
2562 }
2563 }
2564
2565 return ret;
2566}
2567EXPORT_SYMBOL(mmc_detect_card_removed);
2568
2569void mmc_rescan(struct work_struct *work)
2570{
2571 struct mmc_host *host =
2572 container_of(work, struct mmc_host, detect.work);
2573 int i;
2574
2575 if (host->rescan_disable)
2576 return;
2577
2578 /* If there is a non-removable card registered, only scan once */
2579 if (!mmc_card_is_removable(host) && host->rescan_entered)
2580 return;
2581 host->rescan_entered = 1;
2582
2583 if (host->trigger_card_event && host->ops->card_event) {
2584 mmc_claim_host(host);
2585 host->ops->card_event(host);
2586 mmc_release_host(host);
2587 host->trigger_card_event = false;
2588 }
2589
2590 mmc_bus_get(host);
2591
2592 /*
2593 * if there is a _removable_ card registered, check whether it is
2594 * still present
2595 */
2596 if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
2597 host->bus_ops->detect(host);
2598
2599 host->detect_change = 0;
2600
2601 /*
2602 * Let mmc_bus_put() free the bus/bus_ops if we've found that
2603 * the card is no longer present.
2604 */
2605 mmc_bus_put(host);
2606 mmc_bus_get(host);
2607
2608 /* if there still is a card present, stop here */
2609 if (host->bus_ops != NULL) {
2610 mmc_bus_put(host);
2611 goto out;
2612 }
2613
2614 /*
2615 * Only we can add a new handler, so it's safe to
2616 * release the lock here.
2617 */
2618 mmc_bus_put(host);
2619
2620 mmc_claim_host(host);
2621 if (mmc_card_is_removable(host) && host->ops->get_cd &&
2622 host->ops->get_cd(host) == 0) {
2623 mmc_power_off(host);
2624 mmc_release_host(host);
2625 goto out;
2626 }
2627
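	/*
	 * Try to initialize the card at each supported frequency, in
	 * descending order, stopping at the first one that works or
	 * once the host's minimum frequency has been reached.
	 */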
2628 for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2629 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
2630 break;
2631 if (freqs[i] <= host->f_min)
2632 break;
2633 }
2634 mmc_release_host(host);
2635
2636 out:
2637 if (host->caps & MMC_CAP_NEEDS_POLL)
2638 mmc_schedule_delayed_work(&host->detect, HZ);
2639}
2640
2641void mmc_start_host(struct mmc_host *host)
2642{
2643 host->f_init = max(freqs[0], host->f_min);
2644 host->rescan_disable = 0;
2645 host->ios.power_mode = MMC_POWER_UNDEFINED;
2646
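	/*
	 * Hosts that prefer not to power up before the first scan are
	 * powered off here; otherwise power up with the host's available
	 * OCR mask so the initial rescan can probe the card.
	 */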
2647 mmc_claim_host(host);
2648 if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
2649 mmc_power_off(host);
2650 else
2651 mmc_power_up(host, host->ocr_avail);
2652 mmc_release_host(host);
2653
2654 mmc_gpiod_request_cd_irq(host);
2655 _mmc_detect_change(host, 0, false);
2656}
2657
2658void mmc_stop_host(struct mmc_host *host)
2659{
2660#ifdef CONFIG_MMC_DEBUG
2661 unsigned long flags;
2662 spin_lock_irqsave(&host->lock, flags);
2663 host->removed = 1;
2664 spin_unlock_irqrestore(&host->lock, flags);
2665#endif
2666 if (host->slot.cd_irq >= 0)
2667 disable_irq(host->slot.cd_irq);
2668
2669 host->rescan_disable = 1;
2670 cancel_delayed_work_sync(&host->detect);
2671
2672 /* clear pm flags now and let card drivers set them as needed */
2673 host->pm_flags = 0;
2674
2675 mmc_bus_get(host);
2676 if (host->bus_ops && !host->bus_dead) {
2677 /* Calling bus_ops->remove() with a claimed host can deadlock */
2678 host->bus_ops->remove(host);
2679 mmc_claim_host(host);
2680 mmc_detach_bus(host);
2681 mmc_power_off(host);
2682 mmc_release_host(host);
2683 mmc_bus_put(host);
2684 return;
2685 }
2686 mmc_bus_put(host);
2687
2688 BUG_ON(host->card);
2689
2690 mmc_claim_host(host);
2691 mmc_power_off(host);
2692 mmc_release_host(host);
2693}
2694
2695int mmc_power_save_host(struct mmc_host *host)
2696{
2697 int ret = 0;
2698
2699#ifdef CONFIG_MMC_DEBUG
2700 pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
2701#endif
2702
2703 mmc_bus_get(host);
2704
2705 if (!host->bus_ops || host->bus_dead) {
2706 mmc_bus_put(host);
2707 return -EINVAL;
2708 }
2709
2710 if (host->bus_ops->power_save)
2711 ret = host->bus_ops->power_save(host);
2712
2713 mmc_bus_put(host);
2714
2715 mmc_power_off(host);
2716
2717 return ret;
2718}
2719EXPORT_SYMBOL(mmc_power_save_host);
2720
2721int mmc_power_restore_host(struct mmc_host *host)
2722{
2723 int ret;
2724
2725#ifdef CONFIG_MMC_DEBUG
2726 pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
2727#endif
2728
2729 mmc_bus_get(host);
2730
2731 if (!host->bus_ops || host->bus_dead) {
2732 mmc_bus_put(host);
2733 return -EINVAL;
2734 }
2735
2736 mmc_power_up(host, host->card->ocr);
2737 ret = host->bus_ops->power_restore(host);
2738
2739 mmc_bus_put(host);
2740
2741 return ret;
2742}
2743EXPORT_SYMBOL(mmc_power_restore_host);
2744
2745/*
2746 * Flush the cache to the non-volatile storage.
2747 */
2748int mmc_flush_cache(struct mmc_card *card)
2749{
2750 int err = 0;
2751
2752 if (mmc_card_mmc(card) &&
2753 (card->ext_csd.cache_size > 0) &&
2754 (card->ext_csd.cache_ctrl & 1)) {
2755 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2756 EXT_CSD_FLUSH_CACHE, 1, 0);
2757 if (err)
2758 pr_err("%s: cache flush error %d\n",
2759 mmc_hostname(card->host), err);
2760 }
2761
2762 return err;
2763}
2764EXPORT_SYMBOL(mmc_flush_cache);
2765
2766#ifdef CONFIG_PM_SLEEP
/*
 * Do the card removal on suspend if the card is assumed removable.
 * Do that in the PM notifier while userspace isn't yet frozen, so we
 * will still be able to sync the card.
 */
2771static int mmc_pm_notify(struct notifier_block *notify_block,
2772 unsigned long mode, void *unused)
2773{
2774 struct mmc_host *host = container_of(
2775 notify_block, struct mmc_host, pm_notify);
2776 unsigned long flags;
2777 int err = 0;
2778
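	/*
	 * On suspend-type transitions, stop rescanning and, if the bus
	 * cannot guarantee a safe suspend, remove the card and power off.
	 * On resume-type transitions, re-enable rescanning and schedule a
	 * detect so the card is picked up again.
	 */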
2779 switch (mode) {
2780 case PM_HIBERNATION_PREPARE:
2781 case PM_SUSPEND_PREPARE:
2782 case PM_RESTORE_PREPARE:
2783 spin_lock_irqsave(&host->lock, flags);
2784 host->rescan_disable = 1;
2785 spin_unlock_irqrestore(&host->lock, flags);
2786 cancel_delayed_work_sync(&host->detect);
2787
2788 if (!host->bus_ops)
2789 break;
2790
2791 /* Validate prerequisites for suspend */
2792 if (host->bus_ops->pre_suspend)
2793 err = host->bus_ops->pre_suspend(host);
2794 if (!err)
2795 break;
2796
2797 /* Calling bus_ops->remove() with a claimed host can deadlock */
2798 host->bus_ops->remove(host);
2799 mmc_claim_host(host);
2800 mmc_detach_bus(host);
2801 mmc_power_off(host);
2802 mmc_release_host(host);
2803 host->pm_flags = 0;
2804 break;
2805
2806 case PM_POST_SUSPEND:
2807 case PM_POST_HIBERNATION:
2808 case PM_POST_RESTORE:
2809
2810 spin_lock_irqsave(&host->lock, flags);
2811 host->rescan_disable = 0;
2812 spin_unlock_irqrestore(&host->lock, flags);
2813 _mmc_detect_change(host, 0, false);
2814
2815 }
2816
2817 return 0;
2818}
2819
2820void mmc_register_pm_notifier(struct mmc_host *host)
2821{
2822 host->pm_notify.notifier_call = mmc_pm_notify;
2823 register_pm_notifier(&host->pm_notify);
2824}
2825
2826void mmc_unregister_pm_notifier(struct mmc_host *host)
2827{
2828 unregister_pm_notifier(&host->pm_notify);
2829}
2830#endif
2831
2832/**
2833 * mmc_init_context_info() - init synchronization context
2834 * @host: mmc host
2835 *
 * Initialize the struct context_info needed to implement the
 * asynchronous request mechanism used by the mmc core, the host
 * driver and the mmc request supplier.
2839 */
2840void mmc_init_context_info(struct mmc_host *host)
2841{
2842 spin_lock_init(&host->context_info.lock);
2843 host->context_info.is_new_req = false;
2844 host->context_info.is_done_rcv = false;
2845 host->context_info.is_waiting_last_req = false;
2846 init_waitqueue_head(&host->context_info.wait);
2847}
2848
2849static int __init mmc_init(void)
2850{
2851 int ret;
2852
2853 ret = mmc_register_bus();
2854 if (ret)
2855 return ret;
2856
2857 ret = mmc_register_host_class();
2858 if (ret)
2859 goto unregister_bus;
2860
2861 ret = sdio_register_bus();
2862 if (ret)
2863 goto unregister_host_class;
2864
2865 return 0;
2866
2867unregister_host_class:
2868 mmc_unregister_host_class();
2869unregister_bus:
2870 mmc_unregister_bus();
2871 return ret;
2872}
2873
2874static void __exit mmc_exit(void)
2875{
2876 sdio_unregister_bus();
2877 mmc_unregister_host_class();
2878 mmc_unregister_bus();
2879}
2880
2881subsys_initcall(mmc_init);
2882module_exit(mmc_exit);
2883
2884MODULE_LICENSE("GPL");
131 * MMC drivers should call this function when they have completed
132 * their processing of a request.
133 */
134void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
135{
136 struct mmc_command *cmd = mrq->cmd;
137 int err = cmd->error;
138
139 if (err && cmd->retries && mmc_host_is_spi(host)) {
140 if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
141 cmd->retries = 0;
142 }
143
144 if (err && cmd->retries && !mmc_card_removed(host->card)) {
145 /*
146 * Request starter must handle retries - see
147 * mmc_wait_for_req_done().
148 */
149 if (mrq->done)
150 mrq->done(mrq);
151 } else {
152 mmc_should_fail_request(host, mrq);
153
154 led_trigger_event(host->led, LED_OFF);
155
156 pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
157 mmc_hostname(host), cmd->opcode, err,
158 cmd->resp[0], cmd->resp[1],
159 cmd->resp[2], cmd->resp[3]);
160
161 if (mrq->data) {
162 pr_debug("%s: %d bytes transferred: %d\n",
163 mmc_hostname(host),
164 mrq->data->bytes_xfered, mrq->data->error);
165 }
166
167 if (mrq->stop) {
168 pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
169 mmc_hostname(host), mrq->stop->opcode,
170 mrq->stop->error,
171 mrq->stop->resp[0], mrq->stop->resp[1],
172 mrq->stop->resp[2], mrq->stop->resp[3]);
173 }
174
175 if (mrq->done)
176 mrq->done(mrq);
177
178 mmc_host_clk_release(host);
179 }
180}
181
182EXPORT_SYMBOL(mmc_request_done);
183
184static void
185mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
186{
187#ifdef CONFIG_MMC_DEBUG
188 unsigned int i, sz;
189 struct scatterlist *sg;
190#endif
191
192 if (mrq->sbc) {
193 pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
194 mmc_hostname(host), mrq->sbc->opcode,
195 mrq->sbc->arg, mrq->sbc->flags);
196 }
197
198 pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
199 mmc_hostname(host), mrq->cmd->opcode,
200 mrq->cmd->arg, mrq->cmd->flags);
201
202 if (mrq->data) {
203 pr_debug("%s: blksz %d blocks %d flags %08x "
204 "tsac %d ms nsac %d\n",
205 mmc_hostname(host), mrq->data->blksz,
206 mrq->data->blocks, mrq->data->flags,
207 mrq->data->timeout_ns / 1000000,
208 mrq->data->timeout_clks);
209 }
210
211 if (mrq->stop) {
212 pr_debug("%s: CMD%u arg %08x flags %08x\n",
213 mmc_hostname(host), mrq->stop->opcode,
214 mrq->stop->arg, mrq->stop->flags);
215 }
216
217 WARN_ON(!host->claimed);
218
219 mrq->cmd->error = 0;
220 mrq->cmd->mrq = mrq;
221 if (mrq->data) {
222 BUG_ON(mrq->data->blksz > host->max_blk_size);
223 BUG_ON(mrq->data->blocks > host->max_blk_count);
224 BUG_ON(mrq->data->blocks * mrq->data->blksz >
225 host->max_req_size);
226
227#ifdef CONFIG_MMC_DEBUG
228 sz = 0;
229 for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
230 sz += sg->length;
231 BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
232#endif
233
234 mrq->cmd->data = mrq->data;
235 mrq->data->error = 0;
236 mrq->data->mrq = mrq;
237 if (mrq->stop) {
238 mrq->data->stop = mrq->stop;
239 mrq->stop->error = 0;
240 mrq->stop->mrq = mrq;
241 }
242 }
243 mmc_host_clk_hold(host);
244 led_trigger_event(host->led, LED_FULL);
245 host->ops->request(host, mrq);
246}
247
248static void mmc_wait_done(struct mmc_request *mrq)
249{
250 complete(&mrq->completion);
251}
252
253static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
254{
255 init_completion(&mrq->completion);
256 mrq->done = mmc_wait_done;
257 if (mmc_card_removed(host->card)) {
258 mrq->cmd->error = -ENOMEDIUM;
259 complete(&mrq->completion);
260 return -ENOMEDIUM;
261 }
262 mmc_start_request(host, mrq);
263 return 0;
264}
265
266static void mmc_wait_for_req_done(struct mmc_host *host,
267 struct mmc_request *mrq)
268{
269 struct mmc_command *cmd;
270
271 while (1) {
272 wait_for_completion(&mrq->completion);
273
274 cmd = mrq->cmd;
275 if (!cmd->error || !cmd->retries ||
276 mmc_card_removed(host->card))
277 break;
278
279 pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
280 mmc_hostname(host), cmd->opcode, cmd->error);
281 cmd->retries--;
282 cmd->error = 0;
283 host->ops->request(host, mrq);
284 }
285}
286
287/**
288 * mmc_pre_req - Prepare for a new request
289 * @host: MMC host to prepare command
290 * @mrq: MMC request to prepare for
291 * @is_first_req: true if there is no previous started request
292 * that may run in parellel to this call, otherwise false
293 *
294 * mmc_pre_req() is called in prior to mmc_start_req() to let
295 * host prepare for the new request. Preparation of a request may be
296 * performed while another request is running on the host.
297 */
298static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
299 bool is_first_req)
300{
301 if (host->ops->pre_req) {
302 mmc_host_clk_hold(host);
303 host->ops->pre_req(host, mrq, is_first_req);
304 mmc_host_clk_release(host);
305 }
306}
307
308/**
309 * mmc_post_req - Post process a completed request
310 * @host: MMC host to post process command
311 * @mrq: MMC request to post process for
312 * @err: Error, if non zero, clean up any resources made in pre_req
313 *
314 * Let the host post process a completed request. Post processing of
315 * a request may be performed while another reuqest is running.
316 */
317static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
318 int err)
319{
320 if (host->ops->post_req) {
321 mmc_host_clk_hold(host);
322 host->ops->post_req(host, mrq, err);
323 mmc_host_clk_release(host);
324 }
325}
326
327/**
328 * mmc_start_req - start a non-blocking request
329 * @host: MMC host to start command
330 * @areq: async request to start
331 * @error: out parameter returns 0 for success, otherwise non zero
332 *
333 * Start a new MMC custom command request for a host.
334 * If there is on ongoing async request wait for completion
335 * of that request and start the new one and return.
336 * Does not wait for the new request to complete.
337 *
338 * Returns the completed request, NULL in case of none completed.
339 * Wait for the an ongoing request (previoulsy started) to complete and
340 * return the completed request. If there is no ongoing request, NULL
341 * is returned without waiting. NULL is not an error condition.
342 */
343struct mmc_async_req *mmc_start_req(struct mmc_host *host,
344 struct mmc_async_req *areq, int *error)
345{
346 int err = 0;
347 int start_err = 0;
348 struct mmc_async_req *data = host->areq;
349
350 /* Prepare a new request */
351 if (areq)
352 mmc_pre_req(host, areq->mrq, !host->areq);
353
354 if (host->areq) {
355 mmc_wait_for_req_done(host, host->areq->mrq);
356 err = host->areq->err_check(host->card, host->areq);
357 }
358
359 if (!err && areq)
360 start_err = __mmc_start_req(host, areq->mrq);
361
362 if (host->areq)
363 mmc_post_req(host, host->areq->mrq, 0);
364
365 /* Cancel a prepared request if it was not started. */
366 if ((err || start_err) && areq)
367 mmc_post_req(host, areq->mrq, -EINVAL);
368
369 if (err)
370 host->areq = NULL;
371 else
372 host->areq = areq;
373
374 if (error)
375 *error = err;
376 return data;
377}
378EXPORT_SYMBOL(mmc_start_req);
379
380/**
381 * mmc_wait_for_req - start a request and wait for completion
382 * @host: MMC host to start command
383 * @mrq: MMC request to start
384 *
385 * Start a new MMC custom command request for a host, and wait
386 * for the command to complete. Does not attempt to parse the
387 * response.
388 */
389void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
390{
391 __mmc_start_req(host, mrq);
392 mmc_wait_for_req_done(host, mrq);
393}
394EXPORT_SYMBOL(mmc_wait_for_req);
395
396/**
397 * mmc_interrupt_hpi - Issue for High priority Interrupt
398 * @card: the MMC card associated with the HPI transfer
399 *
400 * Issued High Priority Interrupt, and check for card status
401 * util out-of prg-state.
402 */
403int mmc_interrupt_hpi(struct mmc_card *card)
404{
405 int err;
406 u32 status;
407
408 BUG_ON(!card);
409
410 if (!card->ext_csd.hpi_en) {
411 pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
412 return 1;
413 }
414
415 mmc_claim_host(card->host);
416 err = mmc_send_status(card, &status);
417 if (err) {
418 pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
419 goto out;
420 }
421
422 /*
423 * If the card status is in PRG-state, we can send the HPI command.
424 */
425 if (R1_CURRENT_STATE(status) == R1_STATE_PRG) {
426 do {
427 /*
428 * We don't know when the HPI command will finish
429 * processing, so we need to resend HPI until out
430 * of prg-state, and keep checking the card status
431 * with SEND_STATUS. If a timeout error occurs when
432 * sending the HPI command, we are already out of
433 * prg-state.
434 */
435 err = mmc_send_hpi_cmd(card, &status);
436 if (err)
437 pr_debug("%s: abort HPI (%d error)\n",
438 mmc_hostname(card->host), err);
439
440 err = mmc_send_status(card, &status);
441 if (err)
442 break;
443 } while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
444 } else
445 pr_debug("%s: Left prg-state\n", mmc_hostname(card->host));
446
447out:
448 mmc_release_host(card->host);
449 return err;
450}
451EXPORT_SYMBOL(mmc_interrupt_hpi);
452
453/**
454 * mmc_wait_for_cmd - start a command and wait for completion
455 * @host: MMC host to start command
456 * @cmd: MMC command to start
457 * @retries: maximum number of retries
458 *
459 * Start a new MMC command for a host, and wait for the command
460 * to complete. Return any error that occurred while the command
461 * was executing. Do not attempt to parse the response.
462 */
463int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
464{
465 struct mmc_request mrq = {NULL};
466
467 WARN_ON(!host->claimed);
468
469 memset(cmd->resp, 0, sizeof(cmd->resp));
470 cmd->retries = retries;
471
472 mrq.cmd = cmd;
473 cmd->data = NULL;
474
475 mmc_wait_for_req(host, &mrq);
476
477 return cmd->error;
478}
479
480EXPORT_SYMBOL(mmc_wait_for_cmd);
481
482/**
483 * mmc_set_data_timeout - set the timeout for a data command
484 * @data: data phase for command
485 * @card: the MMC card associated with the data transfer
486 *
487 * Computes the data timeout parameters according to the
488 * correct algorithm given the card type.
489 */
490void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
491{
492 unsigned int mult;
493
494 /*
495 * SDIO cards only define an upper 1 s limit on access.
496 */
497 if (mmc_card_sdio(card)) {
498 data->timeout_ns = 1000000000;
499 data->timeout_clks = 0;
500 return;
501 }
502
503 /*
504 * SD cards use a 100 multiplier rather than 10
505 */
506 mult = mmc_card_sd(card) ? 100 : 10;
507
508 /*
509 * Scale up the multiplier (and therefore the timeout) by
510 * the r2w factor for writes.
511 */
512 if (data->flags & MMC_DATA_WRITE)
513 mult <<= card->csd.r2w_factor;
514
515 data->timeout_ns = card->csd.tacc_ns * mult;
516 data->timeout_clks = card->csd.tacc_clks * mult;
517
518 /*
519 * SD cards also have an upper limit on the timeout.
520 */
521 if (mmc_card_sd(card)) {
522 unsigned int timeout_us, limit_us;
523
524 timeout_us = data->timeout_ns / 1000;
525 if (mmc_host_clk_rate(card->host))
526 timeout_us += data->timeout_clks * 1000 /
527 (mmc_host_clk_rate(card->host) / 1000);
528
529 if (data->flags & MMC_DATA_WRITE)
530 /*
531 * The MMC spec "It is strongly recommended
532 * for hosts to implement more than 500ms
533 * timeout value even if the card indicates
534 * the 250ms maximum busy length." Even the
535 * previous value of 300ms is known to be
536 * insufficient for some cards.
537 */
538 limit_us = 3000000;
539 else
540 limit_us = 100000;
541
542 /*
543 * SDHC cards always use these fixed values.
544 */
545 if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
546 data->timeout_ns = limit_us * 1000;
547 data->timeout_clks = 0;
548 }
549 }
550
551 /*
552 * Some cards require longer data read timeout than indicated in CSD.
553 * Address this by setting the read timeout to a "reasonably high"
554 * value. For the cards tested, 300ms has proven enough. If necessary,
555 * this value can be increased if other problematic cards require this.
556 */
557 if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
558 data->timeout_ns = 300000000;
559 data->timeout_clks = 0;
560 }
561
562 /*
563 * Some cards need very high timeouts if driven in SPI mode.
564 * The worst observed timeout was 900ms after writing a
565 * continuous stream of data until the internal logic
566 * overflowed.
567 */
568 if (mmc_host_is_spi(card->host)) {
569 if (data->flags & MMC_DATA_WRITE) {
570 if (data->timeout_ns < 1000000000)
571 data->timeout_ns = 1000000000; /* 1s */
572 } else {
573 if (data->timeout_ns < 100000000)
574 data->timeout_ns = 100000000; /* 100ms */
575 }
576 }
577}
578EXPORT_SYMBOL(mmc_set_data_timeout);
579
580/**
581 * mmc_align_data_size - pads a transfer size to a more optimal value
582 * @card: the MMC card associated with the data transfer
583 * @sz: original transfer size
584 *
585 * Pads the original data size with a number of extra bytes in
586 * order to avoid controller bugs and/or performance hits
587 * (e.g. some controllers revert to PIO for certain sizes).
588 *
589 * Returns the improved size, which might be unmodified.
590 *
591 * Note that this function is only relevant when issuing a
592 * single scatter gather entry.
593 */
594unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
595{
596 /*
597 * FIXME: We don't have a system for the controller to tell
598 * the core about its problems yet, so for now we just 32-bit
599 * align the size.
600 */
601 sz = ((sz + 3) / 4) * 4;
602
603 return sz;
604}
605EXPORT_SYMBOL(mmc_align_data_size);
606
607/**
608 * __mmc_claim_host - exclusively claim a host
609 * @host: mmc host to claim
610 * @abort: whether or not the operation should be aborted
611 *
612 * Claim a host for a set of operations. If @abort is non null and
613 * dereference a non-zero value then this will return prematurely with
614 * that non-zero value without acquiring the lock. Returns zero
615 * with the lock held otherwise.
616 */
617int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
618{
619 DECLARE_WAITQUEUE(wait, current);
620 unsigned long flags;
621 int stop;
622
623 might_sleep();
624
625 add_wait_queue(&host->wq, &wait);
626 spin_lock_irqsave(&host->lock, flags);
627 while (1) {
628 set_current_state(TASK_UNINTERRUPTIBLE);
629 stop = abort ? atomic_read(abort) : 0;
630 if (stop || !host->claimed || host->claimer == current)
631 break;
632 spin_unlock_irqrestore(&host->lock, flags);
633 schedule();
634 spin_lock_irqsave(&host->lock, flags);
635 }
636 set_current_state(TASK_RUNNING);
637 if (!stop) {
638 host->claimed = 1;
639 host->claimer = current;
640 host->claim_cnt += 1;
641 } else
642 wake_up(&host->wq);
643 spin_unlock_irqrestore(&host->lock, flags);
644 remove_wait_queue(&host->wq, &wait);
645 if (host->ops->enable && !stop && host->claim_cnt == 1)
646 host->ops->enable(host);
647 return stop;
648}
649
650EXPORT_SYMBOL(__mmc_claim_host);
651
652/**
653 * mmc_try_claim_host - try exclusively to claim a host
654 * @host: mmc host to claim
655 *
656 * Returns %1 if the host is claimed, %0 otherwise.
657 */
658int mmc_try_claim_host(struct mmc_host *host)
659{
660 int claimed_host = 0;
661 unsigned long flags;
662
663 spin_lock_irqsave(&host->lock, flags);
664 if (!host->claimed || host->claimer == current) {
665 host->claimed = 1;
666 host->claimer = current;
667 host->claim_cnt += 1;
668 claimed_host = 1;
669 }
670 spin_unlock_irqrestore(&host->lock, flags);
671 if (host->ops->enable && claimed_host && host->claim_cnt == 1)
672 host->ops->enable(host);
673 return claimed_host;
674}
675EXPORT_SYMBOL(mmc_try_claim_host);
676
677/**
678 * mmc_release_host - release a host
679 * @host: mmc host to release
680 *
681 * Release a MMC host, allowing others to claim the host
682 * for their operations.
683 */
684void mmc_release_host(struct mmc_host *host)
685{
686 unsigned long flags;
687
688 WARN_ON(!host->claimed);
689
690 if (host->ops->disable && host->claim_cnt == 1)
691 host->ops->disable(host);
692
693 spin_lock_irqsave(&host->lock, flags);
694 if (--host->claim_cnt) {
695 /* Release for nested claim */
696 spin_unlock_irqrestore(&host->lock, flags);
697 } else {
698 host->claimed = 0;
699 host->claimer = NULL;
700 spin_unlock_irqrestore(&host->lock, flags);
701 wake_up(&host->wq);
702 }
703}
704EXPORT_SYMBOL(mmc_release_host);
705
706/*
707 * Internal function that does the actual ios call to the host driver,
708 * optionally printing some debug output.
709 */
710static inline void mmc_set_ios(struct mmc_host *host)
711{
712 struct mmc_ios *ios = &host->ios;
713
714 pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
715 "width %u timing %u\n",
716 mmc_hostname(host), ios->clock, ios->bus_mode,
717 ios->power_mode, ios->chip_select, ios->vdd,
718 ios->bus_width, ios->timing);
719
720 if (ios->clock > 0)
721 mmc_set_ungated(host);
722 host->ops->set_ios(host, ios);
723}
724
725/*
726 * Control chip select pin on a host.
727 */
728void mmc_set_chip_select(struct mmc_host *host, int mode)
729{
730 mmc_host_clk_hold(host);
731 host->ios.chip_select = mode;
732 mmc_set_ios(host);
733 mmc_host_clk_release(host);
734}
735
736/*
737 * Sets the host clock to the highest possible frequency that
738 * is below "hz".
739 */
740static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
741{
742 WARN_ON(hz < host->f_min);
743
744 if (hz > host->f_max)
745 hz = host->f_max;
746
747 host->ios.clock = hz;
748 mmc_set_ios(host);
749}
750
751void mmc_set_clock(struct mmc_host *host, unsigned int hz)
752{
753 mmc_host_clk_hold(host);
754 __mmc_set_clock(host, hz);
755 mmc_host_clk_release(host);
756}
757
758#ifdef CONFIG_MMC_CLKGATE
759/*
760 * This gates the clock by setting it to 0 Hz.
761 */
762void mmc_gate_clock(struct mmc_host *host)
763{
764 unsigned long flags;
765
766 spin_lock_irqsave(&host->clk_lock, flags);
767 host->clk_old = host->ios.clock;
768 host->ios.clock = 0;
769 host->clk_gated = true;
770 spin_unlock_irqrestore(&host->clk_lock, flags);
771 mmc_set_ios(host);
772}
773
774/*
775 * This restores the clock from gating by using the cached
776 * clock value.
777 */
778void mmc_ungate_clock(struct mmc_host *host)
779{
780 /*
781 * We should previously have gated the clock, so the clock shall
782 * be 0 here! The clock may however be 0 during initialization,
783 * when some request operations are performed before setting
784 * the frequency. When ungate is requested in that situation
785 * we just ignore the call.
786 */
787 if (host->clk_old) {
788 BUG_ON(host->ios.clock);
789 /* This call will also set host->clk_gated to false */
790 __mmc_set_clock(host, host->clk_old);
791 }
792}
793
794void mmc_set_ungated(struct mmc_host *host)
795{
796 unsigned long flags;
797
798 /*
799 * We've been given a new frequency while the clock is gated,
800 * so make sure we regard this as ungating it.
801 */
802 spin_lock_irqsave(&host->clk_lock, flags);
803 host->clk_gated = false;
804 spin_unlock_irqrestore(&host->clk_lock, flags);
805}
806
807#else
808void mmc_set_ungated(struct mmc_host *host)
809{
810}
811#endif
812
813/*
814 * Change the bus mode (open drain/push-pull) of a host.
815 */
816void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
817{
818 mmc_host_clk_hold(host);
819 host->ios.bus_mode = mode;
820 mmc_set_ios(host);
821 mmc_host_clk_release(host);
822}
823
824/*
825 * Change data bus width of a host.
826 */
827void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
828{
829 mmc_host_clk_hold(host);
830 host->ios.bus_width = width;
831 mmc_set_ios(host);
832 mmc_host_clk_release(host);
833}
834
835/**
836 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
837 * @vdd: voltage (mV)
838 * @low_bits: prefer low bits in boundary cases
839 *
840 * This function returns the OCR bit number according to the provided @vdd
841 * value. If conversion is not possible a negative errno value returned.
842 *
843 * Depending on the @low_bits flag the function prefers low or high OCR bits
844 * on boundary voltages. For example,
845 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
846 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
847 *
848 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
849 */
850static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
851{
852 const int max_bit = ilog2(MMC_VDD_35_36);
853 int bit;
854
855 if (vdd < 1650 || vdd > 3600)
856 return -EINVAL;
857
858 if (vdd >= 1650 && vdd <= 1950)
859 return ilog2(MMC_VDD_165_195);
860
861 if (low_bits)
862 vdd -= 1;
863
864 /* Base 2000 mV, step 100 mV, bit's base 8. */
865 bit = (vdd - 2000) / 100 + 8;
866 if (bit > max_bit)
867 return max_bit;
868 return bit;
869}
870
871/**
872 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
873 * @vdd_min: minimum voltage value (mV)
874 * @vdd_max: maximum voltage value (mV)
875 *
876 * This function returns the OCR mask bits according to the provided @vdd_min
877 * and @vdd_max values. If conversion is not possible the function returns 0.
878 *
879 * Notes wrt boundary cases:
880 * This function sets the OCR bits for all boundary voltages, for example
881 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
882 * MMC_VDD_34_35 mask.
883 */
884u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
885{
886 u32 mask = 0;
887
888 if (vdd_max < vdd_min)
889 return 0;
890
891 /* Prefer high bits for the boundary vdd_max values. */
892 vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
893 if (vdd_max < 0)
894 return 0;
895
896 /* Prefer low bits for the boundary vdd_min values. */
897 vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
898 if (vdd_min < 0)
899 return 0;
900
901 /* Fill the mask, from max bit to min bit. */
902 while (vdd_max >= vdd_min)
903 mask |= 1 << vdd_max--;
904
905 return mask;
906}
907EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
908
909#ifdef CONFIG_REGULATOR
910
911/**
912 * mmc_regulator_get_ocrmask - return mask of supported voltages
913 * @supply: regulator to use
914 *
915 * This returns either a negative errno, or a mask of voltages that
916 * can be provided to MMC/SD/SDIO devices using the specified voltage
917 * regulator. This would normally be called before registering the
918 * MMC host adapter.
919 */
920int mmc_regulator_get_ocrmask(struct regulator *supply)
921{
922 int result = 0;
923 int count;
924 int i;
925
926 count = regulator_count_voltages(supply);
927 if (count < 0)
928 return count;
929
930 for (i = 0; i < count; i++) {
931 int vdd_uV;
932 int vdd_mV;
933
934 vdd_uV = regulator_list_voltage(supply, i);
935 if (vdd_uV <= 0)
936 continue;
937
938 vdd_mV = vdd_uV / 1000;
939 result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
940 }
941
942 return result;
943}
944EXPORT_SYMBOL(mmc_regulator_get_ocrmask);
945
946/**
947 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
948 * @mmc: the host to regulate
949 * @supply: regulator to use
950 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
951 *
952 * Returns zero on success, else negative errno.
953 *
954 * MMC host drivers may use this to enable or disable a regulator using
955 * a particular supply voltage. This would normally be called from the
956 * set_ios() method.
957 */
958int mmc_regulator_set_ocr(struct mmc_host *mmc,
959 struct regulator *supply,
960 unsigned short vdd_bit)
961{
962 int result = 0;
963 int min_uV, max_uV;
964
965 if (vdd_bit) {
966 int tmp;
967 int voltage;
968
969 /* REVISIT mmc_vddrange_to_ocrmask() may have set some
970 * bits this regulator doesn't quite support ... don't
971 * be too picky, most cards and regulators are OK with
972 * a 0.1V range goof (it's a small error percentage).
973 */
974 tmp = vdd_bit - ilog2(MMC_VDD_165_195);
975 if (tmp == 0) {
976 min_uV = 1650 * 1000;
977 max_uV = 1950 * 1000;
978 } else {
979 min_uV = 1900 * 1000 + tmp * 100 * 1000;
980 max_uV = min_uV + 100 * 1000;
981 }
982
983 /* avoid needless changes to this voltage; the regulator
984 * might not allow this operation
985 */
986 voltage = regulator_get_voltage(supply);
987
988 if (mmc->caps2 & MMC_CAP2_BROKEN_VOLTAGE)
989 min_uV = max_uV = voltage;
990
991 if (voltage < 0)
992 result = voltage;
993 else if (voltage < min_uV || voltage > max_uV)
994 result = regulator_set_voltage(supply, min_uV, max_uV);
995 else
996 result = 0;
997
998 if (result == 0 && !mmc->regulator_enabled) {
999 result = regulator_enable(supply);
1000 if (!result)
1001 mmc->regulator_enabled = true;
1002 }
1003 } else if (mmc->regulator_enabled) {
1004 result = regulator_disable(supply);
1005 if (result == 0)
1006 mmc->regulator_enabled = false;
1007 }
1008
1009 if (result)
1010 dev_err(mmc_dev(mmc),
1011 "could not set regulator OCR (%d)\n", result);
1012 return result;
1013}
1014EXPORT_SYMBOL(mmc_regulator_set_ocr);
1015
1016#endif /* CONFIG_REGULATOR */
1017
1018/*
1019 * Mask off any voltages we don't support and select
1020 * the lowest voltage
1021 */
1022u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1023{
1024 int bit;
1025
1026 ocr &= host->ocr_avail;
1027
1028 bit = ffs(ocr);
1029 if (bit) {
1030 bit -= 1;
1031
1032 ocr &= 3 << bit;
1033
1034 mmc_host_clk_hold(host);
1035 host->ios.vdd = bit;
1036 mmc_set_ios(host);
1037 mmc_host_clk_release(host);
1038 } else {
1039 pr_warning("%s: host doesn't support card's voltages\n",
1040 mmc_hostname(host));
1041 ocr = 0;
1042 }
1043
1044 return ocr;
1045}
1046
1047int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
1048{
1049 struct mmc_command cmd = {0};
1050 int err = 0;
1051
1052 BUG_ON(!host);
1053
1054 /*
1055 * Send CMD11 only if the request is to switch the card to
1056 * 1.8V signalling.
1057 */
1058 if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
1059 cmd.opcode = SD_SWITCH_VOLTAGE;
1060 cmd.arg = 0;
1061 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1062
1063 err = mmc_wait_for_cmd(host, &cmd, 0);
1064 if (err)
1065 return err;
1066
1067 if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
1068 return -EIO;
1069 }
1070
1071 host->ios.signal_voltage = signal_voltage;
1072
1073 if (host->ops->start_signal_voltage_switch) {
1074 mmc_host_clk_hold(host);
1075 err = host->ops->start_signal_voltage_switch(host, &host->ios);
1076 mmc_host_clk_release(host);
1077 }
1078
1079 return err;
1080}
1081
1082/*
1083 * Select timing parameters for host.
1084 */
1085void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1086{
1087 mmc_host_clk_hold(host);
1088 host->ios.timing = timing;
1089 mmc_set_ios(host);
1090 mmc_host_clk_release(host);
1091}
1092
1093/*
1094 * Select appropriate driver type for host.
1095 */
1096void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1097{
1098 mmc_host_clk_hold(host);
1099 host->ios.drv_type = drv_type;
1100 mmc_set_ios(host);
1101 mmc_host_clk_release(host);
1102}
1103
1104static void mmc_poweroff_notify(struct mmc_host *host)
1105{
1106 struct mmc_card *card;
1107 unsigned int timeout;
1108 unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
1109 int err = 0;
1110
1111 card = host->card;
1112 mmc_claim_host(host);
1113
1114 /*
1115 * Send power notify command only if card
1116 * is mmc and notify state is powered ON
1117 */
1118 if (card && mmc_card_mmc(card) &&
1119 (card->poweroff_notify_state == MMC_POWERED_ON)) {
1120
1121 if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
1122 notify_type = EXT_CSD_POWER_OFF_SHORT;
1123 timeout = card->ext_csd.generic_cmd6_time;
1124 card->poweroff_notify_state = MMC_POWEROFF_SHORT;
1125 } else {
1126 notify_type = EXT_CSD_POWER_OFF_LONG;
1127 timeout = card->ext_csd.power_off_longtime;
1128 card->poweroff_notify_state = MMC_POWEROFF_LONG;
1129 }
1130
1131 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1132 EXT_CSD_POWER_OFF_NOTIFICATION,
1133 notify_type, timeout);
1134
1135 if (err && err != -EBADMSG)
1136 pr_err("Device failed to respond within %d poweroff "
1137 "time. Forcefully powering down the device\n",
1138 timeout);
1139
1140 /* Set the card state to no notification after the poweroff */
1141 card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
1142 }
1143 mmc_release_host(host);
1144}
1145
1146/*
1147 * Apply power to the MMC stack. This is a two-stage process.
1148 * First, we enable power to the card without the clock running.
1149 * We then wait a bit for the power to stabilise. Finally,
1150 * enable the bus drivers and clock to the card.
1151 *
1152 * We must _NOT_ enable the clock prior to power stablising.
1153 *
1154 * If a host does all the power sequencing itself, ignore the
1155 * initial MMC_POWER_UP stage.
1156 */
1157static void mmc_power_up(struct mmc_host *host)
1158{
1159 int bit;
1160
1161 if (host->ios.power_mode == MMC_POWER_ON)
1162 return;
1163
1164 mmc_host_clk_hold(host);
1165
1166 /* If ocr is set, we use it */
1167 if (host->ocr)
1168 bit = ffs(host->ocr) - 1;
1169 else
1170 bit = fls(host->ocr_avail) - 1;
1171
1172 host->ios.vdd = bit;
1173 if (mmc_host_is_spi(host))
1174 host->ios.chip_select = MMC_CS_HIGH;
1175 else
1176 host->ios.chip_select = MMC_CS_DONTCARE;
1177 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1178 host->ios.power_mode = MMC_POWER_UP;
1179 host->ios.bus_width = MMC_BUS_WIDTH_1;
1180 host->ios.timing = MMC_TIMING_LEGACY;
1181 mmc_set_ios(host);
1182
1183 /*
1184 * This delay should be sufficient to allow the power supply
1185 * to reach the minimum voltage.
1186 */
1187 mmc_delay(10);
1188
1189 host->ios.clock = host->f_init;
1190
1191 host->ios.power_mode = MMC_POWER_ON;
1192 mmc_set_ios(host);
1193
1194 /*
1195 * This delay must be at least 74 clock cycles, or 1 ms, or the
1196 * time required to reach a stable voltage.
1197 */
1198 mmc_delay(10);
1199
1200 mmc_host_clk_release(host);
1201}
1202
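/*
 * Remove power from the MMC bus. A sleeping eMMC is resumed first so that
 * a POWER_OFF_NOTIFY can be sent, the ocr mask is reset to the host's
 * highest supported voltage for the next power up, and the bus is then
 * dropped to MMC_POWER_OFF with 1-bit width and legacy timing.
 */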
1203void mmc_power_off(struct mmc_host *host)
1204{
1205 int err = 0;
1206
1207 if (host->ios.power_mode == MMC_POWER_OFF)
1208 return;
1209
1210 mmc_host_clk_hold(host);
1211
1212 host->ios.clock = 0;
1213 host->ios.vdd = 0;
1214
1215 /*
1216 * For an eMMC 4.5 device, send the AWAKE command before the
1217 * POWER_OFF_NOTIFY command, because in the sleep state
1218 * eMMC 4.5 devices respond only to the RESET and AWAKE commands
1219 */
1220 if (host->card && mmc_card_is_sleep(host->card) &&
1221 host->bus_ops->resume) {
1222 err = host->bus_ops->resume(host);
1223
1224 if (!err)
1225 mmc_poweroff_notify(host);
1226 else
1227 pr_warning("%s: error %d during resume "
1228 "(continue with poweroff sequence)\n",
1229 mmc_hostname(host), err);
1230 }
1231
1232 /*
1233 * Reset ocr mask to be the highest possible voltage supported for
1234 * this mmc host. This value will be used at next power up.
1235 */
1236 host->ocr = 1 << (fls(host->ocr_avail) - 1);
1237
1238 if (!mmc_host_is_spi(host)) {
1239 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
1240 host->ios.chip_select = MMC_CS_DONTCARE;
1241 }
1242 host->ios.power_mode = MMC_POWER_OFF;
1243 host->ios.bus_width = MMC_BUS_WIDTH_1;
1244 host->ios.timing = MMC_TIMING_LEGACY;
1245 mmc_set_ios(host);
1246
1247 /*
1248 * Some configurations, such as the 802.11 SDIO card in the OLPC
1249 * XO-1.5, require a short delay after poweroff before the card
1250 * can be successfully turned on again.
1251 */
1252 mmc_delay(1);
1253
1254 mmc_host_clk_release(host);
1255}
1256
1257/*
1258 * Cleanup when the last reference to the bus operator is dropped.
1259 */
1260static void __mmc_release_bus(struct mmc_host *host)
1261{
1262 BUG_ON(!host);
1263 BUG_ON(host->bus_refs);
1264 BUG_ON(!host->bus_dead);
1265
1266 host->bus_ops = NULL;
1267}
1268
1269/*
1270 * Increase reference count of bus operator
1271 */
1272static inline void mmc_bus_get(struct mmc_host *host)
1273{
1274 unsigned long flags;
1275
1276 spin_lock_irqsave(&host->lock, flags);
1277 host->bus_refs++;
1278 spin_unlock_irqrestore(&host->lock, flags);
1279}
1280
1281/*
1282 * Decrease reference count of bus operator and free it if
1283 * it is the last reference.
1284 */
1285static inline void mmc_bus_put(struct mmc_host *host)
1286{
1287 unsigned long flags;
1288
1289 spin_lock_irqsave(&host->lock, flags);
1290 host->bus_refs--;
1291 if ((host->bus_refs == 0) && host->bus_ops)
1292 __mmc_release_bus(host);
1293 spin_unlock_irqrestore(&host->lock, flags);
1294}
1295
1296/*
1297 * Assign a mmc bus handler to a host. Only one bus handler may control a
1298 * host at any given time.
1299 */
1300void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1301{
1302 unsigned long flags;
1303
1304 BUG_ON(!host);
1305 BUG_ON(!ops);
1306
1307 WARN_ON(!host->claimed);
1308
1309 spin_lock_irqsave(&host->lock, flags);
1310
1311 BUG_ON(host->bus_ops);
1312 BUG_ON(host->bus_refs);
1313
1314 host->bus_ops = ops;
1315 host->bus_refs = 1;
1316 host->bus_dead = 0;
1317
1318 spin_unlock_irqrestore(&host->lock, flags);
1319}
1320
1321/*
1322 * Remove the current bus handler from a host.
1323 */
1324void mmc_detach_bus(struct mmc_host *host)
1325{
1326 unsigned long flags;
1327
1328 BUG_ON(!host);
1329
1330 WARN_ON(!host->claimed);
1331 WARN_ON(!host->bus_ops);
1332
1333 spin_lock_irqsave(&host->lock, flags);
1334
1335 host->bus_dead = 1;
1336
1337 spin_unlock_irqrestore(&host->lock, flags);
1338
1339 mmc_bus_put(host);
1340}
1341
1342/**
1343 * mmc_detect_change - process change of state on a MMC socket
1344 * @host: host which changed state.
1345 * @delay: optional delay to wait before detection (jiffies)
1346 *
1347 * MMC drivers should call this when they detect a card has been
1348 * inserted or removed. The MMC layer will confirm that any
1349 * present card is still functional, and initialize any newly
1350 * inserted card.
1351 */
1352void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1353{
1354#ifdef CONFIG_MMC_DEBUG
1355 unsigned long flags;
1356 spin_lock_irqsave(&host->lock, flags);
1357 WARN_ON(host->removed);
1358 spin_unlock_irqrestore(&host->lock, flags);
1359#endif
1360 host->detect_change = 1;
1361 mmc_schedule_delayed_work(&host->detect, delay);
1362}
1363
1364EXPORT_SYMBOL(mmc_detect_change);
1365
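/*
 * Work out the card's erase geometry: erase_shift is set when the erase
 * size is a power of 2 (so shifts can replace divisions), and pref_erase
 * is chosen as a sensible per-request limit - the SD allocation unit, the
 * MMC high-capacity erase size, or a value scaled from the card capacity
 * and rounded up to a whole erase group.
 */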
1366void mmc_init_erase(struct mmc_card *card)
1367{
1368 unsigned int sz;
1369
1370 if (is_power_of_2(card->erase_size))
1371 card->erase_shift = ffs(card->erase_size) - 1;
1372 else
1373 card->erase_shift = 0;
1374
1375 /*
1376 * It is possible to erase an arbitrarily large area of an SD or MMC
1377 * card. That is not desirable because it can take a long time
1378 * (minutes) potentially delaying more important I/O, and also the
1379 * timeout calculations become increasingly over-estimated.
1380 * Consequently, 'pref_erase' is defined as a guide to limit erases
1381 * to that size and alignment.
1382 *
1383 * For SD cards that define Allocation Unit size, limit erases to one
1384 * Allocation Unit at a time. For MMC cards that define High Capacity
1385 * Erase Size, whether it is switched on or not, limit to that size.
1386 * Otherwise just have a stab at a good value. For modern cards it
1387 * will end up being 4MiB. Note that if the value is too small, it
1388 * can end up taking longer to erase.
1389 */
1390 if (mmc_card_sd(card) && card->ssr.au) {
1391 card->pref_erase = card->ssr.au;
1392 card->erase_shift = ffs(card->ssr.au) - 1;
1393 } else if (card->ext_csd.hc_erase_size) {
1394 card->pref_erase = card->ext_csd.hc_erase_size;
1395 } else {
1396 sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
1397 if (sz < 128)
1398 card->pref_erase = 512 * 1024 / 512;
1399 else if (sz < 512)
1400 card->pref_erase = 1024 * 1024 / 512;
1401 else if (sz < 1024)
1402 card->pref_erase = 2 * 1024 * 1024 / 512;
1403 else
1404 card->pref_erase = 4 * 1024 * 1024 / 512;
1405 if (card->pref_erase < card->erase_size)
1406 card->pref_erase = card->erase_size;
1407 else {
1408 sz = card->pref_erase % card->erase_size;
1409 if (sz)
1410 card->pref_erase += card->erase_size - sz;
1411 }
1412 }
1413}
1414
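/*
 * Estimate the erase timeout (in ms) for an MMC card. Discard and modern
 * trim requests use the EXT_CSD trim timeout, high-capacity erase groups
 * use the EXT_CSD high-capacity erase timeout, and legacy CSD erase
 * groups derive a timeout from the card's access times scaled by
 * R2W_FACTOR. The result is scaled by the secure-operation multipliers
 * and the number of erase groups affected.
 */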
1415static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1416 unsigned int arg, unsigned int qty)
1417{
1418 unsigned int erase_timeout;
1419
1420 if (arg == MMC_DISCARD_ARG ||
1421 (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
1422 erase_timeout = card->ext_csd.trim_timeout;
1423 } else if (card->ext_csd.erase_group_def & 1) {
1424 /* High Capacity Erase Group Size uses HC timeouts */
1425 if (arg == MMC_TRIM_ARG)
1426 erase_timeout = card->ext_csd.trim_timeout;
1427 else
1428 erase_timeout = card->ext_csd.hc_erase_timeout;
1429 } else {
1430 /* CSD Erase Group Size uses write timeout */
1431 unsigned int mult = (10 << card->csd.r2w_factor);
1432 unsigned int timeout_clks = card->csd.tacc_clks * mult;
1433 unsigned int timeout_us;
1434
1435 /* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
1436 if (card->csd.tacc_ns < 1000000)
1437 timeout_us = (card->csd.tacc_ns * mult) / 1000;
1438 else
1439 timeout_us = (card->csd.tacc_ns / 1000) * mult;
1440
1441 /*
1442 * ios.clock is only a target. The real clock rate might be
1443 * less but not that much less, so fudge it by multiplying by 2.
1444 */
1445 timeout_clks <<= 1;
1446 timeout_us += (timeout_clks * 1000) /
1447 (mmc_host_clk_rate(card->host) / 1000);
1448
1449 erase_timeout = timeout_us / 1000;
1450
1451 /*
1452 * Theoretically, the calculation could underflow so round up
1453 * to 1ms in that case.
1454 */
1455 if (!erase_timeout)
1456 erase_timeout = 1;
1457 }
1458
1459 /* Multiplier for secure operations */
1460 if (arg & MMC_SECURE_ARGS) {
1461 if (arg == MMC_SECURE_ERASE_ARG)
1462 erase_timeout *= card->ext_csd.sec_erase_mult;
1463 else
1464 erase_timeout *= card->ext_csd.sec_trim_mult;
1465 }
1466
1467 erase_timeout *= qty;
1468
1469 /*
1470 * Ensure at least a 1 second timeout for SPI as per
1471 * 'mmc_set_data_timeout()'
1472 */
1473 if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
1474 erase_timeout = 1000;
1475
1476 return erase_timeout;
1477}
1478
1479static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
1480 unsigned int arg,
1481 unsigned int qty)
1482{
1483 unsigned int erase_timeout;
1484
1485 if (card->ssr.erase_timeout) {
1486 /* Erase timeout specified in SD Status Register (SSR) */
1487 erase_timeout = card->ssr.erase_timeout * qty +
1488 card->ssr.erase_offset;
1489 } else {
1490 /*
1491 * Erase timeout not specified in SD Status Register (SSR) so
1492 * use 250ms per write block.
1493 */
1494 erase_timeout = 250 * qty;
1495 }
1496
1497 /* Must not be less than 1 second */
1498 if (erase_timeout < 1000)
1499 erase_timeout = 1000;
1500
1501 return erase_timeout;
1502}
1503
1504static unsigned int mmc_erase_timeout(struct mmc_card *card,
1505 unsigned int arg,
1506 unsigned int qty)
1507{
1508 if (mmc_card_sd(card))
1509 return mmc_sd_erase_timeout(card, arg, qty);
1510 else
1511 return mmc_mmc_erase_timeout(card, arg, qty);
1512}
1513
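/*
 * Issue the actual erase sequence: the start address is set with
 * ERASE_GROUP_START (WR_BLK_START for SD), the end address with
 * ERASE_GROUP_END (WR_BLK_END for SD), then MMC_ERASE is sent with @arg
 * and a timeout scaled by the number of erase groups touched. Non-SPI
 * hosts then poll SEND_STATUS until the card leaves the programming state.
 */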
1514static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1515 unsigned int to, unsigned int arg)
1516{
1517 struct mmc_command cmd = {0};
1518 unsigned int qty = 0;
1519 int err;
1520
1521 /*
1522 * qty is used to calculate the erase timeout which depends on how many
1523 * erase groups (or allocation units in SD terminology) are affected.
1524 * We count erasing part of an erase group as one erase group.
1525 * For SD, the allocation units are always a power of 2. For MMC, the
1526 * erase group size is almost certainly also a power of 2, but the
1527 * JEDEC standard does not seem to insist on that, so we fall back to
1528 * division in that case. SD may not specify an allocation unit size,
1529 * in which case the timeout is based on the number of write blocks.
1530 *
1531 * Note that the timeout for secure trim 2 will only be correct if the
1532 * number of erase groups specified is the same as the total of all
1533 * preceding secure trim 1 commands. Since the power may have been
1534 * lost since the secure trim 1 commands occurred, it is generally
1535 * impossible to calculate the secure trim 2 timeout correctly.
1536 */
1537 if (card->erase_shift)
1538 qty += ((to >> card->erase_shift) -
1539 (from >> card->erase_shift)) + 1;
1540 else if (mmc_card_sd(card))
1541 qty += to - from + 1;
1542 else
1543 qty += ((to / card->erase_size) -
1544 (from / card->erase_size)) + 1;
1545
1546 if (!mmc_card_blockaddr(card)) {
1547 from <<= 9;
1548 to <<= 9;
1549 }
1550
1551 if (mmc_card_sd(card))
1552 cmd.opcode = SD_ERASE_WR_BLK_START;
1553 else
1554 cmd.opcode = MMC_ERASE_GROUP_START;
1555 cmd.arg = from;
1556 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1557 err = mmc_wait_for_cmd(card->host, &cmd, 0);
1558 if (err) {
1559 pr_err("mmc_erase: group start error %d, "
1560 "status %#x\n", err, cmd.resp[0]);
1561 err = -EIO;
1562 goto out;
1563 }
1564
1565 memset(&cmd, 0, sizeof(struct mmc_command));
1566 if (mmc_card_sd(card))
1567 cmd.opcode = SD_ERASE_WR_BLK_END;
1568 else
1569 cmd.opcode = MMC_ERASE_GROUP_END;
1570 cmd.arg = to;
1571 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1572 err = mmc_wait_for_cmd(card->host, &cmd, 0);
1573 if (err) {
1574 pr_err("mmc_erase: group end error %d, status %#x\n",
1575 err, cmd.resp[0]);
1576 err = -EIO;
1577 goto out;
1578 }
1579
1580 memset(&cmd, 0, sizeof(struct mmc_command));
1581 cmd.opcode = MMC_ERASE;
1582 cmd.arg = arg;
1583 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1584 cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
1585 err = mmc_wait_for_cmd(card->host, &cmd, 0);
1586 if (err) {
1587 pr_err("mmc_erase: erase error %d, status %#x\n",
1588 err, cmd.resp[0]);
1589 err = -EIO;
1590 goto out;
1591 }
1592
1593 if (mmc_host_is_spi(card->host))
1594 goto out;
1595
1596 do {
1597 memset(&cmd, 0, sizeof(struct mmc_command));
1598 cmd.opcode = MMC_SEND_STATUS;
1599 cmd.arg = card->rca << 16;
1600 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1601 /* Do not retry else we can't see errors */
1602 err = mmc_wait_for_cmd(card->host, &cmd, 0);
1603 if (err || (cmd.resp[0] & 0xFDF92000)) {
1604 pr_err("error %d requesting status %#x\n",
1605 err, cmd.resp[0]);
1606 err = -EIO;
1607 goto out;
1608 }
1609 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
1610 R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG);
1611out:
1612 return err;
1613}
1614
1615/**
1616 * mmc_erase - erase sectors.
1617 * @card: card to erase
1618 * @from: first sector to erase
1619 * @nr: number of sectors to erase
1620 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
1621 *
1622 * Caller must claim host before calling this function.
1623 */
1624int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
1625 unsigned int arg)
1626{
1627 unsigned int rem, to = from + nr;
1628
1629 if (!(card->host->caps & MMC_CAP_ERASE) ||
1630 !(card->csd.cmdclass & CCC_ERASE))
1631 return -EOPNOTSUPP;
1632
1633 if (!card->erase_size)
1634 return -EOPNOTSUPP;
1635
1636 if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
1637 return -EOPNOTSUPP;
1638
1639 if ((arg & MMC_SECURE_ARGS) &&
1640 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
1641 return -EOPNOTSUPP;
1642
1643 if ((arg & MMC_TRIM_ARGS) &&
1644 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
1645 return -EOPNOTSUPP;
1646
1647 if (arg == MMC_SECURE_ERASE_ARG) {
1648 if (from % card->erase_size || nr % card->erase_size)
1649 return -EINVAL;
1650 }
1651
1652 if (arg == MMC_ERASE_ARG) {
1653 rem = from % card->erase_size;
1654 if (rem) {
1655 rem = card->erase_size - rem;
1656 from += rem;
1657 if (nr > rem)
1658 nr -= rem;
1659 else
1660 return 0;
1661 }
1662 rem = nr % card->erase_size;
1663 if (rem)
1664 nr -= rem;
1665 }
1666
1667 if (nr == 0)
1668 return 0;
1669
1670 to = from + nr;
1671
1672 if (to <= from)
1673 return -EINVAL;
1674
1675 /* 'from' and 'to' are inclusive */
1676 to -= 1;
1677
1678 return mmc_do_erase(card, from, to, arg);
1679}
1680EXPORT_SYMBOL(mmc_erase);
1681
1682int mmc_can_erase(struct mmc_card *card)
1683{
1684 if ((card->host->caps & MMC_CAP_ERASE) &&
1685 (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
1686 return 1;
1687 return 0;
1688}
1689EXPORT_SYMBOL(mmc_can_erase);
1690
1691int mmc_can_trim(struct mmc_card *card)
1692{
1693 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
1694 return 1;
1695 return 0;
1696}
1697EXPORT_SYMBOL(mmc_can_trim);
1698
1699int mmc_can_discard(struct mmc_card *card)
1700{
1701 /*
1702 * As there's no way to detect the discard support bit in v4.5,
1703 * use the s/w feature support field.
1704 */
1705 if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
1706 return 1;
1707 return 0;
1708}
1709EXPORT_SYMBOL(mmc_can_discard);
1710
1711int mmc_can_sanitize(struct mmc_card *card)
1712{
1713 if (!mmc_can_trim(card) && !mmc_can_erase(card))
1714 return 0;
1715 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
1716 return 1;
1717 return 0;
1718}
1719EXPORT_SYMBOL(mmc_can_sanitize);
1720
1721int mmc_can_secure_erase_trim(struct mmc_card *card)
1722{
1723 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
1724 return 1;
1725 return 0;
1726}
1727EXPORT_SYMBOL(mmc_can_secure_erase_trim);
1728
1729int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
1730 unsigned int nr)
1731{
1732 if (!card->erase_size)
1733 return 0;
1734 if (from % card->erase_size || nr % card->erase_size)
1735 return 0;
1736 return 1;
1737}
1738EXPORT_SYMBOL(mmc_erase_group_aligned);
1739
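/*
 * Grow the erase quantity in power-of-two steps until the estimated
 * timeout for the given erase argument would exceed the host's
 * max_discard_to limit, then convert the largest acceptable quantity
 * back into sectors.
 */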
1740static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
1741 unsigned int arg)
1742{
1743 struct mmc_host *host = card->host;
1744 unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
1745 unsigned int last_timeout = 0;
1746
1747 if (card->erase_shift)
1748 max_qty = UINT_MAX >> card->erase_shift;
1749 else if (mmc_card_sd(card))
1750 max_qty = UINT_MAX;
1751 else
1752 max_qty = UINT_MAX / card->erase_size;
1753
1754 /* Find the largest qty with an OK timeout */
1755 do {
1756 y = 0;
1757 for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
1758 timeout = mmc_erase_timeout(card, arg, qty + x);
1759 if (timeout > host->max_discard_to)
1760 break;
1761 if (timeout < last_timeout)
1762 break;
1763 last_timeout = timeout;
1764 y = x;
1765 }
1766 qty += y;
1767 } while (y);
1768
1769 if (!qty)
1770 return 0;
1771
1772 if (qty == 1)
1773 return 1;
1774
1775 /* Convert qty to sectors */
1776 if (card->erase_shift)
1777 max_discard = --qty << card->erase_shift;
1778 else if (mmc_card_sd(card))
1779 max_discard = qty;
1780 else
1781 max_discard = --qty * card->erase_size;
1782
1783 return max_discard;
1784}
1785
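/*
 * Work out the largest discard request (in sectors) that fits within the
 * host's maximum busy timeout. When the card also supports trim, the
 * smaller of the erase and trim limits is returned, since a discard may
 * be carried out with either operation.
 */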
1786unsigned int mmc_calc_max_discard(struct mmc_card *card)
1787{
1788 struct mmc_host *host = card->host;
1789 unsigned int max_discard, max_trim;
1790
1791 if (!host->max_discard_to)
1792 return UINT_MAX;
1793
1794 /*
1795 * Without erase_group_def set, MMC erase timeout depends on clock
1796 * frequency, which can change. In that case, the best choice is
1797 * just the preferred erase size.
1798 */
1799 if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
1800 return card->pref_erase;
1801
1802 max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
1803 if (mmc_can_trim(card)) {
1804 max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
1805 if (max_trim < max_discard)
1806 max_discard = max_trim;
1807 } else if (max_discard < card->erase_size) {
1808 max_discard = 0;
1809 }
1810 pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
1811 mmc_hostname(host), max_discard, host->max_discard_to);
1812 return max_discard;
1813}
1814EXPORT_SYMBOL(mmc_calc_max_discard);
1815
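/*
 * Set the card's block length with CMD16. Block-addressed (high-capacity)
 * and DDR-mode cards always use 512-byte blocks, so the command is
 * skipped for them.
 */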
1816int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
1817{
1818 struct mmc_command cmd = {0};
1819
1820 if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
1821 return 0;
1822
1823 cmd.opcode = MMC_SET_BLOCKLEN;
1824 cmd.arg = blocklen;
1825 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1826 return mmc_wait_for_cmd(card->host, &cmd, 5);
1827}
1828EXPORT_SYMBOL(mmc_set_blocklen);
1829
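/*
 * Assert the card's hardware reset line during the first power up, if the
 * host controller supports it. This helps eMMC devices that keep VCCQ
 * powered and are therefore not reset by a plain power cycle.
 */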
1830static void mmc_hw_reset_for_init(struct mmc_host *host)
1831{
1832 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
1833 return;
1834 mmc_host_clk_hold(host);
1835 host->ops->hw_reset(host);
1836 mmc_host_clk_release(host);
1837}
1838
1839int mmc_can_reset(struct mmc_card *card)
1840{
1841 u8 rst_n_function;
1842
1843 if (!mmc_card_mmc(card))
1844 return 0;
1845 rst_n_function = card->ext_csd.rst_n_function;
1846 if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
1847 return 0;
1848 return 1;
1849}
1850EXPORT_SYMBOL(mmc_can_reset);
1851
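/*
 * Hardware-reset the card via the host's ->hw_reset() callback. When
 * @check is set, SEND_STATUS is issued afterwards and -ENOSYS is returned
 * if the card still answers, i.e. the reset line is not actually wired
 * up. On success the bus is returned to its initialisation defaults and
 * the card is re-initialised via the bus handler's ->power_restore().
 */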
1852static int mmc_do_hw_reset(struct mmc_host *host, int check)
1853{
1854 struct mmc_card *card = host->card;
1855
1856 if (!host->bus_ops->power_restore)
1857 return -EOPNOTSUPP;
1858
1859 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
1860 return -EOPNOTSUPP;
1861
1862 if (!card)
1863 return -EINVAL;
1864
1865 if (!mmc_can_reset(card))
1866 return -EOPNOTSUPP;
1867
1868 mmc_host_clk_hold(host);
1869 mmc_set_clock(host, host->f_init);
1870
1871 host->ops->hw_reset(host);
1872
1873 /* If the reset has happened, then a status command will fail */
1874 if (check) {
1875 struct mmc_command cmd = {0};
1876 int err;
1877
1878 cmd.opcode = MMC_SEND_STATUS;
1879 if (!mmc_host_is_spi(card->host))
1880 cmd.arg = card->rca << 16;
1881 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
1882 err = mmc_wait_for_cmd(card->host, &cmd, 0);
1883 if (!err) {
1884 mmc_host_clk_release(host);
1885 return -ENOSYS;
1886 }
1887 }
1888
1889 host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR);
1890 if (mmc_host_is_spi(host)) {
1891 host->ios.chip_select = MMC_CS_HIGH;
1892 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1893 } else {
1894 host->ios.chip_select = MMC_CS_DONTCARE;
1895 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
1896 }
1897 host->ios.bus_width = MMC_BUS_WIDTH_1;
1898 host->ios.timing = MMC_TIMING_LEGACY;
1899 mmc_set_ios(host);
1900
1901 mmc_host_clk_release(host);
1902
1903 return host->bus_ops->power_restore(host);
1904}
1905
1906int mmc_hw_reset(struct mmc_host *host)
1907{
1908 return mmc_do_hw_reset(host, 0);
1909}
1910EXPORT_SYMBOL(mmc_hw_reset);
1911
1912int mmc_hw_reset_check(struct mmc_host *host)
1913{
1914 return mmc_do_hw_reset(host, 1);
1915}
1916EXPORT_SYMBOL(mmc_hw_reset_check);
1917
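/*
 * Try to initialise whatever is attached to the host at the given clock
 * frequency: power up, optional hardware reset, return to the idle state,
 * then probe the attach handlers in order (SDIO, SD, MMC). Returns 0 on
 * success, or powers the bus back off and returns -EIO if nothing
 * responded.
 */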
1918static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
1919{
1920 host->f_init = freq;
1921
1922#ifdef CONFIG_MMC_DEBUG
1923 pr_info("%s: %s: trying to init card at %u Hz\n",
1924 mmc_hostname(host), __func__, host->f_init);
1925#endif
1926 mmc_power_up(host);
1927
1928 /*
1929 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
1930 * do a hardware reset if possible.
1931 */
1932 mmc_hw_reset_for_init(host);
1933
1934 /* Initialization should be done at 3.3 V I/O voltage. */
1935 mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
1936
1937 /*
1938 * sdio_reset sends CMD52 to reset card. Since we do not know
1939 * if the card is being re-initialized, just send it. CMD52
1940 * should be ignored by SD/eMMC cards.
1941 */
1942 sdio_reset(host);
1943 mmc_go_idle(host);
1944
1945 mmc_send_if_cond(host, host->ocr_avail);
1946
1947 /* Order's important: probe SDIO, then SD, then MMC */
1948 if (!mmc_attach_sdio(host))
1949 return 0;
1950 if (!mmc_attach_sd(host))
1951 return 0;
1952 if (!mmc_attach_mmc(host))
1953 return 0;
1954
1955 mmc_power_off(host);
1956 return -EIO;
1957}
1958
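/*
 * Ask the bus handler whether the current card is still alive. Returns
 * non-zero if the card is (or has already been marked as) removed, and 0
 * for a present card or a non-removable host.
 */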
1959int _mmc_detect_card_removed(struct mmc_host *host)
1960{
1961 int ret;
1962
1963 if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive)
1964 return 0;
1965
1966 if (!host->card || mmc_card_removed(host->card))
1967 return 1;
1968
1969 ret = host->bus_ops->alive(host);
1970 if (ret) {
1971 mmc_card_set_removed(host->card);
1972 pr_debug("%s: card remove detected\n", mmc_hostname(host));
1973 }
1974
1975 return ret;
1976}
1977
1978int mmc_detect_card_removed(struct mmc_host *host)
1979{
1980 struct mmc_card *card = host->card;
1981 int ret;
1982
1983 WARN_ON(!host->claimed);
1984
1985 if (!card)
1986 return 1;
1987
1988 ret = mmc_card_removed(card);
1989 /*
1990 * The card will be considered unchanged unless we have been asked to
1991 * detect a change or host requires polling to provide card detection.
1992 */
1993 if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL) &&
1994 !(host->caps2 & MMC_CAP2_DETECT_ON_ERR))
1995 return ret;
1996
1997 host->detect_change = 0;
1998 if (!ret) {
1999 ret = _mmc_detect_card_removed(host);
2000 if (ret && (host->caps2 & MMC_CAP2_DETECT_ON_ERR)) {
2001 /*
2002 * Schedule the detect work as soon as possible to let a
2003 * rescan handle the card removal.
2004 */
2005 cancel_delayed_work(&host->detect);
2006 mmc_detect_change(host, 0);
2007 }
2008 }
2009
2010 return ret;
2011}
2012EXPORT_SYMBOL(mmc_detect_card_removed);
2013
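/*
 * Delayed work that handles card-detect changes. A registered removable
 * card is re-checked by its bus handler; if no bus handler remains (the
 * card went away or none was ever attached), a new card is probed at
 * progressively lower clock frequencies down to the host's minimum.
 */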
2014void mmc_rescan(struct work_struct *work)
2015{
2016 struct mmc_host *host =
2017 container_of(work, struct mmc_host, detect.work);
2018 int i;
2019
2020 if (host->rescan_disable)
2021 return;
2022
2023 mmc_bus_get(host);
2024
2025 /*
2026 * if there is a _removable_ card registered, check whether it is
2027 * still present
2028 */
2029 if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
2030 && !(host->caps & MMC_CAP_NONREMOVABLE))
2031 host->bus_ops->detect(host);
2032
2033 host->detect_change = 0;
2034
2035 /*
2036 * Let mmc_bus_put() free the bus/bus_ops if we've found that
2037 * the card is no longer present.
2038 */
2039 mmc_bus_put(host);
2040 mmc_bus_get(host);
2041
2042 /* if there still is a card present, stop here */
2043 if (host->bus_ops != NULL) {
2044 mmc_bus_put(host);
2045 goto out;
2046 }
2047
2048 /*
2049 * Only we can add a new handler, so it's safe to
2050 * release the lock here.
2051 */
2052 mmc_bus_put(host);
2053
2054 if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
2055 mmc_claim_host(host);
2056 mmc_power_off(host);
2057 mmc_release_host(host);
2058 goto out;
2059 }
2060
2061 mmc_claim_host(host);
2062 for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2063 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
2064 break;
2065 if (freqs[i] <= host->f_min)
2066 break;
2067 }
2068 mmc_release_host(host);
2069
2070 out:
2071 if (host->caps & MMC_CAP_NEEDS_POLL)
2072 mmc_schedule_delayed_work(&host->detect, HZ);
2073}
2074
2075void mmc_start_host(struct mmc_host *host)
2076{
2077 host->f_init = max(freqs[0], host->f_min);
2078 mmc_power_up(host);
2079 mmc_detect_change(host, 0);
2080}
2081
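/*
 * Shut the host down: cancel pending detect work, remove any attached
 * card via its bus handler, detach the bus handler and power the bus off.
 */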
2082void mmc_stop_host(struct mmc_host *host)
2083{
2084#ifdef CONFIG_MMC_DEBUG
2085 unsigned long flags;
2086 spin_lock_irqsave(&host->lock, flags);
2087 host->removed = 1;
2088 spin_unlock_irqrestore(&host->lock, flags);
2089#endif
2090
2091 cancel_delayed_work_sync(&host->detect);
2092 mmc_flush_scheduled_work();
2093
2094 /* clear pm flags now and let card drivers set them as needed */
2095 host->pm_flags = 0;
2096
2097 mmc_bus_get(host);
2098 if (host->bus_ops && !host->bus_dead) {
2099 /* Calling bus_ops->remove() with a claimed host can deadlock */
2100 if (host->bus_ops->remove)
2101 host->bus_ops->remove(host);
2102
2103 mmc_claim_host(host);
2104 mmc_detach_bus(host);
2105 mmc_power_off(host);
2106 mmc_release_host(host);
2107 mmc_bus_put(host);
2108 return;
2109 }
2110 mmc_bus_put(host);
2111
2112 BUG_ON(host->card);
2113
2114 mmc_power_off(host);
2115}
2116
2117int mmc_power_save_host(struct mmc_host *host)
2118{
2119 int ret = 0;
2120
2121#ifdef CONFIG_MMC_DEBUG
2122 pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
2123#endif
2124
2125 mmc_bus_get(host);
2126
2127 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
2128 mmc_bus_put(host);
2129 return -EINVAL;
2130 }
2131
2132 if (host->bus_ops->power_save)
2133 ret = host->bus_ops->power_save(host);
2134
2135 mmc_bus_put(host);
2136
2137 mmc_power_off(host);
2138
2139 return ret;
2140}
2141EXPORT_SYMBOL(mmc_power_save_host);
2142
2143int mmc_power_restore_host(struct mmc_host *host)
2144{
2145 int ret;
2146
2147#ifdef CONFIG_MMC_DEBUG
2148 pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
2149#endif
2150
2151 mmc_bus_get(host);
2152
2153 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
2154 mmc_bus_put(host);
2155 return -EINVAL;
2156 }
2157
2158 mmc_power_up(host);
2159 ret = host->bus_ops->power_restore(host);
2160
2161 mmc_bus_put(host);
2162
2163 return ret;
2164}
2165EXPORT_SYMBOL(mmc_power_restore_host);
2166
2167int mmc_card_awake(struct mmc_host *host)
2168{
2169 int err = -ENOSYS;
2170
2171 if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
2172 return 0;
2173
2174 mmc_bus_get(host);
2175
2176 if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
2177 err = host->bus_ops->awake(host);
2178
2179 mmc_bus_put(host);
2180
2181 return err;
2182}
2183EXPORT_SYMBOL(mmc_card_awake);
2184
2185int mmc_card_sleep(struct mmc_host *host)
2186{
2187 int err = -ENOSYS;
2188
2189 if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
2190 return 0;
2191
2192 mmc_bus_get(host);
2193
2194 if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
2195 err = host->bus_ops->sleep(host);
2196
2197 mmc_bus_put(host);
2198
2199 return err;
2200}
2201EXPORT_SYMBOL(mmc_card_sleep);
2202
2203int mmc_card_can_sleep(struct mmc_host *host)
2204{
2205 struct mmc_card *card = host->card;
2206
2207 if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
2208 return 1;
2209 return 0;
2210}
2211EXPORT_SYMBOL(mmc_card_can_sleep);
2212
2213/*
2214 * Flush the cache to the non-volatile storage.
2215 */
2216int mmc_flush_cache(struct mmc_card *card)
2217{
2218 struct mmc_host *host = card->host;
2219 int err = 0;
2220
2221 if (!(host->caps2 & MMC_CAP2_CACHE_CTRL))
2222 return err;
2223
2224 if (mmc_card_mmc(card) &&
2225 (card->ext_csd.cache_size > 0) &&
2226 (card->ext_csd.cache_ctrl & 1)) {
2227 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2228 EXT_CSD_FLUSH_CACHE, 1, 0);
2229 if (err)
2230 pr_err("%s: cache flush error %d\n",
2231 mmc_hostname(card->host), err);
2232 }
2233
2234 return err;
2235}
2236EXPORT_SYMBOL(mmc_flush_cache);
2237
2238/*
2239 * Turn the cache ON/OFF.
2240 * Turning the cache OFF shall trigger flushing of the data
2241 * to the non-volatile storage.
2242 */
2243int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
2244{
2245 struct mmc_card *card = host->card;
2246 unsigned int timeout;
2247 int err = 0;
2248
2249 if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
2250 mmc_card_is_removable(host))
2251 return err;
2252
2253 mmc_claim_host(host);
2254 if (card && mmc_card_mmc(card) &&
2255 (card->ext_csd.cache_size > 0)) {
2256 enable = !!enable;
2257
2258 if (card->ext_csd.cache_ctrl ^ enable) {
2259 timeout = enable ? card->ext_csd.generic_cmd6_time : 0;
2260 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2261 EXT_CSD_CACHE_CTRL, enable, timeout);
2262 if (err)
2263 pr_err("%s: cache %s error %d\n",
2264 mmc_hostname(card->host),
2265 enable ? "on" : "off",
2266 err);
2267 else
2268 card->ext_csd.cache_ctrl = enable;
2269 }
2270 }
2271 mmc_release_host(host);
2272
2273 return err;
2274}
2275EXPORT_SYMBOL(mmc_cache_ctrl);
2276
2277#ifdef CONFIG_PM
2278
2279/**
2280 * mmc_suspend_host - suspend a host
2281 * @host: mmc host
2282 */
2283int mmc_suspend_host(struct mmc_host *host)
2284{
2285 int err = 0;
2286
2287 cancel_delayed_work(&host->detect);
2288 mmc_flush_scheduled_work();
2289
2290 err = mmc_cache_ctrl(host, 0);
2291 if (err)
2292 goto out;
2293
2294 mmc_bus_get(host);
2295 if (host->bus_ops && !host->bus_dead) {
2296
2297 if (host->bus_ops->suspend)
2298 err = host->bus_ops->suspend(host);
2299
2300 if (err == -ENOSYS || !host->bus_ops->resume) {
2301 /*
2302 * We simply "remove" the card in this case.
2303 * It will be redetected on resume. (Calling
2304 * bus_ops->remove() with a claimed host can
2305 * deadlock.)
2306 */
2307 if (host->bus_ops->remove)
2308 host->bus_ops->remove(host);
2309 mmc_claim_host(host);
2310 mmc_detach_bus(host);
2311 mmc_power_off(host);
2312 mmc_release_host(host);
2313 host->pm_flags = 0;
2314 err = 0;
2315 }
2316 }
2317 mmc_bus_put(host);
2318
2319 if (!err && !mmc_card_keep_power(host))
2320 mmc_power_off(host);
2321
2322out:
2323 return err;
2324}
2325
2326EXPORT_SYMBOL(mmc_suspend_host);
2327
2328/**
2329 * mmc_resume_host - resume a previously suspended host
2330 * @host: mmc host
2331 */
2332int mmc_resume_host(struct mmc_host *host)
2333{
2334 int err = 0;
2335
2336 mmc_bus_get(host);
2337 if (host->bus_ops && !host->bus_dead) {
2338 if (!mmc_card_keep_power(host)) {
2339 mmc_power_up(host);
2340 mmc_select_voltage(host, host->ocr);
2341 /*
2342 * Tell runtime PM core we just powered up the card,
2343 * since it still believes the card is powered off.
2344 * Note that currently runtime PM is only enabled
2345 * for SDIO cards on hosts that set MMC_CAP_POWER_OFF_CARD.
2346 */
2347 if (mmc_card_sdio(host->card) &&
2348 (host->caps & MMC_CAP_POWER_OFF_CARD)) {
2349 pm_runtime_disable(&host->card->dev);
2350 pm_runtime_set_active(&host->card->dev);
2351 pm_runtime_enable(&host->card->dev);
2352 }
2353 }
2354 BUG_ON(!host->bus_ops->resume);
2355 err = host->bus_ops->resume(host);
2356 if (err) {
2357 pr_warning("%s: error %d during resume "
2358 "(card was removed?)\n",
2359 mmc_hostname(host), err);
2360 err = 0;
2361 }
2362 }
2363 host->pm_flags &= ~MMC_PM_KEEP_POWER;
2364 mmc_bus_put(host);
2365
2366 return err;
2367}
2368EXPORT_SYMBOL(mmc_resume_host);
2369
2370 /* Do the card removal on suspend if the card is assumed removable.
2371 * Do that in the PM notifier while userspace isn't yet frozen, so
2372 * we will be able to sync the card.
2373 */
2374int mmc_pm_notify(struct notifier_block *notify_block,
2375 unsigned long mode, void *unused)
2376{
2377 struct mmc_host *host = container_of(
2378 notify_block, struct mmc_host, pm_notify);
2379 unsigned long flags;
2380
2381
2382 switch (mode) {
2383 case PM_HIBERNATION_PREPARE:
2384 case PM_SUSPEND_PREPARE:
2385
2386 spin_lock_irqsave(&host->lock, flags);
2387 host->rescan_disable = 1;
2388 host->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
2389 spin_unlock_irqrestore(&host->lock, flags);
2390 cancel_delayed_work_sync(&host->detect);
2391
2392 if (!host->bus_ops || host->bus_ops->suspend)
2393 break;
2394
2395 /* Calling bus_ops->remove() with a claimed host can deadlock */
2396 if (host->bus_ops->remove)
2397 host->bus_ops->remove(host);
2398
2399 mmc_claim_host(host);
2400 mmc_detach_bus(host);
2401 mmc_power_off(host);
2402 mmc_release_host(host);
2403 host->pm_flags = 0;
2404 break;
2405
2406 case PM_POST_SUSPEND:
2407 case PM_POST_HIBERNATION:
2408 case PM_POST_RESTORE:
2409
2410 spin_lock_irqsave(&host->lock, flags);
2411 host->rescan_disable = 0;
2412 host->power_notify_type = MMC_HOST_PW_NOTIFY_LONG;
2413 spin_unlock_irqrestore(&host->lock, flags);
2414 mmc_detect_change(host, 0);
2415
2416 }
2417
2418 return 0;
2419}
2420#endif
2421
2422static int __init mmc_init(void)
2423{
2424 int ret;
2425
2426 workqueue = alloc_ordered_workqueue("kmmcd", 0);
2427 if (!workqueue)
2428 return -ENOMEM;
2429
2430 ret = mmc_register_bus();
2431 if (ret)
2432 goto destroy_workqueue;
2433
2434 ret = mmc_register_host_class();
2435 if (ret)
2436 goto unregister_bus;
2437
2438 ret = sdio_register_bus();
2439 if (ret)
2440 goto unregister_host_class;
2441
2442 return 0;
2443
2444unregister_host_class:
2445 mmc_unregister_host_class();
2446unregister_bus:
2447 mmc_unregister_bus();
2448destroy_workqueue:
2449 destroy_workqueue(workqueue);
2450
2451 return ret;
2452}
2453
2454static void __exit mmc_exit(void)
2455{
2456 sdio_unregister_bus();
2457 mmc_unregister_host_class();
2458 mmc_unregister_bus();
2459 destroy_workqueue(workqueue);
2460}
2461
2462subsys_initcall(mmc_init);
2463module_exit(mmc_exit);
2464
2465MODULE_LICENSE("GPL");