Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2003 Russell King, All Rights Reserved.
4 * Copyright 2006-2007 Pierre Ossman
5 */
6#include <linux/slab.h>
7#include <linux/module.h>
8#include <linux/blkdev.h>
9#include <linux/freezer.h>
10#include <linux/kthread.h>
11#include <linux/scatterlist.h>
12#include <linux/dma-mapping.h>
13#include <linux/backing-dev.h>
14
15#include <linux/mmc/card.h>
16#include <linux/mmc/host.h>
17
18#include "queue.h"
19#include "block.h"
20#include "core.h"
21#include "card.h"
22#include "host.h"
23
24#define MMC_DMA_MAP_MERGE_SEGMENTS 512
25
26static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
27{
28 /* Allow only 1 DCMD at a time */
29 return mq->in_flight[MMC_ISSUE_DCMD];
30}
31
32void mmc_cqe_check_busy(struct mmc_queue *mq)
33{
34 if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
35 mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;
36
37 mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;
38}
39
40static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
41{
42 return host->caps2 & MMC_CAP2_CQE_DCMD;
43}
44
45static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
46 struct request *req)
47{
48 switch (req_op(req)) {
49 case REQ_OP_DRV_IN:
50 case REQ_OP_DRV_OUT:
51 case REQ_OP_DISCARD:
52 case REQ_OP_SECURE_ERASE:
53 return MMC_ISSUE_SYNC;
54 case REQ_OP_FLUSH:
55 return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
56 default:
57 return MMC_ISSUE_ASYNC;
58 }
59}
60
61enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
62{
63 struct mmc_host *host = mq->card->host;
64
65 if (mq->use_cqe && !host->hsq_enabled)
66 return mmc_cqe_issue_type(host, req);
67
68 if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
69 return MMC_ISSUE_ASYNC;
70
71 return MMC_ISSUE_SYNC;
72}
73
74static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
75{
76 if (!mq->recovery_needed) {
77 mq->recovery_needed = true;
78 schedule_work(&mq->recovery_work);
79 }
80}
81
82void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
83{
84 struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
85 brq.mrq);
86 struct request *req = mmc_queue_req_to_req(mqrq);
87 struct request_queue *q = req->q;
88 struct mmc_queue *mq = q->queuedata;
89 unsigned long flags;
90
91 spin_lock_irqsave(&mq->lock, flags);
92 __mmc_cqe_recovery_notifier(mq);
93 spin_unlock_irqrestore(&mq->lock, flags);
94}
95
96static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
97{
98 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
99 struct mmc_request *mrq = &mqrq->brq.mrq;
100 struct mmc_queue *mq = req->q->queuedata;
101 struct mmc_host *host = mq->card->host;
102 enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
103 bool recovery_needed = false;
104
105 switch (issue_type) {
106 case MMC_ISSUE_ASYNC:
107 case MMC_ISSUE_DCMD:
108 if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
109 if (recovery_needed)
110 mmc_cqe_recovery_notifier(mrq);
111 return BLK_EH_RESET_TIMER;
112 }
113 /* The request has gone already */
114 return BLK_EH_DONE;
115 default:
116 /* Timeout is handled by mmc core */
117 return BLK_EH_RESET_TIMER;
118 }
119}
120
121static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
122 bool reserved)
123{
124 struct request_queue *q = req->q;
125 struct mmc_queue *mq = q->queuedata;
126 struct mmc_card *card = mq->card;
127 struct mmc_host *host = card->host;
128 unsigned long flags;
129 bool ignore_tout;
130
131 spin_lock_irqsave(&mq->lock, flags);
132 ignore_tout = mq->recovery_needed || !mq->use_cqe || host->hsq_enabled;
133 spin_unlock_irqrestore(&mq->lock, flags);
134
135 return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
136}
137
138static void mmc_mq_recovery_handler(struct work_struct *work)
139{
140 struct mmc_queue *mq = container_of(work, struct mmc_queue,
141 recovery_work);
142 struct request_queue *q = mq->queue;
143 struct mmc_host *host = mq->card->host;
144
145 mmc_get_card(mq->card, &mq->ctx);
146
147 mq->in_recovery = true;
148
149 if (mq->use_cqe && !host->hsq_enabled)
150 mmc_blk_cqe_recovery(mq);
151 else
152 mmc_blk_mq_recovery(mq);
153
154 mq->in_recovery = false;
155
156 spin_lock_irq(&mq->lock);
157 mq->recovery_needed = false;
158 spin_unlock_irq(&mq->lock);
159
160 if (host->hsq_enabled)
161 host->cqe_ops->cqe_recovery_finish(host);
162
163 mmc_put_card(mq->card, &mq->ctx);
164
165 blk_mq_run_hw_queues(q, true);
166}
167
168static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
169{
170 struct scatterlist *sg;
171
172 sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
173 if (sg)
174 sg_init_table(sg, sg_len);
175
176 return sg;
177}
178
179static void mmc_queue_setup_discard(struct request_queue *q,
180 struct mmc_card *card)
181{
182 unsigned max_discard;
183
184 max_discard = mmc_calc_max_discard(card);
185 if (!max_discard)
186 return;
187
188 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
189 blk_queue_max_discard_sectors(q, max_discard);
190 q->limits.discard_granularity = card->pref_erase << 9;
191 /* granularity must not be greater than max. discard */
192 if (card->pref_erase > max_discard)
193 q->limits.discard_granularity = SECTOR_SIZE;
194 if (mmc_can_secure_erase_trim(card))
195 blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
196}
197
198static unsigned int mmc_get_max_segments(struct mmc_host *host)
199{
200 return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
201 host->max_segs;
202}
203
204/**
205 * mmc_init_request() - initialize the MMC-specific per-request data
206 * @mq: the request queue
207 * @req: the request
208 * @gfp: memory allocation policy
209 */
210static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
211 gfp_t gfp)
212{
213 struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
214 struct mmc_card *card = mq->card;
215 struct mmc_host *host = card->host;
216
217 mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), gfp);
218 if (!mq_rq->sg)
219 return -ENOMEM;
220
221 return 0;
222}
223
224static void mmc_exit_request(struct request_queue *q, struct request *req)
225{
226 struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
227
228 kfree(mq_rq->sg);
229 mq_rq->sg = NULL;
230}
231
232static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
233 unsigned int hctx_idx, unsigned int numa_node)
234{
235 return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
236}
237
238static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
239 unsigned int hctx_idx)
240{
241 struct mmc_queue *mq = set->driver_data;
242
243 mmc_exit_request(mq->queue, req);
244}
245
246static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
247 const struct blk_mq_queue_data *bd)
248{
249 struct request *req = bd->rq;
250 struct request_queue *q = req->q;
251 struct mmc_queue *mq = q->queuedata;
252 struct mmc_card *card = mq->card;
253 struct mmc_host *host = card->host;
254 enum mmc_issue_type issue_type;
255 enum mmc_issued issued;
256 bool get_card, cqe_retune_ok;
257 int ret;
258
259 if (mmc_card_removed(mq->card)) {
260 req->rq_flags |= RQF_QUIET;
261 return BLK_STS_IOERR;
262 }
263
264 issue_type = mmc_issue_type(mq, req);
265
266 spin_lock_irq(&mq->lock);
267
268 if (mq->recovery_needed || mq->busy) {
269 spin_unlock_irq(&mq->lock);
270 return BLK_STS_RESOURCE;
271 }
272
273 switch (issue_type) {
274 case MMC_ISSUE_DCMD:
275 if (mmc_cqe_dcmd_busy(mq)) {
276 mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
277 spin_unlock_irq(&mq->lock);
278 return BLK_STS_RESOURCE;
279 }
280 break;
281 case MMC_ISSUE_ASYNC:
282 /*
283 * For MMC host software queue, we only allow 2 requests in
284 * flight to avoid a long latency.
285 */
286 if (host->hsq_enabled && mq->in_flight[issue_type] > 2) {
287 spin_unlock_irq(&mq->lock);
288 return BLK_STS_RESOURCE;
289 }
290 break;
291 default:
292 /*
293 * Timeouts are handled by mmc core, and we don't have a host
294 * API to abort requests, so we can't handle the timeout anyway.
295 * However, when the timeout happens, blk_mq_complete_request()
296 * no longer works (to stop the request disappearing under us).
297 * To avoid racing with that, set a large timeout.
298 */
299 req->timeout = 600 * HZ;
300 break;
301 }
302
303 /* Parallel dispatch of requests is not supported at the moment */
304 mq->busy = true;
305
306 mq->in_flight[issue_type] += 1;
307 get_card = (mmc_tot_in_flight(mq) == 1);
308 cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);
309
310 spin_unlock_irq(&mq->lock);
311
312 if (!(req->rq_flags & RQF_DONTPREP)) {
313 req_to_mmc_queue_req(req)->retries = 0;
314 req->rq_flags |= RQF_DONTPREP;
315 }
316
317 if (get_card)
318 mmc_get_card(card, &mq->ctx);
319
320 if (mq->use_cqe) {
321 host->retune_now = host->need_retune && cqe_retune_ok &&
322 !host->hold_retune;
323 }
324
325 blk_mq_start_request(req);
326
327 issued = mmc_blk_mq_issue_rq(mq, req);
328
329 switch (issued) {
330 case MMC_REQ_BUSY:
331 ret = BLK_STS_RESOURCE;
332 break;
333 case MMC_REQ_FAILED_TO_START:
334 ret = BLK_STS_IOERR;
335 break;
336 default:
337 ret = BLK_STS_OK;
338 break;
339 }
340
341 if (issued != MMC_REQ_STARTED) {
342 bool put_card = false;
343
344 spin_lock_irq(&mq->lock);
345 mq->in_flight[issue_type] -= 1;
346 if (mmc_tot_in_flight(mq) == 0)
347 put_card = true;
348 mq->busy = false;
349 spin_unlock_irq(&mq->lock);
350 if (put_card)
351 mmc_put_card(card, &mq->ctx);
352 } else {
353 WRITE_ONCE(mq->busy, false);
354 }
355
356 return ret;
357}
358
359static const struct blk_mq_ops mmc_mq_ops = {
360 .queue_rq = mmc_mq_queue_rq,
361 .init_request = mmc_mq_init_request,
362 .exit_request = mmc_mq_exit_request,
363 .complete = mmc_blk_mq_complete,
364 .timeout = mmc_mq_timed_out,
365};
366
367static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
368{
369 struct mmc_host *host = card->host;
370 unsigned block_size = 512;
371
372 blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
373 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
374 if (mmc_can_erase(card))
375 mmc_queue_setup_discard(mq->queue, card);
376
377 if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
378 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
379 blk_queue_max_hw_sectors(mq->queue,
380 min(host->max_blk_count, host->max_req_size / 512));
381 if (host->can_dma_map_merge)
382 WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
383 mmc_dev(host)),
384 "merging was advertised but not possible");
385 blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
386
387 if (mmc_card_mmc(card))
388 block_size = card->ext_csd.data_sector_size;
389
390 blk_queue_logical_block_size(mq->queue, block_size);
391 /*
392 * After blk_queue_can_use_dma_map_merging() was called with succeed,
393 * since it calls blk_queue_virt_boundary(), the mmc should not call
394 * both blk_queue_max_segment_size().
395 */
396 if (!host->can_dma_map_merge)
397 blk_queue_max_segment_size(mq->queue,
398 round_down(host->max_seg_size, block_size));
399
400 dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
401
402 INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
403 INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
404
405 mutex_init(&mq->complete_lock);
406
407 init_waitqueue_head(&mq->wait);
408}
409
410static inline bool mmc_merge_capable(struct mmc_host *host)
411{
412 return host->caps2 & MMC_CAP2_MERGE_CAPABLE;
413}
414
415/* Set queue depth to get a reasonable value for q->nr_requests */
416#define MMC_QUEUE_DEPTH 64
417
418/**
419 * mmc_init_queue - initialise a queue structure.
420 * @mq: mmc queue
421 * @card: mmc card to attach this queue
422 *
423 * Initialise a MMC card request queue.
424 */
425int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
426{
427 struct mmc_host *host = card->host;
428 int ret;
429
430 mq->card = card;
431 mq->use_cqe = host->cqe_enabled;
432
433 spin_lock_init(&mq->lock);
434
435 memset(&mq->tag_set, 0, sizeof(mq->tag_set));
436 mq->tag_set.ops = &mmc_mq_ops;
437 /*
438 * The queue depth for CQE must match the hardware because the request
439 * tag is used to index the hardware queue.
440 */
441 if (mq->use_cqe && !host->hsq_enabled)
442 mq->tag_set.queue_depth =
443 min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
444 else
445 mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
446 mq->tag_set.numa_node = NUMA_NO_NODE;
447 mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
448 mq->tag_set.nr_hw_queues = 1;
449 mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
450 mq->tag_set.driver_data = mq;
451
452 /*
453 * Since blk_mq_alloc_tag_set() calls .init_request() of mmc_mq_ops,
454 * the host->can_dma_map_merge should be set before to get max_segs
455 * from mmc_get_max_segments().
456 */
457 if (mmc_merge_capable(host) &&
458 host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
459 dma_get_merge_boundary(mmc_dev(host)))
460 host->can_dma_map_merge = 1;
461 else
462 host->can_dma_map_merge = 0;
463
464 ret = blk_mq_alloc_tag_set(&mq->tag_set);
465 if (ret)
466 return ret;
467
468 mq->queue = blk_mq_init_queue(&mq->tag_set);
469 if (IS_ERR(mq->queue)) {
470 ret = PTR_ERR(mq->queue);
471 goto free_tag_set;
472 }
473
474 if (mmc_host_is_spi(host) && host->use_spi_crc)
475 mq->queue->backing_dev_info->capabilities |=
476 BDI_CAP_STABLE_WRITES;
477
478 mq->queue->queuedata = mq;
479 blk_queue_rq_timeout(mq->queue, 60 * HZ);
480
481 mmc_setup_queue(mq, card);
482 return 0;
483
484free_tag_set:
485 blk_mq_free_tag_set(&mq->tag_set);
486 return ret;
487}
488
489void mmc_queue_suspend(struct mmc_queue *mq)
490{
491 blk_mq_quiesce_queue(mq->queue);
492
493 /*
494 * The host remains claimed while there are outstanding requests, so
495 * simply claiming and releasing here ensures there are none.
496 */
497 mmc_claim_host(mq->card->host);
498 mmc_release_host(mq->card->host);
499}
500
501void mmc_queue_resume(struct mmc_queue *mq)
502{
503 blk_mq_unquiesce_queue(mq->queue);
504}
505
506void mmc_cleanup_queue(struct mmc_queue *mq)
507{
508 struct request_queue *q = mq->queue;
509
510 /*
511 * The legacy code handled the possibility of being suspended,
512 * so do that here too.
513 */
514 if (blk_queue_quiesced(q))
515 blk_mq_unquiesce_queue(q);
516
517 blk_cleanup_queue(q);
518 blk_mq_free_tag_set(&mq->tag_set);
519
520 /*
521 * A request can be completed before the next request, potentially
522 * leaving a complete_work with nothing to do. Such a work item might
523 * still be queued at this point. Flush it.
524 */
525 flush_work(&mq->complete_work);
526
527 mq->card = NULL;
528}
529
530/*
531 * Prepare the sg list(s) to be handed of to the host driver
532 */
533unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
534{
535 struct request *req = mmc_queue_req_to_req(mqrq);
536
537 return blk_rq_map_sg(mq->queue, req, mqrq->sg);
538}
1/*
2 * Copyright (C) 2003 Russell King, All Rights Reserved.
3 * Copyright 2006-2007 Pierre Ossman
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 */
10#include <linux/slab.h>
11#include <linux/module.h>
12#include <linux/blkdev.h>
13#include <linux/freezer.h>
14#include <linux/kthread.h>
15#include <linux/scatterlist.h>
16#include <linux/dma-mapping.h>
17
18#include <linux/mmc/card.h>
19#include <linux/mmc/host.h>
20
21#include "queue.h"
22#include "block.h"
23
24#define MMC_QUEUE_BOUNCESZ 65536
25
26/*
27 * Prepare a MMC request. This just filters out odd stuff.
28 */
29static int mmc_prep_request(struct request_queue *q, struct request *req)
30{
31 struct mmc_queue *mq = q->queuedata;
32
33 /*
34 * We only like normal block requests and discards.
35 */
36 if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
37 req_op(req) != REQ_OP_SECURE_ERASE) {
38 blk_dump_rq_flags(req, "MMC bad request");
39 return BLKPREP_KILL;
40 }
41
42 if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
43 return BLKPREP_KILL;
44
45 req->rq_flags |= RQF_DONTPREP;
46
47 return BLKPREP_OK;
48}
49
50static int mmc_queue_thread(void *d)
51{
52 struct mmc_queue *mq = d;
53 struct request_queue *q = mq->queue;
54 struct mmc_context_info *cntx = &mq->card->host->context_info;
55
56 current->flags |= PF_MEMALLOC;
57
58 down(&mq->thread_sem);
59 do {
60 struct request *req = NULL;
61
62 spin_lock_irq(q->queue_lock);
63 set_current_state(TASK_INTERRUPTIBLE);
64 req = blk_fetch_request(q);
65 mq->asleep = false;
66 cntx->is_waiting_last_req = false;
67 cntx->is_new_req = false;
68 if (!req) {
69 /*
70 * Dispatch queue is empty so set flags for
71 * mmc_request_fn() to wake us up.
72 */
73 if (mq->mqrq_prev->req)
74 cntx->is_waiting_last_req = true;
75 else
76 mq->asleep = true;
77 }
78 mq->mqrq_cur->req = req;
79 spin_unlock_irq(q->queue_lock);
80
81 if (req || mq->mqrq_prev->req) {
82 bool req_is_special = mmc_req_is_special(req);
83
84 set_current_state(TASK_RUNNING);
85 mmc_blk_issue_rq(mq, req);
86 cond_resched();
87 if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
88 mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
89 continue; /* fetch again */
90 }
91
92 /*
93 * Current request becomes previous request
94 * and vice versa.
95 * In case of special requests, current request
96 * has been finished. Do not assign it to previous
97 * request.
98 */
99 if (req_is_special)
100 mq->mqrq_cur->req = NULL;
101
102 mq->mqrq_prev->brq.mrq.data = NULL;
103 mq->mqrq_prev->req = NULL;
104 swap(mq->mqrq_prev, mq->mqrq_cur);
105 } else {
106 if (kthread_should_stop()) {
107 set_current_state(TASK_RUNNING);
108 break;
109 }
110 up(&mq->thread_sem);
111 schedule();
112 down(&mq->thread_sem);
113 }
114 } while (1);
115 up(&mq->thread_sem);
116
117 return 0;
118}
119
120/*
121 * Generic MMC request handler. This is called for any queue on a
122 * particular host. When the host is not busy, we look for a request
123 * on any queue on this host, and attempt to issue it. This may
124 * not be the queue we were asked to process.
125 */
126static void mmc_request_fn(struct request_queue *q)
127{
128 struct mmc_queue *mq = q->queuedata;
129 struct request *req;
130 struct mmc_context_info *cntx;
131
132 if (!mq) {
133 while ((req = blk_fetch_request(q)) != NULL) {
134 req->rq_flags |= RQF_QUIET;
135 __blk_end_request_all(req, -EIO);
136 }
137 return;
138 }
139
140 cntx = &mq->card->host->context_info;
141
142 if (cntx->is_waiting_last_req) {
143 cntx->is_new_req = true;
144 wake_up_interruptible(&cntx->wait);
145 }
146
147 if (mq->asleep)
148 wake_up_process(mq->thread);
149}
150
151static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
152{
153 struct scatterlist *sg;
154
155 sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
156 if (!sg)
157 *err = -ENOMEM;
158 else {
159 *err = 0;
160 sg_init_table(sg, sg_len);
161 }
162
163 return sg;
164}
165
166static void mmc_queue_setup_discard(struct request_queue *q,
167 struct mmc_card *card)
168{
169 unsigned max_discard;
170
171 max_discard = mmc_calc_max_discard(card);
172 if (!max_discard)
173 return;
174
175 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
176 blk_queue_max_discard_sectors(q, max_discard);
177 if (card->erased_byte == 0 && !mmc_can_discard(card))
178 q->limits.discard_zeroes_data = 1;
179 q->limits.discard_granularity = card->pref_erase << 9;
180 /* granularity must not be greater than max. discard */
181 if (card->pref_erase > max_discard)
182 q->limits.discard_granularity = 0;
183 if (mmc_can_secure_erase_trim(card))
184 queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
185}
186
187#ifdef CONFIG_MMC_BLOCK_BOUNCE
188static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
189 unsigned int bouncesz)
190{
191 int i;
192
193 for (i = 0; i < mq->qdepth; i++) {
194 mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
195 if (!mq->mqrq[i].bounce_buf)
196 goto out_err;
197 }
198
199 return true;
200
201out_err:
202 while (--i >= 0) {
203 kfree(mq->mqrq[i].bounce_buf);
204 mq->mqrq[i].bounce_buf = NULL;
205 }
206 pr_warn("%s: unable to allocate bounce buffers\n",
207 mmc_card_name(mq->card));
208 return false;
209}
210
211static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
212 unsigned int bouncesz)
213{
214 int i, ret;
215
216 for (i = 0; i < mq->qdepth; i++) {
217 mq->mqrq[i].sg = mmc_alloc_sg(1, &ret);
218 if (ret)
219 return ret;
220
221 mq->mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
222 if (ret)
223 return ret;
224 }
225
226 return 0;
227}
228#endif
229
230static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
231{
232 int i, ret;
233
234 for (i = 0; i < mq->qdepth; i++) {
235 mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
236 if (ret)
237 return ret;
238 }
239
240 return 0;
241}
242
243static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
244{
245 kfree(mqrq->bounce_sg);
246 mqrq->bounce_sg = NULL;
247
248 kfree(mqrq->sg);
249 mqrq->sg = NULL;
250
251 kfree(mqrq->bounce_buf);
252 mqrq->bounce_buf = NULL;
253}
254
255static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
256{
257 int i;
258
259 for (i = 0; i < mq->qdepth; i++)
260 mmc_queue_req_free_bufs(&mq->mqrq[i]);
261}
262
263/**
264 * mmc_init_queue - initialise a queue structure.
265 * @mq: mmc queue
266 * @card: mmc card to attach this queue
267 * @lock: queue lock
268 * @subname: partition subname
269 *
270 * Initialise a MMC card request queue.
271 */
272int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
273 spinlock_t *lock, const char *subname)
274{
275 struct mmc_host *host = card->host;
276 u64 limit = BLK_BOUNCE_HIGH;
277 bool bounce = false;
278 int ret = -ENOMEM;
279
280 if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
281 limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
282
283 mq->card = card;
284 mq->queue = blk_init_queue(mmc_request_fn, lock);
285 if (!mq->queue)
286 return -ENOMEM;
287
288 mq->qdepth = 2;
289 mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
290 GFP_KERNEL);
291 if (!mq->mqrq)
292 goto blk_cleanup;
293 mq->mqrq_cur = &mq->mqrq[0];
294 mq->mqrq_prev = &mq->mqrq[1];
295 mq->queue->queuedata = mq;
296
297 blk_queue_prep_rq(mq->queue, mmc_prep_request);
298 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
299 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
300 if (mmc_can_erase(card))
301 mmc_queue_setup_discard(mq->queue, card);
302
303#ifdef CONFIG_MMC_BLOCK_BOUNCE
304 if (host->max_segs == 1) {
305 unsigned int bouncesz;
306
307 bouncesz = MMC_QUEUE_BOUNCESZ;
308
309 if (bouncesz > host->max_req_size)
310 bouncesz = host->max_req_size;
311 if (bouncesz > host->max_seg_size)
312 bouncesz = host->max_seg_size;
313 if (bouncesz > (host->max_blk_count * 512))
314 bouncesz = host->max_blk_count * 512;
315
316 if (bouncesz > 512 &&
317 mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
318 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
319 blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
320 blk_queue_max_segments(mq->queue, bouncesz / 512);
321 blk_queue_max_segment_size(mq->queue, bouncesz);
322
323 ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
324 if (ret)
325 goto cleanup_queue;
326 bounce = true;
327 }
328 }
329#endif
330
331 if (!bounce) {
332 blk_queue_bounce_limit(mq->queue, limit);
333 blk_queue_max_hw_sectors(mq->queue,
334 min(host->max_blk_count, host->max_req_size / 512));
335 blk_queue_max_segments(mq->queue, host->max_segs);
336 blk_queue_max_segment_size(mq->queue, host->max_seg_size);
337
338 ret = mmc_queue_alloc_sgs(mq, host->max_segs);
339 if (ret)
340 goto cleanup_queue;
341 }
342
343 sema_init(&mq->thread_sem, 1);
344
345 mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
346 host->index, subname ? subname : "");
347
348 if (IS_ERR(mq->thread)) {
349 ret = PTR_ERR(mq->thread);
350 goto cleanup_queue;
351 }
352
353 return 0;
354
355 cleanup_queue:
356 mmc_queue_reqs_free_bufs(mq);
357 kfree(mq->mqrq);
358 mq->mqrq = NULL;
359blk_cleanup:
360 blk_cleanup_queue(mq->queue);
361 return ret;
362}
363
364void mmc_cleanup_queue(struct mmc_queue *mq)
365{
366 struct request_queue *q = mq->queue;
367 unsigned long flags;
368
369 /* Make sure the queue isn't suspended, as that will deadlock */
370 mmc_queue_resume(mq);
371
372 /* Then terminate our worker thread */
373 kthread_stop(mq->thread);
374
375 /* Empty the queue */
376 spin_lock_irqsave(q->queue_lock, flags);
377 q->queuedata = NULL;
378 blk_start_queue(q);
379 spin_unlock_irqrestore(q->queue_lock, flags);
380
381 mmc_queue_reqs_free_bufs(mq);
382 kfree(mq->mqrq);
383 mq->mqrq = NULL;
384
385 mq->card = NULL;
386}
387EXPORT_SYMBOL(mmc_cleanup_queue);
388
389/**
390 * mmc_queue_suspend - suspend a MMC request queue
391 * @mq: MMC queue to suspend
392 *
393 * Stop the block request queue, and wait for our thread to
394 * complete any outstanding requests. This ensures that we
395 * won't suspend while a request is being processed.
396 */
397void mmc_queue_suspend(struct mmc_queue *mq)
398{
399 struct request_queue *q = mq->queue;
400 unsigned long flags;
401
402 if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
403 mq->flags |= MMC_QUEUE_SUSPENDED;
404
405 spin_lock_irqsave(q->queue_lock, flags);
406 blk_stop_queue(q);
407 spin_unlock_irqrestore(q->queue_lock, flags);
408
409 down(&mq->thread_sem);
410 }
411}
412
413/**
414 * mmc_queue_resume - resume a previously suspended MMC request queue
415 * @mq: MMC queue to resume
416 */
417void mmc_queue_resume(struct mmc_queue *mq)
418{
419 struct request_queue *q = mq->queue;
420 unsigned long flags;
421
422 if (mq->flags & MMC_QUEUE_SUSPENDED) {
423 mq->flags &= ~MMC_QUEUE_SUSPENDED;
424
425 up(&mq->thread_sem);
426
427 spin_lock_irqsave(q->queue_lock, flags);
428 blk_start_queue(q);
429 spin_unlock_irqrestore(q->queue_lock, flags);
430 }
431}
432
433/*
434 * Prepare the sg list(s) to be handed of to the host driver
435 */
436unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
437{
438 unsigned int sg_len;
439 size_t buflen;
440 struct scatterlist *sg;
441 int i;
442
443 if (!mqrq->bounce_buf)
444 return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
445
446 sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
447
448 mqrq->bounce_sg_len = sg_len;
449
450 buflen = 0;
451 for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
452 buflen += sg->length;
453
454 sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
455
456 return 1;
457}
458
459/*
460 * If writing, bounce the data to the buffer before the request
461 * is sent to the host driver
462 */
463void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
464{
465 if (!mqrq->bounce_buf)
466 return;
467
468 if (rq_data_dir(mqrq->req) != WRITE)
469 return;
470
471 sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
472 mqrq->bounce_buf, mqrq->sg[0].length);
473}
474
475/*
476 * If reading, bounce the data from the buffer after the request
477 * has been handled by the host driver
478 */
479void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
480{
481 if (!mqrq->bounce_buf)
482 return;
483
484 if (rq_data_dir(mqrq->req) != READ)
485 return;
486
487 sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
488 mqrq->bounce_buf, mqrq->sg[0].length);
489}