// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/backing-dev.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "crypto.h"
#include "host.h"

#define MMC_DMA_MAP_MERGE_SEGMENTS	512

static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
	/* Allow only 1 DCMD at a time */
	return mq->in_flight[MMC_ISSUE_DCMD];
}

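/*
 * Clear the DCMD busy flag once no DCMD is in flight any more, so that a
 * new DCMD (e.g. a cache flush) can be queued again.
 */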
void mmc_cqe_check_busy(struct mmc_queue *mq)
{
	if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
		mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;
}

static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_CQE_DCMD;
}

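/*
 * Classify a request for a CQE host: driver-private, discard, secure erase
 * and write-zeroes requests are issued synchronously, a flush is issued as
 * a DCMD when the host supports it, and everything else (normal reads and
 * writes) is issued asynchronously through the CQE.
 */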
static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
					      struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return MMC_ISSUE_SYNC;
	case REQ_OP_FLUSH:
		return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
	default:
		return MMC_ISSUE_ASYNC;
	}
}

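/*
 * Determine how a request is to be issued: CQE hosts (without the software
 * queue) use the CQE classification above, otherwise reads and writes are
 * issued asynchronously and everything else synchronously.
 */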
enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
	struct mmc_host *host = mq->card->host;

	if (host->cqe_enabled && !host->hsq_enabled)
		return mmc_cqe_issue_type(host, req);

	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
		return MMC_ISSUE_ASYNC;

	return MMC_ISSUE_SYNC;
}

static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
{
	if (!mq->recovery_needed) {
		mq->recovery_needed = true;
		schedule_work(&mq->recovery_work);
	}
}

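/*
 * Mark the queue that owns @mrq as needing recovery and schedule the
 * recovery work, taking mq->lock so the work is only scheduled once per
 * recovery cycle.
 */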
void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	__mmc_cqe_recovery_notifier(mq);
	spin_unlock_irqrestore(&mq->lock, flags);
}

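/*
 * Handle a block layer timeout for a CQE request: ask the host's
 * ->cqe_timeout() handler whether the request is still in flight. If it is,
 * trigger recovery when requested and keep the timer running; otherwise the
 * request has already completed.
 */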
static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct mmc_queue *mq = req->q->queuedata;
	struct mmc_host *host = mq->card->host;
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	bool recovery_needed = false;

	switch (issue_type) {
	case MMC_ISSUE_ASYNC:
	case MMC_ISSUE_DCMD:
		if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
			if (recovery_needed)
				mmc_cqe_recovery_notifier(mrq);
			return BLK_EH_RESET_TIMER;
		}
		/* The request has gone already */
		return BLK_EH_DONE;
	default:
		/* Timeout is handled by mmc core */
		return BLK_EH_RESET_TIMER;
	}
}

static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req)
{
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	unsigned long flags;
	bool ignore_tout;

	spin_lock_irqsave(&mq->lock, flags);
	ignore_tout = mq->recovery_needed || !host->cqe_enabled || host->hsq_enabled;
	spin_unlock_irqrestore(&mq->lock, flags);

	return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
}

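/*
 * Work item that performs recovery after an error or timeout: claim the
 * card, run the CQE or blk-mq recovery path, clear the recovery flag and
 * restart the hardware queues.
 */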
static void mmc_mq_recovery_handler(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    recovery_work);
	struct request_queue *q = mq->queue;
	struct mmc_host *host = mq->card->host;

	mmc_get_card(mq->card, &mq->ctx);

	mq->in_recovery = true;

	if (host->cqe_enabled && !host->hsq_enabled)
		mmc_blk_cqe_recovery(mq);
	else
		mmc_blk_mq_recovery(mq);

	mq->in_recovery = false;

	spin_lock_irq(&mq->lock);
	mq->recovery_needed = false;
	spin_unlock_irq(&mq->lock);

	if (host->hsq_enabled)
		host->cqe_ops->cqe_recovery_finish(host);

	mmc_put_card(mq->card, &mq->ctx);

	blk_mq_run_hw_queues(q, true);
}

static struct scatterlist *mmc_alloc_sg(unsigned short sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}

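/*
 * Advertise the card's discard capabilities to the block layer: maximum
 * discard size, discard granularity based on the preferred erase size, and
 * secure erase / write-zeroes support where the card allows it.
 */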
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = SECTOR_SIZE;
	if (mmc_can_secure_erase_trim(card))
		blk_queue_max_secure_erase_sectors(q, max_discard);
	if (mmc_can_trim(card) && card->erased_byte == 0)
		blk_queue_max_write_zeroes_sectors(q, max_discard);
}

static unsigned short mmc_get_max_segments(struct mmc_host *host)
{
	return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
					 host->max_segs;
}

static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
			       unsigned int hctx_idx, unsigned int numa_node)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_queue *mq = set->driver_data;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

	mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), GFP_KERNEL);
	if (!mq_rq->sg)
		return -ENOMEM;

	return 0;
}

static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
				unsigned int hctx_idx)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}

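/*
 * blk-mq ->queue_rq() handler: issue one request to the card. Returns
 * BLK_STS_RESOURCE so the block layer retries later while recovery is
 * pending, while another dispatch is in progress, or while the per-type
 * in-flight limits would be exceeded.
 */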
static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	enum mmc_issue_type issue_type;
	enum mmc_issued issued;
	bool get_card, cqe_retune_ok;
	blk_status_t ret;

	if (mmc_card_removed(mq->card)) {
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}

	issue_type = mmc_issue_type(mq, req);

	spin_lock_irq(&mq->lock);

	if (mq->recovery_needed || mq->busy) {
		spin_unlock_irq(&mq->lock);
		return BLK_STS_RESOURCE;
	}

	switch (issue_type) {
	case MMC_ISSUE_DCMD:
		if (mmc_cqe_dcmd_busy(mq)) {
			mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	case MMC_ISSUE_ASYNC:
		if (host->hsq_enabled && mq->in_flight[issue_type] > host->hsq_depth) {
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	default:
		/*
		 * Timeouts are handled by mmc core, and we don't have a host
		 * API to abort requests, so we can't handle the timeout anyway.
		 * However, when the timeout happens, blk_mq_complete_request()
		 * no longer works (to stop the request disappearing under us).
		 * To avoid racing with that, set a large timeout.
		 */
		req->timeout = 600 * HZ;
		break;
	}

	/* Parallel dispatch of requests is not supported at the moment */
	mq->busy = true;

	mq->in_flight[issue_type] += 1;
	get_card = (mmc_tot_in_flight(mq) == 1);
	cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);

	spin_unlock_irq(&mq->lock);

	if (!(req->rq_flags & RQF_DONTPREP)) {
		req_to_mmc_queue_req(req)->retries = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	if (get_card)
		mmc_get_card(card, &mq->ctx);

	if (host->cqe_enabled) {
		host->retune_now = host->need_retune && cqe_retune_ok &&
				   !host->hold_retune;
	}

	blk_mq_start_request(req);

	issued = mmc_blk_mq_issue_rq(mq, req);

	switch (issued) {
	case MMC_REQ_BUSY:
		ret = BLK_STS_RESOURCE;
		break;
	case MMC_REQ_FAILED_TO_START:
		ret = BLK_STS_IOERR;
		break;
	default:
		ret = BLK_STS_OK;
		break;
	}

	if (issued != MMC_REQ_STARTED) {
		bool put_card = false;

		spin_lock_irq(&mq->lock);
		mq->in_flight[issue_type] -= 1;
		if (mmc_tot_in_flight(mq) == 0)
			put_card = true;
		mq->busy = false;
		spin_unlock_irq(&mq->lock);
		if (put_card)
			mmc_put_card(card, &mq->ctx);
	} else {
		WRITE_ONCE(mq->busy, false);
	}

	return ret;
}

static const struct blk_mq_ops mmc_mq_ops = {
	.queue_rq	= mmc_mq_queue_rq,
	.init_request	= mmc_mq_init_request,
	.exit_request	= mmc_mq_exit_request,
	.complete	= mmc_blk_mq_complete,
	.timeout	= mmc_mq_timed_out,
};

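/*
 * Apply card and host limits to the request queue: maximum request size,
 * segment count and size, logical block size, and discard support, then
 * initialise the recovery and completion infrastructure.
 */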
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned block_size = 512;

	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	if (host->can_dma_map_merge)
		WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
							mmc_dev(host)),
		     "merging was advertised but not possible");
	blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));

	if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
		block_size = card->ext_csd.data_sector_size;
		WARN_ON(block_size != 512 && block_size != 4096);
	}

	blk_queue_logical_block_size(mq->queue, block_size);
	/*
	 * When blk_queue_can_use_dma_map_merging() succeeds, it sets the
	 * virt boundary via blk_queue_virt_boundary(), so
	 * blk_queue_max_segment_size() must not be called as well.
	 */
	if (!host->can_dma_map_merge)
		blk_queue_max_segment_size(mq->queue,
			round_down(host->max_seg_size, block_size));

	dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));

	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

	mutex_init(&mq->complete_lock);

	init_waitqueue_head(&mq->wait);

	mmc_crypto_setup_queue(mq->queue, host);
}

static inline bool mmc_merge_capable(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_MERGE_CAPABLE;
}

/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 *
 * Initialise a MMC card request queue.
 */
struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	struct gendisk *disk;
	int ret;

	mq->card = card;

	spin_lock_init(&mq->lock);

	memset(&mq->tag_set, 0, sizeof(mq->tag_set));
	mq->tag_set.ops = &mmc_mq_ops;
	/*
	 * The queue depth for CQE must match the hardware because the request
	 * tag is used to index the hardware queue.
	 */
	if (host->cqe_enabled && !host->hsq_enabled)
		mq->tag_set.queue_depth =
			min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
	else
		mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
	mq->tag_set.numa_node = NUMA_NO_NODE;
	mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
	mq->tag_set.nr_hw_queues = 1;
	mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
	mq->tag_set.driver_data = mq;

	/*
	 * Since blk_mq_alloc_tag_set() calls .init_request() of mmc_mq_ops,
	 * host->can_dma_map_merge must be set beforehand so that
	 * mmc_get_max_segments() returns the correct number of segments.
	 */
	if (mmc_merge_capable(host) &&
	    host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
	    dma_get_merge_boundary(mmc_dev(host)))
		host->can_dma_map_merge = 1;
	else
		host->can_dma_map_merge = 0;

	ret = blk_mq_alloc_tag_set(&mq->tag_set);
	if (ret)
		return ERR_PTR(ret);

	disk = blk_mq_alloc_disk(&mq->tag_set, mq);
	if (IS_ERR(disk)) {
		blk_mq_free_tag_set(&mq->tag_set);
		return disk;
	}
	mq->queue = disk->queue;

	if (mmc_host_is_spi(host) && host->use_spi_crc)
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
	blk_queue_rq_timeout(mq->queue, 60 * HZ);

	mmc_setup_queue(mq, card);
	return disk;
}

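/*
 * Stop dispatching new requests and ensure none remain in flight, ready
 * for the queue to be suspended.
 */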
void mmc_queue_suspend(struct mmc_queue *mq)
{
	blk_mq_quiesce_queue(mq->queue);

	/*
	 * The host remains claimed while there are outstanding requests, so
	 * simply claiming and releasing here ensures there are none.
	 */
	mmc_claim_host(mq->card->host);
	mmc_release_host(mq->card->host);
}

void mmc_queue_resume(struct mmc_queue *mq)
{
	blk_mq_unquiesce_queue(mq->queue);
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	/*
	 * The legacy code handled the possibility of being suspended,
	 * so do that here too.
	 */
	if (blk_queue_quiesced(q))
		blk_mq_unquiesce_queue(q);

	/*
	 * If the recovery completes the last (and only remaining) request in
	 * the queue, and the card has been removed, we could end up here with
	 * the recovery not quite finished yet, so cancel it.
	 */
	cancel_work_sync(&mq->recovery_work);

	blk_mq_free_tag_set(&mq->tag_set);

	/*
	 * A request can be completed before the next request, potentially
	 * leaving a complete_work with nothing to do. Such a work item might
	 * still be queued at this point. Flush it.
	 */
	flush_work(&mq->complete_work);

	mq->card = NULL;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}