1/*
2 * Copyright (C) 2017 Facebook
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <https://www.gnu.org/licenses/>.
15 */
16
17#include <linux/kernel.h>
18#include <linux/blkdev.h>
19#include <linux/debugfs.h>
20
21#include <linux/blk-mq.h>
22#include "blk.h"
23#include "blk-mq.h"
24#include "blk-mq-debugfs.h"
25#include "blk-mq-tag.h"
26
27static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
28{
29 if (stat->nr_samples) {
30 seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu",
31 stat->nr_samples, stat->mean, stat->min, stat->max);
32 } else {
33 seq_puts(m, "samples=0");
34 }
35}
36
37static int queue_poll_stat_show(void *data, struct seq_file *m)
38{
39 struct request_queue *q = data;
40 int bucket;
41
42 for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS/2; bucket++) {
43 seq_printf(m, "read (%d Bytes): ", 1 << (9+bucket));
44 print_stat(m, &q->poll_stat[2*bucket]);
45 seq_puts(m, "\n");
46
47 seq_printf(m, "write (%d Bytes): ", 1 << (9+bucket));
48 print_stat(m, &q->poll_stat[2*bucket+1]);
49 seq_puts(m, "\n");
50 }
51 return 0;
52}
53
54static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
55 __acquires(&q->requeue_lock)
56{
57 struct request_queue *q = m->private;
58
59 spin_lock_irq(&q->requeue_lock);
60 return seq_list_start(&q->requeue_list, *pos);
61}
62
63static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
64{
65 struct request_queue *q = m->private;
66
67 return seq_list_next(v, &q->requeue_list, pos);
68}
69
70static void queue_requeue_list_stop(struct seq_file *m, void *v)
71 __releases(&q->requeue_lock)
72{
73 struct request_queue *q = m->private;
74
75 spin_unlock_irq(&q->requeue_lock);
76}
77
78static const struct seq_operations queue_requeue_list_seq_ops = {
79 .start = queue_requeue_list_start,
80 .next = queue_requeue_list_next,
81 .stop = queue_requeue_list_stop,
82 .show = blk_mq_debugfs_rq_show,
83};
84
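/*
 * Print the name of each flag bit that is set, separated by '|'. Bits that
 * have no name in flag_name[] are printed as their bit number.
 */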
85static int blk_flags_show(struct seq_file *m, const unsigned long flags,
86 const char *const *flag_name, int flag_name_count)
87{
88 bool sep = false;
89 int i;
90
91 for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
92 if (!(flags & BIT(i)))
93 continue;
94 if (sep)
95 seq_puts(m, "|");
96 sep = true;
97 if (i < flag_name_count && flag_name[i])
98 seq_puts(m, flag_name[i]);
99 else
100 seq_printf(m, "%d", i);
101 }
102 return 0;
103}
104
105#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
106static const char *const blk_queue_flag_name[] = {
107 QUEUE_FLAG_NAME(QUEUED),
108 QUEUE_FLAG_NAME(STOPPED),
109 QUEUE_FLAG_NAME(DYING),
110 QUEUE_FLAG_NAME(BYPASS),
111 QUEUE_FLAG_NAME(BIDI),
112 QUEUE_FLAG_NAME(NOMERGES),
113 QUEUE_FLAG_NAME(SAME_COMP),
114 QUEUE_FLAG_NAME(FAIL_IO),
115 QUEUE_FLAG_NAME(NONROT),
116 QUEUE_FLAG_NAME(IO_STAT),
117 QUEUE_FLAG_NAME(DISCARD),
118 QUEUE_FLAG_NAME(NOXMERGES),
119 QUEUE_FLAG_NAME(ADD_RANDOM),
120 QUEUE_FLAG_NAME(SECERASE),
121 QUEUE_FLAG_NAME(SAME_FORCE),
122 QUEUE_FLAG_NAME(DEAD),
123 QUEUE_FLAG_NAME(INIT_DONE),
124 QUEUE_FLAG_NAME(NO_SG_MERGE),
125 QUEUE_FLAG_NAME(POLL),
126 QUEUE_FLAG_NAME(WC),
127 QUEUE_FLAG_NAME(FUA),
128 QUEUE_FLAG_NAME(FLUSH_NQ),
129 QUEUE_FLAG_NAME(DAX),
130 QUEUE_FLAG_NAME(STATS),
131 QUEUE_FLAG_NAME(POLL_STATS),
132 QUEUE_FLAG_NAME(REGISTERED),
133 QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
134 QUEUE_FLAG_NAME(QUIESCED),
135 QUEUE_FLAG_NAME(PREEMPT_ONLY),
136};
137#undef QUEUE_FLAG_NAME
138
139static int queue_state_show(void *data, struct seq_file *m)
140{
141 struct request_queue *q = data;
142
143 blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
144 ARRAY_SIZE(blk_queue_flag_name));
145 seq_puts(m, "\n");
146 return 0;
147}
148
149static ssize_t queue_state_write(void *data, const char __user *buf,
150 size_t count, loff_t *ppos)
151{
152 struct request_queue *q = data;
153 char opbuf[16] = { }, *op;
154
155 /*
156 * The "state" attribute is removed after blk_cleanup_queue() has called
157 * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
158 * triggering a use-after-free.
159 */
160 if (blk_queue_dead(q))
161 return -ENOENT;
162
163 if (count >= sizeof(opbuf)) {
164 pr_err("%s: operation too long\n", __func__);
165 goto inval;
166 }
167
168 if (copy_from_user(opbuf, buf, count))
169 return -EFAULT;
170 op = strstrip(opbuf);
171 if (strcmp(op, "run") == 0) {
172 blk_mq_run_hw_queues(q, true);
173 } else if (strcmp(op, "start") == 0) {
174 blk_mq_start_stopped_hw_queues(q, true);
175 } else if (strcmp(op, "kick") == 0) {
176 blk_mq_kick_requeue_list(q);
177 } else {
178 pr_err("%s: unsupported operation '%s'\n", __func__, op);
179inval:
180 pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
181 return -EINVAL;
182 }
183 return count;
184}
185
186static int queue_write_hint_show(void *data, struct seq_file *m)
187{
188 struct request_queue *q = data;
189 int i;
190
191 for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
192 seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);
193
194 return 0;
195}
196
197static ssize_t queue_write_hint_store(void *data, const char __user *buf,
198 size_t count, loff_t *ppos)
199{
200 struct request_queue *q = data;
201 int i;
202
203 for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
204 q->write_hints[i] = 0;
205
206 return count;
207}
208
209static int queue_zone_wlock_show(void *data, struct seq_file *m)
210{
211 struct request_queue *q = data;
212 unsigned int i;
213
214 if (!q->seq_zones_wlock)
215 return 0;
216
217 for (i = 0; i < blk_queue_nr_zones(q); i++)
218 if (test_bit(i, q->seq_zones_wlock))
219 seq_printf(m, "%u\n", i);
220
221 return 0;
222}
223
224static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
225 { "poll_stat", 0400, queue_poll_stat_show },
226 { "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
227 { "state", 0600, queue_state_show, queue_state_write },
228 { "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
229 { "zone_wlock", 0400, queue_zone_wlock_show, NULL },
230 { },
231};
232
233#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
234static const char *const hctx_state_name[] = {
235 HCTX_STATE_NAME(STOPPED),
236 HCTX_STATE_NAME(TAG_ACTIVE),
237 HCTX_STATE_NAME(SCHED_RESTART),
238};
239#undef HCTX_STATE_NAME
240
241static int hctx_state_show(void *data, struct seq_file *m)
242{
243 struct blk_mq_hw_ctx *hctx = data;
244
245 blk_flags_show(m, hctx->state, hctx_state_name,
246 ARRAY_SIZE(hctx_state_name));
247 seq_puts(m, "\n");
248 return 0;
249}
250
251#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
252static const char *const alloc_policy_name[] = {
253 BLK_TAG_ALLOC_NAME(FIFO),
254 BLK_TAG_ALLOC_NAME(RR),
255};
256#undef BLK_TAG_ALLOC_NAME
257
258#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
259static const char *const hctx_flag_name[] = {
260 HCTX_FLAG_NAME(SHOULD_MERGE),
261 HCTX_FLAG_NAME(TAG_SHARED),
262 HCTX_FLAG_NAME(SG_MERGE),
263 HCTX_FLAG_NAME(BLOCKING),
264 HCTX_FLAG_NAME(NO_SCHED),
265};
266#undef HCTX_FLAG_NAME
267
268static int hctx_flags_show(void *data, struct seq_file *m)
269{
270 struct blk_mq_hw_ctx *hctx = data;
271 const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);
272
273 seq_puts(m, "alloc_policy=");
274 if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
275 alloc_policy_name[alloc_policy])
276 seq_puts(m, alloc_policy_name[alloc_policy]);
277 else
278 seq_printf(m, "%d", alloc_policy);
279 seq_puts(m, " ");
280 blk_flags_show(m,
281 hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
282 hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
283 seq_puts(m, "\n");
284 return 0;
285}
286
287#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
288static const char *const op_name[] = {
289 REQ_OP_NAME(READ),
290 REQ_OP_NAME(WRITE),
291 REQ_OP_NAME(FLUSH),
292 REQ_OP_NAME(DISCARD),
293 REQ_OP_NAME(ZONE_REPORT),
294 REQ_OP_NAME(SECURE_ERASE),
295 REQ_OP_NAME(ZONE_RESET),
296 REQ_OP_NAME(WRITE_SAME),
297 REQ_OP_NAME(WRITE_ZEROES),
298 REQ_OP_NAME(SCSI_IN),
299 REQ_OP_NAME(SCSI_OUT),
300 REQ_OP_NAME(DRV_IN),
301 REQ_OP_NAME(DRV_OUT),
302};
303#undef REQ_OP_NAME
304
305#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
306static const char *const cmd_flag_name[] = {
307 CMD_FLAG_NAME(FAILFAST_DEV),
308 CMD_FLAG_NAME(FAILFAST_TRANSPORT),
309 CMD_FLAG_NAME(FAILFAST_DRIVER),
310 CMD_FLAG_NAME(SYNC),
311 CMD_FLAG_NAME(META),
312 CMD_FLAG_NAME(PRIO),
313 CMD_FLAG_NAME(NOMERGE),
314 CMD_FLAG_NAME(IDLE),
315 CMD_FLAG_NAME(INTEGRITY),
316 CMD_FLAG_NAME(FUA),
317 CMD_FLAG_NAME(PREFLUSH),
318 CMD_FLAG_NAME(RAHEAD),
319 CMD_FLAG_NAME(BACKGROUND),
320 CMD_FLAG_NAME(NOUNMAP),
321 CMD_FLAG_NAME(NOWAIT),
322};
323#undef CMD_FLAG_NAME
324
325#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
326static const char *const rqf_name[] = {
327 RQF_NAME(SORTED),
328 RQF_NAME(STARTED),
329 RQF_NAME(QUEUED),
330 RQF_NAME(SOFTBARRIER),
331 RQF_NAME(FLUSH_SEQ),
332 RQF_NAME(MIXED_MERGE),
333 RQF_NAME(MQ_INFLIGHT),
334 RQF_NAME(DONTPREP),
335 RQF_NAME(PREEMPT),
336 RQF_NAME(COPY_USER),
337 RQF_NAME(FAILED),
338 RQF_NAME(QUIET),
339 RQF_NAME(ELVPRIV),
340 RQF_NAME(IO_STAT),
341 RQF_NAME(ALLOCED),
342 RQF_NAME(PM),
343 RQF_NAME(HASHED),
344 RQF_NAME(STATS),
345 RQF_NAME(SPECIAL_PAYLOAD),
346 RQF_NAME(ZONE_WRITE_LOCKED),
347 RQF_NAME(MQ_TIMEOUT_EXPIRED),
348 RQF_NAME(MQ_POLL_SLEPT),
349};
350#undef RQF_NAME
351
352static const char *const blk_mq_rq_state_name_array[] = {
353 [MQ_RQ_IDLE] = "idle",
354 [MQ_RQ_IN_FLIGHT] = "in_flight",
355 [MQ_RQ_COMPLETE] = "complete",
356};
357
358static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
359{
360 if (WARN_ON_ONCE((unsigned int)rq_state >=
361 ARRAY_SIZE(blk_mq_rq_state_name_array)))
362 return "(?)";
363 return blk_mq_rq_state_name_array[rq_state];
364}
365
366int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
367{
368 const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
369 const unsigned int op = rq->cmd_flags & REQ_OP_MASK;
370
371 seq_printf(m, "%p {.op=", rq);
372 if (op < ARRAY_SIZE(op_name) && op_name[op])
373 seq_printf(m, "%s", op_name[op]);
374 else
375 seq_printf(m, "%d", op);
376 seq_puts(m, ", .cmd_flags=");
377 blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
378 ARRAY_SIZE(cmd_flag_name));
379 seq_puts(m, ", .rq_flags=");
380 blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
381 ARRAY_SIZE(rqf_name));
382 seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
383 seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
384 rq->internal_tag);
385 if (mq_ops->show_rq)
386 mq_ops->show_rq(m, rq);
387 seq_puts(m, "}\n");
388 return 0;
389}
390EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);
391
392int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
393{
394 return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
395}
396EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);
397
398static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
399 __acquires(&hctx->lock)
400{
401 struct blk_mq_hw_ctx *hctx = m->private;
402
403 spin_lock(&hctx->lock);
404 return seq_list_start(&hctx->dispatch, *pos);
405}
406
407static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
408{
409 struct blk_mq_hw_ctx *hctx = m->private;
410
411 return seq_list_next(v, &hctx->dispatch, pos);
412}
413
414static void hctx_dispatch_stop(struct seq_file *m, void *v)
415 __releases(&hctx->lock)
416{
417 struct blk_mq_hw_ctx *hctx = m->private;
418
419 spin_unlock(&hctx->lock);
420}
421
422static const struct seq_operations hctx_dispatch_seq_ops = {
423 .start = hctx_dispatch_start,
424 .next = hctx_dispatch_next,
425 .stop = hctx_dispatch_stop,
426 .show = blk_mq_debugfs_rq_show,
427};
428
429struct show_busy_params {
430 struct seq_file *m;
431 struct blk_mq_hw_ctx *hctx;
432};
433
434/*
435 * Note: the state of a request may change while this function is in progress,
436 * e.g. due to a concurrent blk_mq_finish_request() call.
437 */
438static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
439{
440 const struct show_busy_params *params = data;
441
442 if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx &&
443 blk_mq_rq_state(rq) != MQ_RQ_IDLE)
444 __blk_mq_debugfs_rq_show(params->m,
445 list_entry_rq(&rq->queuelist));
446}
447
448static int hctx_busy_show(void *data, struct seq_file *m)
449{
450 struct blk_mq_hw_ctx *hctx = data;
451 struct show_busy_params params = { .m = m, .hctx = hctx };
452
453 blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
454 &params);
455
456 return 0;
457}
458
459static int hctx_ctx_map_show(void *data, struct seq_file *m)
460{
461 struct blk_mq_hw_ctx *hctx = data;
462
463 sbitmap_bitmap_show(&hctx->ctx_map, m);
464 return 0;
465}
466
467static void blk_mq_debugfs_tags_show(struct seq_file *m,
468 struct blk_mq_tags *tags)
469{
470 seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
471 seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
472 seq_printf(m, "active_queues=%d\n",
473 atomic_read(&tags->active_queues));
474
475 seq_puts(m, "\nbitmap_tags:\n");
476 sbitmap_queue_show(&tags->bitmap_tags, m);
477
478 if (tags->nr_reserved_tags) {
479 seq_puts(m, "\nbreserved_tags:\n");
480 sbitmap_queue_show(&tags->breserved_tags, m);
481 }
482}
483
484static int hctx_tags_show(void *data, struct seq_file *m)
485{
486 struct blk_mq_hw_ctx *hctx = data;
487 struct request_queue *q = hctx->queue;
488 int res;
489
490 res = mutex_lock_interruptible(&q->sysfs_lock);
491 if (res)
492 goto out;
493 if (hctx->tags)
494 blk_mq_debugfs_tags_show(m, hctx->tags);
495 mutex_unlock(&q->sysfs_lock);
496
497out:
498 return res;
499}
500
501static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
502{
503 struct blk_mq_hw_ctx *hctx = data;
504 struct request_queue *q = hctx->queue;
505 int res;
506
507 res = mutex_lock_interruptible(&q->sysfs_lock);
508 if (res)
509 goto out;
510 if (hctx->tags)
511 sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
512 mutex_unlock(&q->sysfs_lock);
513
514out:
515 return res;
516}
517
518static int hctx_sched_tags_show(void *data, struct seq_file *m)
519{
520 struct blk_mq_hw_ctx *hctx = data;
521 struct request_queue *q = hctx->queue;
522 int res;
523
524 res = mutex_lock_interruptible(&q->sysfs_lock);
525 if (res)
526 goto out;
527 if (hctx->sched_tags)
528 blk_mq_debugfs_tags_show(m, hctx->sched_tags);
529 mutex_unlock(&q->sysfs_lock);
530
531out:
532 return res;
533}
534
535static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
536{
537 struct blk_mq_hw_ctx *hctx = data;
538 struct request_queue *q = hctx->queue;
539 int res;
540
541 res = mutex_lock_interruptible(&q->sysfs_lock);
542 if (res)
543 goto out;
544 if (hctx->sched_tags)
545 sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
546 mutex_unlock(&q->sysfs_lock);
547
548out:
549 return res;
550}
551
552static int hctx_io_poll_show(void *data, struct seq_file *m)
553{
554 struct blk_mq_hw_ctx *hctx = data;
555
556 seq_printf(m, "considered=%lu\n", hctx->poll_considered);
557 seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
558 seq_printf(m, "success=%lu\n", hctx->poll_success);
559 return 0;
560}
561
562static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
563 size_t count, loff_t *ppos)
564{
565 struct blk_mq_hw_ctx *hctx = data;
566
567 hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
568 return count;
569}
570
571static int hctx_dispatched_show(void *data, struct seq_file *m)
572{
573 struct blk_mq_hw_ctx *hctx = data;
574 int i;
575
576 seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);
577
578 for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
579 unsigned int d = 1U << (i - 1);
580
581 seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
582 }
583
584 seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
585 return 0;
586}
587
588static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
589 size_t count, loff_t *ppos)
590{
591 struct blk_mq_hw_ctx *hctx = data;
592 int i;
593
594 for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
595 hctx->dispatched[i] = 0;
596 return count;
597}
598
599static int hctx_queued_show(void *data, struct seq_file *m)
600{
601 struct blk_mq_hw_ctx *hctx = data;
602
603 seq_printf(m, "%lu\n", hctx->queued);
604 return 0;
605}
606
607static ssize_t hctx_queued_write(void *data, const char __user *buf,
608 size_t count, loff_t *ppos)
609{
610 struct blk_mq_hw_ctx *hctx = data;
611
612 hctx->queued = 0;
613 return count;
614}
615
616static int hctx_run_show(void *data, struct seq_file *m)
617{
618 struct blk_mq_hw_ctx *hctx = data;
619
620 seq_printf(m, "%lu\n", hctx->run);
621 return 0;
622}
623
624static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
625 loff_t *ppos)
626{
627 struct blk_mq_hw_ctx *hctx = data;
628
629 hctx->run = 0;
630 return count;
631}
632
633static int hctx_active_show(void *data, struct seq_file *m)
634{
635 struct blk_mq_hw_ctx *hctx = data;
636
637 seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
638 return 0;
639}
640
641static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
642 __acquires(&ctx->lock)
643{
644 struct blk_mq_ctx *ctx = m->private;
645
646 spin_lock(&ctx->lock);
647 return seq_list_start(&ctx->rq_list, *pos);
648}
649
650static void *ctx_rq_list_next(struct seq_file *m, void *v, loff_t *pos)
651{
652 struct blk_mq_ctx *ctx = m->private;
653
654 return seq_list_next(v, &ctx->rq_list, pos);
655}
656
657static void ctx_rq_list_stop(struct seq_file *m, void *v)
658 __releases(&ctx->lock)
659{
660 struct blk_mq_ctx *ctx = m->private;
661
662 spin_unlock(&ctx->lock);
663}
664
665static const struct seq_operations ctx_rq_list_seq_ops = {
666 .start = ctx_rq_list_start,
667 .next = ctx_rq_list_next,
668 .stop = ctx_rq_list_stop,
669 .show = blk_mq_debugfs_rq_show,
670};
671static int ctx_dispatched_show(void *data, struct seq_file *m)
672{
673 struct blk_mq_ctx *ctx = data;
674
675 seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
676 return 0;
677}
678
679static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
680 size_t count, loff_t *ppos)
681{
682 struct blk_mq_ctx *ctx = data;
683
684 ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
685 return count;
686}
687
688static int ctx_merged_show(void *data, struct seq_file *m)
689{
690 struct blk_mq_ctx *ctx = data;
691
692 seq_printf(m, "%lu\n", ctx->rq_merged);
693 return 0;
694}
695
696static ssize_t ctx_merged_write(void *data, const char __user *buf,
697 size_t count, loff_t *ppos)
698{
699 struct blk_mq_ctx *ctx = data;
700
701 ctx->rq_merged = 0;
702 return count;
703}
704
705static int ctx_completed_show(void *data, struct seq_file *m)
706{
707 struct blk_mq_ctx *ctx = data;
708
709 seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
710 return 0;
711}
712
713static ssize_t ctx_completed_write(void *data, const char __user *buf,
714 size_t count, loff_t *ppos)
715{
716 struct blk_mq_ctx *ctx = data;
717
718 ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
719 return count;
720}
721
722static int blk_mq_debugfs_show(struct seq_file *m, void *v)
723{
724 const struct blk_mq_debugfs_attr *attr = m->private;
725 void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;
726
727 return attr->show(data, m);
728}
729
730static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
731 size_t count, loff_t *ppos)
732{
733 struct seq_file *m = file->private_data;
734 const struct blk_mq_debugfs_attr *attr = m->private;
735 void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
736
737 /*
738 * Attributes that only implement .seq_ops are read-only and 'attr' is
739 * the same with 'data' in this case.
740 */
741 if (attr == data || !attr->write)
742 return -EPERM;
743
744 return attr->write(data, buf, count, ppos);
745}
746
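/*
 * Attributes that provide .seq_ops are opened with seq_open() so that their
 * request lists can be iterated; everything else goes through single_open()
 * and the attribute's ->show() callback.
 */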
747static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
748{
749 const struct blk_mq_debugfs_attr *attr = inode->i_private;
750 void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
751 struct seq_file *m;
752 int ret;
753
754 if (attr->seq_ops) {
755 ret = seq_open(file, attr->seq_ops);
756 if (!ret) {
757 m = file->private_data;
758 m->private = data;
759 }
760 return ret;
761 }
762
763 if (WARN_ON_ONCE(!attr->show))
764 return -EPERM;
765
766 return single_open(file, blk_mq_debugfs_show, inode->i_private);
767}
768
769static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
770{
771 const struct blk_mq_debugfs_attr *attr = inode->i_private;
772
773 if (attr->show)
774 return single_release(inode, file);
775 else
776 return seq_release(inode, file);
777}
778
779static const struct file_operations blk_mq_debugfs_fops = {
780 .open = blk_mq_debugfs_open,
781 .read = seq_read,
782 .write = blk_mq_debugfs_write,
783 .llseek = seq_lseek,
784 .release = blk_mq_debugfs_release,
785};
786
787static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
788 {"state", 0400, hctx_state_show},
789 {"flags", 0400, hctx_flags_show},
790 {"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
791 {"busy", 0400, hctx_busy_show},
792 {"ctx_map", 0400, hctx_ctx_map_show},
793 {"tags", 0400, hctx_tags_show},
794 {"tags_bitmap", 0400, hctx_tags_bitmap_show},
795 {"sched_tags", 0400, hctx_sched_tags_show},
796 {"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
797 {"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
798 {"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
799 {"queued", 0600, hctx_queued_show, hctx_queued_write},
800 {"run", 0600, hctx_run_show, hctx_run_write},
801 {"active", 0400, hctx_active_show},
802 {},
803};
804
805static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
806 {"rq_list", 0400, .seq_ops = &ctx_rq_list_seq_ops},
807 {"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
808 {"merged", 0600, ctx_merged_show, ctx_merged_write},
809 {"completed", 0600, ctx_completed_show, ctx_completed_write},
810 {},
811};
812
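/*
 * Stash @data in the parent directory's inode so that blk_mq_debugfs_show()
 * and blk_mq_debugfs_write() can look it up again via d_parent.
 */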
813static bool debugfs_create_files(struct dentry *parent, void *data,
814 const struct blk_mq_debugfs_attr *attr)
815{
816 d_inode(parent)->i_private = data;
817
818 for (; attr->name; attr++) {
819 if (!debugfs_create_file(attr->name, attr->mode, parent,
820 (void *)attr, &blk_mq_debugfs_fops))
821 return false;
822 }
823 return true;
824}
825
826int blk_mq_debugfs_register(struct request_queue *q)
827{
828 struct blk_mq_hw_ctx *hctx;
829 int i;
830
831 if (!blk_debugfs_root)
832 return -ENOENT;
833
834 q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
835 blk_debugfs_root);
836 if (!q->debugfs_dir)
837 return -ENOMEM;
838
839 if (!debugfs_create_files(q->debugfs_dir, q,
840 blk_mq_debugfs_queue_attrs))
841 goto err;
842
843 /*
844 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
845 * didn't exist yet (because we don't know what to name the directory
846 * until the queue is registered to a gendisk).
847 */
848 if (q->elevator && !q->sched_debugfs_dir)
849 blk_mq_debugfs_register_sched(q);
850
851 /* Similarly, blk_mq_init_hctx() couldn't do this previously. */
852 queue_for_each_hw_ctx(q, hctx, i) {
853 if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
854 goto err;
855 if (q->elevator && !hctx->sched_debugfs_dir &&
856 blk_mq_debugfs_register_sched_hctx(q, hctx))
857 goto err;
858 }
859
860 return 0;
861
862err:
863 blk_mq_debugfs_unregister(q);
864 return -ENOMEM;
865}
866
867void blk_mq_debugfs_unregister(struct request_queue *q)
868{
869 debugfs_remove_recursive(q->debugfs_dir);
870 q->sched_debugfs_dir = NULL;
871 q->debugfs_dir = NULL;
872}
873
874static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
875 struct blk_mq_ctx *ctx)
876{
877 struct dentry *ctx_dir;
878 char name[20];
879
880 snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
881 ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
882 if (!ctx_dir)
883 return -ENOMEM;
884
885 if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
886 return -ENOMEM;
887
888 return 0;
889}
890
891int blk_mq_debugfs_register_hctx(struct request_queue *q,
892 struct blk_mq_hw_ctx *hctx)
893{
894 struct blk_mq_ctx *ctx;
895 char name[20];
896 int i;
897
898 if (!q->debugfs_dir)
899 return -ENOENT;
900
901 snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
902 hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
903 if (!hctx->debugfs_dir)
904 return -ENOMEM;
905
906 if (!debugfs_create_files(hctx->debugfs_dir, hctx,
907 blk_mq_debugfs_hctx_attrs))
908 goto err;
909
910 hctx_for_each_ctx(hctx, ctx, i) {
911 if (blk_mq_debugfs_register_ctx(hctx, ctx))
912 goto err;
913 }
914
915 return 0;
916
917err:
918 blk_mq_debugfs_unregister_hctx(hctx);
919 return -ENOMEM;
920}
921
922void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
923{
924 debugfs_remove_recursive(hctx->debugfs_dir);
925 hctx->sched_debugfs_dir = NULL;
926 hctx->debugfs_dir = NULL;
927}
928
929int blk_mq_debugfs_register_hctxs(struct request_queue *q)
930{
931 struct blk_mq_hw_ctx *hctx;
932 int i;
933
934 queue_for_each_hw_ctx(q, hctx, i) {
935 if (blk_mq_debugfs_register_hctx(q, hctx))
936 return -ENOMEM;
937 }
938
939 return 0;
940}
941
942void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
943{
944 struct blk_mq_hw_ctx *hctx;
945 int i;
946
947 queue_for_each_hw_ctx(q, hctx, i)
948 blk_mq_debugfs_unregister_hctx(hctx);
949}
950
951int blk_mq_debugfs_register_sched(struct request_queue *q)
952{
953 struct elevator_type *e = q->elevator->type;
954
955 if (!q->debugfs_dir)
956 return -ENOENT;
957
958 if (!e->queue_debugfs_attrs)
959 return 0;
960
961 q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
962 if (!q->sched_debugfs_dir)
963 return -ENOMEM;
964
965 if (!debugfs_create_files(q->sched_debugfs_dir, q,
966 e->queue_debugfs_attrs))
967 goto err;
968
969 return 0;
970
971err:
972 blk_mq_debugfs_unregister_sched(q);
973 return -ENOMEM;
974}
975
976void blk_mq_debugfs_unregister_sched(struct request_queue *q)
977{
978 debugfs_remove_recursive(q->sched_debugfs_dir);
979 q->sched_debugfs_dir = NULL;
980}
981
982int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
983 struct blk_mq_hw_ctx *hctx)
984{
985 struct elevator_type *e = q->elevator->type;
986
987 if (!hctx->debugfs_dir)
988 return -ENOENT;
989
990 if (!e->hctx_debugfs_attrs)
991 return 0;
992
993 hctx->sched_debugfs_dir = debugfs_create_dir("sched",
994 hctx->debugfs_dir);
995 if (!hctx->sched_debugfs_dir)
996 return -ENOMEM;
997
998 if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx,
999 e->hctx_debugfs_attrs))
1000 return -ENOMEM;
1001
1002 return 0;
1003}
1004
1005void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
1006{
1007 debugfs_remove_recursive(hctx->sched_debugfs_dir);
1008 hctx->sched_debugfs_dir = NULL;
1009}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2017 Facebook
4 */
5
6#include <linux/kernel.h>
7#include <linux/blkdev.h>
8#include <linux/build_bug.h>
9#include <linux/debugfs.h>
10
11#include "blk.h"
12#include "blk-mq.h"
13#include "blk-mq-debugfs.h"
14#include "blk-mq-sched.h"
15#include "blk-rq-qos.h"
16
17static int queue_poll_stat_show(void *data, struct seq_file *m)
18{
19 return 0;
20}
21
22static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
23 __acquires(&q->requeue_lock)
24{
25 struct request_queue *q = m->private;
26
27 spin_lock_irq(&q->requeue_lock);
28 return seq_list_start(&q->requeue_list, *pos);
29}
30
31static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
32{
33 struct request_queue *q = m->private;
34
35 return seq_list_next(v, &q->requeue_list, pos);
36}
37
38static void queue_requeue_list_stop(struct seq_file *m, void *v)
39 __releases(&q->requeue_lock)
40{
41 struct request_queue *q = m->private;
42
43 spin_unlock_irq(&q->requeue_lock);
44}
45
46static const struct seq_operations queue_requeue_list_seq_ops = {
47 .start = queue_requeue_list_start,
48 .next = queue_requeue_list_next,
49 .stop = queue_requeue_list_stop,
50 .show = blk_mq_debugfs_rq_show,
51};
52
53static int blk_flags_show(struct seq_file *m, const unsigned long flags,
54 const char *const *flag_name, int flag_name_count)
55{
56 bool sep = false;
57 int i;
58
59 for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
60 if (!(flags & BIT(i)))
61 continue;
62 if (sep)
63 seq_puts(m, "|");
64 sep = true;
65 if (i < flag_name_count && flag_name[i])
66 seq_puts(m, flag_name[i]);
67 else
68 seq_printf(m, "%d", i);
69 }
70 return 0;
71}
72
73static int queue_pm_only_show(void *data, struct seq_file *m)
74{
75 struct request_queue *q = data;
76
77 seq_printf(m, "%d\n", atomic_read(&q->pm_only));
78 return 0;
79}
80
81#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
82static const char *const blk_queue_flag_name[] = {
83 QUEUE_FLAG_NAME(DYING),
84 QUEUE_FLAG_NAME(NOMERGES),
85 QUEUE_FLAG_NAME(SAME_COMP),
86 QUEUE_FLAG_NAME(FAIL_IO),
87 QUEUE_FLAG_NAME(NOXMERGES),
88 QUEUE_FLAG_NAME(SAME_FORCE),
89 QUEUE_FLAG_NAME(INIT_DONE),
90 QUEUE_FLAG_NAME(STATS),
91 QUEUE_FLAG_NAME(REGISTERED),
92 QUEUE_FLAG_NAME(QUIESCED),
93 QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
94 QUEUE_FLAG_NAME(HCTX_ACTIVE),
95 QUEUE_FLAG_NAME(SQ_SCHED),
96};
97#undef QUEUE_FLAG_NAME
98
99static int queue_state_show(void *data, struct seq_file *m)
100{
101 struct request_queue *q = data;
102
103 BUILD_BUG_ON(ARRAY_SIZE(blk_queue_flag_name) != QUEUE_FLAG_MAX);
104 blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
105 ARRAY_SIZE(blk_queue_flag_name));
106 seq_puts(m, "\n");
107 return 0;
108}
109
110static ssize_t queue_state_write(void *data, const char __user *buf,
111 size_t count, loff_t *ppos)
112{
113 struct request_queue *q = data;
114 char opbuf[16] = { }, *op;
115
116 /*
117 * The "state" attribute is removed when the queue is removed. Don't
118 * allow setting the state on a dying queue to avoid a use-after-free.
119 */
120 if (blk_queue_dying(q))
121 return -ENOENT;
122
123 if (count >= sizeof(opbuf)) {
124 pr_err("%s: operation too long\n", __func__);
125 goto inval;
126 }
127
128 if (copy_from_user(opbuf, buf, count))
129 return -EFAULT;
130 op = strstrip(opbuf);
131 if (strcmp(op, "run") == 0) {
132 blk_mq_run_hw_queues(q, true);
133 } else if (strcmp(op, "start") == 0) {
134 blk_mq_start_stopped_hw_queues(q, true);
135 } else if (strcmp(op, "kick") == 0) {
136 blk_mq_kick_requeue_list(q);
137 } else {
138 pr_err("%s: unsupported operation '%s'\n", __func__, op);
139inval:
140 pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
141 return -EINVAL;
142 }
143 return count;
144}
145
146static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
147 { "poll_stat", 0400, queue_poll_stat_show },
148 { "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
149 { "pm_only", 0600, queue_pm_only_show, NULL },
150 { "state", 0600, queue_state_show, queue_state_write },
151 { "zone_wplugs", 0400, queue_zone_wplugs_show, NULL },
152 { },
153};
154
155#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
156static const char *const hctx_state_name[] = {
157 HCTX_STATE_NAME(STOPPED),
158 HCTX_STATE_NAME(TAG_ACTIVE),
159 HCTX_STATE_NAME(SCHED_RESTART),
160 HCTX_STATE_NAME(INACTIVE),
161};
162#undef HCTX_STATE_NAME
163
164static int hctx_state_show(void *data, struct seq_file *m)
165{
166 struct blk_mq_hw_ctx *hctx = data;
167
168 BUILD_BUG_ON(ARRAY_SIZE(hctx_state_name) != BLK_MQ_S_MAX);
169 blk_flags_show(m, hctx->state, hctx_state_name,
170 ARRAY_SIZE(hctx_state_name));
171 seq_puts(m, "\n");
172 return 0;
173}
174
175#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
176static const char *const alloc_policy_name[] = {
177 BLK_TAG_ALLOC_NAME(FIFO),
178 BLK_TAG_ALLOC_NAME(RR),
179};
180#undef BLK_TAG_ALLOC_NAME
181
182#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
183static const char *const hctx_flag_name[] = {
184 HCTX_FLAG_NAME(SHOULD_MERGE),
185 HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
186 HCTX_FLAG_NAME(STACKING),
187 HCTX_FLAG_NAME(TAG_HCTX_SHARED),
188 HCTX_FLAG_NAME(BLOCKING),
189 HCTX_FLAG_NAME(NO_SCHED),
190 HCTX_FLAG_NAME(NO_SCHED_BY_DEFAULT),
191};
192#undef HCTX_FLAG_NAME
193
194static int hctx_flags_show(void *data, struct seq_file *m)
195{
196 struct blk_mq_hw_ctx *hctx = data;
197 const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);
198
199 BUILD_BUG_ON(ARRAY_SIZE(hctx_flag_name) !=
200 BLK_MQ_F_ALLOC_POLICY_START_BIT);
201 BUILD_BUG_ON(ARRAY_SIZE(alloc_policy_name) != BLK_TAG_ALLOC_MAX);
202
203 seq_puts(m, "alloc_policy=");
204 if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
205 alloc_policy_name[alloc_policy])
206 seq_puts(m, alloc_policy_name[alloc_policy]);
207 else
208 seq_printf(m, "%d", alloc_policy);
209 seq_puts(m, " ");
210 blk_flags_show(m,
211 hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
212 hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
213 seq_puts(m, "\n");
214 return 0;
215}
216
217#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
218static const char *const cmd_flag_name[] = {
219 CMD_FLAG_NAME(FAILFAST_DEV),
220 CMD_FLAG_NAME(FAILFAST_TRANSPORT),
221 CMD_FLAG_NAME(FAILFAST_DRIVER),
222 CMD_FLAG_NAME(SYNC),
223 CMD_FLAG_NAME(META),
224 CMD_FLAG_NAME(PRIO),
225 CMD_FLAG_NAME(NOMERGE),
226 CMD_FLAG_NAME(IDLE),
227 CMD_FLAG_NAME(INTEGRITY),
228 CMD_FLAG_NAME(FUA),
229 CMD_FLAG_NAME(PREFLUSH),
230 CMD_FLAG_NAME(RAHEAD),
231 CMD_FLAG_NAME(BACKGROUND),
232 CMD_FLAG_NAME(NOWAIT),
233 CMD_FLAG_NAME(POLLED),
234 CMD_FLAG_NAME(ALLOC_CACHE),
235 CMD_FLAG_NAME(SWAP),
236 CMD_FLAG_NAME(DRV),
237 CMD_FLAG_NAME(FS_PRIVATE),
238 CMD_FLAG_NAME(ATOMIC),
239 CMD_FLAG_NAME(NOUNMAP),
240};
241#undef CMD_FLAG_NAME
242
243#define RQF_NAME(name) [__RQF_##name] = #name
244static const char *const rqf_name[] = {
245 RQF_NAME(STARTED),
246 RQF_NAME(FLUSH_SEQ),
247 RQF_NAME(MIXED_MERGE),
248 RQF_NAME(DONTPREP),
249 RQF_NAME(SCHED_TAGS),
250 RQF_NAME(USE_SCHED),
251 RQF_NAME(FAILED),
252 RQF_NAME(QUIET),
253 RQF_NAME(IO_STAT),
254 RQF_NAME(PM),
255 RQF_NAME(HASHED),
256 RQF_NAME(STATS),
257 RQF_NAME(SPECIAL_PAYLOAD),
258 RQF_NAME(ZONE_WRITE_PLUGGING),
259 RQF_NAME(TIMED_OUT),
260 RQF_NAME(RESV),
261};
262#undef RQF_NAME
263
264static const char *const blk_mq_rq_state_name_array[] = {
265 [MQ_RQ_IDLE] = "idle",
266 [MQ_RQ_IN_FLIGHT] = "in_flight",
267 [MQ_RQ_COMPLETE] = "complete",
268};
269
270static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
271{
272 if (WARN_ON_ONCE((unsigned int)rq_state >=
273 ARRAY_SIZE(blk_mq_rq_state_name_array)))
274 return "(?)";
275 return blk_mq_rq_state_name_array[rq_state];
276}
277
278int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
279{
280 const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
281 const enum req_op op = req_op(rq);
282 const char *op_str = blk_op_str(op);
283
284 BUILD_BUG_ON(ARRAY_SIZE(cmd_flag_name) != __REQ_NR_BITS);
285 BUILD_BUG_ON(ARRAY_SIZE(rqf_name) != __RQF_BITS);
286
287 seq_printf(m, "%p {.op=", rq);
288 if (strcmp(op_str, "UNKNOWN") == 0)
289 seq_printf(m, "%u", op);
290 else
291 seq_printf(m, "%s", op_str);
292 seq_puts(m, ", .cmd_flags=");
293 blk_flags_show(m, (__force unsigned int)(rq->cmd_flags & ~REQ_OP_MASK),
294 cmd_flag_name, ARRAY_SIZE(cmd_flag_name));
295 seq_puts(m, ", .rq_flags=");
296 blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
297 ARRAY_SIZE(rqf_name));
298 seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
299 seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
300 rq->internal_tag);
301 if (mq_ops->show_rq)
302 mq_ops->show_rq(m, rq);
303 seq_puts(m, "}\n");
304 return 0;
305}
306EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);
307
308int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
309{
310 return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
311}
312EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);
313
314static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
315 __acquires(&hctx->lock)
316{
317 struct blk_mq_hw_ctx *hctx = m->private;
318
319 spin_lock(&hctx->lock);
320 return seq_list_start(&hctx->dispatch, *pos);
321}
322
323static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
324{
325 struct blk_mq_hw_ctx *hctx = m->private;
326
327 return seq_list_next(v, &hctx->dispatch, pos);
328}
329
330static void hctx_dispatch_stop(struct seq_file *m, void *v)
331 __releases(&hctx->lock)
332{
333 struct blk_mq_hw_ctx *hctx = m->private;
334
335 spin_unlock(&hctx->lock);
336}
337
338static const struct seq_operations hctx_dispatch_seq_ops = {
339 .start = hctx_dispatch_start,
340 .next = hctx_dispatch_next,
341 .stop = hctx_dispatch_stop,
342 .show = blk_mq_debugfs_rq_show,
343};
344
345struct show_busy_params {
346 struct seq_file *m;
347 struct blk_mq_hw_ctx *hctx;
348};
349
350/*
351 * Note: the state of a request may change while this function is in progress,
352 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
353 * keep iterating requests.
354 */
355static bool hctx_show_busy_rq(struct request *rq, void *data)
356{
357 const struct show_busy_params *params = data;
358
359 if (rq->mq_hctx == params->hctx)
360 __blk_mq_debugfs_rq_show(params->m, rq);
361
362 return true;
363}
364
365static int hctx_busy_show(void *data, struct seq_file *m)
366{
367 struct blk_mq_hw_ctx *hctx = data;
368 struct show_busy_params params = { .m = m, .hctx = hctx };
369
370 blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
371 &params);
372
373 return 0;
374}
375
376static const char *const hctx_types[] = {
377 [HCTX_TYPE_DEFAULT] = "default",
378 [HCTX_TYPE_READ] = "read",
379 [HCTX_TYPE_POLL] = "poll",
380};
381
382static int hctx_type_show(void *data, struct seq_file *m)
383{
384 struct blk_mq_hw_ctx *hctx = data;
385
386 BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
387 seq_printf(m, "%s\n", hctx_types[hctx->type]);
388 return 0;
389}
390
391static int hctx_ctx_map_show(void *data, struct seq_file *m)
392{
393 struct blk_mq_hw_ctx *hctx = data;
394
395 sbitmap_bitmap_show(&hctx->ctx_map, m);
396 return 0;
397}
398
399static void blk_mq_debugfs_tags_show(struct seq_file *m,
400 struct blk_mq_tags *tags)
401{
402 seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
403 seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
404 seq_printf(m, "active_queues=%d\n",
405 READ_ONCE(tags->active_queues));
406
407 seq_puts(m, "\nbitmap_tags:\n");
408 sbitmap_queue_show(&tags->bitmap_tags, m);
409
410 if (tags->nr_reserved_tags) {
411 seq_puts(m, "\nbreserved_tags:\n");
412 sbitmap_queue_show(&tags->breserved_tags, m);
413 }
414}
415
416static int hctx_tags_show(void *data, struct seq_file *m)
417{
418 struct blk_mq_hw_ctx *hctx = data;
419 struct request_queue *q = hctx->queue;
420 int res;
421
422 res = mutex_lock_interruptible(&q->sysfs_lock);
423 if (res)
424 goto out;
425 if (hctx->tags)
426 blk_mq_debugfs_tags_show(m, hctx->tags);
427 mutex_unlock(&q->sysfs_lock);
428
429out:
430 return res;
431}
432
433static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
434{
435 struct blk_mq_hw_ctx *hctx = data;
436 struct request_queue *q = hctx->queue;
437 int res;
438
439 res = mutex_lock_interruptible(&q->sysfs_lock);
440 if (res)
441 goto out;
442 if (hctx->tags)
443 sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
444 mutex_unlock(&q->sysfs_lock);
445
446out:
447 return res;
448}
449
450static int hctx_sched_tags_show(void *data, struct seq_file *m)
451{
452 struct blk_mq_hw_ctx *hctx = data;
453 struct request_queue *q = hctx->queue;
454 int res;
455
456 res = mutex_lock_interruptible(&q->sysfs_lock);
457 if (res)
458 goto out;
459 if (hctx->sched_tags)
460 blk_mq_debugfs_tags_show(m, hctx->sched_tags);
461 mutex_unlock(&q->sysfs_lock);
462
463out:
464 return res;
465}
466
467static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
468{
469 struct blk_mq_hw_ctx *hctx = data;
470 struct request_queue *q = hctx->queue;
471 int res;
472
473 res = mutex_lock_interruptible(&q->sysfs_lock);
474 if (res)
475 goto out;
476 if (hctx->sched_tags)
477 sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
478 mutex_unlock(&q->sysfs_lock);
479
480out:
481 return res;
482}
483
484static int hctx_active_show(void *data, struct seq_file *m)
485{
486 struct blk_mq_hw_ctx *hctx = data;
487
488 seq_printf(m, "%d\n", __blk_mq_active_requests(hctx));
489 return 0;
490}
491
492static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
493{
494 struct blk_mq_hw_ctx *hctx = data;
495
496 seq_printf(m, "%u\n", hctx->dispatch_busy);
497 return 0;
498}
499
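/*
 * CTX_RQ_SEQ_OPS() generates the seq_file start/next/stop callbacks for one of
 * the per-type software queue request lists (default, read or poll), each
 * protected by ctx->lock.
 */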
500#define CTX_RQ_SEQ_OPS(name, type) \
501static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
502 __acquires(&ctx->lock) \
503{ \
504 struct blk_mq_ctx *ctx = m->private; \
505 \
506 spin_lock(&ctx->lock); \
507 return seq_list_start(&ctx->rq_lists[type], *pos); \
508} \
509 \
510static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v, \
511 loff_t *pos) \
512{ \
513 struct blk_mq_ctx *ctx = m->private; \
514 \
515 return seq_list_next(v, &ctx->rq_lists[type], pos); \
516} \
517 \
518static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v) \
519 __releases(&ctx->lock) \
520{ \
521 struct blk_mq_ctx *ctx = m->private; \
522 \
523 spin_unlock(&ctx->lock); \
524} \
525 \
526static const struct seq_operations ctx_##name##_rq_list_seq_ops = { \
527 .start = ctx_##name##_rq_list_start, \
528 .next = ctx_##name##_rq_list_next, \
529 .stop = ctx_##name##_rq_list_stop, \
530 .show = blk_mq_debugfs_rq_show, \
531}
532
533CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
534CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
535CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);
536
537static int blk_mq_debugfs_show(struct seq_file *m, void *v)
538{
539 const struct blk_mq_debugfs_attr *attr = m->private;
540 void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;
541
542 return attr->show(data, m);
543}
544
545static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
546 size_t count, loff_t *ppos)
547{
548 struct seq_file *m = file->private_data;
549 const struct blk_mq_debugfs_attr *attr = m->private;
550 void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
551
552 /*
553 * Attributes that only implement .seq_ops are read-only and 'attr' is
554 * the same with 'data' in this case.
555 */
556 if (attr == data || !attr->write)
557 return -EPERM;
558
559 return attr->write(data, buf, count, ppos);
560}
561
562static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
563{
564 const struct blk_mq_debugfs_attr *attr = inode->i_private;
565 void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
566 struct seq_file *m;
567 int ret;
568
569 if (attr->seq_ops) {
570 ret = seq_open(file, attr->seq_ops);
571 if (!ret) {
572 m = file->private_data;
573 m->private = data;
574 }
575 return ret;
576 }
577
578 if (WARN_ON_ONCE(!attr->show))
579 return -EPERM;
580
581 return single_open(file, blk_mq_debugfs_show, inode->i_private);
582}
583
584static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
585{
586 const struct blk_mq_debugfs_attr *attr = inode->i_private;
587
588 if (attr->show)
589 return single_release(inode, file);
590
591 return seq_release(inode, file);
592}
593
594static const struct file_operations blk_mq_debugfs_fops = {
595 .open = blk_mq_debugfs_open,
596 .read = seq_read,
597 .write = blk_mq_debugfs_write,
598 .llseek = seq_lseek,
599 .release = blk_mq_debugfs_release,
600};
601
602static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
603 {"state", 0400, hctx_state_show},
604 {"flags", 0400, hctx_flags_show},
605 {"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
606 {"busy", 0400, hctx_busy_show},
607 {"ctx_map", 0400, hctx_ctx_map_show},
608 {"tags", 0400, hctx_tags_show},
609 {"tags_bitmap", 0400, hctx_tags_bitmap_show},
610 {"sched_tags", 0400, hctx_sched_tags_show},
611 {"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
612 {"active", 0400, hctx_active_show},
613 {"dispatch_busy", 0400, hctx_dispatch_busy_show},
614 {"type", 0400, hctx_type_show},
615 {},
616};
617
618static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
619 {"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
620 {"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
621 {"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
622 {},
623};
624
625static void debugfs_create_files(struct dentry *parent, void *data,
626 const struct blk_mq_debugfs_attr *attr)
627{
628 if (IS_ERR_OR_NULL(parent))
629 return;
630
631 d_inode(parent)->i_private = data;
632
633 for (; attr->name; attr++)
634 debugfs_create_file(attr->name, attr->mode, parent,
635 (void *)attr, &blk_mq_debugfs_fops);
636}
637
638void blk_mq_debugfs_register(struct request_queue *q)
639{
640 struct blk_mq_hw_ctx *hctx;
641 unsigned long i;
642
643 debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);
644
645 /*
646 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
647 * didn't exist yet (because we don't know what to name the directory
648 * until the queue is registered to a gendisk).
649 */
650 if (q->elevator && !q->sched_debugfs_dir)
651 blk_mq_debugfs_register_sched(q);
652
653 /* Similarly, blk_mq_init_hctx() couldn't do this previously. */
654 queue_for_each_hw_ctx(q, hctx, i) {
655 if (!hctx->debugfs_dir)
656 blk_mq_debugfs_register_hctx(q, hctx);
657 if (q->elevator && !hctx->sched_debugfs_dir)
658 blk_mq_debugfs_register_sched_hctx(q, hctx);
659 }
660
661 if (q->rq_qos) {
662 struct rq_qos *rqos = q->rq_qos;
663
664 while (rqos) {
665 blk_mq_debugfs_register_rqos(rqos);
666 rqos = rqos->next;
667 }
668 }
669}
670
671static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
672 struct blk_mq_ctx *ctx)
673{
674 struct dentry *ctx_dir;
675 char name[20];
676
677 snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
678 ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
679
680 debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
681}
682
683void blk_mq_debugfs_register_hctx(struct request_queue *q,
684 struct blk_mq_hw_ctx *hctx)
685{
686 struct blk_mq_ctx *ctx;
687 char name[20];
688 int i;
689
690 if (!q->debugfs_dir)
691 return;
692
693 snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
694 hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
695
696 debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);
697
698 hctx_for_each_ctx(hctx, ctx, i)
699 blk_mq_debugfs_register_ctx(hctx, ctx);
700}
701
702void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
703{
704 if (!hctx->queue->debugfs_dir)
705 return;
706 debugfs_remove_recursive(hctx->debugfs_dir);
707 hctx->sched_debugfs_dir = NULL;
708 hctx->debugfs_dir = NULL;
709}
710
711void blk_mq_debugfs_register_hctxs(struct request_queue *q)
712{
713 struct blk_mq_hw_ctx *hctx;
714 unsigned long i;
715
716 queue_for_each_hw_ctx(q, hctx, i)
717 blk_mq_debugfs_register_hctx(q, hctx);
718}
719
720void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
721{
722 struct blk_mq_hw_ctx *hctx;
723 unsigned long i;
724
725 queue_for_each_hw_ctx(q, hctx, i)
726 blk_mq_debugfs_unregister_hctx(hctx);
727}
728
729void blk_mq_debugfs_register_sched(struct request_queue *q)
730{
731 struct elevator_type *e = q->elevator->type;
732
733 lockdep_assert_held(&q->debugfs_mutex);
734
735 /*
736 * If the parent directory has not been created yet, return, we will be
737 * called again later on and the directory/files will be created then.
738 */
739 if (!q->debugfs_dir)
740 return;
741
742 if (!e->queue_debugfs_attrs)
743 return;
744
745 q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
746
747 debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
748}
749
750void blk_mq_debugfs_unregister_sched(struct request_queue *q)
751{
752 lockdep_assert_held(&q->debugfs_mutex);
753
754 debugfs_remove_recursive(q->sched_debugfs_dir);
755 q->sched_debugfs_dir = NULL;
756}
757
758static const char *rq_qos_id_to_name(enum rq_qos_id id)
759{
760 switch (id) {
761 case RQ_QOS_WBT:
762 return "wbt";
763 case RQ_QOS_LATENCY:
764 return "latency";
765 case RQ_QOS_COST:
766 return "cost";
767 }
768 return "unknown";
769}
770
771void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
772{
773 lockdep_assert_held(&rqos->disk->queue->debugfs_mutex);
774
775 if (!rqos->disk->queue->debugfs_dir)
776 return;
777 debugfs_remove_recursive(rqos->debugfs_dir);
778 rqos->debugfs_dir = NULL;
779}
780
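/*
 * Create an rqos/<policy name> directory under the queue's debugfs directory
 * and populate it with the policy's debugfs attributes, unless the policy has
 * none or has already been registered.
 */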
781void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
782{
783 struct request_queue *q = rqos->disk->queue;
784 const char *dir_name = rq_qos_id_to_name(rqos->id);
785
786 lockdep_assert_held(&q->debugfs_mutex);
787
788 if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
789 return;
790
791 if (!q->rqos_debugfs_dir)
792 q->rqos_debugfs_dir = debugfs_create_dir("rqos",
793 q->debugfs_dir);
794
795 rqos->debugfs_dir = debugfs_create_dir(dir_name, q->rqos_debugfs_dir);
796 debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
797}
798
799void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
800 struct blk_mq_hw_ctx *hctx)
801{
802 struct elevator_type *e = q->elevator->type;
803
804 lockdep_assert_held(&q->debugfs_mutex);
805
806 /*
807 * If the parent debugfs directory has not been created yet, return;
808 * We will be called again later on with appropriate parent debugfs
809 * directory from blk_register_queue()
810 */
811 if (!hctx->debugfs_dir)
812 return;
813
814 if (!e->hctx_debugfs_attrs)
815 return;
816
817 hctx->sched_debugfs_dir = debugfs_create_dir("sched",
818 hctx->debugfs_dir);
819 debugfs_create_files(hctx->sched_debugfs_dir, hctx,
820 e->hctx_debugfs_attrs);
821}
822
823void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
824{
825 lockdep_assert_held(&hctx->queue->debugfs_mutex);
826
827 if (!hctx->queue->debugfs_dir)
828 return;
829 debugfs_remove_recursive(hctx->sched_debugfs_dir);
830 hctx->sched_debugfs_dir = NULL;
831}