// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-rq-qos.h"

static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%llu, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}

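/*
 * Poll stats come in read/write pairs, one pair per power-of-two I/O size
 * starting at 512 bytes. A read of this file might look roughly like this
 * (values are illustrative, not from a real device):
 *
 *	read (512 Bytes): samples=3, mean=17240, min=9300, max=29010
 *	write (512 Bytes): samples=0
 *	...
 */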
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	if (!q->poll_stat)
		return 0;

	for (bucket = 0; bucket < (BLK_MQ_POLL_STATS_BKTS / 2); bucket++) {
		seq_printf(m, "read (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket + 1]);
		seq_puts(m, "\n");
	}
	return 0;
}

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start = queue_requeue_list_start,
	.next = queue_requeue_list_next,
	.stop = queue_requeue_list_stop,
	.show = blk_mq_debugfs_rq_show,
};

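/*
 * Decode a bit mask into a '|'-separated list of flag names. Bits that have
 * no entry in flag_name[] are printed as their bit number, so a hypothetical
 * mask with bits 0 and 25 set could render as e.g. "STOPPED|25".
 */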
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(STABLE_WRITES),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(PCI_P2PDMA),
	QUEUE_FLAG_NAME(ZONE_RESETALL),
	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
	QUEUE_FLAG_NAME(HCTX_ACTIVE),
	QUEUE_FLAG_NAME(NOWAIT),
};
#undef QUEUE_FLAG_NAME

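/*
 * Reading the "state" attribute decodes q->queue_flags, e.g. (illustrative
 * output for a registered, non-rotational queue):
 *
 *	SAME_COMP|NONROT|IO_STAT|INIT_DONE|REGISTERED|NOWAIT
 */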
static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

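/*
 * Writing "run", "start" or "kick" pokes the queue from user space, e.g.
 * (the directory name matches the disk and is illustrative here):
 *
 *	# echo kick > /sys/kernel/debug/block/nvme0n1/state
 */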
static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed when the queue is removed. Don't
	 * allow setting the state on a dying queue to avoid a use-after-free.
	 */
	if (blk_queue_dying(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
	HCTX_FLAG_NAME(STACKING),
	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
};
#undef HCTX_FLAG_NAME

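/*
 * The tag allocation policy lives in a bit field inside hctx->flags. Print
 * it by name first, then XOR those bits back out so the remaining flags can
 * be decoded against hctx_flag_name[] without the policy field showing up
 * as bogus flag bits.
 */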
static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(POLLED),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(STARTED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(MQ_POLL_SLEPT),
	RQF_NAME(TIMED_OUT),
	RQF_NAME(ELV),
	RQF_NAME(RESV),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE] = "idle",
	[MQ_RQ_IN_FLIGHT] = "in_flight",
	[MQ_RQ_COMPLETE] = "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

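/*
 * Dump one request as a single line. A hypothetical entry could look like
 * (hashed pointer, flags and tags are all illustrative):
 *
 *	00000000deadbeef {.op=READ, .cmd_flags=SYNC, .rq_flags=STARTED|ELV,
 *	.state=in_flight, .tag=12, .internal_tag=-1}
 */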
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const enum req_op op = req_op(rq);
	const char *op_str = blk_op_str(op);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, (__force unsigned int)(rq->cmd_flags & ~REQ_OP_MASK),
		       cmd_flag_name, ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start = hctx_dispatch_start,
	.next = hctx_dispatch_next,
	.stop = hctx_dispatch_stop,
	.show = blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file *m;
	struct blk_mq_hw_ctx *hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT] = "default",
	[HCTX_TYPE_READ] = "read",
	[HCTX_TYPE_POLL] = "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

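/*
 * q->sysfs_lock serializes the readers below against hctx->tags and
 * hctx->sched_tags being torn down by a concurrent queue update; taking it
 * interruptibly keeps a reader killable instead of stuck if the lock is
 * held for a long time.
 */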
static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", __blk_mq_active_requests(hctx));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}

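/*
 * Stamp out one set of seq_file start/next/stop operations per per-CPU
 * request list type (default, read, poll). Each instantiation walks
 * ctx->rq_lists[type] under ctx->lock; only the list index differs.
 */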
#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				       loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start = ctx_##name##_rq_list_start,				\
	.next = ctx_##name##_rq_list_next,				\
	.stop = ctx_##name##_rq_list_stop,				\
	.show = blk_mq_debugfs_rq_show,					\
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);

static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

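/*
 * An attribute provides either .seq_ops (multi-record, read-only output) or
 * a .show callback (single-record output, optionally paired with .write).
 * Pick the matching seq_file flavour here; release() mirrors this choice.
 */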
static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open = blk_mq_debugfs_open,
	.read = seq_read,
	.write = blk_mq_debugfs_write,
	.llseek = seq_lseek,
	.release = blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{},
};

static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	d_inode(parent)->i_private = data;

	for (; attr->name; attr++)
		debugfs_create_file(attr->name, attr->mode, parent,
				    (void *)attr, &blk_mq_debugfs_fops);
}

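/*
 * The resulting hierarchy under /sys/kernel/debug/block/<disk>/ looks
 * roughly like this (hypothetical device with one hardware queue):
 *
 *	poll_stat  requeue_list  pm_only  state
 *	sched/			only present with an I/O scheduler
 *	rqos/<policy>/		only present with rq-qos policies
 *	hctx0/			state, flags, dispatch, busy, ...
 *	hctx0/cpu0/		default_rq_list, read_rq_list, poll_rq_list
 */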
void blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir)
			blk_mq_debugfs_register_hctx(q, hctx);
		if (q->elevator && !hctx->sched_debugfs_dir)
			blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	if (!q->debugfs_dir)
		return;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

	hctx_for_each_ctx(hctx, ctx, i)
		blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (!hctx->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * If the parent directory has not been created yet, return; we will
	 * be called again later on and the directory/files will be created
	 * then.
	 */
	if (!q->debugfs_dir)
		return;

	if (!e->queue_debugfs_attrs)
		return;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	lockdep_assert_held(&q->debugfs_mutex);

	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

static const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	}
	return "unknown";
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	lockdep_assert_held(&rqos->q->debugfs_mutex);

	if (!rqos->q->debugfs_dir)
		return;
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}

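/*
 * The shared "rqos" parent directory is created lazily on the first policy
 * registration, so queues without any rq-qos policy never get one.
 */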
void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->q;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	lockdep_assert_held(&q->debugfs_mutex);

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return;

	if (!q->rqos_debugfs_dir)
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);

	rqos->debugfs_dir = debugfs_create_dir(dir_name,
					       rqos->q->rqos_debugfs_dir);

	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * If the parent debugfs directory has not been created yet, return;
	 * we will be called again later on with the appropriate parent
	 * debugfs directory from blk_register_queue().
	 */
	if (!hctx->debugfs_dir)
		return;

	if (!e->hctx_debugfs_attrs)
		return;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
			     e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	lockdep_assert_held(&hctx->queue->debugfs_mutex);

	if (!hctx->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}