1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Functions related to sysfs handling
4 */
5#include <linux/kernel.h>
6#include <linux/slab.h>
7#include <linux/module.h>
8#include <linux/bio.h>
9#include <linux/blkdev.h>
10#include <linux/backing-dev.h>
11#include <linux/blktrace_api.h>
12#include <linux/blk-mq.h>
13#include <linux/debugfs.h>
14
15#include "blk.h"
16#include "blk-mq.h"
17#include "blk-mq-debugfs.h"
18#include "blk-mq-sched.h"
19#include "blk-wbt.h"
20#include "blk-cgroup.h"
21#include "blk-throttle.h"
22
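/*
 * Each attribute under /sys/block/<disk>/queue/ is described by a
 * queue_sysfs_entry: the sysfs attribute plus optional show/store
 * callbacks, invoked via queue_attr_show()/queue_attr_store() below
 * with q->sysfs_lock held.
 */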
23struct queue_sysfs_entry {
24 struct attribute attr;
25 ssize_t (*show)(struct request_queue *, char *);
26 ssize_t (*store)(struct request_queue *, const char *, size_t);
27};
28
29static ssize_t
30queue_var_show(unsigned long var, char *page)
31{
32 return sprintf(page, "%lu\n", var);
33}
34
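/*
 * Parse an unsigned decimal value from a sysfs write.  Values above
 * UINT_MAX are rejected so callers can safely truncate to 32 bits.
 * Returns the number of bytes consumed on success (so stores can return
 * it directly) or -EINVAL on malformed or out-of-range input.
 */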
35static ssize_t
36queue_var_store(unsigned long *var, const char *page, size_t count)
37{
38 int err;
39 unsigned long v;
40
41 err = kstrtoul(page, 10, &v);
42 if (err || v > UINT_MAX)
43 return -EINVAL;
44
45 *var = v;
46
47 return count;
48}
49
50static ssize_t queue_var_store64(s64 *var, const char *page)
51{
52 int err;
53 s64 v;
54
55 err = kstrtos64(page, 10, &v);
56 if (err < 0)
57 return err;
58
59 *var = v;
60 return 0;
61}
62
63static ssize_t queue_requests_show(struct request_queue *q, char *page)
64{
65 return queue_var_show(q->nr_requests, page);
66}
67
68static ssize_t
69queue_requests_store(struct request_queue *q, const char *page, size_t count)
70{
71 unsigned long nr;
72 int ret, err;
73
74 if (!queue_is_mq(q))
75 return -EINVAL;
76
77 ret = queue_var_store(&nr, page, count);
78 if (ret < 0)
79 return ret;
80
81 if (nr < BLKDEV_MIN_RQ)
82 nr = BLKDEV_MIN_RQ;
83
84 err = blk_mq_update_nr_requests(q, nr);
85 if (err)
86 return err;
87
88 return ret;
89}
90
91static ssize_t queue_ra_show(struct request_queue *q, char *page)
92{
93 unsigned long ra_kb;
94
95 if (!q->disk)
96 return -EINVAL;
97 ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
98 return queue_var_show(ra_kb, page);
99}
100
101static ssize_t
102queue_ra_store(struct request_queue *q, const char *page, size_t count)
103{
104 unsigned long ra_kb;
105 ssize_t ret;
106
107 if (!q->disk)
108 return -EINVAL;
109 ret = queue_var_store(&ra_kb, page, count);
110 if (ret < 0)
111 return ret;
112 q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
113 return ret;
114}
115
116static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
117{
118 int max_sectors_kb = queue_max_sectors(q) >> 1;
119
120 return queue_var_show(max_sectors_kb, page);
121}
122
123static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
124{
125 return queue_var_show(queue_max_segments(q), page);
126}
127
128static ssize_t queue_max_discard_segments_show(struct request_queue *q,
129 char *page)
130{
131 return queue_var_show(queue_max_discard_segments(q), page);
132}
133
134static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
135{
136 return queue_var_show(q->limits.max_integrity_segments, page);
137}
138
139static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
140{
141 return queue_var_show(queue_max_segment_size(q), page);
142}
143
144static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
145{
146 return queue_var_show(queue_logical_block_size(q), page);
147}
148
149static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
150{
151 return queue_var_show(queue_physical_block_size(q), page);
152}
153
154static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
155{
156 return queue_var_show(q->limits.chunk_sectors, page);
157}
158
159static ssize_t queue_io_min_show(struct request_queue *q, char *page)
160{
161 return queue_var_show(queue_io_min(q), page);
162}
163
164static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
165{
166 return queue_var_show(queue_io_opt(q), page);
167}
168
169static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
170{
171 return queue_var_show(q->limits.discard_granularity, page);
172}
173
174static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
175{
176
177 return sprintf(page, "%llu\n",
178 (unsigned long long)q->limits.max_hw_discard_sectors << 9);
179}
180
181static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
182{
183 return sprintf(page, "%llu\n",
184 (unsigned long long)q->limits.max_discard_sectors << 9);
185}
186
187static ssize_t queue_discard_max_store(struct request_queue *q,
188 const char *page, size_t count)
189{
190 unsigned long max_discard;
191 ssize_t ret = queue_var_store(&max_discard, page, count);
192
193 if (ret < 0)
194 return ret;
195
196 if (max_discard & (q->limits.discard_granularity - 1))
197 return -EINVAL;
198
199 max_discard >>= 9;
200 if (max_discard > UINT_MAX)
201 return -EINVAL;
202
203 if (max_discard > q->limits.max_hw_discard_sectors)
204 max_discard = q->limits.max_hw_discard_sectors;
205
206 q->limits.max_discard_sectors = max_discard;
207 return ret;
208}
209
210static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
211{
212 return queue_var_show(0, page);
213}
214
215static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
216{
217 return queue_var_show(0, page);
218}
219
220static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
221{
222 return sprintf(page, "%llu\n",
223 (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
224}
225
226static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
227 char *page)
228{
229 return queue_var_show(queue_zone_write_granularity(q), page);
230}
231
232static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
233{
234 unsigned long long max_sectors = q->limits.max_zone_append_sectors;
235
236 return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
237}
238
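/*
 * max_sectors_kb may be changed at runtime, but the new value must lie
 * between one page and the smaller of the hardware and per-device limits
 * (max_hw_sectors / max_dev_sectors); out-of-range writes are rejected.
 */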
239static ssize_t
240queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
241{
242 unsigned long max_sectors_kb,
243 max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
244 page_kb = 1 << (PAGE_SHIFT - 10);
245 ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
246
247 if (ret < 0)
248 return ret;
249
250 max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
251 q->limits.max_dev_sectors >> 1);
252
253 if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
254 return -EINVAL;
255
256 spin_lock_irq(&q->queue_lock);
257 q->limits.max_sectors = max_sectors_kb << 1;
258 if (q->disk)
259 q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
260 spin_unlock_irq(&q->queue_lock);
261
262 return ret;
263}
264
265static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
266{
267 int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
268
269 return queue_var_show(max_hw_sectors_kb, page);
270}
271
272static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
273{
274 return queue_var_show(q->limits.virt_boundary_mask, page);
275}
276
277static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
278{
279 return queue_var_show(queue_dma_alignment(q), page);
280}
281
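/*
 * Generate show/store handlers for attributes that map directly onto a
 * single queue flag.  "neg" inverts the exposed value, e.g. "rotational"
 * is the negation of QUEUE_FLAG_NONROT.
 */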
282#define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \
283static ssize_t \
284queue_##name##_show(struct request_queue *q, char *page) \
285{ \
286 int bit; \
287 bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
288 return queue_var_show(neg ? !bit : bit, page); \
289} \
290static ssize_t \
291queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
292{ \
293 unsigned long val; \
294 ssize_t ret; \
295 ret = queue_var_store(&val, page, count); \
296 if (ret < 0) \
297 return ret; \
298 if (neg) \
299 val = !val; \
300 \
301 if (val) \
302 blk_queue_flag_set(QUEUE_FLAG_##flag, q); \
303 else \
304 blk_queue_flag_clear(QUEUE_FLAG_##flag, q); \
305 return ret; \
306}
307
308QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
309QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
310QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
311QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
312#undef QUEUE_SYSFS_BIT_FNS
313
314static ssize_t queue_zoned_show(struct request_queue *q, char *page)
315{
316 switch (blk_queue_zoned_model(q)) {
317 case BLK_ZONED_HA:
318 return sprintf(page, "host-aware\n");
319 case BLK_ZONED_HM:
320 return sprintf(page, "host-managed\n");
321 default:
322 return sprintf(page, "none\n");
323 }
324}
325
326static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
327{
328 return queue_var_show(disk_nr_zones(q->disk), page);
329}
330
331static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
332{
333 return queue_var_show(bdev_max_open_zones(q->disk->part0), page);
334}
335
336static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
337{
338 return queue_var_show(bdev_max_active_zones(q->disk->part0), page);
339}
340
341static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
342{
343 return queue_var_show((blk_queue_nomerges(q) << 1) |
344 blk_queue_noxmerges(q), page);
345}
346
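/*
 * "nomerges" accepts 0, 1 or 2: 0 re-enables all merging, 1 disables only
 * the more expensive extended (lookup based) merge attempts, and 2
 * disables request merging entirely.
 */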
347static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
348 size_t count)
349{
350 unsigned long nm;
351 ssize_t ret = queue_var_store(&nm, page, count);
352
353 if (ret < 0)
354 return ret;
355
356 blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
357 blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
358 if (nm == 2)
359 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
360 else if (nm)
361 blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
362
363 return ret;
364}
365
366static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
367{
368 bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
369 bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);
370
371 return queue_var_show(set << force, page);
372}
373
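/*
 * "rq_affinity": 0 turns completion CPU steering off, 1 completes requests
 * on a CPU in the same group as the submitter, 2 forces completion on the
 * exact submitting CPU.  Only meaningful on SMP builds.
 */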
374static ssize_t
375queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
376{
377 ssize_t ret = -EINVAL;
378#ifdef CONFIG_SMP
379 unsigned long val;
380
381 ret = queue_var_store(&val, page, count);
382 if (ret < 0)
383 return ret;
384
385 if (val == 2) {
386 blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
387 blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
388 } else if (val == 1) {
389 blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
390 blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
391 } else if (val == 0) {
392 blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
393 blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
394 }
395#endif
396 return ret;
397}
398
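/*
 * "io_poll_delay": -1 selects classic (busy-wait) polling, any value >= 0
 * enables hybrid polling with that many microseconds of sleep before
 * polling (0 lets the kernel estimate the sleep time).
 */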
399static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
400{
401 int val;
402
403 if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
404 val = BLK_MQ_POLL_CLASSIC;
405 else
406 val = q->poll_nsec / 1000;
407
408 return sprintf(page, "%d\n", val);
409}
410
411static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
412 size_t count)
413{
414 int err, val;
415
416 if (!q->mq_ops || !q->mq_ops->poll)
417 return -EINVAL;
418
419 err = kstrtoint(page, 10, &val);
420 if (err < 0)
421 return err;
422
423 if (val == BLK_MQ_POLL_CLASSIC)
424 q->poll_nsec = BLK_MQ_POLL_CLASSIC;
425 else if (val >= 0)
426 q->poll_nsec = val * 1000;
427 else
428 return -EINVAL;
429
430 return count;
431}
432
433static ssize_t queue_poll_show(struct request_queue *q, char *page)
434{
435 return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
436}
437
438static ssize_t queue_poll_store(struct request_queue *q, const char *page,
439 size_t count)
440{
441 if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
442 return -EINVAL;
443 pr_info_ratelimited("writes to the poll attribute are ignored.\n");
444 pr_info_ratelimited("please use driver specific parameters instead.\n");
445 return count;
446}
447
448static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
449{
450 return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
451}
452
453static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
454 size_t count)
455{
456 unsigned int val;
457 int err;
458
459 err = kstrtou32(page, 10, &val);
460 if (err || val == 0)
461 return -EINVAL;
462
463 blk_queue_rq_timeout(q, msecs_to_jiffies(val));
464
465 return count;
466}
467
468static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
469{
470 if (!wbt_rq_qos(q))
471 return -EINVAL;
472
473 if (wbt_disabled(q))
474 return sprintf(page, "0\n");
475
476 return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
477}
478
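/*
 * "wbt_lat_usec" is taken in microseconds; writing -1 restores the default
 * latency target, and 0 disables writeback throttling.
 */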
479static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
480 size_t count)
481{
482 struct rq_qos *rqos;
483 ssize_t ret;
484 s64 val;
485
486 ret = queue_var_store64(&val, page);
487 if (ret < 0)
488 return ret;
489 if (val < -1)
490 return -EINVAL;
491
492 rqos = wbt_rq_qos(q);
493 if (!rqos) {
494 ret = wbt_init(q);
495 if (ret)
496 return ret;
497 }
498
499 if (val == -1)
500 val = wbt_default_latency_nsec(q);
501 else if (val >= 0)
502 val *= 1000ULL;
503
504 if (wbt_get_min_lat(q) == val)
505 return count;
506
507 /*
508 * Ensure that the queue is idled, in case the latency update
509 * ends up either enabling or disabling wbt completely. We can't
510 * have IO inflight if that happens.
511 */
512 blk_mq_freeze_queue(q);
513 blk_mq_quiesce_queue(q);
514
515 wbt_set_min_lat(q, val);
516
517 blk_mq_unquiesce_queue(q);
518 blk_mq_unfreeze_queue(q);
519
520 return count;
521}
522
523static ssize_t queue_wc_show(struct request_queue *q, char *page)
524{
525 if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
526 return sprintf(page, "write back\n");
527
528 return sprintf(page, "write through\n");
529}
530
531static ssize_t queue_wc_store(struct request_queue *q, const char *page,
532 size_t count)
533{
534 int set = -1;
535
536 if (!strncmp(page, "write back", 10))
537 set = 1;
538 else if (!strncmp(page, "write through", 13) ||
539 !strncmp(page, "none", 4))
540 set = 0;
541
542 if (set == -1)
543 return -EINVAL;
544
545 if (set)
546 blk_queue_flag_set(QUEUE_FLAG_WC, q);
547 else
548 blk_queue_flag_clear(QUEUE_FLAG_WC, q);
549
550 return count;
551}
552
553static ssize_t queue_fua_show(struct request_queue *q, char *page)
554{
555 return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
556}
557
558static ssize_t queue_dax_show(struct request_queue *q, char *page)
559{
560 return queue_var_show(blk_queue_dax(q), page);
561}
562
563#define QUEUE_RO_ENTRY(_prefix, _name) \
564static struct queue_sysfs_entry _prefix##_entry = { \
565 .attr = { .name = _name, .mode = 0444 }, \
566 .show = _prefix##_show, \
567};
568
569#define QUEUE_RW_ENTRY(_prefix, _name) \
570static struct queue_sysfs_entry _prefix##_entry = { \
571 .attr = { .name = _name, .mode = 0644 }, \
572 .show = _prefix##_show, \
573 .store = _prefix##_store, \
574};
575
576QUEUE_RW_ENTRY(queue_requests, "nr_requests");
577QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
578QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
579QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
580QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
581QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
582QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
583QUEUE_RW_ENTRY(elv_iosched, "scheduler");
584
585QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
586QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
587QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
588QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
589QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");
590
591QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
592QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
593QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
594QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
595QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");
596
597QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
598QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
599QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
600QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");
601
602QUEUE_RO_ENTRY(queue_zoned, "zoned");
603QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
604QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
605QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");
606
607QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
608QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
609QUEUE_RW_ENTRY(queue_poll, "io_poll");
610QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
611QUEUE_RW_ENTRY(queue_wc, "write_cache");
612QUEUE_RO_ENTRY(queue_fua, "fua");
613QUEUE_RO_ENTRY(queue_dax, "dax");
614QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
615QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
616QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
617QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");
618
619#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
620QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
621#endif
622
623/* legacy alias for logical_block_size: */
624static struct queue_sysfs_entry queue_hw_sector_size_entry = {
625 .attr = {.name = "hw_sector_size", .mode = 0444 },
626 .show = queue_logical_block_size_show,
627};
628
629QUEUE_RW_ENTRY(queue_nonrot, "rotational");
630QUEUE_RW_ENTRY(queue_iostats, "iostats");
631QUEUE_RW_ENTRY(queue_random, "add_random");
632QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");
633
634static struct attribute *queue_attrs[] = {
635 &queue_requests_entry.attr,
636 &queue_ra_entry.attr,
637 &queue_max_hw_sectors_entry.attr,
638 &queue_max_sectors_entry.attr,
639 &queue_max_segments_entry.attr,
640 &queue_max_discard_segments_entry.attr,
641 &queue_max_integrity_segments_entry.attr,
642 &queue_max_segment_size_entry.attr,
643 &elv_iosched_entry.attr,
644 &queue_hw_sector_size_entry.attr,
645 &queue_logical_block_size_entry.attr,
646 &queue_physical_block_size_entry.attr,
647 &queue_chunk_sectors_entry.attr,
648 &queue_io_min_entry.attr,
649 &queue_io_opt_entry.attr,
650 &queue_discard_granularity_entry.attr,
651 &queue_discard_max_entry.attr,
652 &queue_discard_max_hw_entry.attr,
653 &queue_discard_zeroes_data_entry.attr,
654 &queue_write_same_max_entry.attr,
655 &queue_write_zeroes_max_entry.attr,
656 &queue_zone_append_max_entry.attr,
657 &queue_zone_write_granularity_entry.attr,
658 &queue_nonrot_entry.attr,
659 &queue_zoned_entry.attr,
660 &queue_nr_zones_entry.attr,
661 &queue_max_open_zones_entry.attr,
662 &queue_max_active_zones_entry.attr,
663 &queue_nomerges_entry.attr,
664 &queue_rq_affinity_entry.attr,
665 &queue_iostats_entry.attr,
666 &queue_stable_writes_entry.attr,
667 &queue_random_entry.attr,
668 &queue_poll_entry.attr,
669 &queue_wc_entry.attr,
670 &queue_fua_entry.attr,
671 &queue_dax_entry.attr,
672 &queue_wb_lat_entry.attr,
673 &queue_poll_delay_entry.attr,
674 &queue_io_timeout_entry.attr,
675#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
676 &blk_throtl_sample_time_entry.attr,
677#endif
678 &queue_virt_boundary_mask_entry.attr,
679 &queue_dma_alignment_entry.attr,
680 NULL,
681};
682
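/*
 * Hide attributes that cannot apply to this queue: io_timeout when the
 * driver has no timeout handler, and the zone limits on non-zoned devices.
 */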
683static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
684 int n)
685{
686 struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
687 struct request_queue *q = disk->queue;
688
689 if (attr == &queue_io_timeout_entry.attr &&
690 (!q->mq_ops || !q->mq_ops->timeout))
691 return 0;
692
693 if ((attr == &queue_max_open_zones_entry.attr ||
694 attr == &queue_max_active_zones_entry.attr) &&
695 !blk_queue_is_zoned(q))
696 return 0;
697
698 return attr->mode;
699}
700
701static struct attribute_group queue_attr_group = {
702 .attrs = queue_attrs,
703 .is_visible = queue_attr_visible,
704};
705
706
707#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
708
709static ssize_t
710queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
711{
712 struct queue_sysfs_entry *entry = to_queue(attr);
713 struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
714 struct request_queue *q = disk->queue;
715 ssize_t res;
716
717 if (!entry->show)
718 return -EIO;
719 mutex_lock(&q->sysfs_lock);
720 res = entry->show(q, page);
721 mutex_unlock(&q->sysfs_lock);
722 return res;
723}
724
725static ssize_t
726queue_attr_store(struct kobject *kobj, struct attribute *attr,
727 const char *page, size_t length)
728{
729 struct queue_sysfs_entry *entry = to_queue(attr);
730 struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
731 struct request_queue *q = disk->queue;
732 ssize_t res;
733
734 if (!entry->store)
735 return -EIO;
736
737 mutex_lock(&q->sysfs_lock);
738 res = entry->store(q, page, length);
739 mutex_unlock(&q->sysfs_lock);
740 return res;
741}
742
743static const struct sysfs_ops queue_sysfs_ops = {
744 .show = queue_attr_show,
745 .store = queue_attr_store,
746};
747
748static const struct attribute_group *blk_queue_attr_groups[] = {
749 &queue_attr_group,
750 NULL
751};
752
753static void blk_queue_release(struct kobject *kobj)
754{
755 /* nothing to do here, all data is associated with the parent gendisk */
756}
757
758static struct kobj_type blk_queue_ktype = {
759 .default_groups = blk_queue_attr_groups,
760 .sysfs_ops = &queue_sysfs_ops,
761 .release = blk_queue_release,
762};
763
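/*
 * Tear down the per-queue debugfs hierarchy (including blktrace state)
 * under q->debugfs_mutex and clear the cached directory pointers.
 */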
764static void blk_debugfs_remove(struct gendisk *disk)
765{
766 struct request_queue *q = disk->queue;
767
768 mutex_lock(&q->debugfs_mutex);
769 blk_trace_shutdown(q);
770 debugfs_remove_recursive(q->debugfs_dir);
771 q->debugfs_dir = NULL;
772 q->sched_debugfs_dir = NULL;
773 q->rqos_debugfs_dir = NULL;
774 mutex_unlock(&q->debugfs_mutex);
775}
776
777/**
778 * blk_register_queue - register a block layer queue with sysfs
779 * @disk: Disk of which the request queue should be registered with sysfs.
780 */
781int blk_register_queue(struct gendisk *disk)
782{
783 struct request_queue *q = disk->queue;
784 int ret;
785
786 mutex_lock(&q->sysfs_dir_lock);
787 kobject_init(&disk->queue_kobj, &blk_queue_ktype);
788 ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
789 if (ret < 0)
790 goto out_put_queue_kobj;
791
792 if (queue_is_mq(q)) {
793 ret = blk_mq_sysfs_register(disk);
794 if (ret)
795 goto out_put_queue_kobj;
796 }
797 mutex_lock(&q->sysfs_lock);
798
799 mutex_lock(&q->debugfs_mutex);
800 q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
801 if (queue_is_mq(q))
802 blk_mq_debugfs_register(q);
803 mutex_unlock(&q->debugfs_mutex);
804
805 ret = disk_register_independent_access_ranges(disk);
806 if (ret)
807 goto out_debugfs_remove;
808
809 if (q->elevator) {
810 ret = elv_register_queue(q, false);
811 if (ret)
812 goto out_unregister_ia_ranges;
813 }
814
815 ret = blk_crypto_sysfs_register(disk);
816 if (ret)
817 goto out_elv_unregister;
818
819 blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
820 wbt_enable_default(q);
821 blk_throtl_register(disk);
822
 823 /* Now everything is ready; send out the KOBJ_ADD uevent */
824 kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
825 if (q->elevator)
826 kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
827 mutex_unlock(&q->sysfs_lock);
828 mutex_unlock(&q->sysfs_dir_lock);
829
830 /*
831 * SCSI probing may synchronously create and destroy a lot of
832 * request_queues for non-existent devices. Shutting down a fully
 833 * functional queue takes measurable wallclock time as RCU grace
834 * periods are involved. To avoid excessive latency in these
835 * cases, a request_queue starts out in a degraded mode which is
836 * faster to shut down and is made fully functional here as
837 * request_queues for non-existent devices never get registered.
838 */
839 if (!blk_queue_init_done(q)) {
840 blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
841 percpu_ref_switch_to_percpu(&q->q_usage_counter);
842 }
843
844 return ret;
845
846out_elv_unregister:
847 elv_unregister_queue(q);
848out_unregister_ia_ranges:
849 disk_unregister_independent_access_ranges(disk);
850out_debugfs_remove:
851 blk_debugfs_remove(disk);
852 mutex_unlock(&q->sysfs_lock);
853out_put_queue_kobj:
854 kobject_put(&disk->queue_kobj);
855 mutex_unlock(&q->sysfs_dir_lock);
856 return ret;
857}
858
859/**
860 * blk_unregister_queue - counterpart of blk_register_queue()
861 * @disk: Disk of which the request queue should be unregistered from sysfs.
862 *
863 * Note: the caller is responsible for guaranteeing that this function is called
864 * after blk_register_queue() has finished.
865 */
866void blk_unregister_queue(struct gendisk *disk)
867{
868 struct request_queue *q = disk->queue;
869
870 if (WARN_ON(!q))
871 return;
872
873 /* Return early if disk->queue was never registered. */
874 if (!blk_queue_registered(q))
875 return;
876
877 /*
878 * Since sysfs_remove_dir() prevents adding new directory entries
879 * before removal of existing entries starts, protect against
880 * concurrent elv_iosched_store() calls.
881 */
882 mutex_lock(&q->sysfs_lock);
883 blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
884 mutex_unlock(&q->sysfs_lock);
885
886 mutex_lock(&q->sysfs_dir_lock);
887 /*
888 * Remove the sysfs attributes before unregistering the queue data
889 * structures that can be modified through sysfs.
890 */
891 if (queue_is_mq(q))
892 blk_mq_sysfs_unregister(disk);
893 blk_crypto_sysfs_unregister(disk);
894
895 mutex_lock(&q->sysfs_lock);
896 elv_unregister_queue(q);
897 disk_unregister_independent_access_ranges(disk);
898 mutex_unlock(&q->sysfs_lock);
899
900 /* Now that we've deleted all child objects, we can delete the queue. */
901 kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
902 kobject_del(&disk->queue_kobj);
903 mutex_unlock(&q->sysfs_dir_lock);
904
905 blk_debugfs_remove(disk);
906}