v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}
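
/*
 * Editor's note, not part of the kernel source: per the usual sysfs
 * convention, queue_var_store() returns the number of bytes consumed
 * (count) on success and a negative errno on a bad parse, so callers can
 * simply relay its return value.  A minimal sketch of a caller, with a
 * hypothetical "foo" attribute and field:
 */
#if 0	/* illustrative sketch only */
static ssize_t queue_foo_store(struct request_queue *q, const char *page,
			       size_t count)
{
	unsigned long v;
	ssize_t ret = queue_var_store(&v, page, count);

	if (ret < 0)		/* -EINVAL on malformed or oversized input */
		return ret;
	q->foo = v;		/* hypothetical field, for illustration */
	return ret;		/* == count: tell sysfs all input was used */
}
#endif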

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb;

	if (!q->disk)
		return -EINVAL;
	ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
	return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;

	if (!q->disk)
		return -EINVAL;
	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{

	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}
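
/*
 * Editor's note (illustration, not kernel source): the store above takes
 * a byte count and converts it to 512-byte sectors with ">>= 9".  For
 * example, writing "1048576" (1 MiB) to discard_max_bytes yields
 * 1048576 >> 9 == 2048 sectors, which is then clamped to
 * max_hw_discard_sectors.
 */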

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
						 char *page)
{
	return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
	unsigned long long max_sectors = q->limits.max_zone_append_sectors;

	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	if (q->disk)
		q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}
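
/*
 * Editor's note (illustration): max_sectors_kb is kept in KiB while the
 * queue limit is in 512-byte sectors, hence the "<< 1".  With 4 KiB
 * pages (PAGE_SHIFT == 12), writing "512" stores 512 << 1 == 1024
 * sectors (512 KiB) and sets bdi->io_pages to 512 >> 2 == 128 pages.
 */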

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.virt_boundary_mask, page);
}

static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_dma_alignment(q), page);
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_##name##_show(struct request_queue *q, char *page)		\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		 return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS
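
/*
 * Editor's note (illustration): as a concrete expansion of the macro
 * above, QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) generates roughly the
 * following show half (neg == 1, so "rotational" reads as the inverse
 * of the NONROT flag), plus the matching store:
 */
#if 0	/* illustrative expansion only */
static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
{
	int bit = test_bit(QUEUE_FLAG_NONROT, &q->queue_flags);

	return queue_var_show(!bit, page);
}
#endif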

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(disk_nr_zones(q->disk), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(bdev_max_open_zones(q->disk->part0), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(bdev_max_active_zones(q->disk->part0), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}
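
/*
 * Editor's note (illustration): "nomerges" packs two flags into one
 * number: bit 1 is NOMERGES, bit 0 is NOXMERGES.  Since the store above
 * sets at most one of them, the file reads back 0 (merging enabled),
 * 1 (only extended merges disabled) or 2 (all merging disabled).
 */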

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}
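
/*
 * Editor's note (illustration): queue_rq_affinity_show() encodes the
 * two completion-affinity flags as "set << force": SAME_COMP alone
 * reads 1 (1 << 0), SAME_COMP plus SAME_FORCE reads 2 (1 << 1), and
 * neither reads 0, matching the values accepted by the store above.
 */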

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
		val = BLK_MQ_POLL_CLASSIC;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == BLK_MQ_POLL_CLASSIC)
		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
	else if (val >= 0)
		q->poll_nsec = val * 1000;
	else
		return -EINVAL;

	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return -EINVAL;
	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
	return count;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	if (wbt_disabled(q))
		return sprintf(page, "0\n");

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	if (set)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);

	return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};
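
/*
 * Editor's note (illustration): each QUEUE_RO_ENTRY()/QUEUE_RW_ENTRY()
 * invocation below names a _show (and _store) pair by prefix.  For
 * example, QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb") expands to:
 */
#if 0	/* illustrative expansion only */
static struct queue_sysfs_entry queue_ra_entry = {
	.attr	= { .name = "read_ahead_kb", .mode = 0644 },
	.show	= queue_ra_show,
	.store	= queue_ra_store,
};
#endif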

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

static struct attribute *queue_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&elv_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&blk_throtl_sample_time_entry.attr,
#endif
	&queue_virt_boundary_mask_entry.attr,
	&queue_dma_alignment_entry.attr,
	NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if (attr == &queue_io_timeout_entry.attr &&
		(!q->mq_ops || !q->mq_ops->timeout))
			return 0;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};


#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
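
/*
 * Editor's note (illustration): sysfs hands these two ops only a
 * kobject and an attribute.  container_of() recovers the gendisk from
 * its embedded queue_kobj and to_queue() recovers the queue_sysfs_entry
 * from the embedded attribute, so a read of, say,
 * /sys/block/<disk>/queue/nr_requests is dispatched to
 * queue_requests_show() under q->sysfs_lock.
 */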

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

static const struct attribute_group *blk_queue_attr_groups[] = {
	&queue_attr_group,
	NULL
};

static void blk_queue_release(struct kobject *kobj)
{
	/* nothing to do here, all data is associated with the parent gendisk */
}

static struct kobj_type blk_queue_ktype = {
	.default_groups = blk_queue_attr_groups,
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_queue_release,
};

static void blk_debugfs_remove(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	mutex_lock(&q->debugfs_mutex);
	blk_trace_shutdown(q);
	debugfs_remove_recursive(q->debugfs_dir);
	q->debugfs_dir = NULL;
	q->sched_debugfs_dir = NULL;
	q->rqos_debugfs_dir = NULL;
	mutex_unlock(&q->debugfs_mutex);
}

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	int ret;

	mutex_lock(&q->sysfs_dir_lock);
	kobject_init(&disk->queue_kobj, &blk_queue_ktype);
	ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
	if (ret < 0)
		goto out_put_queue_kobj;

	if (queue_is_mq(q)) {
		ret = blk_mq_sysfs_register(disk);
		if (ret)
			goto out_put_queue_kobj;
	}
	mutex_lock(&q->sysfs_lock);

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
	if (queue_is_mq(q))
		blk_mq_debugfs_register(q);
	mutex_unlock(&q->debugfs_mutex);

	ret = disk_register_independent_access_ranges(disk);
	if (ret)
		goto out_debugfs_remove;

	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret)
			goto out_unregister_ia_ranges;
	}

	ret = blk_crypto_sysfs_register(disk);
	if (ret)
		goto out_elv_unregister;

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(q);
	blk_throtl_register(disk);

	/* Now everything is ready and send out KOBJ_ADD uevent */
	kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	return ret;

out_elv_unregister:
	elv_unregister_queue(q);
out_unregister_ia_ranges:
	disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
	blk_debugfs_remove(disk);
	mutex_unlock(&q->sysfs_lock);
out_put_queue_kobj:
	kobject_put(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);
	return ret;
}
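
/*
 * Editor's note (illustration, an assumption about the surrounding
 * kernel code): drivers do not normally call blk_register_queue()
 * themselves; the block core invokes it while adding a gendisk (via
 * device_add_disk()), which is what creates the
 * /sys/block/<disk>/queue directory populated by the attributes above.
 */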
 
/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_sysfs_unregister(disk);
	blk_crypto_sysfs_unregister(disk);

	mutex_lock(&q->sysfs_lock);
	elv_unregister_queue(q);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);

	/* Now that we've deleted all child objects, we can delete the queue. */
	kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
	kobject_del(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);

	blk_debugfs_remove(disk);
}
v5.9
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
					(PAGE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{

	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
	unsigned long long max_sectors = q->limits.max_zone_append_sectors;

	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		 return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_nr_zones(q), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_open_zones(q), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_active_zones(q), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
		val = BLK_MQ_POLL_CLASSIC;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == BLK_MQ_POLL_CLASSIC)
		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
	else if (val >= 0)
		q->poll_nsec = val * 1000;
	else
		return -EINVAL;

	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
	    !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	if (poll_on)
		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);

	return ret;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	if (set)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);

	return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = 0644 },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = 0644 },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = 0644 },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = 0444 },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = 0444 },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_discard_segments_entry = {
	.attr = {.name = "max_discard_segments", .mode = 0444 },
	.show = queue_max_discard_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = 0444 },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = 0444 },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = 0644 },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = 0444 },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_chunk_sectors_entry = {
	.attr = {.name = "chunk_sectors", .mode = 0444 },
	.show = queue_chunk_sectors_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = 0444 },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = 0444 },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = 0444 },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
	.attr = {.name = "discard_max_hw_bytes", .mode = 0444 },
	.show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = 0644 },
	.show = queue_discard_max_show,
	.store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = 0444 },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = 0444 },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
	.attr = {.name = "write_zeroes_max_bytes", .mode = 0444 },
	.show = queue_write_zeroes_max_show,
};

static struct queue_sysfs_entry queue_zone_append_max_entry = {
	.attr = {.name = "zone_append_max_bytes", .mode = 0444 },
	.show = queue_zone_append_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = 0644 },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_zoned_entry = {
	.attr = {.name = "zoned", .mode = 0444 },
	.show = queue_zoned_show,
};

static struct queue_sysfs_entry queue_nr_zones_entry = {
	.attr = {.name = "nr_zones", .mode = 0444 },
	.show = queue_nr_zones_show,
};

static struct queue_sysfs_entry queue_max_open_zones_entry = {
	.attr = {.name = "max_open_zones", .mode = 0444 },
	.show = queue_max_open_zones_show,
};

static struct queue_sysfs_entry queue_max_active_zones_entry = {
	.attr = {.name = "max_active_zones", .mode = 0444 },
	.show = queue_max_active_zones_show,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = 0644 },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = 0644 },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = 0644 },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = 0644 },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
	.attr = {.name = "io_poll", .mode = 0644 },
	.show = queue_poll_show,
	.store = queue_poll_store,
};

static struct queue_sysfs_entry queue_poll_delay_entry = {
	.attr = {.name = "io_poll_delay", .mode = 0644 },
	.show = queue_poll_delay_show,
	.store = queue_poll_delay_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
	.attr = {.name = "write_cache", .mode = 0644 },
	.show = queue_wc_show,
	.store = queue_wc_store,
};

static struct queue_sysfs_entry queue_fua_entry = {
	.attr = {.name = "fua", .mode = 0444 },
	.show = queue_fua_show,
};

static struct queue_sysfs_entry queue_dax_entry = {
	.attr = {.name = "dax", .mode = 0444 },
	.show = queue_dax_show,
};

static struct queue_sysfs_entry queue_io_timeout_entry = {
	.attr = {.name = "io_timeout", .mode = 0644 },
	.show = queue_io_timeout_show,
	.store = queue_io_timeout_store,
};

static struct queue_sysfs_entry queue_wb_lat_entry = {
	.attr = {.name = "wbt_lat_usec", .mode = 0644 },
	.show = queue_wb_lat_show,
	.store = queue_wb_lat_store,
};

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static struct queue_sysfs_entry throtl_sample_time_entry = {
	.attr = {.name = "throttle_sample_time", .mode = 0644 },
	.show = blk_throtl_sample_time_show,
	.store = blk_throtl_sample_time_store,
};
#endif

static struct attribute *queue_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&throtl_sample_time_entry.attr,
#endif
	NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				int n)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	if (attr == &queue_io_timeout_entry.attr &&
		(!q->mq_ops || !q->mq_ops->timeout))
			return 0;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};


#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
static void blk_exit_queue(struct request_queue *q)
{
	/*
	 * Since the I/O scheduler exit code may access cgroup information,
	 * perform I/O scheduler exit before disassociating from the block
	 * cgroup controller.
	 */
	if (q->elevator) {
		ioc_clear_queue(q);
		__elevator_exit(q, q->elevator);
		q->elevator = NULL;
	}

	/*
	 * Remove all references to @q from the block cgroup controller before
	 * restoring @q->queue_lock to avoid that restoring this pointer causes
	 * e.g. blkcg_print_blkgs() to crash.
	 */
	blkcg_exit_queue(q);

	/*
	 * Since the cgroup code may dereference the @q->backing_dev_info
	 * pointer, only decrease its reference count after having removed the
	 * association with the block cgroup controller.
	 */
	bdi_put(q->backing_dev_info);
}

/**
 * blk_release_queue - releases all allocated resources of the request_queue
 * @kobj: pointer to a kobject, whose container is a request_queue
 *
 * This function releases all allocated resources of the request queue.
 *
 * The struct request_queue refcount is incremented with blk_get_queue() and
 * decremented with blk_put_queue(). Once the refcount reaches 0 this function
 * is called.
 *
 * For drivers that have a request_queue on a gendisk and added with
 * __device_add_disk() the refcount to request_queue will reach 0 with
 * the last put_disk() called by the driver. For drivers which don't use
 * __device_add_disk() this happens with blk_cleanup_queue().
 *
 * Drivers exist which depend on the release of the request_queue to be
 * synchronous, it should not be deferred.
 *
 * Context: can sleep
 */
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	might_sleep();

	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	blk_free_queue_stats(q->stats);

	if (queue_is_mq(q))
		cancel_delayed_work_sync(&q->requeue_work);

	blk_exit_queue(q);

	blk_queue_free_zone_bitmaps(q);

	if (queue_is_mq(q))
		blk_mq_release(q);

	blk_trace_shutdown(q);
	mutex_lock(&q->debugfs_mutex);
	debugfs_remove_recursive(q->debugfs_dir);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q))
		blk_mq_debugfs_unregister(q);

	bioset_exit(&q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
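
/*
 * Editor's note (illustration): blk_release_queue() hands the final
 * kmem_cache_free() to call_rcu() via blk_free_queue_rcu() above, so
 * code that dereferences a request_queue under rcu_read_lock() is
 * guaranteed a full grace period before the memory is actually freed.
 */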

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_release_queue,
};

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;
	bool has_elevator = false;

	if (WARN_ON(!q))
		return -ENXIO;

	WARN_ONCE(blk_queue_registered(q),
		  "%s is registering an already registered queue\n",
		  kobject_name(&dev->kobj));

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	mutex_lock(&q->sysfs_dir_lock);

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	ret = sysfs_create_group(&q->kobj, &queue_attr_group);
	if (ret) {
		blk_trace_remove_sysfs(dev);
		kobject_del(&q->kobj);
		kobject_put(&dev->kobj);
		goto unlock;
	}

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q)) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	mutex_lock(&q->sysfs_lock);
	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret) {
			mutex_unlock(&q->sysfs_lock);
			mutex_unlock(&q->sysfs_dir_lock);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			return ret;
		}
		has_elevator = true;
	}

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(q);
	blk_throtl_register_queue(q);

	/* Now everything is ready and send out KOBJ_ADD uevent */
	kobject_uevent(&q->kobj, KOBJ_ADD);
	if (has_elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);

	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_dir_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_register_queue);

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));

	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		elv_unregister_queue(q);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	kobject_put(&disk_to_dev(disk)->kobj);
}