v4.6 (block/blk-sysfs.c)
 
  1/*
  2 * Functions related to sysfs handling
  3 */
  4#include <linux/kernel.h>
  5#include <linux/slab.h>
  6#include <linux/module.h>
  7#include <linux/bio.h>
  8#include <linux/blkdev.h>
  9#include <linux/backing-dev.h>
 10#include <linux/blktrace_api.h>
 11#include <linux/blk-mq.h>
 12#include <linux/blk-cgroup.h>
 
 13
 14#include "blk.h"
 15#include "blk-mq.h"
 16
 17struct queue_sysfs_entry {
 18	struct attribute attr;
 19	ssize_t (*show)(struct request_queue *, char *);
 20	ssize_t (*store)(struct request_queue *, const char *, size_t);
 21};
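/*
 * Each queue_sysfs_entry backs one file under /sys/block/<disk>/queue/:
 * ->show() formats the current value into the sysfs page buffer and
 * ->store() parses a value written from userspace.  Both callbacks are
 * dispatched by queue_attr_show()/queue_attr_store() further down,
 * under q->sysfs_lock.
 */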
 22
 23static ssize_t
 24queue_var_show(unsigned long var, char *page)
 25{
 26	return sprintf(page, "%lu\n", var);
 27}
 28
 29static ssize_t
 30queue_var_store(unsigned long *var, const char *page, size_t count)
 31{
 32	int err;
 33	unsigned long v;
 34
 35	err = kstrtoul(page, 10, &v);
 36	if (err || v > UINT_MAX)
 37		return -EINVAL;
 38
 39	*var = v;
 40
 41	return count;
 42}
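/*
 * queue_var_store() is the common parser for the writable attributes:
 * it accepts a base-10 value, rejects anything larger than UINT_MAX,
 * and returns the byte count so the sysfs write is reported as fully
 * consumed.  Callers treat only a negative return as failure.
 */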
 43
 44static ssize_t queue_requests_show(struct request_queue *q, char *page)
 45{
 46	return queue_var_show(q->nr_requests, (page));
 47}
 48
 49static ssize_t
 50queue_requests_store(struct request_queue *q, const char *page, size_t count)
 51{
 52	unsigned long nr;
 53	int ret, err;
 54
 55	if (!q->request_fn && !q->mq_ops)
 56		return -EINVAL;
 57
 58	ret = queue_var_store(&nr, page, count);
 59	if (ret < 0)
 60		return ret;
 61
 62	if (nr < BLKDEV_MIN_RQ)
 63		nr = BLKDEV_MIN_RQ;
 64
 65	if (q->request_fn)
 66		err = blk_update_nr_requests(q, nr);
 67	else
 68		err = blk_mq_update_nr_requests(q, nr);
 69
 70	if (err)
 71		return err;
 72
 73	return ret;
 74}
 75
 76static ssize_t queue_ra_show(struct request_queue *q, char *page)
 77{
 78	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
 79					(PAGE_SHIFT - 10);
 80
 81	return queue_var_show(ra_kb, (page));
 82}
 83
 84static ssize_t
 85queue_ra_store(struct request_queue *q, const char *page, size_t count)
 86{
 87	unsigned long ra_kb;
 88	ssize_t ret = queue_var_store(&ra_kb, page, count);
 89
 90	if (ret < 0)
 91		return ret;
 92
 93	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);
 94
 95	return ret;
 96}
 97
 98static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
 99{
100	int max_sectors_kb = queue_max_sectors(q) >> 1;
101
102	return queue_var_show(max_sectors_kb, (page));
103}
104
105static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
106{
107	return queue_var_show(queue_max_segments(q), (page));
108}
109
110static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
111{
112	return queue_var_show(q->limits.max_integrity_segments, (page));
113}
114
115static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
116{
117	if (blk_queue_cluster(q))
118		return queue_var_show(queue_max_segment_size(q), (page));
119
120	return queue_var_show(PAGE_SIZE, (page));
121}
122
123static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
124{
125	return queue_var_show(queue_logical_block_size(q), page);
126}
127
128static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
129{
130	return queue_var_show(queue_physical_block_size(q), page);
131}
132
133static ssize_t queue_io_min_show(struct request_queue *q, char *page)
134{
135	return queue_var_show(queue_io_min(q), page);
136}
137
138static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
139{
140	return queue_var_show(queue_io_opt(q), page);
141}
142
143static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
144{
145	return queue_var_show(q->limits.discard_granularity, page);
146}
147
148static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
149{
150
151	return sprintf(page, "%llu\n",
152		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
153}
154
155static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
156{
157	return sprintf(page, "%llu\n",
158		       (unsigned long long)q->limits.max_discard_sectors << 9);
159}
160
161static ssize_t queue_discard_max_store(struct request_queue *q,
162				       const char *page, size_t count)
163{
164	unsigned long max_discard;
165	ssize_t ret = queue_var_store(&max_discard, page, count);
166
167	if (ret < 0)
168		return ret;
169
170	if (max_discard & (q->limits.discard_granularity - 1))
171		return -EINVAL;
172
173	max_discard >>= 9;
174	if (max_discard > UINT_MAX)
175		return -EINVAL;
176
177	if (max_discard > q->limits.max_hw_discard_sectors)
178		max_discard = q->limits.max_hw_discard_sectors;
179
180	q->limits.max_discard_sectors = max_discard;
181	return ret;
182}
183
184static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
185{
186	return queue_var_show(queue_discard_zeroes_data(q), page);
187}
188
189static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
190{
191	return sprintf(page, "%llu\n",
192		(unsigned long long)q->limits.max_write_same_sectors << 9);
193}
194
195
196static ssize_t
197queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
198{
199	unsigned long max_sectors_kb,
200		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
201			page_kb = 1 << (PAGE_SHIFT - 10);
202	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
203
204	if (ret < 0)
205		return ret;
206
207	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
208					 q->limits.max_dev_sectors >> 1);
209
210	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
211		return -EINVAL;
212
213	spin_lock_irq(q->queue_lock);
214	q->limits.max_sectors = max_sectors_kb << 1;
215	spin_unlock_irq(q->queue_lock);
 
216
217	return ret;
218}
219
220static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
221{
222	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
223
224	return queue_var_show(max_hw_sectors_kb, (page));
225}
226
227#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
228static ssize_t								\
229queue_show_##name(struct request_queue *q, char *page)			\
230{									\
231	int bit;							\
232	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
233	return queue_var_show(neg ? !bit : bit, page);			\
234}									\
235static ssize_t								\
236queue_store_##name(struct request_queue *q, const char *page, size_t count) \
237{									\
238	unsigned long val;						\
239	ssize_t ret;							\
240	ret = queue_var_store(&val, page, count);			\
241	if (ret < 0)							\
242		 return ret;						\
243	if (neg)							\
244		val = !val;						\
245									\
246	spin_lock_irq(q->queue_lock);					\
247	if (val)							\
248		queue_flag_set(QUEUE_FLAG_##flag, q);			\
249	else								\
250		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
251	spin_unlock_irq(q->queue_lock);					\
252	return ret;							\
253}
254
255QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
256QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
257QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
 
258#undef QUEUE_SYSFS_BIT_FNS
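/*
 * For illustration, QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) above expands
 * (roughly) to:
 *
 *	static ssize_t queue_show_nonrot(struct request_queue *q, char *page)
 *	{
 *		int bit = test_bit(QUEUE_FLAG_NONROT, &q->queue_flags);
 *		return queue_var_show(!bit, page);
 *	}
 *
 * plus a matching queue_store_nonrot().  Because neg is 1, the exported
 * "rotational" file reads 0 for non-rotational (e.g. SSD) devices even
 * though the internal flag is named NONROT.
 */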
259
260static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
261{
262	return queue_var_show((blk_queue_nomerges(q) << 1) |
263			       blk_queue_noxmerges(q), page);
264}
265
266static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
267				    size_t count)
268{
269	unsigned long nm;
270	ssize_t ret = queue_var_store(&nm, page, count);
271
272	if (ret < 0)
273		return ret;
274
275	spin_lock_irq(q->queue_lock);
276	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
277	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
278	if (nm == 2)
279		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
280	else if (nm)
281		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
282	spin_unlock_irq(q->queue_lock);
283
284	return ret;
285}
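/*
 * The "nomerges" file encodes two flags in one value: 0 leaves request
 * merging fully enabled, 1 sets QUEUE_FLAG_NOXMERGES (only the simple
 * one-hit merge attempt remains), and 2 sets QUEUE_FLAG_NOMERGES,
 * disabling merging altogether.  The show side reconstructs the same
 * value as (nomerges << 1) | noxmerges.
 */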
286
287static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
288{
289	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
290	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);
291
292	return queue_var_show(set << force, page);
293}
294
295static ssize_t
296queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
297{
298	ssize_t ret = -EINVAL;
299#ifdef CONFIG_SMP
300	unsigned long val;
301
302	ret = queue_var_store(&val, page, count);
303	if (ret < 0)
304		return ret;
305
306	spin_lock_irq(q->queue_lock);
307	if (val == 2) {
308		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
309		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
310	} else if (val == 1) {
311		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
312		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
313	} else if (val == 0) {
314		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
315		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
316	}
317	spin_unlock_irq(q->queue_lock);
318#endif
319	return ret;
320}
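/*
 * rq_affinity values as implemented above: 0 clears both flags (complete
 * wherever the IRQ lands), 1 sets QUEUE_FLAG_SAME_COMP so completions are
 * steered back to the submitting CPU's group, and 2 additionally sets
 * QUEUE_FLAG_SAME_FORCE to force completion on the exact submitting CPU.
 * The show side reports set << force, which reproduces 0, 1 or 2.
 */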
321
322static ssize_t queue_poll_show(struct request_queue *q, char *page)
323{
324	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
325}
326
327static ssize_t queue_poll_store(struct request_queue *q, const char *page,
328				size_t count)
329{
330	unsigned long poll_on;
331	ssize_t ret;
332
333	if (!q->mq_ops || !q->mq_ops->poll)
 
334		return -EINVAL;
335
336	ret = queue_var_store(&poll_on, page, count);
337	if (ret < 0)
338		return ret;
339
340	spin_lock_irq(q->queue_lock);
341	if (poll_on)
342		queue_flag_set(QUEUE_FLAG_POLL, q);
343	else
344		queue_flag_clear(QUEUE_FLAG_POLL, q);
345	spin_unlock_irq(q->queue_lock);
 
346
347	return ret;
348}
349
350static struct queue_sysfs_entry queue_requests_entry = {
351	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
352	.show = queue_requests_show,
353	.store = queue_requests_store,
354};
355
356static struct queue_sysfs_entry queue_ra_entry = {
357	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
358	.show = queue_ra_show,
359	.store = queue_ra_store,
360};
361
362static struct queue_sysfs_entry queue_max_sectors_entry = {
363	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
364	.show = queue_max_sectors_show,
365	.store = queue_max_sectors_store,
366};
367
368static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
369	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
370	.show = queue_max_hw_sectors_show,
371};
372
373static struct queue_sysfs_entry queue_max_segments_entry = {
374	.attr = {.name = "max_segments", .mode = S_IRUGO },
375	.show = queue_max_segments_show,
376};
377
378static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
379	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
380	.show = queue_max_integrity_segments_show,
381};
382
383static struct queue_sysfs_entry queue_max_segment_size_entry = {
384	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
385	.show = queue_max_segment_size_show,
386};
387
388static struct queue_sysfs_entry queue_iosched_entry = {
389	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
390	.show = elv_iosched_show,
391	.store = elv_iosched_store,
392};
 
393
394static struct queue_sysfs_entry queue_hw_sector_size_entry = {
395	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
396	.show = queue_logical_block_size_show,
397};
 
398
399static struct queue_sysfs_entry queue_logical_block_size_entry = {
400	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
401	.show = queue_logical_block_size_show,
402};
403
404static struct queue_sysfs_entry queue_physical_block_size_entry = {
405	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
406	.show = queue_physical_block_size_show,
407};
408
409static struct queue_sysfs_entry queue_io_min_entry = {
410	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
411	.show = queue_io_min_show,
412};
413
414static struct queue_sysfs_entry queue_io_opt_entry = {
415	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
416	.show = queue_io_opt_show,
417};
418
419static struct queue_sysfs_entry queue_discard_granularity_entry = {
420	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
421	.show = queue_discard_granularity_show,
422};
423
424static struct queue_sysfs_entry queue_discard_max_hw_entry = {
425	.attr = {.name = "discard_max_hw_bytes", .mode = S_IRUGO },
426	.show = queue_discard_max_hw_show,
427};
428
429static struct queue_sysfs_entry queue_discard_max_entry = {
430	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO | S_IWUSR },
431	.show = queue_discard_max_show,
432	.store = queue_discard_max_store,
433};
434
435static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
436	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
437	.show = queue_discard_zeroes_data_show,
438};
439
440static struct queue_sysfs_entry queue_write_same_max_entry = {
441	.attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
442	.show = queue_write_same_max_show,
443};
444
445static struct queue_sysfs_entry queue_nonrot_entry = {
446	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
447	.show = queue_show_nonrot,
448	.store = queue_store_nonrot,
449};
450
451static struct queue_sysfs_entry queue_nomerges_entry = {
452	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
453	.show = queue_nomerges_show,
454	.store = queue_nomerges_store,
455};
456
457static struct queue_sysfs_entry queue_rq_affinity_entry = {
458	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
459	.show = queue_rq_affinity_show,
460	.store = queue_rq_affinity_store,
461};
462
463static struct queue_sysfs_entry queue_iostats_entry = {
464	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
465	.show = queue_show_iostats,
466	.store = queue_store_iostats,
467};
468
469static struct queue_sysfs_entry queue_random_entry = {
470	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
471	.show = queue_show_random,
472	.store = queue_store_random,
473};
474
475static struct queue_sysfs_entry queue_poll_entry = {
476	.attr = {.name = "io_poll", .mode = S_IRUGO | S_IWUSR },
477	.show = queue_poll_show,
478	.store = queue_poll_store,
479};
480
481static struct attribute *default_attrs[] = {
482	&queue_requests_entry.attr,
483	&queue_ra_entry.attr,
484	&queue_max_hw_sectors_entry.attr,
485	&queue_max_sectors_entry.attr,
486	&queue_max_segments_entry.attr,
 
487	&queue_max_integrity_segments_entry.attr,
488	&queue_max_segment_size_entry.attr,
489	&queue_iosched_entry.attr,
490	&queue_hw_sector_size_entry.attr,
491	&queue_logical_block_size_entry.attr,
492	&queue_physical_block_size_entry.attr,
 
493	&queue_io_min_entry.attr,
494	&queue_io_opt_entry.attr,
495	&queue_discard_granularity_entry.attr,
496	&queue_discard_max_entry.attr,
497	&queue_discard_max_hw_entry.attr,
498	&queue_discard_zeroes_data_entry.attr,
499	&queue_write_same_max_entry.attr,
500	&queue_nonrot_entry.attr,
501	&queue_nomerges_entry.attr,
502	&queue_rq_affinity_entry.attr,
503	&queue_iostats_entry.attr,
 
504	&queue_random_entry.attr,
505	&queue_poll_entry.attr,
506	NULL,
507};
508
509#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
510
511static ssize_t
512queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
513{
514	struct queue_sysfs_entry *entry = to_queue(attr);
515	struct request_queue *q =
516		container_of(kobj, struct request_queue, kobj);
517	ssize_t res;
518
519	if (!entry->show)
520		return -EIO;
521	mutex_lock(&q->sysfs_lock);
522	if (blk_queue_dying(q)) {
523		mutex_unlock(&q->sysfs_lock);
524		return -ENOENT;
525	}
526	res = entry->show(q, page);
527	mutex_unlock(&q->sysfs_lock);
528	return res;
529}
530
531static ssize_t
532queue_attr_store(struct kobject *kobj, struct attribute *attr,
533		    const char *page, size_t length)
534{
535	struct queue_sysfs_entry *entry = to_queue(attr);
536	struct request_queue *q;
537	ssize_t res;
538
539	if (!entry->store)
540		return -EIO;
541
542	q = container_of(kobj, struct request_queue, kobj);
543	mutex_lock(&q->sysfs_lock);
544	if (blk_queue_dying(q)) {
545		mutex_unlock(&q->sysfs_lock);
546		return -ENOENT;
547	}
548	res = entry->store(q, page, length);
549	mutex_unlock(&q->sysfs_lock);
550	return res;
551}
552
553static void blk_free_queue_rcu(struct rcu_head *rcu_head)
554{
555	struct request_queue *q = container_of(rcu_head, struct request_queue,
556					       rcu_head);
557	kmem_cache_free(blk_requestq_cachep, q);
558}
559
560/**
561 * blk_release_queue - release a &struct request_queue when it is no longer needed
562 * @kobj:    the kobj belonging to the request queue to be released
563 *
564 * Description:
565 *     blk_release_queue is the pair to blk_init_queue() or
566 *     blk_queue_make_request().  It should be called when a request queue is
567 *     being released; typically when a block device is being de-registered.
568 *     Currently, its primary task is to free all the &struct request
569 *     structures that were allocated to the queue and the queue itself.
570 *
571 * Note:
572 *     The low level driver must have finished any outstanding requests first
573 *     via blk_cleanup_queue().
574 **/
575static void blk_release_queue(struct kobject *kobj)
576{
577	struct request_queue *q =
578		container_of(kobj, struct request_queue, kobj);
579
580	bdi_exit(&q->backing_dev_info);
581	blkcg_exit_queue(q);
582
583	if (q->elevator) {
584		spin_lock_irq(q->queue_lock);
585		ioc_clear_queue(q);
586		spin_unlock_irq(q->queue_lock);
587		elevator_exit(q->elevator);
588	}
589
590	blk_exit_rl(&q->root_rl);
591
592	if (q->queue_tags)
593		__blk_queue_free_tags(q);
594
595	if (!q->mq_ops)
596		blk_free_flush_queue(q->fq);
597	else
598		blk_mq_release(q);
599
600	blk_trace_shutdown(q);
601
602	if (q->bio_split)
603		bioset_free(q->bio_split);
604
605	ida_simple_remove(&blk_queue_ida, q->id);
606	call_rcu(&q->rcu_head, blk_free_queue_rcu);
607}
608
609static const struct sysfs_ops queue_sysfs_ops = {
610	.show	= queue_attr_show,
611	.store	= queue_attr_store,
612};
613
614struct kobj_type blk_queue_ktype = {
615	.sysfs_ops	= &queue_sysfs_ops,
616	.default_attrs	= default_attrs,
617	.release	= blk_release_queue,
618};
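/*
 * blk_queue_ktype ties everything together: default_attrs creates the
 * files listed above when the queue kobject is added in
 * blk_register_queue(), queue_sysfs_ops routes every read and write
 * through the two dispatchers, and blk_release_queue() runs once the
 * kobject's reference count drops to zero.
 */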
619
620int blk_register_queue(struct gendisk *disk)
621{
622	int ret;
623	struct device *dev = disk_to_dev(disk);
624	struct request_queue *q = disk->queue;
625
626	if (WARN_ON(!q))
627		return -ENXIO;
628
629	/*
630	 * SCSI probing may synchronously create and destroy a lot of
631	 * request_queues for non-existent devices.  Shutting down a fully
632	 * functional queue takes measurable wallclock time as RCU grace
633	 * periods are involved.  To avoid excessive latency in these
634	 * cases, a request_queue starts out in a degraded mode which is
635	 * faster to shut down and is made fully functional here as
636	 * request_queues for non-existent devices never get registered.
637	 */
638	if (!blk_queue_init_done(q)) {
639		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
640		percpu_ref_switch_to_percpu(&q->q_usage_counter);
641		blk_queue_bypass_end(q);
642	}
643
644	ret = blk_trace_init_sysfs(dev);
645	if (ret)
646		return ret;
647
648	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
649	if (ret < 0) {
650		blk_trace_remove_sysfs(dev);
651		return ret;
652	}
653
654	kobject_uevent(&q->kobj, KOBJ_ADD);
655
656	if (q->mq_ops)
657		blk_mq_register_disk(disk);
658
659	if (!q->request_fn)
660		return 0;
661
662	ret = elv_register_queue(q);
663	if (ret) {
664		kobject_uevent(&q->kobj, KOBJ_REMOVE);
665		kobject_del(&q->kobj);
666		blk_trace_remove_sysfs(dev);
 
667		kobject_put(&dev->kobj);
668		return ret;
669	}
670
671	return 0;
672}
 
673
674void blk_unregister_queue(struct gendisk *disk)
675{
676	struct request_queue *q = disk->queue;
677
678	if (WARN_ON(!q))
679		return;
680
681	if (q->mq_ops)
682		blk_mq_unregister_disk(disk);
 
683
684	if (q->request_fn)
685		elv_unregister_queue(q);
686
687	kobject_uevent(&q->kobj, KOBJ_REMOVE);
688	kobject_del(&q->kobj);
689	blk_trace_remove_sysfs(disk_to_dev(disk));
690	kobject_put(&disk_to_dev(disk)->kobj);
691}
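
From userspace the attributes above appear as ordinary files under /sys/block/<disk>/queue/. A minimal sketch of driving them from C, assuming a device named sda and illustrative values only:

#include <stdio.h>

int main(void)
{
	char buf[64];
	/* Read path: handled by queue_requests_show() above. */
	FILE *f = fopen("/sys/block/sda/queue/nr_requests", "r");

	if (!f || !fgets(buf, sizeof(buf), f)) {
		perror("read nr_requests");
		return 1;
	}
	fclose(f);
	printf("nr_requests: %s", buf);

	/* Write path: parsed by queue_var_store(); queue_requests_store()
	 * clamps anything below BLKDEV_MIN_RQ. Requires root. */
	f = fopen("/sys/block/sda/queue/nr_requests", "w");
	if (!f || fprintf(f, "128\n") < 0) {
		perror("write nr_requests");
		return 1;
	}
	fclose(f);
	return 0;
}
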
v5.14.15 (block/blk-sysfs.c)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Functions related to sysfs handling
  4 */
  5#include <linux/kernel.h>
  6#include <linux/slab.h>
  7#include <linux/module.h>
  8#include <linux/bio.h>
  9#include <linux/blkdev.h>
 10#include <linux/backing-dev.h>
 11#include <linux/blktrace_api.h>
 12#include <linux/blk-mq.h>
 13#include <linux/blk-cgroup.h>
 14#include <linux/debugfs.h>
 15
 16#include "blk.h"
 17#include "blk-mq.h"
 18#include "blk-mq-debugfs.h"
 19#include "blk-wbt.h"
 20
 21struct queue_sysfs_entry {
 22	struct attribute attr;
 23	ssize_t (*show)(struct request_queue *, char *);
 24	ssize_t (*store)(struct request_queue *, const char *, size_t);
 25};
 26
 27static ssize_t
 28queue_var_show(unsigned long var, char *page)
 29{
 30	return sprintf(page, "%lu\n", var);
 31}
 32
 33static ssize_t
 34queue_var_store(unsigned long *var, const char *page, size_t count)
 35{
 36	int err;
 37	unsigned long v;
 38
 39	err = kstrtoul(page, 10, &v);
 40	if (err || v > UINT_MAX)
 41		return -EINVAL;
 42
 43	*var = v;
 44
 45	return count;
 46}
 47
 48static ssize_t queue_var_store64(s64 *var, const char *page)
 49{
 50	int err;
 51	s64 v;
 52
 53	err = kstrtos64(page, 10, &v);
 54	if (err < 0)
 55		return err;
 56
 57	*var = v;
 58	return 0;
 59}
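/*
 * Unlike queue_var_store(), this variant parses a signed 64-bit value and
 * leaves range checking to the caller; queue_wb_lat_store() below relies
 * on that so a written -1 can select the default wbt latency via
 * wbt_default_latency_nsec().
 */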
 60
 61static ssize_t queue_requests_show(struct request_queue *q, char *page)
 62{
 63	return queue_var_show(q->nr_requests, page);
 64}
 65
 66static ssize_t
 67queue_requests_store(struct request_queue *q, const char *page, size_t count)
 68{
 69	unsigned long nr;
 70	int ret, err;
 71
 72	if (!queue_is_mq(q))
 73		return -EINVAL;
 74
 75	ret = queue_var_store(&nr, page, count);
 76	if (ret < 0)
 77		return ret;
 78
 79	if (nr < BLKDEV_MIN_RQ)
 80		nr = BLKDEV_MIN_RQ;
 81
 82	err = blk_mq_update_nr_requests(q, nr);
 83	if (err)
 84		return err;
 85
 86	return ret;
 87}
 88
 89static ssize_t queue_ra_show(struct request_queue *q, char *page)
 90{
 91	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
 92					(PAGE_SHIFT - 10);
 93
 94	return queue_var_show(ra_kb, page);
 95}
 96
 97static ssize_t
 98queue_ra_store(struct request_queue *q, const char *page, size_t count)
 99{
100	unsigned long ra_kb;
101	ssize_t ret = queue_var_store(&ra_kb, page, count);
102
103	if (ret < 0)
104		return ret;
105
106	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
107
108	return ret;
109}
110
111static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
112{
113	int max_sectors_kb = queue_max_sectors(q) >> 1;
114
115	return queue_var_show(max_sectors_kb, page);
116}
117
118static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
119{
120	return queue_var_show(queue_max_segments(q), page);
121}
122
123static ssize_t queue_max_discard_segments_show(struct request_queue *q,
124		char *page)
125{
126	return queue_var_show(queue_max_discard_segments(q), page);
127}
128
129static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
130{
131	return queue_var_show(q->limits.max_integrity_segments, page);
132}
133
134static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
135{
136	return queue_var_show(queue_max_segment_size(q), page);
137}
138
139static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
140{
141	return queue_var_show(queue_logical_block_size(q), page);
142}
143
144static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
145{
146	return queue_var_show(queue_physical_block_size(q), page);
147}
148
149static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
150{
151	return queue_var_show(q->limits.chunk_sectors, page);
152}
153
154static ssize_t queue_io_min_show(struct request_queue *q, char *page)
155{
156	return queue_var_show(queue_io_min(q), page);
157}
158
159static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
160{
161	return queue_var_show(queue_io_opt(q), page);
162}
163
164static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
165{
166	return queue_var_show(q->limits.discard_granularity, page);
167}
168
169static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
170{
171
172	return sprintf(page, "%llu\n",
173		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
174}
175
176static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
177{
178	return sprintf(page, "%llu\n",
179		       (unsigned long long)q->limits.max_discard_sectors << 9);
180}
181
182static ssize_t queue_discard_max_store(struct request_queue *q,
183				       const char *page, size_t count)
184{
185	unsigned long max_discard;
186	ssize_t ret = queue_var_store(&max_discard, page, count);
187
188	if (ret < 0)
189		return ret;
190
191	if (max_discard & (q->limits.discard_granularity - 1))
192		return -EINVAL;
193
194	max_discard >>= 9;
195	if (max_discard > UINT_MAX)
196		return -EINVAL;
197
198	if (max_discard > q->limits.max_hw_discard_sectors)
199		max_discard = q->limits.max_hw_discard_sectors;
200
201	q->limits.max_discard_sectors = max_discard;
202	return ret;
203}
204
205static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
206{
207	return queue_var_show(0, page);
208}
209
210static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
211{
212	return sprintf(page, "%llu\n",
213		(unsigned long long)q->limits.max_write_same_sectors << 9);
214}
215
216static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
217{
218	return sprintf(page, "%llu\n",
219		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
220}
221
222static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
223						 char *page)
224{
225	return queue_var_show(queue_zone_write_granularity(q), page);
226}
227
228static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
229{
230	unsigned long long max_sectors = q->limits.max_zone_append_sectors;
231
232	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
233}
234
235static ssize_t
236queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
237{
238	unsigned long max_sectors_kb,
239		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
240			page_kb = 1 << (PAGE_SHIFT - 10);
241	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
242
243	if (ret < 0)
244		return ret;
245
246	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
247					 q->limits.max_dev_sectors >> 1);
248
249	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
250		return -EINVAL;
251
252	spin_lock_irq(&q->queue_lock);
253	q->limits.max_sectors = max_sectors_kb << 1;
254	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
255	spin_unlock_irq(&q->queue_lock);
256
257	return ret;
258}
259
260static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
261{
262	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
263
264	return queue_var_show(max_hw_sectors_kb, page);
265}
266
267static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
268{
269	return queue_var_show(q->limits.virt_boundary_mask, page);
270}
271
272#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
273static ssize_t								\
274queue_##name##_show(struct request_queue *q, char *page)		\
275{									\
276	int bit;							\
277	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
278	return queue_var_show(neg ? !bit : bit, page);			\
279}									\
280static ssize_t								\
281queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
282{									\
283	unsigned long val;						\
284	ssize_t ret;							\
285	ret = queue_var_store(&val, page, count);			\
286	if (ret < 0)							\
287		 return ret;						\
288	if (neg)							\
289		val = !val;						\
290									\
 
291	if (val)							\
292		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
293	else								\
294		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
 
295	return ret;							\
296}
297
298QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
299QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
300QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
301QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
302#undef QUEUE_SYSFS_BIT_FNS
303
304static ssize_t queue_zoned_show(struct request_queue *q, char *page)
305{
306	switch (blk_queue_zoned_model(q)) {
307	case BLK_ZONED_HA:
308		return sprintf(page, "host-aware\n");
309	case BLK_ZONED_HM:
310		return sprintf(page, "host-managed\n");
311	default:
312		return sprintf(page, "none\n");
313	}
314}
315
316static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
317{
318	return queue_var_show(blk_queue_nr_zones(q), page);
319}
320
321static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
322{
323	return queue_var_show(queue_max_open_zones(q), page);
324}
325
326static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
327{
328	return queue_var_show(queue_max_active_zones(q), page);
329}
330
331static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
332{
333	return queue_var_show((blk_queue_nomerges(q) << 1) |
334			       blk_queue_noxmerges(q), page);
335}
336
337static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
338				    size_t count)
339{
340	unsigned long nm;
341	ssize_t ret = queue_var_store(&nm, page, count);
342
343	if (ret < 0)
344		return ret;
345
346	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
347	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
 
348	if (nm == 2)
349		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
350	else if (nm)
351		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
 
352
353	return ret;
354}
355
356static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
357{
358	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
359	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);
360
361	return queue_var_show(set << force, page);
362}
363
364static ssize_t
365queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
366{
367	ssize_t ret = -EINVAL;
368#ifdef CONFIG_SMP
369	unsigned long val;
370
371	ret = queue_var_store(&val, page, count);
372	if (ret < 0)
373		return ret;
374
 
375	if (val == 2) {
376		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
377		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
378	} else if (val == 1) {
379		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
380		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
381	} else if (val == 0) {
382		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
383		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
384	}
 
385#endif
386	return ret;
387}
388
389static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
390{
391	int val;
392
393	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
394		val = BLK_MQ_POLL_CLASSIC;
395	else
396		val = q->poll_nsec / 1000;
397
398	return sprintf(page, "%d\n", val);
399}
400
401static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
402				size_t count)
403{
404	int err, val;
405
406	if (!q->mq_ops || !q->mq_ops->poll)
407		return -EINVAL;
408
409	err = kstrtoint(page, 10, &val);
410	if (err < 0)
411		return err;
412
413	if (val == BLK_MQ_POLL_CLASSIC)
414		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
415	else if (val >= 0)
416		q->poll_nsec = val * 1000;
417	else
418		return -EINVAL;
419
420	return count;
421}
422
423static ssize_t queue_poll_show(struct request_queue *q, char *page)
424{
425	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
426}
427
428static ssize_t queue_poll_store(struct request_queue *q, const char *page,
429				size_t count)
430{
431	unsigned long poll_on;
432	ssize_t ret;
433
434	if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
435	    !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
436		return -EINVAL;
437
438	ret = queue_var_store(&poll_on, page, count);
439	if (ret < 0)
440		return ret;
441
442	if (poll_on) {
443		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
444	} else {
445		blk_mq_freeze_queue(q);
446		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
447		blk_mq_unfreeze_queue(q);
448	}
449
450	return ret;
451}
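/*
 * Two related files are defined here: "io_poll" toggles QUEUE_FLAG_POLL
 * and is only writable when the tag set actually provides a poll queue
 * map, while "io_poll_delay" (above) stores a hybrid-poll delay in
 * microseconds, converted to q->poll_nsec, or the BLK_MQ_POLL_CLASSIC
 * sentinel (-1) to keep classic polling.  Clearing the poll flag freezes
 * the queue first so no polled I/O is in flight.
 */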
452
453static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
454{
455	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
456}
 
457
458static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
459				  size_t count)
460{
461	unsigned int val;
462	int err;
463
464	err = kstrtou32(page, 10, &val);
465	if (err || val == 0)
466		return -EINVAL;
467
468	blk_queue_rq_timeout(q, msecs_to_jiffies(val));
469
470	return count;
471}
472
473static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
474{
475	if (!wbt_rq_qos(q))
476		return -EINVAL;
477
478	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
479}
480
481static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
482				  size_t count)
483{
484	struct rq_qos *rqos;
485	ssize_t ret;
486	s64 val;
487
488	ret = queue_var_store64(&val, page);
489	if (ret < 0)
490		return ret;
491	if (val < -1)
492		return -EINVAL;
493
494	rqos = wbt_rq_qos(q);
495	if (!rqos) {
496		ret = wbt_init(q);
497		if (ret)
498			return ret;
499	}
500
501	if (val == -1)
502		val = wbt_default_latency_nsec(q);
503	else if (val >= 0)
504		val *= 1000ULL;
505
506	if (wbt_get_min_lat(q) == val)
507		return count;
508
509	/*
510	 * Ensure that the queue is idled, in case the latency update
511	 * ends up either enabling or disabling wbt completely. We can't
512	 * have IO inflight if that happens.
513	 */
514	blk_mq_freeze_queue(q);
515	blk_mq_quiesce_queue(q);
516
517	wbt_set_min_lat(q, val);
518
519	blk_mq_unquiesce_queue(q);
520	blk_mq_unfreeze_queue(q);
521
522	return count;
523}
524
525static ssize_t queue_wc_show(struct request_queue *q, char *page)
526{
527	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
528		return sprintf(page, "write back\n");
529
530	return sprintf(page, "write through\n");
531}
532
533static ssize_t queue_wc_store(struct request_queue *q, const char *page,
534			      size_t count)
535{
536	int set = -1;
 
537
538	if (!strncmp(page, "write back", 10))
539		set = 1;
540	else if (!strncmp(page, "write through", 13) ||
541		 !strncmp(page, "none", 4))
542		set = 0;
543
544	if (set == -1)
545		return -EINVAL;
546
547	if (set)
548		blk_queue_flag_set(QUEUE_FLAG_WC, q);
549	else
550		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
 
551
552	return count;
553}
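/*
 * "write_cache" accepts the strings "write back", "write through" or
 * "none" (treated like "write through"); anything else is rejected with
 * -EINVAL.  As implemented here it only sets or clears QUEUE_FLAG_WC,
 * changing how the block layer issues flushes; it does not reprogram the
 * device's own cache setting.
 */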
554
555static ssize_t queue_fua_show(struct request_queue *q, char *page)
556{
557	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
558}
559
560static ssize_t queue_dax_show(struct request_queue *q, char *page)
561{
562	return queue_var_show(blk_queue_dax(q), page);
563}
564
565#define QUEUE_RO_ENTRY(_prefix, _name)			\
566static struct queue_sysfs_entry _prefix##_entry = {	\
567	.attr	= { .name = _name, .mode = 0444 },	\
568	.show	= _prefix##_show,			\
569};
570
571#define QUEUE_RW_ENTRY(_prefix, _name)			\
572static struct queue_sysfs_entry _prefix##_entry = {	\
573	.attr	= { .name = _name, .mode = 0644 },	\
574	.show	= _prefix##_show,			\
575	.store	= _prefix##_store,			\
576};
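/*
 * QUEUE_RO_ENTRY()/QUEUE_RW_ENTRY() replace the long list of hand-written
 * queue_sysfs_entry definitions seen in the older listing above: each
 * invocation below produces a <prefix>_entry with the given file name,
 * mode 0444 or 0644, and the matching <prefix>_show (and <prefix>_store)
 * handlers.
 */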
577
578QUEUE_RW_ENTRY(queue_requests, "nr_requests");
579QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
580QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
581QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
582QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
583QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
584QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
585QUEUE_RW_ENTRY(elv_iosched, "scheduler");
586
587QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
588QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
589QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
590QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
591QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");
592
593QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
594QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
595QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
596QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
597QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");
598
599QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
600QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
601QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
602QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");
603
604QUEUE_RO_ENTRY(queue_zoned, "zoned");
605QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
606QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
607QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");
608
609QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
610QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
611QUEUE_RW_ENTRY(queue_poll, "io_poll");
612QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
613QUEUE_RW_ENTRY(queue_wc, "write_cache");
614QUEUE_RO_ENTRY(queue_fua, "fua");
615QUEUE_RO_ENTRY(queue_dax, "dax");
616QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
617QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
618QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
619
620#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
621QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
622#endif
623
624/* legacy alias for logical_block_size: */
625static struct queue_sysfs_entry queue_hw_sector_size_entry = {
626	.attr = {.name = "hw_sector_size", .mode = 0444 },
627	.show = queue_logical_block_size_show,
628};
629
630QUEUE_RW_ENTRY(queue_nonrot, "rotational");
631QUEUE_RW_ENTRY(queue_iostats, "iostats");
632QUEUE_RW_ENTRY(queue_random, "add_random");
633QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");
634
635static struct attribute *queue_attrs[] = {
636	&queue_requests_entry.attr,
637	&queue_ra_entry.attr,
638	&queue_max_hw_sectors_entry.attr,
639	&queue_max_sectors_entry.attr,
640	&queue_max_segments_entry.attr,
641	&queue_max_discard_segments_entry.attr,
642	&queue_max_integrity_segments_entry.attr,
643	&queue_max_segment_size_entry.attr,
644	&elv_iosched_entry.attr,
645	&queue_hw_sector_size_entry.attr,
646	&queue_logical_block_size_entry.attr,
647	&queue_physical_block_size_entry.attr,
648	&queue_chunk_sectors_entry.attr,
649	&queue_io_min_entry.attr,
650	&queue_io_opt_entry.attr,
651	&queue_discard_granularity_entry.attr,
652	&queue_discard_max_entry.attr,
653	&queue_discard_max_hw_entry.attr,
654	&queue_discard_zeroes_data_entry.attr,
655	&queue_write_same_max_entry.attr,
656	&queue_write_zeroes_max_entry.attr,
657	&queue_zone_append_max_entry.attr,
658	&queue_zone_write_granularity_entry.attr,
659	&queue_nonrot_entry.attr,
660	&queue_zoned_entry.attr,
661	&queue_nr_zones_entry.attr,
662	&queue_max_open_zones_entry.attr,
663	&queue_max_active_zones_entry.attr,
664	&queue_nomerges_entry.attr,
665	&queue_rq_affinity_entry.attr,
666	&queue_iostats_entry.attr,
667	&queue_stable_writes_entry.attr,
668	&queue_random_entry.attr,
669	&queue_poll_entry.attr,
670	&queue_wc_entry.attr,
671	&queue_fua_entry.attr,
672	&queue_dax_entry.attr,
673	&queue_wb_lat_entry.attr,
674	&queue_poll_delay_entry.attr,
675	&queue_io_timeout_entry.attr,
676#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
677	&blk_throtl_sample_time_entry.attr,
678#endif
679	&queue_virt_boundary_mask_entry.attr,
680	NULL,
681};
682
683static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
684				int n)
685{
686	struct request_queue *q =
687		container_of(kobj, struct request_queue, kobj);
688
689	if (attr == &queue_io_timeout_entry.attr &&
690		(!q->mq_ops || !q->mq_ops->timeout))
691			return 0;
692
693	if ((attr == &queue_max_open_zones_entry.attr ||
694	     attr == &queue_max_active_zones_entry.attr) &&
695	    !blk_queue_is_zoned(q))
696		return 0;
697
698	return attr->mode;
699}
700
701static struct attribute_group queue_attr_group = {
702	.attrs = queue_attrs,
703	.is_visible = queue_attr_visible,
704};
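/*
 * queue_attr_visible() lets one static attribute list serve every queue:
 * "io_timeout" is hidden when the driver has no ->timeout handler, and
 * the max_open_zones/max_active_zones files are hidden on non-zoned
 * queues.  Returning 0 removes the file; returning attr->mode keeps it.
 */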
705
706
707#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
708
709static ssize_t
710queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
711{
712	struct queue_sysfs_entry *entry = to_queue(attr);
713	struct request_queue *q =
714		container_of(kobj, struct request_queue, kobj);
715	ssize_t res;
716
717	if (!entry->show)
718		return -EIO;
719	mutex_lock(&q->sysfs_lock);
720	res = entry->show(q, page);
721	mutex_unlock(&q->sysfs_lock);
722	return res;
723}
724
725static ssize_t
726queue_attr_store(struct kobject *kobj, struct attribute *attr,
727		    const char *page, size_t length)
728{
729	struct queue_sysfs_entry *entry = to_queue(attr);
730	struct request_queue *q;
731	ssize_t res;
732
733	if (!entry->store)
734		return -EIO;
735
736	q = container_of(kobj, struct request_queue, kobj);
737	mutex_lock(&q->sysfs_lock);
738	res = entry->store(q, page, length);
739	mutex_unlock(&q->sysfs_lock);
740	return res;
741}
742
743static void blk_free_queue_rcu(struct rcu_head *rcu_head)
744{
745	struct request_queue *q = container_of(rcu_head, struct request_queue,
746					       rcu_head);
747	kmem_cache_free(blk_requestq_cachep, q);
748}
749
750/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
751static void blk_exit_queue(struct request_queue *q)
752{
753	/*
754	 * Since the I/O scheduler exit code may access cgroup information,
755	 * perform I/O scheduler exit before disassociating from the block
756	 * cgroup controller.
757	 */
758	if (q->elevator) {
759		ioc_clear_queue(q);
760		__elevator_exit(q, q->elevator);
761	}
762
763	/*
764	 * Remove all references to @q from the block cgroup controller before
765	 * restoring @q->queue_lock, so that restoring this pointer cannot cause
766	 * e.g. blkcg_print_blkgs() to crash.
767	 */
768	blkcg_exit_queue(q);
769
770	/*
771	 * Since the cgroup code may dereference the @q->backing_dev_info
772	 * pointer, only decrease its reference count after having removed the
773	 * association with the block cgroup controller.
774	 */
775	bdi_put(q->backing_dev_info);
776}
777
778/**
779 * blk_release_queue - releases all allocated resources of the request_queue
780 * @kobj: pointer to a kobject, whose container is a request_queue
781 *
782 * This function releases all allocated resources of the request queue.
783 *
784 * The struct request_queue refcount is incremented with blk_get_queue() and
785 * decremented with blk_put_queue(). Once the refcount reaches 0 this function
786 * is called.
787 *
788 * For drivers that have a request_queue on a gendisk and added with
789 * __device_add_disk() the refcount to request_queue will reach 0 with
790 * the last put_disk() called by the driver. For drivers which don't use
791 * __device_add_disk() this happens with blk_cleanup_queue().
792 *
793 * Drivers exist which depend on the release of the request_queue to be
794 * synchronous; it should not be deferred.
795 *
796 * Context: can sleep
797 */
798static void blk_release_queue(struct kobject *kobj)
799{
800	struct request_queue *q =
801		container_of(kobj, struct request_queue, kobj);
802
803	might_sleep();
 
804
805	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
806		blk_stat_remove_callback(q, q->poll_cb);
807	blk_stat_free_callback(q->poll_cb);
808
809	blk_free_queue_stats(q->stats);
810
811	if (queue_is_mq(q)) {
812		struct blk_mq_hw_ctx *hctx;
813		int i;
814
815		cancel_delayed_work_sync(&q->requeue_work);
816
817		queue_for_each_hw_ctx(q, hctx, i)
818			cancel_delayed_work_sync(&hctx->run_work);
819	}
820
821	blk_exit_queue(q);
822
823	blk_queue_free_zone_bitmaps(q);
 
824
825	if (queue_is_mq(q))
826		blk_mq_release(q);
827
828	blk_trace_shutdown(q);
829	mutex_lock(&q->debugfs_mutex);
830	debugfs_remove_recursive(q->debugfs_dir);
831	mutex_unlock(&q->debugfs_mutex);
832
833	if (queue_is_mq(q))
834		blk_mq_debugfs_unregister(q);
835
836	bioset_exit(&q->bio_split);
837
838	ida_simple_remove(&blk_queue_ida, q->id);
839	call_rcu(&q->rcu_head, blk_free_queue_rcu);
840}
841
842static const struct sysfs_ops queue_sysfs_ops = {
843	.show	= queue_attr_show,
844	.store	= queue_attr_store,
845};
846
847struct kobj_type blk_queue_ktype = {
848	.sysfs_ops	= &queue_sysfs_ops,
 
849	.release	= blk_release_queue,
850};
851
852/**
853 * blk_register_queue - register a block layer queue with sysfs
854 * @disk: Disk of which the request queue should be registered with sysfs.
855 */
856int blk_register_queue(struct gendisk *disk)
857{
858	int ret;
859	struct device *dev = disk_to_dev(disk);
860	struct request_queue *q = disk->queue;
861
862	if (WARN_ON(!q))
863		return -ENXIO;
864
865	WARN_ONCE(blk_queue_registered(q),
866		  "%s is registering an already registered queue\n",
867		  kobject_name(&dev->kobj));
868
869	blk_queue_update_readahead(q);
870
871	ret = blk_trace_init_sysfs(dev);
872	if (ret)
873		return ret;
874
875	mutex_lock(&q->sysfs_dir_lock);
876
877	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
878	if (ret < 0) {
879		blk_trace_remove_sysfs(dev);
880		goto unlock;
881	}
882
883	ret = sysfs_create_group(&q->kobj, &queue_attr_group);
884	if (ret) {
885		blk_trace_remove_sysfs(dev);
886		kobject_del(&q->kobj);
887		kobject_put(&dev->kobj);
888		goto unlock;
889	}
890
891	mutex_lock(&q->debugfs_mutex);
892	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
893					    blk_debugfs_root);
894	mutex_unlock(&q->debugfs_mutex);
895
896	if (queue_is_mq(q)) {
897		__blk_mq_register_dev(dev, q);
898		blk_mq_debugfs_register(q);
899	}
900
901	mutex_lock(&q->sysfs_lock);
902	if (q->elevator) {
903		ret = elv_register_queue(q, false);
904		if (ret) {
905			mutex_unlock(&q->sysfs_lock);
906			mutex_unlock(&q->sysfs_dir_lock);
907			kobject_del(&q->kobj);
908			blk_trace_remove_sysfs(dev);
909			kobject_put(&dev->kobj);
910			return ret;
911		}
912	}
913
914	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
915	wbt_enable_default(q);
916	blk_throtl_register_queue(q);
917
918	/* Now everything is ready and send out KOBJ_ADD uevent */
919	kobject_uevent(&q->kobj, KOBJ_ADD);
920	if (q->elevator)
921		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
922	mutex_unlock(&q->sysfs_lock);
923
924	ret = 0;
925unlock:
926	mutex_unlock(&q->sysfs_dir_lock);
927
928	/*
929	 * SCSI probing may synchronously create and destroy a lot of
930	 * request_queues for non-existent devices.  Shutting down a fully
931	 * functional queue takes measurable wallclock time as RCU grace
932	 * periods are involved.  To avoid excessive latency in these
933	 * cases, a request_queue starts out in a degraded mode which is
934	 * faster to shut down and is made fully functional here as
935	 * request_queues for non-existent devices never get registered.
936	 */
937	if (!blk_queue_init_done(q)) {
938		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
939		percpu_ref_switch_to_percpu(&q->q_usage_counter);
940	}
941
942	return ret;
943}
944EXPORT_SYMBOL_GPL(blk_register_queue);
945
946/**
947 * blk_unregister_queue - counterpart of blk_register_queue()
948 * @disk: Disk of which the request queue should be unregistered from sysfs.
949 *
950 * Note: the caller is responsible for guaranteeing that this function is called
951 * after blk_register_queue() has finished.
952 */
953void blk_unregister_queue(struct gendisk *disk)
954{
955	struct request_queue *q = disk->queue;
956
957	if (WARN_ON(!q))
958		return;
959
960	/* Return early if disk->queue was never registered. */
961	if (!blk_queue_registered(q))
962		return;
963
964	/*
965	 * Since sysfs_remove_dir() prevents adding new directory entries
966	 * before removal of existing entries starts, protect against
967	 * concurrent elv_iosched_store() calls.
968	 */
969	mutex_lock(&q->sysfs_lock);
970	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
971	mutex_unlock(&q->sysfs_lock);
972
973	mutex_lock(&q->sysfs_dir_lock);
974	/*
975	 * Remove the sysfs attributes before unregistering the queue data
976	 * structures that can be modified through sysfs.
977	 */
978	if (queue_is_mq(q))
979		blk_mq_unregister_dev(disk_to_dev(disk), q);
980
981	kobject_uevent(&q->kobj, KOBJ_REMOVE);
982	kobject_del(&q->kobj);
983	blk_trace_remove_sysfs(disk_to_dev(disk));
984
985	mutex_lock(&q->sysfs_lock);
986	if (q->elevator)
987		elv_unregister_queue(q);
988	mutex_unlock(&q->sysfs_lock);
989	mutex_unlock(&q->sysfs_dir_lock);
990
991	kobject_put(&disk_to_dev(disk)->kobj);
992}