[Page chrome from the hosting site, not part of the source file: "Linux Audio" site banner, an advertisement for a Yocto/OpenEmbedded training course (Mar 24-27, 2025, US time zones) with a "Register" link, and a "Loading..." placeholder.]
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef __BLK_NULL_BLK_H
  3#define __BLK_NULL_BLK_H
  4
  5#undef pr_fmt
  6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  7
  8#include <linux/blkdev.h>
  9#include <linux/slab.h>
 10#include <linux/blk-mq.h>
 11#include <linux/hrtimer.h>
 12#include <linux/configfs.h>
 13#include <linux/badblocks.h>
 14#include <linux/fault-inject.h>
 15#include <linux/spinlock.h>
 16#include <linux/mutex.h>
 17
/* Per-command state tracked by null_blk for each in-flight request. */
struct nullb_cmd {
	blk_status_t error;	/* completion status reported back to the block layer */
	bool fake_timeout;	/* fault injection: complete as if the command timed out */
	struct nullb_queue *nq;	/* queue this command belongs to */
	struct hrtimer timer;	/* delayed completion (completion_nsec) — presumably timer irqmode; confirm */
};
 24
/* Per-hardware-queue context. */
struct nullb_queue {
	struct nullb_device *dev;	/* owning device */
	unsigned int requeue_selection;	/* counter used by requeue fault injection — TODO confirm semantics */

	struct list_head poll_list;	/* commands awaiting poll-based completion — by naming; verify */
	spinlock_t poll_lock;		/* presumably protects poll_list; confirm against callers */
};
 32
/* State of one emulated zone on a zoned null_blk device. */
struct nullb_zone {
	/*
	 * Zone lock to prevent concurrent modification of a zone write
	 * pointer position and condition: with memory backing, a write
	 * command execution may sleep on memory allocation. For this case,
	 * use mutex as the zone lock. Otherwise, use the spinlock for
	 * locking the zone.
	 */
	union {
		spinlock_t spinlock;
		struct mutex mutex;
	};
	enum blk_zone_type type;	/* conventional or sequential-write zone */
	enum blk_zone_cond cond;	/* current condition (empty/open/closed/full/...) */
	sector_t start;			/* first sector of the zone */
	sector_t wp;			/* write pointer position */
	unsigned int len;		/* zone length — presumably in sectors; confirm */
	unsigned int capacity;		/* usable capacity, at most len — verify units match len */
};
 52
 
 
 
 
 
 
 
/*
 * Per-device configuration and state. Exposed through configfs (see the
 * config_group member); most fields below map to configfs attributes.
 */
struct nullb_device {
	struct nullb *nullb;		/* live instance backing this config — NOTE(review): likely NULL until powered on; confirm */
	struct config_group group;	/* configfs representation of this device */
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	struct fault_config timeout_config;		/* fault injection: command timeouts */
	struct fault_config requeue_config;		/* fault injection: command requeues */
	struct fault_config init_hctx_fault_config;	/* fault injection: hctx init failures */
#endif
	struct radix_tree_root data; /* data stored in the disk */
	struct radix_tree_root cache; /* disk cache data */
	unsigned long flags; /* device flags */
	unsigned int curr_cache;	/* current cache usage — presumably bytes; verify against cache_size users */
	struct badblocks badblocks;	/* simulated bad-block ranges */

	/* Zoned-device emulation state. */
	unsigned int nr_zones;		/* total number of zones */
	unsigned int nr_zones_imp_open;	/* zones currently implicitly open */
	unsigned int nr_zones_exp_open;	/* zones currently explicitly open */
	unsigned int nr_zones_closed;	/* zones currently closed */
	unsigned int imp_close_zone_no;	/* zone number for implicit-close selection — TODO confirm */
	struct nullb_zone *zones;	/* array of nr_zones zone descriptors */
	sector_t zone_size_sects;	/* zone size in sectors (derived from zone_size) */
	bool need_zone_res_mgmt;	/* enforce open/active zone limits — by naming; confirm */
	spinlock_t zone_res_lock;	/* presumably protects the zone resource counters above; verify */

	unsigned long size; /* device size in MB */
	unsigned long completion_nsec; /* time in ns to complete a request */
	unsigned long cache_size; /* disk cache size in MB */
	unsigned long zone_size; /* zone size in MB if device is zoned */
	unsigned long zone_capacity; /* zone capacity in MB if device is zoned */
	unsigned int zone_nr_conv; /* number of conventional zones */
	unsigned int zone_max_open; /* max number of open zones */
	unsigned int zone_max_active; /* max number of active zones */
	unsigned int submit_queues; /* number of submission queues */
	unsigned int prev_submit_queues; /* number of submission queues before change */
	unsigned int poll_queues; /* number of IOPOLL submission queues */
	unsigned int prev_poll_queues; /* number of IOPOLL submission queues before change */
	unsigned int home_node; /* home node for the device */
	unsigned int queue_mode; /* block interface */
	unsigned int blocksize; /* block size */
	unsigned int max_sectors; /* Max sectors per command */
	unsigned int irqmode; /* IRQ completion handler */
	unsigned int hw_queue_depth; /* queue depth */
	unsigned int index; /* index of the disk, only valid with a disk */
	unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
	bool blocking; /* blocking blk-mq device */
	bool use_per_node_hctx; /* use per-node allocation for hardware context */
	bool power; /* power on/off the device */
	bool memory_backed; /* if data is stored in memory */
	bool discard; /* if support discard */
	bool zoned; /* if device is zoned */
	bool virt_boundary; /* virtual boundary on/off for the device */
	bool no_sched; /* no IO scheduler for the device */
	bool shared_tags; /* share tag set between devices for blk-mq */
	bool shared_tag_bitmap; /* use hostwide shared tags */
};
108
/* A live null_blk instance: the gendisk/request_queue plus runtime state. */
struct nullb {
	struct nullb_device *dev;	/* configuration this instance was created from */
	struct list_head list;		/* entry in a driver-wide device list — TODO confirm list head */
	unsigned int index;		/* instance index — relation to dev->index unclear from here; verify */
	struct request_queue *q;	/* block layer request queue */
	struct gendisk *disk;		/* the exposed disk */
	struct blk_mq_tag_set *tag_set;	/* active tag set; may point at __tag_set or a shared set — confirm */
	struct blk_mq_tag_set __tag_set; /* per-instance tag set storage */
	atomic_long_t cur_bytes;	/* remaining byte budget for mbps throttling — by naming; verify */
	struct hrtimer bw_timer;	/* bandwidth-throttle timer */
	unsigned long cache_flush_pos;	/* cache writeback scan position — TODO confirm */
	spinlock_t lock;		/* NOTE(review): protected data not visible here; check users */

	struct nullb_queue *queues;	/* array of per-queue contexts */
	char disk_name[DISK_NAME_LEN];	/* device name, e.g. shown in /dev */
};
125
/* Discard a sector range on @dev — presumably frees memory-backed pages; confirm in implementation. */
blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector,
				 sector_t nr_sectors);
/* Execute a non-zoned command (@op) covering @nr_sectors starting at @sector. */
blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
			      sector_t sector, unsigned int nr_sectors);
130
#ifdef CONFIG_BLK_DEV_ZONED
/* Zoned-device support; implementations provided when zoned support is built in. */
int null_init_zoned_dev(struct nullb_device *dev, struct queue_limits *lim);
int null_register_zoned_dev(struct nullb *nullb);
void null_free_zoned_dev(struct nullb_device *dev);
int null_report_zones(struct gendisk *disk, sector_t sector,
		      unsigned int nr_zones, report_zones_cb cb, void *data);
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
				    sector_t sector, sector_t nr_sectors);
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len);
ssize_t zone_cond_store(struct nullb_device *dev, const char *page,
			size_t count, enum blk_zone_cond cond);
#else
/*
 * Stubs for builds without CONFIG_BLK_DEV_ZONED: creating a zoned device
 * fails, zoned commands report "not supported", and the valid-read-length
 * clamp is a pass-through.
 */
static inline int null_init_zoned_dev(struct nullb_device *dev,
		struct queue_limits *lim)
{
	pr_err("CONFIG_BLK_DEV_ZONED not enabled\n");
	return -EINVAL;
}
static inline int null_register_zoned_dev(struct nullb *nullb)
{
	return -ENODEV;
}
static inline void null_free_zoned_dev(struct nullb_device *dev) {}
static inline blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
			enum req_op op, sector_t sector, sector_t nr_sectors)
{
	return BLK_STS_NOTSUPP;
}
static inline size_t null_zone_valid_read_len(struct nullb *nullb,
					      sector_t sector,
					      unsigned int len)
{
	return len;
}
static inline ssize_t zone_cond_store(struct nullb_device *dev,
				      const char *page, size_t count,
				      enum blk_zone_cond cond)
{
	return -EOPNOTSUPP;
}
/* No report_zones callback when zoned support is compiled out. */
#define null_report_zones	NULL
#endif /* CONFIG_BLK_DEV_ZONED */
#endif /* __BLK_NULL_BLK_H */
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef __BLK_NULL_BLK_H
  3#define __BLK_NULL_BLK_H
  4
  5#undef pr_fmt
  6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  7
  8#include <linux/blkdev.h>
  9#include <linux/slab.h>
 10#include <linux/blk-mq.h>
 11#include <linux/hrtimer.h>
 12#include <linux/configfs.h>
 13#include <linux/badblocks.h>
 14#include <linux/fault-inject.h>
 15#include <linux/spinlock.h>
 16#include <linux/mutex.h>
 17
/* Per-command state tracked by null_blk for each in-flight request. */
struct nullb_cmd {
	/* Backing submission object: request (blk-mq modes) or bio (bio mode). */
	union {
		struct request *rq;
		struct bio *bio;
	};
	unsigned int tag;	/* tag allocated from the queue's tag_map — presumably bio mode only; confirm */
	blk_status_t error;	/* completion status reported back to the block layer */
	bool fake_timeout;	/* fault injection: complete as if the command timed out */
	struct nullb_queue *nq;	/* queue this command belongs to */
	struct hrtimer timer;	/* delayed completion (completion_nsec) — presumably timer irqmode; confirm */
};
 29
/* Per-queue context, including a private tag allocator for bio mode. */
struct nullb_queue {
	unsigned long *tag_map;		/* bitmap of in-use tags — presumably bio-mode allocator; confirm */
	wait_queue_head_t wait;		/* waiters blocked on tag exhaustion — by naming; verify */
	unsigned int queue_depth;	/* number of tags/commands in this queue */
	struct nullb_device *dev;	/* owning device */
	unsigned int requeue_selection;	/* counter used by requeue fault injection — TODO confirm semantics */

	struct list_head poll_list;	/* commands awaiting poll-based completion — by naming; verify */
	spinlock_t poll_lock;		/* presumably protects poll_list; confirm against callers */

	struct nullb_cmd *cmds;		/* preallocated command array, one entry per tag — confirm sizing */
};
 42
/* State of one emulated zone on a zoned null_blk device. */
struct nullb_zone {
	/*
	 * Zone lock to prevent concurrent modification of a zone write
	 * pointer position and condition: with memory backing, a write
	 * command execution may sleep on memory allocation. For this case,
	 * use mutex as the zone lock. Otherwise, use the spinlock for
	 * locking the zone.
	 */
	union {
		spinlock_t spinlock;
		struct mutex mutex;
	};
	enum blk_zone_type type;	/* conventional or sequential-write zone */
	enum blk_zone_cond cond;	/* current condition (empty/open/closed/full/...) */
	sector_t start;			/* first sector of the zone */
	sector_t wp;			/* write pointer position */
	unsigned int len;		/* zone length — presumably in sectors; confirm */
	unsigned int capacity;		/* usable capacity, at most len — verify units match len */
};
 62
/* Queue modes: how commands are submitted to the driver (dev->queue_mode). */
enum {
	NULL_Q_BIO	= 0,	/* bio-based submission (struct bio in nullb_cmd) */
	NULL_Q_RQ	= 1,	/* legacy request mode — NOTE(review): possibly retained only for ABI; confirm */
	NULL_Q_MQ	= 2,	/* blk-mq submission (struct request in nullb_cmd) */
};
 69
/*
 * Per-device configuration and state. Exposed through configfs (see the
 * config_group member); most fields below map to configfs attributes.
 */
struct nullb_device {
	struct nullb *nullb;		/* live instance backing this config — NOTE(review): likely NULL until powered on; confirm */
	struct config_group group;	/* configfs representation of this device */
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	struct fault_config timeout_config;		/* fault injection: command timeouts */
	struct fault_config requeue_config;		/* fault injection: command requeues */
	struct fault_config init_hctx_fault_config;	/* fault injection: hctx init failures */
#endif
	struct radix_tree_root data; /* data stored in the disk */
	struct radix_tree_root cache; /* disk cache data */
	unsigned long flags; /* device flags */
	unsigned int curr_cache;	/* current cache usage — presumably bytes; verify against cache_size users */
	struct badblocks badblocks;	/* simulated bad-block ranges */

	/* Zoned-device emulation state. */
	unsigned int nr_zones;		/* total number of zones */
	unsigned int nr_zones_imp_open;	/* zones currently implicitly open */
	unsigned int nr_zones_exp_open;	/* zones currently explicitly open */
	unsigned int nr_zones_closed;	/* zones currently closed */
	unsigned int imp_close_zone_no;	/* zone number for implicit-close selection — TODO confirm */
	struct nullb_zone *zones;	/* array of nr_zones zone descriptors */
	sector_t zone_size_sects;	/* zone size in sectors (derived from zone_size) */
	bool need_zone_res_mgmt;	/* enforce open/active zone limits — by naming; confirm */
	spinlock_t zone_res_lock;	/* presumably protects the zone resource counters above; verify */

	unsigned long size; /* device size in MB */
	unsigned long completion_nsec; /* time in ns to complete a request */
	unsigned long cache_size; /* disk cache size in MB */
	unsigned long zone_size; /* zone size in MB if device is zoned */
	unsigned long zone_capacity; /* zone capacity in MB if device is zoned */
	unsigned int zone_nr_conv; /* number of conventional zones */
	unsigned int zone_max_open; /* max number of open zones */
	unsigned int zone_max_active; /* max number of active zones */
	unsigned int submit_queues; /* number of submission queues */
	unsigned int prev_submit_queues; /* number of submission queues before change */
	unsigned int poll_queues; /* number of IOPOLL submission queues */
	unsigned int prev_poll_queues; /* number of IOPOLL submission queues before change */
	unsigned int home_node; /* home node for the device */
	unsigned int queue_mode; /* block interface */
	unsigned int blocksize; /* block size */
	unsigned int max_sectors; /* Max sectors per command */
	unsigned int irqmode; /* IRQ completion handler */
	unsigned int hw_queue_depth; /* queue depth */
	unsigned int index; /* index of the disk, only valid with a disk */
	unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
	bool blocking; /* blocking blk-mq device */
	bool use_per_node_hctx; /* use per-node allocation for hardware context */
	bool power; /* power on/off the device */
	bool memory_backed; /* if data is stored in memory */
	bool discard; /* if support discard */
	bool zoned; /* if device is zoned */
	bool virt_boundary; /* virtual boundary on/off for the device */
	bool no_sched; /* no IO scheduler for the device */
	bool shared_tag_bitmap; /* use hostwide shared tags */
};
124
/* A live null_blk instance: the gendisk/request_queue plus runtime state. */
struct nullb {
	struct nullb_device *dev;	/* configuration this instance was created from */
	struct list_head list;		/* entry in a driver-wide device list — TODO confirm list head */
	unsigned int index;		/* instance index — relation to dev->index unclear from here; verify */
	struct request_queue *q;	/* block layer request queue */
	struct gendisk *disk;		/* the exposed disk */
	struct blk_mq_tag_set *tag_set;	/* active tag set; may point at __tag_set or a shared set — confirm */
	struct blk_mq_tag_set __tag_set; /* per-instance tag set storage */
	unsigned int queue_depth;	/* depth applied to each nullb_queue — verify against hw_queue_depth */
	atomic_long_t cur_bytes;	/* remaining byte budget for mbps throttling — by naming; verify */
	struct hrtimer bw_timer;	/* bandwidth-throttle timer */
	unsigned long cache_flush_pos;	/* cache writeback scan position — TODO confirm */
	spinlock_t lock;		/* NOTE(review): protected data not visible here; check users */

	struct nullb_queue *queues;	/* array of per-queue contexts */
	unsigned int nr_queues;		/* number of entries in queues[] */
	char disk_name[DISK_NAME_LEN];	/* device name, e.g. shown in /dev */
};
143
/* Discard a sector range on @dev — presumably frees memory-backed pages; confirm in implementation. */
blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector,
				 sector_t nr_sectors);
/* Execute a non-zoned command (@op) covering @nr_sectors starting at @sector. */
blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
			      sector_t sector, unsigned int nr_sectors);
148
#ifdef CONFIG_BLK_DEV_ZONED
/* Zoned-device support; implementations provided when zoned support is built in. */
int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q);
int null_register_zoned_dev(struct nullb *nullb);
void null_free_zoned_dev(struct nullb_device *dev);
int null_report_zones(struct gendisk *disk, sector_t sector,
		      unsigned int nr_zones, report_zones_cb cb, void *data);
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
				    sector_t sector, sector_t nr_sectors);
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len);
ssize_t zone_cond_store(struct nullb_device *dev, const char *page,
			size_t count, enum blk_zone_cond cond);
#else
/*
 * Stubs for builds without CONFIG_BLK_DEV_ZONED: creating a zoned device
 * fails, zoned commands report "not supported", and the valid-read-length
 * clamp is a pass-through.
 */
static inline int null_init_zoned_dev(struct nullb_device *dev,
				      struct request_queue *q)
{
	pr_err("CONFIG_BLK_DEV_ZONED not enabled\n");
	return -EINVAL;
}
static inline int null_register_zoned_dev(struct nullb *nullb)
{
	return -ENODEV;
}
static inline void null_free_zoned_dev(struct nullb_device *dev) {}
static inline blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
			enum req_op op, sector_t sector, sector_t nr_sectors)
{
	return BLK_STS_NOTSUPP;
}
static inline size_t null_zone_valid_read_len(struct nullb *nullb,
					      sector_t sector,
					      unsigned int len)
{
	return len;
}
static inline ssize_t zone_cond_store(struct nullb_device *dev,
				      const char *page, size_t count,
				      enum blk_zone_cond cond)
{
	return -EOPNOTSUPP;
}
/* No report_zones callback when zoned support is compiled out. */
#define null_report_zones	NULL
#endif /* CONFIG_BLK_DEV_ZONED */
#endif /* __BLK_NULL_BLK_H */