v5.9
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/ktime.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

struct block_device {
	dev_t			bd_dev;  /* not a kdev_t - it's a search key */
	int			bd_openers;
	struct inode *		bd_inode;	/* will die */
	struct super_block *	bd_super;
	struct mutex		bd_mutex;	/* open/close mutex */
	void *			bd_claiming;
	void *			bd_holder;
	int			bd_holders;
	bool			bd_write_holder;
#ifdef CONFIG_SYSFS
	struct list_head	bd_holder_disks;
#endif
	struct block_device *	bd_contains;
	u8			bd_partno;
	struct hd_struct *	bd_part;
	/* number of times partitions within this device have been opened. */
	unsigned		bd_part_count;
	int			bd_invalidated;
	struct gendisk *	bd_disk;
	struct backing_dev_info *bd_bdi;

	/* The counter of freeze processes */
	int			bd_fsfreeze_count;
	/* Mutex for freeze */
	struct mutex		bd_fsfreeze_mutex;
} __randomize_layout;

/*
 * Block error status values.  See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
#else
typedef u8 __bitwise blk_status_t;
#endif
#define	BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE    ((__force blk_status_t)11)

#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)

/*
 * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
 * related resources are unavailable, but the driver can guarantee the queue
 * will be rerun in the future once the resources become available again.
 *
 * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
 * a zone specific resource and IO to a different zone on the same device could
 * still be served. Examples of that are zones that are write-locked, but a read
 * to the same zone could be served.
 */
#define BLK_STS_ZONE_RESOURCE	((__force blk_status_t)14)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}
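
/*
 * Illustrative sketch, not part of this header: a failover driver such as
 * nvme-multipath can consult blk_path_error() on completion to decide
 * between retrying on another path and failing the I/O.  The
 * example_retry_on_other_path() helper below is a hypothetical stand-in.
 */
static inline void example_complete_rq(struct request *rq, blk_status_t error)
{
	if (blk_path_error(error)) {
		/* possibly a path failure: retry on a different path */
		example_retry_on_other_path(rq);
		return;
	}
	/* BLK_STS_TARGET, BLK_STS_MEDIUM, ...: retrying will not help */
	blk_mq_end_request(rq, error);
}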

/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
	u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				       sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
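
/*
 * Illustrative sketch, not part of this header: bio_issue_init() packs the
 * issue timestamp and the original bio size into one u64 (1 reserved bit,
 * 12 size bits, 51 time bits), which the helpers above unpack again:
 */
static inline void example_bio_issue(void)
{
	struct bio_issue issue = { .value = 0 };

	bio_issue_init(&issue, 8);		/* a 4 KiB bio is 8 sectors */
	/*
	 * bio_issue_size(&issue) now returns 8, and bio_issue_time(&issue)
	 * returns the low 51 bits of ktime_get_ns() sampled at init time;
	 * sizes above 4095 sectors are truncated to the low 12 bits.
	 */
}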

/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct gendisk		*bi_disk;
	unsigned int		bi_opf;		/* bottom bits req flags,
						 * top bits REQ_OP. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, etc and bvec pool number */
	unsigned short		bi_ioprio;
	unsigned short		bi_write_hint;
	blk_status_t		bi_status;
	u8			bi_partno;
	atomic_t		__bi_remaining;

	struct bvec_iter	bi_iter;

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it.  The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#endif

	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[];
};
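
/*
 * Illustrative sketch, not part of this header: bi_inline_vecs is a
 * flexible array member, so a bio with a small vec count can live in a
 * single allocation, conceptually:
 *
 *	bio = kmalloc(sizeof(struct bio) + nr * sizeof(struct bio_vec), gfp);
 *	bio->bi_io_vec = bio->bi_inline_vecs;
 *
 * The real allocation is done by bio_alloc_bioset(), which falls back to a
 * separate bvec pool allocation for larger vec counts.
 */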

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)

/*
 * bio flags
 */
enum {
	BIO_NO_PAGE_REF,	/* don't put/release bvec pages */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_USER_MAPPED,	/* contains user pages */
	BIO_NULL_MAPPED,	/* contains invalid user pages */
	BIO_WORKINGSET,		/* contains userspace workingset pages */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_THROTTLED,		/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_TRACKED,		/* set if bio goes through the rq_qos path */
	BIO_FLAG_LAST
};

/* See BVEC_POOL_OFFSET below before adding new flags */

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from.  We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(3)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
# error "BVEC_POOL_BITS is too small"
#endif
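
/*
 * Illustrative sketch, not part of this header: the pool index is stored
 * biased by one in the top three bits of bi_flags, roughly:
 */
static inline void example_set_bvec_pool(struct bio *bio, unsigned long idx)
{
	/* idx + 1, so that BVEC_POOL_IDX() == 0 means "no bvecs to free" */
	bio->bi_flags |= (idx + 1) << BVEC_POOL_OFFSET;
}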

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	BVEC_POOL_OFFSET

typedef __u32 __bitwise blk_mq_req_flags_t;

/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= 10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= 11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= 12,
	/* write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= 13,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 15,
	/* reset all the zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= 17,

	/* SCSI passthrough using struct scsi_request */
	REQ_OP_SCSI_IN		= 32,
	REQ_OP_SCSI_OUT		= 33,
	/* Driver private requests */
	REQ_OP_DRV_IN		= 34,
	REQ_OP_DRV_OUT		= 35,

	REQ_OP_LAST,
};
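
/*
 * Illustrative sketch, not part of this header: the least-significant-bit
 * rule above is what op_is_write() further down relies on:
 *
 *	op_is_write(REQ_OP_READ)		-> false (0 & 1)
 *	op_is_write(REQ_OP_WRITE)		-> true  (1 & 1)
 *	op_is_write(REQ_OP_SECURE_ERASE)	-> true  (5 & 1)
 */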

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,           /* Don't wait if request will block */
	/*
	 * When a shared kthread needs to issue a bio for a cgroup, doing
	 * so synchronously can lead to priority inversions as the kthread
	 * can be trapped waiting for that cgroup.  CGROUP_PUNT flag makes
	 * submit_bio() punt the actual issuing to a dedicated per-blkcg
	 * work item to avoid such priority inversions.
	 */
	__REQ_CGROUP_PUNT,

	/* command specific flags for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_HIPRI,

	/* for driver use */
	__REQ_DRV,
	__REQ_SWAP,		/* swapping request. */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
#define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)

#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
#define REQ_HIPRI		(1ULL << __REQ_HIPRI)

#define REQ_DRV			(1ULL << __REQ_DRV)
#define REQ_SWAP		(1ULL << __REQ_SWAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
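
/*
 * Illustrative sketch, not part of this header: an op and its flags are
 * OR'ed together into one bi_opf value, e.g. a synchronous write that must
 * reach stable media:
 *
 *	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
 *
 * bio_op() below then recovers REQ_OP_WRITE via REQ_OP_MASK, while
 * op_is_sync()/op_is_flush() test the flag bits.
 */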

enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
		unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}

static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
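
/*
 * Illustrative sketch, not part of this header:
 *
 *	op_is_sync(REQ_OP_READ)			-> true (reads are sync)
 *	op_is_sync(REQ_OP_WRITE)		-> false
 *	op_is_sync(REQ_OP_WRITE | REQ_SYNC)	-> true
 */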

static inline bool op_is_discard(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_opf op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}

static inline int op_stat_group(unsigned int op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	return op_is_write(op);
}
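
/*
 * Illustrative sketch, not part of this header: op_stat_group() exploits
 * STAT_READ == 0 and STAT_WRITE == 1 lining up with op_is_write():
 *
 *	op_stat_group(REQ_OP_READ)	-> STAT_READ
 *	op_stat_group(REQ_OP_WRITE)	-> STAT_WRITE
 *	op_stat_group(REQ_OP_DISCARD)	-> STAT_DISCARD
 */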

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
	return (cookie & BLK_QC_T_INTERNAL) != 0;
}
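
/*
 * Illustrative sketch, not part of this header: blk-mq builds a poll
 * cookie from the hardware queue number and the request tag, which the
 * helpers above take apart again:
 *
 *	blk_qc_t cookie = (hctx_idx << BLK_QC_T_SHIFT) | tag;
 *
 *	blk_qc_t_to_queue_num(cookie)	-> hctx_idx
 *	blk_qc_t_to_tag(cookie)		-> tag
 *
 * Internal (scheduler) tags additionally set BLK_QC_T_INTERNAL.
 */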

struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */

v3.5.6
 
/*
 * Block data types and constants.  Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#ifdef CONFIG_BLOCK

#include <linux/types.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *, int);
typedef void (bio_destructor_t) (struct bio *);

/*
 * was unsigned short, but we might as well be ready for > 64kB I/O pages
 */
struct bio_vec {
	struct page	*bv_page;
	unsigned int	bv_len;
	unsigned int	bv_offset;
};
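
/*
 * Illustrative sketch, not part of this header: a bio_vec describes one
 * contiguous range inside a page, e.g. the first 512 bytes of 'page':
 *
 *	struct bio_vec bv = {
 *		.bv_page	= page,
 *		.bv_len		= 512,
 *		.bv_offset	= 0,
 *	};
 */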

/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	sector_t		bi_sector;	/* device address in 512 byte
						   sectors */
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	unsigned long		bi_flags;	/* status, command, etc */
	unsigned long		bi_rw;		/* bottom bits READ/WRITE,
						 * top bits priority
						 */

	unsigned short		bi_vcnt;	/* how many bio_vec's */
	unsigned short		bi_idx;		/* current index into bvl_vec */

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;

	unsigned int		bi_size;	/* residual I/O count */

	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
	 */
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;

	unsigned int		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		bi_cnt;		/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Optional ioc and css associated with this bio.  Put on bio
	 * release.  Read comment on top of bio_associate_current().
	 */
	struct io_context	*bi_ioc;
	struct cgroup_subsys_state *bi_css;
#endif
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload *bi_integrity;  /* data integrity */
#endif

	bio_destructor_t	*bi_destructor;	/* destructor */

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};

/*
 * bio flags
 */
#define BIO_UPTODATE	0	/* ok after I/O completion */
#define BIO_RW_BLOCK	1	/* RW_AHEAD set, and read/write would block */
#define BIO_EOF		2	/* out-of-bounds error */
#define BIO_SEG_VALID	3	/* bi_phys_segments valid */
#define BIO_CLONED	4	/* doesn't own data */
#define BIO_BOUNCED	5	/* bio is a bounce bio */
#define BIO_USER_MAPPED 6	/* contains user pages */
#define BIO_EOPNOTSUPP	7	/* not supported */
#define BIO_NULL_MAPPED 8	/* contains invalid user pages */
#define BIO_FS_INTEGRITY 9	/* fs owns integrity data, not block layer */
#define BIO_QUIET	10	/* Make BIO Quiet */
#define BIO_MAPPED_INTEGRITY 11	/* integrity metadata has been remapped */
#define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))
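
/*
 * Illustrative sketch, not part of this header: bio_flagged() tests a
 * single flag bit of bi_flags, e.g. a cloned bio that does not own its
 * data pages:
 *
 *	if (bio_flagged(bio, BIO_CLONED))
 *		return;
 */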

/*
 * top 4 bits of bio flags indicate the pool this bio came from
 */
#define BIO_POOL_BITS		(4)
#define BIO_POOL_NONE		((1UL << BIO_POOL_BITS) - 1)
#define BIO_POOL_OFFSET		(BITS_PER_LONG - BIO_POOL_BITS)
#define BIO_POOL_MASK		(1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio)	((bio)->bi_flags >> BIO_POOL_OFFSET)

#endif /* CONFIG_BLOCK */

/*
 * Request flags.  For use in the cmd_flags field of struct request, and in
 * bi_rw of struct bio.  Note that some flags are only valid in either one.
 */
enum rq_flag_bits {
	/* common flags */
	__REQ_WRITE,		/* not set, read. set, write */
	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */

	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_DISCARD,		/* request to discard sectors */
	__REQ_SECURE,		/* secure discard (used with __REQ_DISCARD) */

	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
	__REQ_FUA,		/* forced unit access */
	__REQ_FLUSH,		/* request for cache flush */

	/* bio only flags */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_THROTTLED,	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */

	/* request only flags */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	__REQ_ELVPRIV,		/* elevator private data attached */
	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
	__REQ_ALLOCED,		/* request came from our alloc pool */
	__REQ_COPY_USER,	/* contains copies of user pages */
	__REQ_FLUSH_SEQ,	/* request for flush sequence */
	__REQ_IO_STAT,		/* account I/O stat */
	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_WRITE		(1 << __REQ_WRITE)
#define REQ_FAILFAST_DEV	(1 << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1 << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1 << __REQ_SYNC)
#define REQ_META		(1 << __REQ_META)
#define REQ_PRIO		(1 << __REQ_PRIO)
#define REQ_DISCARD		(1 << __REQ_DISCARD)
#define REQ_NOIDLE		(1 << __REQ_NOIDLE)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
	 REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
#define REQ_CLONE_MASK		REQ_COMMON_MASK

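/*
 * Illustrative sketch, not part of this header: in this era the block
 * core copied the flag bits shared between bios and requests through
 * REQ_COMMON_MASK when initializing a request from a bio, roughly:
 *
 *	req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
 */
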
#define REQ_RAHEAD		(1 << __REQ_RAHEAD)
#define REQ_THROTTLED		(1 << __REQ_THROTTLED)

#define REQ_SORTED		(1 << __REQ_SORTED)
#define REQ_SOFTBARRIER		(1 << __REQ_SOFTBARRIER)
#define REQ_FUA			(1 << __REQ_FUA)
#define REQ_NOMERGE		(1 << __REQ_NOMERGE)
#define REQ_STARTED		(1 << __REQ_STARTED)
#define REQ_DONTPREP		(1 << __REQ_DONTPREP)
#define REQ_QUEUED		(1 << __REQ_QUEUED)
#define REQ_ELVPRIV		(1 << __REQ_ELVPRIV)
#define REQ_FAILED		(1 << __REQ_FAILED)
#define REQ_QUIET		(1 << __REQ_QUIET)
#define REQ_PREEMPT		(1 << __REQ_PREEMPT)
#define REQ_ALLOCED		(1 << __REQ_ALLOCED)
#define REQ_COPY_USER		(1 << __REQ_COPY_USER)
#define REQ_FLUSH		(1 << __REQ_FLUSH)
#define REQ_FLUSH_SEQ		(1 << __REQ_FLUSH_SEQ)
#define REQ_IO_STAT		(1 << __REQ_IO_STAT)
#define REQ_MIXED_MERGE		(1 << __REQ_MIXED_MERGE)
#define REQ_SECURE		(1 << __REQ_SECURE)

#endif /* __LINUX_BLK_TYPES_H */