include/linux/bio.h (Linux v5.9)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256

#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bio_multiple_segments(bio)				\
	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)

#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)
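
/*
 * Usage sketch (not part of this file): per-direction accounting in a
 * driver. "stats" is a hypothetical per-device structure:
 *
 *	if (bio_data_dir(bio) == WRITE)
 *		stats->write_sectors += bio_sectors(bio);
 *	else
 *		stats->read_sectors += bio_sectors(bio);
 */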

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
		return true;

	return false;
}

static inline bool bio_no_advance_iter(const struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_SAME ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_opf & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio_has_data(bio))
		return bio_iovec(bio).bv_len;
	else /* dataless requests such as discard */
		return bio->bi_iter.bi_size;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

/**
 * bio_full - check if the bio is full
 * @bio:	bio to check
 * @len:	length of one segment to be added
 *
 * Return true if @bio is full and one segment with @len bytes can't be
 * added to the bio; otherwise, return false.
 */
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;

	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;

	return false;
}
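
/*
 * Usage sketch: fill a bio one page at a time until it is full;
 * "pages", "i" and "nr_pages" are hypothetical caller state:
 *
 *	while (nr_pages && !bio_full(bio, PAGE_SIZE)) {
 *		__bio_add_page(bio, pages[i++], PAGE_SIZE, 0);
 *		nr_pages--;
 *	}
 */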

static inline bool bio_next_segment(const struct bio *bio,
				    struct bvec_iter_all *iter)
{
	if (iter->idx >= bio->bi_vcnt)
		return false;

	bvec_advance(&bio->bi_io_vec[iter->idx], iter);
	return true;
}

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, iter) \
	for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )
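
/*
 * Usage sketch for the bio's owner, e.g. on the completion path of an
 * I/O it built itself, dropping the page references it took earlier:
 *
 *	struct bio_vec *bvec;
 *	struct bvec_iter_all iter_all;
 *
 *	bio_for_each_segment_all(bvec, bio, iter_all)
 *		put_page(bvec->bv_page);
 */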

static inline void bio_advance_iter(const struct bio *bio,
				    struct bvec_iter *iter, unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
		/* TODO: It is reasonable to complete bio with error here. */
}

#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
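
/*
 * Usage sketch: walk the single-page segments of a bio without
 * touching bio->bi_iter (the on-stack iterator is advanced instead):
 *
 *	struct bio_vec bvec;
 *	struct bvec_iter iter;
 *	unsigned int bytes = 0;
 *
 *	bio_for_each_segment(bvec, bio, iter)
 *		bytes += bvec.bv_len;
 */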

#define __bio_for_each_bvec(bvl, bio, iter, start)		\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

/* iterate over multi-page bvec */
#define bio_for_each_bvec(bvl, bio, iter)			\
	__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)

/*
 * Iterate over all multi-page bvecs. Drivers shouldn't use this version for the
 * same reasons as bio_for_each_segment_all().
 */
#define bio_for_each_bvec_all(bvl, bio, i)		\
	for (i = 0, bvl = bio_first_bvec_all(bio);	\
	     i < (bio)->bi_vcnt; i++, bvl++)		\

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same/write zeroes, because they
	 * interpret bi_size differently:
	 */

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	default:
		break;
	}

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}

/*
 * Get a reference to a bio, so it won't disappear. The intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * Without the bio_get(), the I/O could complete before submit_bio()
 * returns, and the bio would already be freed by the time the
 * if (bio->bi_flags ...) check runs.
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb();
	}
	atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = bio_iovec(bio);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	if (unlikely(!bio_multiple_segments(bio))) {
		*bv = bio_iovec(bio);
		return;
	}

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}

static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
	return bio_first_bvec_all(bio)->bv_page;
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return &bio->bi_io_vec[bio->bi_vcnt - 1];
}

enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
	unsigned short		bip_flags;	/* control flags */

	struct bvec_iter	bio_iter;	/* for rewinding parent bio */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[];/* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
	if (bio->bi_opf & REQ_INTEGRITY)
		return bio->bi_integrity;

	return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip)
		return bip->bip_flags & flag;

	return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
	return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
				sector_t seed)
{
	bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */
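
/*
 * Usage sketch: completion code deciding whether integrity data still
 * needs to be verified; "verify_fn" is a hypothetical callback:
 *
 *	if (bio_integrity(bio) &&
 *	    !bio_integrity_flagged(bio, BIP_CTRL_NOCHECK))
 *		verify_fn(bio_integrity(bio));
 */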

extern void bio_trim(struct bio *bio, int offset, int size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}
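
/*
 * Usage sketch: issue a bio in pieces of at most "max_sectors"; the
 * final piece is the original bio itself. "max_sectors" and "bs" are
 * assumed to come from the caller:
 *
 *	struct bio *n;
 *
 *	do {
 *		n = bio_next_split(bio, max_sectors, GFP_NOIO, bs);
 *		if (n != bio)
 *			bio_chain(n, bio);
 *		submit_bio(n);
 *	} while (n != bio);
 */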

enum {
	BIOSET_NEED_BVECS = BIT(0),
	BIOSET_NEED_RESCUER = BIT(1),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
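
/*
 * Usage sketch: a stacking driver keeps a private bio_set so that its
 * bio allocations cannot deadlock against fs_bio_set. "my_bioset" is a
 * hypothetical name:
 *
 *	static struct bio_set my_bioset;
 *
 *	if (bioset_init(&my_bioset, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
 *		return -ENOMEM;
 *	...
 *	bioset_exit(&my_bioset);
 */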

extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);

extern struct bio_set fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

extern blk_qc_t submit_bio(struct bio *);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
	bio_set_flag(bio, BIO_QUIET);
	bio->bi_status = BLK_STS_AGAIN;
	bio_endio(bio);
}

struct request_queue;

extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *bio, struct bio_vec *table,
		     unsigned short max_vecs);
extern void bio_uninit(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);
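
/*
 * Usage sketch: split off the front of an oversized bio and keep the
 * parent pending until both pieces complete; "sectors" and "bs" are
 * assumed to come from the caller:
 *
 *	struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);
 *
 *	bio_chain(split, bio);
 *	submit_bio(split);
 *	(the remainder stays in "bio" and is submitted separately)
 */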

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off, bool *same_page);
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_release_pages(struct bio *bio, bool mark_dirty);
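
/*
 * Usage sketch: synchronously read one page. bio_alloc() cannot fail
 * when the gfp mask allows direct reclaim; "bdev", "sector" and "page"
 * are assumed to be provided by the caller:
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *	int ret;
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);
 */
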
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			       struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_list_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
void bio_truncate(struct bio *bio, unsigned new_size);
void guard_bio_eod(struct bio *bio);

static inline void zero_fill_bio(struct bio *bio)
{
	zero_fill_bio_iter(bio, bio->bi_iter);
}

extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
extern const char *bio_devname(struct bio *bio, char *buffer);

#define bio_set_dev(bio, bdev)			\
do {						\
	if ((bio)->bi_disk != (bdev)->bd_disk)	\
		bio_clear_flag(bio, BIO_THROTTLED);\
	(bio)->bi_disk = (bdev)->bd_disk;	\
	(bio)->bi_partno = (bdev)->bd_partno;	\
	bio_associate_blkg(bio);		\
} while (0)

#define bio_copy_dev(dst, src)			\
do {						\
	(dst)->bi_disk = (src)->bi_disk;	\
	(dst)->bi_partno = (src)->bi_partno;	\
	bio_clone_blkg_association(dst, src);	\
} while (0)

#define bio_dev(bio) \
	disk_devt((bio)->bi_disk)

#ifdef CONFIG_BLK_CGROUP
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else	/* CONFIG_BLK_CGROUP */
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
					       struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
					      struct bio *src) { }
#endif	/* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * Remember: never ever re-enable interrupts between a bvec_kmap_irq()
 * and its matching bvec_kunmap_irq()!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif
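
/*
 * Usage sketch: copy a segment out of a (possibly highmem) bvec;
 * interrupts stay disabled between map and unmap. "dst" is a
 * hypothetical destination buffer:
 *
 *	unsigned long flags;
 *	char *buf = bvec_kmap_irq(bvec, &flags);
 *
 *	memcpy(dst, buf, bvec->bv_len);
 *	bvec_kunmap_irq(buf, &flags);
 */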

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}
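
/*
 * Usage sketch: a remapping driver deferring bios and draining them
 * later; "bio_to_defer" is a hypothetical bio owned by the driver:
 *
 *	struct bio_list deferred;
 *	struct bio *bio;
 *
 *	bio_list_init(&deferred);
 *	bio_list_add(&deferred, bio_to_defer);
 *	...
 *	while ((bio = bio_list_pop(&deferred)))
 *		submit_bio(bio);
 */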

/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	mempool_t bio_pool;
	mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t bio_integrity_pool;
	mempool_t bvec_integrity_pool;
#endif

	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;
};

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

static inline bool bioset_initialized(struct bio_set *bs)
{
	return bs->bio_slab != NULL;
}

/*
 * A small number of entries is fine, this is not going to be
 * performance-critical; basically we just need to survive.
 */
#define BIO_SPLIT_ENTRIES 2

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
	return NULL;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline bool bio_integrity_prep(struct bio *bio)
{
	return true;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
								unsigned int nr)
{
	return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
					unsigned int len, unsigned int offset)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different from IRQ-driven IO, where
 * it is safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
	bio->bi_opf |= REQ_HIPRI;
	if (!is_sync_kiocb(kiocb))
		bio->bi_opf |= REQ_NOWAIT;
}

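/*
 * Usage sketch: direct I/O submission honouring the iocb's polling
 * hint; "qc" would be handed back for blk_poll():
 *
 *	if (iocb->ki_flags & IOCB_HIPRI)
 *		bio_set_polled(bio, iocb);
 *	qc = submit_bio(bio);
 */
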
#endif /* __LINUX_BIO_H */