v5.9
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_BLOCK

enum bh_state_bits {
	BH_Uptodate,	/* Contains valid data */
	BH_Dirty,	/* Is dirty */
	BH_Lock,	/* Is locked */
	BH_Req,		/* Has been submitted for I/O */

	BH_Mapped,	/* Has a disk mapping */
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */
	BH_Write_EIO,	/* I/O error on write */
	BH_Unwritten,	/* Buffer is allocated on disk but not written */
	BH_Quiet,	/* Buffer error printks to be quiet */
	BH_Meta,	/* Buffer contains metadata */
	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */
	BH_Defer_Completion, /* Defer AIO completion to workqueue */

	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
			 */
};

#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	struct page *b_page;		/* the page this bh is mapped to */

	sector_t b_blocknr;		/* start block number */
	size_t b_size;			/* size of mapping */
	char *b_data;			/* pointer to data within the page */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
	struct address_space *b_assoc_map;	/* mapping this buffer is
						   associated with */
	atomic_t b_count;		/* users using this buffer_head */
	spinlock_t b_uptodate_lock;	/* Used by the first bh in a page, to
					 * serialise IO completion of other
					 * buffers in the page */
};

/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 * To avoid resetting buffer flags that are already set, because that causes
 * a costly cache line transition, check the flag first.
 */
#define BUFFER_FNS(bit, name)						\
static __always_inline void set_buffer_##name(struct buffer_head *bh)	\
{									\
	if (!test_bit(BH_##bit, &(bh)->b_state))			\
		set_bit(BH_##bit, &(bh)->b_state);			\
}									\
static __always_inline void clear_buffer_##name(struct buffer_head *bh)	\
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline int buffer_##name(const struct buffer_head *bh)	\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)					\
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}									\

/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)

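/*
 * Illustrative sketch, not part of the header: for the BH_Dirty bit, the
 * BUFFER_FNS()/TAS_BUFFER_FNS() invocations above generate this family of
 * helpers:
 *
 *	set_buffer_dirty(bh)		sets BH_Dirty if not already set
 *	clear_buffer_dirty(bh)		clears BH_Dirty
 *	buffer_dirty(bh)		tests BH_Dirty
 *	test_set_buffer_dirty(bh)	atomic test-and-set of BH_Dirty
 *	test_clear_buffer_dirty(bh)	atomic test-and-clear of BH_Dirty
 *
 * A typical caller; my_fs_account_dirty() is a hypothetical helper.
 */
static inline void example_set_dirty(struct buffer_head *bh)
{
	/* test-and-set returns the old bit, so accounting happens once */
	if (!test_set_buffer_dirty(bh))
		my_fs_account_dirty(bh);	/* hypothetical */
}
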
#define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)					\
	({							\
		BUG_ON(!PagePrivate(page));			\
		((struct buffer_head *)page_private(page));	\
	})
#define page_has_buffers(page)	PagePrivate(page)
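
/*
 * Illustrative sketch, not part of the header: b_this_page links a page's
 * buffers into a circular list, so a walk over all of them starts from
 * page_buffers() and stops when it comes back around to the head.
 */
static inline int example_count_page_buffers(struct page *page)
{
	struct buffer_head *head, *bh;
	int nr = 0;

	if (!page_has_buffers(page))
		return 0;
	head = bh = page_buffers(page);
	do {
		nr++;
		bh = bh->b_this_page;
	} while (bh != head);
	return nr;
}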

void buffer_check_dirty_writeback(struct page *page,
				     bool *dirty, bool *writeback);

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset);
int try_to_free_buffers(struct page *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		bool retry);
void create_empty_buffers(struct page *, unsigned long,
			unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
			sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
{
	clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
}
181
182void mark_buffer_async_write(struct buffer_head *bh);
183void __wait_on_buffer(struct buffer_head *);
184wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
185struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
186			unsigned size);
187struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
188				  unsigned size, gfp_t gfp);
189void __brelse(struct buffer_head *);
190void __bforget(struct buffer_head *);
191void __breadahead(struct block_device *, sector_t block, unsigned int size);
192void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
193		  gfp_t gfp);
194struct buffer_head *__bread_gfp(struct block_device *,
195				sector_t block, unsigned size, gfp_t gfp);
196void invalidate_bh_lrus(void);
197struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
198void free_buffer_head(struct buffer_head * bh);
199void unlock_buffer(struct buffer_head *bh);
200void __lock_buffer(struct buffer_head *bh);
201void ll_rw_block(int, int, int, struct buffer_head * bh[]);
202int sync_dirty_buffer(struct buffer_head *bh);
203int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
204void write_dirty_buffer(struct buffer_head *bh, int op_flags);
205int submit_bh(int, int, struct buffer_head *);
206void write_boundary_block(struct block_device *bdev,
207			sector_t bblock, unsigned blocksize);
208int bh_uptodate_or_lock(struct buffer_head *bh);
209int bh_submit_read(struct buffer_head *bh);
210
211extern int buffer_heads_over_limit;
212
/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidatepage(struct page *page, unsigned int offset,
			  unsigned int length);
int block_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc);
int __block_write_full_page(struct inode *inode, struct page *page,
			get_block_t *get_block, struct writeback_control *wbc,
			bh_end_io_t *handler);
int block_read_full_page(struct page*, get_block_t*);
int block_is_partially_uptodate(struct page *page, unsigned long from,
				unsigned long count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		unsigned flags, struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
		get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
			unsigned, unsigned, struct page **, void **,
			get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
				get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
static inline vm_fault_t block_page_mkwrite_return(int err)
{
	if (err == 0)
		return VM_FAULT_LOCKED;
	if (err == -EFAULT || err == -EAGAIN)
		return VM_FAULT_NOPAGE;
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	/* -ENOSPC, -EDQUOT, -EIO ... */
	return VM_FAULT_SIGBUS;
}
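
/*
 * Illustrative sketch, not part of the header: a filesystem's
 * ->page_mkwrite() handler commonly pairs block_page_mkwrite() with
 * block_page_mkwrite_return(); my_fs_get_block is a hypothetical
 * get_block_t.
 */
static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
	int err = block_page_mkwrite(vmf->vma, vmf, my_fs_get_block);

	return block_page_mkwrite_return(err);
}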
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
				struct page **, void **, get_block_t*);
int nobh_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_writepage(struct page *page, get_block_t *get_block,
                        struct writeback_control *wbc);

void buffer_init(void);

/*
 * inline definitions
 */

static inline void get_bh(struct buffer_head *bh)
{
        atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
        smp_mb__before_atomic();
        atomic_dec(&bh->b_count);
}

static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

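/*
 * Illustrative sketch, not part of the header: sb_bread() returns a
 * buffer with an elevated refcount (or NULL on I/O error), so every
 * successful call is paired with brelse().
 */
static inline int example_read_one_block(struct super_block *sb,
					 sector_t blocknr)
{
	struct buffer_head *bh = sb_bread(sb, blocknr);

	if (!bh)
		return -EIO;
	/* bh->b_data now holds sb->s_blocksize bytes of block data */
	brelse(bh);
	return 0;
}
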
static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
sb_breadahead_unmovable(struct super_block *sb, sector_t block)
{
	__breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

321
322static inline struct buffer_head *
323sb_getblk(struct super_block *sb, sector_t block)
324{
325	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
 
 
 
 
 
326}
327
 
 
 
 
 
328
329static inline struct buffer_head *
330sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
331{
332	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
333}
334
335static inline struct buffer_head *
336sb_find_get_block(struct super_block *sb, sector_t block)
337{
338	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
339}
340
static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}

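/*
 * Illustrative sketch, not part of the header: map_bh() is the usual way
 * a get_block_t implementation fills in bh_result once it has resolved a
 * file-relative block to an on-disk block; my_fs_lookup() is a
 * hypothetical mapping helper.
 */
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	sector_t phys = my_fs_lookup(inode, iblock, create);	/* hypothetical */

	if (!phys)
		return -EIO;
	map_bh(bh_result, inode->i_sb, phys);
	return 0;
}
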
static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh))
		__wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (!trylock_buffer(bh))
		__lock_buffer(bh);
}

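/*
 * Illustrative sketch, not part of the header: the classic synchronous
 * read pattern (as in bh_submit_read() in fs/buffer.c), built from
 * lock_buffer(), submit_bh() and wait_on_buffer().  The extra get_bh()
 * balances the put_bh() done by end_buffer_read_sync().
 */
static inline int example_read_bh_sync(struct buffer_head *bh)
{
	if (buffer_uptodate(bh))
		return 0;
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(REQ_OP_READ, 0, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}
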
static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
						   sector_t block,
						   unsigned size)
{
	return __getblk_gfp(bdev, block, size, 0);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
					   sector_t block,
					   unsigned size)
{
	return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}

382
383/**
384 *  __bread() - reads a specified block and returns the bh
385 *  @bdev: the block_device to read from
386 *  @block: number of block
387 *  @size: size (in bytes) to read
 
 
 
 
 
388 *
389 *  Reads a specified block, and returns buffer head that contains it.
390 *  The page cache is allocated from movable area so that it can be migrated.
391 *  It returns NULL if the block was unreadable.
392 */
393static inline struct buffer_head *
394__bread(struct block_device *bdev, sector_t block, unsigned size)
395{
396	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
397}
398
399extern int __set_page_dirty_buffers(struct page *page);

#else /* CONFIG_BLOCK */

static inline void buffer_init(void) {}
static inline int try_to_free_buffers(struct page *page) { return 1; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
#define buffer_heads_over_limit 0

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */
v6.13.7
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

enum bh_state_bits {
	BH_Uptodate,	/* Contains valid data */
	BH_Dirty,	/* Is dirty */
	BH_Lock,	/* Is locked */
	BH_Req,		/* Has been submitted for I/O */

	BH_Mapped,	/* Has a disk mapping */
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */
	BH_Write_EIO,	/* I/O error on write */
	BH_Unwritten,	/* Buffer is allocated on disk but not written */
	BH_Quiet,	/* Buffer error printks to be quiet */
	BH_Meta,	/* Buffer contains metadata */
	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */
	BH_Defer_Completion, /* Defer AIO completion to workqueue */

	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
			 */
};

#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a folio (via a folio_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	union {
		struct page *b_page;	/* the page this bh is mapped to */
		struct folio *b_folio;	/* the folio this bh is mapped to */
	};

	sector_t b_blocknr;		/* start block number */
	size_t b_size;			/* size of mapping */
	char *b_data;			/* pointer to data within the page */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
	struct address_space *b_assoc_map;	/* mapping this buffer is
						   associated with */
	atomic_t b_count;		/* users using this buffer_head */
	spinlock_t b_uptodate_lock;	/* Used by the first bh in a page, to
					 * serialise IO completion of other
					 * buffers in the page */
};

/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 * To avoid resetting buffer flags that are already set, because that causes
 * a costly cache line transition, check the flag first.
 */
#define BUFFER_FNS(bit, name)						\
static __always_inline void set_buffer_##name(struct buffer_head *bh)	\
{									\
	if (!test_bit(BH_##bit, &(bh)->b_state))			\
		set_bit(BH_##bit, &(bh)->b_state);			\
}									\
static __always_inline void clear_buffer_##name(struct buffer_head *bh)	\
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline int buffer_##name(const struct buffer_head *bh)	\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)					\
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}									\

/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)

static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
{
	/*
	 * If somebody else already set this uptodate, they will
	 * have done the memory barrier, and a reader will thus
	 * see *some* valid buffer state.
	 *
	 * Any other serialization (with IO errors or whatever that
	 * might clear the bit) has to come from other state (eg BH_Lock).
	 */
	if (test_bit(BH_Uptodate, &bh->b_state))
		return;

	/*
	 * make it consistent with folio_mark_uptodate
	 * pairs with smp_load_acquire in buffer_uptodate
	 */
	smp_mb__before_atomic();
	set_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
{
	clear_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline int buffer_uptodate(const struct buffer_head *bh)
{
	/*
	 * make it consistent with folio_test_uptodate
	 * pairs with smp_mb__before_atomic in set_buffer_uptodate
	 */
	return test_bit_acquire(BH_Uptodate, &bh->b_state);
}

static inline unsigned long bh_offset(const struct buffer_head *bh)
{
	return (unsigned long)(bh)->b_data & (page_size(bh->b_page) - 1);
}

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)					\
	({							\
		BUG_ON(!PagePrivate(page));			\
		((struct buffer_head *)page_private(page));	\
	})
#define page_has_buffers(page)	PagePrivate(page)
#define folio_buffers(folio)		folio_get_private(folio)

void buffer_check_dirty_writeback(struct folio *folio,
				     bool *dirty, bool *writeback);

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
void folio_set_bh(struct buffer_head *bh, struct folio *folio,
		  unsigned long offset);
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
					gfp_t gfp);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size);
struct buffer_head *create_empty_buffers(struct folio *folio,
		unsigned long blocksize, unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
				  bool datasync);
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
			  bool datasync);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
			sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
{
	clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
}

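/*
 * Illustrative sketch, not part of the header: for a simple
 * buffer_head-backed filesystem, ->fsync can often be wired directly to
 * generic_buffers_fsync(), which syncs the associated buffers and then
 * flushes the device cache (the _noflush variant skips the flush).
 */
static int example_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	return generic_buffers_fsync(file, start, end, datasync);
}
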
void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
			unsigned size);
struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
		unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread_gfp(struct block_device *,
				sector_t block, unsigned size, gfp_t gfp);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void submit_bh(blk_opf_t, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
void __bh_read_batch(int nr, struct buffer_head *bhs[],
		     blk_opf_t op_flags, bool force_lock);

247
248/*
249 * Generic address_space_operations implementations for buffer_head-backed
250 * address_spaces.
251 */
252void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
253int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
254		void *get_block);
255int __block_write_full_folio(struct inode *inode, struct folio *folio,
256		get_block_t *get_block, struct writeback_control *wbc);
257int block_read_full_folio(struct folio *, get_block_t *);
258bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
 
 
 
259int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
260		struct folio **foliop, get_block_t *get_block);
261int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
262		get_block_t *get_block);
263int block_write_end(struct file *, struct address_space *,
264				loff_t, unsigned len, unsigned copied,
265				struct folio *, void *);
266int generic_write_end(struct file *, struct address_space *,
267				loff_t, unsigned len, unsigned copied,
268				struct folio *, void *);
269void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to);
 
270int cont_write_begin(struct file *, struct address_space *, loff_t,
271			unsigned, struct folio **, void **,
272			get_block_t *, loff_t *);
273int generic_cont_expand_simple(struct inode *inode, loff_t size);
274void block_commit_write(struct page *page, unsigned int from, unsigned int to);
275int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
276				get_block_t get_block);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);

#ifdef CONFIG_MIGRATION
extern int buffer_migrate_folio(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
extern int buffer_migrate_folio_norefs(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
#else
#define buffer_migrate_folio NULL
#define buffer_migrate_folio_norefs NULL
#endif

/*
 * inline definitions
 */

static inline void get_bh(struct buffer_head *bh)
{
        atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
        smp_mb__before_atomic();
        atomic_dec(&bh->b_count);
}

/**
 * brelse - Release a buffer.
 * @bh: The buffer to release.
 *
 * Decrement a buffer_head's reference count.  If @bh is NULL, this
 * function is a no-op.
 *
 * If all buffers on a folio have zero reference count, are clean
 * and unlocked, and if the folio is unlocked and not under writeback
 * then try_to_free_buffers() may strip the buffers from the folio in
 * preparation for freeing it (sometimes, rarely, buffers are removed
 * from a folio but it ends up not being freed, and buffers may later
 * be reattached).
 *
 * Context: Any context.
 */
static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}

/**
 * bforget - Discard any dirty data in a buffer.
 * @bh: The buffer to forget.
 *
 * Call this function instead of brelse() if the data written to a buffer
 * no longer needs to be written back.  It will clear the buffer's dirty
 * flag so writeback of this buffer will be skipped.
 *
 * Context: Any context.
 */
static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
		sector_t block, unsigned size)
{
	gfp_t gfp;

	gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
	gfp |= __GFP_NOFAIL;

	return bdev_getblk(bdev, block, size, gfp);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
		sector_t block, unsigned size)
{
	gfp_t gfp;

	gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
	gfp |= __GFP_MOVABLE | __GFP_NOFAIL;

	return bdev_getblk(bdev, block, size, gfp);
}

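/*
 * Illustrative sketch, not part of the header: __getblk() above always
 * succeeds because it adds __GFP_NOFAIL; a caller that would rather see
 * failure than block in reclaim can call bdev_getblk() itself with a
 * weaker gfp mask.
 */
static inline struct buffer_head *
example_try_getblk(struct block_device *bdev, sector_t block, unsigned size)
{
	return bdev_getblk(bdev, block, size, GFP_NOWAIT | __GFP_MOVABLE);
}
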
static inline struct buffer_head *sb_getblk(struct super_block *sb,
		sector_t block)
{
	return __getblk(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *sb_getblk_gfp(struct super_block *sb,
		sector_t block, gfp_t gfp)
{
	return bdev_getblk(sb->s_bdev, block, sb->s_blocksize, gfp);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}

static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh))
		__wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (!trylock_buffer(bh))
		__lock_buffer(bh);
}

static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (!buffer_uptodate(bh) && trylock_buffer(bh)) {
		if (!buffer_uptodate(bh))
			__bh_read(bh, op_flags, false);
		else
			unlock_buffer(bh);
	}
}

static inline void bh_read_nowait(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (!bh_uptodate_or_lock(bh))
		__bh_read(bh, op_flags, false);
}

/* Returns 1 if the buffer was already uptodate, 0 on a successful read, and -EIO on error. */
static inline int bh_read(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (bh_uptodate_or_lock(bh))
		return 1;
	return __bh_read(bh, op_flags, true);
}

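/*
 * Illustrative sketch, not part of the header: a typical bh_read() caller
 * only cares whether the data ended up valid, so both return values >= 0
 * ("already uptodate" and "read completed") are treated as success.
 */
static inline int example_read_block(struct buffer_head *bh)
{
	int err = bh_read(bh, 0);

	if (err < 0)
		return err;	/* -EIO: the read failed */
	/* bh->b_data is now valid, whether freshly read (0) or cached (1) */
	return 0;
}
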
static inline void bh_read_batch(int nr, struct buffer_head *bhs[])
{
	__bh_read_batch(nr, bhs, 0, true);
}

static inline void bh_readahead_batch(int nr, struct buffer_head *bhs[],
				      blk_opf_t op_flags)
{
	__bh_read_batch(nr, bhs, op_flags, false);
}

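/*
 * Illustrative sketch, not part of the header: opportunistically starting
 * readahead on the next block of a superblock; REQ_RAHEAD marks the I/O
 * as readahead so the block layer may drop it under load.
 */
static inline void example_readahead_next(struct super_block *sb,
					  sector_t next)
{
	struct buffer_head *bh = sb_getblk(sb, next);

	if (bh) {
		bh_readahead(bh, REQ_RAHEAD);
		brelse(bh);
	}
}
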
/**
 * __bread() - Read a block.
 * @bdev: The block device to read from.
 * @block: Block number in units of block size.
 * @size: The block size of this device in bytes.
 *
 * Read a specified block, and return the buffer head that refers
 * to it.  The memory is allocated from the movable area so that it can
 * be migrated.  The returned buffer head has its refcount increased.
 * The caller should call brelse() when it has finished with the buffer.
 *
 * Context: May sleep waiting for I/O.
 * Return: NULL if the block was unreadable.
 */
static inline struct buffer_head *__bread(struct block_device *bdev,
		sector_t block, unsigned size)
{
	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}

/**
 * get_nth_bh - Get a reference on the n'th buffer after this one.
 * @bh: The buffer to start counting from.
 * @count: How many buffers to skip.
 *
 * This is primarily useful for finding the nth buffer in a folio; in
 * that case you pass the head buffer and the byte offset in the folio
 * divided by the block size.  It can be used for other purposes, but
 * it will wrap at the end of the folio rather than returning NULL or
 * proceeding to the next folio for you.
 *
 * Return: The requested buffer with an elevated refcount.
 */
static inline __must_check
struct buffer_head *get_nth_bh(struct buffer_head *bh, unsigned int count)
{
	while (count--)
		bh = bh->b_this_page;
	get_bh(bh);
	return bh;
}

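/*
 * Illustrative sketch, not part of the header: using get_nth_bh() as the
 * kernel-doc above describes, to find the buffer covering a byte offset
 * within a folio.  The caller owns the returned reference.
 */
static inline struct buffer_head *
example_bh_at_offset(struct folio *folio, size_t offset, unsigned int blocksize)
{
	struct buffer_head *head = folio_buffers(folio);

	return get_nth_bh(head, offset / blocksize);
}
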
bool block_dirty_folio(struct address_space *mapping, struct folio *folio);

#ifdef CONFIG_BUFFER_HEAD

void buffer_init(void);
bool try_to_free_buffers(struct folio *folio);
int inode_has_buffers(struct inode *inode);
void invalidate_inode_buffers(struct inode *inode);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void invalidate_bh_lrus(void);
void invalidate_bh_lrus_cpu(void);
bool has_bh_in_lru(int cpu, void *dummy);
extern int buffer_heads_over_limit;

#else /* CONFIG_BUFFER_HEAD */

static inline void buffer_init(void) {}
static inline bool try_to_free_buffers(struct folio *folio) { return true; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
static inline void invalidate_bh_lrus(void) {}
static inline void invalidate_bh_lrus_cpu(void) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0

#endif /* CONFIG_BUFFER_HEAD */
#endif /* _LINUX_BUFFER_HEAD_H */