// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

extern struct kmem_cache *xfs_buf_cache;

/*
 * Base types
 */
struct xfs_buf;

#define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL))

#define XBF_READ (1u << 0) /* buffer intended for reading from device */
#define XBF_WRITE (1u << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD (1u << 2) /* asynchronous read-ahead */
#define XBF_NO_IOACCT (1u << 3) /* bypass I/O accounting (non-LRU bufs) */
#define XBF_ASYNC (1u << 4) /* initiator will not wait for completion */
#define XBF_DONE (1u << 5) /* all pages in the buffer uptodate */
#define XBF_STALE (1u << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL (1u << 7) /* async writes have failed on this buffer */

/* buffer type flags for write callbacks */
#define _XBF_INODES (1u << 16) /* inode buffer */
#define _XBF_DQUOTS (1u << 17) /* dquot buffer */
#define _XBF_LOGRECOVERY (1u << 18) /* log recovery buffer */

/* flags used only internally */
#define _XBF_PAGES (1u << 20) /* backed by refcounted pages */
#define _XBF_KMEM (1u << 21) /* backed by heap memory */
#define _XBF_DELWRI_Q (1u << 22) /* buffer on a delwri queue */

/* flags used only as arguments to access routines */
#define XBF_INCORE (1u << 29) /* lookup only, return if found in cache */
#define XBF_TRYLOCK (1u << 30) /* lock requested, but do not wait */
#define XBF_UNMAPPED (1u << 31) /* do not map the buffer */

typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ, "READ" }, \
	{ XBF_WRITE, "WRITE" }, \
	{ XBF_READ_AHEAD, "READ_AHEAD" }, \
	{ XBF_NO_IOACCT, "NO_IOACCT" }, \
	{ XBF_ASYNC, "ASYNC" }, \
	{ XBF_DONE, "DONE" }, \
	{ XBF_STALE, "STALE" }, \
	{ XBF_WRITE_FAIL, "WRITE_FAIL" }, \
	{ _XBF_INODES, "INODES" }, \
	{ _XBF_DQUOTS, "DQUOTS" }, \
	{ _XBF_LOGRECOVERY, "LOG_RECOVERY" }, \
	{ _XBF_PAGES, "PAGES" }, \
	{ _XBF_KMEM, "KMEM" }, \
	{ _XBF_DELWRI_Q, "DELWRI_Q" }, \
	/* The following interface flags should never be set */ \
	{ XBF_INCORE, "INCORE" }, \
	{ XBF_TRYLOCK, "TRYLOCK" }, \
	{ XBF_UNMAPPED, "UNMAPPED" }
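
/*
 * XFS_BUF_FLAGS is a flag-name table for tracepoints. A sketch of how a
 * trace event can decode b_flags with it (mirroring the pattern used in
 * fs/xfs/xfs_trace.h):
 *
 *	TP_printk("... flags %s ...",
 *		  __print_flags(__entry->flags, "|", XFS_BUF_FLAGS))
 */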

/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE (1 << 0) /* buffer being discarded */
#define XFS_BSTATE_IN_FLIGHT (1 << 1) /* I/O in flight */

/*
 * The xfs_buftarg contains two notions of "sector size":
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of IO which will be performed by metadata operations.
 * 2) The device logical sector size.
 *
 * The first is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The second is derived from the underlying device, and controls direct IO
 * alignment constraints.
 */
typedef struct xfs_buftarg {
	dev_t bt_dev;
	struct block_device *bt_bdev;
	struct dax_device *bt_daxdev;
	u64 bt_dax_part_off;
	struct xfs_mount *bt_mount;
	unsigned int bt_meta_sectorsize;
	size_t bt_meta_sectormask;
	size_t bt_logical_sectorsize;
	size_t bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker bt_shrinker;
	struct list_lru bt_lru;

	struct percpu_counter bt_io_count;
	struct ratelimit_state bt_ioerror_rl;
} xfs_buftarg_t;
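
/*
 * Illustrative sketch (not part of the kernel API): the logical sector
 * mask describes the direct I/O alignment constraint, so a hypothetical
 * alignment check could look like:
 *
 *	static inline bool
 *	xfs_buftarg_dio_aligned(struct xfs_buftarg *bt, loff_t off, size_t len)
 *	{
 *		return ((off | len) & bt->bt_logical_sectormask) == 0;
 *	}
 */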

#define XB_PAGES 2

struct xfs_buf_map {
	xfs_daddr_t bm_bn; /* block number for I/O */
	int bm_len; /* size of I/O */
};

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };
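
/*
 * Usage sketch, mirroring the single-map wrappers below: declare a
 * one-element map on the stack and hand it to the _map variant:
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	error = xfs_buf_read_map(target, &map, 1, flags, &bp, ops,
 *				 __builtin_return_address(0));
 */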

struct xfs_buf_ops {
	char *name;
	union {
		__be32 magic[2]; /* v4 and v5 on disk magic values */
		__be16 magic16[2]; /* v4 and v5 on disk magic values */
	};
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
	xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};
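
/*
 * Illustrative verifier table (names hypothetical): read verifiers run
 * after read I/O completes, write verifiers run before the buffer is
 * submitted, and verify_struct allows in-memory checking that reports a
 * failure address without setting b_error:
 *
 *	static const struct xfs_buf_ops xfs_foo_buf_ops = {
 *		.name		= "xfs_foo",
 *		.magic		= { cpu_to_be32(XFS_FOO_MAGIC),
 *				    cpu_to_be32(XFS_FOO_MAGIC) },
 *		.verify_read	= xfs_foo_read_verify,
 *		.verify_write	= xfs_foo_write_verify,
 *		.verify_struct	= xfs_foo_verify,
 *	};
 */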

struct xfs_buf {
	/*
	 * The first cacheline holds all the fields needed for an uncontended
	 * cache hit to be fully processed. The semaphore straddles the
	 * cacheline boundary, but the counter and lock sit on the first
	 * cacheline, which is the only part touched if we hit the semaphore
	 * fast path on locking.
	 */
	struct rhash_head b_rhash_head; /* pag buffer hash node */

	xfs_daddr_t b_rhash_key; /* buffer cache index */
	int b_length; /* size of buffer in BBs */
	atomic_t b_hold; /* reference count */
	atomic_t b_lru_ref; /* lru reclaim ref count */
	xfs_buf_flags_t b_flags; /* status flags */
	struct semaphore b_sema; /* semaphore for lockables */

	/*
	 * Concurrent access to b_lru and b_state is protected by
	 * bt_lru_lock, not b_sema.
	 */
	struct list_head b_lru; /* lru list */
	spinlock_t b_lock; /* internal state lock */
	unsigned int b_state; /* internal state flags */
	int b_io_error; /* internal IO error state */
	wait_queue_head_t b_waiters; /* unpin waiters */
	struct list_head b_list;
	struct xfs_perag *b_pag; /* contains buffer hash table */
	struct xfs_mount *b_mount;
	struct xfs_buftarg *b_target; /* buffer target (device) */
	void *b_addr; /* virtual address of buffer */
	struct work_struct b_ioend_work;
	struct completion b_iowait; /* queue for I/O waiters */
	struct xfs_buf_log_item *b_log_item;
	struct list_head b_li_list; /* Log items list head */
	struct xfs_trans *b_transp;
	struct page **b_pages; /* array of page pointers */
	struct page *b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map *b_maps; /* compound buffer map */
	struct xfs_buf_map __b_map; /* inline compound buffer map */
	int b_map_count;
	atomic_t b_pin_count; /* pin count */
	atomic_t b_io_remaining; /* #outstanding I/O requests */
	unsigned int b_page_count; /* size of page array */
	unsigned int b_offset; /* page offset of b_addr,
				  only for _XBF_KMEM buffers */
	int b_error; /* error code on I/O */

	/*
	 * Async write failure retry count. Initialised to zero on the first
	 * failure; when it exceeds the configured maximum without an
	 * intervening success, the write is considered permanently failed
	 * and the iodone handler takes appropriate action.
	 *
	 * For retry timeouts, we record the jiffies value at the first
	 * failure. This means we can change the retry timeout for buffers
	 * already under I/O and thus avoid getting stuck in a retry loop
	 * with a long timeout.
	 *
	 * b_last_error is used to ensure that we are getting repeated
	 * errors, not different errors; e.g. a block device might change
	 * ENOSPC to EIO when a failure timeout occurs, so we want to
	 * re-initialise the error retry behaviour appropriately when that
	 * happens.
	 */
	int b_retries;
	unsigned long b_first_retry_time; /* in jiffies */
	int b_last_error;

	const struct xfs_buf_ops *b_ops;
	struct rcu_head b_rcu;
};
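
/*
 * Schematic of the retry bookkeeping described above (the actual policy
 * lives in xfs_buf.c and is driven by the configurable error settings;
 * max_retries and retry_timeout are placeholders here):
 *
 *	if (bp->b_last_error != bp->b_error) {
 *		bp->b_retries = 0;	// new error: reset retry state
 *		bp->b_first_retry_time = jiffies;
 *		bp->b_last_error = bp->b_error;
 *	} else if (++bp->b_retries > max_retries ||
 *		   time_after(jiffies,
 *			      bp->b_first_retry_time + retry_timeout)) {
 *		...fail the buffer permanently...
 *	}
 */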

/* Finding and Reading Buffers */
int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops, xfs_failaddr_t fa);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
		struct xfs_buf_map *map, int nmaps,
		const struct xfs_buf_ops *ops);

static inline int
xfs_buf_incore(
	struct xfs_buftarg *target,
	xfs_daddr_t blkno,
	size_t numblks,
	xfs_buf_flags_t flags,
	struct xfs_buf **bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, XBF_INCORE | flags, bpp);
}
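
/*
 * Note: with XBF_INCORE set, xfs_buf_get_map() performs a pure cache
 * lookup and will not allocate a new buffer; a cache miss fails with an
 * error (-ENOENT in current implementations) instead.
 */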

static inline int
xfs_buf_get(
	struct xfs_buftarg *target,
	xfs_daddr_t blkno,
	size_t numblks,
	struct xfs_buf **bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, 0, bpp);
}

static inline int
xfs_buf_read(
	struct xfs_buftarg *target,
	xfs_daddr_t blkno,
	size_t numblks,
	xfs_buf_flags_t flags,
	struct xfs_buf **bpp,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_read_map(target, &map, 1, flags, bpp, ops,
			__builtin_return_address(0));
}
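
/*
 * Typical cached-read pattern (sketch; error handling elided and the
 * ops table name hypothetical):
 *
 *	struct xfs_buf *bp;
 *	int error;
 *
 *	error = xfs_buf_read(mp->m_ddev_targp, blkno, numblks, 0, &bp,
 *			     &xfs_foo_buf_ops);
 *	if (error)
 *		return error;
 *	...use bp->b_addr...
 *	xfs_buf_relse(bp);
 */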

static inline void
xfs_buf_readahead(
	struct xfs_buftarg *target,
	xfs_daddr_t blkno,
	size_t numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_readahead_map(target, &map, 1, ops);
}

int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
		xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
		size_t numblks, xfs_buf_flags_t flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops);
int _xfs_buf_read(struct xfs_buf *bp, xfs_buf_flags_t flags);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_rele(struct xfs_buf *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(struct xfs_buf *);
extern void xfs_buf_lock(struct xfs_buf *);
extern void xfs_buf_unlock(struct xfs_buf *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)

static inline void xfs_buf_relse(struct xfs_buf *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);

extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
		xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa);
void xfs_buf_ioend_fail(struct xfs_buf *);
void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize);
void __xfs_buf_mark_corrupt(struct xfs_buf *bp, xfs_failaddr_t fa);
#define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)

/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);

static inline xfs_daddr_t xfs_buf_daddr(struct xfs_buf *bp)
{
	return bp->b_maps[0].bm_bn;
}

void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);

/*
 * If the buffer is already on the LRU, do nothing. Otherwise set the buffer
 * up with a reference count of 0 so it will be tossed from the cache when
 * released.
 */
static inline void xfs_buf_oneshot(struct xfs_buf *bp)
{
	if (!list_empty(&bp->b_lru) || atomic_read(&bp->b_lru_ref) > 1)
		return;
	atomic_set(&bp->b_lru_ref, 0);
}

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}
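
/*
 * Checksum helpers: b_length is expressed in 512-byte basic blocks, so
 * BBTOB() converts it to the byte length covered by the CRC.
 */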
static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}

/*
 * Handling of buftargs.
 */
struct xfs_buftarg *xfs_alloc_buftarg(struct xfs_mount *mp,
		struct block_device *bdev);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_buftarg_wait(struct xfs_buftarg *);
extern void xfs_buftarg_drain(struct xfs_buftarg *);
extern int xfs_setsize_buftarg(struct xfs_buftarg *, unsigned int);

#define xfs_getsize_buftarg(buftarg) block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev)

int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);

#endif /* __XFS_BUF_H__ */