v4.6 (fs/xfs/xfs_buf.h)
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/buffer_head.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

/*
 *	Base types
 */

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

typedef enum {
	XBRW_READ = 1,			/* transfer into target memory */
	XBRW_WRITE = 2,			/* transfer from target memory */
	XBRW_ZERO = 3,			/* Zero target memory */
} xfs_buf_rw_t;

#define XBF_READ	 (1 << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1 << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1 << 2) /* asynchronous read-ahead */
#define XBF_ASYNC	 (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1 << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1 << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	 (1 << 24)/* async writes have failed on this buffer */

/* I/O hints for the BIO layer */
#define XBF_SYNCIO	 (1 << 10)/* treat this buffer as synchronous I/O */
#define XBF_FUA		 (1 << 11)/* force cache write through mode */
#define XBF_FLUSH	 (1 << 12)/* flush the disk cache before a write */

/* flags used only as arguments to access routines */
#define XBF_TRYLOCK	 (1 << 16)/* lock requested, but do not wait */
#define XBF_UNMAPPED	 (1 << 17)/* do not map the buffer */

/* flags used only internally */
#define _XBF_PAGES	 (1 << 20)/* backed by refcounted pages */
#define _XBF_KMEM	 (1 << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q	 (1 << 22)/* buffer on a delwri queue */
#define _XBF_COMPOUND	 (1 << 23)/* compound buffer */

typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ XBF_SYNCIO,		"SYNCIO" }, \
	{ XBF_FUA,		"FUA" }, \
	{ XBF_FLUSH,		"FLUSH" }, \
	{ XBF_TRYLOCK,		"TRYLOCK" },	/* should never be set */\
	{ XBF_UNMAPPED,		"UNMAPPED" },	/* ditto */\
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
	{ _XBF_COMPOUND,	"COMPOUND" }
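
/*
 * Example (editor's illustration, not part of the original header):
 * XFS_BUF_FLAGS is a flag/name table consumed by the tracepoint
 * pretty-printer. A TP_printk() in fs/xfs/xfs_trace.h decodes b_flags
 * along the lines of:
 *
 *	__print_flags(__entry->flags, "|", XFS_BUF_FLAGS)
 *
 * so a buffer with XBF_READ | XBF_ASYNC set prints as "READ|ASYNC".
 */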


/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */

/*
 * The xfs_buftarg contains 2 notions of "sector size" -
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of IO which will be performed by metadata operations.
 * 2) The device logical sector size
 *
 * The first is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The latter is derived from the underlying device, and controls direct IO
 * alignment constraints.
 */
typedef struct xfs_buftarg {
	dev_t			bt_dev;
	struct block_device	*bt_bdev;
	struct backing_dev_info	*bt_bdi;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		bt_shrinker;
	struct list_lru		bt_lru;
} xfs_buftarg_t;
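
/*
 * Example (editor's sketch, not part of the original header): both sector
 * sizes are kept with matching masks so alignment checks are simple bit
 * tests. The direct I/O path in fs/xfs/xfs_file.c does essentially:
 *
 *	if ((pos | count) & target->bt_logical_sectormask)
 *		return -EINVAL;		// misaligned for the device
 *
 * while metadata I/O is sized and aligned to bt_meta_sectorsize.
 */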

struct xfs_buf;
typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);


#define XB_PAGES	2

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
};

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };

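/*
 * Example (editor's sketch, not part of the original header): most callers
 * use the single-map wrappers below, but a discontiguous ("compound")
 * buffer is described by one map per extent, passed to the _map variants:
 *
 *	struct xfs_buf_map map[2] = {
 *		{ .bm_bn = daddr1, .bm_len = len1 },
 *		{ .bm_bn = daddr2, .bm_len = len2 },
 *	};
 *	bp = xfs_buf_read_map(target, map, 2, 0, ops);
 *
 * The resulting buffer is indexed in the cache by map[0].bm_bn.
 */
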
struct xfs_buf_ops {
	char *name;
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
};

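/*
 * Example (editor's illustration, not part of the original header): each
 * on-disk structure supplies a verifier table like the superblock's in
 * fs/xfs/libxfs/xfs_sb.c:
 *
 *	const struct xfs_buf_ops xfs_sb_buf_ops = {
 *		.name = "xfs_sb",
 *		.verify_read = xfs_sb_read_verify,
 *		.verify_write = xfs_sb_write_verify,
 *	};
 *
 * verify_read runs at I/O completion and sets b_error on corruption;
 * verify_write runs just before the buffer is written back.
 */
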
typedef struct xfs_buf {
	/*
	 * The first cacheline holds all the fields needed for an uncontended
	 * cache hit to be fully processed. The semaphore straddles the
	 * cacheline boundary, but the counter and lock sit on the first
	 * cacheline, which is the only part touched if we hit the semaphore
	 * fast-path on locking.
	 */
	struct rb_node		b_rbnode;	/* rbtree node */
	xfs_daddr_t		b_bn;		/* block number of buffer */
	int			b_length;	/* size of buffer in BBs */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * concurrent access to b_lru and b_state is protected by the
	 * LRU list lock and not by b_sema
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	int			b_io_error;	/* internal IO error state */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains rbtree root */
	xfs_buftarg_t		*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	struct workqueue_struct	*b_ioend_wq;	/* I/O completion wq */
	xfs_buf_iodone_t	b_iodone;	/* I/O completion function */
	struct completion	b_iowait;	/* queue for I/O waiters */
	void			*b_fspriv;
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	int			b_io_length;	/* IO size in BBs */
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset in first page */
	int			b_error;	/* error code on I/O */
	const struct xfs_buf_ops	*b_ops;

#ifdef XFS_BUF_LOCK_TRACKING
	int			b_last_holder;
#endif
} xfs_buf_t;

/* Finding and Reading Buffers */
struct xfs_buf *_xfs_buf_find(struct xfs_buftarg *target,
			      struct xfs_buf_map *map, int nmaps,
			      xfs_buf_flags_t flags, struct xfs_buf *new_bp);

static inline struct xfs_buf *
xfs_incore(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return _xfs_buf_find(target, &map, 1, flags, NULL);
}

struct xfs_buf *_xfs_buf_alloc(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags);

static inline struct xfs_buf *
xfs_buf_alloc(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return _xfs_buf_alloc(target, &map, 1, flags);
}

struct xfs_buf *xfs_buf_get_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags);
struct xfs_buf *xfs_buf_read_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags,
			       const struct xfs_buf_ops *ops);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       const struct xfs_buf_ops *ops);

static inline struct xfs_buf *
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_get_map(target, &map, 1, flags);
}

static inline struct xfs_buf *
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_read_map(target, &map, 1, flags, ops);
}

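/*
 * Example (editor's sketch, not part of the original header; error
 * handling elided): a typical cached metadata read through the single-map
 * wrapper, where "mp" is the mount and the superblock location/verifier
 * stand in for a real caller's:
 *
 *	struct xfs_buf	*bp;
 *
 *	bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR, 1, 0,
 *			  &xfs_sb_buf_ops);
 *	if (!bp || bp->b_error)
 *		return ...;		// I/O or verifier failure
 *	...				// work with bp->b_addr
 *	xfs_buf_relse(bp);		// unlock and drop the reference
 */
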
static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_readahead_map(target, &map, 1, ops);
}

struct xfs_buf *xfs_buf_get_empty(struct xfs_buftarg *target, size_t numblks);
void xfs_buf_set_empty(struct xfs_buf *bp, size_t numblks);
int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length);

struct xfs_buf *xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
				int flags);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
			  size_t numblks, int flags, struct xfs_buf **bpp,
			  const struct xfs_buf_ops *ops);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_free(xfs_buf_t *);
extern void xfs_buf_rele(xfs_buf_t *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(xfs_buf_t *);
extern void xfs_buf_lock(xfs_buf_t *);
extern void xfs_buf_unlock(xfs_buf_t *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);
extern void xfs_buf_ioend(struct xfs_buf *bp);
extern void xfs_buf_ioerror(xfs_buf_t *, int);
extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
extern void xfs_buf_submit(struct xfs_buf *bp);
extern int xfs_buf_submit_wait(struct xfs_buf *bp);
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
				xfs_buf_rw_t);
#define xfs_buf_zero(bp, off, len) \
	    xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)

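/*
 * Example (editor's note, not part of the original header): xfs_buf_iomove()
 * copies between a caller buffer and the page-backed data buffer, walking
 * the page array so it works on unmapped buffers too; xfs_buf_zero() is the
 * XBRW_ZERO case. Zeroing the first 512 bytes of a buffer:
 *
 *	xfs_buf_zero(bp, 0, 512);
 *
 * expands to xfs_buf_iomove(bp, 0, 512, NULL, XBRW_ZERO).
 */
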
/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);

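/*
 * Example (editor's sketch, not part of the original header): delayed
 * writes are batched on a caller-owned list and submitted in one pass:
 *
 *	LIST_HEAD(buffer_list);
 *
 *	xfs_buf_delwri_queue(bp, &buffer_list);	// bp must be locked
 *	...					// queue further buffers
 *	error = xfs_buf_delwri_submit(&buffer_list);
 *
 * _submit() sorts the list, issues the writes and waits for completion;
 * _submit_nowait() issues them asynchronously.
 */
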
/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);

/*
 * These macros use the IO block map rather than b_bn. b_bn is now really
 * just for the buffer cache index for cached buffers. As IO does not use b_bn
 * anymore, uncached buffers do not use b_bn at all and hence must modify the IO
 * map directly. Uncached buffers are not allowed to be discontiguous, so this
 * is safe to do.
 *
 * In future, uncached buffers will pass the block number directly to the io
 * request function and hence these macros will go away at that point.
 */
#define XFS_BUF_ADDR(bp)		((bp)->b_maps[0].bm_bn)
#define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno))

static inline void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
{
	atomic_set(&bp->b_lru_ref, lru_ref);
}

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline void xfs_buf_relse(xfs_buf_t *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}

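/*
 * Example (editor's illustration, not part of the original header):
 * CRC-enabled verifiers pair these helpers with the on-disk structure's
 * CRC field offset, e.g. for the superblock:
 *
 *	if (!xfs_buf_verify_cksum(bp, XFS_SB_CRC_OFF))	// read verifier
 *		... flag the buffer corrupt ...
 *
 *	xfs_buf_update_cksum(bp, XFS_SB_CRC_OFF);	// write verifier
 *
 * where XFS_SB_CRC_OFF is offsetof(struct xfs_dsb, sb_crc).
 */
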
/*
 *	Handling of buftargs.
 */
extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
			struct block_device *);
extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

#endif	/* __XFS_BUF_H__ */
v4.17 (fs/xfs/xfs_buf.h)
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/buffer_head.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

/*
 *	Base types
 */

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

typedef enum {
	XBRW_READ = 1,			/* transfer into target memory */
	XBRW_WRITE = 2,			/* transfer from target memory */
	XBRW_ZERO = 3,			/* Zero target memory */
} xfs_buf_rw_t;

#define XBF_READ	 (1 << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1 << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1 << 2) /* asynchronous read-ahead */
#define XBF_NO_IOACCT	 (1 << 3) /* bypass I/O accounting (non-LRU bufs) */
#define XBF_ASYNC	 (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1 << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1 << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	 (1 << 24)/* async writes have failed on this buffer */

/* I/O hints for the BIO layer */
#define XBF_SYNCIO	 (1 << 10)/* treat this buffer as synchronous I/O */
#define XBF_FUA		 (1 << 11)/* force cache write through mode */
#define XBF_FLUSH	 (1 << 12)/* flush the disk cache before a write */

/* flags used only as arguments to access routines */
#define XBF_TRYLOCK	 (1 << 16)/* lock requested, but do not wait */
#define XBF_UNMAPPED	 (1 << 17)/* do not map the buffer */

/* flags used only internally */
#define _XBF_PAGES	 (1 << 20)/* backed by refcounted pages */
#define _XBF_KMEM	 (1 << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q	 (1 << 22)/* buffer on a delwri queue */
#define _XBF_COMPOUND	 (1 << 23)/* compound buffer */

typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_NO_IOACCT,	"NO_IOACCT" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ XBF_SYNCIO,		"SYNCIO" }, \
	{ XBF_FUA,		"FUA" }, \
	{ XBF_FLUSH,		"FLUSH" }, \
	{ XBF_TRYLOCK,		"TRYLOCK" },	/* should never be set */\
	{ XBF_UNMAPPED,		"UNMAPPED" },	/* ditto */\
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
	{ _XBF_COMPOUND,	"COMPOUND" }


/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */
#define XFS_BSTATE_IN_FLIGHT	 (1 << 1)	/* I/O in flight */

/*
 * The xfs_buftarg contains 2 notions of "sector size" -
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of IO which will be performed by metadata operations.
 * 2) The device logical sector size
 *
 * The first is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The latter is derived from the underlying device, and controls direct IO
 * alignment constraints.
 */
typedef struct xfs_buftarg {
	dev_t			bt_dev;
	struct block_device	*bt_bdev;
	struct dax_device	*bt_daxdev;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		bt_shrinker;
	struct list_lru		bt_lru;

	struct percpu_counter	bt_io_count;
} xfs_buftarg_t;
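
/*
 * Example (editor's sketch of the accounting in fs/xfs/xfs_buf.c, not part
 * of the original header): bt_io_count tracks in-flight buffer I/O so that
 * unmount can wait for it to drain; buffers flagged XBF_NO_IOACCT are
 * exempt. The increment side is roughly:
 *
 *	if (!(bp->b_flags & XBF_NO_IOACCT) &&
 *	    !(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {	// under b_lock
 *		bp->b_state |= XFS_BSTATE_IN_FLIGHT;
 *		percpu_counter_inc(&bp->b_target->bt_io_count);
 *	}
 *
 * with a matching decrement when the buffer leaves flight.
 */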

struct xfs_buf;
typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);


#define XB_PAGES	2

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
};

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };

struct xfs_buf_ops {
	char *name;
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
	xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};

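/*
 * Example (editor's illustration, not part of the original header):
 * verify_struct, new in this version, checks structural sanity without
 * touching b_error and returns the address of the failing check (or NULL
 * if the buffer is good), captured with __this_address; a hypothetical
 * verifier:
 *
 *	static xfs_failaddr_t
 *	xfs_foo_verify_struct(struct xfs_buf *bp)
 *	{
 *		if (!xfs_foo_magic_ok(bp))	// hypothetical check
 *			return __this_address;
 *		return NULL;
 *	}
 *
 * Online scrub uses it to re-validate buffers it already holds.
 */
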
typedef struct xfs_buf {
	/*
	 * The first cacheline holds all the fields needed for an uncontended
	 * cache hit to be fully processed. The semaphore straddles the
	 * cacheline boundary, but the counter and lock sit on the first
	 * cacheline, which is the only part touched if we hit the semaphore
	 * fast-path on locking.
	 */
	struct rhash_head	b_rhash_head;	/* pag buffer hash node */
	xfs_daddr_t		b_bn;		/* block number of buffer */
	int			b_length;	/* size of buffer in BBs */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * concurrent access to b_lru and b_state is protected by the
	 * LRU list lock and not by b_sema
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	int			b_io_error;	/* internal IO error state */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains buffer hash table */
	xfs_buftarg_t		*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	struct workqueue_struct	*b_ioend_wq;	/* I/O completion wq */
	xfs_buf_iodone_t	b_iodone;	/* I/O completion function */
	struct completion	b_iowait;	/* queue for I/O waiters */
	void			*b_log_item;
	struct list_head	b_li_list;	/* Log items list head */
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	int			b_io_length;	/* IO size in BBs */
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset in first page */
	int			b_error;	/* error code on I/O */

	/*
	 * Async write failure retry count. It is initialised to zero on the
	 * first failure; if it exceeds the configured maximum without an
	 * intervening success, the write is considered permanently failed
	 * and the iodone handler will take appropriate action.
	 *
	 * For retry timeouts, we record the time (in jiffies) of the first
	 * failure. This means we can change the retry timeout for buffers
	 * already under I/O and thus avoid getting stuck in a retry loop
	 * with a long timeout.
	 *
	 * last_error is used to ensure that we are getting repeated errors,
	 * not different errors. E.g. a block device might change ENOSPC to
	 * EIO when a failure timeout occurs, so we want to re-initialise the
	 * error retry behaviour appropriately when that happens.
	 */
	int			b_retries;
	unsigned long		b_first_retry_time; /* in jiffies */
	int			b_last_error;

	const struct xfs_buf_ops	*b_ops;

#ifdef XFS_BUF_LOCK_TRACKING
	int			b_last_holder;
#endif
} xfs_buf_t;

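/*
 * Example (editor's sketch of the retry policy described above, following
 * the failed-write handling in fs/xfs/xfs_buf_item.c; "cfg" is the per-errno
 * error configuration from the mount):
 *
 *	if (bp->b_last_error != bp->b_error) {
 *		bp->b_retries = 0;		// new error type: start over
 *		bp->b_first_retry_time = jiffies;
 *		bp->b_last_error = bp->b_error;
 *		... resubmit the write ...
 *	} else if ((cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
 *		    ++bp->b_retries > cfg->max_retries) ||
 *		   (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
 *		    time_after(jiffies, cfg->retry_timeout +
 *					bp->b_first_retry_time))) {
 *		... treat the write as permanently failed ...
 *	}
 */
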
/* Finding and Reading Buffers */
struct xfs_buf *_xfs_buf_find(struct xfs_buftarg *target,
			      struct xfs_buf_map *map, int nmaps,
			      xfs_buf_flags_t flags, struct xfs_buf *new_bp);

static inline struct xfs_buf *
xfs_incore(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return _xfs_buf_find(target, &map, 1, flags, NULL);
}

struct xfs_buf *_xfs_buf_alloc(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags);

static inline struct xfs_buf *
xfs_buf_alloc(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return _xfs_buf_alloc(target, &map, 1, flags);
}

struct xfs_buf *xfs_buf_get_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags);
struct xfs_buf *xfs_buf_read_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags,
			       const struct xfs_buf_ops *ops);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       const struct xfs_buf_ops *ops);

static inline struct xfs_buf *
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_get_map(target, &map, 1, flags);
}

static inline struct xfs_buf *
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_read_map(target, &map, 1, flags, ops);
}

static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_readahead_map(target, &map, 1, ops);
}

void xfs_buf_set_empty(struct xfs_buf *bp, size_t numblks);
int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length);

struct xfs_buf *xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
				int flags);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
			  size_t numblks, int flags, struct xfs_buf **bpp,
			  const struct xfs_buf_ops *ops);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_free(xfs_buf_t *);
extern void xfs_buf_rele(xfs_buf_t *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(xfs_buf_t *);
extern void xfs_buf_lock(xfs_buf_t *);
extern void xfs_buf_unlock(xfs_buf_t *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);
extern void xfs_buf_ioend(struct xfs_buf *bp);
extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
		xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
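
/*
 * Example (editor's note, not part of the original header): the macro
 * captures the caller's code location via __this_address, so
 *
 *	xfs_buf_ioerror(bp, -EFSCORRUPTED);
 *
 * records the error in bp->b_error together with the call site for error
 * reporting; callers with a better location (e.g. an address returned by
 * verify_struct) pass it to __xfs_buf_ioerror() directly.
 */
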
extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
extern void xfs_buf_submit(struct xfs_buf *bp);
extern int xfs_buf_submit_wait(struct xfs_buf *bp);
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
				xfs_buf_rw_t);
#define xfs_buf_zero(bp, off, len) \
	    xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)

/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);

/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);

/*
 * These macros use the IO block map rather than b_bn. b_bn is now really
 * just for the buffer cache index for cached buffers. As IO does not use b_bn
 * anymore, uncached buffers do not use b_bn at all and hence must modify the IO
 * map directly. Uncached buffers are not allowed to be discontiguous, so this
 * is safe to do.
 *
 * In future, uncached buffers will pass the block number directly to the io
 * request function and hence these macros will go away at that point.
 */
#define XFS_BUF_ADDR(bp)		((bp)->b_maps[0].bm_bn)
#define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno))

void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline void xfs_buf_relse(xfs_buf_t *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}

/*
 *	Handling of buftargs.
 */
extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
			struct block_device *, struct dax_device *);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

#endif	/* __XFS_BUF_H__ */