v6.8
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_H
#define BTRFS_EXTENT_IO_H

#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/fiemap.h>
#include <linux/btrfs_tree.h>
#include "compression.h"
#include "ulist.h"
#include "misc.h"

struct btrfs_trans_handle;

enum {
	EXTENT_BUFFER_UPTODATE,
	EXTENT_BUFFER_DIRTY,
	EXTENT_BUFFER_CORRUPT,
	/* this got triggered by readahead */
	EXTENT_BUFFER_READAHEAD,
	EXTENT_BUFFER_TREE_REF,
	EXTENT_BUFFER_STALE,
	EXTENT_BUFFER_WRITEBACK,
	/* read IO error */
	EXTENT_BUFFER_READ_ERR,
	EXTENT_BUFFER_UNMAPPED,
	EXTENT_BUFFER_IN_TREE,
	/* write IO error */
	EXTENT_BUFFER_WRITE_ERR,
	/* Indicate the extent buffer is written zeroed out (for zoned) */
	EXTENT_BUFFER_ZONED_ZEROOUT,
	/* Indicate that extent buffer pages are being read */
	EXTENT_BUFFER_READING,
};

/* these are flags for __process_pages_contig */
enum {
	ENUM_BIT(PAGE_UNLOCK),
	/* Page starts writeback, clear dirty bit and set writeback bit */
	ENUM_BIT(PAGE_START_WRITEBACK),
	ENUM_BIT(PAGE_END_WRITEBACK),
	ENUM_BIT(PAGE_SET_ORDERED),
};

/*
 * Folio private values.  Every page that is controlled by the extent map has
 * folio private set to this value.
 */
#define EXTENT_FOLIO_PRIVATE			1

/*
 * The extent buffer bitmap operations are done with byte granularity instead of
 * word granularity for two reasons:
 * 1. The bitmaps must be little-endian on disk.
 * 2. Bitmap items are not guaranteed to be aligned to a word and therefore a
 *    single word in a bitmap may straddle two pages in the extent buffer.
 */
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))

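/*
 * Illustrative sketch (not part of this header): how the byte-granular
 * masks above combine on a plain byte array, mirroring the logic that
 * extent_buffer_bitmap_set() must apply across page boundaries.  Sets the
 * bit range [start, start + len), masking the partial first and last bytes.
 */
static inline void example_bitmap_set_bytewise(u8 *bitmap, unsigned long start,
					       unsigned long len)
{
	u8 *byte = bitmap + BIT_BYTE(start);
	const unsigned int size = start + len;
	unsigned int bits_to_set = BITS_PER_BYTE - (start % BITS_PER_BYTE);
	u8 mask = BITMAP_FIRST_BYTE_MASK(start);

	while (len >= bits_to_set) {
		*byte |= mask;			/* partial (or full) byte */
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;	/* whole bytes from here on */
		mask = ~0;
		byte++;
	}
	if (len) {
		mask &= BITMAP_LAST_BYTE_MASK(size);
		*byte |= mask;			/* trailing partial byte */
	}
}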
 
struct btrfs_root;
struct btrfs_inode;
struct btrfs_fs_info;
struct extent_io_tree;
struct btrfs_tree_parent_check;

int __init extent_buffer_init_cachep(void);
void __cold extent_buffer_free_cachep(void);

#define INLINE_EXTENT_BUFFER_PAGES     (BTRFS_MAX_METADATA_BLOCKSIZE / PAGE_SIZE)
struct extent_buffer {
	u64 start;
	unsigned long len;
	unsigned long bflags;
	struct btrfs_fs_info *fs_info;

	/*
	 * The address where the eb can be accessed without any cross-page handling.
	 * This can be NULL if not possible.
	 */
	void *addr;

	spinlock_t refs_lock;
	atomic_t refs;
	int read_mirror;
	/* >= 0 if eb belongs to a log tree, -1 otherwise */
	s8 log_index;
	struct rcu_head rcu_head;

	struct rw_semaphore lock;

	/*
	 * Pointers to all the folios of the extent buffer.
	 *
	 * For now the folio is always order 0 (aka, a single page).
	 */
	struct folio *folios[INLINE_EXTENT_BUFFER_PAGES];
#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
	pid_t lock_owner;
#endif
};

struct btrfs_eb_write_context {
	struct writeback_control *wbc;
	struct extent_buffer *eb;
	/* Block group @eb resides in. Only used for zoned mode. */
	struct btrfs_block_group *zoned_bg;
};

/*
 * Get the correct offset inside the folio of the extent buffer.
 *
 * @eb:		target extent buffer
 * @offset:	offset inside the extent buffer
 *
 * Will handle both sectorsize == PAGE_SIZE and sectorsize < PAGE_SIZE cases.
 */
static inline size_t get_eb_offset_in_folio(const struct extent_buffer *eb,
					    unsigned long offset)
{
	/*
	 * 1) sectorsize == PAGE_SIZE and nodesize >= PAGE_SIZE case
	 *    1.1) One large folio covering the whole eb
	 *	   The eb->start is aligned to folio size, thus adding it
	 *	   won't cause any difference.
	 *    1.2) Several page sized folios
	 *	   The eb->start is aligned to folio (page) size, thus
	 *	   adding it won't cause any difference.
	 *
	 * 2) sectorsize < PAGE_SIZE and nodesize < PAGE_SIZE case
	 *    In this case there would only be one page sized folio, and there
	 *    may be several different extent buffers in the page/folio.
	 *    We need to add eb->start to properly access the offset inside
	 *    that eb.
	 */
	return offset_in_folio(eb->folios[0], offset + eb->start);
}

static inline unsigned long get_eb_folio_index(const struct extent_buffer *eb,
					       unsigned long offset)
{
	/*
	 * 1) sectorsize == PAGE_SIZE and nodesize >= PAGE_SIZE case
	 *    1.1) One large folio covering the whole eb.
	 *	   The folio_shift would be large enough to always make us
	 *	   return 0 as index.
	 *    1.2) Several page sized folios
	 *         The folio_shift() would be PAGE_SHIFT, giving us the correct
	 *         index.
	 *
	 * 2) sectorsize < PAGE_SIZE and nodesize < PAGE_SIZE case
	 *    The folio would only be page sized, and always give us 0 as index.
	 */
	return offset >> folio_shift(eb->folios[0]);
}
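
/*
 * Illustrative sketch (not part of this header): the two helpers above are
 * used together to locate a byte inside the eb.  Assuming the value does
 * not cross a folio boundary and the folio is addressable via
 * folio_address(), a u64 could be read like this.  read_extent_buffer()
 * below is the safe, boundary-aware interface.
 */
static inline u64 example_eb_read_u64(const struct extent_buffer *eb,
				      unsigned long offset)
{
	unsigned long index = get_eb_folio_index(eb, offset);
	size_t off = get_eb_offset_in_folio(eb, offset);

	return *(const u64 *)(folio_address(eb->folios[index]) + off);
}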

/*
 * Structure to record how many bytes and which ranges are set/cleared
 */
struct extent_changeset {
	/* How many bytes are set/cleared in this operation */
	u64 bytes_changed;

	/* Changed ranges */
	struct ulist range_changed;
};

static inline void extent_changeset_init(struct extent_changeset *changeset)
{
	changeset->bytes_changed = 0;
	ulist_init(&changeset->range_changed);
}

static inline struct extent_changeset *extent_changeset_alloc(void)
{
	struct extent_changeset *ret;

	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	extent_changeset_init(ret);
	return ret;
}

static inline void extent_changeset_release(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	changeset->bytes_changed = 0;
	ulist_release(&changeset->range_changed);
}

static inline void extent_changeset_free(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	extent_changeset_release(changeset);
	kfree(changeset);
}

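/*
 * Illustrative usage sketch (not part of this header): the typical life
 * cycle of an extent_changeset when a caller wants to know how many bytes
 * an operation actually changed.
 */
static inline int example_changeset_usage(void)
{
	struct extent_changeset *changeset;

	changeset = extent_changeset_alloc();
	if (!changeset)
		return -ENOMEM;

	/*
	 * ... pass @changeset to a bit set/clear helper; the changed ranges
	 * accumulate in changeset->range_changed and the byte count in
	 * changeset->bytes_changed ...
	 */

	extent_changeset_free(changeset);	/* frees the ulist too */
	return 0;
}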

struct extent_map_tree;

int try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);

int btrfs_read_folio(struct file *file, struct folio *folio);
void extent_write_locked_range(struct inode *inode, struct page *locked_page,
			       u64 start, u64 end, struct writeback_control *wbc,
			       bool pages_dirty);
int extent_writepages(struct address_space *mapping,
		      struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
			    struct writeback_control *wbc);
void extent_readahead(struct readahead_control *rac);
int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
		  u64 start, u64 len);
int set_page_extent_mapped(struct page *page);
void clear_page_extent_mapped(struct page *page);

struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 start, u64 owner_root, int level);
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						  u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 start);
struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
					 u64 start);
void free_extent_buffer(struct extent_buffer *eb);
void free_extent_buffer_stale(struct extent_buffer *eb);
#define WAIT_NONE	0
#define WAIT_COMPLETE	1
#define WAIT_PAGE_LOCK	2
int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
			     struct btrfs_tree_parent_check *parent_check);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
				u64 bytenr, u64 owner_root, u64 gen, int level);
void btrfs_readahead_node_child(struct extent_buffer *node, int slot);

static inline int num_extent_pages(const struct extent_buffer *eb)
{
	/*
	 * For sectorsize == PAGE_SIZE case, since nodesize is always aligned to
	 * sectorsize, it's just eb->len >> PAGE_SHIFT.
	 *
	 * For sectorsize < PAGE_SIZE case, we could have nodesize < PAGE_SIZE,
	 * thus have to ensure we get at least one page.
	 */
	return (eb->len >> PAGE_SHIFT) ?: 1;
}

/*
 * The folio count can only be determined at runtime by checking
 * eb::folios[0], as the eb is backed either by one large folio covering
 * the whole eb (nodesize <= PAGE_SIZE, or a high-order folio) or by
 * multiple single-page folios.
 */
static inline int num_extent_folios(const struct extent_buffer *eb)
{
	if (folio_order(eb->folios[0]))
		return 1;
	return num_extent_pages(eb);
}
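
/*
 * Illustrative sketch (not part of this header): the canonical loop over
 * the folios backing an eb.  With one high-order folio it runs once; with
 * a 16K nodesize on 4K pages it runs four times.  The sum is always at
 * least eb->len.
 */
static inline size_t example_eb_backing_bytes(const struct extent_buffer *eb)
{
	size_t bytes = 0;
	int i;

	for (i = 0; i < num_extent_folios(eb); i++)
		bytes += folio_size(eb->folios[i]);

	return bytes;
}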

static inline int extent_buffer_uptodate(const struct extent_buffer *eb)
{
	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
}

int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
			 unsigned long start, unsigned long len);
void read_extent_buffer(const struct extent_buffer *eb, void *dst,
			unsigned long start,
			unsigned long len);
int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
				       void __user *dst, unsigned long start,
				       unsigned long len);
void write_extent_buffer(const struct extent_buffer *eb, const void *src,
			 unsigned long start, unsigned long len);

static inline void write_extent_buffer_chunk_tree_uuid(
		const struct extent_buffer *eb, const void *chunk_tree_uuid)
{
	write_extent_buffer(eb, chunk_tree_uuid,
			    offsetof(struct btrfs_header, chunk_tree_uuid),
			    BTRFS_FSID_SIZE);
}

static inline void write_extent_buffer_fsid(const struct extent_buffer *eb,
					    const void *fsid)
{
	write_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid),
			    BTRFS_FSID_SIZE);
}

void copy_extent_buffer_full(const struct extent_buffer *dst,
			     const struct extent_buffer *src);
void copy_extent_buffer(const struct extent_buffer *dst,
			const struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len);
void memcpy_extent_buffer(const struct extent_buffer *dst,
			  unsigned long dst_offset, unsigned long src_offset,
			  unsigned long len);
void memmove_extent_buffer(const struct extent_buffer *dst,
			   unsigned long dst_offset, unsigned long src_offset,
			   unsigned long len);
void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
			   unsigned long len);
int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
			   unsigned long pos);
void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len);
void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
				unsigned long start, unsigned long pos,
				unsigned long len);
void set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
				  struct page *locked_page,
				  u32 bits_to_clear, unsigned long page_ops);
int extent_invalidate_folio(struct extent_io_tree *tree,
			    struct folio *folio, size_t offset);
void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
			      struct extent_buffer *buf);

int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
			   gfp_t extra_gfp);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode,
			     struct page *locked_page, u64 *start,
			     u64 *end);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
					       u64 start);

#ifdef CONFIG_BTRFS_DEBUG
void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info);
#else
#define btrfs_extent_buffer_leak_debug_check(fs_info)	do {} while (0)
#endif

#endif
v5.4
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_H
#define BTRFS_EXTENT_IO_H

#include <linux/rbtree.h>
#include <linux/refcount.h>
#include "ulist.h"

/* bits for the extent state */
#define EXTENT_DIRTY		(1U << 0)
#define EXTENT_UPTODATE		(1U << 1)
#define EXTENT_LOCKED		(1U << 2)
#define EXTENT_NEW		(1U << 3)
#define EXTENT_DELALLOC		(1U << 4)
#define EXTENT_DEFRAG		(1U << 5)
#define EXTENT_BOUNDARY		(1U << 6)
#define EXTENT_NODATASUM	(1U << 7)
#define EXTENT_CLEAR_META_RESV	(1U << 8)
#define EXTENT_NEED_WAIT	(1U << 9)
#define EXTENT_DAMAGED		(1U << 10)
#define EXTENT_NORESERVE	(1U << 11)
#define EXTENT_QGROUP_RESERVED	(1U << 12)
#define EXTENT_CLEAR_DATA_RESV	(1U << 13)
#define EXTENT_DELALLOC_NEW	(1U << 14)
#define EXTENT_DO_ACCOUNTING    (EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING)

/*
 * The bits redefined below are used only in the device allocation tree.
 * They must not reuse EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV
 * / EXTENT_CLEAR_DATA_RESV because those have special meaning to the bit
 * manipulation functions.
 */
#define CHUNK_ALLOCATED EXTENT_DIRTY
#define CHUNK_TRIMMED   EXTENT_DEFRAG

/*
 * Flags for bio submission.  The high bits indicate the compression
 * type for this bio.
 */
#define EXTENT_BIO_COMPRESSED 1
#define EXTENT_BIO_FLAG_SHIFT 16

enum {
	EXTENT_BUFFER_UPTODATE,
	EXTENT_BUFFER_DIRTY,
	EXTENT_BUFFER_CORRUPT,
	/* this got triggered by readahead */
	EXTENT_BUFFER_READAHEAD,
	EXTENT_BUFFER_TREE_REF,
	EXTENT_BUFFER_STALE,
	EXTENT_BUFFER_WRITEBACK,
	/* read IO error */
	EXTENT_BUFFER_READ_ERR,
	EXTENT_BUFFER_UNMAPPED,
	EXTENT_BUFFER_IN_TREE,
	/* write IO error */
	EXTENT_BUFFER_WRITE_ERR,
};

/* these are flags for __process_pages_contig */
#define PAGE_UNLOCK		(1 << 0)
#define PAGE_CLEAR_DIRTY	(1 << 1)
#define PAGE_SET_WRITEBACK	(1 << 2)
#define PAGE_END_WRITEBACK	(1 << 3)
#define PAGE_SET_PRIVATE2	(1 << 4)
#define PAGE_SET_ERROR		(1 << 5)
#define PAGE_LOCK		(1 << 6)

/*
 * page->private values.  Every page that is controlled by the extent
 * map has page->private set to one.
 */
#define EXTENT_PAGE_PRIVATE 1

/*
 * The extent buffer bitmap operations are done with byte granularity instead of
 * word granularity for two reasons:
 * 1. The bitmaps must be little-endian on disk.
 * 2. Bitmap items are not guaranteed to be aligned to a word and therefore a
 *    single word in a bitmap may straddle two pages in the extent buffer.
 */
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))

struct extent_state;
struct btrfs_root;
struct btrfs_inode;
struct btrfs_io_bio;
struct io_failure_record;

typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
		struct bio *bio, u64 bio_offset);

struct extent_io_ops {
	/*
	 * The following callbacks must always be defined; the function
	 * pointer will be called unconditionally.
	 */
	blk_status_t (*submit_bio_hook)(struct inode *inode, struct bio *bio,
					int mirror_num, unsigned long bio_flags);
	int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
				    struct page *page, u64 start, u64 end,
				    int mirror);
};

enum {
	IO_TREE_FS_INFO_FREED_EXTENTS0,
	IO_TREE_FS_INFO_FREED_EXTENTS1,
	IO_TREE_INODE_IO,
	IO_TREE_INODE_IO_FAILURE,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_SELFTEST,
};

struct extent_io_tree {
	struct rb_root state;
	struct btrfs_fs_info *fs_info;
	void *private_data;
	u64 dirty_bytes;
	bool track_uptodate;

	/* Who owns this io tree, should be one of IO_TREE_* */
	u8 owner;

	spinlock_t lock;
	const struct extent_io_ops *ops;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	refcount_t refs;
	unsigned state;

	struct io_failure_record *failrec;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

#define INLINE_EXTENT_BUFFER_PAGES 16
#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
struct extent_buffer {
	u64 start;
	unsigned long len;
	unsigned long bflags;
	struct btrfs_fs_info *fs_info;
	spinlock_t refs_lock;
	atomic_t refs;
	atomic_t io_pages;
	int read_mirror;
	struct rcu_head rcu_head;
	pid_t lock_owner;

	int blocking_writers;
	atomic_t blocking_readers;
	bool lock_nested;
	/* >= 0 if eb belongs to a log tree, -1 otherwise */
	short log_index;

	/* protects write locks */
	rwlock_t lock;

	/* readers use lock_wq while they wait for the write
	 * lock holders to unlock
	 */
	wait_queue_head_t write_lock_wq;

	/* writers use read_lock_wq while they wait for readers
	 * to unlock
	 */
	wait_queue_head_t read_lock_wq;
	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
#ifdef CONFIG_BTRFS_DEBUG
	int spinning_writers;
	atomic_t spinning_readers;
	atomic_t read_locks;
	int write_locks;
	struct list_head leak_list;
#endif
};

/*
 * Structure to record how many bytes and which ranges are set/cleared
 */
struct extent_changeset {
	/* How many bytes are set/cleared in this operation */
	unsigned int bytes_changed;

	/* Changed ranges */
	struct ulist range_changed;
};

static inline void extent_changeset_init(struct extent_changeset *changeset)
{
	changeset->bytes_changed = 0;
	ulist_init(&changeset->range_changed);
}

static inline struct extent_changeset *extent_changeset_alloc(void)
{
	struct extent_changeset *ret;

	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	extent_changeset_init(ret);
	return ret;
}

static inline void extent_changeset_release(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	changeset->bytes_changed = 0;
	ulist_release(&changeset->range_changed);
}

static inline void extent_changeset_free(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	extent_changeset_release(changeset);
	kfree(changeset);
}

static inline void extent_set_compress_type(unsigned long *bio_flags,
					    int compress_type)
{
	*bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT;
}

static inline int extent_compress_type(unsigned long bio_flags)
{
	return bio_flags >> EXTENT_BIO_FLAG_SHIFT;
}

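/*
 * Illustrative sketch (not part of this header): a round trip through the
 * two helpers above.  The compression type (e.g. BTRFS_COMPRESS_ZLIB from
 * compression.h) lives in the bits above EXTENT_BIO_FLAG_SHIFT, so it
 * coexists with low flag bits such as EXTENT_BIO_COMPRESSED.
 */
static inline void example_compress_type_round_trip(unsigned long *bio_flags,
						    int compress_type)
{
	*bio_flags |= EXTENT_BIO_COMPRESSED;
	extent_set_compress_type(bio_flags, compress_type);

	/* extent_compress_type() recovers @compress_type from the flags */
	WARN_ON(extent_compress_type(*bio_flags) != compress_type);
}
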
struct extent_map_tree;

typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
					  struct page *page,
					  size_t pg_offset,
					  u64 start, u64 len,
					  int create);

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data);
void extent_io_tree_release(struct extent_io_tree *tree);
int try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, NULL);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
			  get_extent_t *get_extent, int mirror_num);
int __init extent_io_init(void);
void __cold extent_io_exit(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, unsigned bits, int contig);

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, int filled,
		   struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		unsigned bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached, gfp_t mask,
		     struct extent_changeset *changeset);

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
}

static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				GFP_NOFS, NULL);
}

static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
		u64 start, u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				GFP_ATOMIC, NULL);
}

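/*
 * Illustrative usage sketch (not part of this header): the classic
 * lock -> operate -> unlock pattern over a byte range of an io tree.
 * Ranges are inclusive, hence the "+ len - 1".
 */
static inline void example_locked_range(struct extent_io_tree *tree,
					u64 start, u64 len)
{
	u64 end = start + len - 1;

	lock_extent(tree, start, end);

	/* ... extent state of [start, end] is now stable ... */

	unlock_extent(tree, start, end);
}
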
static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	int wake = 0;

	if (bits & EXTENT_LOCKED)
		wake = 1;

	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask);
int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits);

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
}

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
		u64 end, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
			      NULL, mask);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
				     u64 end, struct extent_state **cached)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
				      u64 end, unsigned int extra_bits,
				      struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
		u64 end)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
			GFP_NOFS);
}

static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
			      cached_state, mask);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, unsigned bits,
			  struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, unsigned bits);
int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset);
int extent_write_full_page(struct page *page, struct writeback_control *wbc);
int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
			      int mode);
int extent_writepages(struct address_space *mapping,
		      struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
			    struct writeback_control *wbc);
int extent_readpages(struct address_space *mapping, struct list_head *pages,
		     unsigned nr_pages);
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len);
void set_page_extent_mapped(struct page *page);

struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 start);
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						  u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 start);
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
					 u64 start);
void free_extent_buffer(struct extent_buffer *eb);
void free_extent_buffer_stale(struct extent_buffer *eb);
#define WAIT_NONE	0
#define WAIT_COMPLETE	1
#define WAIT_PAGE_LOCK	2
int read_extent_buffer_pages(struct extent_buffer *eb, int wait,
			     int mirror_num);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);

static inline int num_extent_pages(const struct extent_buffer *eb)
{
	return (round_up(eb->start + eb->len, PAGE_SIZE) >> PAGE_SHIFT) -
	       (eb->start >> PAGE_SHIFT);
}
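
/*
 * Worked example (illustrative, with a hypothetical non-page-aligned start
 * to show why the rounding matters), assuming 4K pages: an eb with
 * start == 0x2800 and len == 0x2000 covers bytes 0x2800..0x47ff, i.e.
 * pages 2, 3 and 4.  round_up(0x4800, 0x1000) >> 12 == 5 and
 * 0x2800 >> 12 == 2, so num_extent_pages() == 3, where the naive
 * eb->len >> PAGE_SHIFT would give only 2.
 */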

static inline void extent_buffer_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->refs);
}

static inline int extent_buffer_uptodate(struct extent_buffer *eb)
{
	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
}

int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
			 unsigned long start, unsigned long len);
void read_extent_buffer(const struct extent_buffer *eb, void *dst,
			unsigned long start,
			unsigned long len);
int read_extent_buffer_to_user(const struct extent_buffer *eb,
			       void __user *dst, unsigned long start,
			       unsigned long len);
void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
		const void *src);
void write_extent_buffer(struct extent_buffer *eb, const void *src,
			 unsigned long start, unsigned long len);
void copy_extent_buffer_full(struct extent_buffer *dst,
			     struct extent_buffer *src);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len);
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len);
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len);
void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
			   unsigned long len);
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long pos);
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len);
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
				unsigned long pos, unsigned long len);
void clear_extent_buffer_dirty(struct extent_buffer *eb);
bool set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_under_io(struct extent_buffer *eb);
int map_private_extent_buffer(const struct extent_buffer *eb,
			      unsigned long offset, unsigned long min_len,
			      char **map, unsigned long *map_start,
			      unsigned long *map_len);
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
				  struct page *locked_page,
				  unsigned bits_to_clear,
				  unsigned long page_ops);
struct bio *btrfs_bio_alloc(u64 first_byte);
struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
struct bio *btrfs_bio_clone(struct bio *bio);
struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size);

struct btrfs_fs_info;
struct btrfs_inode;

int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
		      u64 length, u64 logical, struct page *page,
		      unsigned int pg_offset, int mirror_num);
int clean_io_failure(struct btrfs_fs_info *fs_info,
		     struct extent_io_tree *failure_tree,
		     struct extent_io_tree *io_tree, u64 start,
		     struct page *page, u64 ino, unsigned int pg_offset);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num);

/*
 * When IO fails, either with EIO or a csum verification failure, we try
 * other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int this_mirror;
	int failed_mirror;
	int in_validation;
};

void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
		u64 end);
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
				struct io_failure_record **failrec_ret);
bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
			    struct io_failure_record *failrec, int fail_mirror);
struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
				    struct io_failure_record *failrec,
				    struct page *page, int pg_offset, int icsum,
				    bio_end_io_t *endio_func, void *data);
int free_io_failure(struct extent_io_tree *failure_tree,
		    struct extent_io_tree *io_tree,
		    struct io_failure_record *rec);
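
/*
 * Illustrative sketch (not part of this header) of how the pieces above fit
 * together in a read-repair path: look up or create the failure record for
 * the failed range, check whether another mirror is worth trying, then
 * build the repair bio.  Submission and error handling are elided; the
 * icsum of 0 and the NULL private data are placeholder arguments.
 */
static inline int example_read_repair(struct inode *inode,
				      struct bio *failed_bio,
				      struct page *page, int pg_offset,
				      u64 start, u64 end, int failed_mirror,
				      bio_end_io_t *end_io)
{
	struct io_failure_record *failrec;
	struct bio *repair_bio;
	int ret;

	ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
	if (ret)
		return ret;

	if (!btrfs_check_repairable(inode, 1, failrec, failed_mirror))
		return -EIO;

	repair_bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
					     pg_offset, 0, end_io, NULL);
	/* ... submit repair_bio, reading from failrec->this_mirror ... */
	return repair_bio ? 0 : -EIO;
}
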
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode,
			     struct page *locked_page, u64 *start,
			     u64 *end);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
					       u64 start);

#endif