/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_H
#define BTRFS_EXTENT_IO_H

#include <linux/rbtree.h>
#include <linux/refcount.h>
#include "ulist.h"

/* bits for the extent state */
#define EXTENT_DIRTY		(1U << 0)
#define EXTENT_UPTODATE		(1U << 1)
#define EXTENT_LOCKED		(1U << 2)
#define EXTENT_NEW		(1U << 3)
#define EXTENT_DELALLOC		(1U << 4)
#define EXTENT_DEFRAG		(1U << 5)
#define EXTENT_BOUNDARY		(1U << 6)
#define EXTENT_NODATASUM	(1U << 7)
#define EXTENT_CLEAR_META_RESV	(1U << 8)
#define EXTENT_NEED_WAIT	(1U << 9)
#define EXTENT_DAMAGED		(1U << 10)
#define EXTENT_NORESERVE	(1U << 11)
#define EXTENT_QGROUP_RESERVED	(1U << 12)
#define EXTENT_CLEAR_DATA_RESV	(1U << 13)
#define EXTENT_DELALLOC_NEW	(1U << 14)
#define EXTENT_DO_ACCOUNTING	(EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING)

/*
 * Redefined bits above which are used only in the device allocation tree.
 * They should not reuse EXTENT_LOCKED / EXTENT_BOUNDARY /
 * EXTENT_CLEAR_META_RESV / EXTENT_CLEAR_DATA_RESV because those bits have
 * special meaning to the bit manipulation functions.
 */
#define CHUNK_ALLOCATED		EXTENT_DIRTY
#define CHUNK_TRIMMED		EXTENT_DEFRAG
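
/*
 * Usage sketch (illustrative only): a device allocation tree tracks which
 * chunk ranges are allocated and trimmed via the generic bit helpers
 * declared below.  "alloc_state" is a hypothetical tree pointer, not
 * defined in this header.
 *
 *	set_extent_bits(alloc_state, start, end, CHUNK_ALLOCATED);
 *	...
 *	clear_extent_bits(alloc_state, start, end,
 *			  CHUNK_ALLOCATED | CHUNK_TRIMMED);
 */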

/*
 * flags for bio submission. The high bits indicate the compression
 * type for this bio
 */
#define EXTENT_BIO_COMPRESSED 1
#define EXTENT_BIO_FLAG_SHIFT 16

enum {
	EXTENT_BUFFER_UPTODATE,
	EXTENT_BUFFER_DIRTY,
	EXTENT_BUFFER_CORRUPT,
	/* this got triggered by readahead */
	EXTENT_BUFFER_READAHEAD,
	EXTENT_BUFFER_TREE_REF,
	EXTENT_BUFFER_STALE,
	EXTENT_BUFFER_WRITEBACK,
	/* read IO error */
	EXTENT_BUFFER_READ_ERR,
	EXTENT_BUFFER_UNMAPPED,
	EXTENT_BUFFER_IN_TREE,
	/* write IO error */
	EXTENT_BUFFER_WRITE_ERR,
};

/* these are flags for __process_pages_contig */
#define PAGE_UNLOCK		(1 << 0)
#define PAGE_CLEAR_DIRTY	(1 << 1)
#define PAGE_SET_WRITEBACK	(1 << 2)
#define PAGE_END_WRITEBACK	(1 << 3)
#define PAGE_SET_PRIVATE2	(1 << 4)
#define PAGE_SET_ERROR		(1 << 5)
#define PAGE_LOCK		(1 << 6)

/*
 * page->private values. Every page that is controlled by the extent
 * map has page->private set to one.
 */
#define EXTENT_PAGE_PRIVATE 1
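
/*
 * Sketch of how this value gets attached (based on set_page_extent_mapped(),
 * declared below; the exact implementation may differ):
 *
 *	if (!PagePrivate(page)) {
 *		SetPagePrivate(page);
 *		get_page(page);
 *		set_page_private(page, EXTENT_PAGE_PRIVATE);
 *	}
 */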

/*
 * The extent buffer bitmap operations are done with byte granularity instead of
 * word granularity for two reasons:
 * 1. The bitmaps must be little-endian on disk.
 * 2. Bitmap items are not guaranteed to be aligned to a word and therefore a
 *    single word in a bitmap may straddle two pages in the extent buffer.
 */
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
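
/*
 * Worked example: for a bitmap range covering bits [3, 13),
 *
 *	BITMAP_FIRST_BYTE_MASK(3) == 0xf8   (bits 3..7 of byte 0)
 *	BITMAP_LAST_BYTE_MASK(13) == 0x1f   (bits 0..4 of byte 1)
 *
 * so a byte-granular set or clear touches only the bits inside the range.
 */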

struct extent_state;
struct btrfs_root;
struct btrfs_inode;
struct btrfs_io_bio;
struct io_failure_record;

typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
		struct bio *bio, u64 bio_offset);

struct extent_io_ops {
	/*
	 * The following callbacks must always be defined; the function
	 * pointers are called unconditionally.
	 */
	blk_status_t (*submit_bio_hook)(struct inode *inode, struct bio *bio,
					int mirror_num, unsigned long bio_flags);
	int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
				    struct page *page, u64 start, u64 end,
				    int mirror);
};

enum {
	IO_TREE_FS_INFO_FREED_EXTENTS0,
	IO_TREE_FS_INFO_FREED_EXTENTS1,
	IO_TREE_INODE_IO,
	IO_TREE_INODE_IO_FAILURE,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_SELFTEST,
};

struct extent_io_tree {
	struct rb_root state;
	struct btrfs_fs_info *fs_info;
	void *private_data;
	u64 dirty_bytes;
	bool track_uptodate;

	/* Who owns this io tree, should be one of IO_TREE_* */
	u8 owner;

	spinlock_t lock;
	const struct extent_io_ops *ops;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	refcount_t refs;
	unsigned state;

	struct io_failure_record *failrec;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

#define INLINE_EXTENT_BUFFER_PAGES 16
#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
struct extent_buffer {
	u64 start;
	unsigned long len;
	unsigned long bflags;
	struct btrfs_fs_info *fs_info;
	spinlock_t refs_lock;
	atomic_t refs;
	atomic_t io_pages;
	int read_mirror;
	struct rcu_head rcu_head;
	pid_t lock_owner;

	int blocking_writers;
	atomic_t blocking_readers;
	bool lock_nested;
	/* >= 0 if eb belongs to a log tree, -1 otherwise */
	short log_index;

	/* protects write locks */
	rwlock_t lock;

	/* readers use lock_wq while they wait for the write
	 * lock holders to unlock
	 */
	wait_queue_head_t write_lock_wq;

	/* writers use read_lock_wq while they wait for readers
	 * to unlock
	 */
	wait_queue_head_t read_lock_wq;
	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
#ifdef CONFIG_BTRFS_DEBUG
	int spinning_writers;
	atomic_t spinning_readers;
	atomic_t read_locks;
	int write_locks;
	struct list_head leak_list;
#endif
};

/*
 * Structure to record how many bytes and which ranges are set/cleared
 */
struct extent_changeset {
	/* How many bytes are set/cleared in this operation */
	unsigned int bytes_changed;

	/* Changed ranges */
	struct ulist range_changed;
};

static inline void extent_changeset_init(struct extent_changeset *changeset)
{
	changeset->bytes_changed = 0;
	ulist_init(&changeset->range_changed);
}

static inline struct extent_changeset *extent_changeset_alloc(void)
{
	struct extent_changeset *ret;

	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	extent_changeset_init(ret);
	return ret;
}

static inline void extent_changeset_release(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	changeset->bytes_changed = 0;
	ulist_release(&changeset->range_changed);
}

static inline void extent_changeset_free(struct extent_changeset *changeset)
{
	if (!changeset)
		return;
	extent_changeset_release(changeset);
	kfree(changeset);
}
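
/*
 * Usage sketch (illustrative only): record exactly which ranges an operation
 * changed, e.g. when reserving qgroup space with set_record_extent_bits()
 * (declared below).
 *
 *	struct extent_changeset *changeset = extent_changeset_alloc();
 *
 *	if (!changeset)
 *		return -ENOMEM;
 *	ret = set_record_extent_bits(tree, start, end,
 *				     EXTENT_QGROUP_RESERVED, changeset);
 *	...inspect changeset->bytes_changed and changeset->range_changed...
 *	extent_changeset_free(changeset);
 */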

static inline void extent_set_compress_type(unsigned long *bio_flags,
					    int compress_type)
{
	*bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT;
}

static inline int extent_compress_type(unsigned long bio_flags)
{
	return bio_flags >> EXTENT_BIO_FLAG_SHIFT;
}
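
/*
 * Round-trip example: the compression type occupies the bits above
 * EXTENT_BIO_FLAG_SHIFT, leaving the low bits for flags such as
 * EXTENT_BIO_COMPRESSED.  With an arbitrary compress_type of 2:
 *
 *	unsigned long bio_flags = EXTENT_BIO_COMPRESSED;
 *
 *	extent_set_compress_type(&bio_flags, 2);
 *	extent_compress_type(bio_flags) == 2;
 *	(bio_flags & EXTENT_BIO_COMPRESSED) != 0;
 */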

struct extent_map_tree;

typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
					   struct page *page,
					   size_t pg_offset,
					   u64 start, u64 len,
					   int create);

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data);
void extent_io_tree_release(struct extent_io_tree *tree);
int try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, NULL);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
			  get_extent_t *get_extent, int mirror_num);
int __init extent_io_init(void);
void __cold extent_io_exit(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, unsigned bits, int contig);

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, int filled,
		   struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     unsigned bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, int wake, int delete,
		       struct extent_state **cached, gfp_t mask,
		       struct extent_changeset *changeset);

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
}

static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
				       u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				  GFP_NOFS, NULL);
}

static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
		u64 start, u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				  GFP_ATOMIC, NULL);
}
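
/*
 * Usage sketch (illustrative only): lock a range, keeping the cached state
 * so the unlock does not need to search the tree again.  Range ends are
 * inclusive.
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent_bits(tree, start, end, &cached);
 *	...operate on [start, end]...
 *	unlock_extent_cached(tree, start, end, &cached);
 */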

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
				    u64 end, unsigned bits)
{
	int wake = 0;

	if (bits & EXTENT_LOCKED)
		wake = 1;

	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask);
int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits);

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, unsigned bits)
{
	return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
}

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				  cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
		u64 end, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
			      NULL, mask);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
				     u64 end, struct extent_state **cached)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
				      u64 end, unsigned int extra_bits,
				      struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
		u64 end)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
			      GFP_NOFS);
}

static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
			      cached_state, mask);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, unsigned bits,
			  struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, unsigned bits);
int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset);
int extent_write_full_page(struct page *page, struct writeback_control *wbc);
int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
			      int mode);
int extent_writepages(struct address_space *mapping,
		      struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
			    struct writeback_control *wbc);
int extent_readpages(struct address_space *mapping, struct list_head *pages,
		     unsigned nr_pages);
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len);
void set_page_extent_mapped(struct page *page);

struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 start);
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						  u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 start);
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
					 u64 start);
void free_extent_buffer(struct extent_buffer *eb);
void free_extent_buffer_stale(struct extent_buffer *eb);
#define WAIT_NONE	0
#define WAIT_COMPLETE	1
#define WAIT_PAGE_LOCK	2
int read_extent_buffer_pages(struct extent_buffer *eb, int wait,
			     int mirror_num);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
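
/*
 * Usage sketch (illustrative only): start the read and wait for completion
 * before trusting the contents.
 *
 *	ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num);
 *	if (!ret && extent_buffer_uptodate(eb))
 *		...contents are valid...
 */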

static inline int num_extent_pages(const struct extent_buffer *eb)
{
	return (round_up(eb->start + eb->len, PAGE_SIZE) >> PAGE_SHIFT) -
	       (eb->start >> PAGE_SHIFT);
}
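
/*
 * Worked example (4K pages, values chosen for illustration): an eb with
 * start == 4096 and len == 6144 ends at 10240; round_up(10240, 4096) is
 * 12288, so the result is (12288 >> 12) - (4096 >> 12) == 3 - 1 == 2 pages.
 */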

static inline void extent_buffer_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->refs);
}

static inline int extent_buffer_uptodate(struct extent_buffer *eb)
{
	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
}

int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
			 unsigned long start, unsigned long len);
void read_extent_buffer(const struct extent_buffer *eb, void *dst,
			unsigned long start,
			unsigned long len);
int read_extent_buffer_to_user(const struct extent_buffer *eb,
			       void __user *dst, unsigned long start,
			       unsigned long len);
void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
		const void *src);
void write_extent_buffer(struct extent_buffer *eb, const void *src,
			 unsigned long start, unsigned long len);
void copy_extent_buffer_full(struct extent_buffer *dst,
			     struct extent_buffer *src);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len);
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len);
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len);
void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
			   unsigned long len);
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long pos);
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len);
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
				unsigned long pos, unsigned long len);
void clear_extent_buffer_dirty(struct extent_buffer *eb);
bool set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_under_io(struct extent_buffer *eb);
int map_private_extent_buffer(const struct extent_buffer *eb,
			      unsigned long offset, unsigned long min_len,
			      char **map, unsigned long *map_start,
			      unsigned long *map_len);
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
				  struct page *locked_page,
				  unsigned bits_to_clear,
				  unsigned long page_ops);
struct bio *btrfs_bio_alloc(u64 first_byte);
struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
struct bio *btrfs_bio_clone(struct bio *bio);
struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size);

struct btrfs_fs_info;
struct btrfs_inode;

int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
		      u64 length, u64 logical, struct page *page,
		      unsigned int pg_offset, int mirror_num);
int clean_io_failure(struct btrfs_fs_info *fs_info,
		     struct extent_io_tree *failure_tree,
		     struct extent_io_tree *io_tree, u64 start,
		     struct page *page, u64 ino, unsigned int pg_offset);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num);

/*
 * When IO fails, either with EIO or a csum verification failure, we
 * try other mirrors that might have a good copy of the data. This
 * io_failure_record is used to record state as we go through all the
 * mirrors. If another mirror has good data, the page is set up to date
 * and things continue. If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int this_mirror;
	int failed_mirror;
	int in_validation;
};

void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
				  u64 end);
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
				struct io_failure_record **failrec_ret);
bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
			    struct io_failure_record *failrec, int fail_mirror);
struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
				    struct io_failure_record *failrec,
				    struct page *page, int pg_offset, int icsum,
				    bio_end_io_t *endio_func, void *data);
int free_io_failure(struct extent_io_tree *failure_tree,
		    struct extent_io_tree *io_tree,
		    struct io_failure_record *rec);
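
/*
 * Repair flow sketch (illustrative only; error handling, the endio callback
 * and bio submission are elided, and the exact call order may differ):
 *
 *	struct io_failure_record *failrec;
 *
 *	btrfs_get_io_failure_record(inode, start, end, &failrec);
 *	if (!btrfs_check_repairable(inode, failed_bio_pages, failrec,
 *				    failed_mirror))
 *		...free the record and fail the original bio...
 *	bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
 *				      pg_offset, icsum, endio_func, data);
 *	...submit bio against failrec->this_mirror...
 */
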
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode,
			      struct page *locked_page, u64 *start,
			      u64 *end);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
					       u64 start);

#endif