1#include <linux/bitops.h>
2#include <linux/slab.h>
3#include <linux/bio.h>
4#include <linux/mm.h>
5#include <linux/pagemap.h>
6#include <linux/page-flags.h>
7#include <linux/module.h>
8#include <linux/spinlock.h>
9#include <linux/blkdev.h>
10#include <linux/swap.h>
11#include <linux/writeback.h>
12#include <linux/pagevec.h>
13#include <linux/prefetch.h>
14#include <linux/cleancache.h>
15#include "extent_io.h"
16#include "extent_map.h"
17#include "compat.h"
18#include "ctree.h"
19#include "btrfs_inode.h"
20
21static struct kmem_cache *extent_state_cache;
22static struct kmem_cache *extent_buffer_cache;
23
24static LIST_HEAD(buffers);
25static LIST_HEAD(states);
26
27#define LEAK_DEBUG 0
28#if LEAK_DEBUG
29static DEFINE_SPINLOCK(leak_lock);
30#endif
31
32#define BUFFER_LRU_MAX 64
33
34struct tree_entry {
35 u64 start;
36 u64 end;
37 struct rb_node rb_node;
38};
39
40struct extent_page_data {
41 struct bio *bio;
42 struct extent_io_tree *tree;
43 get_extent_t *get_extent;
44
 45 /* tells writepage not to lock the state bits for this range;
46 * it still does the unlocking
47 */
48 unsigned int extent_locked:1;
49
50 /* tells the submit_bio code to use a WRITE_SYNC */
51 unsigned int sync_io:1;
52};
53
54int __init extent_io_init(void)
55{
56 extent_state_cache = kmem_cache_create("extent_state",
57 sizeof(struct extent_state), 0,
58 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
59 if (!extent_state_cache)
60 return -ENOMEM;
61
62 extent_buffer_cache = kmem_cache_create("extent_buffers",
63 sizeof(struct extent_buffer), 0,
64 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
65 if (!extent_buffer_cache)
66 goto free_state_cache;
67 return 0;
68
69free_state_cache:
70 kmem_cache_destroy(extent_state_cache);
71 return -ENOMEM;
72}
73
74void extent_io_exit(void)
75{
76 struct extent_state *state;
77 struct extent_buffer *eb;
78
79 while (!list_empty(&states)) {
80 state = list_entry(states.next, struct extent_state, leak_list);
81 printk(KERN_ERR "btrfs state leak: start %llu end %llu "
82 "state %lu in tree %p refs %d\n",
83 (unsigned long long)state->start,
84 (unsigned long long)state->end,
85 state->state, state->tree, atomic_read(&state->refs));
86 list_del(&state->leak_list);
87 kmem_cache_free(extent_state_cache, state);
88
89 }
90
91 while (!list_empty(&buffers)) {
92 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
93 printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
94 "refs %d\n", (unsigned long long)eb->start,
95 eb->len, atomic_read(&eb->refs));
96 list_del(&eb->leak_list);
97 kmem_cache_free(extent_buffer_cache, eb);
98 }
99 if (extent_state_cache)
100 kmem_cache_destroy(extent_state_cache);
101 if (extent_buffer_cache)
102 kmem_cache_destroy(extent_buffer_cache);
103}
104
105void extent_io_tree_init(struct extent_io_tree *tree,
106 struct address_space *mapping)
107{
108 tree->state = RB_ROOT;
109 INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
110 tree->ops = NULL;
111 tree->dirty_bytes = 0;
112 spin_lock_init(&tree->lock);
113 spin_lock_init(&tree->buffer_lock);
114 tree->mapping = mapping;
115}
116
117static struct extent_state *alloc_extent_state(gfp_t mask)
118{
119 struct extent_state *state;
120#if LEAK_DEBUG
121 unsigned long flags;
122#endif
123
124 state = kmem_cache_alloc(extent_state_cache, mask);
125 if (!state)
126 return state;
127 state->state = 0;
128 state->private = 0;
129 state->tree = NULL;
130#if LEAK_DEBUG
131 spin_lock_irqsave(&leak_lock, flags);
132 list_add(&state->leak_list, &states);
133 spin_unlock_irqrestore(&leak_lock, flags);
134#endif
135 atomic_set(&state->refs, 1);
136 init_waitqueue_head(&state->wq);
137 return state;
138}
139
140void free_extent_state(struct extent_state *state)
141{
142 if (!state)
143 return;
144 if (atomic_dec_and_test(&state->refs)) {
145#if LEAK_DEBUG
146 unsigned long flags;
147#endif
148 WARN_ON(state->tree);
149#if LEAK_DEBUG
150 spin_lock_irqsave(&leak_lock, flags);
151 list_del(&state->leak_list);
152 spin_unlock_irqrestore(&leak_lock, flags);
153#endif
154 kmem_cache_free(extent_state_cache, state);
155 }
156}
157
158static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
159 struct rb_node *node)
160{
161 struct rb_node **p = &root->rb_node;
162 struct rb_node *parent = NULL;
163 struct tree_entry *entry;
164
165 while (*p) {
166 parent = *p;
167 entry = rb_entry(parent, struct tree_entry, rb_node);
168
169 if (offset < entry->start)
170 p = &(*p)->rb_left;
171 else if (offset > entry->end)
172 p = &(*p)->rb_right;
173 else
174 return parent;
175 }
176
177 entry = rb_entry(node, struct tree_entry, rb_node);
178 rb_link_node(node, parent, p);
179 rb_insert_color(node, root);
180 return NULL;
181}
182
183static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
184 struct rb_node **prev_ret,
185 struct rb_node **next_ret)
186{
187 struct rb_root *root = &tree->state;
188 struct rb_node *n = root->rb_node;
189 struct rb_node *prev = NULL;
190 struct rb_node *orig_prev = NULL;
191 struct tree_entry *entry;
192 struct tree_entry *prev_entry = NULL;
193
194 while (n) {
195 entry = rb_entry(n, struct tree_entry, rb_node);
196 prev = n;
197 prev_entry = entry;
198
199 if (offset < entry->start)
200 n = n->rb_left;
201 else if (offset > entry->end)
202 n = n->rb_right;
203 else
204 return n;
205 }
206
207 if (prev_ret) {
208 orig_prev = prev;
209 while (prev && offset > prev_entry->end) {
210 prev = rb_next(prev);
211 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
212 }
213 *prev_ret = prev;
214 prev = orig_prev;
215 }
216
217 if (next_ret) {
218 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
219 while (prev && offset < prev_entry->start) {
220 prev = rb_prev(prev);
221 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
222 }
223 *next_ret = prev;
224 }
225 return NULL;
226}
227
228static inline struct rb_node *tree_search(struct extent_io_tree *tree,
229 u64 offset)
230{
231 struct rb_node *prev = NULL;
232 struct rb_node *ret;
233
234 ret = __etree_search(tree, offset, &prev, NULL);
235 if (!ret)
236 return prev;
237 return ret;
238}
239
240static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
241 struct extent_state *other)
242{
243 if (tree->ops && tree->ops->merge_extent_hook)
244 tree->ops->merge_extent_hook(tree->mapping->host, new,
245 other);
246}
247
248/*
249 * utility function to look for merge candidates inside a given range.
250 * Any extents with matching state are merged together into a single
 251 * extent in the tree. Extents with EXTENT_IOBITS in their state field
252 * are not merged because the end_io handlers need to be able to do
253 * operations on them without sleeping (or doing allocations/splits).
254 *
255 * This should be called with the tree lock held.
256 */
257static void merge_state(struct extent_io_tree *tree,
258 struct extent_state *state)
259{
260 struct extent_state *other;
261 struct rb_node *other_node;
262
263 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
264 return;
265
266 other_node = rb_prev(&state->rb_node);
267 if (other_node) {
268 other = rb_entry(other_node, struct extent_state, rb_node);
269 if (other->end == state->start - 1 &&
270 other->state == state->state) {
271 merge_cb(tree, state, other);
272 state->start = other->start;
273 other->tree = NULL;
274 rb_erase(&other->rb_node, &tree->state);
275 free_extent_state(other);
276 }
277 }
278 other_node = rb_next(&state->rb_node);
279 if (other_node) {
280 other = rb_entry(other_node, struct extent_state, rb_node);
281 if (other->start == state->end + 1 &&
282 other->state == state->state) {
283 merge_cb(tree, state, other);
284 state->end = other->end;
285 other->tree = NULL;
286 rb_erase(&other->rb_node, &tree->state);
287 free_extent_state(other);
288 }
289 }
290}
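/*
 * Worked example (illustrative, not part of the original file): if the tree
 * holds [0, 4095] and [4096, 8191] and both carry exactly the same bits
 * (say EXTENT_DELALLOC), calling merge_state() on either one collapses the
 * pair into a single [0, 8191] state and frees the absorbed struct.  States
 * with any of EXTENT_IOBITS or EXTENT_BOUNDARY set are skipped, per the
 * check at the top of the function.
 */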
291
292static void set_state_cb(struct extent_io_tree *tree,
293 struct extent_state *state, int *bits)
294{
295 if (tree->ops && tree->ops->set_bit_hook)
296 tree->ops->set_bit_hook(tree->mapping->host, state, bits);
297}
298
299static void clear_state_cb(struct extent_io_tree *tree,
300 struct extent_state *state, int *bits)
301{
302 if (tree->ops && tree->ops->clear_bit_hook)
303 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
304}
305
306static void set_state_bits(struct extent_io_tree *tree,
307 struct extent_state *state, int *bits);
308
309/*
310 * insert an extent_state struct into the tree. 'bits' are set on the
311 * struct before it is inserted.
312 *
313 * This may return -EEXIST if the extent is already there, in which case the
314 * state struct is freed.
315 *
316 * The tree lock is not taken internally. This is a utility function and
317 * probably isn't what you want to call (see set/clear_extent_bit).
318 */
319static int insert_state(struct extent_io_tree *tree,
320 struct extent_state *state, u64 start, u64 end,
321 int *bits)
322{
323 struct rb_node *node;
324
325 if (end < start) {
326 printk(KERN_ERR "btrfs end < start %llu %llu\n",
327 (unsigned long long)end,
328 (unsigned long long)start);
329 WARN_ON(1);
330 }
331 state->start = start;
332 state->end = end;
333
334 set_state_bits(tree, state, bits);
335
336 node = tree_insert(&tree->state, end, &state->rb_node);
337 if (node) {
338 struct extent_state *found;
339 found = rb_entry(node, struct extent_state, rb_node);
340 printk(KERN_ERR "btrfs found node %llu %llu on insert of "
341 "%llu %llu\n", (unsigned long long)found->start,
342 (unsigned long long)found->end,
343 (unsigned long long)start, (unsigned long long)end);
344 return -EEXIST;
345 }
346 state->tree = tree;
347 merge_state(tree, state);
348 return 0;
349}
350
351static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
352 u64 split)
353{
354 if (tree->ops && tree->ops->split_extent_hook)
355 tree->ops->split_extent_hook(tree->mapping->host, orig, split);
356}
357
358/*
359 * split a given extent state struct in two, inserting the preallocated
360 * struct 'prealloc' as the newly created second half. 'split' indicates an
361 * offset inside 'orig' where it should be split.
362 *
363 * Before calling,
364 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
365 * are two extent state structs in the tree:
366 * prealloc: [orig->start, split - 1]
367 * orig: [ split, orig->end ]
368 *
369 * The tree locks are not taken by this function. They need to be held
370 * by the caller.
371 */
372static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
373 struct extent_state *prealloc, u64 split)
374{
375 struct rb_node *node;
376
377 split_cb(tree, orig, split);
378
379 prealloc->start = orig->start;
380 prealloc->end = split - 1;
381 prealloc->state = orig->state;
382 orig->start = split;
383
384 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
385 if (node) {
386 free_extent_state(prealloc);
387 return -EEXIST;
388 }
389 prealloc->tree = tree;
390 return 0;
391}
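/*
 * Worked example (illustrative): splitting a state that covers [0, 8191]
 * at split == 4096 leaves 'prealloc' owning [0, 4095] and 'orig' owning
 * [4096, 8191]; both halves keep the same state bits, matching the diagram
 * in the comment above split_state().
 */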
392
393/*
394 * utility function to clear some bits in an extent state struct.
 395 * it will optionally wake up anyone waiting on this state (wake == 1).
 396 * Forced removal of whole ranges is handled by clear_extent_bit().
397 *
398 * If no bits are set on the state struct after clearing things, the
399 * struct is freed and removed from the tree
400 */
401static int clear_state_bit(struct extent_io_tree *tree,
402 struct extent_state *state,
403 int *bits, int wake)
404{
405 int bits_to_clear = *bits & ~EXTENT_CTLBITS;
406 int ret = state->state & bits_to_clear;
407
408 if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
409 u64 range = state->end - state->start + 1;
410 WARN_ON(range > tree->dirty_bytes);
411 tree->dirty_bytes -= range;
412 }
413 clear_state_cb(tree, state, bits);
414 state->state &= ~bits_to_clear;
415 if (wake)
416 wake_up(&state->wq);
417 if (state->state == 0) {
418 if (state->tree) {
419 rb_erase(&state->rb_node, &tree->state);
420 state->tree = NULL;
421 free_extent_state(state);
422 } else {
423 WARN_ON(1);
424 }
425 } else {
426 merge_state(tree, state);
427 }
428 return ret;
429}
430
431static struct extent_state *
432alloc_extent_state_atomic(struct extent_state *prealloc)
433{
434 if (!prealloc)
435 prealloc = alloc_extent_state(GFP_ATOMIC);
436
437 return prealloc;
438}
439
440/*
441 * clear some bits on a range in the tree. This may require splitting
442 * or inserting elements in the tree, so the gfp mask is used to
443 * indicate which allocations or sleeping are allowed.
444 *
445 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
446 * the given range from the tree regardless of state (ie for truncate).
447 *
448 * the range [start, end] is inclusive.
449 *
450 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
451 * bits were already set, or zero if none of the bits were already set.
452 */
453int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
454 int bits, int wake, int delete,
455 struct extent_state **cached_state,
456 gfp_t mask)
457{
458 struct extent_state *state;
459 struct extent_state *cached;
460 struct extent_state *prealloc = NULL;
461 struct rb_node *next_node;
462 struct rb_node *node;
463 u64 last_end;
464 int err;
465 int set = 0;
466 int clear = 0;
467
468 if (delete)
469 bits |= ~EXTENT_CTLBITS;
470 bits |= EXTENT_FIRST_DELALLOC;
471
472 if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
473 clear = 1;
474again:
475 if (!prealloc && (mask & __GFP_WAIT)) {
476 prealloc = alloc_extent_state(mask);
477 if (!prealloc)
478 return -ENOMEM;
479 }
480
481 spin_lock(&tree->lock);
482 if (cached_state) {
483 cached = *cached_state;
484
485 if (clear) {
486 *cached_state = NULL;
487 cached_state = NULL;
488 }
489
490 if (cached && cached->tree && cached->start <= start &&
491 cached->end > start) {
492 if (clear)
493 atomic_dec(&cached->refs);
494 state = cached;
495 goto hit_next;
496 }
497 if (clear)
498 free_extent_state(cached);
499 }
500 /*
501 * this search will find the extents that end after
502 * our range starts
503 */
504 node = tree_search(tree, start);
505 if (!node)
506 goto out;
507 state = rb_entry(node, struct extent_state, rb_node);
508hit_next:
509 if (state->start > end)
510 goto out;
511 WARN_ON(state->end < start);
512 last_end = state->end;
513
514 /*
515 * | ---- desired range ---- |
516 * | state | or
517 * | ------------- state -------------- |
518 *
519 * We need to split the extent we found, and may flip
520 * bits on second half.
521 *
522 * If the extent we found extends past our range, we
523 * just split and search again. It'll get split again
524 * the next time though.
525 *
526 * If the extent we found is inside our range, we clear
527 * the desired bit on it.
528 */
529
530 if (state->start < start) {
531 prealloc = alloc_extent_state_atomic(prealloc);
532 BUG_ON(!prealloc);
533 err = split_state(tree, state, prealloc, start);
534 BUG_ON(err == -EEXIST);
535 prealloc = NULL;
536 if (err)
537 goto out;
538 if (state->end <= end) {
539 set |= clear_state_bit(tree, state, &bits, wake);
540 if (last_end == (u64)-1)
541 goto out;
542 start = last_end + 1;
543 }
544 goto search_again;
545 }
546 /*
547 * | ---- desired range ---- |
548 * | state |
549 * We need to split the extent, and clear the bit
550 * on the first half
551 */
552 if (state->start <= end && state->end > end) {
553 prealloc = alloc_extent_state_atomic(prealloc);
554 BUG_ON(!prealloc);
555 err = split_state(tree, state, prealloc, end + 1);
556 BUG_ON(err == -EEXIST);
557 if (wake)
558 wake_up(&state->wq);
559
560 set |= clear_state_bit(tree, prealloc, &bits, wake);
561
562 prealloc = NULL;
563 goto out;
564 }
565
566 if (state->end < end && prealloc && !need_resched())
567 next_node = rb_next(&state->rb_node);
568 else
569 next_node = NULL;
570
571 set |= clear_state_bit(tree, state, &bits, wake);
572 if (last_end == (u64)-1)
573 goto out;
574 start = last_end + 1;
575 if (start <= end && next_node) {
576 state = rb_entry(next_node, struct extent_state,
577 rb_node);
578 if (state->start == start)
579 goto hit_next;
580 }
581 goto search_again;
582
583out:
584 spin_unlock(&tree->lock);
585 if (prealloc)
586 free_extent_state(prealloc);
587
588 return set;
589
590search_again:
591 if (start > end)
592 goto out;
593 spin_unlock(&tree->lock);
594 if (mask & __GFP_WAIT)
595 cond_resched();
596 goto again;
597}
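/*
 * Minimal usage sketch (hypothetical caller, not from the original file):
 * drop every tracked bit on an inclusive byte range and wake any waiters,
 * as a truncate-style caller would.  delete == 1 turns 'bits' into
 * "everything except the control bits" via the ~EXTENT_CTLBITS fixup above:
 *
 *	clear_extent_bit(tree, start, start + len - 1, 0, 1, 1, NULL,
 *			 GFP_NOFS);
 */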
598
599static int wait_on_state(struct extent_io_tree *tree,
600 struct extent_state *state)
601 __releases(tree->lock)
602 __acquires(tree->lock)
603{
604 DEFINE_WAIT(wait);
605 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
606 spin_unlock(&tree->lock);
607 schedule();
608 spin_lock(&tree->lock);
609 finish_wait(&state->wq, &wait);
610 return 0;
611}
612
613/*
614 * waits for one or more bits to clear on a range in the state tree.
615 * The range [start, end] is inclusive.
616 * The tree lock is taken by this function
617 */
618int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
619{
620 struct extent_state *state;
621 struct rb_node *node;
622
623 spin_lock(&tree->lock);
624again:
625 while (1) {
626 /*
627 * this search will find all the extents that end after
628 * our range starts
629 */
630 node = tree_search(tree, start);
631 if (!node)
632 break;
633
634 state = rb_entry(node, struct extent_state, rb_node);
635
636 if (state->start > end)
637 goto out;
638
639 if (state->state & bits) {
640 start = state->start;
641 atomic_inc(&state->refs);
642 wait_on_state(tree, state);
643 free_extent_state(state);
644 goto again;
645 }
646 start = state->end + 1;
647
648 if (start > end)
649 break;
650
651 cond_resched_lock(&tree->lock);
652 }
653out:
654 spin_unlock(&tree->lock);
655 return 0;
656}
657
658static void set_state_bits(struct extent_io_tree *tree,
659 struct extent_state *state,
660 int *bits)
661{
662 int bits_to_set = *bits & ~EXTENT_CTLBITS;
663
664 set_state_cb(tree, state, bits);
665 if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
666 u64 range = state->end - state->start + 1;
667 tree->dirty_bytes += range;
668 }
669 state->state |= bits_to_set;
670}
671
672static void cache_state(struct extent_state *state,
673 struct extent_state **cached_ptr)
674{
675 if (cached_ptr && !(*cached_ptr)) {
676 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
677 *cached_ptr = state;
678 atomic_inc(&state->refs);
679 }
680 }
681}
682
683static void uncache_state(struct extent_state **cached_ptr)
684{
685 if (cached_ptr && (*cached_ptr)) {
686 struct extent_state *state = *cached_ptr;
687 *cached_ptr = NULL;
688 free_extent_state(state);
689 }
690}
691
692/*
693 * set some bits on a range in the tree. This may require allocations or
694 * sleeping, so the gfp mask is used to indicate what is allowed.
695 *
696 * If any of the exclusive bits are set, this will fail with -EEXIST if some
697 * part of the range already has the desired bits set. The start of the
698 * existing range is returned in failed_start in this case.
699 *
 700 * [start, end] is inclusive. This takes the tree lock.
701 */
702
703int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
704 int bits, int exclusive_bits, u64 *failed_start,
705 struct extent_state **cached_state, gfp_t mask)
706{
707 struct extent_state *state;
708 struct extent_state *prealloc = NULL;
709 struct rb_node *node;
710 int err = 0;
711 u64 last_start;
712 u64 last_end;
713
714 bits |= EXTENT_FIRST_DELALLOC;
715again:
716 if (!prealloc && (mask & __GFP_WAIT)) {
717 prealloc = alloc_extent_state(mask);
718 BUG_ON(!prealloc);
719 }
720
721 spin_lock(&tree->lock);
722 if (cached_state && *cached_state) {
723 state = *cached_state;
724 if (state->start <= start && state->end > start &&
725 state->tree) {
726 node = &state->rb_node;
727 goto hit_next;
728 }
729 }
730 /*
731 * this search will find all the extents that end after
732 * our range starts.
733 */
734 node = tree_search(tree, start);
735 if (!node) {
736 prealloc = alloc_extent_state_atomic(prealloc);
737 BUG_ON(!prealloc);
738 err = insert_state(tree, prealloc, start, end, &bits);
739 prealloc = NULL;
740 BUG_ON(err == -EEXIST);
741 goto out;
742 }
743 state = rb_entry(node, struct extent_state, rb_node);
744hit_next:
745 last_start = state->start;
746 last_end = state->end;
747
748 /*
749 * | ---- desired range ---- |
750 * | state |
751 *
752 * Just lock what we found and keep going
753 */
754 if (state->start == start && state->end <= end) {
755 struct rb_node *next_node;
756 if (state->state & exclusive_bits) {
757 *failed_start = state->start;
758 err = -EEXIST;
759 goto out;
760 }
761
762 set_state_bits(tree, state, &bits);
763
764 cache_state(state, cached_state);
765 merge_state(tree, state);
766 if (last_end == (u64)-1)
767 goto out;
768
769 start = last_end + 1;
770 next_node = rb_next(&state->rb_node);
771 if (next_node && start < end && prealloc && !need_resched()) {
772 state = rb_entry(next_node, struct extent_state,
773 rb_node);
774 if (state->start == start)
775 goto hit_next;
776 }
777 goto search_again;
778 }
779
780 /*
781 * | ---- desired range ---- |
782 * | state |
783 * or
784 * | ------------- state -------------- |
785 *
786 * We need to split the extent we found, and may flip bits on
787 * second half.
788 *
789 * If the extent we found extends past our
790 * range, we just split and search again. It'll get split
791 * again the next time though.
792 *
793 * If the extent we found is inside our range, we set the
794 * desired bit on it.
795 */
796 if (state->start < start) {
797 if (state->state & exclusive_bits) {
798 *failed_start = start;
799 err = -EEXIST;
800 goto out;
801 }
802
803 prealloc = alloc_extent_state_atomic(prealloc);
804 BUG_ON(!prealloc);
805 err = split_state(tree, state, prealloc, start);
806 BUG_ON(err == -EEXIST);
807 prealloc = NULL;
808 if (err)
809 goto out;
810 if (state->end <= end) {
811 set_state_bits(tree, state, &bits);
812 cache_state(state, cached_state);
813 merge_state(tree, state);
814 if (last_end == (u64)-1)
815 goto out;
816 start = last_end + 1;
817 }
818 goto search_again;
819 }
820 /*
821 * | ---- desired range ---- |
822 * | state | or | state |
823 *
824 * There's a hole, we need to insert something in it and
825 * ignore the extent we found.
826 */
827 if (state->start > start) {
828 u64 this_end;
829 if (end < last_start)
830 this_end = end;
831 else
832 this_end = last_start - 1;
833
834 prealloc = alloc_extent_state_atomic(prealloc);
835 BUG_ON(!prealloc);
836
837 /*
838 * Avoid to free 'prealloc' if it can be merged with
839 * the later extent.
840 */
841 err = insert_state(tree, prealloc, start, this_end,
842 &bits);
843 BUG_ON(err == -EEXIST);
844 if (err) {
845 free_extent_state(prealloc);
846 prealloc = NULL;
847 goto out;
848 }
849 cache_state(prealloc, cached_state);
850 prealloc = NULL;
851 start = this_end + 1;
852 goto search_again;
853 }
854 /*
855 * | ---- desired range ---- |
856 * | state |
857 * We need to split the extent, and set the bit
858 * on the first half
859 */
860 if (state->start <= end && state->end > end) {
861 if (state->state & exclusive_bits) {
862 *failed_start = start;
863 err = -EEXIST;
864 goto out;
865 }
866
867 prealloc = alloc_extent_state_atomic(prealloc);
868 BUG_ON(!prealloc);
869 err = split_state(tree, state, prealloc, end + 1);
870 BUG_ON(err == -EEXIST);
871
872 set_state_bits(tree, prealloc, &bits);
873 cache_state(prealloc, cached_state);
874 merge_state(tree, prealloc);
875 prealloc = NULL;
876 goto out;
877 }
878
879 goto search_again;
880
881out:
882 spin_unlock(&tree->lock);
883 if (prealloc)
884 free_extent_state(prealloc);
885
886 return err;
887
888search_again:
889 if (start > end)
890 goto out;
891 spin_unlock(&tree->lock);
892 if (mask & __GFP_WAIT)
893 cond_resched();
894 goto again;
895}
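/*
 * Minimal usage sketch (hypothetical caller; 'cached_state' is a local
 * struct extent_state pointer initialized to NULL): mark an inclusive
 * range dirty and delalloc in one pass.  Passing EXTENT_LOCKED as
 * 'exclusive_bits' instead would make the call fail with -EEXIST and
 * report the conflicting offset through 'failed_start', which is how
 * lock_extent_bits() below detects contention:
 *
 *	err = set_extent_bit(tree, start, end,
 *			     EXTENT_DELALLOC | EXTENT_DIRTY,
 *			     0, NULL, &cached_state, GFP_NOFS);
 */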
896
897/* wrappers around set/clear extent bit */
898int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
899 gfp_t mask)
900{
901 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
902 NULL, mask);
903}
904
905int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
906 int bits, gfp_t mask)
907{
908 return set_extent_bit(tree, start, end, bits, 0, NULL,
909 NULL, mask);
910}
911
912int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
913 int bits, gfp_t mask)
914{
915 return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
916}
917
918int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
919 struct extent_state **cached_state, gfp_t mask)
920{
921 return set_extent_bit(tree, start, end,
922 EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
923 0, NULL, cached_state, mask);
924}
925
926int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
927 gfp_t mask)
928{
929 return clear_extent_bit(tree, start, end,
930 EXTENT_DIRTY | EXTENT_DELALLOC |
931 EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
932}
933
934int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
935 gfp_t mask)
936{
937 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
938 NULL, mask);
939}
940
941int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
942 struct extent_state **cached_state, gfp_t mask)
943{
944 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
945 NULL, cached_state, mask);
946}
947
948static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
949 u64 end, struct extent_state **cached_state,
950 gfp_t mask)
951{
952 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
953 cached_state, mask);
954}
955
956/*
 957 * either insert or lock state struct between start and end. Use mask to tell
958 * us if waiting is desired.
959 */
960int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
961 int bits, struct extent_state **cached_state, gfp_t mask)
962{
963 int err;
964 u64 failed_start;
965 while (1) {
966 err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
967 EXTENT_LOCKED, &failed_start,
968 cached_state, mask);
969 if (err == -EEXIST && (mask & __GFP_WAIT)) {
970 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
971 start = failed_start;
972 } else {
973 break;
974 }
975 WARN_ON(start > end);
976 }
977 return err;
978}
979
980int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
981{
982 return lock_extent_bits(tree, start, end, 0, NULL, mask);
983}
984
985int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
986 gfp_t mask)
987{
988 int err;
989 u64 failed_start;
990
991 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
992 &failed_start, NULL, mask);
993 if (err == -EEXIST) {
994 if (failed_start > start)
995 clear_extent_bit(tree, start, failed_start - 1,
996 EXTENT_LOCKED, 1, 0, NULL, mask);
997 return 0;
998 }
999 return 1;
1000}
1001
1002int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1003 struct extent_state **cached, gfp_t mask)
1004{
1005 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
1006 mask);
1007}
1008
1009int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
1010{
1011 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
1012 mask);
1013}
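/*
 * Typical locking pattern (illustrative sketch, not taken from this file):
 * callers bracket work on an inclusive byte range with lock_extent() and
 * unlock_extent(); unlock_extent_cached() can be used instead when a
 * cached state from the lock step is available, avoiding a second tree
 * search:
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	... operate on [start, end] ...
 *	unlock_extent(tree, start, end, GFP_NOFS);
 */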
1014
1015/*
1016 * helper function to set both pages and extents in the tree writeback
1017 */
1018static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1019{
1020 unsigned long index = start >> PAGE_CACHE_SHIFT;
1021 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1022 struct page *page;
1023
1024 while (index <= end_index) {
1025 page = find_get_page(tree->mapping, index);
1026 BUG_ON(!page);
1027 set_page_writeback(page);
1028 page_cache_release(page);
1029 index++;
1030 }
1031 return 0;
1032}
1033
1034/* find the first state struct with 'bits' set after 'start', and
 1035 * return it. tree->lock must be held. NULL will be returned if
1036 * nothing was found after 'start'
1037 */
1038struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1039 u64 start, int bits)
1040{
1041 struct rb_node *node;
1042 struct extent_state *state;
1043
1044 /*
1045 * this search will find all the extents that end after
1046 * our range starts.
1047 */
1048 node = tree_search(tree, start);
1049 if (!node)
1050 goto out;
1051
1052 while (1) {
1053 state = rb_entry(node, struct extent_state, rb_node);
1054 if (state->end >= start && (state->state & bits))
1055 return state;
1056
1057 node = rb_next(node);
1058 if (!node)
1059 break;
1060 }
1061out:
1062 return NULL;
1063}
1064
1065/*
1066 * find the first offset in the io tree with 'bits' set. zero is
1067 * returned if we find something, and *start_ret and *end_ret are
1068 * set to reflect the state struct that was found.
1069 *
1070 * If nothing was found, 1 is returned, < 0 on error
1071 */
1072int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1073 u64 *start_ret, u64 *end_ret, int bits)
1074{
1075 struct extent_state *state;
1076 int ret = 1;
1077
1078 spin_lock(&tree->lock);
1079 state = find_first_extent_bit_state(tree, start, bits);
1080 if (state) {
1081 *start_ret = state->start;
1082 *end_ret = state->end;
1083 ret = 0;
1084 }
1085 spin_unlock(&tree->lock);
1086 return ret;
1087}
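/*
 * Illustrative sketch (hypothetical caller; 'cur', 'found_start' and
 * 'found_end' are assumed locals): walk every dirty range in the tree by
 * restarting the search just past the previous hit.  Both returned offsets
 * are inclusive, so the next search starts at found_end + 1:
 *
 *	while (!find_first_extent_bit(tree, cur, &found_start, &found_end,
 *				      EXTENT_DIRTY)) {
 *		... process [found_start, found_end] ...
 *		cur = found_end + 1;
 *	}
 */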
1088
1089/*
1090 * find a contiguous range of bytes in the file marked as delalloc, not
 1091 * more than 'max_bytes'. start and end are used to return the range.
1092 *
1093 * 1 is returned if we find something, 0 if nothing was in the tree
1094 */
1095static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1096 u64 *start, u64 *end, u64 max_bytes,
1097 struct extent_state **cached_state)
1098{
1099 struct rb_node *node;
1100 struct extent_state *state;
1101 u64 cur_start = *start;
1102 u64 found = 0;
1103 u64 total_bytes = 0;
1104
1105 spin_lock(&tree->lock);
1106
1107 /*
1108 * this search will find all the extents that end after
1109 * our range starts.
1110 */
1111 node = tree_search(tree, cur_start);
1112 if (!node) {
1113 if (!found)
1114 *end = (u64)-1;
1115 goto out;
1116 }
1117
1118 while (1) {
1119 state = rb_entry(node, struct extent_state, rb_node);
1120 if (found && (state->start != cur_start ||
1121 (state->state & EXTENT_BOUNDARY))) {
1122 goto out;
1123 }
1124 if (!(state->state & EXTENT_DELALLOC)) {
1125 if (!found)
1126 *end = state->end;
1127 goto out;
1128 }
1129 if (!found) {
1130 *start = state->start;
1131 *cached_state = state;
1132 atomic_inc(&state->refs);
1133 }
1134 found++;
1135 *end = state->end;
1136 cur_start = state->end + 1;
1137 node = rb_next(node);
1138 if (!node)
1139 break;
1140 total_bytes += state->end - state->start + 1;
1141 if (total_bytes >= max_bytes)
1142 break;
1143 }
1144out:
1145 spin_unlock(&tree->lock);
1146 return found;
1147}
1148
1149static noinline int __unlock_for_delalloc(struct inode *inode,
1150 struct page *locked_page,
1151 u64 start, u64 end)
1152{
1153 int ret;
1154 struct page *pages[16];
1155 unsigned long index = start >> PAGE_CACHE_SHIFT;
1156 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1157 unsigned long nr_pages = end_index - index + 1;
1158 int i;
1159
1160 if (index == locked_page->index && end_index == index)
1161 return 0;
1162
1163 while (nr_pages > 0) {
1164 ret = find_get_pages_contig(inode->i_mapping, index,
1165 min_t(unsigned long, nr_pages,
1166 ARRAY_SIZE(pages)), pages);
1167 for (i = 0; i < ret; i++) {
1168 if (pages[i] != locked_page)
1169 unlock_page(pages[i]);
1170 page_cache_release(pages[i]);
1171 }
1172 nr_pages -= ret;
1173 index += ret;
1174 cond_resched();
1175 }
1176 return 0;
1177}
1178
1179static noinline int lock_delalloc_pages(struct inode *inode,
1180 struct page *locked_page,
1181 u64 delalloc_start,
1182 u64 delalloc_end)
1183{
1184 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1185 unsigned long start_index = index;
1186 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1187 unsigned long pages_locked = 0;
1188 struct page *pages[16];
1189 unsigned long nrpages;
1190 int ret;
1191 int i;
1192
1193 /* the caller is responsible for locking the start index */
1194 if (index == locked_page->index && index == end_index)
1195 return 0;
1196
1197 /* skip the page at the start index */
1198 nrpages = end_index - index + 1;
1199 while (nrpages > 0) {
1200 ret = find_get_pages_contig(inode->i_mapping, index,
1201 min_t(unsigned long,
1202 nrpages, ARRAY_SIZE(pages)), pages);
1203 if (ret == 0) {
1204 ret = -EAGAIN;
1205 goto done;
1206 }
1207 /* now we have an array of pages, lock them all */
1208 for (i = 0; i < ret; i++) {
1209 /*
1210 * the caller is taking responsibility for
1211 * locked_page
1212 */
1213 if (pages[i] != locked_page) {
1214 lock_page(pages[i]);
1215 if (!PageDirty(pages[i]) ||
1216 pages[i]->mapping != inode->i_mapping) {
1217 ret = -EAGAIN;
1218 unlock_page(pages[i]);
1219 page_cache_release(pages[i]);
1220 goto done;
1221 }
1222 }
1223 page_cache_release(pages[i]);
1224 pages_locked++;
1225 }
1226 nrpages -= ret;
1227 index += ret;
1228 cond_resched();
1229 }
1230 ret = 0;
1231done:
1232 if (ret && pages_locked) {
1233 __unlock_for_delalloc(inode, locked_page,
1234 delalloc_start,
1235 ((u64)(start_index + pages_locked - 1)) <<
1236 PAGE_CACHE_SHIFT);
1237 }
1238 return ret;
1239}
1240
1241/*
1242 * find a contiguous range of bytes in the file marked as delalloc, not
 1243 * more than 'max_bytes'. start and end are used to return the range.
1244 *
1245 * 1 is returned if we find something, 0 if nothing was in the tree
1246 */
1247static noinline u64 find_lock_delalloc_range(struct inode *inode,
1248 struct extent_io_tree *tree,
1249 struct page *locked_page,
1250 u64 *start, u64 *end,
1251 u64 max_bytes)
1252{
1253 u64 delalloc_start;
1254 u64 delalloc_end;
1255 u64 found;
1256 struct extent_state *cached_state = NULL;
1257 int ret;
1258 int loops = 0;
1259
1260again:
1261 /* step one, find a bunch of delalloc bytes starting at start */
1262 delalloc_start = *start;
1263 delalloc_end = 0;
1264 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1265 max_bytes, &cached_state);
1266 if (!found || delalloc_end <= *start) {
1267 *start = delalloc_start;
1268 *end = delalloc_end;
1269 free_extent_state(cached_state);
1270 return found;
1271 }
1272
1273 /*
1274 * start comes from the offset of locked_page. We have to lock
1275 * pages in order, so we can't process delalloc bytes before
1276 * locked_page
1277 */
1278 if (delalloc_start < *start)
1279 delalloc_start = *start;
1280
1281 /*
1282 * make sure to limit the number of pages we try to lock down
1283 * if we're looping.
1284 */
1285 if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
1286 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
1287
1288 /* step two, lock all the pages after the page that has start */
1289 ret = lock_delalloc_pages(inode, locked_page,
1290 delalloc_start, delalloc_end);
1291 if (ret == -EAGAIN) {
1292 /* some of the pages are gone, lets avoid looping by
1293 * shortening the size of the delalloc range we're searching
1294 */
1295 free_extent_state(cached_state);
1296 if (!loops) {
1297 unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1298 max_bytes = PAGE_CACHE_SIZE - offset;
1299 loops = 1;
1300 goto again;
1301 } else {
1302 found = 0;
1303 goto out_failed;
1304 }
1305 }
1306 BUG_ON(ret);
1307
1308 /* step three, lock the state bits for the whole range */
1309 lock_extent_bits(tree, delalloc_start, delalloc_end,
1310 0, &cached_state, GFP_NOFS);
1311
1312 /* then test to make sure it is all still delalloc */
1313 ret = test_range_bit(tree, delalloc_start, delalloc_end,
1314 EXTENT_DELALLOC, 1, cached_state);
1315 if (!ret) {
1316 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1317 &cached_state, GFP_NOFS);
1318 __unlock_for_delalloc(inode, locked_page,
1319 delalloc_start, delalloc_end);
1320 cond_resched();
1321 goto again;
1322 }
1323 free_extent_state(cached_state);
1324 *start = delalloc_start;
1325 *end = delalloc_end;
1326out_failed:
1327 return found;
1328}
1329
1330int extent_clear_unlock_delalloc(struct inode *inode,
1331 struct extent_io_tree *tree,
1332 u64 start, u64 end, struct page *locked_page,
1333 unsigned long op)
1334{
1335 int ret;
1336 struct page *pages[16];
1337 unsigned long index = start >> PAGE_CACHE_SHIFT;
1338 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1339 unsigned long nr_pages = end_index - index + 1;
1340 int i;
1341 int clear_bits = 0;
1342
1343 if (op & EXTENT_CLEAR_UNLOCK)
1344 clear_bits |= EXTENT_LOCKED;
1345 if (op & EXTENT_CLEAR_DIRTY)
1346 clear_bits |= EXTENT_DIRTY;
1347
1348 if (op & EXTENT_CLEAR_DELALLOC)
1349 clear_bits |= EXTENT_DELALLOC;
1350
1351 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1352 if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
1353 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
1354 EXTENT_SET_PRIVATE2)))
1355 return 0;
1356
1357 while (nr_pages > 0) {
1358 ret = find_get_pages_contig(inode->i_mapping, index,
1359 min_t(unsigned long,
1360 nr_pages, ARRAY_SIZE(pages)), pages);
1361 for (i = 0; i < ret; i++) {
1362
1363 if (op & EXTENT_SET_PRIVATE2)
1364 SetPagePrivate2(pages[i]);
1365
1366 if (pages[i] == locked_page) {
1367 page_cache_release(pages[i]);
1368 continue;
1369 }
1370 if (op & EXTENT_CLEAR_DIRTY)
1371 clear_page_dirty_for_io(pages[i]);
1372 if (op & EXTENT_SET_WRITEBACK)
1373 set_page_writeback(pages[i]);
1374 if (op & EXTENT_END_WRITEBACK)
1375 end_page_writeback(pages[i]);
1376 if (op & EXTENT_CLEAR_UNLOCK_PAGE)
1377 unlock_page(pages[i]);
1378 page_cache_release(pages[i]);
1379 }
1380 nr_pages -= ret;
1381 index += ret;
1382 cond_resched();
1383 }
1384 return 0;
1385}
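/*
 * Minimal usage sketch (hypothetical caller): once a delalloc range has
 * been handed off, clear the delalloc bookkeeping and release both the
 * extent lock and the page locks in one call by combining op flags:
 *
 *	extent_clear_unlock_delalloc(inode, tree, start, end, locked_page,
 *				     EXTENT_CLEAR_UNLOCK |
 *				     EXTENT_CLEAR_DELALLOC |
 *				     EXTENT_CLEAR_UNLOCK_PAGE);
 */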
1386
1387/*
1388 * count the number of bytes in the tree that have a given bit(s)
1389 * set. This can be fairly slow, except for EXTENT_DIRTY which is
1390 * cached. The total number found is returned.
1391 */
1392u64 count_range_bits(struct extent_io_tree *tree,
1393 u64 *start, u64 search_end, u64 max_bytes,
1394 unsigned long bits, int contig)
1395{
1396 struct rb_node *node;
1397 struct extent_state *state;
1398 u64 cur_start = *start;
1399 u64 total_bytes = 0;
1400 u64 last = 0;
1401 int found = 0;
1402
1403 if (search_end <= cur_start) {
1404 WARN_ON(1);
1405 return 0;
1406 }
1407
1408 spin_lock(&tree->lock);
1409 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1410 total_bytes = tree->dirty_bytes;
1411 goto out;
1412 }
1413 /*
1414 * this search will find all the extents that end after
1415 * our range starts.
1416 */
1417 node = tree_search(tree, cur_start);
1418 if (!node)
1419 goto out;
1420
1421 while (1) {
1422 state = rb_entry(node, struct extent_state, rb_node);
1423 if (state->start > search_end)
1424 break;
1425 if (contig && found && state->start > last + 1)
1426 break;
1427 if (state->end >= cur_start && (state->state & bits) == bits) {
1428 total_bytes += min(search_end, state->end) + 1 -
1429 max(cur_start, state->start);
1430 if (total_bytes >= max_bytes)
1431 break;
1432 if (!found) {
1433 *start = max(cur_start, state->start);
1434 found = 1;
1435 }
1436 last = state->end;
1437 } else if (contig && found) {
1438 break;
1439 }
1440 node = rb_next(node);
1441 if (!node)
1442 break;
1443 }
1444out:
1445 spin_unlock(&tree->lock);
1446 return total_bytes;
1447}
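/*
 * Worked example (illustrative): with EXTENT_DELALLOC set on [0, 4095] and
 * [8192, 12287], count_range_bits(tree, &start, 16383, (u64)-1,
 * EXTENT_DELALLOC, 0) called with *start == 0 returns 8192; the same call
 * with 'contig' == 1 stops at the hole after 4095 and returns 4096.
 * (EXTENT_DIRTY queries starting at offset 0 short-circuit to the cached
 * tree->dirty_bytes instead.)
 */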
1448
1449/*
 1450 * set the private field for a given byte offset in the tree. If no
 1451 * extent_state starts at exactly that offset, -ENOENT is returned.
1452 */
1453int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1454{
1455 struct rb_node *node;
1456 struct extent_state *state;
1457 int ret = 0;
1458
1459 spin_lock(&tree->lock);
1460 /*
1461 * this search will find all the extents that end after
1462 * our range starts.
1463 */
1464 node = tree_search(tree, start);
1465 if (!node) {
1466 ret = -ENOENT;
1467 goto out;
1468 }
1469 state = rb_entry(node, struct extent_state, rb_node);
1470 if (state->start != start) {
1471 ret = -ENOENT;
1472 goto out;
1473 }
1474 state->private = private;
1475out:
1476 spin_unlock(&tree->lock);
1477 return ret;
1478}
1479
1480int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1481{
1482 struct rb_node *node;
1483 struct extent_state *state;
1484 int ret = 0;
1485
1486 spin_lock(&tree->lock);
1487 /*
1488 * this search will find all the extents that end after
1489 * our range starts.
1490 */
1491 node = tree_search(tree, start);
1492 if (!node) {
1493 ret = -ENOENT;
1494 goto out;
1495 }
1496 state = rb_entry(node, struct extent_state, rb_node);
1497 if (state->start != start) {
1498 ret = -ENOENT;
1499 goto out;
1500 }
1501 *private = state->private;
1502out:
1503 spin_unlock(&tree->lock);
1504 return ret;
1505}
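/*
 * Illustrative pairing (hypothetical caller; 'ex_start' and 'value' are
 * assumed locals): the private field stashes one u64 per extent_state,
 * keyed by the state's exact start offset.  Both helpers return -ENOENT
 * when no state begins at that offset:
 *
 *	set_state_private(tree, ex_start, value);
 *	...
 *	ret = get_state_private(tree, ex_start, &value);
 */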
1506
1507/*
1508 * searches a range in the state tree for a given mask.
1509 * If 'filled' == 1, this returns 1 only if every extent in the tree
1510 * has the bits set. Otherwise, 1 is returned if any bit in the
1511 * range is found set.
1512 */
1513int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1514 int bits, int filled, struct extent_state *cached)
1515{
1516 struct extent_state *state = NULL;
1517 struct rb_node *node;
1518 int bitset = 0;
1519
1520 spin_lock(&tree->lock);
1521 if (cached && cached->tree && cached->start <= start &&
1522 cached->end > start)
1523 node = &cached->rb_node;
1524 else
1525 node = tree_search(tree, start);
1526 while (node && start <= end) {
1527 state = rb_entry(node, struct extent_state, rb_node);
1528
1529 if (filled && state->start > start) {
1530 bitset = 0;
1531 break;
1532 }
1533
1534 if (state->start > end)
1535 break;
1536
1537 if (state->state & bits) {
1538 bitset = 1;
1539 if (!filled)
1540 break;
1541 } else if (filled) {
1542 bitset = 0;
1543 break;
1544 }
1545
1546 if (state->end == (u64)-1)
1547 break;
1548
1549 start = state->end + 1;
1550 if (start > end)
1551 break;
1552 node = rb_next(node);
1553 if (!node) {
1554 if (filled)
1555 bitset = 0;
1556 break;
1557 }
1558 }
1559 spin_unlock(&tree->lock);
1560 return bitset;
1561}
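/*
 * Illustrative example: with EXTENT_UPTODATE set only on [0, 4095],
 * test_range_bit(tree, 0, 8191, EXTENT_UPTODATE, 1, NULL) returns 0
 * because 'filled' == 1 requires the whole range to be covered, while the
 * same call with 'filled' == 0 returns 1 as soon as any overlapping state
 * carries the bit.
 */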
1562
1563/*
1564 * helper function to set a given page up to date if all the
1565 * extents in the tree for that page are up to date
1566 */
1567static int check_page_uptodate(struct extent_io_tree *tree,
1568 struct page *page)
1569{
1570 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1571 u64 end = start + PAGE_CACHE_SIZE - 1;
1572 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1573 SetPageUptodate(page);
1574 return 0;
1575}
1576
1577/*
1578 * helper function to unlock a page if all the extents in the tree
1579 * for that page are unlocked
1580 */
1581static int check_page_locked(struct extent_io_tree *tree,
1582 struct page *page)
1583{
1584 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1585 u64 end = start + PAGE_CACHE_SIZE - 1;
1586 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
1587 unlock_page(page);
1588 return 0;
1589}
1590
1591/*
1592 * helper function to end page writeback if all the extents
1593 * in the tree for that page are done with writeback
1594 */
1595static int check_page_writeback(struct extent_io_tree *tree,
1596 struct page *page)
1597{
1598 end_page_writeback(page);
1599 return 0;
1600}
1601
1602/* lots and lots of room for performance fixes in the end_bio funcs */
1603
1604/*
1605 * after a writepage IO is done, we need to:
1606 * clear the uptodate bits on error
1607 * clear the writeback bits in the extent tree for this IO
1608 * end_page_writeback if the page has no more pending IO
1609 *
1610 * Scheduling is not allowed, so the extent state tree is expected
1611 * to have one and only one object corresponding to this IO.
1612 */
1613static void end_bio_extent_writepage(struct bio *bio, int err)
1614{
1615 int uptodate = err == 0;
1616 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1617 struct extent_io_tree *tree;
1618 u64 start;
1619 u64 end;
1620 int whole_page;
1621 int ret;
1622
1623 do {
1624 struct page *page = bvec->bv_page;
1625 tree = &BTRFS_I(page->mapping->host)->io_tree;
1626
1627 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1628 bvec->bv_offset;
1629 end = start + bvec->bv_len - 1;
1630
1631 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1632 whole_page = 1;
1633 else
1634 whole_page = 0;
1635
1636 if (--bvec >= bio->bi_io_vec)
1637 prefetchw(&bvec->bv_page->flags);
1638 if (tree->ops && tree->ops->writepage_end_io_hook) {
1639 ret = tree->ops->writepage_end_io_hook(page, start,
1640 end, NULL, uptodate);
1641 if (ret)
1642 uptodate = 0;
1643 }
1644
1645 if (!uptodate && tree->ops &&
1646 tree->ops->writepage_io_failed_hook) {
1647 ret = tree->ops->writepage_io_failed_hook(bio, page,
1648 start, end, NULL);
1649 if (ret == 0) {
1650 uptodate = (err == 0);
1651 continue;
1652 }
1653 }
1654
1655 if (!uptodate) {
1656 clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
1657 ClearPageUptodate(page);
1658 SetPageError(page);
1659 }
1660
1661 if (whole_page)
1662 end_page_writeback(page);
1663 else
1664 check_page_writeback(tree, page);
1665 } while (bvec >= bio->bi_io_vec);
1666
1667 bio_put(bio);
1668}
1669
1670/*
1671 * after a readpage IO is done, we need to:
1672 * clear the uptodate bits on error
1673 * set the uptodate bits if things worked
1674 * set the page up to date if all extents in the tree are uptodate
1675 * clear the lock bit in the extent tree
1676 * unlock the page if there are no other extents locked for it
1677 *
1678 * Scheduling is not allowed, so the extent state tree is expected
1679 * to have one and only one object corresponding to this IO.
1680 */
1681static void end_bio_extent_readpage(struct bio *bio, int err)
1682{
1683 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1684 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
1685 struct bio_vec *bvec = bio->bi_io_vec;
1686 struct extent_io_tree *tree;
1687 u64 start;
1688 u64 end;
1689 int whole_page;
1690 int ret;
1691
1692 if (err)
1693 uptodate = 0;
1694
1695 do {
1696 struct page *page = bvec->bv_page;
1697 struct extent_state *cached = NULL;
1698 struct extent_state *state;
1699
1700 tree = &BTRFS_I(page->mapping->host)->io_tree;
1701
1702 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1703 bvec->bv_offset;
1704 end = start + bvec->bv_len - 1;
1705
1706 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1707 whole_page = 1;
1708 else
1709 whole_page = 0;
1710
1711 if (++bvec <= bvec_end)
1712 prefetchw(&bvec->bv_page->flags);
1713
1714 spin_lock(&tree->lock);
1715 state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
1716 if (state && state->start == start) {
1717 /*
1718 * take a reference on the state, unlock will drop
1719 * the ref
1720 */
1721 cache_state(state, &cached);
1722 }
1723 spin_unlock(&tree->lock);
1724
1725 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1726 ret = tree->ops->readpage_end_io_hook(page, start, end,
1727 state);
1728 if (ret)
1729 uptodate = 0;
1730 }
1731 if (!uptodate && tree->ops &&
1732 tree->ops->readpage_io_failed_hook) {
1733 ret = tree->ops->readpage_io_failed_hook(bio, page,
1734 start, end, NULL);
1735 if (ret == 0) {
1736 uptodate =
1737 test_bit(BIO_UPTODATE, &bio->bi_flags);
1738 if (err)
1739 uptodate = 0;
1740 uncache_state(&cached);
1741 continue;
1742 }
1743 }
1744
1745 if (uptodate) {
1746 set_extent_uptodate(tree, start, end, &cached,
1747 GFP_ATOMIC);
1748 }
1749 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
1750
1751 if (whole_page) {
1752 if (uptodate) {
1753 SetPageUptodate(page);
1754 } else {
1755 ClearPageUptodate(page);
1756 SetPageError(page);
1757 }
1758 unlock_page(page);
1759 } else {
1760 if (uptodate) {
1761 check_page_uptodate(tree, page);
1762 } else {
1763 ClearPageUptodate(page);
1764 SetPageError(page);
1765 }
1766 check_page_locked(tree, page);
1767 }
1768 } while (bvec <= bvec_end);
1769
1770 bio_put(bio);
1771}
1772
1773struct bio *
1774btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1775 gfp_t gfp_flags)
1776{
1777 struct bio *bio;
1778
1779 bio = bio_alloc(gfp_flags, nr_vecs);
1780
1781 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1782 while (!bio && (nr_vecs /= 2))
1783 bio = bio_alloc(gfp_flags, nr_vecs);
1784 }
1785
1786 if (bio) {
1787 bio->bi_size = 0;
1788 bio->bi_bdev = bdev;
1789 bio->bi_sector = first_sector;
1790 }
1791 return bio;
1792}
1793
1794static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
1795 unsigned long bio_flags)
1796{
1797 int ret = 0;
1798 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1799 struct page *page = bvec->bv_page;
1800 struct extent_io_tree *tree = bio->bi_private;
1801 u64 start;
1802
1803 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1804
1805 bio->bi_private = NULL;
1806
1807 bio_get(bio);
1808
1809 if (tree->ops && tree->ops->submit_bio_hook)
1810 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
1811 mirror_num, bio_flags, start);
1812 else
1813 submit_bio(rw, bio);
1814 if (bio_flagged(bio, BIO_EOPNOTSUPP))
1815 ret = -EOPNOTSUPP;
1816 bio_put(bio);
1817 return ret;
1818}
1819
1820static int submit_extent_page(int rw, struct extent_io_tree *tree,
1821 struct page *page, sector_t sector,
1822 size_t size, unsigned long offset,
1823 struct block_device *bdev,
1824 struct bio **bio_ret,
1825 unsigned long max_pages,
1826 bio_end_io_t end_io_func,
1827 int mirror_num,
1828 unsigned long prev_bio_flags,
1829 unsigned long bio_flags)
1830{
1831 int ret = 0;
1832 struct bio *bio;
1833 int nr;
1834 int contig = 0;
1835 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
1836 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
1837 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
1838
1839 if (bio_ret && *bio_ret) {
1840 bio = *bio_ret;
1841 if (old_compressed)
1842 contig = bio->bi_sector == sector;
1843 else
1844 contig = bio->bi_sector + (bio->bi_size >> 9) ==
1845 sector;
1846
1847 if (prev_bio_flags != bio_flags || !contig ||
1848 (tree->ops && tree->ops->merge_bio_hook &&
1849 tree->ops->merge_bio_hook(page, offset, page_size, bio,
1850 bio_flags)) ||
1851 bio_add_page(bio, page, page_size, offset) < page_size) {
1852 ret = submit_one_bio(rw, bio, mirror_num,
1853 prev_bio_flags);
1854 bio = NULL;
1855 } else {
1856 return 0;
1857 }
1858 }
1859 if (this_compressed)
1860 nr = BIO_MAX_PAGES;
1861 else
1862 nr = bio_get_nr_vecs(bdev);
1863
1864 bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1865 if (!bio)
1866 return -ENOMEM;
1867
1868 bio_add_page(bio, page, page_size, offset);
1869 bio->bi_end_io = end_io_func;
1870 bio->bi_private = tree;
1871
1872 if (bio_ret)
1873 *bio_ret = bio;
1874 else
1875 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
1876
1877 return ret;
1878}
1879
1880void set_page_extent_mapped(struct page *page)
1881{
1882 if (!PagePrivate(page)) {
1883 SetPagePrivate(page);
1884 page_cache_get(page);
1885 set_page_private(page, EXTENT_PAGE_PRIVATE);
1886 }
1887}
1888
1889static void set_page_extent_head(struct page *page, unsigned long len)
1890{
1891 WARN_ON(!PagePrivate(page));
1892 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1893}
1894
1895/*
 1896 * basic readpage implementation. Locked extent state structs are inserted
 1897 * into the tree; they are removed when the IO is done (by the end_io
 1898 * handlers).
1899 */
1900static int __extent_read_full_page(struct extent_io_tree *tree,
1901 struct page *page,
1902 get_extent_t *get_extent,
1903 struct bio **bio, int mirror_num,
1904 unsigned long *bio_flags)
1905{
1906 struct inode *inode = page->mapping->host;
1907 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1908 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1909 u64 end;
1910 u64 cur = start;
1911 u64 extent_offset;
1912 u64 last_byte = i_size_read(inode);
1913 u64 block_start;
1914 u64 cur_end;
1915 sector_t sector;
1916 struct extent_map *em;
1917 struct block_device *bdev;
1918 struct btrfs_ordered_extent *ordered;
1919 int ret;
1920 int nr = 0;
1921 size_t pg_offset = 0;
1922 size_t iosize;
1923 size_t disk_io_size;
1924 size_t blocksize = inode->i_sb->s_blocksize;
1925 unsigned long this_bio_flag = 0;
1926
1927 set_page_extent_mapped(page);
1928
1929 if (!PageUptodate(page)) {
1930 if (cleancache_get_page(page) == 0) {
1931 BUG_ON(blocksize != PAGE_SIZE);
1932 goto out;
1933 }
1934 }
1935
1936 end = page_end;
1937 while (1) {
1938 lock_extent(tree, start, end, GFP_NOFS);
1939 ordered = btrfs_lookup_ordered_extent(inode, start);
1940 if (!ordered)
1941 break;
1942 unlock_extent(tree, start, end, GFP_NOFS);
1943 btrfs_start_ordered_extent(inode, ordered, 1);
1944 btrfs_put_ordered_extent(ordered);
1945 }
1946
1947 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
1948 char *userpage;
1949 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
1950
1951 if (zero_offset) {
1952 iosize = PAGE_CACHE_SIZE - zero_offset;
1953 userpage = kmap_atomic(page, KM_USER0);
1954 memset(userpage + zero_offset, 0, iosize);
1955 flush_dcache_page(page);
1956 kunmap_atomic(userpage, KM_USER0);
1957 }
1958 }
1959 while (cur <= end) {
1960 if (cur >= last_byte) {
1961 char *userpage;
1962 struct extent_state *cached = NULL;
1963
1964 iosize = PAGE_CACHE_SIZE - pg_offset;
1965 userpage = kmap_atomic(page, KM_USER0);
1966 memset(userpage + pg_offset, 0, iosize);
1967 flush_dcache_page(page);
1968 kunmap_atomic(userpage, KM_USER0);
1969 set_extent_uptodate(tree, cur, cur + iosize - 1,
1970 &cached, GFP_NOFS);
1971 unlock_extent_cached(tree, cur, cur + iosize - 1,
1972 &cached, GFP_NOFS);
1973 break;
1974 }
1975 em = get_extent(inode, page, pg_offset, cur,
1976 end - cur + 1, 0);
1977 if (IS_ERR_OR_NULL(em)) {
1978 SetPageError(page);
1979 unlock_extent(tree, cur, end, GFP_NOFS);
1980 break;
1981 }
1982 extent_offset = cur - em->start;
1983 BUG_ON(extent_map_end(em) <= cur);
1984 BUG_ON(end < cur);
1985
1986 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
1987 this_bio_flag = EXTENT_BIO_COMPRESSED;
1988 extent_set_compress_type(&this_bio_flag,
1989 em->compress_type);
1990 }
1991
1992 iosize = min(extent_map_end(em) - cur, end - cur + 1);
1993 cur_end = min(extent_map_end(em) - 1, end);
1994 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1995 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
1996 disk_io_size = em->block_len;
1997 sector = em->block_start >> 9;
1998 } else {
1999 sector = (em->block_start + extent_offset) >> 9;
2000 disk_io_size = iosize;
2001 }
2002 bdev = em->bdev;
2003 block_start = em->block_start;
2004 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2005 block_start = EXTENT_MAP_HOLE;
2006 free_extent_map(em);
2007 em = NULL;
2008
2009 /* we've found a hole, just zero and go on */
2010 if (block_start == EXTENT_MAP_HOLE) {
2011 char *userpage;
2012 struct extent_state *cached = NULL;
2013
2014 userpage = kmap_atomic(page, KM_USER0);
2015 memset(userpage + pg_offset, 0, iosize);
2016 flush_dcache_page(page);
2017 kunmap_atomic(userpage, KM_USER0);
2018
2019 set_extent_uptodate(tree, cur, cur + iosize - 1,
2020 &cached, GFP_NOFS);
2021 unlock_extent_cached(tree, cur, cur + iosize - 1,
2022 &cached, GFP_NOFS);
2023 cur = cur + iosize;
2024 pg_offset += iosize;
2025 continue;
2026 }
2027 /* the get_extent function already copied into the page */
2028 if (test_range_bit(tree, cur, cur_end,
2029 EXTENT_UPTODATE, 1, NULL)) {
2030 check_page_uptodate(tree, page);
2031 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2032 cur = cur + iosize;
2033 pg_offset += iosize;
2034 continue;
2035 }
2036 /* we have an inline extent but it didn't get marked up
2037 * to date. Error out
2038 */
2039 if (block_start == EXTENT_MAP_INLINE) {
2040 SetPageError(page);
2041 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2042 cur = cur + iosize;
2043 pg_offset += iosize;
2044 continue;
2045 }
2046
2047 ret = 0;
2048 if (tree->ops && tree->ops->readpage_io_hook) {
2049 ret = tree->ops->readpage_io_hook(page, cur,
2050 cur + iosize - 1);
2051 }
2052 if (!ret) {
2053 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2054 pnr -= page->index;
2055 ret = submit_extent_page(READ, tree, page,
2056 sector, disk_io_size, pg_offset,
2057 bdev, bio, pnr,
2058 end_bio_extent_readpage, mirror_num,
2059 *bio_flags,
2060 this_bio_flag);
2061 nr++;
2062 *bio_flags = this_bio_flag;
2063 }
2064 if (ret)
2065 SetPageError(page);
2066 cur = cur + iosize;
2067 pg_offset += iosize;
2068 }
2069out:
2070 if (!nr) {
2071 if (!PageError(page))
2072 SetPageUptodate(page);
2073 unlock_page(page);
2074 }
2075 return 0;
2076}
2077
2078int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2079 get_extent_t *get_extent)
2080{
2081 struct bio *bio = NULL;
2082 unsigned long bio_flags = 0;
2083 int ret;
2084
2085 ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
2086 &bio_flags);
2087 if (bio)
2088 ret = submit_one_bio(READ, bio, 0, bio_flags);
2089 return ret;
2090}
2091
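/*
 * account for pages we have written and, for cyclic or whole-file
 * writeback, advance the mapping's writeback_index so the next
 * writeback pass starts after this page.
 */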
2092static noinline void update_nr_written(struct page *page,
2093 struct writeback_control *wbc,
2094 unsigned long nr_written)
2095{
2096 wbc->nr_to_write -= nr_written;
2097 if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2098 wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2099 page->mapping->writeback_index = page->index + nr_written;
2100}
2101
2102/*
2103 * the writepage semantics are similar to regular writepage. extent
2104 * records are inserted to lock ranges in the tree, and as dirty areas
2105 * are found, they are marked writeback. Then the lock bits are removed
2106 * and the end_io handler clears the writeback ranges
2107 */
2108static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2109 void *data)
2110{
2111 struct inode *inode = page->mapping->host;
2112 struct extent_page_data *epd = data;
2113 struct extent_io_tree *tree = epd->tree;
2114 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2115 u64 delalloc_start;
2116 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2117 u64 end;
2118 u64 cur = start;
2119 u64 extent_offset;
2120 u64 last_byte = i_size_read(inode);
2121 u64 block_start;
2122 u64 iosize;
2123 sector_t sector;
2124 struct extent_state *cached_state = NULL;
2125 struct extent_map *em;
2126 struct block_device *bdev;
2127 int ret;
2128 int nr = 0;
2129 size_t pg_offset = 0;
2130 size_t blocksize;
2131 loff_t i_size = i_size_read(inode);
2132 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2133 u64 nr_delalloc;
2134 u64 delalloc_end;
2135 int page_started;
2136 int compressed;
2137 int write_flags;
2138 unsigned long nr_written = 0;
2139
2140 if (wbc->sync_mode == WB_SYNC_ALL)
2141 write_flags = WRITE_SYNC;
2142 else
2143 write_flags = WRITE;
2144
2145 trace___extent_writepage(page, inode, wbc);
2146
2147 WARN_ON(!PageLocked(page));
2148 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2149 if (page->index > end_index ||
2150 (page->index == end_index && !pg_offset)) {
2151 page->mapping->a_ops->invalidatepage(page, 0);
2152 unlock_page(page);
2153 return 0;
2154 }
2155
2156 if (page->index == end_index) {
2157 char *userpage;
2158
2159 userpage = kmap_atomic(page, KM_USER0);
2160 memset(userpage + pg_offset, 0,
2161 PAGE_CACHE_SIZE - pg_offset);
2162 kunmap_atomic(userpage, KM_USER0);
2163 flush_dcache_page(page);
2164 }
2165 pg_offset = 0;
2166
2167 set_page_extent_mapped(page);
2168
2169 delalloc_start = start;
2170 delalloc_end = 0;
2171 page_started = 0;
2172 if (!epd->extent_locked) {
2173 u64 delalloc_to_write = 0;
2174 /*
2175 * make sure the wbc mapping index is at least updated
2176 * to this page.
2177 */
2178 update_nr_written(page, wbc, 0);
2179
2180 while (delalloc_end < page_end) {
2181 nr_delalloc = find_lock_delalloc_range(inode, tree,
2182 page,
2183 &delalloc_start,
2184 &delalloc_end,
2185 128 * 1024 * 1024);
2186 if (nr_delalloc == 0) {
2187 delalloc_start = delalloc_end + 1;
2188 continue;
2189 }
2190 tree->ops->fill_delalloc(inode, page, delalloc_start,
2191 delalloc_end, &page_started,
2192 &nr_written);
2193 /*
2194 * delalloc_end is already one less than the total
2195 * length, so we don't subtract one from
2196 * PAGE_CACHE_SIZE
2197 */
2198 delalloc_to_write += (delalloc_end - delalloc_start +
2199 PAGE_CACHE_SIZE) >>
2200 PAGE_CACHE_SHIFT;
2201 delalloc_start = delalloc_end + 1;
2202 }
2203 if (wbc->nr_to_write < delalloc_to_write) {
2204 int thresh = 8192;
2205
2206 if (delalloc_to_write < thresh * 2)
2207 thresh = delalloc_to_write;
2208 wbc->nr_to_write = min_t(u64, delalloc_to_write,
2209 thresh);
2210 }
2211
2212 /* did the fill delalloc function already unlock and start
2213 * the IO?
2214 */
2215 if (page_started) {
2216 ret = 0;
2217 /*
2218 * we've unlocked the page, so we can't update
2219 * the mapping's writeback index, just update
2220 * nr_to_write.
2221 */
2222 wbc->nr_to_write -= nr_written;
2223 goto done_unlocked;
2224 }
2225 }
2226 if (tree->ops && tree->ops->writepage_start_hook) {
2227 ret = tree->ops->writepage_start_hook(page, start,
2228 page_end);
2229 if (ret == -EAGAIN) {
2230 redirty_page_for_writepage(wbc, page);
2231 update_nr_written(page, wbc, nr_written);
2232 unlock_page(page);
2233 ret = 0;
2234 goto done_unlocked;
2235 }
2236 }
2237
2238 /*
2239 * we don't want to touch the inode after unlocking the page,
2240 * so we update the mapping writeback index now
2241 */
2242 update_nr_written(page, wbc, nr_written + 1);
2243
2244 end = page_end;
2245 if (last_byte <= start) {
2246 if (tree->ops && tree->ops->writepage_end_io_hook)
2247 tree->ops->writepage_end_io_hook(page, start,
2248 page_end, NULL, 1);
2249 goto done;
2250 }
2251
2252 blocksize = inode->i_sb->s_blocksize;
2253
2254 while (cur <= end) {
2255 if (cur >= last_byte) {
2256 if (tree->ops && tree->ops->writepage_end_io_hook)
2257 tree->ops->writepage_end_io_hook(page, cur,
2258 page_end, NULL, 1);
2259 break;
2260 }
2261 em = epd->get_extent(inode, page, pg_offset, cur,
2262 end - cur + 1, 1);
2263 if (IS_ERR_OR_NULL(em)) {
2264 SetPageError(page);
2265 break;
2266 }
2267
2268 extent_offset = cur - em->start;
2269 BUG_ON(extent_map_end(em) <= cur);
2270 BUG_ON(end < cur);
2271 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2272 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2273 sector = (em->block_start + extent_offset) >> 9;
2274 bdev = em->bdev;
2275 block_start = em->block_start;
2276 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2277 free_extent_map(em);
2278 em = NULL;
2279
2280 /*
2281 * compressed and inline extents are written through other
2282 * paths in the FS
2283 */
2284 if (compressed || block_start == EXTENT_MAP_HOLE ||
2285 block_start == EXTENT_MAP_INLINE) {
2286 /*
2287 * end_io notification does not happen here for
2288 * compressed extents
2289 */
2290 if (!compressed && tree->ops &&
2291 tree->ops->writepage_end_io_hook)
2292 tree->ops->writepage_end_io_hook(page, cur,
2293 cur + iosize - 1,
2294 NULL, 1);
2295 else if (compressed) {
2296 /* we don't want to end_page_writeback on
2297 * a compressed extent. this happens
2298 * elsewhere
2299 */
2300 nr++;
2301 }
2302
2303 cur += iosize;
2304 pg_offset += iosize;
2305 continue;
2306 }
2307 /* leave this out until we have a page_mkwrite call */
2308 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2309 EXTENT_DIRTY, 0, NULL)) {
2310 cur = cur + iosize;
2311 pg_offset += iosize;
2312 continue;
2313 }
2314
2315 if (tree->ops && tree->ops->writepage_io_hook) {
2316 ret = tree->ops->writepage_io_hook(page, cur,
2317 cur + iosize - 1);
2318 } else {
2319 ret = 0;
2320 }
2321 if (ret) {
2322 SetPageError(page);
2323 } else {
2324 unsigned long max_nr = end_index + 1;
2325
2326 set_range_writeback(tree, cur, cur + iosize - 1);
2327 if (!PageWriteback(page)) {
2328 printk(KERN_ERR "btrfs warning page %lu not "
2329 "writeback, cur %llu end %llu\n",
2330 page->index, (unsigned long long)cur,
2331 (unsigned long long)end);
2332 }
2333
2334 ret = submit_extent_page(write_flags, tree, page,
2335 sector, iosize, pg_offset,
2336 bdev, &epd->bio, max_nr,
2337 end_bio_extent_writepage,
2338 0, 0, 0);
2339 if (ret)
2340 SetPageError(page);
2341 }
2342 cur = cur + iosize;
2343 pg_offset += iosize;
2344 nr++;
2345 }
2346done:
2347 if (nr == 0) {
2348 /* make sure the mapping tag for page dirty gets cleared */
2349 set_page_writeback(page);
2350 end_page_writeback(page);
2351 }
2352 unlock_page(page);
2353
2354done_unlocked:
2355
2356 /* drop our reference on any cached states */
2357 free_extent_state(cached_state);
2358 return 0;
2359}
2360
2361/**
2362 * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2363 * @mapping: address space structure to write
2364 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2365 * @writepage: function called for each page
2366 * @data: data passed to writepage function
2367 *
2368 * If a page is already under I/O, write_cache_pages() skips it, even
2369 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2370 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2371 * and msync() need to guarantee that all the data which was dirty at the time
2372 * the call was made get new I/O started against them. If wbc->sync_mode is
2373 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2374 * existing IO to complete.
2375 */
2376static int extent_write_cache_pages(struct extent_io_tree *tree,
2377 struct address_space *mapping,
2378 struct writeback_control *wbc,
2379 writepage_t writepage, void *data,
2380 void (*flush_fn)(void *))
2381{
2382 int ret = 0;
2383 int done = 0;
2384 int nr_to_write_done = 0;
2385 struct pagevec pvec;
2386 int nr_pages;
2387 pgoff_t index;
2388 pgoff_t end; /* Inclusive */
2389 int scanned = 0;
2390 int tag;
2391
2392 pagevec_init(&pvec, 0);
2393 if (wbc->range_cyclic) {
2394 index = mapping->writeback_index; /* Start from prev offset */
2395 end = -1;
2396 } else {
2397 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2398 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2399 scanned = 1;
2400 }
2401 if (wbc->sync_mode == WB_SYNC_ALL)
2402 tag = PAGECACHE_TAG_TOWRITE;
2403 else
2404 tag = PAGECACHE_TAG_DIRTY;
2405retry:
2406 if (wbc->sync_mode == WB_SYNC_ALL)
2407 tag_pages_for_writeback(mapping, index, end);
2408 while (!done && !nr_to_write_done && (index <= end) &&
2409 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
2410 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2411 unsigned i;
2412
2413 scanned = 1;
2414 for (i = 0; i < nr_pages; i++) {
2415 struct page *page = pvec.pages[i];
2416
2417 /*
2418 * At this point we hold neither mapping->tree_lock nor
2419 * lock on the page itself: the page may be truncated or
2420 * invalidated (changing page->mapping to NULL), or even
2421 * swizzled back from swapper_space to tmpfs file
2422 * mapping
2423 */
2424 if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2425 tree->ops->write_cache_pages_lock_hook(page);
2426 else
2427 lock_page(page);
2428
2429 if (unlikely(page->mapping != mapping)) {
2430 unlock_page(page);
2431 continue;
2432 }
2433
2434 if (!wbc->range_cyclic && page->index > end) {
2435 done = 1;
2436 unlock_page(page);
2437 continue;
2438 }
2439
2440 if (wbc->sync_mode != WB_SYNC_NONE) {
2441 if (PageWriteback(page))
2442 flush_fn(data);
2443 wait_on_page_writeback(page);
2444 }
2445
2446 if (PageWriteback(page) ||
2447 !clear_page_dirty_for_io(page)) {
2448 unlock_page(page);
2449 continue;
2450 }
2451
2452 ret = (*writepage)(page, wbc, data);
2453
2454 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2455 unlock_page(page);
2456 ret = 0;
2457 }
2458 if (ret)
2459 done = 1;
2460
2461 /*
2462 * the filesystem may choose to bump up nr_to_write.
2463 * We have to make sure to honor the new nr_to_write
2464 * at any time
2465 */
2466 nr_to_write_done = wbc->nr_to_write <= 0;
2467 }
2468 pagevec_release(&pvec);
2469 cond_resched();
2470 }
2471 if (!scanned && !done) {
2472 /*
2473 * We hit the last page and there is more work to be done: wrap
2474 * back to the start of the file
2475 */
2476 scanned = 1;
2477 index = 0;
2478 goto retry;
2479 }
2480 return ret;
2481}
2482
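/*
 * submit any bio built up in the extent_page_data, using WRITE_SYNC
 * when the caller asked for synchronous IO.
 */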
2483static void flush_epd_write_bio(struct extent_page_data *epd)
2484{
2485 if (epd->bio) {
2486 if (epd->sync_io)
2487 submit_one_bio(WRITE_SYNC, epd->bio, 0, 0);
2488 else
2489 submit_one_bio(WRITE, epd->bio, 0, 0);
2490 epd->bio = NULL;
2491 }
2492}
2493
2494static noinline void flush_write_bio(void *data)
2495{
2496 struct extent_page_data *epd = data;
2497 flush_epd_write_bio(epd);
2498}
2499
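/*
 * writepage entry point for a single page.  Wraps __extent_writepage
 * and flushes whatever bio was built up for the page.
 */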
2500int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2501 get_extent_t *get_extent,
2502 struct writeback_control *wbc)
2503{
2504 int ret;
2505 struct extent_page_data epd = {
2506 .bio = NULL,
2507 .tree = tree,
2508 .get_extent = get_extent,
2509 .extent_locked = 0,
2510 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
2511 };
2512
2513 ret = __extent_writepage(page, wbc, &epd);
2514
2515 flush_epd_write_bio(&epd);
2516 return ret;
2517}
2518
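/*
 * write out an already-allocated [start, end] range of an inode.  The
 * extent state for the range is expected to be locked by the caller
 * (extent_locked is set, so __extent_writepage skips the delalloc
 * handling); each dirty page in the range is written and released.
 */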
2519int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
2520 u64 start, u64 end, get_extent_t *get_extent,
2521 int mode)
2522{
2523 int ret = 0;
2524 struct address_space *mapping = inode->i_mapping;
2525 struct page *page;
2526 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
2527 PAGE_CACHE_SHIFT;
2528
2529 struct extent_page_data epd = {
2530 .bio = NULL,
2531 .tree = tree,
2532 .get_extent = get_extent,
2533 .extent_locked = 1,
2534 .sync_io = mode == WB_SYNC_ALL,
2535 };
2536 struct writeback_control wbc_writepages = {
2537 .sync_mode = mode,
2538 .nr_to_write = nr_pages * 2,
2539 .range_start = start,
2540 .range_end = end + 1,
2541 };
2542
2543 while (start <= end) {
2544 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
2545 if (clear_page_dirty_for_io(page))
2546 ret = __extent_writepage(page, &wbc_writepages, &epd);
2547 else {
2548 if (tree->ops && tree->ops->writepage_end_io_hook)
2549 tree->ops->writepage_end_io_hook(page, start,
2550 start + PAGE_CACHE_SIZE - 1,
2551 NULL, 1);
2552 unlock_page(page);
2553 }
2554 page_cache_release(page);
2555 start += PAGE_CACHE_SIZE;
2556 }
2557
2558 flush_epd_write_bio(&epd);
2559 return ret;
2560}
2561
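/*
 * writepages entry point.  Walks the dirty pages of the mapping with
 * extent_write_cache_pages and submits whatever bio is left at the end.
 */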
2562int extent_writepages(struct extent_io_tree *tree,
2563 struct address_space *mapping,
2564 get_extent_t *get_extent,
2565 struct writeback_control *wbc)
2566{
2567 int ret = 0;
2568 struct extent_page_data epd = {
2569 .bio = NULL,
2570 .tree = tree,
2571 .get_extent = get_extent,
2572 .extent_locked = 0,
2573 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
2574 };
2575
2576 ret = extent_write_cache_pages(tree, mapping, wbc,
2577 __extent_writepage, &epd,
2578 flush_write_bio);
2579 flush_epd_write_bio(&epd);
2580 return ret;
2581}
2582
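/*
 * readpages entry point.  Each page is added to the page cache and read
 * via __extent_read_full_page; reads are merged into as few bios as
 * possible and any remaining bio is submitted at the end.
 */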
2583int extent_readpages(struct extent_io_tree *tree,
2584 struct address_space *mapping,
2585 struct list_head *pages, unsigned nr_pages,
2586 get_extent_t get_extent)
2587{
2588 struct bio *bio = NULL;
2589 unsigned page_idx;
2590 unsigned long bio_flags = 0;
2591
2592 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2593 struct page *page = list_entry(pages->prev, struct page, lru);
2594
2595 prefetchw(&page->flags);
2596 list_del(&page->lru);
2597 if (!add_to_page_cache_lru(page, mapping,
2598 page->index, GFP_NOFS)) {
2599 __extent_read_full_page(tree, page, get_extent,
2600 &bio, 0, &bio_flags);
2601 }
2602 page_cache_release(page);
2603 }
2604 BUG_ON(!list_empty(pages));
2605 if (bio)
2606 submit_one_bio(READ, bio, 0, bio_flags);
2607 return 0;
2608}
2609
2610/*
2611 * basic invalidatepage code, this waits on any locked or writeback
2612 * ranges corresponding to the page, and then deletes any extent state
2613 * records from the tree
2614 */
2615int extent_invalidatepage(struct extent_io_tree *tree,
2616 struct page *page, unsigned long offset)
2617{
2618 struct extent_state *cached_state = NULL;
2619 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2620 u64 end = start + PAGE_CACHE_SIZE - 1;
2621 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2622
2623 start += (offset + blocksize - 1) & ~(blocksize - 1);
2624 if (start > end)
2625 return 0;
2626
2627 lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
2628 wait_on_page_writeback(page);
2629 clear_extent_bit(tree, start, end,
2630 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
2631 EXTENT_DO_ACCOUNTING,
2632 1, 1, &cached_state, GFP_NOFS);
2633 return 0;
2634}
2635
2636/*
2637 * a helper for releasepage, this tests for areas of the page that
2638 * are locked or under IO and drops the related state bits if it is safe
2639 * to drop the page.
2640 */
2641int try_release_extent_state(struct extent_map_tree *map,
2642 struct extent_io_tree *tree, struct page *page,
2643 gfp_t mask)
2644{
2645 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2646 u64 end = start + PAGE_CACHE_SIZE - 1;
2647 int ret = 1;
2648
2649 if (test_range_bit(tree, start, end,
2650 EXTENT_IOBITS, 0, NULL))
2651 ret = 0;
2652 else {
2653 if ((mask & GFP_NOFS) == GFP_NOFS)
2654 mask = GFP_NOFS;
2655 /*
2656 * at this point we can safely clear everything except the
2657 * locked bit and the nodatasum bit
2658 */
2659 ret = clear_extent_bit(tree, start, end,
2660 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
2661 0, 0, NULL, mask);
2662
2663 /* if clear_extent_bit failed for enomem reasons,
2664 * we can't allow the release to continue.
2665 */
2666 if (ret < 0)
2667 ret = 0;
2668 else
2669 ret = 1;
2670 }
2671 return ret;
2672}
2673
2674/*
2675 * a helper for releasepage. As long as there are no locked extents
2676 * in the range corresponding to the page, both state records and extent
2677 * map records are removed
2678 */
2679int try_release_extent_mapping(struct extent_map_tree *map,
2680 struct extent_io_tree *tree, struct page *page,
2681 gfp_t mask)
2682{
2683 struct extent_map *em;
2684 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2685 u64 end = start + PAGE_CACHE_SIZE - 1;
2686
2687 if ((mask & __GFP_WAIT) &&
2688 page->mapping->host->i_size > 16 * 1024 * 1024) {
2689 u64 len;
2690 while (start <= end) {
2691 len = end - start + 1;
2692 write_lock(&map->lock);
2693 em = lookup_extent_mapping(map, start, len);
2694 if (IS_ERR_OR_NULL(em)) {
2695 write_unlock(&map->lock);
2696 break;
2697 }
2698 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2699 em->start != start) {
2700 write_unlock(&map->lock);
2701 free_extent_map(em);
2702 break;
2703 }
2704 if (!test_range_bit(tree, em->start,
2705 extent_map_end(em) - 1,
2706 EXTENT_LOCKED | EXTENT_WRITEBACK,
2707 0, NULL)) {
2708 remove_extent_mapping(map, em);
2709 /* once for the rb tree */
2710 free_extent_map(em);
2711 }
2712 start = extent_map_end(em);
2713 write_unlock(&map->lock);
2714
2715 /* once for us */
2716 free_extent_map(em);
2717 }
2718 }
2719 return try_release_extent_state(map, tree, page, mask);
2720}
2721
2722/*
2723 * helper function for fiemap, which doesn't want to see any holes.
2724 * This maps until we find something past 'last'
2725 */
2726static struct extent_map *get_extent_skip_holes(struct inode *inode,
2727 u64 offset,
2728 u64 last,
2729 get_extent_t *get_extent)
2730{
2731 u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
2732 struct extent_map *em;
2733 u64 len;
2734
2735 if (offset >= last)
2736 return NULL;
2737
2738	while (1) {
2739 len = last - offset;
2740 if (len == 0)
2741 break;
2742 len = (len + sectorsize - 1) & ~(sectorsize - 1);
2743 em = get_extent(inode, NULL, 0, offset, len, 0);
2744 if (IS_ERR_OR_NULL(em))
2745 return em;
2746
2747 /* if this isn't a hole return it */
2748 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
2749 em->block_start != EXTENT_MAP_HOLE) {
2750 return em;
2751 }
2752
2753 /* this is a hole, advance to the next extent */
2754 offset = extent_map_end(em);
2755 free_extent_map(em);
2756 if (offset >= last)
2757 break;
2758 }
2759 return NULL;
2760}
2761
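/*
 * fiemap implementation.  The last file extent item is looked up first
 * so we know where to stop, the whole range is locked, and then
 * get_extent_skip_holes() is walked forward, emitting one fiemap entry
 * per extent found.
 */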
2762int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2763 __u64 start, __u64 len, get_extent_t *get_extent)
2764{
2765 int ret = 0;
2766 u64 off = start;
2767 u64 max = start + len;
2768 u32 flags = 0;
2769 u32 found_type;
2770 u64 last;
2771 u64 last_for_get_extent = 0;
2772 u64 disko = 0;
2773 u64 isize = i_size_read(inode);
2774 struct btrfs_key found_key;
2775 struct extent_map *em = NULL;
2776 struct extent_state *cached_state = NULL;
2777 struct btrfs_path *path;
2778 struct btrfs_file_extent_item *item;
2779 int end = 0;
2780 u64 em_start = 0;
2781 u64 em_len = 0;
2782 u64 em_end = 0;
2783 unsigned long emflags;
2784
2785 if (len == 0)
2786 return -EINVAL;
2787
2788 path = btrfs_alloc_path();
2789 if (!path)
2790 return -ENOMEM;
2791 path->leave_spinning = 1;
2792
2793 /*
2794 * lookup the last file extent. We're not using i_size here
2795 * because there might be preallocation past i_size
2796 */
2797 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
2798 path, btrfs_ino(inode), -1, 0);
2799 if (ret < 0) {
2800 btrfs_free_path(path);
2801 return ret;
2802 }
2803 WARN_ON(!ret);
2804 path->slots[0]--;
2805 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2806 struct btrfs_file_extent_item);
2807 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
2808 found_type = btrfs_key_type(&found_key);
2809
2810 /* No extents, but there might be delalloc bits */
2811 if (found_key.objectid != btrfs_ino(inode) ||
2812 found_type != BTRFS_EXTENT_DATA_KEY) {
2813 /* have to trust i_size as the end */
2814 last = (u64)-1;
2815 last_for_get_extent = isize;
2816 } else {
2817 /*
2818 * remember the start of the last extent. There are a
2819 * bunch of different factors that go into the length of the
2820 * extent, so its much less complex to remember where it started
2821 */
2822 last = found_key.offset;
2823 last_for_get_extent = last + 1;
2824 }
2825 btrfs_free_path(path);
2826
2827 /*
2828 * we might have some extents allocated but more delalloc past those
2829 * extents. so, we trust isize unless the start of the last extent is
2830 * beyond isize
2831 */
2832 if (last < isize) {
2833 last = (u64)-1;
2834 last_for_get_extent = isize;
2835 }
2836
2837 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
2838 &cached_state, GFP_NOFS);
2839
2840 em = get_extent_skip_holes(inode, off, last_for_get_extent,
2841 get_extent);
2842 if (!em)
2843 goto out;
2844 if (IS_ERR(em)) {
2845 ret = PTR_ERR(em);
2846 goto out;
2847 }
2848
2849 while (!end) {
2850 u64 offset_in_extent;
2851
2852 /* break if the extent we found is outside the range */
2853 if (em->start >= max || extent_map_end(em) < off)
2854 break;
2855
2856 /*
2857 * get_extent may return an extent that starts before our
2858 * requested range. We have to make sure the ranges
2859 * we return to fiemap always move forward and don't
2860 * overlap, so adjust the offsets here
2861 */
2862 em_start = max(em->start, off);
2863
2864 /*
2865 * record the offset from the start of the extent
2866 * for adjusting the disk offset below
2867 */
2868 offset_in_extent = em_start - em->start;
2869 em_end = extent_map_end(em);
2870 em_len = em_end - em_start;
2871 emflags = em->flags;
2872 disko = 0;
2873 flags = 0;
2874
2875 /*
2876 * bump off for our next call to get_extent
2877 */
2878 off = extent_map_end(em);
2879 if (off >= max)
2880 end = 1;
2881
2882 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
2883 end = 1;
2884 flags |= FIEMAP_EXTENT_LAST;
2885 } else if (em->block_start == EXTENT_MAP_INLINE) {
2886 flags |= (FIEMAP_EXTENT_DATA_INLINE |
2887 FIEMAP_EXTENT_NOT_ALIGNED);
2888 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
2889 flags |= (FIEMAP_EXTENT_DELALLOC |
2890 FIEMAP_EXTENT_UNKNOWN);
2891 } else {
2892 disko = em->block_start + offset_in_extent;
2893 }
2894 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
2895 flags |= FIEMAP_EXTENT_ENCODED;
2896
2897 free_extent_map(em);
2898 em = NULL;
2899 if ((em_start >= last) || em_len == (u64)-1 ||
2900 (last == (u64)-1 && isize <= em_end)) {
2901 flags |= FIEMAP_EXTENT_LAST;
2902 end = 1;
2903 }
2904
2905 /* now scan forward to see if this is really the last extent. */
2906 em = get_extent_skip_holes(inode, off, last_for_get_extent,
2907 get_extent);
2908 if (IS_ERR(em)) {
2909 ret = PTR_ERR(em);
2910 goto out;
2911 }
2912 if (!em) {
2913 flags |= FIEMAP_EXTENT_LAST;
2914 end = 1;
2915 }
2916 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
2917 em_len, flags);
2918 if (ret)
2919 goto out_free;
2920 }
2921out_free:
2922 free_extent_map(em);
2923out:
2924 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
2925 &cached_state, GFP_NOFS);
2926 return ret;
2927}
2928
2929static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2930 unsigned long i)
2931{
2932 struct page *p;
2933 struct address_space *mapping;
2934
2935 if (i == 0)
2936 return eb->first_page;
2937 i += eb->start >> PAGE_CACHE_SHIFT;
2938 mapping = eb->first_page->mapping;
2939 if (!mapping)
2940 return NULL;
2941
2942 /*
2943 * extent_buffer_page is only called after pinning the page
2944 * by increasing the reference count. So we know the page must
2945 * be in the radix tree.
2946 */
2947 rcu_read_lock();
2948 p = radix_tree_lookup(&mapping->page_tree, i);
2949 rcu_read_unlock();
2950
2951 return p;
2952}
2953
2954static inline unsigned long num_extent_pages(u64 start, u64 len)
2955{
2956 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2957 (start >> PAGE_CACHE_SHIFT);
2958}
2959
2960static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
2961 u64 start,
2962 unsigned long len,
2963 gfp_t mask)
2964{
2965 struct extent_buffer *eb = NULL;
2966#if LEAK_DEBUG
2967 unsigned long flags;
2968#endif
2969
2970 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2971 if (eb == NULL)
2972 return NULL;
2973 eb->start = start;
2974 eb->len = len;
2975 rwlock_init(&eb->lock);
2976 atomic_set(&eb->write_locks, 0);
2977 atomic_set(&eb->read_locks, 0);
2978 atomic_set(&eb->blocking_readers, 0);
2979 atomic_set(&eb->blocking_writers, 0);
2980 atomic_set(&eb->spinning_readers, 0);
2981 atomic_set(&eb->spinning_writers, 0);
2982 init_waitqueue_head(&eb->write_lock_wq);
2983 init_waitqueue_head(&eb->read_lock_wq);
2984
2985#if LEAK_DEBUG
2986 spin_lock_irqsave(&leak_lock, flags);
2987 list_add(&eb->leak_list, &buffers);
2988 spin_unlock_irqrestore(&leak_lock, flags);
2989#endif
2990 atomic_set(&eb->refs, 1);
2991
2992 return eb;
2993}
2994
2995static void __free_extent_buffer(struct extent_buffer *eb)
2996{
2997#if LEAK_DEBUG
2998 unsigned long flags;
2999 spin_lock_irqsave(&leak_lock, flags);
3000 list_del(&eb->leak_list);
3001 spin_unlock_irqrestore(&leak_lock, flags);
3002#endif
3003 kmem_cache_free(extent_buffer_cache, eb);
3004}
3005
3006/*
3007 * Helper for releasing extent buffer page.
3008 */
3009static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
3010 unsigned long start_idx)
3011{
3012 unsigned long index;
3013 struct page *page;
3014
3015 if (!eb->first_page)
3016 return;
3017
3018 index = num_extent_pages(eb->start, eb->len);
3019 if (start_idx >= index)
3020 return;
3021
3022 do {
3023 index--;
3024 page = extent_buffer_page(eb, index);
3025 if (page)
3026 page_cache_release(page);
3027 } while (index != start_idx);
3028}
3029
3030/*
3031 * Helper for releasing the extent buffer.
3032 */
3033static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
3034{
3035 btrfs_release_extent_buffer_page(eb, 0);
3036 __free_extent_buffer(eb);
3037}
3038
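/*
 * find or create the extent buffer covering [start, start + len).  An
 * existing buffer found in the radix tree is returned after taking an
 * extra reference; otherwise the backing pages are pinned, a new buffer
 * is inserted into the radix tree and returned with a reference for the
 * caller.
 */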
3039struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3040 u64 start, unsigned long len,
3041 struct page *page0)
3042{
3043 unsigned long num_pages = num_extent_pages(start, len);
3044 unsigned long i;
3045 unsigned long index = start >> PAGE_CACHE_SHIFT;
3046 struct extent_buffer *eb;
3047 struct extent_buffer *exists = NULL;
3048 struct page *p;
3049 struct address_space *mapping = tree->mapping;
3050 int uptodate = 1;
3051 int ret;
3052
3053 rcu_read_lock();
3054 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3055 if (eb && atomic_inc_not_zero(&eb->refs)) {
3056 rcu_read_unlock();
3057 mark_page_accessed(eb->first_page);
3058 return eb;
3059 }
3060 rcu_read_unlock();
3061
3062 eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
3063 if (!eb)
3064 return NULL;
3065
3066 if (page0) {
3067 eb->first_page = page0;
3068 i = 1;
3069 index++;
3070 page_cache_get(page0);
3071 mark_page_accessed(page0);
3072 set_page_extent_mapped(page0);
3073 set_page_extent_head(page0, len);
3074 uptodate = PageUptodate(page0);
3075 } else {
3076 i = 0;
3077 }
3078 for (; i < num_pages; i++, index++) {
3079 p = find_or_create_page(mapping, index, GFP_NOFS);
3080 if (!p) {
3081 WARN_ON(1);
3082 goto free_eb;
3083 }
3084 set_page_extent_mapped(p);
3085 mark_page_accessed(p);
3086 if (i == 0) {
3087 eb->first_page = p;
3088 set_page_extent_head(p, len);
3089 } else {
3090 set_page_private(p, EXTENT_PAGE_PRIVATE);
3091 }
3092 if (!PageUptodate(p))
3093 uptodate = 0;
3094
3095 /*
3096 * see below about how we avoid a nasty race with release page
3097 * and why we unlock later
3098 */
3099 if (i != 0)
3100 unlock_page(p);
3101 }
3102 if (uptodate)
3103 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3104
3105 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
3106 if (ret)
3107 goto free_eb;
3108
3109 spin_lock(&tree->buffer_lock);
3110 ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
3111 if (ret == -EEXIST) {
3112 exists = radix_tree_lookup(&tree->buffer,
3113 start >> PAGE_CACHE_SHIFT);
3114 /* add one reference for the caller */
3115 atomic_inc(&exists->refs);
3116 spin_unlock(&tree->buffer_lock);
3117 radix_tree_preload_end();
3118 goto free_eb;
3119 }
3120 /* add one reference for the tree */
3121 atomic_inc(&eb->refs);
3122 spin_unlock(&tree->buffer_lock);
3123 radix_tree_preload_end();
3124
3125 /*
3126 * there is a race where release page may have
3127 * tried to find this extent buffer in the radix
3128 * but failed. It will tell the VM it is safe to
3129	 * reclaim the page, and it will clear the page private bit.
3130 * We must make sure to set the page private bit properly
3131 * after the extent buffer is in the radix tree so
3132 * it doesn't get lost
3133 */
3134 set_page_extent_mapped(eb->first_page);
3135 set_page_extent_head(eb->first_page, eb->len);
3136 if (!page0)
3137 unlock_page(eb->first_page);
3138 return eb;
3139
3140free_eb:
3141 if (eb->first_page && !page0)
3142 unlock_page(eb->first_page);
3143
3144 if (!atomic_dec_and_test(&eb->refs))
3145 return exists;
3146 btrfs_release_extent_buffer(eb);
3147 return exists;
3148}
3149
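/*
 * look up an existing extent buffer in the radix tree and return it
 * with an extra reference, or NULL if it isn't there.
 */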
3150struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
3151 u64 start, unsigned long len)
3152{
3153 struct extent_buffer *eb;
3154
3155 rcu_read_lock();
3156 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3157 if (eb && atomic_inc_not_zero(&eb->refs)) {
3158 rcu_read_unlock();
3159 mark_page_accessed(eb->first_page);
3160 return eb;
3161 }
3162 rcu_read_unlock();
3163
3164 return NULL;
3165}
3166
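/*
 * drop the caller's reference on the extent buffer.  The last reference
 * is expected to be dropped by the releasepage path
 * (try_release_extent_buffer), so reaching zero here is unexpected and
 * triggers the WARN_ON below.
 */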
3167void free_extent_buffer(struct extent_buffer *eb)
3168{
3169 if (!eb)
3170 return;
3171
3172 if (!atomic_dec_and_test(&eb->refs))
3173 return;
3174
3175 WARN_ON(1);
3176}
3177
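/*
 * clear the dirty bit on every page of the extent buffer, including the
 * PAGECACHE_TAG_DIRTY tag in the mapping's radix tree, so the pages are
 * no longer picked up by writeback.
 */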
3178int clear_extent_buffer_dirty(struct extent_io_tree *tree,
3179 struct extent_buffer *eb)
3180{
3181 unsigned long i;
3182 unsigned long num_pages;
3183 struct page *page;
3184
3185 num_pages = num_extent_pages(eb->start, eb->len);
3186
3187 for (i = 0; i < num_pages; i++) {
3188 page = extent_buffer_page(eb, i);
3189 if (!PageDirty(page))
3190 continue;
3191
3192 lock_page(page);
3193 WARN_ON(!PagePrivate(page));
3194
3195 set_page_extent_mapped(page);
3196 if (i == 0)
3197 set_page_extent_head(page, eb->len);
3198
3199 clear_page_dirty_for_io(page);
3200 spin_lock_irq(&page->mapping->tree_lock);
3201 if (!PageDirty(page)) {
3202 radix_tree_tag_clear(&page->mapping->page_tree,
3203 page_index(page),
3204 PAGECACHE_TAG_DIRTY);
3205 }
3206 spin_unlock_irq(&page->mapping->tree_lock);
3207 unlock_page(page);
3208 }
3209 return 0;
3210}
3211
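/*
 * mark the extent buffer and all of its pages dirty.  Returns whether
 * the buffer was already dirty before this call.
 */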
3212int set_extent_buffer_dirty(struct extent_io_tree *tree,
3213 struct extent_buffer *eb)
3214{
3215 unsigned long i;
3216 unsigned long num_pages;
3217 int was_dirty = 0;
3218
3219 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3220 num_pages = num_extent_pages(eb->start, eb->len);
3221 for (i = 0; i < num_pages; i++)
3222 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
3223 return was_dirty;
3224}
3225
3226static int __eb_straddles_pages(u64 start, u64 len)
3227{
3228 if (len < PAGE_CACHE_SIZE)
3229 return 1;
3230 if (start & (PAGE_CACHE_SIZE - 1))
3231 return 1;
3232 if ((start + len) & (PAGE_CACHE_SIZE - 1))
3233 return 1;
3234 return 0;
3235}
3236
3237static int eb_straddles_pages(struct extent_buffer *eb)
3238{
3239 return __eb_straddles_pages(eb->start, eb->len);
3240}
3241
3242int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3243 struct extent_buffer *eb,
3244 struct extent_state **cached_state)
3245{
3246 unsigned long i;
3247 struct page *page;
3248 unsigned long num_pages;
3249
3250 num_pages = num_extent_pages(eb->start, eb->len);
3251 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3252
3253 if (eb_straddles_pages(eb)) {
3254 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3255 cached_state, GFP_NOFS);
3256 }
3257 for (i = 0; i < num_pages; i++) {
3258 page = extent_buffer_page(eb, i);
3259 if (page)
3260 ClearPageUptodate(page);
3261 }
3262 return 0;
3263}
3264
3265int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3266 struct extent_buffer *eb)
3267{
3268 unsigned long i;
3269 struct page *page;
3270 unsigned long num_pages;
3271
3272 num_pages = num_extent_pages(eb->start, eb->len);
3273
3274 if (eb_straddles_pages(eb)) {
3275 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3276 NULL, GFP_NOFS);
3277 }
3278 for (i = 0; i < num_pages; i++) {
3279 page = extent_buffer_page(eb, i);
3280 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3281 ((i == num_pages - 1) &&
3282 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3283 check_page_uptodate(tree, page);
3284 continue;
3285 }
3286 SetPageUptodate(page);
3287 }
3288 return 0;
3289}
3290
3291int extent_range_uptodate(struct extent_io_tree *tree,
3292 u64 start, u64 end)
3293{
3294 struct page *page;
3295 int ret;
3296 int pg_uptodate = 1;
3297 int uptodate;
3298 unsigned long index;
3299
3300 if (__eb_straddles_pages(start, end - start + 1)) {
3301 ret = test_range_bit(tree, start, end,
3302 EXTENT_UPTODATE, 1, NULL);
3303 if (ret)
3304 return 1;
3305 }
3306 while (start <= end) {
3307 index = start >> PAGE_CACHE_SHIFT;
3308 page = find_get_page(tree->mapping, index);
3309 uptodate = PageUptodate(page);
3310 page_cache_release(page);
3311 if (!uptodate) {
3312 pg_uptodate = 0;
3313 break;
3314 }
3315 start += PAGE_CACHE_SIZE;
3316 }
3317 return pg_uptodate;
3318}
3319
3320int extent_buffer_uptodate(struct extent_io_tree *tree,
3321 struct extent_buffer *eb,
3322 struct extent_state *cached_state)
3323{
3324 int ret = 0;
3325 unsigned long num_pages;
3326 unsigned long i;
3327 struct page *page;
3328 int pg_uptodate = 1;
3329
3330 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3331 return 1;
3332
3333 if (eb_straddles_pages(eb)) {
3334 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3335 EXTENT_UPTODATE, 1, cached_state);
3336 if (ret)
3337 return ret;
3338 }
3339
3340 num_pages = num_extent_pages(eb->start, eb->len);
3341 for (i = 0; i < num_pages; i++) {
3342 page = extent_buffer_page(eb, i);
3343 if (!PageUptodate(page)) {
3344 pg_uptodate = 0;
3345 break;
3346 }
3347 }
3348 return pg_uptodate;
3349}
3350
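/*
 * read the pages backing an extent buffer from disk.  With wait == 0
 * this only starts IO for pages that could be locked without blocking;
 * with wait set it waits for every page and returns -EIO if any of them
 * failed to become uptodate.
 */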
3351int read_extent_buffer_pages(struct extent_io_tree *tree,
3352 struct extent_buffer *eb,
3353 u64 start, int wait,
3354 get_extent_t *get_extent, int mirror_num)
3355{
3356 unsigned long i;
3357 unsigned long start_i;
3358 struct page *page;
3359 int err;
3360 int ret = 0;
3361 int locked_pages = 0;
3362 int all_uptodate = 1;
3363 int inc_all_pages = 0;
3364 unsigned long num_pages;
3365 struct bio *bio = NULL;
3366 unsigned long bio_flags = 0;
3367
3368 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3369 return 0;
3370
3371 if (eb_straddles_pages(eb)) {
3372 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3373 EXTENT_UPTODATE, 1, NULL)) {
3374 return 0;
3375 }
3376 }
3377
3378 if (start) {
3379 WARN_ON(start < eb->start);
3380 start_i = (start >> PAGE_CACHE_SHIFT) -
3381 (eb->start >> PAGE_CACHE_SHIFT);
3382 } else {
3383 start_i = 0;
3384 }
3385
3386 num_pages = num_extent_pages(eb->start, eb->len);
3387 for (i = start_i; i < num_pages; i++) {
3388 page = extent_buffer_page(eb, i);
3389 if (!wait) {
3390 if (!trylock_page(page))
3391 goto unlock_exit;
3392 } else {
3393 lock_page(page);
3394 }
3395 locked_pages++;
3396 if (!PageUptodate(page))
3397 all_uptodate = 0;
3398 }
3399 if (all_uptodate) {
3400 if (start_i == 0)
3401 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3402 goto unlock_exit;
3403 }
3404
3405 for (i = start_i; i < num_pages; i++) {
3406 page = extent_buffer_page(eb, i);
3407
3408 WARN_ON(!PagePrivate(page));
3409
3410 set_page_extent_mapped(page);
3411 if (i == 0)
3412 set_page_extent_head(page, eb->len);
3413
3414 if (inc_all_pages)
3415 page_cache_get(page);
3416 if (!PageUptodate(page)) {
3417 if (start_i == 0)
3418 inc_all_pages = 1;
3419 ClearPageError(page);
3420 err = __extent_read_full_page(tree, page,
3421 get_extent, &bio,
3422 mirror_num, &bio_flags);
3423 if (err)
3424 ret = err;
3425 } else {
3426 unlock_page(page);
3427 }
3428 }
3429
3430 if (bio)
3431 submit_one_bio(READ, bio, mirror_num, bio_flags);
3432
3433 if (ret || !wait)
3434 return ret;
3435
3436 for (i = start_i; i < num_pages; i++) {
3437 page = extent_buffer_page(eb, i);
3438 wait_on_page_locked(page);
3439 if (!PageUptodate(page))
3440 ret = -EIO;
3441 }
3442
3443 if (!ret)
3444 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3445 return ret;
3446
3447unlock_exit:
3448 i = start_i;
3449 while (locked_pages > 0) {
3450 page = extent_buffer_page(eb, i);
3451 i++;
3452 unlock_page(page);
3453 locked_pages--;
3454 }
3455 return ret;
3456}
3457
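/*
 * copy len bytes starting at offset start inside the extent buffer into
 * dstv, walking the buffer page by page.
 */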
3458void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3459 unsigned long start,
3460 unsigned long len)
3461{
3462 size_t cur;
3463 size_t offset;
3464 struct page *page;
3465 char *kaddr;
3466 char *dst = (char *)dstv;
3467 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3468 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3469
3470 WARN_ON(start > eb->len);
3471 WARN_ON(start + len > eb->start + eb->len);
3472
3473 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3474
3475 while (len > 0) {
3476 page = extent_buffer_page(eb, i);
3477
3478 cur = min(len, (PAGE_CACHE_SIZE - offset));
3479 kaddr = page_address(page);
3480 memcpy(dst, kaddr + offset, cur);
3481
3482 dst += cur;
3483 len -= cur;
3484 offset = 0;
3485 i++;
3486 }
3487}
3488
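/*
 * map a contiguous area of the extent buffer for direct access.  Fails
 * with -EINVAL if the requested range crosses a page boundary, since
 * the backing pages are not virtually contiguous.
 */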
3489int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3490 unsigned long min_len, char **map,
3491 unsigned long *map_start,
3492 unsigned long *map_len)
3493{
3494 size_t offset = start & (PAGE_CACHE_SIZE - 1);
3495 char *kaddr;
3496 struct page *p;
3497 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3498 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3499 unsigned long end_i = (start_offset + start + min_len - 1) >>
3500 PAGE_CACHE_SHIFT;
3501
3502 if (i != end_i)
3503 return -EINVAL;
3504
3505 if (i == 0) {
3506 offset = start_offset;
3507 *map_start = 0;
3508 } else {
3509 offset = 0;
3510 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3511 }
3512
3513 if (start + min_len > eb->len) {
3514 printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
3515 "wanted %lu %lu\n", (unsigned long long)eb->start,
3516 eb->len, start, min_len);
3517 WARN_ON(1);
3518 return -EINVAL;
3519 }
3520
3521 p = extent_buffer_page(eb, i);
3522 kaddr = page_address(p);
3523 *map = kaddr + offset;
3524 *map_len = PAGE_CACHE_SIZE - offset;
3525 return 0;
3526}
3527
3528int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3529 unsigned long start,
3530 unsigned long len)
3531{
3532 size_t cur;
3533 size_t offset;
3534 struct page *page;
3535 char *kaddr;
3536 char *ptr = (char *)ptrv;
3537 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3538 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3539 int ret = 0;
3540
3541 WARN_ON(start > eb->len);
3542 WARN_ON(start + len > eb->start + eb->len);
3543
3544 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3545
3546 while (len > 0) {
3547 page = extent_buffer_page(eb, i);
3548
3549 cur = min(len, (PAGE_CACHE_SIZE - offset));
3550
3551 kaddr = page_address(page);
3552 ret = memcmp(ptr, kaddr + offset, cur);
3553 if (ret)
3554 break;
3555
3556 ptr += cur;
3557 len -= cur;
3558 offset = 0;
3559 i++;
3560 }
3561 return ret;
3562}
3563
3564void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3565 unsigned long start, unsigned long len)
3566{
3567 size_t cur;
3568 size_t offset;
3569 struct page *page;
3570 char *kaddr;
3571 char *src = (char *)srcv;
3572 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3573 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3574
3575 WARN_ON(start > eb->len);
3576 WARN_ON(start + len > eb->start + eb->len);
3577
3578 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3579
3580 while (len > 0) {
3581 page = extent_buffer_page(eb, i);
3582 WARN_ON(!PageUptodate(page));
3583
3584 cur = min(len, PAGE_CACHE_SIZE - offset);
3585 kaddr = page_address(page);
3586 memcpy(kaddr + offset, src, cur);
3587
3588 src += cur;
3589 len -= cur;
3590 offset = 0;
3591 i++;
3592 }
3593}
3594
3595void memset_extent_buffer(struct extent_buffer *eb, char c,
3596 unsigned long start, unsigned long len)
3597{
3598 size_t cur;
3599 size_t offset;
3600 struct page *page;
3601 char *kaddr;
3602 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3603 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3604
3605 WARN_ON(start > eb->len);
3606 WARN_ON(start + len > eb->start + eb->len);
3607
3608 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3609
3610 while (len > 0) {
3611 page = extent_buffer_page(eb, i);
3612 WARN_ON(!PageUptodate(page));
3613
3614 cur = min(len, PAGE_CACHE_SIZE - offset);
3615 kaddr = page_address(page);
3616 memset(kaddr + offset, c, cur);
3617
3618 len -= cur;
3619 offset = 0;
3620 i++;
3621 }
3622}
3623
3624void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3625 unsigned long dst_offset, unsigned long src_offset,
3626 unsigned long len)
3627{
3628 u64 dst_len = dst->len;
3629 size_t cur;
3630 size_t offset;
3631 struct page *page;
3632 char *kaddr;
3633 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3634 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3635
3636 WARN_ON(src->len != dst_len);
3637
3638 offset = (start_offset + dst_offset) &
3639 ((unsigned long)PAGE_CACHE_SIZE - 1);
3640
3641 while (len > 0) {
3642 page = extent_buffer_page(dst, i);
3643 WARN_ON(!PageUptodate(page));
3644
3645 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3646
3647 kaddr = page_address(page);
3648 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3649
3650 src_offset += cur;
3651 len -= cur;
3652 offset = 0;
3653 i++;
3654 }
3655}
3656
3657static void move_pages(struct page *dst_page, struct page *src_page,
3658 unsigned long dst_off, unsigned long src_off,
3659 unsigned long len)
3660{
3661 char *dst_kaddr = page_address(dst_page);
3662 if (dst_page == src_page) {
3663 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3664 } else {
3665 char *src_kaddr = page_address(src_page);
3666 char *p = dst_kaddr + dst_off + len;
3667 char *s = src_kaddr + src_off + len;
3668
3669 while (len--)
3670 *--p = *--s;
3671 }
3672}
3673
3674static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
3675{
3676 unsigned long distance = (src > dst) ? src - dst : dst - src;
3677 return distance < len;
3678}
3679
3680static void copy_pages(struct page *dst_page, struct page *src_page,
3681 unsigned long dst_off, unsigned long src_off,
3682 unsigned long len)
3683{
3684 char *dst_kaddr = page_address(dst_page);
3685 char *src_kaddr;
3686
3687 if (dst_page != src_page) {
3688 src_kaddr = page_address(src_page);
3689 } else {
3690 src_kaddr = dst_kaddr;
3691 BUG_ON(areas_overlap(src_off, dst_off, len));
3692 }
3693
3694 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3695}
3696
3697void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3698 unsigned long src_offset, unsigned long len)
3699{
3700 size_t cur;
3701 size_t dst_off_in_page;
3702 size_t src_off_in_page;
3703 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3704 unsigned long dst_i;
3705 unsigned long src_i;
3706
3707 if (src_offset + len > dst->len) {
3708 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
3709 "len %lu dst len %lu\n", src_offset, len, dst->len);
3710 BUG_ON(1);
3711 }
3712 if (dst_offset + len > dst->len) {
3713 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
3714 "len %lu dst len %lu\n", dst_offset, len, dst->len);
3715 BUG_ON(1);
3716 }
3717
3718 while (len > 0) {
3719 dst_off_in_page = (start_offset + dst_offset) &
3720 ((unsigned long)PAGE_CACHE_SIZE - 1);
3721 src_off_in_page = (start_offset + src_offset) &
3722 ((unsigned long)PAGE_CACHE_SIZE - 1);
3723
3724 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3725 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3726
3727 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3728 src_off_in_page));
3729 cur = min_t(unsigned long, cur,
3730 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3731
3732 copy_pages(extent_buffer_page(dst, dst_i),
3733 extent_buffer_page(dst, src_i),
3734 dst_off_in_page, src_off_in_page, cur);
3735
3736 src_offset += cur;
3737 dst_offset += cur;
3738 len -= cur;
3739 }
3740}
3741
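/*
 * memmove within a single extent buffer.  Overlapping ranges are copied
 * back to front (via move_pages) so source bytes are not overwritten
 * before they are read; non-overlapping ranges fall back to
 * memcpy_extent_buffer.
 */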
3742void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3743 unsigned long src_offset, unsigned long len)
3744{
3745 size_t cur;
3746 size_t dst_off_in_page;
3747 size_t src_off_in_page;
3748 unsigned long dst_end = dst_offset + len - 1;
3749 unsigned long src_end = src_offset + len - 1;
3750 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3751 unsigned long dst_i;
3752 unsigned long src_i;
3753
3754 if (src_offset + len > dst->len) {
3755 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
3756 "len %lu len %lu\n", src_offset, len, dst->len);
3757 BUG_ON(1);
3758 }
3759 if (dst_offset + len > dst->len) {
3760 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
3761 "len %lu len %lu\n", dst_offset, len, dst->len);
3762 BUG_ON(1);
3763 }
3764 if (!areas_overlap(src_offset, dst_offset, len)) {
3765 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3766 return;
3767 }
3768 while (len > 0) {
3769 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3770 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3771
3772 dst_off_in_page = (start_offset + dst_end) &
3773 ((unsigned long)PAGE_CACHE_SIZE - 1);
3774 src_off_in_page = (start_offset + src_end) &
3775 ((unsigned long)PAGE_CACHE_SIZE - 1);
3776
3777 cur = min_t(unsigned long, len, src_off_in_page + 1);
3778 cur = min(cur, dst_off_in_page + 1);
3779 move_pages(extent_buffer_page(dst, dst_i),
3780 extent_buffer_page(dst, src_i),
3781 dst_off_in_page - cur + 1,
3782 src_off_in_page - cur + 1, cur);
3783
3784 dst_end -= cur;
3785 src_end -= cur;
3786 len -= cur;
3787 }
3788}
3789
3790static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
3791{
3792 struct extent_buffer *eb =
3793 container_of(head, struct extent_buffer, rcu_head);
3794
3795 btrfs_release_extent_buffer(eb);
3796}
3797
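/*
 * releasepage helper for extent buffer pages.  If the buffer is clean
 * and only the tree holds a reference, drop it from the radix tree and
 * free it via RCU, returning 1 so the page can be released.
 */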
3798int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
3799{
3800 u64 start = page_offset(page);
3801 struct extent_buffer *eb;
3802 int ret = 1;
3803
3804 spin_lock(&tree->buffer_lock);
3805 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3806 if (!eb) {
3807 spin_unlock(&tree->buffer_lock);
3808 return ret;
3809 }
3810
3811 if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3812 ret = 0;
3813 goto out;
3814 }
3815
3816 /*
3817 * set @eb->refs to 0 if it is already 1, and then release the @eb.
3818	 * Otherwise someone else still holds a reference, so leave it alone.
3819 */
3820 if (atomic_cmpxchg(&eb->refs, 1, 0) != 1) {
3821 ret = 0;
3822 goto out;
3823 }
3824
3825 radix_tree_delete(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3826out:
3827 spin_unlock(&tree->buffer_lock);
3828
3829 /* at this point we can safely release the extent buffer */
3830 if (atomic_read(&eb->refs) == 0)
3831 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
3832 return ret;
3833}
1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/bitops.h>
4#include <linux/slab.h>
5#include <linux/bio.h>
6#include <linux/mm.h>
7#include <linux/pagemap.h>
8#include <linux/page-flags.h>
9#include <linux/spinlock.h>
10#include <linux/blkdev.h>
11#include <linux/swap.h>
12#include <linux/writeback.h>
13#include <linux/pagevec.h>
14#include <linux/prefetch.h>
15#include <linux/cleancache.h>
16#include "extent_io.h"
17#include "extent-io-tree.h"
18#include "extent_map.h"
19#include "ctree.h"
20#include "btrfs_inode.h"
21#include "volumes.h"
22#include "check-integrity.h"
23#include "locking.h"
24#include "rcu-string.h"
25#include "backref.h"
26#include "disk-io.h"
27
28static struct kmem_cache *extent_state_cache;
29static struct kmem_cache *extent_buffer_cache;
30static struct bio_set btrfs_bioset;
31
32static inline bool extent_state_in_tree(const struct extent_state *state)
33{
34 return !RB_EMPTY_NODE(&state->rb_node);
35}
36
37#ifdef CONFIG_BTRFS_DEBUG
38static LIST_HEAD(states);
39static DEFINE_SPINLOCK(leak_lock);
40
41static inline void btrfs_leak_debug_add(spinlock_t *lock,
42 struct list_head *new,
43 struct list_head *head)
44{
45 unsigned long flags;
46
47 spin_lock_irqsave(lock, flags);
48 list_add(new, head);
49 spin_unlock_irqrestore(lock, flags);
50}
51
52static inline void btrfs_leak_debug_del(spinlock_t *lock,
53 struct list_head *entry)
54{
55 unsigned long flags;
56
57 spin_lock_irqsave(lock, flags);
58 list_del(entry);
59 spin_unlock_irqrestore(lock, flags);
60}
61
62void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
63{
64 struct extent_buffer *eb;
65 unsigned long flags;
66
67 /*
68 * If we didn't get into open_ctree our allocated_ebs will not be
69 * initialized, so just skip this.
70 */
71 if (!fs_info->allocated_ebs.next)
72 return;
73
74 spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
75 while (!list_empty(&fs_info->allocated_ebs)) {
76 eb = list_first_entry(&fs_info->allocated_ebs,
77 struct extent_buffer, leak_list);
78 pr_err(
79 "BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n",
80 eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
81 btrfs_header_owner(eb));
82 list_del(&eb->leak_list);
83 kmem_cache_free(extent_buffer_cache, eb);
84 }
85 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
86}
87
88static inline void btrfs_extent_state_leak_debug_check(void)
89{
90 struct extent_state *state;
91
92 while (!list_empty(&states)) {
93 state = list_entry(states.next, struct extent_state, leak_list);
94 pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
95 state->start, state->end, state->state,
96 extent_state_in_tree(state),
97 refcount_read(&state->refs));
98 list_del(&state->leak_list);
99 kmem_cache_free(extent_state_cache, state);
100 }
101}
102
103#define btrfs_debug_check_extent_io_range(tree, start, end) \
104 __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
105static inline void __btrfs_debug_check_extent_io_range(const char *caller,
106 struct extent_io_tree *tree, u64 start, u64 end)
107{
108 struct inode *inode = tree->private_data;
109 u64 isize;
110
111 if (!inode || !is_data_inode(inode))
112 return;
113
114 isize = i_size_read(inode);
115 if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
116 btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
117 "%s: ino %llu isize %llu odd range [%llu,%llu]",
118 caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
119 }
120}
121#else
122#define btrfs_leak_debug_add(lock, new, head) do {} while (0)
123#define btrfs_leak_debug_del(lock, entry) do {} while (0)
124#define btrfs_extent_state_leak_debug_check() do {} while (0)
125#define btrfs_debug_check_extent_io_range(c, s, e) do {} while (0)
126#endif
127
128struct tree_entry {
129 u64 start;
130 u64 end;
131 struct rb_node rb_node;
132};
133
134struct extent_page_data {
135 struct bio *bio;
136 /* tells writepage not to lock the state bits for this range
137 * it still does the unlocking
138 */
139 unsigned int extent_locked:1;
140
141 /* tells the submit_bio code to use REQ_SYNC */
142 unsigned int sync_io:1;
143};
144
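/*
 * record a state change in the changeset: account the number of bytes
 * affected and remember the range, skipping no-op transitions.
 */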
145static int add_extent_changeset(struct extent_state *state, unsigned bits,
146 struct extent_changeset *changeset,
147 int set)
148{
149 int ret;
150
151 if (!changeset)
152 return 0;
153 if (set && (state->state & bits) == bits)
154 return 0;
155 if (!set && (state->state & bits) == 0)
156 return 0;
157 changeset->bytes_changed += state->end - state->start + 1;
158 ret = ulist_add(&changeset->range_changed, state->start, state->end,
159 GFP_ATOMIC);
160 return ret;
161}
162
163static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
164 unsigned long bio_flags)
165{
166 blk_status_t ret = 0;
167 struct extent_io_tree *tree = bio->bi_private;
168
169 bio->bi_private = NULL;
170
171 if (tree->ops)
172 ret = tree->ops->submit_bio_hook(tree->private_data, bio,
173 mirror_num, bio_flags);
174 else
175 btrfsic_submit_bio(bio);
176
177 return blk_status_to_errno(ret);
178}
179
180/* Cleanup unsubmitted bios */
181static void end_write_bio(struct extent_page_data *epd, int ret)
182{
183 if (epd->bio) {
184 epd->bio->bi_status = errno_to_blk_status(ret);
185 bio_endio(epd->bio);
186 epd->bio = NULL;
187 }
188}
189
190/*
191 * Submit bio from extent page data via submit_one_bio
192 *
193 * Return 0 if everything is OK.
194 * Return <0 for error.
195 */
196static int __must_check flush_write_bio(struct extent_page_data *epd)
197{
198 int ret = 0;
199
200 if (epd->bio) {
201 ret = submit_one_bio(epd->bio, 0, 0);
202 /*
203 * Clean up of epd->bio is handled by its endio function.
204 * And endio is either triggered by successful bio execution
205 * or the error handler of submit bio hook.
206 * So at this point, no matter what happened, we don't need
207 * to clean up epd->bio.
208 */
209 epd->bio = NULL;
210 }
211 return ret;
212}
213
214int __init extent_state_cache_init(void)
215{
216 extent_state_cache = kmem_cache_create("btrfs_extent_state",
217 sizeof(struct extent_state), 0,
218 SLAB_MEM_SPREAD, NULL);
219 if (!extent_state_cache)
220 return -ENOMEM;
221 return 0;
222}
223
224int __init extent_io_init(void)
225{
226 extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
227 sizeof(struct extent_buffer), 0,
228 SLAB_MEM_SPREAD, NULL);
229 if (!extent_buffer_cache)
230 return -ENOMEM;
231
232 if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
233 offsetof(struct btrfs_io_bio, bio),
234 BIOSET_NEED_BVECS))
235 goto free_buffer_cache;
236
237 if (bioset_integrity_create(&btrfs_bioset, BIO_POOL_SIZE))
238 goto free_bioset;
239
240 return 0;
241
242free_bioset:
243 bioset_exit(&btrfs_bioset);
244
245free_buffer_cache:
246 kmem_cache_destroy(extent_buffer_cache);
247 extent_buffer_cache = NULL;
248 return -ENOMEM;
249}
250
251void __cold extent_state_cache_exit(void)
252{
253 btrfs_extent_state_leak_debug_check();
254 kmem_cache_destroy(extent_state_cache);
255}
256
257void __cold extent_io_exit(void)
258{
259 /*
260 * Make sure all delayed rcu free are flushed before we
261 * destroy caches.
262 */
263 rcu_barrier();
264 kmem_cache_destroy(extent_buffer_cache);
265 bioset_exit(&btrfs_bioset);
266}
267
268/*
269 * For the file_extent_tree, we want to hold the inode lock when we lookup and
270 * update the disk_i_size, but lockdep will complain because for our io_tree we
271 * hold the tree lock and take the inode lock when setting delalloc. These two things
272 * are unrelated, so make a class for the file_extent_tree so we don't get the
273 * two locking patterns mixed up.
274 */
275static struct lock_class_key file_extent_tree_class;
276
277void extent_io_tree_init(struct btrfs_fs_info *fs_info,
278 struct extent_io_tree *tree, unsigned int owner,
279 void *private_data)
280{
281 tree->fs_info = fs_info;
282 tree->state = RB_ROOT;
283 tree->ops = NULL;
284 tree->dirty_bytes = 0;
285 spin_lock_init(&tree->lock);
286 tree->private_data = private_data;
287 tree->owner = owner;
288 if (owner == IO_TREE_INODE_FILE_EXTENT)
289 lockdep_set_class(&tree->lock, &file_extent_tree_class);
290}
291
292void extent_io_tree_release(struct extent_io_tree *tree)
293{
294 spin_lock(&tree->lock);
295 /*
296 * Do a single barrier for the waitqueue_active check here, the state
297 * of the waitqueue should not change once extent_io_tree_release is
298 * called.
299 */
300 smp_mb();
301 while (!RB_EMPTY_ROOT(&tree->state)) {
302 struct rb_node *node;
303 struct extent_state *state;
304
305 node = rb_first(&tree->state);
306 state = rb_entry(node, struct extent_state, rb_node);
307 rb_erase(&state->rb_node, &tree->state);
308 RB_CLEAR_NODE(&state->rb_node);
309 /*
310 * btree io trees aren't supposed to have tasks waiting for
311 * changes in the flags of extent states ever.
312 */
313 ASSERT(!waitqueue_active(&state->wq));
314 free_extent_state(state);
315
316 cond_resched_lock(&tree->lock);
317 }
318 spin_unlock(&tree->lock);
319}
320
321static struct extent_state *alloc_extent_state(gfp_t mask)
322{
323 struct extent_state *state;
324
325 /*
326	 * The given mask might not be appropriate for the slab allocator,
327	 * so drop the unsupported bits.
328 */
329 mask &= ~(__GFP_DMA32|__GFP_HIGHMEM);
330 state = kmem_cache_alloc(extent_state_cache, mask);
331 if (!state)
332 return state;
333 state->state = 0;
334 state->failrec = NULL;
335 RB_CLEAR_NODE(&state->rb_node);
336 btrfs_leak_debug_add(&leak_lock, &state->leak_list, &states);
337 refcount_set(&state->refs, 1);
338 init_waitqueue_head(&state->wq);
339 trace_alloc_extent_state(state, mask, _RET_IP_);
340 return state;
341}
342
343void free_extent_state(struct extent_state *state)
344{
345 if (!state)
346 return;
347 if (refcount_dec_and_test(&state->refs)) {
348 WARN_ON(extent_state_in_tree(state));
349 btrfs_leak_debug_del(&leak_lock, &state->leak_list);
350 trace_free_extent_state(state, _RET_IP_);
351 kmem_cache_free(extent_state_cache, state);
352 }
353}
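
/*
 * Hedged sketch of the expected alloc/free pairing (illustration only): an
 * extent_state starts with a single reference, extra references are taken
 * with refcount_inc() by users such as a cached state, and every reference is
 * dropped through free_extent_state().
 *
 *	struct extent_state *state;
 *
 *	state = alloc_extent_state(GFP_NOFS);
 *	if (!state)
 *		return -ENOMEM;
 *	...
 *	free_extent_state(state);
 */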
354
355static struct rb_node *tree_insert(struct rb_root *root,
356 struct rb_node *search_start,
357 u64 offset,
358 struct rb_node *node,
359 struct rb_node ***p_in,
360 struct rb_node **parent_in)
361{
362 struct rb_node **p;
363 struct rb_node *parent = NULL;
364 struct tree_entry *entry;
365
366 if (p_in && parent_in) {
367 p = *p_in;
368 parent = *parent_in;
369 goto do_insert;
370 }
371
372 p = search_start ? &search_start : &root->rb_node;
373 while (*p) {
374 parent = *p;
375 entry = rb_entry(parent, struct tree_entry, rb_node);
376
377 if (offset < entry->start)
378 p = &(*p)->rb_left;
379 else if (offset > entry->end)
380 p = &(*p)->rb_right;
381 else
382 return parent;
383 }
384
385do_insert:
386 rb_link_node(node, parent, p);
387 rb_insert_color(node, root);
388 return NULL;
389}
390
391/**
392 * __etree_search - search @tree for an entry that contains @offset. Such
393 * entry would have entry->start <= offset && entry->end >= offset.
394 *
395 * @tree - the tree to search
396 * @offset - offset that should fall within an entry in @tree
397 * @next_ret - pointer to the first entry whose range ends after @offset
398 * @prev_ret - pointer to the first entry whose range begins before @offset
399 * @p_ret - pointer where new node should be anchored (used when inserting an
400 * entry in the tree)
401 * @parent_ret - points to entry which would have been the parent of the entry,
402 * containing @offset
403 *
404 * This function returns a pointer to the entry that contains @offset byte
405 * address. If no such entry exists, then NULL is returned and the other
406 * pointer arguments to the function are filled, otherwise the found entry is
407 * returned and other pointers are left untouched.
408 */
409static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
410 struct rb_node **next_ret,
411 struct rb_node **prev_ret,
412 struct rb_node ***p_ret,
413 struct rb_node **parent_ret)
414{
415 struct rb_root *root = &tree->state;
416 struct rb_node **n = &root->rb_node;
417 struct rb_node *prev = NULL;
418 struct rb_node *orig_prev = NULL;
419 struct tree_entry *entry;
420 struct tree_entry *prev_entry = NULL;
421
422 while (*n) {
423 prev = *n;
424 entry = rb_entry(prev, struct tree_entry, rb_node);
425 prev_entry = entry;
426
427 if (offset < entry->start)
428 n = &(*n)->rb_left;
429 else if (offset > entry->end)
430 n = &(*n)->rb_right;
431 else
432 return *n;
433 }
434
435 if (p_ret)
436 *p_ret = n;
437 if (parent_ret)
438 *parent_ret = prev;
439
440 if (next_ret) {
441 orig_prev = prev;
442 while (prev && offset > prev_entry->end) {
443 prev = rb_next(prev);
444 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
445 }
446 *next_ret = prev;
447 prev = orig_prev;
448 }
449
450 if (prev_ret) {
451 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
452 while (prev && offset < prev_entry->start) {
453 prev = rb_prev(prev);
454 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
455 }
456 *prev_ret = prev;
457 }
458 return NULL;
459}
460
461static inline struct rb_node *
462tree_search_for_insert(struct extent_io_tree *tree,
463 u64 offset,
464 struct rb_node ***p_ret,
465 struct rb_node **parent_ret)
466{
467	struct rb_node *next = NULL;
468 struct rb_node *ret;
469
470 ret = __etree_search(tree, offset, &next, NULL, p_ret, parent_ret);
471 if (!ret)
472 return next;
473 return ret;
474}
475
476static inline struct rb_node *tree_search(struct extent_io_tree *tree,
477 u64 offset)
478{
479 return tree_search_for_insert(tree, offset, NULL, NULL);
480}
481
482/*
483 * utility function to look for merge candidates inside a given range.
484 * Any extents with matching state are merged together into a single
485 * extent in the tree. Extents with EXTENT_LOCKED or EXTENT_BOUNDARY in their
486 * state field are not merged because the end_io handlers need to be able to
487 * do operations on them without sleeping (or doing allocations/splits).
488 *
489 * This should be called with the tree lock held.
490 */
491static void merge_state(struct extent_io_tree *tree,
492 struct extent_state *state)
493{
494 struct extent_state *other;
495 struct rb_node *other_node;
496
497 if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY))
498 return;
499
500 other_node = rb_prev(&state->rb_node);
501 if (other_node) {
502 other = rb_entry(other_node, struct extent_state, rb_node);
503 if (other->end == state->start - 1 &&
504 other->state == state->state) {
505 if (tree->private_data &&
506 is_data_inode(tree->private_data))
507 btrfs_merge_delalloc_extent(tree->private_data,
508 state, other);
509 state->start = other->start;
510 rb_erase(&other->rb_node, &tree->state);
511 RB_CLEAR_NODE(&other->rb_node);
512 free_extent_state(other);
513 }
514 }
515 other_node = rb_next(&state->rb_node);
516 if (other_node) {
517 other = rb_entry(other_node, struct extent_state, rb_node);
518 if (other->start == state->end + 1 &&
519 other->state == state->state) {
520 if (tree->private_data &&
521 is_data_inode(tree->private_data))
522 btrfs_merge_delalloc_extent(tree->private_data,
523 state, other);
524 state->end = other->end;
525 rb_erase(&other->rb_node, &tree->state);
526 RB_CLEAR_NODE(&other->rb_node);
527 free_extent_state(other);
528 }
529 }
530}
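
/*
 * Worked example (illustration only): if the tree holds [0, 4095] and
 * [4096, 8191] with identical state bits and 'state' is the second entry,
 * merge_state() erases the first entry and extends 'state' to cover
 * [0, 8191]. Nothing is merged if either entry carries EXTENT_LOCKED or
 * EXTENT_BOUNDARY.
 */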
531
532static void set_state_bits(struct extent_io_tree *tree,
533 struct extent_state *state, unsigned *bits,
534 struct extent_changeset *changeset);
535
536/*
537 * insert an extent_state struct into the tree. 'bits' are set on the
538 * struct before it is inserted.
539 *
540 * This may return -EEXIST if the extent is already there, in which case the
541 * state struct is freed.
542 *
543 * The tree lock is not taken internally. This is a utility function and
544 * probably isn't what you want to call (see set/clear_extent_bit).
545 */
546static int insert_state(struct extent_io_tree *tree,
547 struct extent_state *state, u64 start, u64 end,
548 struct rb_node ***p,
549 struct rb_node **parent,
550 unsigned *bits, struct extent_changeset *changeset)
551{
552 struct rb_node *node;
553
554 if (end < start) {
555 btrfs_err(tree->fs_info,
556 "insert state: end < start %llu %llu", end, start);
557 WARN_ON(1);
558 }
559 state->start = start;
560 state->end = end;
561
562 set_state_bits(tree, state, bits, changeset);
563
564 node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
565 if (node) {
566 struct extent_state *found;
567 found = rb_entry(node, struct extent_state, rb_node);
568 btrfs_err(tree->fs_info,
569 "found node %llu %llu on insert of %llu %llu",
570 found->start, found->end, start, end);
571 return -EEXIST;
572 }
573 merge_state(tree, state);
574 return 0;
575}
576
577/*
578 * split a given extent state struct in two, inserting the preallocated
579 * struct 'prealloc' as the newly created second half. 'split' indicates an
580 * offset inside 'orig' where it should be split.
581 *
582 * Before calling, the tree has 'orig' at [orig->start, orig->end].
583 * After calling, there are two extent state structs in the tree:
584 *
585 * prealloc: [orig->start, split - 1]
586 * orig:     [split, orig->end]
587 *
588 * The tree locks are not taken by this function. They need to be held
589 * by the caller.
590 */
591static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
592 struct extent_state *prealloc, u64 split)
593{
594 struct rb_node *node;
595
596 if (tree->private_data && is_data_inode(tree->private_data))
597 btrfs_split_delalloc_extent(tree->private_data, orig, split);
598
599 prealloc->start = orig->start;
600 prealloc->end = split - 1;
601 prealloc->state = orig->state;
602 orig->start = split;
603
604 node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
605 &prealloc->rb_node, NULL, NULL);
606 if (node) {
607 free_extent_state(prealloc);
608 return -EEXIST;
609 }
610 return 0;
611}
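
/*
 * Worked example (illustration only): with 'orig' covering [0, 8191] and
 * split == 4096, a successful split_state() leaves two entries in the tree:
 *
 *	prealloc: [0, 4095]
 *	orig:     [4096, 8191]
 *
 * both carrying the state bits that 'orig' had before the call.
 */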
612
613static struct extent_state *next_state(struct extent_state *state)
614{
615 struct rb_node *next = rb_next(&state->rb_node);
616 if (next)
617 return rb_entry(next, struct extent_state, rb_node);
618 else
619 return NULL;
620}
621
622/*
623 * utility function to clear some bits in an extent state struct.
624 * it will optionally wake up anyone waiting on this state (wake == 1).
625 *
626 * If no bits are set on the state struct after clearing things, the
627 * struct is freed and removed from the tree
628 */
629static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
630 struct extent_state *state,
631 unsigned *bits, int wake,
632 struct extent_changeset *changeset)
633{
634 struct extent_state *next;
635 unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS;
636 int ret;
637
638 if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
639 u64 range = state->end - state->start + 1;
640 WARN_ON(range > tree->dirty_bytes);
641 tree->dirty_bytes -= range;
642 }
643
644 if (tree->private_data && is_data_inode(tree->private_data))
645 btrfs_clear_delalloc_extent(tree->private_data, state, bits);
646
647 ret = add_extent_changeset(state, bits_to_clear, changeset, 0);
648 BUG_ON(ret < 0);
649 state->state &= ~bits_to_clear;
650 if (wake)
651 wake_up(&state->wq);
652 if (state->state == 0) {
653 next = next_state(state);
654 if (extent_state_in_tree(state)) {
655 rb_erase(&state->rb_node, &tree->state);
656 RB_CLEAR_NODE(&state->rb_node);
657 free_extent_state(state);
658 } else {
659 WARN_ON(1);
660 }
661 } else {
662 merge_state(tree, state);
663 next = next_state(state);
664 }
665 return next;
666}
667
668static struct extent_state *
669alloc_extent_state_atomic(struct extent_state *prealloc)
670{
671 if (!prealloc)
672 prealloc = alloc_extent_state(GFP_ATOMIC);
673
674 return prealloc;
675}
676
677static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
678{
679 struct inode *inode = tree->private_data;
680
681 btrfs_panic(btrfs_sb(inode->i_sb), err,
682 "locking error: extent tree was modified by another thread while locked");
683}
684
685/*
686 * clear some bits on a range in the tree. This may require splitting
687 * or inserting elements in the tree, so the gfp mask is used to
688 * indicate which allocations or sleeping are allowed.
689 *
690 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
691 * the given range from the tree regardless of state (ie for truncate).
692 *
693 * the range [start, end] is inclusive.
694 *
695 * This takes the tree lock, and returns 0 on success and < 0 on error.
696 */
697int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
698 unsigned bits, int wake, int delete,
699 struct extent_state **cached_state,
700 gfp_t mask, struct extent_changeset *changeset)
701{
702 struct extent_state *state;
703 struct extent_state *cached;
704 struct extent_state *prealloc = NULL;
705 struct rb_node *node;
706 u64 last_end;
707 int err;
708 int clear = 0;
709
710 btrfs_debug_check_extent_io_range(tree, start, end);
711 trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);
712
713 if (bits & EXTENT_DELALLOC)
714 bits |= EXTENT_NORESERVE;
715
716 if (delete)
717 bits |= ~EXTENT_CTLBITS;
718
719 if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY))
720 clear = 1;
721again:
722 if (!prealloc && gfpflags_allow_blocking(mask)) {
723 /*
724		 * Don't care about allocation failure here because we might
725		 * end up not needing the pre-allocated extent state at all,
726		 * which is the case if the extent states already in the tree
727		 * cover our input range and do not extend outside of it.
728 * If we end up needing a new extent state we allocate it later.
729 */
730 prealloc = alloc_extent_state(mask);
731 }
732
733 spin_lock(&tree->lock);
734 if (cached_state) {
735 cached = *cached_state;
736
737 if (clear) {
738 *cached_state = NULL;
739 cached_state = NULL;
740 }
741
742 if (cached && extent_state_in_tree(cached) &&
743 cached->start <= start && cached->end > start) {
744 if (clear)
745 refcount_dec(&cached->refs);
746 state = cached;
747 goto hit_next;
748 }
749 if (clear)
750 free_extent_state(cached);
751 }
752 /*
753 * this search will find the extents that end after
754 * our range starts
755 */
756 node = tree_search(tree, start);
757 if (!node)
758 goto out;
759 state = rb_entry(node, struct extent_state, rb_node);
760hit_next:
761 if (state->start > end)
762 goto out;
763 WARN_ON(state->end < start);
764 last_end = state->end;
765
766 /* the state doesn't have the wanted bits, go ahead */
767 if (!(state->state & bits)) {
768 state = next_state(state);
769 goto next;
770 }
771
772 /*
773 * | ---- desired range ---- |
774 * | state | or
775 * | ------------- state -------------- |
776 *
777 * We need to split the extent we found, and may flip
778 * bits on second half.
779 *
780 * If the extent we found extends past our range, we
781 * just split and search again. It'll get split again
782 * the next time though.
783 *
784 * If the extent we found is inside our range, we clear
785 * the desired bit on it.
786 */
787
788 if (state->start < start) {
789 prealloc = alloc_extent_state_atomic(prealloc);
790 BUG_ON(!prealloc);
791 err = split_state(tree, state, prealloc, start);
792 if (err)
793 extent_io_tree_panic(tree, err);
794
795 prealloc = NULL;
796 if (err)
797 goto out;
798 if (state->end <= end) {
799 state = clear_state_bit(tree, state, &bits, wake,
800 changeset);
801 goto next;
802 }
803 goto search_again;
804 }
805 /*
806 * | ---- desired range ---- |
807 * | state |
808 * We need to split the extent, and clear the bit
809 * on the first half
810 */
811 if (state->start <= end && state->end > end) {
812 prealloc = alloc_extent_state_atomic(prealloc);
813 BUG_ON(!prealloc);
814 err = split_state(tree, state, prealloc, end + 1);
815 if (err)
816 extent_io_tree_panic(tree, err);
817
818 if (wake)
819 wake_up(&state->wq);
820
821 clear_state_bit(tree, prealloc, &bits, wake, changeset);
822
823 prealloc = NULL;
824 goto out;
825 }
826
827 state = clear_state_bit(tree, state, &bits, wake, changeset);
828next:
829 if (last_end == (u64)-1)
830 goto out;
831 start = last_end + 1;
832 if (start <= end && state && !need_resched())
833 goto hit_next;
834
835search_again:
836 if (start > end)
837 goto out;
838 spin_unlock(&tree->lock);
839 if (gfpflags_allow_blocking(mask))
840 cond_resched();
841 goto again;
842
843out:
844 spin_unlock(&tree->lock);
845 if (prealloc)
846 free_extent_state(prealloc);
847
848 return 0;
849
850}
851
852static void wait_on_state(struct extent_io_tree *tree,
853 struct extent_state *state)
854 __releases(tree->lock)
855 __acquires(tree->lock)
856{
857 DEFINE_WAIT(wait);
858 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
859 spin_unlock(&tree->lock);
860 schedule();
861 spin_lock(&tree->lock);
862 finish_wait(&state->wq, &wait);
863}
864
865/*
866 * waits for one or more bits to clear on a range in the state tree.
867 * The range [start, end] is inclusive.
868 * The tree lock is taken by this function
869 */
870static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
871 unsigned long bits)
872{
873 struct extent_state *state;
874 struct rb_node *node;
875
876 btrfs_debug_check_extent_io_range(tree, start, end);
877
878 spin_lock(&tree->lock);
879again:
880 while (1) {
881 /*
882 * this search will find all the extents that end after
883 * our range starts
884 */
885 node = tree_search(tree, start);
886process_node:
887 if (!node)
888 break;
889
890 state = rb_entry(node, struct extent_state, rb_node);
891
892 if (state->start > end)
893 goto out;
894
895 if (state->state & bits) {
896 start = state->start;
897 refcount_inc(&state->refs);
898 wait_on_state(tree, state);
899 free_extent_state(state);
900 goto again;
901 }
902 start = state->end + 1;
903
904 if (start > end)
905 break;
906
907 if (!cond_resched_lock(&tree->lock)) {
908 node = rb_next(node);
909 goto process_node;
910 }
911 }
912out:
913 spin_unlock(&tree->lock);
914}
915
916static void set_state_bits(struct extent_io_tree *tree,
917 struct extent_state *state,
918 unsigned *bits, struct extent_changeset *changeset)
919{
920 unsigned bits_to_set = *bits & ~EXTENT_CTLBITS;
921 int ret;
922
923 if (tree->private_data && is_data_inode(tree->private_data))
924 btrfs_set_delalloc_extent(tree->private_data, state, bits);
925
926 if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
927 u64 range = state->end - state->start + 1;
928 tree->dirty_bytes += range;
929 }
930 ret = add_extent_changeset(state, bits_to_set, changeset, 1);
931 BUG_ON(ret < 0);
932 state->state |= bits_to_set;
933}
934
935static void cache_state_if_flags(struct extent_state *state,
936 struct extent_state **cached_ptr,
937 unsigned flags)
938{
939 if (cached_ptr && !(*cached_ptr)) {
940 if (!flags || (state->state & flags)) {
941 *cached_ptr = state;
942 refcount_inc(&state->refs);
943 }
944 }
945}
946
947static void cache_state(struct extent_state *state,
948 struct extent_state **cached_ptr)
949{
950 return cache_state_if_flags(state, cached_ptr,
951 EXTENT_LOCKED | EXTENT_BOUNDARY);
952}
953
954/*
955 * set some bits on a range in the tree. This may require allocations or
956 * sleeping, so the gfp mask is used to indicate what is allowed.
957 *
958 * If any of the exclusive bits are set, this will fail with -EEXIST if some
959 * part of the range already has the desired bits set. The start of the
960 * existing range is returned in failed_start in this case.
961 *
962 * [start, end] is inclusive. This takes the tree lock.
963 */
964
965static int __must_check
966__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
967 unsigned bits, unsigned exclusive_bits,
968 u64 *failed_start, struct extent_state **cached_state,
969 gfp_t mask, struct extent_changeset *changeset)
970{
971 struct extent_state *state;
972 struct extent_state *prealloc = NULL;
973 struct rb_node *node;
974 struct rb_node **p;
975 struct rb_node *parent;
976 int err = 0;
977 u64 last_start;
978 u64 last_end;
979
980 btrfs_debug_check_extent_io_range(tree, start, end);
981 trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);
982
983again:
984 if (!prealloc && gfpflags_allow_blocking(mask)) {
985 /*
986	 * Don't care about allocation failure here because we might end
987	 * up not needing the pre-allocated extent state at all, which
988	 * is the case if the extent states already in the tree
989	 * cover our input range and do not extend outside of it.
990 * If we end up needing a new extent state we allocate it later.
991 */
992 prealloc = alloc_extent_state(mask);
993 }
994
995 spin_lock(&tree->lock);
996 if (cached_state && *cached_state) {
997 state = *cached_state;
998 if (state->start <= start && state->end > start &&
999 extent_state_in_tree(state)) {
1000 node = &state->rb_node;
1001 goto hit_next;
1002 }
1003 }
1004 /*
1005 * this search will find all the extents that end after
1006 * our range starts.
1007 */
1008 node = tree_search_for_insert(tree, start, &p, &parent);
1009 if (!node) {
1010 prealloc = alloc_extent_state_atomic(prealloc);
1011 BUG_ON(!prealloc);
1012 err = insert_state(tree, prealloc, start, end,
1013 &p, &parent, &bits, changeset);
1014 if (err)
1015 extent_io_tree_panic(tree, err);
1016
1017 cache_state(prealloc, cached_state);
1018 prealloc = NULL;
1019 goto out;
1020 }
1021 state = rb_entry(node, struct extent_state, rb_node);
1022hit_next:
1023 last_start = state->start;
1024 last_end = state->end;
1025
1026 /*
1027 * | ---- desired range ---- |
1028 * | state |
1029 *
1030 * Just lock what we found and keep going
1031 */
1032 if (state->start == start && state->end <= end) {
1033 if (state->state & exclusive_bits) {
1034 *failed_start = state->start;
1035 err = -EEXIST;
1036 goto out;
1037 }
1038
1039 set_state_bits(tree, state, &bits, changeset);
1040 cache_state(state, cached_state);
1041 merge_state(tree, state);
1042 if (last_end == (u64)-1)
1043 goto out;
1044 start = last_end + 1;
1045 state = next_state(state);
1046 if (start < end && state && state->start == start &&
1047 !need_resched())
1048 goto hit_next;
1049 goto search_again;
1050 }
1051
1052 /*
1053 * | ---- desired range ---- |
1054 * | state |
1055 * or
1056 * | ------------- state -------------- |
1057 *
1058 * We need to split the extent we found, and may flip bits on
1059 * second half.
1060 *
1061 * If the extent we found extends past our
1062 * range, we just split and search again. It'll get split
1063 * again the next time though.
1064 *
1065 * If the extent we found is inside our range, we set the
1066 * desired bit on it.
1067 */
1068 if (state->start < start) {
1069 if (state->state & exclusive_bits) {
1070 *failed_start = start;
1071 err = -EEXIST;
1072 goto out;
1073 }
1074
1075 /*
1076 * If this extent already has all the bits we want set, then
1077 * skip it, not necessary to split it or do anything with it.
1078 */
1079 if ((state->state & bits) == bits) {
1080 start = state->end + 1;
1081 cache_state(state, cached_state);
1082 goto search_again;
1083 }
1084
1085 prealloc = alloc_extent_state_atomic(prealloc);
1086 BUG_ON(!prealloc);
1087 err = split_state(tree, state, prealloc, start);
1088 if (err)
1089 extent_io_tree_panic(tree, err);
1090
1091 prealloc = NULL;
1092 if (err)
1093 goto out;
1094 if (state->end <= end) {
1095 set_state_bits(tree, state, &bits, changeset);
1096 cache_state(state, cached_state);
1097 merge_state(tree, state);
1098 if (last_end == (u64)-1)
1099 goto out;
1100 start = last_end + 1;
1101 state = next_state(state);
1102 if (start < end && state && state->start == start &&
1103 !need_resched())
1104 goto hit_next;
1105 }
1106 goto search_again;
1107 }
1108 /*
1109 * | ---- desired range ---- |
1110 * | state | or | state |
1111 *
1112 * There's a hole, we need to insert something in it and
1113 * ignore the extent we found.
1114 */
1115 if (state->start > start) {
1116 u64 this_end;
1117 if (end < last_start)
1118 this_end = end;
1119 else
1120 this_end = last_start - 1;
1121
1122 prealloc = alloc_extent_state_atomic(prealloc);
1123 BUG_ON(!prealloc);
1124
1125 /*
1126		 * Avoid freeing 'prealloc' if it can be merged with
1127 * the later extent.
1128 */
1129 err = insert_state(tree, prealloc, start, this_end,
1130 NULL, NULL, &bits, changeset);
1131 if (err)
1132 extent_io_tree_panic(tree, err);
1133
1134 cache_state(prealloc, cached_state);
1135 prealloc = NULL;
1136 start = this_end + 1;
1137 goto search_again;
1138 }
1139 /*
1140 * | ---- desired range ---- |
1141 * | state |
1142 * We need to split the extent, and set the bit
1143 * on the first half
1144 */
1145 if (state->start <= end && state->end > end) {
1146 if (state->state & exclusive_bits) {
1147 *failed_start = start;
1148 err = -EEXIST;
1149 goto out;
1150 }
1151
1152 prealloc = alloc_extent_state_atomic(prealloc);
1153 BUG_ON(!prealloc);
1154 err = split_state(tree, state, prealloc, end + 1);
1155 if (err)
1156 extent_io_tree_panic(tree, err);
1157
1158 set_state_bits(tree, prealloc, &bits, changeset);
1159 cache_state(prealloc, cached_state);
1160 merge_state(tree, prealloc);
1161 prealloc = NULL;
1162 goto out;
1163 }
1164
1165search_again:
1166 if (start > end)
1167 goto out;
1168 spin_unlock(&tree->lock);
1169 if (gfpflags_allow_blocking(mask))
1170 cond_resched();
1171 goto again;
1172
1173out:
1174 spin_unlock(&tree->lock);
1175 if (prealloc)
1176 free_extent_state(prealloc);
1177
1178 return err;
1179
1180}
1181
1182int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1183		   unsigned bits, u64 *failed_start,
1184 struct extent_state **cached_state, gfp_t mask)
1185{
1186 return __set_extent_bit(tree, start, end, bits, 0, failed_start,
1187 cached_state, mask, NULL);
1188}
1189
1190
1191/**
1192 * convert_extent_bit - convert all bits in a given range from one bit to
1193 * another
1194 * @tree: the io tree to search
1195 * @start: the start offset in bytes
1196 * @end: the end offset in bytes (inclusive)
1197 * @bits: the bits to set in this range
1198 * @clear_bits: the bits to clear in this range
1199 * @cached_state: state that we're going to cache
1200 *
1201 * This will go through and set bits for the given range. If any states exist
1202 * already in this range they are set with the given bit and cleared of the
1203 * clear_bits. This is only meant to be used by things that are mergeable, ie
1204 * converting from say DELALLOC to DIRTY. This is not meant to be used with
1205 * boundary bits like LOCK.
1206 *
1207 * All allocations are done with GFP_NOFS.
1208 */
1209int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1210 unsigned bits, unsigned clear_bits,
1211 struct extent_state **cached_state)
1212{
1213 struct extent_state *state;
1214 struct extent_state *prealloc = NULL;
1215 struct rb_node *node;
1216 struct rb_node **p;
1217 struct rb_node *parent;
1218 int err = 0;
1219 u64 last_start;
1220 u64 last_end;
1221 bool first_iteration = true;
1222
1223 btrfs_debug_check_extent_io_range(tree, start, end);
1224 trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
1225 clear_bits);
1226
1227again:
1228 if (!prealloc) {
1229 /*
1230 * Best effort, don't worry if extent state allocation fails
1231 * here for the first iteration. We might have a cached state
1232 * that matches exactly the target range, in which case no
1233 * extent state allocations are needed. We'll only know this
1234 * after locking the tree.
1235 */
1236 prealloc = alloc_extent_state(GFP_NOFS);
1237 if (!prealloc && !first_iteration)
1238 return -ENOMEM;
1239 }
1240
1241 spin_lock(&tree->lock);
1242 if (cached_state && *cached_state) {
1243 state = *cached_state;
1244 if (state->start <= start && state->end > start &&
1245 extent_state_in_tree(state)) {
1246 node = &state->rb_node;
1247 goto hit_next;
1248 }
1249 }
1250
1251 /*
1252 * this search will find all the extents that end after
1253 * our range starts.
1254 */
1255 node = tree_search_for_insert(tree, start, &p, &parent);
1256 if (!node) {
1257 prealloc = alloc_extent_state_atomic(prealloc);
1258 if (!prealloc) {
1259 err = -ENOMEM;
1260 goto out;
1261 }
1262 err = insert_state(tree, prealloc, start, end,
1263 &p, &parent, &bits, NULL);
1264 if (err)
1265 extent_io_tree_panic(tree, err);
1266 cache_state(prealloc, cached_state);
1267 prealloc = NULL;
1268 goto out;
1269 }
1270 state = rb_entry(node, struct extent_state, rb_node);
1271hit_next:
1272 last_start = state->start;
1273 last_end = state->end;
1274
1275 /*
1276 * | ---- desired range ---- |
1277 * | state |
1278 *
1279 * Just lock what we found and keep going
1280 */
1281 if (state->start == start && state->end <= end) {
1282 set_state_bits(tree, state, &bits, NULL);
1283 cache_state(state, cached_state);
1284 state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
1285 if (last_end == (u64)-1)
1286 goto out;
1287 start = last_end + 1;
1288 if (start < end && state && state->start == start &&
1289 !need_resched())
1290 goto hit_next;
1291 goto search_again;
1292 }
1293
1294 /*
1295 * | ---- desired range ---- |
1296 * | state |
1297 * or
1298 * | ------------- state -------------- |
1299 *
1300 * We need to split the extent we found, and may flip bits on
1301 * second half.
1302 *
1303 * If the extent we found extends past our
1304 * range, we just split and search again. It'll get split
1305 * again the next time though.
1306 *
1307 * If the extent we found is inside our range, we set the
1308 * desired bit on it.
1309 */
1310 if (state->start < start) {
1311 prealloc = alloc_extent_state_atomic(prealloc);
1312 if (!prealloc) {
1313 err = -ENOMEM;
1314 goto out;
1315 }
1316 err = split_state(tree, state, prealloc, start);
1317 if (err)
1318 extent_io_tree_panic(tree, err);
1319 prealloc = NULL;
1320 if (err)
1321 goto out;
1322 if (state->end <= end) {
1323 set_state_bits(tree, state, &bits, NULL);
1324 cache_state(state, cached_state);
1325 state = clear_state_bit(tree, state, &clear_bits, 0,
1326 NULL);
1327 if (last_end == (u64)-1)
1328 goto out;
1329 start = last_end + 1;
1330 if (start < end && state && state->start == start &&
1331 !need_resched())
1332 goto hit_next;
1333 }
1334 goto search_again;
1335 }
1336 /*
1337 * | ---- desired range ---- |
1338 * | state | or | state |
1339 *
1340 * There's a hole, we need to insert something in it and
1341 * ignore the extent we found.
1342 */
1343 if (state->start > start) {
1344 u64 this_end;
1345 if (end < last_start)
1346 this_end = end;
1347 else
1348 this_end = last_start - 1;
1349
1350 prealloc = alloc_extent_state_atomic(prealloc);
1351 if (!prealloc) {
1352 err = -ENOMEM;
1353 goto out;
1354 }
1355
1356 /*
1357		 * Avoid freeing 'prealloc' if it can be merged with
1358 * the later extent.
1359 */
1360 err = insert_state(tree, prealloc, start, this_end,
1361 NULL, NULL, &bits, NULL);
1362 if (err)
1363 extent_io_tree_panic(tree, err);
1364 cache_state(prealloc, cached_state);
1365 prealloc = NULL;
1366 start = this_end + 1;
1367 goto search_again;
1368 }
1369 /*
1370 * | ---- desired range ---- |
1371 * | state |
1372 * We need to split the extent, and set the bit
1373 * on the first half
1374 */
1375 if (state->start <= end && state->end > end) {
1376 prealloc = alloc_extent_state_atomic(prealloc);
1377 if (!prealloc) {
1378 err = -ENOMEM;
1379 goto out;
1380 }
1381
1382 err = split_state(tree, state, prealloc, end + 1);
1383 if (err)
1384 extent_io_tree_panic(tree, err);
1385
1386 set_state_bits(tree, prealloc, &bits, NULL);
1387 cache_state(prealloc, cached_state);
1388 clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
1389 prealloc = NULL;
1390 goto out;
1391 }
1392
1393search_again:
1394 if (start > end)
1395 goto out;
1396 spin_unlock(&tree->lock);
1397 cond_resched();
1398 first_iteration = false;
1399 goto again;
1400
1401out:
1402 spin_unlock(&tree->lock);
1403 if (prealloc)
1404 free_extent_state(prealloc);
1405
1406 return err;
1407}
1408
1409/* wrappers around set/clear extent bit */
1410int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1411 unsigned bits, struct extent_changeset *changeset)
1412{
1413 /*
1414 * We don't support EXTENT_LOCKED yet, as current changeset will
1415 * record any bits changed, so for EXTENT_LOCKED case, it will
1416 * either fail with -EEXIST or changeset will record the whole
1417 * range.
1418 */
1419 BUG_ON(bits & EXTENT_LOCKED);
1420
1421 return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
1422 changeset);
1423}
1424
1425int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
1426 unsigned bits)
1427{
1428 return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL,
1429 GFP_NOWAIT, NULL);
1430}
1431
1432int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1433 unsigned bits, int wake, int delete,
1434 struct extent_state **cached)
1435{
1436 return __clear_extent_bit(tree, start, end, bits, wake, delete,
1437 cached, GFP_NOFS, NULL);
1438}
1439
1440int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1441 unsigned bits, struct extent_changeset *changeset)
1442{
1443 /*
1444 * Don't support EXTENT_LOCKED case, same reason as
1445 * set_record_extent_bits().
1446 */
1447 BUG_ON(bits & EXTENT_LOCKED);
1448
1449 return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
1450 changeset);
1451}
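
/*
 * Hedged usage sketch (illustration only): the changeset variants are used
 * when the caller needs to know exactly which ranges actually changed, e.g.
 * for qgroup accounting. extent_changeset_alloc() and extent_changeset_free()
 * are assumed here as the helpers that manage the changeset itself, and
 * EXTENT_QGROUP_RESERVED is used purely as an example bit.
 *
 *	struct extent_changeset *changeset = extent_changeset_alloc();
 *	int ret;
 *
 *	if (!changeset)
 *		return -ENOMEM;
 *	ret = set_record_extent_bits(tree, start, end, EXTENT_QGROUP_RESERVED,
 *				     changeset);
 *	...
 *	extent_changeset_free(changeset);
 */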
1452
1453/*
1454 * Either insert or lock the extent state struct between start and end. If the
1455 * range is already locked, wait for it to be released and then retry.
1456 */
1457int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1458 struct extent_state **cached_state)
1459{
1460 int err;
1461 u64 failed_start;
1462
1463 while (1) {
1464 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
1465 EXTENT_LOCKED, &failed_start,
1466 cached_state, GFP_NOFS, NULL);
1467 if (err == -EEXIST) {
1468 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1469 start = failed_start;
1470 } else
1471 break;
1472 WARN_ON(start > end);
1473 }
1474 return err;
1475}
1476
1477int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1478{
1479 int err;
1480 u64 failed_start;
1481
1482 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1483 &failed_start, NULL, GFP_NOFS, NULL);
1484 if (err == -EEXIST) {
1485 if (failed_start > start)
1486 clear_extent_bit(tree, start, failed_start - 1,
1487 EXTENT_LOCKED, 1, 0, NULL);
1488 return 0;
1489 }
1490 return 1;
1491}
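
/*
 * Hedged usage sketch (illustration only): the usual pattern is to lock a
 * byte range, do the work, and unlock through the cached state so the final
 * clear does not have to search the tree again. unlock_extent_cached() is
 * the unlock helper used elsewhere in this file for that purpose.
 *
 *	struct extent_state *cached_state = NULL;
 *
 *	lock_extent_bits(tree, start, end, &cached_state);
 *	... read or modify the range ...
 *	unlock_extent_cached(tree, start, end, &cached_state);
 */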
1492
1493void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
1494{
1495 unsigned long index = start >> PAGE_SHIFT;
1496 unsigned long end_index = end >> PAGE_SHIFT;
1497 struct page *page;
1498
1499 while (index <= end_index) {
1500 page = find_get_page(inode->i_mapping, index);
1501 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1502 clear_page_dirty_for_io(page);
1503 put_page(page);
1504 index++;
1505 }
1506}
1507
1508void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1509{
1510 unsigned long index = start >> PAGE_SHIFT;
1511 unsigned long end_index = end >> PAGE_SHIFT;
1512 struct page *page;
1513
1514 while (index <= end_index) {
1515 page = find_get_page(inode->i_mapping, index);
1516 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1517 __set_page_dirty_nobuffers(page);
1518 account_page_redirty(page);
1519 put_page(page);
1520 index++;
1521 }
1522}
1523
1524/* find the first state struct with 'bits' set after 'start', and
1525 * return it. tree->lock must be held. NULL will be returned if
1526 * nothing was found after 'start'.
1527 */
1528static struct extent_state *
1529find_first_extent_bit_state(struct extent_io_tree *tree,
1530 u64 start, unsigned bits)
1531{
1532 struct rb_node *node;
1533 struct extent_state *state;
1534
1535 /*
1536 * this search will find all the extents that end after
1537 * our range starts.
1538 */
1539 node = tree_search(tree, start);
1540 if (!node)
1541 goto out;
1542
1543 while (1) {
1544 state = rb_entry(node, struct extent_state, rb_node);
1545 if (state->end >= start && (state->state & bits))
1546 return state;
1547
1548 node = rb_next(node);
1549 if (!node)
1550 break;
1551 }
1552out:
1553 return NULL;
1554}
1555
1556/*
1557 * Find the first offset in the io tree with 'bits' set. Zero is
1558 * returned if we find something, and *start_ret and *end_ret are
1559 * set to reflect the state struct that was found.
1560 *
1561 * Returns 1 if nothing was found.
1562 */
1563int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1564 u64 *start_ret, u64 *end_ret, unsigned bits,
1565 struct extent_state **cached_state)
1566{
1567 struct extent_state *state;
1568 int ret = 1;
1569
1570 spin_lock(&tree->lock);
1571 if (cached_state && *cached_state) {
1572 state = *cached_state;
1573 if (state->end == start - 1 && extent_state_in_tree(state)) {
1574 while ((state = next_state(state)) != NULL) {
1575 if (state->state & bits)
1576 goto got_it;
1577 }
1578 free_extent_state(*cached_state);
1579 *cached_state = NULL;
1580 goto out;
1581 }
1582 free_extent_state(*cached_state);
1583 *cached_state = NULL;
1584 }
1585
1586 state = find_first_extent_bit_state(tree, start, bits);
1587got_it:
1588 if (state) {
1589 cache_state_if_flags(state, cached_state, 0);
1590 *start_ret = state->start;
1591 *end_ret = state->end;
1592 ret = 0;
1593 }
1594out:
1595 spin_unlock(&tree->lock);
1596 return ret;
1597}
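
/*
 * Hedged usage sketch (illustration only): walking every range with a given
 * bit set, restarting the search just past the last hit.
 *
 *	u64 start = 0;
 *	u64 found_start, found_end;
 *
 *	while (!find_first_extent_bit(tree, start, &found_start, &found_end,
 *				      EXTENT_DIRTY, NULL)) {
 *		... process [found_start, found_end] ...
 *		start = found_end + 1;
 *	}
 */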
1598
1599/**
1600 * find_contiguous_extent_bit: find a contiguous area of bits
1601 * @tree - io tree to check
1602 * @start - offset to start the search from
1603 * @start_ret - the first offset we found with the bits set
1604 * @end_ret - the final contiguous range of the bits that were set
1605 * @bits - bits to look for
1606 *
1607 * set_extent_bit and clear_extent_bit can temporarily split contiguous ranges
1608 * to set bits appropriately, and then merge them again. During this time it
1609 * will drop the tree->lock, so use this helper if you want to find the actual
1610 * contiguous area for given bits. We will search to the first bit we find, and
1611 * then walk down the tree until we find a non-contiguous area. The area
1612 * returned will be the full contiguous area with the bits set.
1613 */
1614int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
1615 u64 *start_ret, u64 *end_ret, unsigned bits)
1616{
1617 struct extent_state *state;
1618 int ret = 1;
1619
1620 spin_lock(&tree->lock);
1621 state = find_first_extent_bit_state(tree, start, bits);
1622 if (state) {
1623 *start_ret = state->start;
1624 *end_ret = state->end;
1625 while ((state = next_state(state)) != NULL) {
1626 if (state->start > (*end_ret + 1))
1627 break;
1628 *end_ret = state->end;
1629 }
1630 ret = 0;
1631 }
1632 spin_unlock(&tree->lock);
1633 return ret;
1634}
1635
1636/**
1637 * find_first_clear_extent_bit - find the first range that has @bits not set.
1638 * This range could start before @start.
1639 *
1640 * @tree - the tree to search
1641 * @start - the offset at/after which the found extent should start
1642 * @start_ret - records the beginning of the range
1643 * @end_ret - records the end of the range (inclusive)
1644 * @bits - the set of bits which must be unset
1645 *
1646 * Since an unallocated range is also considered one which doesn't have the
1647 * bits set, it's possible that @end_ret contains -1; this happens when the
1648 * range spans (last_range_end, end of device]. In this case it's up to the
1649 * caller to trim @end_ret to the appropriate size.
1650 */
1651void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
1652 u64 *start_ret, u64 *end_ret, unsigned bits)
1653{
1654 struct extent_state *state;
1655 struct rb_node *node, *prev = NULL, *next;
1656
1657 spin_lock(&tree->lock);
1658
1659 /* Find first extent with bits cleared */
1660 while (1) {
1661 node = __etree_search(tree, start, &next, &prev, NULL, NULL);
1662 if (!node && !next && !prev) {
1663 /*
1664 * Tree is completely empty, send full range and let
1665 * caller deal with it
1666 */
1667 *start_ret = 0;
1668 *end_ret = -1;
1669 goto out;
1670 } else if (!node && !next) {
1671 /*
1672 * We are past the last allocated chunk, set start at
1673 * the end of the last extent.
1674 */
1675 state = rb_entry(prev, struct extent_state, rb_node);
1676 *start_ret = state->end + 1;
1677 *end_ret = -1;
1678 goto out;
1679 } else if (!node) {
1680 node = next;
1681 }
1682 /*
1683 * At this point 'node' either contains 'start' or start is
1684 * before 'node'
1685 */
1686 state = rb_entry(node, struct extent_state, rb_node);
1687
1688 if (in_range(start, state->start, state->end - state->start + 1)) {
1689 if (state->state & bits) {
1690 /*
1691 * |--range with bits sets--|
1692 * |
1693 * start
1694 */
1695 start = state->end + 1;
1696 } else {
1697 /*
1698 * 'start' falls within a range that doesn't
1699 * have the bits set, so take its start as
1700 * the beginning of the desired range
1701 *
1702 * |--range with bits cleared----|
1703 * |
1704 * start
1705 */
1706 *start_ret = state->start;
1707 break;
1708 }
1709 } else {
1710 /*
1711 * |---prev range---|---hole/unset---|---node range---|
1712 * |
1713 * start
1714 *
1715 * or
1716 *
1717 * |---hole/unset--||--first node--|
1718 * 0 |
1719 * start
1720 */
1721 if (prev) {
1722 state = rb_entry(prev, struct extent_state,
1723 rb_node);
1724 *start_ret = state->end + 1;
1725 } else {
1726 *start_ret = 0;
1727 }
1728 break;
1729 }
1730 }
1731
1732 /*
1733 * Find the longest stretch from start until an entry which has the
1734 * bits set
1735 */
1736 while (1) {
1737 state = rb_entry(node, struct extent_state, rb_node);
1738 if (state->end >= start && !(state->state & bits)) {
1739 *end_ret = state->end;
1740 } else {
1741 *end_ret = state->start - 1;
1742 break;
1743 }
1744
1745 node = rb_next(node);
1746 if (!node)
1747 break;
1748 }
1749out:
1750 spin_unlock(&tree->lock);
1751}
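
/*
 * Hedged usage sketch (illustration only): a caller looking for a hole in a
 * device allocation tree has to trim the open-ended result itself, as
 * described above. CHUNK_ALLOCATED is assumed here as the bit tracked in that
 * tree, and 'total_bytes' as the device size.
 *
 *	u64 hole_start, hole_end;
 *
 *	find_first_clear_extent_bit(tree, search_start, &hole_start, &hole_end,
 *				    CHUNK_ALLOCATED);
 *	hole_end = min(hole_end, total_bytes - 1);
 */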
1752
1753/*
1754 * find a contiguous range of bytes in the file marked as delalloc, not
1755 * more than 'max_bytes'. 'start' and 'end' are used to return the range.
1756 *
1757 * true is returned if we find something, false if nothing was in the tree
1758 */
1759bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
1760 u64 *end, u64 max_bytes,
1761 struct extent_state **cached_state)
1762{
1763 struct rb_node *node;
1764 struct extent_state *state;
1765 u64 cur_start = *start;
1766 bool found = false;
1767 u64 total_bytes = 0;
1768
1769 spin_lock(&tree->lock);
1770
1771 /*
1772 * this search will find all the extents that end after
1773 * our range starts.
1774 */
1775 node = tree_search(tree, cur_start);
1776 if (!node) {
1777 *end = (u64)-1;
1778 goto out;
1779 }
1780
1781 while (1) {
1782 state = rb_entry(node, struct extent_state, rb_node);
1783 if (found && (state->start != cur_start ||
1784 (state->state & EXTENT_BOUNDARY))) {
1785 goto out;
1786 }
1787 if (!(state->state & EXTENT_DELALLOC)) {
1788 if (!found)
1789 *end = state->end;
1790 goto out;
1791 }
1792 if (!found) {
1793 *start = state->start;
1794 *cached_state = state;
1795 refcount_inc(&state->refs);
1796 }
1797 found = true;
1798 *end = state->end;
1799 cur_start = state->end + 1;
1800 node = rb_next(node);
1801 total_bytes += state->end - state->start + 1;
1802 if (total_bytes >= max_bytes)
1803 break;
1804 if (!node)
1805 break;
1806 }
1807out:
1808 spin_unlock(&tree->lock);
1809 return found;
1810}
1811
1812static int __process_pages_contig(struct address_space *mapping,
1813 struct page *locked_page,
1814 pgoff_t start_index, pgoff_t end_index,
1815 unsigned long page_ops, pgoff_t *index_ret);
1816
1817static noinline void __unlock_for_delalloc(struct inode *inode,
1818 struct page *locked_page,
1819 u64 start, u64 end)
1820{
1821 unsigned long index = start >> PAGE_SHIFT;
1822 unsigned long end_index = end >> PAGE_SHIFT;
1823
1824 ASSERT(locked_page);
1825 if (index == locked_page->index && end_index == index)
1826 return;
1827
1828 __process_pages_contig(inode->i_mapping, locked_page, index, end_index,
1829 PAGE_UNLOCK, NULL);
1830}
1831
1832static noinline int lock_delalloc_pages(struct inode *inode,
1833 struct page *locked_page,
1834 u64 delalloc_start,
1835 u64 delalloc_end)
1836{
1837 unsigned long index = delalloc_start >> PAGE_SHIFT;
1838 unsigned long index_ret = index;
1839 unsigned long end_index = delalloc_end >> PAGE_SHIFT;
1840 int ret;
1841
1842 ASSERT(locked_page);
1843 if (index == locked_page->index && index == end_index)
1844 return 0;
1845
1846 ret = __process_pages_contig(inode->i_mapping, locked_page, index,
1847 end_index, PAGE_LOCK, &index_ret);
1848 if (ret == -EAGAIN)
1849 __unlock_for_delalloc(inode, locked_page, delalloc_start,
1850 (u64)index_ret << PAGE_SHIFT);
1851 return ret;
1852}
1853
1854/*
1855 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
1856 * more than @max_bytes. @start and @end are used to return the range.
1857 *
1858 * Return: true if we find something
1859 * false if nothing was in the tree
1860 */
1861EXPORT_FOR_TESTS
1862noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
1863 struct page *locked_page, u64 *start,
1864 u64 *end)
1865{
1866 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1867 u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
1868 u64 delalloc_start;
1869 u64 delalloc_end;
1870 bool found;
1871 struct extent_state *cached_state = NULL;
1872 int ret;
1873 int loops = 0;
1874
1875again:
1876 /* step one, find a bunch of delalloc bytes starting at start */
1877 delalloc_start = *start;
1878 delalloc_end = 0;
1879 found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1880 max_bytes, &cached_state);
1881 if (!found || delalloc_end <= *start) {
1882 *start = delalloc_start;
1883 *end = delalloc_end;
1884 free_extent_state(cached_state);
1885 return false;
1886 }
1887
1888 /*
1889 * start comes from the offset of locked_page. We have to lock
1890 * pages in order, so we can't process delalloc bytes before
1891 * locked_page
1892 */
1893 if (delalloc_start < *start)
1894 delalloc_start = *start;
1895
1896 /*
1897 * make sure to limit the number of pages we try to lock down
1898 */
1899 if (delalloc_end + 1 - delalloc_start > max_bytes)
1900 delalloc_end = delalloc_start + max_bytes - 1;
1901
1902 /* step two, lock all the pages after the page that has start */
1903 ret = lock_delalloc_pages(inode, locked_page,
1904 delalloc_start, delalloc_end);
1905 ASSERT(!ret || ret == -EAGAIN);
1906 if (ret == -EAGAIN) {
1907		/* some of the pages are gone, let's avoid looping by
1908 * shortening the size of the delalloc range we're searching
1909 */
1910 free_extent_state(cached_state);
1911 cached_state = NULL;
1912 if (!loops) {
1913 max_bytes = PAGE_SIZE;
1914 loops = 1;
1915 goto again;
1916 } else {
1917 found = false;
1918 goto out_failed;
1919 }
1920 }
1921
1922 /* step three, lock the state bits for the whole range */
1923 lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
1924
1925 /* then test to make sure it is all still delalloc */
1926 ret = test_range_bit(tree, delalloc_start, delalloc_end,
1927 EXTENT_DELALLOC, 1, cached_state);
1928 if (!ret) {
1929 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1930 &cached_state);
1931 __unlock_for_delalloc(inode, locked_page,
1932 delalloc_start, delalloc_end);
1933 cond_resched();
1934 goto again;
1935 }
1936 free_extent_state(cached_state);
1937 *start = delalloc_start;
1938 *end = delalloc_end;
1939out_failed:
1940 return found;
1941}
1942
1943static int __process_pages_contig(struct address_space *mapping,
1944 struct page *locked_page,
1945 pgoff_t start_index, pgoff_t end_index,
1946 unsigned long page_ops, pgoff_t *index_ret)
1947{
1948 unsigned long nr_pages = end_index - start_index + 1;
1949 unsigned long pages_locked = 0;
1950 pgoff_t index = start_index;
1951 struct page *pages[16];
1952 unsigned ret;
1953 int err = 0;
1954 int i;
1955
1956 if (page_ops & PAGE_LOCK) {
1957 ASSERT(page_ops == PAGE_LOCK);
1958 ASSERT(index_ret && *index_ret == start_index);
1959 }
1960
1961 if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
1962 mapping_set_error(mapping, -EIO);
1963
1964 while (nr_pages > 0) {
1965 ret = find_get_pages_contig(mapping, index,
1966 min_t(unsigned long,
1967 nr_pages, ARRAY_SIZE(pages)), pages);
1968 if (ret == 0) {
1969 /*
1970			 * Only when we're asked to lock the pages (PAGE_LOCK)
1971			 * can we find nothing at @index.
1972 */
1973 ASSERT(page_ops & PAGE_LOCK);
1974 err = -EAGAIN;
1975 goto out;
1976 }
1977
1978 for (i = 0; i < ret; i++) {
1979 if (page_ops & PAGE_SET_PRIVATE2)
1980 SetPagePrivate2(pages[i]);
1981
1982 if (locked_page && pages[i] == locked_page) {
1983 put_page(pages[i]);
1984 pages_locked++;
1985 continue;
1986 }
1987 if (page_ops & PAGE_CLEAR_DIRTY)
1988 clear_page_dirty_for_io(pages[i]);
1989 if (page_ops & PAGE_SET_WRITEBACK)
1990 set_page_writeback(pages[i]);
1991 if (page_ops & PAGE_SET_ERROR)
1992 SetPageError(pages[i]);
1993 if (page_ops & PAGE_END_WRITEBACK)
1994 end_page_writeback(pages[i]);
1995 if (page_ops & PAGE_UNLOCK)
1996 unlock_page(pages[i]);
1997 if (page_ops & PAGE_LOCK) {
1998 lock_page(pages[i]);
1999 if (!PageDirty(pages[i]) ||
2000 pages[i]->mapping != mapping) {
2001 unlock_page(pages[i]);
2002 for (; i < ret; i++)
2003 put_page(pages[i]);
2004 err = -EAGAIN;
2005 goto out;
2006 }
2007 }
2008 put_page(pages[i]);
2009 pages_locked++;
2010 }
2011 nr_pages -= ret;
2012 index += ret;
2013 cond_resched();
2014 }
2015out:
2016 if (err && index_ret)
2017 *index_ret = start_index + pages_locked - 1;
2018 return err;
2019}
2020
2021void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
2022 struct page *locked_page,
2023 unsigned clear_bits,
2024 unsigned long page_ops)
2025{
2026 clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL);
2027
2028 __process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
2029 start >> PAGE_SHIFT, end >> PAGE_SHIFT,
2030 page_ops, NULL);
2031}
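
/*
 * Hedged usage sketch (illustration only): error handling in the delalloc
 * write-out paths typically tears down the whole range in one call, clearing
 * the state bits and unlocking/finishing the pages at once.
 *
 *	extent_clear_unlock_delalloc(inode, start, end, locked_page,
 *				     EXTENT_LOCKED | EXTENT_DELALLOC,
 *				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
 *				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
 *				     PAGE_SET_ERROR);
 */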
2032
2033/*
2034 * count the number of bytes in the tree that have the given bit(s)
2035 * set. This can be fairly slow, except for EXTENT_DIRTY which is
2036 * cached. The total number found is returned.
2037 */
2038u64 count_range_bits(struct extent_io_tree *tree,
2039 u64 *start, u64 search_end, u64 max_bytes,
2040 unsigned bits, int contig)
2041{
2042 struct rb_node *node;
2043 struct extent_state *state;
2044 u64 cur_start = *start;
2045 u64 total_bytes = 0;
2046 u64 last = 0;
2047 int found = 0;
2048
2049 if (WARN_ON(search_end <= cur_start))
2050 return 0;
2051
2052 spin_lock(&tree->lock);
2053 if (cur_start == 0 && bits == EXTENT_DIRTY) {
2054 total_bytes = tree->dirty_bytes;
2055 goto out;
2056 }
2057 /*
2058 * this search will find all the extents that end after
2059 * our range starts.
2060 */
2061 node = tree_search(tree, cur_start);
2062 if (!node)
2063 goto out;
2064
2065 while (1) {
2066 state = rb_entry(node, struct extent_state, rb_node);
2067 if (state->start > search_end)
2068 break;
2069 if (contig && found && state->start > last + 1)
2070 break;
2071 if (state->end >= cur_start && (state->state & bits) == bits) {
2072 total_bytes += min(search_end, state->end) + 1 -
2073 max(cur_start, state->start);
2074 if (total_bytes >= max_bytes)
2075 break;
2076 if (!found) {
2077 *start = max(cur_start, state->start);
2078 found = 1;
2079 }
2080 last = state->end;
2081 } else if (contig && found) {
2082 break;
2083 }
2084 node = rb_next(node);
2085 if (!node)
2086 break;
2087 }
2088out:
2089 spin_unlock(&tree->lock);
2090 return total_bytes;
2091}
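
/*
 * Hedged usage sketch (illustration only): the fast path mentioned above is a
 * whole-tree EXTENT_DIRTY count; anything else walks the tree.
 *
 *	u64 start = 0;
 *	u64 dirty;
 *
 *	dirty = count_range_bits(tree, &start, (u64)-1, (u64)-1,
 *				 EXTENT_DIRTY, 0);
 */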
2092
2093/*
2094 * set the failure record for a given byte offset in the tree. If there isn't
2095 * an extent_state that starts at 'start', -ENOENT is returned.
2096 */
2097int set_state_failrec(struct extent_io_tree *tree, u64 start,
2098 struct io_failure_record *failrec)
2099{
2100 struct rb_node *node;
2101 struct extent_state *state;
2102 int ret = 0;
2103
2104 spin_lock(&tree->lock);
2105 /*
2106 * this search will find all the extents that end after
2107 * our range starts.
2108 */
2109 node = tree_search(tree, start);
2110 if (!node) {
2111 ret = -ENOENT;
2112 goto out;
2113 }
2114 state = rb_entry(node, struct extent_state, rb_node);
2115 if (state->start != start) {
2116 ret = -ENOENT;
2117 goto out;
2118 }
2119 state->failrec = failrec;
2120out:
2121 spin_unlock(&tree->lock);
2122 return ret;
2123}
2124
2125struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 start)
2126{
2127 struct rb_node *node;
2128 struct extent_state *state;
2129 struct io_failure_record *failrec;
2130
2131 spin_lock(&tree->lock);
2132 /*
2133 * this search will find all the extents that end after
2134 * our range starts.
2135 */
2136 node = tree_search(tree, start);
2137 if (!node) {
2138 failrec = ERR_PTR(-ENOENT);
2139 goto out;
2140 }
2141 state = rb_entry(node, struct extent_state, rb_node);
2142 if (state->start != start) {
2143 failrec = ERR_PTR(-ENOENT);
2144 goto out;
2145 }
2146
2147 failrec = state->failrec;
2148out:
2149 spin_unlock(&tree->lock);
2150 return failrec;
2151}
2152
2153/*
2154 * searches a range in the state tree for a given mask.
2155 * If 'filled' == 1, this returns 1 only if every extent in the range
2156 * has the bits set. Otherwise, 1 is returned if any bit in the
2157 * range is found set.
2158 */
2159int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
2160 unsigned bits, int filled, struct extent_state *cached)
2161{
2162 struct extent_state *state = NULL;
2163 struct rb_node *node;
2164 int bitset = 0;
2165
2166 spin_lock(&tree->lock);
2167 if (cached && extent_state_in_tree(cached) && cached->start <= start &&
2168 cached->end > start)
2169 node = &cached->rb_node;
2170 else
2171 node = tree_search(tree, start);
2172 while (node && start <= end) {
2173 state = rb_entry(node, struct extent_state, rb_node);
2174
2175 if (filled && state->start > start) {
2176 bitset = 0;
2177 break;
2178 }
2179
2180 if (state->start > end)
2181 break;
2182
2183 if (state->state & bits) {
2184 bitset = 1;
2185 if (!filled)
2186 break;
2187 } else if (filled) {
2188 bitset = 0;
2189 break;
2190 }
2191
2192 if (state->end == (u64)-1)
2193 break;
2194
2195 start = state->end + 1;
2196 if (start > end)
2197 break;
2198 node = rb_next(node);
2199 if (!node) {
2200 if (filled)
2201 bitset = 0;
2202 break;
2203 }
2204 }
2205 spin_unlock(&tree->lock);
2206 return bitset;
2207}
2208
2209/*
2210 * helper function to set a given page up to date if all the
2211 * extents in the tree for that page are up to date
2212 */
2213static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
2214{
2215 u64 start = page_offset(page);
2216 u64 end = start + PAGE_SIZE - 1;
2217 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
2218 SetPageUptodate(page);
2219}
2220
2221int free_io_failure(struct extent_io_tree *failure_tree,
2222 struct extent_io_tree *io_tree,
2223 struct io_failure_record *rec)
2224{
2225 int ret;
2226 int err = 0;
2227
2228 set_state_failrec(failure_tree, rec->start, NULL);
2229 ret = clear_extent_bits(failure_tree, rec->start,
2230 rec->start + rec->len - 1,
2231 EXTENT_LOCKED | EXTENT_DIRTY);
2232 if (ret)
2233 err = ret;
2234
2235 ret = clear_extent_bits(io_tree, rec->start,
2236 rec->start + rec->len - 1,
2237 EXTENT_DAMAGED);
2238 if (ret && !err)
2239 err = ret;
2240
2241 kfree(rec);
2242 return err;
2243}
2244
2245/*
2246 * this bypasses the standard btrfs submit functions deliberately, as
2247 * the standard behavior is to write all copies in a raid setup. here we only
2248 * want to write the one bad copy. so we do the mapping for ourselves and issue
2249 * submit_bio directly.
2250 * to avoid any synchronization issues, wait for the data after writing, which
2251 * actually prevents the read that triggered the error from finishing.
2252 * currently, there can be no more than two copies of every data bit. thus,
2253 * exactly one rewrite is required.
2254 */
2255int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
2256 u64 length, u64 logical, struct page *page,
2257 unsigned int pg_offset, int mirror_num)
2258{
2259 struct bio *bio;
2260 struct btrfs_device *dev;
2261 u64 map_length = 0;
2262 u64 sector;
2263 struct btrfs_bio *bbio = NULL;
2264 int ret;
2265
2266 ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
2267 BUG_ON(!mirror_num);
2268
2269 bio = btrfs_io_bio_alloc(1);
2270 bio->bi_iter.bi_size = 0;
2271 map_length = length;
2272
2273 /*
2274 * Avoid races with device replace and make sure our bbio has devices
2275 * associated to its stripes that don't go away while we are doing the
2276 * read repair operation.
2277 */
2278 btrfs_bio_counter_inc_blocked(fs_info);
2279 if (btrfs_is_parity_mirror(fs_info, logical, length)) {
2280 /*
2281 * Note that we don't use BTRFS_MAP_WRITE because it's supposed
2282 * to update all raid stripes, but here we just want to correct
2283 * bad stripe, thus BTRFS_MAP_READ is abused to only get the bad
2284 * stripe's dev and sector.
2285 */
2286 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
2287 &map_length, &bbio, 0);
2288 if (ret) {
2289 btrfs_bio_counter_dec(fs_info);
2290 bio_put(bio);
2291 return -EIO;
2292 }
2293 ASSERT(bbio->mirror_num == 1);
2294 } else {
2295 ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
2296 &map_length, &bbio, mirror_num);
2297 if (ret) {
2298 btrfs_bio_counter_dec(fs_info);
2299 bio_put(bio);
2300 return -EIO;
2301 }
2302 BUG_ON(mirror_num != bbio->mirror_num);
2303 }
2304
2305 sector = bbio->stripes[bbio->mirror_num - 1].physical >> 9;
2306 bio->bi_iter.bi_sector = sector;
2307 dev = bbio->stripes[bbio->mirror_num - 1].dev;
2308 btrfs_put_bbio(bbio);
2309 if (!dev || !dev->bdev ||
2310 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
2311 btrfs_bio_counter_dec(fs_info);
2312 bio_put(bio);
2313 return -EIO;
2314 }
2315 bio_set_dev(bio, dev->bdev);
2316 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
2317 bio_add_page(bio, page, length, pg_offset);
2318
2319 if (btrfsic_submit_bio_wait(bio)) {
2320 /* try to remap that extent elsewhere? */
2321 btrfs_bio_counter_dec(fs_info);
2322 bio_put(bio);
2323 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
2324 return -EIO;
2325 }
2326
2327 btrfs_info_rl_in_rcu(fs_info,
2328 "read error corrected: ino %llu off %llu (dev %s sector %llu)",
2329 ino, start,
2330 rcu_str_deref(dev->name), sector);
2331 btrfs_bio_counter_dec(fs_info);
2332 bio_put(bio);
2333 return 0;
2334}
2335
2336int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num)
2337{
2338 struct btrfs_fs_info *fs_info = eb->fs_info;
2339 u64 start = eb->start;
2340 int i, num_pages = num_extent_pages(eb);
2341 int ret = 0;
2342
2343 if (sb_rdonly(fs_info->sb))
2344 return -EROFS;
2345
2346 for (i = 0; i < num_pages; i++) {
2347 struct page *p = eb->pages[i];
2348
2349 ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p,
2350 start - page_offset(p), mirror_num);
2351 if (ret)
2352 break;
2353 start += PAGE_SIZE;
2354 }
2355
2356 return ret;
2357}
2358
2359/*
2360 * each time an IO finishes, we do a fast check in the IO failure tree
2361 * to see if we need to process or clean up an io_failure_record
2362 */
2363int clean_io_failure(struct btrfs_fs_info *fs_info,
2364 struct extent_io_tree *failure_tree,
2365 struct extent_io_tree *io_tree, u64 start,
2366 struct page *page, u64 ino, unsigned int pg_offset)
2367{
2368 u64 private;
2369 struct io_failure_record *failrec;
2370 struct extent_state *state;
2371 int num_copies;
2372 int ret;
2373
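	/*
	 * Fast check: if the failure tree has no EXTENT_DIRTY ranges at all,
	 * there is no io_failure_record for this inode to process.
	 */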
2374 private = 0;
2375 ret = count_range_bits(failure_tree, &private, (u64)-1, 1,
2376 EXTENT_DIRTY, 0);
2377 if (!ret)
2378 return 0;
2379
2380 failrec = get_state_failrec(failure_tree, start);
2381 if (IS_ERR(failrec))
2382 return 0;
2383
2384 BUG_ON(!failrec->this_mirror);
2385
2386 if (failrec->in_validation) {
2387 /* there was no real error, just free the record */
2388 btrfs_debug(fs_info,
2389 "clean_io_failure: freeing dummy error at %llu",
2390 failrec->start);
2391 goto out;
2392 }
2393 if (sb_rdonly(fs_info->sb))
2394 goto out;
2395
2396 spin_lock(&io_tree->lock);
2397 state = find_first_extent_bit_state(io_tree,
2398 failrec->start,
2399 EXTENT_LOCKED);
2400 spin_unlock(&io_tree->lock);
2401
2402 if (state && state->start <= failrec->start &&
2403 state->end >= failrec->start + failrec->len - 1) {
2404 num_copies = btrfs_num_copies(fs_info, failrec->logical,
2405 failrec->len);
2406 if (num_copies > 1) {
2407 repair_io_failure(fs_info, ino, start, failrec->len,
2408 failrec->logical, page, pg_offset,
2409 failrec->failed_mirror);
2410 }
2411 }
2412
2413out:
2414 free_io_failure(failure_tree, io_tree, failrec);
2415
2416 return 0;
2417}
2418
2419/*
2420 * Can be called when:
2421 * - holding the extent lock
2422 * - under an ordered extent
2423 * - the inode is being freed
2424 */
2425void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
2426{
2427 struct extent_io_tree *failure_tree = &inode->io_failure_tree;
2428 struct io_failure_record *failrec;
2429 struct extent_state *state, *next;
2430
2431 if (RB_EMPTY_ROOT(&failure_tree->state))
2432 return;
2433
2434 spin_lock(&failure_tree->lock);
2435 state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
2436 while (state) {
2437 if (state->start > end)
2438 break;
2439
2440 ASSERT(state->end <= end);
2441
2442 next = next_state(state);
2443
2444 failrec = state->failrec;
2445 free_extent_state(state);
2446 kfree(failrec);
2447
2448 state = next;
2449 }
2450 spin_unlock(&failure_tree->lock);
2451}
2452
2453static struct io_failure_record *btrfs_get_io_failure_record(struct inode *inode,
2454 u64 start, u64 end)
2455{
2456 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2457 struct io_failure_record *failrec;
2458 struct extent_map *em;
2459 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2460 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2461 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2462 int ret;
2463 u64 logical;
2464
2465 failrec = get_state_failrec(failure_tree, start);
2466 if (!IS_ERR(failrec)) {
2467 btrfs_debug(fs_info,
2468 "Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d",
2469 failrec->logical, failrec->start, failrec->len,
2470 failrec->in_validation);
2471 /*
2472 * when data can be on disk more than twice, add to failrec here
2473 * (e.g. with a list for failed_mirror) to make
2474 * clean_io_failure() clean all those errors at once.
2475 */
2476
2477 return failrec;
2478 }
2479
2480 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2481 if (!failrec)
2482 return ERR_PTR(-ENOMEM);
2483
2484 failrec->start = start;
2485 failrec->len = end - start + 1;
2486 failrec->this_mirror = 0;
2487 failrec->bio_flags = 0;
2488 failrec->in_validation = 0;
2489
2490 read_lock(&em_tree->lock);
2491 em = lookup_extent_mapping(em_tree, start, failrec->len);
2492 if (!em) {
2493 read_unlock(&em_tree->lock);
2494 kfree(failrec);
2495 return ERR_PTR(-EIO);
2496 }
2497
2498 if (em->start > start || em->start + em->len <= start) {
2499 free_extent_map(em);
2500 em = NULL;
2501 }
2502 read_unlock(&em_tree->lock);
2503 if (!em) {
2504 kfree(failrec);
2505 return ERR_PTR(-EIO);
2506 }
2507
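	/*
	 * Compute the logical address of the failed range. For compressed
	 * extents the whole extent has to be read, so use the extent's
	 * block_start and remember the compression type in bio_flags.
	 */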
2508 logical = start - em->start;
2509 logical = em->block_start + logical;
2510 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2511 logical = em->block_start;
2512 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2513 extent_set_compress_type(&failrec->bio_flags, em->compress_type);
2514 }
2515
2516 btrfs_debug(fs_info,
2517 "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
2518 logical, start, failrec->len);
2519
2520 failrec->logical = logical;
2521 free_extent_map(em);
2522
2523 /* Set the bits in the private failure tree */
2524 ret = set_extent_bits(failure_tree, start, end,
2525 EXTENT_LOCKED | EXTENT_DIRTY);
2526 if (ret >= 0) {
2527 ret = set_state_failrec(failure_tree, start, failrec);
2528 /* Set the bits in the inode's tree */
2529 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED);
2530 } else if (ret < 0) {
2531 kfree(failrec);
2532 return ERR_PTR(ret);
2533 }
2534
2535 return failrec;
2536}
2537
2538static bool btrfs_check_repairable(struct inode *inode, bool needs_validation,
2539 struct io_failure_record *failrec,
2540 int failed_mirror)
2541{
2542 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2543 int num_copies;
2544
2545 num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
2546 if (num_copies == 1) {
2547 /*
2548 * we only have a single copy of the data, so don't bother with
2549 * all the retry and error correction code that follows. no
2550 * matter what the error is, it is very likely to persist.
2551 */
2552 btrfs_debug(fs_info,
2553 "Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
2554 num_copies, failrec->this_mirror, failed_mirror);
2555 return false;
2556 }
2557
2558 /*
2559 * there are two premises:
2560 * a) deliver good data to the caller
2561 * b) correct the bad sectors on disk
2562 */
2563 if (needs_validation) {
2564 /*
2565 * to fulfill b), we need to know the exact failing sectors, as
2566 * we don't want to rewrite any more than the failed ones. thus,
2567 * we need separate read requests for the failed bio
2568 *
2569 * if the following BUG_ON triggers, our validation request got
2570 * merged. we need separate requests for our algorithm to work.
2571 */
2572 BUG_ON(failrec->in_validation);
2573 failrec->in_validation = 1;
2574 failrec->this_mirror = failed_mirror;
2575 } else {
2576 /*
2577 * we're ready to fulfill a) and b) together. get a good copy
2578 * of the failed sector and if we succeed, we have set up
2579 * everything for repair_io_failure to do the rest for us.
2580 */
2581 if (failrec->in_validation) {
2582 BUG_ON(failrec->this_mirror != failed_mirror);
2583 failrec->in_validation = 0;
2584 failrec->this_mirror = 0;
2585 }
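		/* Advance to the next mirror, skipping the one that just failed */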
2586 failrec->failed_mirror = failed_mirror;
2587 failrec->this_mirror++;
2588 if (failrec->this_mirror == failed_mirror)
2589 failrec->this_mirror++;
2590 }
2591
2592 if (failrec->this_mirror > num_copies) {
2593 btrfs_debug(fs_info,
2594 "Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
2595 num_copies, failrec->this_mirror, failed_mirror);
2596 return false;
2597 }
2598
2599 return true;
2600}
2601
2602static bool btrfs_io_needs_validation(struct inode *inode, struct bio *bio)
2603{
2604 u64 len = 0;
2605 const u32 blocksize = inode->i_sb->s_blocksize;
2606
2607 /*
2608 * If bi_status is BLK_STS_OK, then this was a checksum error, not an
2609 * I/O error. In this case, we already know exactly which sector was
2610 * bad, so we don't need to validate.
2611 */
2612 if (bio->bi_status == BLK_STS_OK)
2613 return false;
2614
2615 /*
2616 * We need to validate each sector individually if the failed I/O was
2617 * for multiple sectors.
2618 *
2619 * There are a few possible bios that can end up here:
2620 * 1. A buffered read bio, which is not cloned.
2621 * 2. A direct I/O read bio, which is cloned.
2622 * 3. A (buffered or direct) repair bio, which is not cloned.
2623 *
2624 * For cloned bios (case 2), we can get the size from
2625 * btrfs_io_bio->iter; for non-cloned bios (cases 1 and 3), we can get
2626 * it from the bvecs.
2627 */
2628 if (bio_flagged(bio, BIO_CLONED)) {
2629 if (btrfs_io_bio(bio)->iter.bi_size > blocksize)
2630 return true;
2631 } else {
2632 struct bio_vec *bvec;
2633 int i;
2634
2635 bio_for_each_bvec_all(bvec, bio, i) {
2636 len += bvec->bv_len;
2637 if (len > blocksize)
2638 return true;
2639 }
2640 }
2641 return false;
2642}
2643
2644blk_status_t btrfs_submit_read_repair(struct inode *inode,
2645 struct bio *failed_bio, u64 phy_offset,
2646 struct page *page, unsigned int pgoff,
2647 u64 start, u64 end, int failed_mirror,
2648 submit_bio_hook_t *submit_bio_hook)
2649{
2650 struct io_failure_record *failrec;
2651 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2652 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2653 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2654 struct btrfs_io_bio *failed_io_bio = btrfs_io_bio(failed_bio);
2655 const int icsum = phy_offset >> inode->i_sb->s_blocksize_bits;
2656 bool need_validation;
2657 struct bio *repair_bio;
2658 struct btrfs_io_bio *repair_io_bio;
2659 blk_status_t status;
2660
2661 btrfs_debug(fs_info,
2662 "repair read error: read error at %llu", start);
2663
2664 BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
2665
2666 failrec = btrfs_get_io_failure_record(inode, start, end);
2667 if (IS_ERR(failrec))
2668 return errno_to_blk_status(PTR_ERR(failrec));
2669
2670 need_validation = btrfs_io_needs_validation(inode, failed_bio);
2671
2672 if (!btrfs_check_repairable(inode, need_validation, failrec,
2673 failed_mirror)) {
2674 free_io_failure(failure_tree, tree, failrec);
2675 return BLK_STS_IOERR;
2676 }
2677
2678 repair_bio = btrfs_io_bio_alloc(1);
2679 repair_io_bio = btrfs_io_bio(repair_bio);
2680 repair_bio->bi_opf = REQ_OP_READ;
2681 if (need_validation)
2682 repair_bio->bi_opf |= REQ_FAILFAST_DEV;
2683 repair_bio->bi_end_io = failed_bio->bi_end_io;
2684 repair_bio->bi_iter.bi_sector = failrec->logical >> 9;
2685 repair_bio->bi_private = failed_bio->bi_private;
2686
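	/* Carry over the checksum of the failed sector so the repair read can be verified */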
2687 if (failed_io_bio->csum) {
2688 const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
2689
2690 repair_io_bio->csum = repair_io_bio->csum_inline;
2691 memcpy(repair_io_bio->csum,
2692 failed_io_bio->csum + csum_size * icsum, csum_size);
2693 }
2694
2695 bio_add_page(repair_bio, page, failrec->len, pgoff);
2696 repair_io_bio->logical = failrec->start;
2697 repair_io_bio->iter = repair_bio->bi_iter;
2698
2699 btrfs_debug(btrfs_sb(inode->i_sb),
2700"repair read error: submitting new read to mirror %d, in_validation=%d",
2701 failrec->this_mirror, failrec->in_validation);
2702
2703 status = submit_bio_hook(inode, repair_bio, failrec->this_mirror,
2704 failrec->bio_flags);
2705 if (status) {
2706 free_io_failure(failure_tree, tree, failrec);
2707 bio_put(repair_bio);
2708 }
2709 return status;
2710}
2711
2712/* lots and lots of room for performance fixes in the end_bio funcs */
2713
2714void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2715{
2716 int uptodate = (err == 0);
2717 int ret = 0;
2718
2719 btrfs_writepage_endio_finish_ordered(page, start, end, uptodate);
2720
2721 if (!uptodate) {
2722 ClearPageUptodate(page);
2723 SetPageError(page);
2724 ret = err < 0 ? err : -EIO;
2725 mapping_set_error(page->mapping, ret);
2726 }
2727}
2728
2729/*
2730 * after a writepage IO is done, we need to:
2731 * clear the uptodate bits on error
2732 * clear the writeback bits in the extent tree for this IO
2733 * end_page_writeback if the page has no more pending IO
2734 *
2735 * Scheduling is not allowed, so the extent state tree is expected
2736 * to have one and only one object corresponding to this IO.
2737 */
2738static void end_bio_extent_writepage(struct bio *bio)
2739{
2740 int error = blk_status_to_errno(bio->bi_status);
2741 struct bio_vec *bvec;
2742 u64 start;
2743 u64 end;
2744 struct bvec_iter_all iter_all;
2745
2746 ASSERT(!bio_flagged(bio, BIO_CLONED));
2747 bio_for_each_segment_all(bvec, bio, iter_all) {
2748 struct page *page = bvec->bv_page;
2749 struct inode *inode = page->mapping->host;
2750 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2751
2752 /* We always issue full-page writes, but if some block
2753 * in a page fails to write, blk_update_request() will
2754 * advance bv_offset and adjust bv_len to compensate.
2755 * Print a warning for nonzero offsets, and an error
2756 * if they don't add up to a full page. */
2757 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2758 if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2759 btrfs_err(fs_info,
2760 "partial page write in btrfs with offset %u and length %u",
2761 bvec->bv_offset, bvec->bv_len);
2762 else
2763 btrfs_info(fs_info,
2764 "incomplete page write in btrfs with offset %u and length %u",
2765 bvec->bv_offset, bvec->bv_len);
2766 }
2767
2768 start = page_offset(page);
2769 end = start + bvec->bv_offset + bvec->bv_len - 1;
2770
2771 end_extent_writepage(page, error, start, end);
2772 end_page_writeback(page);
2773 }
2774
2775 bio_put(bio);
2776}
2777
2778static void
2779endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2780 int uptodate)
2781{
2782 struct extent_state *cached = NULL;
2783 u64 end = start + len - 1;
2784
2785 if (uptodate && tree->track_uptodate)
2786 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
2787 unlock_extent_cached_atomic(tree, start, end, &cached);
2788}
2789
2790/*
2791 * after a readpage IO is done, we need to:
2792 * clear the uptodate bits on error
2793 * set the uptodate bits if things worked
2794 * set the page up to date if all extents in the tree are uptodate
2795 * clear the lock bit in the extent tree
2796 * unlock the page if there are no other extents locked for it
2797 *
2798 * Scheduling is not allowed, so the extent state tree is expected
2799 * to have one and only one object corresponding to this IO.
2800 */
2801static void end_bio_extent_readpage(struct bio *bio)
2802{
2803 struct bio_vec *bvec;
2804 int uptodate = !bio->bi_status;
2805 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
2806 struct extent_io_tree *tree, *failure_tree;
2807 u64 offset = 0;
2808 u64 start;
2809 u64 end;
2810 u64 len;
2811 u64 extent_start = 0;
2812 u64 extent_len = 0;
2813 int mirror;
2814 int ret;
2815 struct bvec_iter_all iter_all;
2816
2817 ASSERT(!bio_flagged(bio, BIO_CLONED));
2818 bio_for_each_segment_all(bvec, bio, iter_all) {
2819 struct page *page = bvec->bv_page;
2820 struct inode *inode = page->mapping->host;
2821 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2822 bool data_inode = btrfs_ino(BTRFS_I(inode))
2823 != BTRFS_BTREE_INODE_OBJECTID;
2824
2825 btrfs_debug(fs_info,
2826 "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
2827 (u64)bio->bi_iter.bi_sector, bio->bi_status,
2828 io_bio->mirror_num);
2829 tree = &BTRFS_I(inode)->io_tree;
2830 failure_tree = &BTRFS_I(inode)->io_failure_tree;
2831
2832 /* We always issue full-page reads, but if some block
2833 * in a page fails to read, blk_update_request() will
2834 * advance bv_offset and adjust bv_len to compensate.
2835 * Print a warning for nonzero offsets, and an error
2836 * if they don't add up to a full page. */
2837 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2838 if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2839 btrfs_err(fs_info,
2840 "partial page read in btrfs with offset %u and length %u",
2841 bvec->bv_offset, bvec->bv_len);
2842 else
2843 btrfs_info(fs_info,
2844 "incomplete page read in btrfs with offset %u and length %u",
2845 bvec->bv_offset, bvec->bv_len);
2846 }
2847
2848 start = page_offset(page);
2849 end = start + bvec->bv_offset + bvec->bv_len - 1;
2850 len = bvec->bv_len;
2851
2852 mirror = io_bio->mirror_num;
2853 if (likely(uptodate)) {
2854 ret = tree->ops->readpage_end_io_hook(io_bio, offset,
2855 page, start, end,
2856 mirror);
2857 if (ret)
2858 uptodate = 0;
2859 else
2860 clean_io_failure(BTRFS_I(inode)->root->fs_info,
2861 failure_tree, tree, start,
2862 page,
2863 btrfs_ino(BTRFS_I(inode)), 0);
2864 }
2865
2866 if (likely(uptodate))
2867 goto readpage_ok;
2868
2869 if (data_inode) {
2870
2871 /*
2872 * The generic bio_readpage_error handles errors the
2873 * following way: If possible, new read requests are
2874 * created and submitted and will end up in
2875 * end_bio_extent_readpage as well (if we're lucky,
2876 * not in the !uptodate case). In that case it returns
2877 * 0 and we just go on with the next page in our bio.
2878 * If it can't handle the error it will return -EIO and
2879 * we remain responsible for that page.
2880 */
2881 if (!btrfs_submit_read_repair(inode, bio, offset, page,
2882 start - page_offset(page),
2883 start, end, mirror,
2884 tree->ops->submit_bio_hook)) {
2885 uptodate = !bio->bi_status;
2886 offset += len;
2887 continue;
2888 }
2889 } else {
2890 struct extent_buffer *eb;
2891
2892 eb = (struct extent_buffer *)page->private;
2893 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
2894 eb->read_mirror = mirror;
2895 atomic_dec(&eb->io_pages);
2896 if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD,
2897 &eb->bflags))
2898 btree_readahead_hook(eb, -EIO);
2899 }
2900readpage_ok:
2901 if (likely(uptodate)) {
2902 loff_t i_size = i_size_read(inode);
2903 pgoff_t end_index = i_size >> PAGE_SHIFT;
2904 unsigned off;
2905
2906 /* Zero out the end if this page straddles i_size */
2907 off = offset_in_page(i_size);
2908 if (page->index == end_index && off)
2909 zero_user_segment(page, off, PAGE_SIZE);
2910 SetPageUptodate(page);
2911 } else {
2912 ClearPageUptodate(page);
2913 SetPageError(page);
2914 }
2915 unlock_page(page);
2916 offset += len;
2917
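		/*
		 * Batch up contiguous ranges so endio_readpage_release_extent()
		 * can mark them uptodate and unlock them with a single call
		 * instead of one tree operation per bvec.
		 */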
2918 if (unlikely(!uptodate)) {
2919 if (extent_len) {
2920 endio_readpage_release_extent(tree,
2921 extent_start,
2922 extent_len, 1);
2923 extent_start = 0;
2924 extent_len = 0;
2925 }
2926 endio_readpage_release_extent(tree, start,
2927 end - start + 1, 0);
2928 } else if (!extent_len) {
2929 extent_start = start;
2930 extent_len = end + 1 - start;
2931 } else if (extent_start + extent_len == start) {
2932 extent_len += end + 1 - start;
2933 } else {
2934 endio_readpage_release_extent(tree, extent_start,
2935 extent_len, uptodate);
2936 extent_start = start;
2937 extent_len = end + 1 - start;
2938 }
2939 }
2940
2941 if (extent_len)
2942 endio_readpage_release_extent(tree, extent_start, extent_len,
2943 uptodate);
2944 btrfs_io_bio_free_csum(io_bio);
2945 bio_put(bio);
2946}
2947
2948/*
2949 * Initialize the members up to but not including 'bio'. Use after allocating a
2950 * new bio by bio_alloc_bioset as it does not initialize the bytes outside of
2951 * 'bio' because use of __GFP_ZERO is not supported.
2952 */
2953static inline void btrfs_io_bio_init(struct btrfs_io_bio *btrfs_bio)
2954{
2955 memset(btrfs_bio, 0, offsetof(struct btrfs_io_bio, bio));
2956}
2957
2958/*
2959 * The following helpers allocate a bio. As it's backed by a bioset, it'll
2960 * never fail. They return a plain bio; callers can use btrfs_io_bio() to
2961 * get at the wrapping btrfs_io_bio via the appropriate container_of magic.
2962 */
2963struct bio *btrfs_bio_alloc(u64 first_byte)
2964{
2965 struct bio *bio;
2966
2967 bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &btrfs_bioset);
2968 bio->bi_iter.bi_sector = first_byte >> 9;
2969 btrfs_io_bio_init(btrfs_io_bio(bio));
2970 return bio;
2971}
2972
2973struct bio *btrfs_bio_clone(struct bio *bio)
2974{
2975 struct btrfs_io_bio *btrfs_bio;
2976 struct bio *new;
2977
2978 /* Bio allocation backed by a bioset does not fail */
2979 new = bio_clone_fast(bio, GFP_NOFS, &btrfs_bioset);
2980 btrfs_bio = btrfs_io_bio(new);
2981 btrfs_io_bio_init(btrfs_bio);
2982 btrfs_bio->iter = bio->bi_iter;
2983 return new;
2984}
2985
2986struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs)
2987{
2988 struct bio *bio;
2989
2990 /* Bio allocation backed by a bioset does not fail */
2991 bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset);
2992 btrfs_io_bio_init(btrfs_io_bio(bio));
2993 return bio;
2994}
2995
2996struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
2997{
2998 struct bio *bio;
2999 struct btrfs_io_bio *btrfs_bio;
3000
3001 /* this will never fail when it's backed by a bioset */
3002 bio = bio_clone_fast(orig, GFP_NOFS, &btrfs_bioset);
3003 ASSERT(bio);
3004
3005 btrfs_bio = btrfs_io_bio(bio);
3006 btrfs_io_bio_init(btrfs_bio);
3007
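	/* bio_trim() takes 512-byte sectors, so convert the byte offset and size */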
3008 bio_trim(bio, offset >> 9, size >> 9);
3009 btrfs_bio->iter = bio->bi_iter;
3010 return bio;
3011}
3012
3013/*
3014 * @opf: bio REQ_OP_* and REQ_* flags as one value
3015 * @wbc: optional writeback control for io accounting
3016 * @page: page to add to the bio
3017 * @offset: logical byte offset on disk where the IO of this page starts
3018 * @size: portion of the page that we want to write to disk
3019 * @pg_offset: offset of the data within the page
3020 * @bio_ret: must be valid pointer, the bio under construction is stored there
3021 * @end_io_func: end_io callback for new bio
3022 * @mirror_num: desired mirror to read/write
3023 * @prev_bio_flags: flags of previous bio to see if we can merge the current one
3024 * @bio_flags: flags of the current bio to see if we can merge them
3025 * @force_bio_submit: submit the current bio before adding this page to a new one
3026 */
3027static int submit_extent_page(unsigned int opf,
3028 struct writeback_control *wbc,
3029 struct page *page, u64 offset,
3030 size_t size, unsigned long pg_offset,
3031 struct bio **bio_ret,
3032 bio_end_io_t end_io_func,
3033 int mirror_num,
3034 unsigned long prev_bio_flags,
3035 unsigned long bio_flags,
3036 bool force_bio_submit)
3037{
3038 int ret = 0;
3039 struct bio *bio;
3040 size_t page_size = min_t(size_t, size, PAGE_SIZE);
3041 sector_t sector = offset >> 9;
3042 struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree;
3043
3044 ASSERT(bio_ret);
3045
3046 if (*bio_ret) {
3047 bool contig;
3048 bool can_merge = true;
3049
3050 bio = *bio_ret;
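		/*
		 * Pages of a compressed extent are all submitted with the
		 * extent's starting disk byte, so contiguity is checked
		 * against the bio's starting sector rather than its end.
		 */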
3051 if (prev_bio_flags & EXTENT_BIO_COMPRESSED)
3052 contig = bio->bi_iter.bi_sector == sector;
3053 else
3054 contig = bio_end_sector(bio) == sector;
3055
3056 ASSERT(tree->ops);
3057 if (btrfs_bio_fits_in_stripe(page, page_size, bio, bio_flags))
3058 can_merge = false;
3059
3060 if (prev_bio_flags != bio_flags || !contig || !can_merge ||
3061 force_bio_submit ||
3062 bio_add_page(bio, page, page_size, pg_offset) < page_size) {
3063 ret = submit_one_bio(bio, mirror_num, prev_bio_flags);
3064 if (ret < 0) {
3065 *bio_ret = NULL;
3066 return ret;
3067 }
3068 bio = NULL;
3069 } else {
3070 if (wbc)
3071 wbc_account_cgroup_owner(wbc, page, page_size);
3072 return 0;
3073 }
3074 }
3075
3076 bio = btrfs_bio_alloc(offset);
3077 bio_add_page(bio, page, page_size, pg_offset);
3078 bio->bi_end_io = end_io_func;
3079 bio->bi_private = tree;
3080 bio->bi_write_hint = page->mapping->host->i_write_hint;
3081 bio->bi_opf = opf;
3082 if (wbc) {
3083 struct block_device *bdev;
3084
3085 bdev = BTRFS_I(page->mapping->host)->root->fs_info->fs_devices->latest_bdev;
3086 bio_set_dev(bio, bdev);
3087 wbc_init_bio(wbc, bio);
3088 wbc_account_cgroup_owner(wbc, page, page_size);
3089 }
3090
3091 *bio_ret = bio;
3092
3093 return ret;
3094}
3095
3096static void attach_extent_buffer_page(struct extent_buffer *eb,
3097 struct page *page)
3098{
3099 if (!PagePrivate(page))
3100 attach_page_private(page, eb);
3101 else
3102 WARN_ON(page->private != (unsigned long)eb);
3103}
3104
3105void set_page_extent_mapped(struct page *page)
3106{
3107 if (!PagePrivate(page))
3108 attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
3109}
3110
3111static struct extent_map *
3112__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
3113 u64 start, u64 len, get_extent_t *get_extent,
3114 struct extent_map **em_cached)
3115{
3116 struct extent_map *em;
3117
3118 if (em_cached && *em_cached) {
3119 em = *em_cached;
3120 if (extent_map_in_tree(em) && start >= em->start &&
3121 start < extent_map_end(em)) {
3122 refcount_inc(&em->refs);
3123 return em;
3124 }
3125
3126 free_extent_map(em);
3127 *em_cached = NULL;
3128 }
3129
3130 em = get_extent(BTRFS_I(inode), page, pg_offset, start, len);
3131 if (em_cached && !IS_ERR_OR_NULL(em)) {
3132 BUG_ON(*em_cached);
3133 refcount_inc(&em->refs);
3134 *em_cached = em;
3135 }
3136 return em;
3137}
3138/*
3139 * basic readpage implementation. Locked extent state structs are inserted
3140 * into the tree and are removed when the IO is done (by the end_io
3141 * handlers)
3142 * XXX JDM: This needs looking at to ensure proper page locking
3143 * return 0 on success, otherwise return error
3144 */
3145static int __do_readpage(struct page *page,
3146 get_extent_t *get_extent,
3147 struct extent_map **em_cached,
3148 struct bio **bio, int mirror_num,
3149 unsigned long *bio_flags, unsigned int read_flags,
3150 u64 *prev_em_start)
3151{
3152 struct inode *inode = page->mapping->host;
3153 u64 start = page_offset(page);
3154 const u64 end = start + PAGE_SIZE - 1;
3155 u64 cur = start;
3156 u64 extent_offset;
3157 u64 last_byte = i_size_read(inode);
3158 u64 block_start;
3159 u64 cur_end;
3160 struct extent_map *em;
3161 int ret = 0;
3162 int nr = 0;
3163 size_t pg_offset = 0;
3164 size_t iosize;
3165 size_t disk_io_size;
3166 size_t blocksize = inode->i_sb->s_blocksize;
3167 unsigned long this_bio_flag = 0;
3168 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
3169
3170 set_page_extent_mapped(page);
3171
3172 if (!PageUptodate(page)) {
3173 if (cleancache_get_page(page) == 0) {
3174 BUG_ON(blocksize != PAGE_SIZE);
3175 unlock_extent(tree, start, end);
3176 goto out;
3177 }
3178 }
3179
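	/*
	 * If this is the last page of the file, zero the part of the page
	 * beyond i_size so stale data is never exposed to userspace.
	 */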
3180 if (page->index == last_byte >> PAGE_SHIFT) {
3181 char *userpage;
3182 size_t zero_offset = offset_in_page(last_byte);
3183
3184 if (zero_offset) {
3185 iosize = PAGE_SIZE - zero_offset;
3186 userpage = kmap_atomic(page);
3187 memset(userpage + zero_offset, 0, iosize);
3188 flush_dcache_page(page);
3189 kunmap_atomic(userpage);
3190 }
3191 }
3192 while (cur <= end) {
3193 bool force_bio_submit = false;
3194 u64 offset;
3195
3196 if (cur >= last_byte) {
3197 char *userpage;
3198 struct extent_state *cached = NULL;
3199
3200 iosize = PAGE_SIZE - pg_offset;
3201 userpage = kmap_atomic(page);
3202 memset(userpage + pg_offset, 0, iosize);
3203 flush_dcache_page(page);
3204 kunmap_atomic(userpage);
3205 set_extent_uptodate(tree, cur, cur + iosize - 1,
3206 &cached, GFP_NOFS);
3207 unlock_extent_cached(tree, cur,
3208 cur + iosize - 1, &cached);
3209 break;
3210 }
3211 em = __get_extent_map(inode, page, pg_offset, cur,
3212 end - cur + 1, get_extent, em_cached);
3213 if (IS_ERR_OR_NULL(em)) {
3214 SetPageError(page);
3215 unlock_extent(tree, cur, end);
3216 break;
3217 }
3218 extent_offset = cur - em->start;
3219 BUG_ON(extent_map_end(em) <= cur);
3220 BUG_ON(end < cur);
3221
3222 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
3223 this_bio_flag |= EXTENT_BIO_COMPRESSED;
3224 extent_set_compress_type(&this_bio_flag,
3225 em->compress_type);
3226 }
3227
3228 iosize = min(extent_map_end(em) - cur, end - cur + 1);
3229 cur_end = min(extent_map_end(em) - 1, end);
3230 iosize = ALIGN(iosize, blocksize);
3231 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
3232 disk_io_size = em->block_len;
3233 offset = em->block_start;
3234 } else {
3235 offset = em->block_start + extent_offset;
3236 disk_io_size = iosize;
3237 }
3238 block_start = em->block_start;
3239 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3240 block_start = EXTENT_MAP_HOLE;
3241
3242 /*
3243 * If we have a file range that points to a compressed extent
3244 * and it's followed by a consecutive file range that points to
3245 * the same compressed extent (possibly with a different
3246 * offset and/or length, so it either points to the whole extent
3247 * or only part of it), we must make sure we do not submit a
3248 * single bio to populate the pages for the 2 ranges because
3249 * this makes the compressed extent read zero out the pages
3250 * belonging to the 2nd range. Imagine the following scenario:
3251 *
3252 * File layout
3253 * [0 - 8K] [8K - 24K]
3254 * | |
3255 * | |
3256 * points to extent X, points to extent X,
3257 * offset 4K, length of 8K offset 0, length 16K
3258 *
3259 * [extent X, compressed length = 4K uncompressed length = 16K]
3260 *
3261 * If the bio to read the compressed extent covers both ranges,
3262 * it will decompress extent X into the pages belonging to the
3263 * first range and then it will stop, zeroing out the remaining
3264 * pages that belong to the other range that points to extent X.
3265 * So here we make sure we submit 2 bios, one for the first
3266 * range and another one for the second range. Both will target
3267 * the same physical extent from disk, but we can't currently
3268 * make the compressed bio endio callback populate the pages
3269 * for both ranges because each compressed bio is tightly
3270 * coupled with a single extent map, and each range can have
3271 * an extent map with a different offset value relative to the
3272 * uncompressed data of our extent and different lengths. This
3273 * is a corner case so we prioritize correctness over
3274 * non-optimal behavior (submitting 2 bios for the same extent).
3275 */
3276 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
3277 prev_em_start && *prev_em_start != (u64)-1 &&
3278 *prev_em_start != em->start)
3279 force_bio_submit = true;
3280
3281 if (prev_em_start)
3282 *prev_em_start = em->start;
3283
3284 free_extent_map(em);
3285 em = NULL;
3286
3287 /* we've found a hole, just zero and go on */
3288 if (block_start == EXTENT_MAP_HOLE) {
3289 char *userpage;
3290 struct extent_state *cached = NULL;
3291
3292 userpage = kmap_atomic(page);
3293 memset(userpage + pg_offset, 0, iosize);
3294 flush_dcache_page(page);
3295 kunmap_atomic(userpage);
3296
3297 set_extent_uptodate(tree, cur, cur + iosize - 1,
3298 &cached, GFP_NOFS);
3299 unlock_extent_cached(tree, cur,
3300 cur + iosize - 1, &cached);
3301 cur = cur + iosize;
3302 pg_offset += iosize;
3303 continue;
3304 }
3305 /* the get_extent function already copied into the page */
3306 if (test_range_bit(tree, cur, cur_end,
3307 EXTENT_UPTODATE, 1, NULL)) {
3308 check_page_uptodate(tree, page);
3309 unlock_extent(tree, cur, cur + iosize - 1);
3310 cur = cur + iosize;
3311 pg_offset += iosize;
3312 continue;
3313 }
3314 /* we have an inline extent but it didn't get marked up
3315 * to date. Error out
3316 */
3317 if (block_start == EXTENT_MAP_INLINE) {
3318 SetPageError(page);
3319 unlock_extent(tree, cur, cur + iosize - 1);
3320 cur = cur + iosize;
3321 pg_offset += iosize;
3322 continue;
3323 }
3324
3325 ret = submit_extent_page(REQ_OP_READ | read_flags, NULL,
3326 page, offset, disk_io_size,
3327 pg_offset, bio,
3328 end_bio_extent_readpage, mirror_num,
3329 *bio_flags,
3330 this_bio_flag,
3331 force_bio_submit);
3332 if (!ret) {
3333 nr++;
3334 *bio_flags = this_bio_flag;
3335 } else {
3336 SetPageError(page);
3337 unlock_extent(tree, cur, cur + iosize - 1);
3338 goto out;
3339 }
3340 cur = cur + iosize;
3341 pg_offset += iosize;
3342 }
3343out:
3344 if (!nr) {
3345 if (!PageError(page))
3346 SetPageUptodate(page);
3347 unlock_page(page);
3348 }
3349 return ret;
3350}
3351
3352static inline void contiguous_readpages(struct page *pages[], int nr_pages,
3353 u64 start, u64 end,
3354 struct extent_map **em_cached,
3355 struct bio **bio,
3356 unsigned long *bio_flags,
3357 u64 *prev_em_start)
3358{
3359 struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
3360 int index;
3361
3362 btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
3363
3364 for (index = 0; index < nr_pages; index++) {
3365 __do_readpage(pages[index], btrfs_get_extent, em_cached,
3366 bio, 0, bio_flags, REQ_RAHEAD, prev_em_start);
3367 put_page(pages[index]);
3368 }
3369}
3370
3371static int __extent_read_full_page(struct page *page,
3372 get_extent_t *get_extent,
3373 struct bio **bio, int mirror_num,
3374 unsigned long *bio_flags,
3375 unsigned int read_flags)
3376{
3377 struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
3378 u64 start = page_offset(page);
3379 u64 end = start + PAGE_SIZE - 1;
3380 int ret;
3381
3382 btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
3383
3384 ret = __do_readpage(page, get_extent, NULL, bio, mirror_num,
3385 bio_flags, read_flags, NULL);
3386 return ret;
3387}
3388
3389int extent_read_full_page(struct page *page, get_extent_t *get_extent,
3390 int mirror_num)
3391{
3392 struct bio *bio = NULL;
3393 unsigned long bio_flags = 0;
3394 int ret;
3395
3396 ret = __extent_read_full_page(page, get_extent, &bio, mirror_num,
3397 &bio_flags, 0);
3398 if (bio)
3399 ret = submit_one_bio(bio, mirror_num, bio_flags);
3400 return ret;
3401}
3402
3403static void update_nr_written(struct writeback_control *wbc,
3404 unsigned long nr_written)
3405{
3406 wbc->nr_to_write -= nr_written;
3407}
3408
3409/*
3410 * helper for __extent_writepage, doing all of the delayed allocation setup.
3411 *
3412 * This returns 1 if the btrfs_run_delalloc_range function did all the work required
3413 * to write the page (copy into inline extent). In this case the IO has
3414 * been started and the page is already unlocked.
3415 *
3416 * This returns 0 if all went well (page still locked)
3417 * This returns < 0 if there were errors (page still locked)
3418 */
3419static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
3420 struct page *page, struct writeback_control *wbc,
3421 u64 delalloc_start, unsigned long *nr_written)
3422{
3423 u64 page_end = delalloc_start + PAGE_SIZE - 1;
3424 bool found;
3425 u64 delalloc_to_write = 0;
3426 u64 delalloc_end = 0;
3427 int ret;
3428 int page_started = 0;
3429
3430
3431 while (delalloc_end < page_end) {
3432 found = find_lock_delalloc_range(&inode->vfs_inode, page,
3433 &delalloc_start,
3434 &delalloc_end);
3435 if (!found) {
3436 delalloc_start = delalloc_end + 1;
3437 continue;
3438 }
3439 ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
3440 delalloc_end, &page_started, nr_written, wbc);
3441 if (ret) {
3442 SetPageError(page);
3443 /*
3444 * btrfs_run_delalloc_range should return < 0 for error
3445 * but just in case, we use > 0 here meaning the IO is
3446 * started, so we don't want to return > 0 unless
3447 * things are going well.
3448 */
3449 return ret < 0 ? ret : -EIO;
3450 }
3451 /*
3452 * delalloc_end is already one less than the total length, so
3453 * we don't subtract one from PAGE_SIZE
3454 */
3455 delalloc_to_write += (delalloc_end - delalloc_start +
3456 PAGE_SIZE) >> PAGE_SHIFT;
3457 delalloc_start = delalloc_end + 1;
3458 }
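	/*
	 * Delalloc can expand how much we actually have to write, so raise
	 * wbc->nr_to_write accordingly, capped at a sane threshold so one
	 * call does not write back too much at once.
	 */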
3459 if (wbc->nr_to_write < delalloc_to_write) {
3460 int thresh = 8192;
3461
3462 if (delalloc_to_write < thresh * 2)
3463 thresh = delalloc_to_write;
3464 wbc->nr_to_write = min_t(u64, delalloc_to_write,
3465 thresh);
3466 }
3467
3468 /* did btrfs_run_delalloc_range already unlock the page and
3469 * start the IO?
3470 */
3471 if (page_started) {
3472 /*
3473 * we've unlocked the page, so we can't update
3474 * the mapping's writeback index, just update
3475 * nr_to_write.
3476 */
3477 wbc->nr_to_write -= *nr_written;
3478 return 1;
3479 }
3480
3481 return 0;
3482}
3483
3484/*
3485 * helper for __extent_writepage. This calls the writepage start hooks,
3486 * and does the loop to map the page into extents and bios.
3487 *
3488 * We return 1 if the IO is started and the page is unlocked,
3489 * 0 if all went well (page still locked)
3490 * < 0 if there were errors (page still locked)
3491 */
3492static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
3493 struct page *page,
3494 struct writeback_control *wbc,
3495 struct extent_page_data *epd,
3496 loff_t i_size,
3497 unsigned long nr_written,
3498 int *nr_ret)
3499{
3500 struct extent_io_tree *tree = &inode->io_tree;
3501 u64 start = page_offset(page);
3502 u64 page_end = start + PAGE_SIZE - 1;
3503 u64 end;
3504 u64 cur = start;
3505 u64 extent_offset;
3506 u64 block_start;
3507 u64 iosize;
3508 struct extent_map *em;
3509 size_t pg_offset = 0;
3510 size_t blocksize;
3511 int ret = 0;
3512 int nr = 0;
3513 const unsigned int write_flags = wbc_to_write_flags(wbc);
3514 bool compressed;
3515
3516 ret = btrfs_writepage_cow_fixup(page, start, page_end);
3517 if (ret) {
3518 /* Fixup worker will requeue */
3519 redirty_page_for_writepage(wbc, page);
3520 update_nr_written(wbc, nr_written);
3521 unlock_page(page);
3522 return 1;
3523 }
3524
3525 /*
3526 * we don't want to touch the inode after unlocking the page,
3527 * so we update the mapping writeback index now
3528 */
3529 update_nr_written(wbc, nr_written + 1);
3530
3531 end = page_end;
3532 blocksize = inode->vfs_inode.i_sb->s_blocksize;
3533
3534 while (cur <= end) {
3535 u64 em_end;
3536 u64 offset;
3537
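		/*
		 * The rest of the page is beyond i_size; finish the ordered
		 * extent for that range and stop.
		 */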
3538 if (cur >= i_size) {
3539 btrfs_writepage_endio_finish_ordered(page, cur,
3540 page_end, 1);
3541 break;
3542 }
3543 em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1);
3544 if (IS_ERR_OR_NULL(em)) {
3545 SetPageError(page);
3546 ret = PTR_ERR_OR_ZERO(em);
3547 break;
3548 }
3549
3550 extent_offset = cur - em->start;
3551 em_end = extent_map_end(em);
3552 BUG_ON(em_end <= cur);
3553 BUG_ON(end < cur);
3554 iosize = min(em_end - cur, end - cur + 1);
3555 iosize = ALIGN(iosize, blocksize);
3556 offset = em->block_start + extent_offset;
3557 block_start = em->block_start;
3558 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3559 free_extent_map(em);
3560 em = NULL;
3561
3562 /*
3563 * compressed and inline extents are written through other
3564 * paths in the FS
3565 */
3566 if (compressed || block_start == EXTENT_MAP_HOLE ||
3567 block_start == EXTENT_MAP_INLINE) {
3568 if (compressed)
3569 nr++;
3570 else
3571 btrfs_writepage_endio_finish_ordered(page, cur,
3572 cur + iosize - 1, 1);
3573 cur += iosize;
3574 pg_offset += iosize;
3575 continue;
3576 }
3577
3578 btrfs_set_range_writeback(tree, cur, cur + iosize - 1);
3579 if (!PageWriteback(page)) {
3580 btrfs_err(inode->root->fs_info,
3581 "page %lu not writeback, cur %llu end %llu",
3582 page->index, cur, end);
3583 }
3584
3585 ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
3586 page, offset, iosize, pg_offset,
3587 &epd->bio,
3588 end_bio_extent_writepage,
3589 0, 0, 0, false);
3590 if (ret) {
3591 SetPageError(page);
3592 if (PageWriteback(page))
3593 end_page_writeback(page);
3594 }
3595
3596 cur = cur + iosize;
3597 pg_offset += iosize;
3598 nr++;
3599 }
3600 *nr_ret = nr;
3601 return ret;
3602}
3603
3604/*
3605 * the writepage semantics are similar to regular writepage. extent
3606 * records are inserted to lock ranges in the tree, and as dirty areas
3607 * are found, they are marked writeback. Then the lock bits are removed
3608 * and the end_io handler clears the writeback ranges
3609 *
3610 * Return 0 if everything goes well.
3611 * Return <0 for error.
3612 */
3613static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3614 struct extent_page_data *epd)
3615{
3616 struct inode *inode = page->mapping->host;
3617 u64 start = page_offset(page);
3618 u64 page_end = start + PAGE_SIZE - 1;
3619 int ret;
3620 int nr = 0;
3621 size_t pg_offset;
3622 loff_t i_size = i_size_read(inode);
3623 unsigned long end_index = i_size >> PAGE_SHIFT;
3624 unsigned long nr_written = 0;
3625
3626 trace___extent_writepage(page, inode, wbc);
3627
3628 WARN_ON(!PageLocked(page));
3629
3630 ClearPageError(page);
3631
3632 pg_offset = offset_in_page(i_size);
3633 if (page->index > end_index ||
3634 (page->index == end_index && !pg_offset)) {
3635 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
3636 unlock_page(page);
3637 return 0;
3638 }
3639
3640 if (page->index == end_index) {
3641 char *userpage;
3642
3643 userpage = kmap_atomic(page);
3644 memset(userpage + pg_offset, 0,
3645 PAGE_SIZE - pg_offset);
3646 kunmap_atomic(userpage);
3647 flush_dcache_page(page);
3648 }
3649
3650 set_page_extent_mapped(page);
3651
3652 if (!epd->extent_locked) {
3653 ret = writepage_delalloc(BTRFS_I(inode), page, wbc, start,
3654 &nr_written);
3655 if (ret == 1)
3656 return 0;
3657 if (ret)
3658 goto done;
3659 }
3660
3661 ret = __extent_writepage_io(BTRFS_I(inode), page, wbc, epd, i_size,
3662 nr_written, &nr);
3663 if (ret == 1)
3664 return 0;
3665
3666done:
3667 if (nr == 0) {
3668 /* make sure the mapping tag for page dirty gets cleared */
3669 set_page_writeback(page);
3670 end_page_writeback(page);
3671 }
3672 if (PageError(page)) {
3673 ret = ret < 0 ? ret : -EIO;
3674 end_extent_writepage(page, ret, start, page_end);
3675 }
3676 unlock_page(page);
3677 ASSERT(ret <= 0);
3678 return ret;
3679}
3680
3681void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3682{
3683 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
3684 TASK_UNINTERRUPTIBLE);
3685}
3686
3687static void end_extent_buffer_writeback(struct extent_buffer *eb)
3688{
3689 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3690 smp_mb__after_atomic();
3691 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3692}
3693
3694/*
3695 * Lock eb pages and flush the bio if we can't get the locks
3696 *
3697 * Return 0 if nothing went wrong
3698 * Return >0 is the same as 0, except the bio is not submitted
3699 * Return <0 if something went wrong, no page is locked
3700 */
3701static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb,
3702 struct extent_page_data *epd)
3703{
3704 struct btrfs_fs_info *fs_info = eb->fs_info;
3705 int i, num_pages, failed_page_nr;
3706 int flush = 0;
3707 int ret = 0;
3708
3709 if (!btrfs_try_tree_write_lock(eb)) {
3710 ret = flush_write_bio(epd);
3711 if (ret < 0)
3712 return ret;
3713 flush = 1;
3714 btrfs_tree_lock(eb);
3715 }
3716
3717 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3718 btrfs_tree_unlock(eb);
3719 if (!epd->sync_io)
3720 return 0;
3721 if (!flush) {
3722 ret = flush_write_bio(epd);
3723 if (ret < 0)
3724 return ret;
3725 flush = 1;
3726 }
3727 while (1) {
3728 wait_on_extent_buffer_writeback(eb);
3729 btrfs_tree_lock(eb);
3730 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3731 break;
3732 btrfs_tree_unlock(eb);
3733 }
3734 }
3735
3736 /*
3737 * We need to do this to prevent races with anyone checking whether the eb
3738 * is under IO, since we can end up having no IO bits set for a short
3739 * period of time.
3740 */
3741 spin_lock(&eb->refs_lock);
3742 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3743 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3744 spin_unlock(&eb->refs_lock);
3745 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3746 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
3747 -eb->len,
3748 fs_info->dirty_metadata_batch);
3749 ret = 1;
3750 } else {
3751 spin_unlock(&eb->refs_lock);
3752 }
3753
3754 btrfs_tree_unlock(eb);
3755
3756 if (!ret)
3757 return ret;
3758
3759 num_pages = num_extent_pages(eb);
3760 for (i = 0; i < num_pages; i++) {
3761 struct page *p = eb->pages[i];
3762
3763 if (!trylock_page(p)) {
3764 if (!flush) {
3765 int err;
3766
3767 err = flush_write_bio(epd);
3768 if (err < 0) {
3769 ret = err;
3770 failed_page_nr = i;
3771 goto err_unlock;
3772 }
3773 flush = 1;
3774 }
3775 lock_page(p);
3776 }
3777 }
3778
3779 return ret;
3780err_unlock:
3781 /* Unlock already locked pages */
3782 for (i = 0; i < failed_page_nr; i++)
3783 unlock_page(eb->pages[i]);
3784 /*
3785 * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
3786 * Also set back EXTENT_BUFFER_DIRTY so that future write attempts on this
3787 * eb can be made, undoing everything done before.
3788 */
3789 btrfs_tree_lock(eb);
3790 spin_lock(&eb->refs_lock);
3791 set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3792 end_extent_buffer_writeback(eb);
3793 spin_unlock(&eb->refs_lock);
3794 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len,
3795 fs_info->dirty_metadata_batch);
3796 btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3797 btrfs_tree_unlock(eb);
3798 return ret;
3799}
3800
3801static void set_btree_ioerr(struct page *page)
3802{
3803 struct extent_buffer *eb = (struct extent_buffer *)page->private;
3804 struct btrfs_fs_info *fs_info;
3805
3806 SetPageError(page);
3807 if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
3808 return;
3809
3810 /*
3811 * If we error out, we should add back the dirty_metadata_bytes
3812 * to make it consistent.
3813 */
3814 fs_info = eb->fs_info;
3815 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
3816 eb->len, fs_info->dirty_metadata_batch);
3817
3818 /*
3819 * If writeback for a btree extent that doesn't belong to a log tree
3820 * failed, increment the counter transaction->eb_write_errors.
3821 * We do this because while the transaction is running and before it's
3822 * committing (when we call filemap_fdata[write|wait]_range against
3823 * the btree inode), we might have
3824 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
3825 * returns an error or an error happens during writeback, when we're
3826 * committing the transaction we wouldn't know about it, since the pages
3827 * can be no longer dirty nor marked anymore for writeback (if a
3828 * subsequent modification to the extent buffer didn't happen before the
3829 * transaction commit), which makes filemap_fdata[write|wait]_range not
3830 * able to find the pages tagged with SetPageError at transaction
3831 * commit time. So if this happens we must abort the transaction,
3832 * otherwise we commit a super block with btree roots that point to
3833 * btree nodes/leafs whose content on disk is invalid - either garbage
3834 * or the content of some node/leaf from a past generation that got
3835 * cowed or deleted and is no longer valid.
3836 *
3837 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
3838 * not be enough - we need to distinguish between log tree extents vs
3839 * non-log tree extents, and the next filemap_fdatawait_range() call
3840 * will catch and clear such errors in the mapping - and that call might
3841 * be from a log sync and not from a transaction commit. Also, checking
3842 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
3843 * not done and would not be reliable - the eb might have been released
3844 * from memory and reading it back again means that flag would not be
3845 * set (since it's a runtime flag, not persisted on disk).
3846 *
3847 * Using the flags below in the btree inode also makes us achieve the
3848 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started
3849 * writeback for all dirty pages and before filemap_fdatawait_range()
3850 * is called, the writeback for all dirty pages had already finished
3851 * with errors - because we were not using AS_EIO/AS_ENOSPC,
3852 * filemap_fdatawait_range() would return success, as it could not know
3853 * that writeback errors happened (the pages were no longer tagged for
3854 * writeback).
3855 */
3856 switch (eb->log_index) {
3857 case -1:
3858 set_bit(BTRFS_FS_BTREE_ERR, &eb->fs_info->flags);
3859 break;
3860 case 0:
3861 set_bit(BTRFS_FS_LOG1_ERR, &eb->fs_info->flags);
3862 break;
3863 case 1:
3864 set_bit(BTRFS_FS_LOG2_ERR, &eb->fs_info->flags);
3865 break;
3866 default:
3867 BUG(); /* unexpected, logic error */
3868 }
3869}
3870
3871static void end_bio_extent_buffer_writepage(struct bio *bio)
3872{
3873 struct bio_vec *bvec;
3874 struct extent_buffer *eb;
3875 int done;
3876 struct bvec_iter_all iter_all;
3877
3878 ASSERT(!bio_flagged(bio, BIO_CLONED));
3879 bio_for_each_segment_all(bvec, bio, iter_all) {
3880 struct page *page = bvec->bv_page;
3881
3882 eb = (struct extent_buffer *)page->private;
3883 BUG_ON(!eb);
3884 done = atomic_dec_and_test(&eb->io_pages);
3885
3886 if (bio->bi_status ||
3887 test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
3888 ClearPageUptodate(page);
3889 set_btree_ioerr(page);
3890 }
3891
3892 end_page_writeback(page);
3893
3894 if (!done)
3895 continue;
3896
3897 end_extent_buffer_writeback(eb);
3898 }
3899
3900 bio_put(bio);
3901}
3902
3903static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
3904 struct writeback_control *wbc,
3905 struct extent_page_data *epd)
3906{
3907 u64 offset = eb->start;
3908 u32 nritems;
3909 int i, num_pages;
3910 unsigned long start, end;
3911 unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
3912 int ret = 0;
3913
3914 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
3915 num_pages = num_extent_pages(eb);
3916 atomic_set(&eb->io_pages, num_pages);
3917
3918 /* set btree blocks beyond nritems to 0 to avoid stale content. */
3919 nritems = btrfs_header_nritems(eb);
3920 if (btrfs_header_level(eb) > 0) {
3921 end = btrfs_node_key_ptr_offset(nritems);
3922
3923 memzero_extent_buffer(eb, end, eb->len - end);
3924 } else {
3925 /*
3926 * leaf:
3927 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
3928 */
3929 start = btrfs_item_nr_offset(nritems);
3930 end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb);
3931 memzero_extent_buffer(eb, start, end - start);
3932 }
3933
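	/*
	 * Submit one write per eb page. On failure, drop the io_pages
	 * references for the pages that were not submitted so the eb
	 * writeback can still complete.
	 */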
3934 for (i = 0; i < num_pages; i++) {
3935 struct page *p = eb->pages[i];
3936
3937 clear_page_dirty_for_io(p);
3938 set_page_writeback(p);
3939 ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
3940 p, offset, PAGE_SIZE, 0,
3941 &epd->bio,
3942 end_bio_extent_buffer_writepage,
3943 0, 0, 0, false);
3944 if (ret) {
3945 set_btree_ioerr(p);
3946 if (PageWriteback(p))
3947 end_page_writeback(p);
3948 if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3949 end_extent_buffer_writeback(eb);
3950 ret = -EIO;
3951 break;
3952 }
3953 offset += PAGE_SIZE;
3954 update_nr_written(wbc, 1);
3955 unlock_page(p);
3956 }
3957
3958 if (unlikely(ret)) {
3959 for (; i < num_pages; i++) {
3960 struct page *p = eb->pages[i];
3961 clear_page_dirty_for_io(p);
3962 unlock_page(p);
3963 }
3964 }
3965
3966 return ret;
3967}
3968
3969int btree_write_cache_pages(struct address_space *mapping,
3970 struct writeback_control *wbc)
3971{
3972 struct extent_buffer *eb, *prev_eb = NULL;
3973 struct extent_page_data epd = {
3974 .bio = NULL,
3975 .extent_locked = 0,
3976 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3977 };
3978 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3979 int ret = 0;
3980 int done = 0;
3981 int nr_to_write_done = 0;
3982 struct pagevec pvec;
3983 int nr_pages;
3984 pgoff_t index;
3985 pgoff_t end; /* Inclusive */
3986 int scanned = 0;
3987 xa_mark_t tag;
3988
3989 pagevec_init(&pvec);
3990 if (wbc->range_cyclic) {
3991 index = mapping->writeback_index; /* Start from prev offset */
3992 end = -1;
3993 /*
3994 * Start from the beginning does not need to cycle over the
3995 * range, mark it as scanned.
3996 */
3997 scanned = (index == 0);
3998 } else {
3999 index = wbc->range_start >> PAGE_SHIFT;
4000 end = wbc->range_end >> PAGE_SHIFT;
4001 scanned = 1;
4002 }
4003 if (wbc->sync_mode == WB_SYNC_ALL)
4004 tag = PAGECACHE_TAG_TOWRITE;
4005 else
4006 tag = PAGECACHE_TAG_DIRTY;
4007retry:
4008 if (wbc->sync_mode == WB_SYNC_ALL)
4009 tag_pages_for_writeback(mapping, index, end);
4010 while (!done && !nr_to_write_done && (index <= end) &&
4011 (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
4012 tag))) {
4013 unsigned i;
4014
4015 for (i = 0; i < nr_pages; i++) {
4016 struct page *page = pvec.pages[i];
4017
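			/*
			 * The unlocked PagePrivate() test is only a cheap
			 * filter; recheck it under mapping->private_lock
			 * before dereferencing page->private.
			 */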
4018 if (!PagePrivate(page))
4019 continue;
4020
4021 spin_lock(&mapping->private_lock);
4022 if (!PagePrivate(page)) {
4023 spin_unlock(&mapping->private_lock);
4024 continue;
4025 }
4026
4027 eb = (struct extent_buffer *)page->private;
4028
4029 /*
4030 * Shouldn't happen and normally this would be a BUG_ON
4031 * but no sense in crashing the users box for something
4032 * we can survive anyway.
4033 */
4034 if (WARN_ON(!eb)) {
4035 spin_unlock(&mapping->private_lock);
4036 continue;
4037 }
4038
4039 if (eb == prev_eb) {
4040 spin_unlock(&mapping->private_lock);
4041 continue;
4042 }
4043
4044 ret = atomic_inc_not_zero(&eb->refs);
4045 spin_unlock(&mapping->private_lock);
4046 if (!ret)
4047 continue;
4048
4049 prev_eb = eb;
4050 ret = lock_extent_buffer_for_io(eb, &epd);
4051 if (!ret) {
4052 free_extent_buffer(eb);
4053 continue;
4054 } else if (ret < 0) {
4055 done = 1;
4056 free_extent_buffer(eb);
4057 break;
4058 }
4059
4060 ret = write_one_eb(eb, wbc, &epd);
4061 if (ret) {
4062 done = 1;
4063 free_extent_buffer(eb);
4064 break;
4065 }
4066 free_extent_buffer(eb);
4067
4068 /*
4069 * the filesystem may choose to bump up nr_to_write.
4070 * We have to make sure to honor the new nr_to_write
4071 * at any time
4072 */
4073 nr_to_write_done = wbc->nr_to_write <= 0;
4074 }
4075 pagevec_release(&pvec);
4076 cond_resched();
4077 }
4078 if (!scanned && !done) {
4079 /*
4080 * We hit the last page and there is more work to be done: wrap
4081 * back to the start of the file
4082 */
4083 scanned = 1;
4084 index = 0;
4085 goto retry;
4086 }
4087 ASSERT(ret <= 0);
4088 if (ret < 0) {
4089 end_write_bio(&epd, ret);
4090 return ret;
4091 }
4092 /*
4093 * If something went wrong, don't allow any metadata write bio to be
4094 * submitted.
4095 *
4096 * This would prevent use-after-free if we had dirty pages not
4097 * cleaned up, which can still happen with fuzzed images.
4098 *
4099 * - Bad extent tree
4100 * Allows existing tree blocks to be allocated for other trees.
4101 *
4102 * - Log tree operations
4103 * Existing tree blocks get allocated to the log tree, which bumps
4104 * their generation, then they get cleaned in tree re-balance.
4105 * Such tree blocks will not be written back, since they're clean,
4106 * thus no WRITTEN flag is set.
4107 * And after the log writes back, these tree blocks are not traced
4108 * by any dirty extent_io_tree.
4109 *
4110 * - Offending tree block gets re-dirtied from its original owner
4111 * Since it has a bumped generation and no WRITTEN flag, it can be
4112 * reused without COWing. This tree block will not be traced by
4113 * btrfs_transaction::dirty_pages.
4114 *
4115 * Now such a dirty tree block will not be cleaned by any dirty
4116 * extent io tree. Thus we don't want to submit such a wild eb
4117 * if the fs already has errors.
4118 */
4119 if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
4120 ret = flush_write_bio(&epd);
4121 } else {
4122 ret = -EROFS;
4123 end_write_bio(&epd, ret);
4124 }
4125 return ret;
4126}
4127
4128/**
4129 * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them
4130 * @mapping: address space structure to write
4131 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
4132 * @epd: holds the state for this write, including the bio being built
4133 *
4134 * If a page is already under I/O, write_cache_pages() skips it, even
4135 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
4136 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
4137 * and msync() need to guarantee that all the data which was dirty at the time
4138 * the call was made get new I/O started against them. If wbc->sync_mode is
4139 * WB_SYNC_ALL then we were called for data integrity and we must wait for
4140 * existing IO to complete.
4141 */
4142static int extent_write_cache_pages(struct address_space *mapping,
4143 struct writeback_control *wbc,
4144 struct extent_page_data *epd)
4145{
4146 struct inode *inode = mapping->host;
4147 int ret = 0;
4148 int done = 0;
4149 int nr_to_write_done = 0;
4150 struct pagevec pvec;
4151 int nr_pages;
4152 pgoff_t index;
4153 pgoff_t end; /* Inclusive */
4154 pgoff_t done_index;
4155 int range_whole = 0;
4156 int scanned = 0;
4157 xa_mark_t tag;
4158
4159 /*
4160 * We have to hold onto the inode so that ordered extents can do their
4161 * work when the IO finishes. The alternative to this is failing to add
4162 * an ordered extent if the igrab() fails there and that is a huge pain
4163 * to deal with, so instead just hold onto the inode throughout the
4164 * writepages operation. If it fails here we are freeing up the inode
4165 * anyway and we'd rather not waste our time writing out stuff that is
4166 * just going to be truncated.
4167 */
4168 if (!igrab(inode))
4169 return 0;
4170
4171 pagevec_init(&pvec);
4172 if (wbc->range_cyclic) {
4173 index = mapping->writeback_index; /* Start from prev offset */
4174 end = -1;
4175 /*
4176 * Starting from the beginning does not need to cycle over the
4177 * range, so mark it as scanned.
4178 */
4179 scanned = (index == 0);
4180 } else {
4181 index = wbc->range_start >> PAGE_SHIFT;
4182 end = wbc->range_end >> PAGE_SHIFT;
4183 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
4184 range_whole = 1;
4185 scanned = 1;
4186 }
4187
4188 /*
4189 * We do the tagged writepage as long as the snapshot flush bit is set
4190 * and we are the first one to do the filemap_flush() on this inode.
4191 *
4192 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
4193 * not race in and drop the bit.
4194 */
4195 if (range_whole && wbc->nr_to_write == LONG_MAX &&
4196 test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
4197 &BTRFS_I(inode)->runtime_flags))
4198 wbc->tagged_writepages = 1;
4199
4200 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
4201 tag = PAGECACHE_TAG_TOWRITE;
4202 else
4203 tag = PAGECACHE_TAG_DIRTY;
4204retry:
4205 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
4206 tag_pages_for_writeback(mapping, index, end);
4207 done_index = index;
4208 while (!done && !nr_to_write_done && (index <= end) &&
4209 (nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
4210 &index, end, tag))) {
4211 unsigned i;
4212
4213 for (i = 0; i < nr_pages; i++) {
4214 struct page *page = pvec.pages[i];
4215
4216 done_index = page->index + 1;
4217 /*
4218 * At this point we hold neither the i_pages lock nor
4219 * the page lock: the page may be truncated or
4220 * invalidated (changing page->mapping to NULL),
4221 * or even swizzled back from swapper_space to
4222 * tmpfs file mapping
4223 */
4224 if (!trylock_page(page)) {
4225 ret = flush_write_bio(epd);
4226 BUG_ON(ret < 0);
4227 lock_page(page);
4228 }
4229
4230 if (unlikely(page->mapping != mapping)) {
4231 unlock_page(page);
4232 continue;
4233 }
4234
4235 if (wbc->sync_mode != WB_SYNC_NONE) {
4236 if (PageWriteback(page)) {
4237 ret = flush_write_bio(epd);
4238 BUG_ON(ret < 0);
4239 }
4240 wait_on_page_writeback(page);
4241 }
4242
4243 if (PageWriteback(page) ||
4244 !clear_page_dirty_for_io(page)) {
4245 unlock_page(page);
4246 continue;
4247 }
4248
4249 ret = __extent_writepage(page, wbc, epd);
4250 if (ret < 0) {
4251 done = 1;
4252 break;
4253 }
4254
4255 /*
4256 * The filesystem may choose to bump up nr_to_write.
4257 * We have to make sure to honor the new nr_to_write
4258 * at all times.
4259 */
4260 nr_to_write_done = wbc->nr_to_write <= 0;
4261 }
4262 pagevec_release(&pvec);
4263 cond_resched();
4264 }
4265 if (!scanned && !done) {
4266 /*
4267 * We hit the last page and there is more work to be done: wrap
4268 * back to the start of the file
4269 */
4270 scanned = 1;
4271 index = 0;
4272
4273 /*
4274 * If we're looping we could run into a page that is locked by a
4275 * writer and that writer could be waiting on writeback for a
4276 * page in our current bio, and thus deadlock, so flush the
4277 * write bio here.
4278 */
4279 ret = flush_write_bio(epd);
4280 if (!ret)
4281 goto retry;
4282 }
4283
4284 if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
4285 mapping->writeback_index = done_index;
4286
4287 btrfs_add_delayed_iput(inode);
4288 return ret;
4289}
4290
4291int extent_write_full_page(struct page *page, struct writeback_control *wbc)
4292{
4293 int ret;
4294 struct extent_page_data epd = {
4295 .bio = NULL,
4296 .extent_locked = 0,
4297 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
4298 };
4299
4300 ret = __extent_writepage(page, wbc, &epd);
4301 ASSERT(ret <= 0);
4302 if (ret < 0) {
4303 end_write_bio(&epd, ret);
4304 return ret;
4305 }
4306
4307 ret = flush_write_bio(&epd);
4308 ASSERT(ret <= 0);
4309 return ret;
4310}
4311
4312int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
4313 int mode)
4314{
4315 int ret = 0;
4316 struct address_space *mapping = inode->i_mapping;
4317 struct page *page;
4318 unsigned long nr_pages = (end - start + PAGE_SIZE) >>
4319 PAGE_SHIFT;
4320
4321 struct extent_page_data epd = {
4322 .bio = NULL,
4323 .extent_locked = 1,
4324 .sync_io = mode == WB_SYNC_ALL,
4325 };
4326 struct writeback_control wbc_writepages = {
4327 .sync_mode = mode,
4328 .nr_to_write = nr_pages * 2,
4329 .range_start = start,
4330 .range_end = end + 1,
4331 /* We're called from an async helper function */
4332 .punt_to_cgroup = 1,
4333 .no_cgroup_owner = 1,
4334 };
4335
4336 wbc_attach_fdatawrite_inode(&wbc_writepages, inode);
4337 while (start <= end) {
4338 page = find_get_page(mapping, start >> PAGE_SHIFT);
4339 if (clear_page_dirty_for_io(page))
4340 ret = __extent_writepage(page, &wbc_writepages, &epd);
4341 else {
4342 btrfs_writepage_endio_finish_ordered(page, start,
4343 start + PAGE_SIZE - 1, 1);
4344 unlock_page(page);
4345 }
4346 put_page(page);
4347 start += PAGE_SIZE;
4348 }
4349
4350 ASSERT(ret <= 0);
4351 if (ret == 0)
4352 ret = flush_write_bio(&epd);
4353 else
4354 end_write_bio(&epd, ret);
4355
4356 wbc_detach_inode(&wbc_writepages);
4357 return ret;
4358}
4359
4360int extent_writepages(struct address_space *mapping,
4361 struct writeback_control *wbc)
4362{
4363 int ret = 0;
4364 struct extent_page_data epd = {
4365 .bio = NULL,
4366 .extent_locked = 0,
4367 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
4368 };
4369
4370 ret = extent_write_cache_pages(mapping, wbc, &epd);
4371 ASSERT(ret <= 0);
4372 if (ret < 0) {
4373 end_write_bio(&epd, ret);
4374 return ret;
4375 }
4376 ret = flush_write_bio(&epd);
4377 return ret;
4378}
4379
4380void extent_readahead(struct readahead_control *rac)
4381{
4382 struct bio *bio = NULL;
4383 unsigned long bio_flags = 0;
4384 struct page *pagepool[16];
4385 struct extent_map *em_cached = NULL;
4386 u64 prev_em_start = (u64)-1;
4387 int nr;
4388
4389 while ((nr = readahead_page_batch(rac, pagepool))) {
4390 u64 contig_start = page_offset(pagepool[0]);
4391 u64 contig_end = page_offset(pagepool[nr - 1]) + PAGE_SIZE - 1;
4392
4393 ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end);
4394
4395 contiguous_readpages(pagepool, nr, contig_start, contig_end,
4396 &em_cached, &bio, &bio_flags, &prev_em_start);
4397 }
4398
4399 if (em_cached)
4400 free_extent_map(em_cached);
4401
4402 if (bio) {
4403 if (submit_one_bio(bio, 0, bio_flags))
4404 return;
4405 }
4406}
4407
4408/*
4409 * Basic invalidatepage code; this waits on any locked or writeback
4410 * ranges corresponding to the page, and then deletes any extent state
4411 * records from the tree.
4412 */
4413int extent_invalidatepage(struct extent_io_tree *tree,
4414 struct page *page, unsigned long offset)
4415{
4416 struct extent_state *cached_state = NULL;
4417 u64 start = page_offset(page);
4418 u64 end = start + PAGE_SIZE - 1;
4419 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
4420
4421 start += ALIGN(offset, blocksize);
4422 if (start > end)
4423 return 0;
4424
4425 lock_extent_bits(tree, start, end, &cached_state);
4426 wait_on_page_writeback(page);
4427 clear_extent_bit(tree, start, end, EXTENT_LOCKED | EXTENT_DELALLOC |
4428 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state);
4429 return 0;
4430}
4431
4432/*
4433 * A helper for releasepage; this tests for areas of the page that
4434 * are locked or under IO and drops the related state bits if it is safe
4435 * to drop the page.
4436 */
4437static int try_release_extent_state(struct extent_io_tree *tree,
4438 struct page *page, gfp_t mask)
4439{
4440 u64 start = page_offset(page);
4441 u64 end = start + PAGE_SIZE - 1;
4442 int ret = 1;
4443
4444 if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
4445 ret = 0;
4446 } else {
4447 /*
4448 * at this point we can safely clear everything except the
4449 * locked bit and the nodatasum bit
4450 */
4451 ret = __clear_extent_bit(tree, start, end,
4452 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
4453 0, 0, NULL, mask, NULL);
4454
4455 /* If clear_extent_bit() failed due to ENOMEM,
4456 * we can't allow the release to continue.
4457 */
4458 if (ret < 0)
4459 ret = 0;
4460 else
4461 ret = 1;
4462 }
4463 return ret;
4464}
4465
4466/*
4467 * a helper for releasepage. As long as there are no locked extents
4468 * in the range corresponding to the page, both state records and extent
4469 * map records are removed
4470 */
4471int try_release_extent_mapping(struct page *page, gfp_t mask)
4472{
4473 struct extent_map *em;
4474 u64 start = page_offset(page);
4475 u64 end = start + PAGE_SIZE - 1;
4476 struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
4477 struct extent_io_tree *tree = &btrfs_inode->io_tree;
4478 struct extent_map_tree *map = &btrfs_inode->extent_tree;
4479
4480 if (gfpflags_allow_blocking(mask) &&
4481 page->mapping->host->i_size > SZ_16M) {
4482 u64 len;
4483 while (start <= end) {
4484 struct btrfs_fs_info *fs_info;
4485 u64 cur_gen;
4486
4487 len = end - start + 1;
4488 write_lock(&map->lock);
4489 em = lookup_extent_mapping(map, start, len);
4490 if (!em) {
4491 write_unlock(&map->lock);
4492 break;
4493 }
4494 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
4495 em->start != start) {
4496 write_unlock(&map->lock);
4497 free_extent_map(em);
4498 break;
4499 }
4500 if (test_range_bit(tree, em->start,
4501 extent_map_end(em) - 1,
4502 EXTENT_LOCKED, 0, NULL))
4503 goto next;
4504 /*
4505 * If it's not in the list of modified extents, used
4506 * by a fast fsync, we can remove it. If it's being
4507 * logged we can safely remove it since fsync took an
4508 * extra reference on the em.
4509 */
4510 if (list_empty(&em->list) ||
4511 test_bit(EXTENT_FLAG_LOGGING, &em->flags))
4512 goto remove_em;
4513 /*
4514 * If it's in the list of modified extents, remove it
4515 * only if its generation is older than the current one,
4516 * in which case we don't need it for a fast fsync.
4517 * Otherwise don't remove it, we could be racing with an
4518 * ongoing fast fsync that could miss the new extent.
4519 */
4520 fs_info = btrfs_inode->root->fs_info;
4521 spin_lock(&fs_info->trans_lock);
4522 cur_gen = fs_info->generation;
4523 spin_unlock(&fs_info->trans_lock);
4524 if (em->generation >= cur_gen)
4525 goto next;
4526remove_em:
4527 /*
4528 * We only remove extent maps that are not in the list of
4529 * modified extents or that are in the list but with a
4530 * generation lower than the current generation, so there
4531 * is no need to set the full fsync flag on the inode (it
4532 * hurts the fsync performance for workloads with a data
4533 * size that exceeds or is close to the system's memory).
4534 */
4535 remove_extent_mapping(map, em);
4536 /* once for the rb tree */
4537 free_extent_map(em);
4538next:
4539 start = extent_map_end(em);
4540 write_unlock(&map->lock);
4541
4542 /* once for us */
4543 free_extent_map(em);
4544
4545 cond_resched(); /* Allow large-extent preemption. */
4546 }
4547 }
4548 return try_release_extent_state(tree, page, mask);
4549}
4550
4551/*
4552 * Helper function for fiemap; it doesn't want to see any holes.
4553 * This maps until we find something past 'last'.
4554 */
4555static struct extent_map *get_extent_skip_holes(struct inode *inode,
4556 u64 offset, u64 last)
4557{
4558 u64 sectorsize = btrfs_inode_sectorsize(inode);
4559 struct extent_map *em;
4560 u64 len;
4561
4562 if (offset >= last)
4563 return NULL;
4564
4565 while (1) {
4566 len = last - offset;
4567 if (len == 0)
4568 break;
4569 len = ALIGN(len, sectorsize);
4570 em = btrfs_get_extent_fiemap(BTRFS_I(inode), offset, len);
4571 if (IS_ERR_OR_NULL(em))
4572 return em;
4573
4574 /* if this isn't a hole return it */
4575 if (em->block_start != EXTENT_MAP_HOLE)
4576 return em;
4577
4578 /* this is a hole, advance to the next extent */
4579 offset = extent_map_end(em);
4580 free_extent_map(em);
4581 if (offset >= last)
4582 break;
4583 }
4584 return NULL;
4585}
4586
4587/*
4588 * Cache for the previous fiemap extent.
4589 *
4590 * Will be used for merging fiemap extents.
4591 */
4592struct fiemap_cache {
4593 u64 offset;
4594 u64 phys;
4595 u64 len;
4596 u32 flags;
4597 bool cached;
4598};
4599
4600/*
4601 * Helper to submit fiemap extent.
4602 *
4603 * Will try to merge the current fiemap extent, specified by @offset,
4604 * @phys, @len and @flags, with the cached one.
4605 * Only when the merge fails will the cached one be submitted as a
4606 * fiemap extent.
4607 *
4608 * Return value is the same as fiemap_fill_next_extent().
4609 */
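/*
 * Worked example (illustrative numbers only): suppose the cache holds
 * offset=0, phys=1M, len=4K and the next extent is offset=4K,
 * phys=1M+4K, len=8K with identical flags. Both the logical and the
 * physical ranges are contiguous, so the cache simply grows to len=12K
 * and nothing is emitted yet. If the next extent instead started at
 * offset=32K, the cached 12K extent would be submitted through
 * fiemap_fill_next_extent() and the new extent would take its place in
 * the cache.
 */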
4610static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
4611 struct fiemap_cache *cache,
4612 u64 offset, u64 phys, u64 len, u32 flags)
4613{
4614 int ret = 0;
4615
4616 if (!cache->cached)
4617 goto assign;
4618
4619 /*
4620 * Sanity check: extent_fiemap() should have ensured that the new
4621 * fiemap extent won't overlap with the cached one.
4622 * Not recoverable.
4623 *
4624 * NOTE: Physical addresses can overlap, due to compression.
4625 */
4626 if (cache->offset + cache->len > offset) {
4627 WARN_ON(1);
4628 return -EINVAL;
4629 }
4630
4631 /*
4632 * Only merge fiemap extents if
4633 * 1) Their logical addresses are contiguous
4634 *
4635 * 2) Their physical addresses are contiguous
4636 * So truly compressed (physical size smaller than logical size)
4637 * extents won't get merged with each other
4638 *
4639 * 3) They share the same flags except FIEMAP_EXTENT_LAST
4640 * So a regular extent won't get merged with a prealloc extent
4641 */
4642 if (cache->offset + cache->len == offset &&
4643 cache->phys + cache->len == phys &&
4644 (cache->flags & ~FIEMAP_EXTENT_LAST) ==
4645 (flags & ~FIEMAP_EXTENT_LAST)) {
4646 cache->len += len;
4647 cache->flags |= flags;
4648 goto try_submit_last;
4649 }
4650
4651 /* Not mergeable, need to submit cached one */
4652 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4653 cache->len, cache->flags);
4654 cache->cached = false;
4655 if (ret)
4656 return ret;
4657assign:
4658 cache->cached = true;
4659 cache->offset = offset;
4660 cache->phys = phys;
4661 cache->len = len;
4662 cache->flags = flags;
4663try_submit_last:
4664 if (cache->flags & FIEMAP_EXTENT_LAST) {
4665 ret = fiemap_fill_next_extent(fieinfo, cache->offset,
4666 cache->phys, cache->len, cache->flags);
4667 cache->cached = false;
4668 }
4669 return ret;
4670}
4671
4672/*
4673 * Emit last fiemap cache
4674 *
4675 * The last fiemap cache may still be cached in the following case:
4676 * 0 4k 8k
4677 * |<- Fiemap range ->|
4678 * |<------------ First extent ----------->|
4679 *
4680 * In this case, the first extent range will be cached but not emitted.
4681 * So we must emit it before ending extent_fiemap().
4682 */
4683static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
4684 struct fiemap_cache *cache)
4685{
4686 int ret;
4687
4688 if (!cache->cached)
4689 return 0;
4690
4691 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4692 cache->len, cache->flags);
4693 cache->cached = false;
4694 if (ret > 0)
4695 ret = 0;
4696 return ret;
4697}
4698
4699int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4700 u64 start, u64 len)
4701{
4702 int ret = 0;
4703 u64 off = start;
4704 u64 max = start + len;
4705 u32 flags = 0;
4706 u32 found_type;
4707 u64 last;
4708 u64 last_for_get_extent = 0;
4709 u64 disko = 0;
4710 u64 isize = i_size_read(inode);
4711 struct btrfs_key found_key;
4712 struct extent_map *em = NULL;
4713 struct extent_state *cached_state = NULL;
4714 struct btrfs_path *path;
4715 struct btrfs_root *root = BTRFS_I(inode)->root;
4716 struct fiemap_cache cache = { 0 };
4717 struct ulist *roots;
4718 struct ulist *tmp_ulist;
4719 int end = 0;
4720 u64 em_start = 0;
4721 u64 em_len = 0;
4722 u64 em_end = 0;
4723
4724 if (len == 0)
4725 return -EINVAL;
4726
4727 path = btrfs_alloc_path();
4728 if (!path)
4729 return -ENOMEM;
4730 path->leave_spinning = 1;
4731
4732 roots = ulist_alloc(GFP_KERNEL);
4733 tmp_ulist = ulist_alloc(GFP_KERNEL);
4734 if (!roots || !tmp_ulist) {
4735 ret = -ENOMEM;
4736 goto out_free_ulist;
4737 }
4738
4739 start = round_down(start, btrfs_inode_sectorsize(inode));
4740 len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
4741
4742 /*
4743 * lookup the last file extent. We're not using i_size here
4744 * because there might be preallocation past i_size
4745 */
4746 ret = btrfs_lookup_file_extent(NULL, root, path,
4747 btrfs_ino(BTRFS_I(inode)), -1, 0);
4748 if (ret < 0) {
4749 goto out_free_ulist;
4750 } else {
4751 WARN_ON(!ret);
4752 if (ret == 1)
4753 ret = 0;
4754 }
4755
4756 path->slots[0]--;
4757 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
4758 found_type = found_key.type;
4759
4760 /* No extents, but there might be delalloc bits */
4761 if (found_key.objectid != btrfs_ino(BTRFS_I(inode)) ||
4762 found_type != BTRFS_EXTENT_DATA_KEY) {
4763 /* have to trust i_size as the end */
4764 last = (u64)-1;
4765 last_for_get_extent = isize;
4766 } else {
4767 /*
4768 * remember the start of the last extent. There are a
4769 * bunch of different factors that go into the length of the
4770 * extent, so it's much less complex to remember where it started
4771 */
4772 last = found_key.offset;
4773 last_for_get_extent = last + 1;
4774 }
4775 btrfs_release_path(path);
4776
4777 /*
4778 * We might have some extents allocated but more delalloc past those
4779 * extents. So we trust isize unless the start of the last extent is
4780 * beyond isize.
4781 */
4782 if (last < isize) {
4783 last = (u64)-1;
4784 last_for_get_extent = isize;
4785 }
4786
4787 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4788 &cached_state);
4789
4790 em = get_extent_skip_holes(inode, start, last_for_get_extent);
4791 if (!em)
4792 goto out;
4793 if (IS_ERR(em)) {
4794 ret = PTR_ERR(em);
4795 goto out;
4796 }
4797
4798 while (!end) {
4799 u64 offset_in_extent = 0;
4800
4801 /* break if the extent we found is outside the range */
4802 if (em->start >= max || extent_map_end(em) < off)
4803 break;
4804
4805 /*
4806 * get_extent may return an extent that starts before our
4807 * requested range. We have to make sure the ranges
4808 * we return to fiemap always move forward and don't
4809 * overlap, so adjust the offsets here
4810 */
4811 em_start = max(em->start, off);
4812
4813 /*
4814 * Record the offset from the start of the extent
4815 * for adjusting the disk offset below. Only do this if the
4816 * extent isn't compressed, since our in-memory offset may be past
4817 * what we have actually allocated on disk.
4818 */
4819 if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4820 offset_in_extent = em_start - em->start;
4821 em_end = extent_map_end(em);
4822 em_len = em_end - em_start;
4823 flags = 0;
4824 if (em->block_start < EXTENT_MAP_LAST_BYTE)
4825 disko = em->block_start + offset_in_extent;
4826 else
4827 disko = 0;
4828
4829 /*
4830 * bump off for our next call to get_extent
4831 */
4832 off = extent_map_end(em);
4833 if (off >= max)
4834 end = 1;
4835
4836 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
4837 end = 1;
4838 flags |= FIEMAP_EXTENT_LAST;
4839 } else if (em->block_start == EXTENT_MAP_INLINE) {
4840 flags |= (FIEMAP_EXTENT_DATA_INLINE |
4841 FIEMAP_EXTENT_NOT_ALIGNED);
4842 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
4843 flags |= (FIEMAP_EXTENT_DELALLOC |
4844 FIEMAP_EXTENT_UNKNOWN);
4845 } else if (fieinfo->fi_extents_max) {
4846 u64 bytenr = em->block_start -
4847 (em->start - em->orig_start);
4848
4849 /*
4850 * As btrfs supports shared space, this information
4851 * can be exported to userspace tools via
4852 * flag FIEMAP_EXTENT_SHARED. If fi_extents_max == 0
4853 * then we're just getting a count and we can skip the
4854 * lookup stuff.
4855 */
4856 ret = btrfs_check_shared(root,
4857 btrfs_ino(BTRFS_I(inode)),
4858 bytenr, roots, tmp_ulist);
4859 if (ret < 0)
4860 goto out_free;
4861 if (ret)
4862 flags |= FIEMAP_EXTENT_SHARED;
4863 ret = 0;
4864 }
4865 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4866 flags |= FIEMAP_EXTENT_ENCODED;
4867 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4868 flags |= FIEMAP_EXTENT_UNWRITTEN;
4869
4870 free_extent_map(em);
4871 em = NULL;
4872 if ((em_start >= last) || em_len == (u64)-1 ||
4873 (last == (u64)-1 && isize <= em_end)) {
4874 flags |= FIEMAP_EXTENT_LAST;
4875 end = 1;
4876 }
4877
4878 /* now scan forward to see if this is really the last extent. */
4879 em = get_extent_skip_holes(inode, off, last_for_get_extent);
4880 if (IS_ERR(em)) {
4881 ret = PTR_ERR(em);
4882 goto out;
4883 }
4884 if (!em) {
4885 flags |= FIEMAP_EXTENT_LAST;
4886 end = 1;
4887 }
4888 ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
4889 em_len, flags);
4890 if (ret) {
4891 if (ret == 1)
4892 ret = 0;
4893 goto out_free;
4894 }
4895 }
4896out_free:
4897 if (!ret)
4898 ret = emit_last_fiemap_cache(fieinfo, &cache);
4899 free_extent_map(em);
4900out:
4901 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4902 &cached_state);
4903
4904out_free_ulist:
4905 btrfs_free_path(path);
4906 ulist_free(roots);
4907 ulist_free(tmp_ulist);
4908 return ret;
4909}
4910
4911static void __free_extent_buffer(struct extent_buffer *eb)
4912{
4913 kmem_cache_free(extent_buffer_cache, eb);
4914}
4915
4916int extent_buffer_under_io(const struct extent_buffer *eb)
4917{
4918 return (atomic_read(&eb->io_pages) ||
4919 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4920 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4921}
4922
4923/*
4924 * Release all pages attached to the extent buffer.
4925 */
4926static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
4927{
4928 int i;
4929 int num_pages;
4930 int mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
4931
4932 BUG_ON(extent_buffer_under_io(eb));
4933
4934 num_pages = num_extent_pages(eb);
4935 for (i = 0; i < num_pages; i++) {
4936 struct page *page = eb->pages[i];
4937
4938 if (!page)
4939 continue;
4940 if (mapped)
4941 spin_lock(&page->mapping->private_lock);
4942 /*
4943 * We do this since we'll remove the pages after we've
4944 * removed the eb from the radix tree, so we could race
4945 * and have this page now attached to the new eb. So
4946 * only clear page_private if it's still connected to
4947 * this eb.
4948 */
4949 if (PagePrivate(page) &&
4950 page->private == (unsigned long)eb) {
4951 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4952 BUG_ON(PageDirty(page));
4953 BUG_ON(PageWriteback(page));
4954 /*
4955 * We need to make sure we haven't been attached
4956 * to a new eb.
4957 */
4958 detach_page_private(page);
4959 }
4960
4961 if (mapped)
4962 spin_unlock(&page->mapping->private_lock);
4963
4964 /* One for when we allocated the page */
4965 put_page(page);
4966 }
4967}
4968
4969/*
4970 * Helper for releasing the extent buffer.
4971 */
4972static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4973{
4974 btrfs_release_extent_buffer_pages(eb);
4975 btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
4976 __free_extent_buffer(eb);
4977}
4978
4979static struct extent_buffer *
4980__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
4981 unsigned long len)
4982{
4983 struct extent_buffer *eb = NULL;
4984
4985 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
4986 eb->start = start;
4987 eb->len = len;
4988 eb->fs_info = fs_info;
4989 eb->bflags = 0;
4990 rwlock_init(&eb->lock);
4991 atomic_set(&eb->blocking_readers, 0);
4992 eb->blocking_writers = 0;
4993 eb->lock_nested = false;
4994 init_waitqueue_head(&eb->write_lock_wq);
4995 init_waitqueue_head(&eb->read_lock_wq);
4996
4997 btrfs_leak_debug_add(&fs_info->eb_leak_lock, &eb->leak_list,
4998 &fs_info->allocated_ebs);
4999
5000 spin_lock_init(&eb->refs_lock);
5001 atomic_set(&eb->refs, 1);
5002 atomic_set(&eb->io_pages, 0);
5003
5004 /*
5005 * Sanity checks: currently the maximum is 64K, covered by 16 4K pages.
5006 */
5007 BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
5008 > MAX_INLINE_EXTENT_BUFFER_SIZE);
5009 BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
5010
5011#ifdef CONFIG_BTRFS_DEBUG
5012 eb->spinning_writers = 0;
5013 atomic_set(&eb->spinning_readers, 0);
5014 atomic_set(&eb->read_locks, 0);
5015 eb->write_locks = 0;
5016#endif
5017
5018 return eb;
5019}
5020
5021struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
5022{
5023 int i;
5024 struct page *p;
5025 struct extent_buffer *new;
5026 int num_pages = num_extent_pages(src);
5027
5028 new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
5029 if (new == NULL)
5030 return NULL;
5031
5032 for (i = 0; i < num_pages; i++) {
5033 p = alloc_page(GFP_NOFS);
5034 if (!p) {
5035 btrfs_release_extent_buffer(new);
5036 return NULL;
5037 }
5038 attach_extent_buffer_page(new, p);
5039 WARN_ON(PageDirty(p));
5040 SetPageUptodate(p);
5041 new->pages[i] = p;
5042 copy_page(page_address(p), page_address(src->pages[i]));
5043 }
5044
5045 set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
5046 set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
5047
5048 return new;
5049}
5050
5051struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
5052 u64 start, unsigned long len)
5053{
5054 struct extent_buffer *eb;
5055 int num_pages;
5056 int i;
5057
5058 eb = __alloc_extent_buffer(fs_info, start, len);
5059 if (!eb)
5060 return NULL;
5061
5062 num_pages = num_extent_pages(eb);
5063 for (i = 0; i < num_pages; i++) {
5064 eb->pages[i] = alloc_page(GFP_NOFS);
5065 if (!eb->pages[i])
5066 goto err;
5067 }
5068 set_extent_buffer_uptodate(eb);
5069 btrfs_set_header_nritems(eb, 0);
5070 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
5071
5072 return eb;
5073err:
5074 for (; i > 0; i--)
5075 __free_page(eb->pages[i - 1]);
5076 __free_extent_buffer(eb);
5077 return NULL;
5078}
5079
5080struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
5081 u64 start)
5082{
5083 return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
5084}
5085
5086static void check_buffer_tree_ref(struct extent_buffer *eb)
5087{
5088 int refs;
5089 /*
5090 * The TREE_REF bit is first set when the extent_buffer is added
5091 * to the radix tree. It is also set again, if it was cleared, when a
5092 * new reference is created by find_extent_buffer.
5093 *
5094 * It is only cleared in two cases: freeing the last non-tree
5095 * reference to the extent_buffer when its STALE bit is set or
5096 * calling releasepage when the tree reference is the only reference.
5097 *
5098 * In both cases, care is taken to ensure that the extent_buffer's
5099 * pages are not under IO. However, releasepage can be called
5100 * concurrently with creating new references, which is prone to race
5101 * conditions between the calls to check_buffer_tree_ref in those
5102 * codepaths and clearing TREE_REF in try_release_extent_buffer.
5103 *
5104 * The actual lifetime of the extent_buffer in the radix tree is
5105 * adequately protected by the refcount, but the TREE_REF bit and
5106 * its corresponding reference are not. To protect against this
5107 * class of races, we call check_buffer_tree_ref from the codepaths
5108 * which trigger io after they set eb->io_pages. Note that once io is
5109 * initiated, TREE_REF can no longer be cleared, so that is the
5110 * moment at which any such race is best fixed.
5111 */
5112 refs = atomic_read(&eb->refs);
5113 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5114 return;
5115
5116 spin_lock(&eb->refs_lock);
5117 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5118 atomic_inc(&eb->refs);
5119 spin_unlock(&eb->refs_lock);
5120}
5121
5122static void mark_extent_buffer_accessed(struct extent_buffer *eb,
5123 struct page *accessed)
5124{
5125 int num_pages, i;
5126
5127 check_buffer_tree_ref(eb);
5128
5129 num_pages = num_extent_pages(eb);
5130 for (i = 0; i < num_pages; i++) {
5131 struct page *p = eb->pages[i];
5132
5133 if (p != accessed)
5134 mark_page_accessed(p);
5135 }
5136}
5137
5138struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
5139 u64 start)
5140{
5141 struct extent_buffer *eb;
5142
5143 rcu_read_lock();
5144 eb = radix_tree_lookup(&fs_info->buffer_radix,
5145 start >> PAGE_SHIFT);
5146 if (eb && atomic_inc_not_zero(&eb->refs)) {
5147 rcu_read_unlock();
5148 /*
5149 * Lock our eb's refs_lock to avoid races with
5150 * free_extent_buffer. When we get our eb it might be flagged
5151 * with EXTENT_BUFFER_STALE and another task running
5152 * free_extent_buffer might have seen that flag set,
5153 * eb->refs == 2, that the buffer isn't under IO (dirty and
5154 * writeback flags not set) and it's still in the tree (flag
5155 * EXTENT_BUFFER_TREE_REF set), therefore being in the process
5156 * of decrementing the extent buffer's reference count twice.
5157 * So here we could race and increment the eb's reference count,
5158 * clear its stale flag, mark it as dirty and drop our reference
5159 * before the other task finishes executing free_extent_buffer,
5160 * which would later result in an attempt to free an extent
5161 * buffer that is dirty.
5162 */
5163 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
5164 spin_lock(&eb->refs_lock);
5165 spin_unlock(&eb->refs_lock);
5166 }
5167 mark_extent_buffer_accessed(eb, NULL);
5168 return eb;
5169 }
5170 rcu_read_unlock();
5171
5172 return NULL;
5173}
5174
5175#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
5176struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
5177 u64 start)
5178{
5179 struct extent_buffer *eb, *exists = NULL;
5180 int ret;
5181
5182 eb = find_extent_buffer(fs_info, start);
5183 if (eb)
5184 return eb;
5185 eb = alloc_dummy_extent_buffer(fs_info, start);
5186 if (!eb)
5187 return ERR_PTR(-ENOMEM);
5188 eb->fs_info = fs_info;
5189again:
5190 ret = radix_tree_preload(GFP_NOFS);
5191 if (ret) {
5192 exists = ERR_PTR(ret);
5193 goto free_eb;
5194 }
5195 spin_lock(&fs_info->buffer_lock);
5196 ret = radix_tree_insert(&fs_info->buffer_radix,
5197 start >> PAGE_SHIFT, eb);
5198 spin_unlock(&fs_info->buffer_lock);
5199 radix_tree_preload_end();
5200 if (ret == -EEXIST) {
5201 exists = find_extent_buffer(fs_info, start);
5202 if (exists)
5203 goto free_eb;
5204 else
5205 goto again;
5206 }
5207 check_buffer_tree_ref(eb);
5208 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
5209
5210 return eb;
5211free_eb:
5212 btrfs_release_extent_buffer(eb);
5213 return exists;
5214}
5215#endif
5216
5217struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
5218 u64 start)
5219{
5220 unsigned long len = fs_info->nodesize;
5221 int num_pages;
5222 int i;
5223 unsigned long index = start >> PAGE_SHIFT;
5224 struct extent_buffer *eb;
5225 struct extent_buffer *exists = NULL;
5226 struct page *p;
5227 struct address_space *mapping = fs_info->btree_inode->i_mapping;
5228 int uptodate = 1;
5229 int ret;
5230
5231 if (!IS_ALIGNED(start, fs_info->sectorsize)) {
5232 btrfs_err(fs_info, "bad tree block start %llu", start);
5233 return ERR_PTR(-EINVAL);
5234 }
5235
5236 eb = find_extent_buffer(fs_info, start);
5237 if (eb)
5238 return eb;
5239
5240 eb = __alloc_extent_buffer(fs_info, start, len);
5241 if (!eb)
5242 return ERR_PTR(-ENOMEM);
5243
5244 num_pages = num_extent_pages(eb);
5245 for (i = 0; i < num_pages; i++, index++) {
5246 p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
5247 if (!p) {
5248 exists = ERR_PTR(-ENOMEM);
5249 goto free_eb;
5250 }
5251
5252 spin_lock(&mapping->private_lock);
5253 if (PagePrivate(p)) {
5254 /*
5255 * We could have already allocated an eb for this page
5256 * and attached one, so let's see if we can get a ref on
5257 * the existing eb. If we can, we know it's good and
5258 * we can just return that one; otherwise we know we can just
5259 * overwrite page->private.
5260 */
5261 exists = (struct extent_buffer *)p->private;
5262 if (atomic_inc_not_zero(&exists->refs)) {
5263 spin_unlock(&mapping->private_lock);
5264 unlock_page(p);
5265 put_page(p);
5266 mark_extent_buffer_accessed(exists, p);
5267 goto free_eb;
5268 }
5269 exists = NULL;
5270
5271 /*
5272 * Do this so attach doesn't complain and we need to
5273 * drop the ref the old guy had.
5274 */
5275 ClearPagePrivate(p);
5276 WARN_ON(PageDirty(p));
5277 put_page(p);
5278 }
5279 attach_extent_buffer_page(eb, p);
5280 spin_unlock(&mapping->private_lock);
5281 WARN_ON(PageDirty(p));
5282 eb->pages[i] = p;
5283 if (!PageUptodate(p))
5284 uptodate = 0;
5285
5286 /*
5287 * We can't unlock the pages just yet since the extent buffer
5288 * hasn't been properly inserted in the radix tree; this
5289 * opens a race with btree_releasepage, which can free a page
5290 * while we are still filling in all pages for the buffer, and
5291 * we could crash.
5292 */
5293 }
5294 if (uptodate)
5295 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5296again:
5297 ret = radix_tree_preload(GFP_NOFS);
5298 if (ret) {
5299 exists = ERR_PTR(ret);
5300 goto free_eb;
5301 }
5302
5303 spin_lock(&fs_info->buffer_lock);
5304 ret = radix_tree_insert(&fs_info->buffer_radix,
5305 start >> PAGE_SHIFT, eb);
5306 spin_unlock(&fs_info->buffer_lock);
5307 radix_tree_preload_end();
5308 if (ret == -EEXIST) {
5309 exists = find_extent_buffer(fs_info, start);
5310 if (exists)
5311 goto free_eb;
5312 else
5313 goto again;
5314 }
5315 /* add one reference for the tree */
5316 check_buffer_tree_ref(eb);
5317 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
5318
5319 /*
5320 * Now it's safe to unlock the pages because any calls to
5321 * btree_releasepage will correctly detect that a page belongs to a
5322 * live buffer and won't free them prematurely.
5323 */
5324 for (i = 0; i < num_pages; i++)
5325 unlock_page(eb->pages[i]);
5326 return eb;
5327
5328free_eb:
5329 WARN_ON(!atomic_dec_and_test(&eb->refs));
5330 for (i = 0; i < num_pages; i++) {
5331 if (eb->pages[i])
5332 unlock_page(eb->pages[i]);
5333 }
5334
5335 btrfs_release_extent_buffer(eb);
5336 return exists;
5337}
5338
5339static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
5340{
5341 struct extent_buffer *eb =
5342 container_of(head, struct extent_buffer, rcu_head);
5343
5344 __free_extent_buffer(eb);
5345}
5346
5347static int release_extent_buffer(struct extent_buffer *eb)
5348 __releases(&eb->refs_lock)
5349{
5350 lockdep_assert_held(&eb->refs_lock);
5351
5352 WARN_ON(atomic_read(&eb->refs) == 0);
5353 if (atomic_dec_and_test(&eb->refs)) {
5354 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
5355 struct btrfs_fs_info *fs_info = eb->fs_info;
5356
5357 spin_unlock(&eb->refs_lock);
5358
5359 spin_lock(&fs_info->buffer_lock);
5360 radix_tree_delete(&fs_info->buffer_radix,
5361 eb->start >> PAGE_SHIFT);
5362 spin_unlock(&fs_info->buffer_lock);
5363 } else {
5364 spin_unlock(&eb->refs_lock);
5365 }
5366
5367 btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
5368 /* Should be safe to release our pages at this point */
5369 btrfs_release_extent_buffer_pages(eb);
5370#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
5371 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
5372 __free_extent_buffer(eb);
5373 return 1;
5374 }
5375#endif
5376 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
5377 return 1;
5378 }
5379 spin_unlock(&eb->refs_lock);
5380
5381 return 0;
5382}
5383
5384void free_extent_buffer(struct extent_buffer *eb)
5385{
5386 int refs;
5387 int old;
5388 if (!eb)
5389 return;
5390
5391 while (1) {
5392 refs = atomic_read(&eb->refs);
5393 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
5394 || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
5395 refs == 1))
5396 break;
5397 old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
5398 if (old == refs)
5399 return;
5400 }
5401
5402 spin_lock(&eb->refs_lock);
5403 if (atomic_read(&eb->refs) == 2 &&
5404 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
5405 !extent_buffer_under_io(eb) &&
5406 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5407 atomic_dec(&eb->refs);
5408
5409 /*
5410 * I know this is terrible, but it's temporary until we stop tracking
5411 * the uptodate bits and such for the extent buffers.
5412 */
5413 release_extent_buffer(eb);
5414}
5415
5416void free_extent_buffer_stale(struct extent_buffer *eb)
5417{
5418 if (!eb)
5419 return;
5420
5421 spin_lock(&eb->refs_lock);
5422 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
5423
5424 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
5425 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5426 atomic_dec(&eb->refs);
5427 release_extent_buffer(eb);
5428}
5429
5430void clear_extent_buffer_dirty(const struct extent_buffer *eb)
5431{
5432 int i;
5433 int num_pages;
5434 struct page *page;
5435
5436 num_pages = num_extent_pages(eb);
5437
5438 for (i = 0; i < num_pages; i++) {
5439 page = eb->pages[i];
5440 if (!PageDirty(page))
5441 continue;
5442
5443 lock_page(page);
5444 WARN_ON(!PagePrivate(page));
5445
5446 clear_page_dirty_for_io(page);
5447 xa_lock_irq(&page->mapping->i_pages);
5448 if (!PageDirty(page))
5449 __xa_clear_mark(&page->mapping->i_pages,
5450 page_index(page), PAGECACHE_TAG_DIRTY);
5451 xa_unlock_irq(&page->mapping->i_pages);
5452 ClearPageError(page);
5453 unlock_page(page);
5454 }
5455 WARN_ON(atomic_read(&eb->refs) == 0);
5456}
5457
5458bool set_extent_buffer_dirty(struct extent_buffer *eb)
5459{
5460 int i;
5461 int num_pages;
5462 bool was_dirty;
5463
5464 check_buffer_tree_ref(eb);
5465
5466 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
5467
5468 num_pages = num_extent_pages(eb);
5469 WARN_ON(atomic_read(&eb->refs) == 0);
5470 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
5471
5472 if (!was_dirty)
5473 for (i = 0; i < num_pages; i++)
5474 set_page_dirty(eb->pages[i]);
5475
5476#ifdef CONFIG_BTRFS_DEBUG
5477 for (i = 0; i < num_pages; i++)
5478 ASSERT(PageDirty(eb->pages[i]));
5479#endif
5480
5481 return was_dirty;
5482}
5483
5484void clear_extent_buffer_uptodate(struct extent_buffer *eb)
5485{
5486 int i;
5487 struct page *page;
5488 int num_pages;
5489
5490 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5491 num_pages = num_extent_pages(eb);
5492 for (i = 0; i < num_pages; i++) {
5493 page = eb->pages[i];
5494 if (page)
5495 ClearPageUptodate(page);
5496 }
5497}
5498
5499void set_extent_buffer_uptodate(struct extent_buffer *eb)
5500{
5501 int i;
5502 struct page *page;
5503 int num_pages;
5504
5505 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5506 num_pages = num_extent_pages(eb);
5507 for (i = 0; i < num_pages; i++) {
5508 page = eb->pages[i];
5509 SetPageUptodate(page);
5510 }
5511}
5512
5513int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
5514{
5515 int i;
5516 struct page *page;
5517 int err;
5518 int ret = 0;
5519 int locked_pages = 0;
5520 int all_uptodate = 1;
5521 int num_pages;
5522 unsigned long num_reads = 0;
5523 struct bio *bio = NULL;
5524 unsigned long bio_flags = 0;
5525
5526 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
5527 return 0;
5528
5529 num_pages = num_extent_pages(eb);
5530 for (i = 0; i < num_pages; i++) {
5531 page = eb->pages[i];
5532 if (wait == WAIT_NONE) {
5533 if (!trylock_page(page))
5534 goto unlock_exit;
5535 } else {
5536 lock_page(page);
5537 }
5538 locked_pages++;
5539 }
5540 /*
5541 * We need to lock all pages first to make sure that
5542 * the uptodate bit of our pages won't be affected by
5543 * clear_extent_buffer_uptodate().
5544 */
5545 for (i = 0; i < num_pages; i++) {
5546 page = eb->pages[i];
5547 if (!PageUptodate(page)) {
5548 num_reads++;
5549 all_uptodate = 0;
5550 }
5551 }
5552
5553 if (all_uptodate) {
5554 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5555 goto unlock_exit;
5556 }
5557
5558 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
5559 eb->read_mirror = 0;
5560 atomic_set(&eb->io_pages, num_reads);
5561 /*
5562 * It is possible for releasepage to clear the TREE_REF bit before we
5563 * set io_pages. See check_buffer_tree_ref for a more detailed comment.
5564 */
5565 check_buffer_tree_ref(eb);
5566 for (i = 0; i < num_pages; i++) {
5567 page = eb->pages[i];
5568
5569 if (!PageUptodate(page)) {
5570 if (ret) {
5571 atomic_dec(&eb->io_pages);
5572 unlock_page(page);
5573 continue;
5574 }
5575
5576 ClearPageError(page);
5577 err = __extent_read_full_page(page,
5578 btree_get_extent, &bio,
5579 mirror_num, &bio_flags,
5580 REQ_META);
5581 if (err) {
5582 ret = err;
5583 /*
5584 * We use &bio in the __extent_read_full_page() call
5585 * above, so if it returns an error we know the current
5586 * page failed to add itself to the bio and has already
5587 * been unlocked.
5588 *
5589 * We must decrement io_pages ourselves.
5590 */
5591 atomic_dec(&eb->io_pages);
5592 }
5593 } else {
5594 unlock_page(page);
5595 }
5596 }
5597
5598 if (bio) {
5599 err = submit_one_bio(bio, mirror_num, bio_flags);
5600 if (err)
5601 return err;
5602 }
5603
5604 if (ret || wait != WAIT_COMPLETE)
5605 return ret;
5606
5607 for (i = 0; i < num_pages; i++) {
5608 page = eb->pages[i];
5609 wait_on_page_locked(page);
5610 if (!PageUptodate(page))
5611 ret = -EIO;
5612 }
5613
5614 return ret;
5615
5616unlock_exit:
5617 while (locked_pages > 0) {
5618 locked_pages--;
5619 page = eb->pages[locked_pages];
5620 unlock_page(page);
5621 }
5622 return ret;
5623}
5624
5625void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
5626 unsigned long start, unsigned long len)
5627{
5628 size_t cur;
5629 size_t offset;
5630 struct page *page;
5631 char *kaddr;
5632 char *dst = (char *)dstv;
5633 unsigned long i = start >> PAGE_SHIFT;
5634
5635 if (start + len > eb->len) {
5636 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n",
5637 eb->start, eb->len, start, len);
5638 memset(dst, 0, len);
5639 return;
5640 }
5641
5642 offset = offset_in_page(start);
5643
5644 while (len > 0) {
5645 page = eb->pages[i];
5646
5647 cur = min(len, (PAGE_SIZE - offset));
5648 kaddr = page_address(page);
5649 memcpy(dst, kaddr + offset, cur);
5650
5651 dst += cur;
5652 len -= cur;
5653 offset = 0;
5654 i++;
5655 }
5656}
5657
5658int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
5659 void __user *dstv,
5660 unsigned long start, unsigned long len)
5661{
5662 size_t cur;
5663 size_t offset;
5664 struct page *page;
5665 char *kaddr;
5666 char __user *dst = (char __user *)dstv;
5667 unsigned long i = start >> PAGE_SHIFT;
5668 int ret = 0;
5669
5670 WARN_ON(start > eb->len);
5671 WARN_ON(start + len > eb->start + eb->len);
5672
5673 offset = offset_in_page(start);
5674
5675 while (len > 0) {
5676 page = eb->pages[i];
5677
5678 cur = min(len, (PAGE_SIZE - offset));
5679 kaddr = page_address(page);
5680 if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
5681 ret = -EFAULT;
5682 break;
5683 }
5684
5685 dst += cur;
5686 len -= cur;
5687 offset = 0;
5688 i++;
5689 }
5690
5691 return ret;
5692}
5693
5694int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
5695 unsigned long start, unsigned long len)
5696{
5697 size_t cur;
5698 size_t offset;
5699 struct page *page;
5700 char *kaddr;
5701 char *ptr = (char *)ptrv;
5702 unsigned long i = start >> PAGE_SHIFT;
5703 int ret = 0;
5704
5705 WARN_ON(start > eb->len);
5706 WARN_ON(start + len > eb->start + eb->len);
5707
5708 offset = offset_in_page(start);
5709
5710 while (len > 0) {
5711 page = eb->pages[i];
5712
5713 cur = min(len, (PAGE_SIZE - offset));
5714
5715 kaddr = page_address(page);
5716 ret = memcmp(ptr, kaddr + offset, cur);
5717 if (ret)
5718 break;
5719
5720 ptr += cur;
5721 len -= cur;
5722 offset = 0;
5723 i++;
5724 }
5725 return ret;
5726}
5727
5728void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
5729 const void *srcv)
5730{
5731 char *kaddr;
5732
5733 WARN_ON(!PageUptodate(eb->pages[0]));
5734 kaddr = page_address(eb->pages[0]);
5735 memcpy(kaddr + offsetof(struct btrfs_header, chunk_tree_uuid), srcv,
5736 BTRFS_FSID_SIZE);
5737}
5738
5739void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *srcv)
5740{
5741 char *kaddr;
5742
5743 WARN_ON(!PageUptodate(eb->pages[0]));
5744 kaddr = page_address(eb->pages[0]);
5745 memcpy(kaddr + offsetof(struct btrfs_header, fsid), srcv,
5746 BTRFS_FSID_SIZE);
5747}
5748
5749void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
5750 unsigned long start, unsigned long len)
5751{
5752 size_t cur;
5753 size_t offset;
5754 struct page *page;
5755 char *kaddr;
5756 char *src = (char *)srcv;
5757 unsigned long i = start >> PAGE_SHIFT;
5758
5759 WARN_ON(start > eb->len);
5760 WARN_ON(start + len > eb->start + eb->len);
5761
5762 offset = offset_in_page(start);
5763
5764 while (len > 0) {
5765 page = eb->pages[i];
5766 WARN_ON(!PageUptodate(page));
5767
5768 cur = min(len, PAGE_SIZE - offset);
5769 kaddr = page_address(page);
5770 memcpy(kaddr + offset, src, cur);
5771
5772 src += cur;
5773 len -= cur;
5774 offset = 0;
5775 i++;
5776 }
5777}
5778
5779void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
5780 unsigned long len)
5781{
5782 size_t cur;
5783 size_t offset;
5784 struct page *page;
5785 char *kaddr;
5786 unsigned long i = start >> PAGE_SHIFT;
5787
5788 WARN_ON(start > eb->len);
5789 WARN_ON(start + len > eb->start + eb->len);
5790
5791 offset = offset_in_page(start);
5792
5793 while (len > 0) {
5794 page = eb->pages[i];
5795 WARN_ON(!PageUptodate(page));
5796
5797 cur = min(len, PAGE_SIZE - offset);
5798 kaddr = page_address(page);
5799 memset(kaddr + offset, 0, cur);
5800
5801 len -= cur;
5802 offset = 0;
5803 i++;
5804 }
5805}
5806
5807void copy_extent_buffer_full(const struct extent_buffer *dst,
5808 const struct extent_buffer *src)
5809{
5810 int i;
5811 int num_pages;
5812
5813 ASSERT(dst->len == src->len);
5814
5815 num_pages = num_extent_pages(dst);
5816 for (i = 0; i < num_pages; i++)
5817 copy_page(page_address(dst->pages[i]),
5818 page_address(src->pages[i]));
5819}
5820
5821void copy_extent_buffer(const struct extent_buffer *dst,
5822 const struct extent_buffer *src,
5823 unsigned long dst_offset, unsigned long src_offset,
5824 unsigned long len)
5825{
5826 u64 dst_len = dst->len;
5827 size_t cur;
5828 size_t offset;
5829 struct page *page;
5830 char *kaddr;
5831 unsigned long i = dst_offset >> PAGE_SHIFT;
5832
5833 WARN_ON(src->len != dst_len);
5834
5835 offset = offset_in_page(dst_offset);
5836
5837 while (len > 0) {
5838 page = dst->pages[i];
5839 WARN_ON(!PageUptodate(page));
5840
5841 cur = min(len, (unsigned long)(PAGE_SIZE - offset));
5842
5843 kaddr = page_address(page);
5844 read_extent_buffer(src, kaddr + offset, src_offset, cur);
5845
5846 src_offset += cur;
5847 len -= cur;
5848 offset = 0;
5849 i++;
5850 }
5851}
5852
5853/*
5854 * eb_bitmap_offset() - calculate the page and offset of the byte containing the
5855 * given bit number
5856 * @eb: the extent buffer
5857 * @start: offset of the bitmap item in the extent buffer
5858 * @nr: bit number
5859 * @page_index: return index of the page in the extent buffer that contains the
5860 * given bit number
5861 * @page_offset: return offset into the page given by page_index
5862 *
5863 * This helper hides the ugliness of finding the byte in an extent buffer which
5864 * contains a given bit.
5865 */
5866static inline void eb_bitmap_offset(const struct extent_buffer *eb,
5867 unsigned long start, unsigned long nr,
5868 unsigned long *page_index,
5869 size_t *page_offset)
5870{
5871 size_t byte_offset = BIT_BYTE(nr);
5872 size_t offset;
5873
5874 /*
5875 * The byte we want is at the offset of the bitmap item in the
5876 * extent buffer plus the offset of the byte within the
5877 * bitmap item.
5878 */
5879 offset = start + byte_offset;
5880
5881 *page_index = offset >> PAGE_SHIFT;
5882 *page_offset = offset_in_page(offset);
5883}
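
/*
 * Worked example (assuming 4K pages): for start = 100 and nr = 53,
 * BIT_BYTE(53) = 6, so offset = 106, giving *page_index = 0 and
 * *page_offset = 106. Bit 53 then lives at bit (53 & 7) = 5 of byte
 * 106 in the buffer's first page.
 */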
5884
5885/**
5886 * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
5887 * @eb: the extent buffer
5888 * @start: offset of the bitmap item in the extent buffer
5889 * @nr: bit number to test
5890 */
5891int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
5892 unsigned long nr)
5893{
5894 u8 *kaddr;
5895 struct page *page;
5896 unsigned long i;
5897 size_t offset;
5898
5899 eb_bitmap_offset(eb, start, nr, &i, &offset);
5900 page = eb->pages[i];
5901 WARN_ON(!PageUptodate(page));
5902 kaddr = page_address(page);
5903 return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
5904}
5905
5906/**
5907 * extent_buffer_bitmap_set - set an area of a bitmap
5908 * @eb: the extent buffer
5909 * @start: offset of the bitmap item in the extent buffer
5910 * @pos: bit number of the first bit
5911 * @len: number of bits to set
5912 */
5913void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
5914 unsigned long pos, unsigned long len)
5915{
5916 u8 *kaddr;
5917 struct page *page;
5918 unsigned long i;
5919 size_t offset;
5920 const unsigned int size = pos + len;
5921 int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
5922 u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
5923
5924 eb_bitmap_offset(eb, start, pos, &i, &offset);
5925 page = eb->pages[i];
5926 WARN_ON(!PageUptodate(page));
5927 kaddr = page_address(page);
5928
5929 while (len >= bits_to_set) {
5930 kaddr[offset] |= mask_to_set;
5931 len -= bits_to_set;
5932 bits_to_set = BITS_PER_BYTE;
5933 mask_to_set = ~0;
5934 if (++offset >= PAGE_SIZE && len > 0) {
5935 offset = 0;
5936 page = eb->pages[++i];
5937 WARN_ON(!PageUptodate(page));
5938 kaddr = page_address(page);
5939 }
5940 }
5941 if (len) {
5942 mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
5943 kaddr[offset] |= mask_to_set;
5944 }
5945}
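
/*
 * Worked example of the masking above: for pos = 3 and len = 7,
 * BITMAP_FIRST_BYTE_MASK(3) is 0xf8 (bits 3-7) and bits_to_set starts
 * at 5. After the first byte is handled, len drops to 2 and the loop
 * exits; the tail is finished by mask_to_set &= BITMAP_LAST_BYTE_MASK(10)
 * (size = pos + len = 10), which keeps only bits 0-1 of the next byte.
 */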
5946
5947
5948/**
5949 * extent_buffer_bitmap_clear - clear an area of a bitmap
5950 * @eb: the extent buffer
5951 * @start: offset of the bitmap item in the extent buffer
5952 * @pos: bit number of the first bit
5953 * @len: number of bits to clear
5954 */
5955void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
5956 unsigned long start, unsigned long pos,
5957 unsigned long len)
5958{
5959 u8 *kaddr;
5960 struct page *page;
5961 unsigned long i;
5962 size_t offset;
5963 const unsigned int size = pos + len;
5964 int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
5965 u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
5966
5967 eb_bitmap_offset(eb, start, pos, &i, &offset);
5968 page = eb->pages[i];
5969 WARN_ON(!PageUptodate(page));
5970 kaddr = page_address(page);
5971
5972 while (len >= bits_to_clear) {
5973 kaddr[offset] &= ~mask_to_clear;
5974 len -= bits_to_clear;
5975 bits_to_clear = BITS_PER_BYTE;
5976 mask_to_clear = ~0;
5977 if (++offset >= PAGE_SIZE && len > 0) {
5978 offset = 0;
5979 page = eb->pages[++i];
5980 WARN_ON(!PageUptodate(page));
5981 kaddr = page_address(page);
5982 }
5983 }
5984 if (len) {
5985 mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
5986 kaddr[offset] &= ~mask_to_clear;
5987 }
5988}
5989
5990static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
5991{
5992 unsigned long distance = (src > dst) ? src - dst : dst - src;
5993 return distance < len;
5994}
5995
5996static void copy_pages(struct page *dst_page, struct page *src_page,
5997 unsigned long dst_off, unsigned long src_off,
5998 unsigned long len)
5999{
6000 char *dst_kaddr = page_address(dst_page);
6001 char *src_kaddr;
6002 int must_memmove = 0;
6003
6004 if (dst_page != src_page) {
6005 src_kaddr = page_address(src_page);
6006 } else {
6007 src_kaddr = dst_kaddr;
6008 if (areas_overlap(src_off, dst_off, len))
6009 must_memmove = 1;
6010 }
6011
6012 if (must_memmove)
6013 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
6014 else
6015 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
6016}
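
/*
 * Example of the overlap check above: moving 100 bytes from offset 4000
 * to offset 4040 within the same page overlaps (distance 40 < len 100),
 * so memmove() is required. Copies between two different pages of the
 * buffer can never overlap, so plain memcpy() is always safe there.
 */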
6017
6018void memcpy_extent_buffer(const struct extent_buffer *dst,
6019 unsigned long dst_offset, unsigned long src_offset,
6020 unsigned long len)
6021{
6022 struct btrfs_fs_info *fs_info = dst->fs_info;
6023 size_t cur;
6024 size_t dst_off_in_page;
6025 size_t src_off_in_page;
6026 unsigned long dst_i;
6027 unsigned long src_i;
6028
6029 if (src_offset + len > dst->len) {
6030 btrfs_err(fs_info,
6031 "memmove bogus src_offset %lu move len %lu dst len %lu",
6032 src_offset, len, dst->len);
6033 BUG();
6034 }
6035 if (dst_offset + len > dst->len) {
6036 btrfs_err(fs_info,
6037 "memmove bogus dst_offset %lu move len %lu dst len %lu",
6038 dst_offset, len, dst->len);
6039 BUG();
6040 }
6041
6042 while (len > 0) {
6043 dst_off_in_page = offset_in_page(dst_offset);
6044 src_off_in_page = offset_in_page(src_offset);
6045
6046 dst_i = dst_offset >> PAGE_SHIFT;
6047 src_i = src_offset >> PAGE_SHIFT;
6048
6049 cur = min(len, (unsigned long)(PAGE_SIZE -
6050 src_off_in_page));
6051 cur = min_t(unsigned long, cur,
6052 (unsigned long)(PAGE_SIZE - dst_off_in_page));
6053
6054 copy_pages(dst->pages[dst_i], dst->pages[src_i],
6055 dst_off_in_page, src_off_in_page, cur);
6056
6057 src_offset += cur;
6058 dst_offset += cur;
6059 len -= cur;
6060 }
6061}
6062
6063void memmove_extent_buffer(const struct extent_buffer *dst,
6064 unsigned long dst_offset, unsigned long src_offset,
6065 unsigned long len)
6066{
6067 struct btrfs_fs_info *fs_info = dst->fs_info;
6068 size_t cur;
6069 size_t dst_off_in_page;
6070 size_t src_off_in_page;
6071 unsigned long dst_end = dst_offset + len - 1;
6072 unsigned long src_end = src_offset + len - 1;
6073 unsigned long dst_i;
6074 unsigned long src_i;
6075
6076 if (src_offset + len > dst->len) {
6077 btrfs_err(fs_info,
6078 "memmove bogus src_offset %lu move len %lu len %lu",
6079 src_offset, len, dst->len);
6080 BUG();
6081 }
6082 if (dst_offset + len > dst->len) {
6083 btrfs_err(fs_info,
6084 "memmove bogus dst_offset %lu move len %lu len %lu",
6085 dst_offset, len, dst->len);
6086 BUG();
6087 }
6088 if (dst_offset < src_offset) {
6089 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
6090 return;
6091 }
6092 while (len > 0) {
6093 dst_i = dst_end >> PAGE_SHIFT;
6094 src_i = src_end >> PAGE_SHIFT;
6095
6096 dst_off_in_page = offset_in_page(dst_end);
6097 src_off_in_page = offset_in_page(src_end);
6098
6099 cur = min_t(unsigned long, len, src_off_in_page + 1);
6100 cur = min(cur, dst_off_in_page + 1);
6101 copy_pages(dst->pages[dst_i], dst->pages[src_i],
6102 dst_off_in_page - cur + 1,
6103 src_off_in_page - cur + 1, cur);
6104
6105 dst_end -= cur;
6106 src_end -= cur;
6107 len -= cur;
6108 }
6109}
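
/*
 * Example of the backward walk above (assuming 4K pages): moving 6000
 * bytes from offset 1000 to offset 3000 in the same buffer overlaps, so
 * copying starts from the tail. dst_end = 8999 (page 2, in-page offset
 * 807) and src_end = 6999 (page 1, in-page offset 2903), so the first
 * chunk is cur = min(6000, 2904, 808) = 808 bytes; both ends then step
 * back by 808 and the walk repeats until len reaches zero.
 */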
6110
6111int try_release_extent_buffer(struct page *page)
6112{
6113 struct extent_buffer *eb;
6114
6115 /*
6116 * We need to make sure nobody is attaching this page to an eb right
6117 * now.
6118 */
6119 spin_lock(&page->mapping->private_lock);
6120 if (!PagePrivate(page)) {
6121 spin_unlock(&page->mapping->private_lock);
6122 return 1;
6123 }
6124
6125 eb = (struct extent_buffer *)page->private;
6126 BUG_ON(!eb);
6127
6128 /*
6129 * This is a little awful but should be OK: we need to make sure that
6130 * the eb doesn't disappear out from under us while we're looking at
6131 * this page.
6132 */
6133 spin_lock(&eb->refs_lock);
6134 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
6135 spin_unlock(&eb->refs_lock);
6136 spin_unlock(&page->mapping->private_lock);
6137 return 0;
6138 }
6139 spin_unlock(&page->mapping->private_lock);
6140
6141 /*
6142 * If the tree ref isn't set then we know the ref on this eb is a real
6143 * ref, so just return; this page will likely be freed soon anyway.
6144 */
6145 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
6146 spin_unlock(&eb->refs_lock);
6147 return 0;
6148 }
6149
6150 return release_extent_buffer(eb);
6151}