1#include <linux/bitops.h>
2#include <linux/slab.h>
3#include <linux/bio.h>
4#include <linux/mm.h>
5#include <linux/pagemap.h>
6#include <linux/page-flags.h>
7#include <linux/module.h>
8#include <linux/spinlock.h>
9#include <linux/blkdev.h>
10#include <linux/swap.h>
11#include <linux/writeback.h>
12#include <linux/pagevec.h>
13#include <linux/prefetch.h>
14#include <linux/cleancache.h>
15#include "extent_io.h"
16#include "extent_map.h"
17#include "compat.h"
18#include "ctree.h"
19#include "btrfs_inode.h"
20#include "volumes.h"
21#include "check-integrity.h"
22#include "locking.h"
23#include "rcu-string.h"
24
25static struct kmem_cache *extent_state_cache;
26static struct kmem_cache *extent_buffer_cache;
27
28static LIST_HEAD(buffers);
29static LIST_HEAD(states);
30
31#define LEAK_DEBUG 0
32#if LEAK_DEBUG
33static DEFINE_SPINLOCK(leak_lock);
34#endif
35
36#define BUFFER_LRU_MAX 64
37
38struct tree_entry {
39 u64 start;
40 u64 end;
41 struct rb_node rb_node;
42};
43
44struct extent_page_data {
45 struct bio *bio;
46 struct extent_io_tree *tree;
47 get_extent_t *get_extent;
48
49 /* tells writepage not to lock the state bits for this range
50 * it still does the unlocking
51 */
52 unsigned int extent_locked:1;
53
54 /* tells the submit_bio code to use a WRITE_SYNC */
55 unsigned int sync_io:1;
56};
57
58static noinline void flush_write_bio(void *data);
59static inline struct btrfs_fs_info *
60tree_fs_info(struct extent_io_tree *tree)
61{
62 return btrfs_sb(tree->mapping->host->i_sb);
63}
64
65int __init extent_io_init(void)
66{
67 extent_state_cache = kmem_cache_create("extent_state",
68 sizeof(struct extent_state), 0,
69 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
70 if (!extent_state_cache)
71 return -ENOMEM;
72
73 extent_buffer_cache = kmem_cache_create("extent_buffers",
74 sizeof(struct extent_buffer), 0,
75 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
76 if (!extent_buffer_cache)
77 goto free_state_cache;
78 return 0;
79
80free_state_cache:
81 kmem_cache_destroy(extent_state_cache);
82 return -ENOMEM;
83}
84
85void extent_io_exit(void)
86{
87 struct extent_state *state;
88 struct extent_buffer *eb;
89
90 while (!list_empty(&states)) {
91 state = list_entry(states.next, struct extent_state, leak_list);
92 printk(KERN_ERR "btrfs state leak: start %llu end %llu "
93 "state %lu in tree %p refs %d\n",
94 (unsigned long long)state->start,
95 (unsigned long long)state->end,
96 state->state, state->tree, atomic_read(&state->refs));
97 list_del(&state->leak_list);
98 kmem_cache_free(extent_state_cache, state);
99
100 }
101
102 while (!list_empty(&buffers)) {
103 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
104 printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
105 "refs %d\n", (unsigned long long)eb->start,
106 eb->len, atomic_read(&eb->refs));
107 list_del(&eb->leak_list);
108 kmem_cache_free(extent_buffer_cache, eb);
109 }
110 if (extent_state_cache)
111 kmem_cache_destroy(extent_state_cache);
112 if (extent_buffer_cache)
113 kmem_cache_destroy(extent_buffer_cache);
114}
115
116void extent_io_tree_init(struct extent_io_tree *tree,
117 struct address_space *mapping)
118{
119 tree->state = RB_ROOT;
120 INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
121 tree->ops = NULL;
122 tree->dirty_bytes = 0;
123 spin_lock_init(&tree->lock);
124 spin_lock_init(&tree->buffer_lock);
125 tree->mapping = mapping;
126}
127
128static struct extent_state *alloc_extent_state(gfp_t mask)
129{
130 struct extent_state *state;
131#if LEAK_DEBUG
132 unsigned long flags;
133#endif
134
135 state = kmem_cache_alloc(extent_state_cache, mask);
136 if (!state)
137 return state;
138 state->state = 0;
139 state->private = 0;
140 state->tree = NULL;
141#if LEAK_DEBUG
142 spin_lock_irqsave(&leak_lock, flags);
143 list_add(&state->leak_list, &states);
144 spin_unlock_irqrestore(&leak_lock, flags);
145#endif
146 atomic_set(&state->refs, 1);
147 init_waitqueue_head(&state->wq);
148 trace_alloc_extent_state(state, mask, _RET_IP_);
149 return state;
150}
151
152void free_extent_state(struct extent_state *state)
153{
154 if (!state)
155 return;
156 if (atomic_dec_and_test(&state->refs)) {
157#if LEAK_DEBUG
158 unsigned long flags;
159#endif
160 WARN_ON(state->tree);
161#if LEAK_DEBUG
162 spin_lock_irqsave(&leak_lock, flags);
163 list_del(&state->leak_list);
164 spin_unlock_irqrestore(&leak_lock, flags);
165#endif
166 trace_free_extent_state(state, _RET_IP_);
167 kmem_cache_free(extent_state_cache, state);
168 }
169}
170
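/*
 * insert 'node' into the rbtree keyed by the [start, end] offsets of the
 * existing entries.  If 'offset' already falls inside an entry, nothing is
 * inserted and that entry's node is returned; on success NULL is returned.
 */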
171static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
172 struct rb_node *node)
173{
174 struct rb_node **p = &root->rb_node;
175 struct rb_node *parent = NULL;
176 struct tree_entry *entry;
177
178 while (*p) {
179 parent = *p;
180 entry = rb_entry(parent, struct tree_entry, rb_node);
181
182 if (offset < entry->start)
183 p = &(*p)->rb_left;
184 else if (offset > entry->end)
185 p = &(*p)->rb_right;
186 else
187 return parent;
188 }
189
190 rb_link_node(node, parent, p);
191 rb_insert_color(node, root);
192 return NULL;
193}
194
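/*
 * search the tree for the entry that contains 'offset' and return its node.
 * If no such entry exists, NULL is returned and, when the caller asked for
 * them, *prev_ret is set to the first entry ending at or after 'offset' and
 * *next_ret to the last entry starting at or before it (either may be NULL).
 */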
195static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
196 struct rb_node **prev_ret,
197 struct rb_node **next_ret)
198{
199 struct rb_root *root = &tree->state;
200 struct rb_node *n = root->rb_node;
201 struct rb_node *prev = NULL;
202 struct rb_node *orig_prev = NULL;
203 struct tree_entry *entry;
204 struct tree_entry *prev_entry = NULL;
205
206 while (n) {
207 entry = rb_entry(n, struct tree_entry, rb_node);
208 prev = n;
209 prev_entry = entry;
210
211 if (offset < entry->start)
212 n = n->rb_left;
213 else if (offset > entry->end)
214 n = n->rb_right;
215 else
216 return n;
217 }
218
219 if (prev_ret) {
220 orig_prev = prev;
221 while (prev && offset > prev_entry->end) {
222 prev = rb_next(prev);
223 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
224 }
225 *prev_ret = prev;
226 prev = orig_prev;
227 }
228
229 if (next_ret) {
230 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
231 while (prev && offset < prev_entry->start) {
232 prev = rb_prev(prev);
233 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
234 }
235 *next_ret = prev;
236 }
237 return NULL;
238}
239
240static inline struct rb_node *tree_search(struct extent_io_tree *tree,
241 u64 offset)
242{
243 struct rb_node *prev = NULL;
244 struct rb_node *ret;
245
246 ret = __etree_search(tree, offset, &prev, NULL);
247 if (!ret)
248 return prev;
249 return ret;
250}
251
252static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
253 struct extent_state *other)
254{
255 if (tree->ops && tree->ops->merge_extent_hook)
256 tree->ops->merge_extent_hook(tree->mapping->host, new,
257 other);
258}
259
260/*
261 * utility function to look for merge candidates inside a given range.
262 * Any extents with matching state are merged together into a single
263 * extent in the tree. Extents with EXTENT_IOBITS in their state field
264 * are not merged because the end_io handlers need to be able to do
265 * operations on them without sleeping (or doing allocations/splits).
266 *
267 * This should be called with the tree lock held.
268 */
269static void merge_state(struct extent_io_tree *tree,
270 struct extent_state *state)
271{
272 struct extent_state *other;
273 struct rb_node *other_node;
274
275 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
276 return;
277
278 other_node = rb_prev(&state->rb_node);
279 if (other_node) {
280 other = rb_entry(other_node, struct extent_state, rb_node);
281 if (other->end == state->start - 1 &&
282 other->state == state->state) {
283 merge_cb(tree, state, other);
284 state->start = other->start;
285 other->tree = NULL;
286 rb_erase(&other->rb_node, &tree->state);
287 free_extent_state(other);
288 }
289 }
290 other_node = rb_next(&state->rb_node);
291 if (other_node) {
292 other = rb_entry(other_node, struct extent_state, rb_node);
293 if (other->start == state->end + 1 &&
294 other->state == state->state) {
295 merge_cb(tree, state, other);
296 state->end = other->end;
297 other->tree = NULL;
298 rb_erase(&other->rb_node, &tree->state);
299 free_extent_state(other);
300 }
301 }
302}
303
304static void set_state_cb(struct extent_io_tree *tree,
305 struct extent_state *state, int *bits)
306{
307 if (tree->ops && tree->ops->set_bit_hook)
308 tree->ops->set_bit_hook(tree->mapping->host, state, bits);
309}
310
311static void clear_state_cb(struct extent_io_tree *tree,
312 struct extent_state *state, int *bits)
313{
314 if (tree->ops && tree->ops->clear_bit_hook)
315 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
316}
317
318static void set_state_bits(struct extent_io_tree *tree,
319 struct extent_state *state, int *bits);
320
321/*
322 * insert an extent_state struct into the tree. 'bits' are set on the
323 * struct before it is inserted.
324 *
325 * This may return -EEXIST if the extent is already there, in which case the
326 * state struct is freed.
327 *
328 * The tree lock is not taken internally. This is a utility function and
329 * probably isn't what you want to call (see set/clear_extent_bit).
330 */
331static int insert_state(struct extent_io_tree *tree,
332 struct extent_state *state, u64 start, u64 end,
333 int *bits)
334{
335 struct rb_node *node;
336
337 if (end < start) {
338 printk(KERN_ERR "btrfs end < start %llu %llu\n",
339 (unsigned long long)end,
340 (unsigned long long)start);
341 WARN_ON(1);
342 }
343 state->start = start;
344 state->end = end;
345
346 set_state_bits(tree, state, bits);
347
348 node = tree_insert(&tree->state, end, &state->rb_node);
349 if (node) {
350 struct extent_state *found;
351 found = rb_entry(node, struct extent_state, rb_node);
352 printk(KERN_ERR "btrfs found node %llu %llu on insert of "
353 "%llu %llu\n", (unsigned long long)found->start,
354 (unsigned long long)found->end,
355 (unsigned long long)start, (unsigned long long)end);
356 return -EEXIST;
357 }
358 state->tree = tree;
359 merge_state(tree, state);
360 return 0;
361}
362
363static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
364 u64 split)
365{
366 if (tree->ops && tree->ops->split_extent_hook)
367 tree->ops->split_extent_hook(tree->mapping->host, orig, split);
368}
369
370/*
371 * split a given extent state struct in two, inserting the preallocated
372 * struct 'prealloc' as the newly created second half. 'split' indicates an
373 * offset inside 'orig' where it should be split.
374 *
375 * Before calling, the tree has 'orig' at [orig->start, orig->end].
376 * After calling, there
377 * are two extent state structs in the tree:
378 * prealloc: [orig->start, split - 1]
379 * orig: [ split, orig->end ]
380 *
381 * The tree locks are not taken by this function. They need to be held
382 * by the caller.
383 */
384static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
385 struct extent_state *prealloc, u64 split)
386{
387 struct rb_node *node;
388
389 split_cb(tree, orig, split);
390
391 prealloc->start = orig->start;
392 prealloc->end = split - 1;
393 prealloc->state = orig->state;
394 orig->start = split;
395
396 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
397 if (node) {
398 free_extent_state(prealloc);
399 return -EEXIST;
400 }
401 prealloc->tree = tree;
402 return 0;
403}
404
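/*
 * return the extent_state that follows 'state' in the tree, or NULL if
 * 'state' is the last one.
 */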
405static struct extent_state *next_state(struct extent_state *state)
406{
407 struct rb_node *next = rb_next(&state->rb_node);
408 if (next)
409 return rb_entry(next, struct extent_state, rb_node);
410 else
411 return NULL;
412}
413
414/*
415 * utility function to clear some bits in an extent state struct.
416 * it will optionally wake up anyone waiting on this state (wake == 1).
417 *
418 * If no bits are set on the state struct after clearing things, the
419 * struct is freed and removed from the tree
420 */
421static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
422 struct extent_state *state,
423 int *bits, int wake)
424{
425 struct extent_state *next;
426 int bits_to_clear = *bits & ~EXTENT_CTLBITS;
427
428 if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
429 u64 range = state->end - state->start + 1;
430 WARN_ON(range > tree->dirty_bytes);
431 tree->dirty_bytes -= range;
432 }
433 clear_state_cb(tree, state, bits);
434 state->state &= ~bits_to_clear;
435 if (wake)
436 wake_up(&state->wq);
437 if (state->state == 0) {
438 next = next_state(state);
439 if (state->tree) {
440 rb_erase(&state->rb_node, &tree->state);
441 state->tree = NULL;
442 free_extent_state(state);
443 } else {
444 WARN_ON(1);
445 }
446 } else {
447 merge_state(tree, state);
448 next = next_state(state);
449 }
450 return next;
451}
452
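/*
 * make sure we have an extent_state to work with; if the caller did not
 * preallocate one, fall back to a GFP_ATOMIC allocation.
 */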
453static struct extent_state *
454alloc_extent_state_atomic(struct extent_state *prealloc)
455{
456 if (!prealloc)
457 prealloc = alloc_extent_state(GFP_ATOMIC);
458
459 return prealloc;
460}
461
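/*
 * a tree operation failed in a way that can only happen if another thread
 * modified the tree while we held the lock; there is no sane way to recover,
 * so take the filesystem down.
 */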
462void extent_io_tree_panic(struct extent_io_tree *tree, int err)
463{
464 btrfs_panic(tree_fs_info(tree), err, "Locking error: "
465 "Extent tree was modified by another "
466 "thread while locked.");
467}
468
469/*
470 * clear some bits on a range in the tree. This may require splitting
471 * or inserting elements in the tree, so the gfp mask is used to
472 * indicate which allocations or sleeping are allowed.
473 *
474 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
475 * the given range from the tree regardless of state (ie for truncate).
476 *
477 * the range [start, end] is inclusive.
478 *
479 * This takes the tree lock, and returns 0 on success and < 0 on error.
480 */
481int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
482 int bits, int wake, int delete,
483 struct extent_state **cached_state,
484 gfp_t mask)
485{
486 struct extent_state *state;
487 struct extent_state *cached;
488 struct extent_state *prealloc = NULL;
489 struct rb_node *node;
490 u64 last_end;
491 int err;
492 int clear = 0;
493
494 if (delete)
495 bits |= ~EXTENT_CTLBITS;
496 bits |= EXTENT_FIRST_DELALLOC;
497
498 if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
499 clear = 1;
500again:
501 if (!prealloc && (mask & __GFP_WAIT)) {
502 prealloc = alloc_extent_state(mask);
503 if (!prealloc)
504 return -ENOMEM;
505 }
506
507 spin_lock(&tree->lock);
508 if (cached_state) {
509 cached = *cached_state;
510
511 if (clear) {
512 *cached_state = NULL;
513 cached_state = NULL;
514 }
515
516 if (cached && cached->tree && cached->start <= start &&
517 cached->end > start) {
518 if (clear)
519 atomic_dec(&cached->refs);
520 state = cached;
521 goto hit_next;
522 }
523 if (clear)
524 free_extent_state(cached);
525 }
526 /*
527 * this search will find the extents that end after
528 * our range starts
529 */
530 node = tree_search(tree, start);
531 if (!node)
532 goto out;
533 state = rb_entry(node, struct extent_state, rb_node);
534hit_next:
535 if (state->start > end)
536 goto out;
537 WARN_ON(state->end < start);
538 last_end = state->end;
539
540 /* the state doesn't have the wanted bits, go ahead */
541 if (!(state->state & bits)) {
542 state = next_state(state);
543 goto next;
544 }
545
546 /*
547 * | ---- desired range ---- |
548 * | state | or
549 * | ------------- state -------------- |
550 *
551 * We need to split the extent we found, and may flip
552 * bits on second half.
553 *
554 * If the extent we found extends past our range, we
555 * just split and search again. It'll get split again
556 * the next time though.
557 *
558 * If the extent we found is inside our range, we clear
559 * the desired bit on it.
560 */
561
562 if (state->start < start) {
563 prealloc = alloc_extent_state_atomic(prealloc);
564 BUG_ON(!prealloc);
565 err = split_state(tree, state, prealloc, start);
566 if (err)
567 extent_io_tree_panic(tree, err);
568
569 prealloc = NULL;
570 if (err)
571 goto out;
572 if (state->end <= end) {
573 state = clear_state_bit(tree, state, &bits, wake);
574 goto next;
575 }
576 goto search_again;
577 }
578 /*
579 * | ---- desired range ---- |
580 * | state |
581 * We need to split the extent, and clear the bit
582 * on the first half
583 */
584 if (state->start <= end && state->end > end) {
585 prealloc = alloc_extent_state_atomic(prealloc);
586 BUG_ON(!prealloc);
587 err = split_state(tree, state, prealloc, end + 1);
588 if (err)
589 extent_io_tree_panic(tree, err);
590
591 if (wake)
592 wake_up(&state->wq);
593
594 clear_state_bit(tree, prealloc, &bits, wake);
595
596 prealloc = NULL;
597 goto out;
598 }
599
600 state = clear_state_bit(tree, state, &bits, wake);
601next:
602 if (last_end == (u64)-1)
603 goto out;
604 start = last_end + 1;
605 if (start <= end && state && !need_resched())
606 goto hit_next;
607 goto search_again;
608
609out:
610 spin_unlock(&tree->lock);
611 if (prealloc)
612 free_extent_state(prealloc);
613
614 return 0;
615
616search_again:
617 if (start > end)
618 goto out;
619 spin_unlock(&tree->lock);
620 if (mask & __GFP_WAIT)
621 cond_resched();
622 goto again;
623}
624
625static void wait_on_state(struct extent_io_tree *tree,
626 struct extent_state *state)
627 __releases(tree->lock)
628 __acquires(tree->lock)
629{
630 DEFINE_WAIT(wait);
631 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
632 spin_unlock(&tree->lock);
633 schedule();
634 spin_lock(&tree->lock);
635 finish_wait(&state->wq, &wait);
636}
637
638/*
639 * waits for one or more bits to clear on a range in the state tree.
640 * The range [start, end] is inclusive.
641 * The tree lock is taken by this function
642 */
643void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
644{
645 struct extent_state *state;
646 struct rb_node *node;
647
648 spin_lock(&tree->lock);
649again:
650 while (1) {
651 /*
652 * this search will find all the extents that end after
653 * our range starts
654 */
655 node = tree_search(tree, start);
656 if (!node)
657 break;
658
659 state = rb_entry(node, struct extent_state, rb_node);
660
661 if (state->start > end)
662 goto out;
663
664 if (state->state & bits) {
665 start = state->start;
666 atomic_inc(&state->refs);
667 wait_on_state(tree, state);
668 free_extent_state(state);
669 goto again;
670 }
671 start = state->end + 1;
672
673 if (start > end)
674 break;
675
676 cond_resched_lock(&tree->lock);
677 }
678out:
679 spin_unlock(&tree->lock);
680}
681
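/*
 * counterpart of clear_state_bit: set the requested bits (minus the control
 * bits) on 'state' and keep the dirty byte accounting up to date.
 */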
682static void set_state_bits(struct extent_io_tree *tree,
683 struct extent_state *state,
684 int *bits)
685{
686 int bits_to_set = *bits & ~EXTENT_CTLBITS;
687
688 set_state_cb(tree, state, bits);
689 if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
690 u64 range = state->end - state->start + 1;
691 tree->dirty_bytes += range;
692 }
693 state->state |= bits_to_set;
694}
695
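/*
 * stash a reference to 'state' in *cached_ptr if the caller wants one and
 * nothing is cached yet.  Only states carrying the IO or boundary bits are
 * cached, since those cannot be merged away underneath us.
 */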
696static void cache_state(struct extent_state *state,
697 struct extent_state **cached_ptr)
698{
699 if (cached_ptr && !(*cached_ptr)) {
700 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
701 *cached_ptr = state;
702 atomic_inc(&state->refs);
703 }
704 }
705}
706
707static void uncache_state(struct extent_state **cached_ptr)
708{
709 if (cached_ptr && (*cached_ptr)) {
710 struct extent_state *state = *cached_ptr;
711 *cached_ptr = NULL;
712 free_extent_state(state);
713 }
714}
715
716/*
717 * set some bits on a range in the tree. This may require allocations or
718 * sleeping, so the gfp mask is used to indicate what is allowed.
719 *
720 * If any of the exclusive bits are set, this will fail with -EEXIST if some
721 * part of the range already has the desired bits set. The start of the
722 * existing range is returned in failed_start in this case.
723 *
724 * [start, end] is inclusive. This takes the tree lock.
725 */
726
727static int __must_check
728__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
729 int bits, int exclusive_bits, u64 *failed_start,
730 struct extent_state **cached_state, gfp_t mask)
731{
732 struct extent_state *state;
733 struct extent_state *prealloc = NULL;
734 struct rb_node *node;
735 int err = 0;
736 u64 last_start;
737 u64 last_end;
738
739 bits |= EXTENT_FIRST_DELALLOC;
740again:
741 if (!prealloc && (mask & __GFP_WAIT)) {
742 prealloc = alloc_extent_state(mask);
743 BUG_ON(!prealloc);
744 }
745
746 spin_lock(&tree->lock);
747 if (cached_state && *cached_state) {
748 state = *cached_state;
749 if (state->start <= start && state->end > start &&
750 state->tree) {
751 node = &state->rb_node;
752 goto hit_next;
753 }
754 }
755 /*
756 * this search will find all the extents that end after
757 * our range starts.
758 */
759 node = tree_search(tree, start);
760 if (!node) {
761 prealloc = alloc_extent_state_atomic(prealloc);
762 BUG_ON(!prealloc);
763 err = insert_state(tree, prealloc, start, end, &bits);
764 if (err)
765 extent_io_tree_panic(tree, err);
766
767 prealloc = NULL;
768 goto out;
769 }
770 state = rb_entry(node, struct extent_state, rb_node);
771hit_next:
772 last_start = state->start;
773 last_end = state->end;
774
775 /*
776 * | ---- desired range ---- |
777 * | state |
778 *
779 * Just lock what we found and keep going
780 */
781 if (state->start == start && state->end <= end) {
782 if (state->state & exclusive_bits) {
783 *failed_start = state->start;
784 err = -EEXIST;
785 goto out;
786 }
787
788 set_state_bits(tree, state, &bits);
789 cache_state(state, cached_state);
790 merge_state(tree, state);
791 if (last_end == (u64)-1)
792 goto out;
793 start = last_end + 1;
794 state = next_state(state);
795 if (start < end && state && state->start == start &&
796 !need_resched())
797 goto hit_next;
798 goto search_again;
799 }
800
801 /*
802 * | ---- desired range ---- |
803 * | state |
804 * or
805 * | ------------- state -------------- |
806 *
807 * We need to split the extent we found, and may flip bits on
808 * second half.
809 *
810 * If the extent we found extends past our
811 * range, we just split and search again. It'll get split
812 * again the next time though.
813 *
814 * If the extent we found is inside our range, we set the
815 * desired bit on it.
816 */
817 if (state->start < start) {
818 if (state->state & exclusive_bits) {
819 *failed_start = start;
820 err = -EEXIST;
821 goto out;
822 }
823
824 prealloc = alloc_extent_state_atomic(prealloc);
825 BUG_ON(!prealloc);
826 err = split_state(tree, state, prealloc, start);
827 if (err)
828 extent_io_tree_panic(tree, err);
829
830 prealloc = NULL;
831 if (err)
832 goto out;
833 if (state->end <= end) {
834 set_state_bits(tree, state, &bits);
835 cache_state(state, cached_state);
836 merge_state(tree, state);
837 if (last_end == (u64)-1)
838 goto out;
839 start = last_end + 1;
840 state = next_state(state);
841 if (start < end && state && state->start == start &&
842 !need_resched())
843 goto hit_next;
844 }
845 goto search_again;
846 }
847 /*
848 * | ---- desired range ---- |
849 * | state | or | state |
850 *
851 * There's a hole, we need to insert something in it and
852 * ignore the extent we found.
853 */
854 if (state->start > start) {
855 u64 this_end;
856 if (end < last_start)
857 this_end = end;
858 else
859 this_end = last_start - 1;
860
861 prealloc = alloc_extent_state_atomic(prealloc);
862 BUG_ON(!prealloc);
863
864 /*
865 * Avoid to free 'prealloc' if it can be merged with
866 * the later extent.
867 */
868 err = insert_state(tree, prealloc, start, this_end,
869 &bits);
870 if (err)
871 extent_io_tree_panic(tree, err);
872
873 cache_state(prealloc, cached_state);
874 prealloc = NULL;
875 start = this_end + 1;
876 goto search_again;
877 }
878 /*
879 * | ---- desired range ---- |
880 * | state |
881 * We need to split the extent, and set the bit
882 * on the first half
883 */
884 if (state->start <= end && state->end > end) {
885 if (state->state & exclusive_bits) {
886 *failed_start = start;
887 err = -EEXIST;
888 goto out;
889 }
890
891 prealloc = alloc_extent_state_atomic(prealloc);
892 BUG_ON(!prealloc);
893 err = split_state(tree, state, prealloc, end + 1);
894 if (err)
895 extent_io_tree_panic(tree, err);
896
897 set_state_bits(tree, prealloc, &bits);
898 cache_state(prealloc, cached_state);
899 merge_state(tree, prealloc);
900 prealloc = NULL;
901 goto out;
902 }
903
904 goto search_again;
905
906out:
907 spin_unlock(&tree->lock);
908 if (prealloc)
909 free_extent_state(prealloc);
910
911 return err;
912
913search_again:
914 if (start > end)
915 goto out;
916 spin_unlock(&tree->lock);
917 if (mask & __GFP_WAIT)
918 cond_resched();
919 goto again;
920}
921
922int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
923 u64 *failed_start, struct extent_state **cached_state,
924 gfp_t mask)
925{
926 return __set_extent_bit(tree, start, end, bits, 0, failed_start,
927 cached_state, mask);
928}
929
930
931/**
932 * convert_extent_bit - convert all bits in a given range from one bit to another
933 * @tree: the io tree to search
934 * @start: the start offset in bytes
935 * @end: the end offset in bytes (inclusive)
936 * @bits: the bits to set in this range
937 * @clear_bits: the bits to clear in this range
938 * @mask: the allocation mask
939 *
940 * This will go through and set bits for the given range. If any states exist
941 * already in this range they are set with the given bit and cleared of the
942 * clear_bits. This is only meant to be used by things that are mergeable, ie
943 * converting from say DELALLOC to DIRTY. This is not meant to be used with
944 * boundary bits like LOCK.
945 */
946int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
947 int bits, int clear_bits, gfp_t mask)
948{
949 struct extent_state *state;
950 struct extent_state *prealloc = NULL;
951 struct rb_node *node;
952 int err = 0;
953 u64 last_start;
954 u64 last_end;
955
956again:
957 if (!prealloc && (mask & __GFP_WAIT)) {
958 prealloc = alloc_extent_state(mask);
959 if (!prealloc)
960 return -ENOMEM;
961 }
962
963 spin_lock(&tree->lock);
964 /*
965 * this search will find all the extents that end after
966 * our range starts.
967 */
968 node = tree_search(tree, start);
969 if (!node) {
970 prealloc = alloc_extent_state_atomic(prealloc);
971 if (!prealloc) {
972 err = -ENOMEM;
973 goto out;
974 }
975 err = insert_state(tree, prealloc, start, end, &bits);
976 prealloc = NULL;
977 if (err)
978 extent_io_tree_panic(tree, err);
979 goto out;
980 }
981 state = rb_entry(node, struct extent_state, rb_node);
982hit_next:
983 last_start = state->start;
984 last_end = state->end;
985
986 /*
987 * | ---- desired range ---- |
988 * | state |
989 *
990 * Just lock what we found and keep going
991 */
992 if (state->start == start && state->end <= end) {
993 set_state_bits(tree, state, &bits);
994 state = clear_state_bit(tree, state, &clear_bits, 0);
995 if (last_end == (u64)-1)
996 goto out;
997 start = last_end + 1;
998 if (start < end && state && state->start == start &&
999 !need_resched())
1000 goto hit_next;
1001 goto search_again;
1002 }
1003
1004 /*
1005 * | ---- desired range ---- |
1006 * | state |
1007 * or
1008 * | ------------- state -------------- |
1009 *
1010 * We need to split the extent we found, and may flip bits on
1011 * second half.
1012 *
1013 * If the extent we found extends past our
1014 * range, we just split and search again. It'll get split
1015 * again the next time though.
1016 *
1017 * If the extent we found is inside our range, we set the
1018 * desired bit on it.
1019 */
1020 if (state->start < start) {
1021 prealloc = alloc_extent_state_atomic(prealloc);
1022 if (!prealloc) {
1023 err = -ENOMEM;
1024 goto out;
1025 }
1026 err = split_state(tree, state, prealloc, start);
1027 if (err)
1028 extent_io_tree_panic(tree, err);
1029 prealloc = NULL;
1030 if (err)
1031 goto out;
1032 if (state->end <= end) {
1033 set_state_bits(tree, state, &bits);
1034 state = clear_state_bit(tree, state, &clear_bits, 0);
1035 if (last_end == (u64)-1)
1036 goto out;
1037 start = last_end + 1;
1038 if (start < end && state && state->start == start &&
1039 !need_resched())
1040 goto hit_next;
1041 }
1042 goto search_again;
1043 }
1044 /*
1045 * | ---- desired range ---- |
1046 * | state | or | state |
1047 *
1048 * There's a hole, we need to insert something in it and
1049 * ignore the extent we found.
1050 */
1051 if (state->start > start) {
1052 u64 this_end;
1053 if (end < last_start)
1054 this_end = end;
1055 else
1056 this_end = last_start - 1;
1057
1058 prealloc = alloc_extent_state_atomic(prealloc);
1059 if (!prealloc) {
1060 err = -ENOMEM;
1061 goto out;
1062 }
1063
1064 /*
1065 * Avoid to free 'prealloc' if it can be merged with
1066 * the later extent.
1067 */
1068 err = insert_state(tree, prealloc, start, this_end,
1069 &bits);
1070 if (err)
1071 extent_io_tree_panic(tree, err);
1072 prealloc = NULL;
1073 start = this_end + 1;
1074 goto search_again;
1075 }
1076 /*
1077 * | ---- desired range ---- |
1078 * | state |
1079 * We need to split the extent, and set the bit
1080 * on the first half
1081 */
1082 if (state->start <= end && state->end > end) {
1083 prealloc = alloc_extent_state_atomic(prealloc);
1084 if (!prealloc) {
1085 err = -ENOMEM;
1086 goto out;
1087 }
1088
1089 err = split_state(tree, state, prealloc, end + 1);
1090 if (err)
1091 extent_io_tree_panic(tree, err);
1092
1093 set_state_bits(tree, prealloc, &bits);
1094 clear_state_bit(tree, prealloc, &clear_bits, 0);
1095 prealloc = NULL;
1096 goto out;
1097 }
1098
1099 goto search_again;
1100
1101out:
1102 spin_unlock(&tree->lock);
1103 if (prealloc)
1104 free_extent_state(prealloc);
1105
1106 return err;
1107
1108search_again:
1109 if (start > end)
1110 goto out;
1111 spin_unlock(&tree->lock);
1112 if (mask & __GFP_WAIT)
1113 cond_resched();
1114 goto again;
1115}
1116
1117/* wrappers around set/clear extent bit */
1118int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1119 gfp_t mask)
1120{
1121 return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
1122 NULL, mask);
1123}
1124
1125int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1126 int bits, gfp_t mask)
1127{
1128 return set_extent_bit(tree, start, end, bits, NULL,
1129 NULL, mask);
1130}
1131
1132int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1133 int bits, gfp_t mask)
1134{
1135 return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
1136}
1137
1138int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
1139 struct extent_state **cached_state, gfp_t mask)
1140{
1141 return set_extent_bit(tree, start, end,
1142 EXTENT_DELALLOC | EXTENT_UPTODATE,
1143 NULL, cached_state, mask);
1144}
1145
1146int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1147 gfp_t mask)
1148{
1149 return clear_extent_bit(tree, start, end,
1150 EXTENT_DIRTY | EXTENT_DELALLOC |
1151 EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
1152}
1153
1154int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
1155 gfp_t mask)
1156{
1157 return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
1158 NULL, mask);
1159}
1160
1161int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1162 struct extent_state **cached_state, gfp_t mask)
1163{
1164 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
1165 cached_state, mask);
1166}
1167
1168int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1169 struct extent_state **cached_state, gfp_t mask)
1170{
1171 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
1172 cached_state, mask);
1173}
1174
1175/*
1176 * either insert or lock the state struct between start and end. The given
1177 * bits are set along with EXTENT_LOCKED, and we sleep until any conflicting
1178 * lock is released.
1178 */
1179int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1180 int bits, struct extent_state **cached_state)
1181{
1182 int err;
1183 u64 failed_start;
1184 while (1) {
1185 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
1186 EXTENT_LOCKED, &failed_start,
1187 cached_state, GFP_NOFS);
1188 if (err == -EEXIST) {
1189 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1190 start = failed_start;
1191 } else
1192 break;
1193 WARN_ON(start > end);
1194 }
1195 return err;
1196}
1197
1198int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1199{
1200 return lock_extent_bits(tree, start, end, 0, NULL);
1201}
1202
1203int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1204{
1205 int err;
1206 u64 failed_start;
1207
1208 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1209 &failed_start, NULL, GFP_NOFS);
1210 if (err == -EEXIST) {
1211 if (failed_start > start)
1212 clear_extent_bit(tree, start, failed_start - 1,
1213 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
1214 return 0;
1215 }
1216 return 1;
1217}
1218
1219int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1220 struct extent_state **cached, gfp_t mask)
1221{
1222 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
1223 mask);
1224}
1225
1226int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1227{
1228 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
1229 GFP_NOFS);
1230}
1231
1232/*
1233 * helper function to set both pages and extents in the tree writeback
1234 */
1235static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1236{
1237 unsigned long index = start >> PAGE_CACHE_SHIFT;
1238 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1239 struct page *page;
1240
1241 while (index <= end_index) {
1242 page = find_get_page(tree->mapping, index);
1243 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1244 set_page_writeback(page);
1245 page_cache_release(page);
1246 index++;
1247 }
1248 return 0;
1249}
1250
1251/* find the first state struct with 'bits' set after 'start', and
1252 * return it. tree->lock must be held. NULL will be returned if
1253 * nothing was found after 'start'
1254 */
1255struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1256 u64 start, int bits)
1257{
1258 struct rb_node *node;
1259 struct extent_state *state;
1260
1261 /*
1262 * this search will find all the extents that end after
1263 * our range starts.
1264 */
1265 node = tree_search(tree, start);
1266 if (!node)
1267 goto out;
1268
1269 while (1) {
1270 state = rb_entry(node, struct extent_state, rb_node);
1271 if (state->end >= start && (state->state & bits))
1272 return state;
1273
1274 node = rb_next(node);
1275 if (!node)
1276 break;
1277 }
1278out:
1279 return NULL;
1280}
1281
1282/*
1283 * find the first offset in the io tree with 'bits' set. zero is
1284 * returned if we find something, and *start_ret and *end_ret are
1285 * set to reflect the state struct that was found.
1286 *
1287 * If nothing was found, 1 is returned. If found something, return 0.
1288 */
1289int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1290 u64 *start_ret, u64 *end_ret, int bits)
1291{
1292 struct extent_state *state;
1293 int ret = 1;
1294
1295 spin_lock(&tree->lock);
1296 state = find_first_extent_bit_state(tree, start, bits);
1297 if (state) {
1298 *start_ret = state->start;
1299 *end_ret = state->end;
1300 ret = 0;
1301 }
1302 spin_unlock(&tree->lock);
1303 return ret;
1304}
1305
1306/*
1307 * find a contiguous range of bytes in the file marked as delalloc, not
1308 * more than 'max_bytes'. start and end are used to return the range.
1309 *
1310 * 1 is returned if we find something, 0 if nothing was in the tree
1311 */
1312static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1313 u64 *start, u64 *end, u64 max_bytes,
1314 struct extent_state **cached_state)
1315{
1316 struct rb_node *node;
1317 struct extent_state *state;
1318 u64 cur_start = *start;
1319 u64 found = 0;
1320 u64 total_bytes = 0;
1321
1322 spin_lock(&tree->lock);
1323
1324 /*
1325 * this search will find all the extents that end after
1326 * our range starts.
1327 */
1328 node = tree_search(tree, cur_start);
1329 if (!node) {
1330 if (!found)
1331 *end = (u64)-1;
1332 goto out;
1333 }
1334
1335 while (1) {
1336 state = rb_entry(node, struct extent_state, rb_node);
1337 if (found && (state->start != cur_start ||
1338 (state->state & EXTENT_BOUNDARY))) {
1339 goto out;
1340 }
1341 if (!(state->state & EXTENT_DELALLOC)) {
1342 if (!found)
1343 *end = state->end;
1344 goto out;
1345 }
1346 if (!found) {
1347 *start = state->start;
1348 *cached_state = state;
1349 atomic_inc(&state->refs);
1350 }
1351 found++;
1352 *end = state->end;
1353 cur_start = state->end + 1;
1354 node = rb_next(node);
1355 if (!node)
1356 break;
1357 total_bytes += state->end - state->start + 1;
1358 if (total_bytes >= max_bytes)
1359 break;
1360 }
1361out:
1362 spin_unlock(&tree->lock);
1363 return found;
1364}
1365
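/*
 * unlock and release every page in the byte range [start, end], except for
 * 'locked_page' which the caller keeps locked.
 */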
1366static noinline void __unlock_for_delalloc(struct inode *inode,
1367 struct page *locked_page,
1368 u64 start, u64 end)
1369{
1370 int ret;
1371 struct page *pages[16];
1372 unsigned long index = start >> PAGE_CACHE_SHIFT;
1373 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1374 unsigned long nr_pages = end_index - index + 1;
1375 int i;
1376
1377 if (index == locked_page->index && end_index == index)
1378 return;
1379
1380 while (nr_pages > 0) {
1381 ret = find_get_pages_contig(inode->i_mapping, index,
1382 min_t(unsigned long, nr_pages,
1383 ARRAY_SIZE(pages)), pages);
1384 for (i = 0; i < ret; i++) {
1385 if (pages[i] != locked_page)
1386 unlock_page(pages[i]);
1387 page_cache_release(pages[i]);
1388 }
1389 nr_pages -= ret;
1390 index += ret;
1391 cond_resched();
1392 }
1393}
1394
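/*
 * lock every page covering the delalloc range except 'locked_page', which the
 * caller has already locked.  Returns 0 on success.  If a page has been freed
 * or is no longer dirty, everything locked so far is unlocked again and
 * -EAGAIN is returned so the caller can retry with a smaller range.
 */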
1395static noinline int lock_delalloc_pages(struct inode *inode,
1396 struct page *locked_page,
1397 u64 delalloc_start,
1398 u64 delalloc_end)
1399{
1400 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1401 unsigned long start_index = index;
1402 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1403 unsigned long pages_locked = 0;
1404 struct page *pages[16];
1405 unsigned long nrpages;
1406 int ret;
1407 int i;
1408
1409 /* the caller is responsible for locking the start index */
1410 if (index == locked_page->index && index == end_index)
1411 return 0;
1412
1413 /* skip the page at the start index */
1414 nrpages = end_index - index + 1;
1415 while (nrpages > 0) {
1416 ret = find_get_pages_contig(inode->i_mapping, index,
1417 min_t(unsigned long,
1418 nrpages, ARRAY_SIZE(pages)), pages);
1419 if (ret == 0) {
1420 ret = -EAGAIN;
1421 goto done;
1422 }
1423 /* now we have an array of pages, lock them all */
1424 for (i = 0; i < ret; i++) {
1425 /*
1426 * the caller is taking responsibility for
1427 * locked_page
1428 */
1429 if (pages[i] != locked_page) {
1430 lock_page(pages[i]);
1431 if (!PageDirty(pages[i]) ||
1432 pages[i]->mapping != inode->i_mapping) {
1433 ret = -EAGAIN;
1434 unlock_page(pages[i]);
1435 page_cache_release(pages[i]);
1436 goto done;
1437 }
1438 }
1439 page_cache_release(pages[i]);
1440 pages_locked++;
1441 }
1442 nrpages -= ret;
1443 index += ret;
1444 cond_resched();
1445 }
1446 ret = 0;
1447done:
1448 if (ret && pages_locked) {
1449 __unlock_for_delalloc(inode, locked_page,
1450 delalloc_start,
1451 ((u64)(start_index + pages_locked - 1)) <<
1452 PAGE_CACHE_SHIFT);
1453 }
1454 return ret;
1455}
1456
1457/*
1458 * find a contiguous range of bytes in the file marked as delalloc, not
1459 * more than 'max_bytes'. start and end are used to return the range.
1460 *
1461 * 1 is returned if we find something, 0 if nothing was in the tree
1462 */
1463static noinline u64 find_lock_delalloc_range(struct inode *inode,
1464 struct extent_io_tree *tree,
1465 struct page *locked_page,
1466 u64 *start, u64 *end,
1467 u64 max_bytes)
1468{
1469 u64 delalloc_start;
1470 u64 delalloc_end;
1471 u64 found;
1472 struct extent_state *cached_state = NULL;
1473 int ret;
1474 int loops = 0;
1475
1476again:
1477 /* step one, find a bunch of delalloc bytes starting at start */
1478 delalloc_start = *start;
1479 delalloc_end = 0;
1480 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1481 max_bytes, &cached_state);
1482 if (!found || delalloc_end <= *start) {
1483 *start = delalloc_start;
1484 *end = delalloc_end;
1485 free_extent_state(cached_state);
1486 return found;
1487 }
1488
1489 /*
1490 * start comes from the offset of locked_page. We have to lock
1491 * pages in order, so we can't process delalloc bytes before
1492 * locked_page
1493 */
1494 if (delalloc_start < *start)
1495 delalloc_start = *start;
1496
1497 /*
1498 * make sure to limit the number of pages we try to lock down
1499 * if we're looping.
1500 */
1501 if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
1502 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
1503
1504 /* step two, lock all the pages after the page that has start */
1505 ret = lock_delalloc_pages(inode, locked_page,
1506 delalloc_start, delalloc_end);
1507 if (ret == -EAGAIN) {
1508 /* some of the pages are gone, let's avoid looping by
1509 * shortening the size of the delalloc range we're searching
1510 */
1511 free_extent_state(cached_state);
1512 if (!loops) {
1513 unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1514 max_bytes = PAGE_CACHE_SIZE - offset;
1515 loops = 1;
1516 goto again;
1517 } else {
1518 found = 0;
1519 goto out_failed;
1520 }
1521 }
1522 BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
1523
1524 /* step three, lock the state bits for the whole range */
1525 lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);
1526
1527 /* then test to make sure it is all still delalloc */
1528 ret = test_range_bit(tree, delalloc_start, delalloc_end,
1529 EXTENT_DELALLOC, 1, cached_state);
1530 if (!ret) {
1531 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1532 &cached_state, GFP_NOFS);
1533 __unlock_for_delalloc(inode, locked_page,
1534 delalloc_start, delalloc_end);
1535 cond_resched();
1536 goto again;
1537 }
1538 free_extent_state(cached_state);
1539 *start = delalloc_start;
1540 *end = delalloc_end;
1541out_failed:
1542 return found;
1543}
1544
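/*
 * clear the requested bits on the byte range in the io tree and then apply
 * the page level operations encoded in 'op' (clear dirty, set/end writeback,
 * set Private2, unlock) to every page in the range.  'locked_page' is left
 * locked and only gets the Private2 bit, if requested.
 */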
1545int extent_clear_unlock_delalloc(struct inode *inode,
1546 struct extent_io_tree *tree,
1547 u64 start, u64 end, struct page *locked_page,
1548 unsigned long op)
1549{
1550 int ret;
1551 struct page *pages[16];
1552 unsigned long index = start >> PAGE_CACHE_SHIFT;
1553 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1554 unsigned long nr_pages = end_index - index + 1;
1555 int i;
1556 int clear_bits = 0;
1557
1558 if (op & EXTENT_CLEAR_UNLOCK)
1559 clear_bits |= EXTENT_LOCKED;
1560 if (op & EXTENT_CLEAR_DIRTY)
1561 clear_bits |= EXTENT_DIRTY;
1562
1563 if (op & EXTENT_CLEAR_DELALLOC)
1564 clear_bits |= EXTENT_DELALLOC;
1565
1566 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1567 if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
1568 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
1569 EXTENT_SET_PRIVATE2)))
1570 return 0;
1571
1572 while (nr_pages > 0) {
1573 ret = find_get_pages_contig(inode->i_mapping, index,
1574 min_t(unsigned long,
1575 nr_pages, ARRAY_SIZE(pages)), pages);
1576 for (i = 0; i < ret; i++) {
1577
1578 if (op & EXTENT_SET_PRIVATE2)
1579 SetPagePrivate2(pages[i]);
1580
1581 if (pages[i] == locked_page) {
1582 page_cache_release(pages[i]);
1583 continue;
1584 }
1585 if (op & EXTENT_CLEAR_DIRTY)
1586 clear_page_dirty_for_io(pages[i]);
1587 if (op & EXTENT_SET_WRITEBACK)
1588 set_page_writeback(pages[i]);
1589 if (op & EXTENT_END_WRITEBACK)
1590 end_page_writeback(pages[i]);
1591 if (op & EXTENT_CLEAR_UNLOCK_PAGE)
1592 unlock_page(pages[i]);
1593 page_cache_release(pages[i]);
1594 }
1595 nr_pages -= ret;
1596 index += ret;
1597 cond_resched();
1598 }
1599 return 0;
1600}
1601
1602/*
1603 * count the number of bytes in the tree that have a given bit(s)
1604 * set. This can be fairly slow, except for EXTENT_DIRTY which is
1605 * cached. The total number found is returned.
1606 */
1607u64 count_range_bits(struct extent_io_tree *tree,
1608 u64 *start, u64 search_end, u64 max_bytes,
1609 unsigned long bits, int contig)
1610{
1611 struct rb_node *node;
1612 struct extent_state *state;
1613 u64 cur_start = *start;
1614 u64 total_bytes = 0;
1615 u64 last = 0;
1616 int found = 0;
1617
1618 if (search_end <= cur_start) {
1619 WARN_ON(1);
1620 return 0;
1621 }
1622
1623 spin_lock(&tree->lock);
1624 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1625 total_bytes = tree->dirty_bytes;
1626 goto out;
1627 }
1628 /*
1629 * this search will find all the extents that end after
1630 * our range starts.
1631 */
1632 node = tree_search(tree, cur_start);
1633 if (!node)
1634 goto out;
1635
1636 while (1) {
1637 state = rb_entry(node, struct extent_state, rb_node);
1638 if (state->start > search_end)
1639 break;
1640 if (contig && found && state->start > last + 1)
1641 break;
1642 if (state->end >= cur_start && (state->state & bits) == bits) {
1643 total_bytes += min(search_end, state->end) + 1 -
1644 max(cur_start, state->start);
1645 if (total_bytes >= max_bytes)
1646 break;
1647 if (!found) {
1648 *start = max(cur_start, state->start);
1649 found = 1;
1650 }
1651 last = state->end;
1652 } else if (contig && found) {
1653 break;
1654 }
1655 node = rb_next(node);
1656 if (!node)
1657 break;
1658 }
1659out:
1660 spin_unlock(&tree->lock);
1661 return total_bytes;
1662}
1663
1664/*
1665 * set the private field for a given byte offset in the tree. If there isn't
1666 * an extent_state starting at exactly that offset, -ENOENT is returned.
1667 */
1668int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1669{
1670 struct rb_node *node;
1671 struct extent_state *state;
1672 int ret = 0;
1673
1674 spin_lock(&tree->lock);
1675 /*
1676 * this search will find all the extents that end after
1677 * our range starts.
1678 */
1679 node = tree_search(tree, start);
1680 if (!node) {
1681 ret = -ENOENT;
1682 goto out;
1683 }
1684 state = rb_entry(node, struct extent_state, rb_node);
1685 if (state->start != start) {
1686 ret = -ENOENT;
1687 goto out;
1688 }
1689 state->private = private;
1690out:
1691 spin_unlock(&tree->lock);
1692 return ret;
1693}
1694
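/*
 * read back the private value stored for the byte offset 'start'.  Returns
 * -ENOENT if no extent_state starts at exactly that offset.
 */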
1695int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1696{
1697 struct rb_node *node;
1698 struct extent_state *state;
1699 int ret = 0;
1700
1701 spin_lock(&tree->lock);
1702 /*
1703 * this search will find all the extents that end after
1704 * our range starts.
1705 */
1706 node = tree_search(tree, start);
1707 if (!node) {
1708 ret = -ENOENT;
1709 goto out;
1710 }
1711 state = rb_entry(node, struct extent_state, rb_node);
1712 if (state->start != start) {
1713 ret = -ENOENT;
1714 goto out;
1715 }
1716 *private = state->private;
1717out:
1718 spin_unlock(&tree->lock);
1719 return ret;
1720}
1721
1722/*
1723 * searches a range in the state tree for a given mask.
1724 * If 'filled' == 1, this returns 1 only if every extent in the tree
1725 * has the bits set. Otherwise, 1 is returned if any bit in the
1726 * range is found set.
1727 */
1728int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1729 int bits, int filled, struct extent_state *cached)
1730{
1731 struct extent_state *state = NULL;
1732 struct rb_node *node;
1733 int bitset = 0;
1734
1735 spin_lock(&tree->lock);
1736 if (cached && cached->tree && cached->start <= start &&
1737 cached->end > start)
1738 node = &cached->rb_node;
1739 else
1740 node = tree_search(tree, start);
1741 while (node && start <= end) {
1742 state = rb_entry(node, struct extent_state, rb_node);
1743
1744 if (filled && state->start > start) {
1745 bitset = 0;
1746 break;
1747 }
1748
1749 if (state->start > end)
1750 break;
1751
1752 if (state->state & bits) {
1753 bitset = 1;
1754 if (!filled)
1755 break;
1756 } else if (filled) {
1757 bitset = 0;
1758 break;
1759 }
1760
1761 if (state->end == (u64)-1)
1762 break;
1763
1764 start = state->end + 1;
1765 if (start > end)
1766 break;
1767 node = rb_next(node);
1768 if (!node) {
1769 if (filled)
1770 bitset = 0;
1771 break;
1772 }
1773 }
1774 spin_unlock(&tree->lock);
1775 return bitset;
1776}
1777
1778/*
1779 * helper function to set a given page up to date if all the
1780 * extents in the tree for that page are up to date
1781 */
1782static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
1783{
1784 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1785 u64 end = start + PAGE_CACHE_SIZE - 1;
1786 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1787 SetPageUptodate(page);
1788}
1789
1790/*
1791 * helper function to unlock a page if all the extents in the tree
1792 * for that page are unlocked
1793 */
1794static void check_page_locked(struct extent_io_tree *tree, struct page *page)
1795{
1796 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1797 u64 end = start + PAGE_CACHE_SIZE - 1;
1798 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
1799 unlock_page(page);
1800}
1801
1802/*
1803 * helper function to end page writeback if all the extents
1804 * in the tree for that page are done with writeback
1805 */
1806static void check_page_writeback(struct extent_io_tree *tree,
1807 struct page *page)
1808{
1809 end_page_writeback(page);
1810}
1811
1812/*
1813 * When IO fails, either with EIO or csum verification fails, we
1814 * try other mirrors that might have a good copy of the data. This
1815 * io_failure_record is used to record state as we go through all the
1816 * mirrors. If another mirror has good data, the page is set up to date
1817 * and things continue. If a good mirror can't be found, the original
1818 * bio end_io callback is called to indicate things have failed.
1819 */
1820struct io_failure_record {
1821 struct page *page;
1822 u64 start;
1823 u64 len;
1824 u64 logical;
1825 unsigned long bio_flags;
1826 int this_mirror;
1827 int failed_mirror;
1828 int in_validation;
1829};
1830
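/*
 * drop an io_failure_record: clear its bits and private value from the
 * failure tree, clear EXTENT_DAMAGED from the io tree if the repair actually
 * happened, and free the record itself.
 */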
1831static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
1832 int did_repair)
1833{
1834 int ret;
1835 int err = 0;
1836 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1837
1838 set_state_private(failure_tree, rec->start, 0);
1839 ret = clear_extent_bits(failure_tree, rec->start,
1840 rec->start + rec->len - 1,
1841 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1842 if (ret)
1843 err = ret;
1844
1845 if (did_repair) {
1846 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1847 rec->start + rec->len - 1,
1848 EXTENT_DAMAGED, GFP_NOFS);
1849 if (ret && !err)
1850 err = ret;
1851 }
1852
1853 kfree(rec);
1854 return err;
1855}
1856
1857static void repair_io_failure_callback(struct bio *bio, int err)
1858{
1859 complete(bio->bi_private);
1860}
1861
1862/*
1863 * this bypasses the standard btrfs submit functions deliberately, as
1864 * the standard behavior is to write all copies in a raid setup. here we only
1865 * want to write the one bad copy. so we do the mapping for ourselves and issue
1866 * submit_bio directly.
1867 * to avoid any synchronization issues, wait for the data after writing, which
1868 * actually prevents the read that triggered the error from finishing.
1869 * currently, there can be no more than two copies of every data bit. thus,
1870 * exactly one rewrite is required.
1871 */
1872int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
1873 u64 length, u64 logical, struct page *page,
1874 int mirror_num)
1875{
1876 struct bio *bio;
1877 struct btrfs_device *dev;
1878 DECLARE_COMPLETION_ONSTACK(compl);
1879 u64 map_length = 0;
1880 u64 sector;
1881 struct btrfs_bio *bbio = NULL;
1882 int ret;
1883
1884 BUG_ON(!mirror_num);
1885
1886 bio = bio_alloc(GFP_NOFS, 1);
1887 if (!bio)
1888 return -EIO;
1889 bio->bi_private = &compl;
1890 bio->bi_end_io = repair_io_failure_callback;
1891 bio->bi_size = 0;
1892 map_length = length;
1893
1894 ret = btrfs_map_block(map_tree, WRITE, logical,
1895 &map_length, &bbio, mirror_num);
1896 if (ret) {
1897 bio_put(bio);
1898 return -EIO;
1899 }
1900 BUG_ON(mirror_num != bbio->mirror_num);
1901 sector = bbio->stripes[mirror_num-1].physical >> 9;
1902 bio->bi_sector = sector;
1903 dev = bbio->stripes[mirror_num-1].dev;
1904 kfree(bbio);
1905 if (!dev || !dev->bdev || !dev->writeable) {
1906 bio_put(bio);
1907 return -EIO;
1908 }
1909 bio->bi_bdev = dev->bdev;
1910 bio_add_page(bio, page, length, start-page_offset(page));
1911 btrfsic_submit_bio(WRITE_SYNC, bio);
1912 wait_for_completion(&compl);
1913
1914 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
1915 /* try to remap that extent elsewhere? */
1916 bio_put(bio);
1917 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
1918 return -EIO;
1919 }
1920
1921 printk_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu "
1922 "(dev %s sector %llu)\n", page->mapping->host->i_ino,
1923 start, rcu_str_deref(dev->name), sector);
1924
1925 bio_put(bio);
1926 return 0;
1927}
1928
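/*
 * write each page backing 'eb' out again via repair_io_failure() so that a
 * bad copy of this metadata block gets overwritten with the good data we
 * hold in memory.
 */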
1929int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
1930 int mirror_num)
1931{
1932 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
1933 u64 start = eb->start;
1934 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
1935 int ret = 0;
1936
1937 for (i = 0; i < num_pages; i++) {
1938 struct page *p = extent_buffer_page(eb, i);
1939 ret = repair_io_failure(map_tree, start, PAGE_CACHE_SIZE,
1940 start, p, mirror_num);
1941 if (ret)
1942 break;
1943 start += PAGE_CACHE_SIZE;
1944 }
1945
1946 return ret;
1947}
1948
1949/*
1950 * each time an IO finishes, we do a fast check in the IO failure tree
1951 * to see if we need to process or clean up an io_failure_record
1952 */
1953static int clean_io_failure(u64 start, struct page *page)
1954{
1955 u64 private;
1956 u64 private_failure;
1957 struct io_failure_record *failrec;
1958 struct btrfs_mapping_tree *map_tree;
1959 struct extent_state *state;
1960 int num_copies;
1961 int did_repair = 0;
1962 int ret;
1963 struct inode *inode = page->mapping->host;
1964
1965 private = 0;
1966 ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1967 (u64)-1, 1, EXTENT_DIRTY, 0);
1968 if (!ret)
1969 return 0;
1970
1971 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
1972 &private_failure);
1973 if (ret)
1974 return 0;
1975
1976 failrec = (struct io_failure_record *)(unsigned long) private_failure;
1977 BUG_ON(!failrec->this_mirror);
1978
1979 if (failrec->in_validation) {
1980 /* there was no real error, just free the record */
1981 pr_debug("clean_io_failure: freeing dummy error at %llu\n",
1982 failrec->start);
1983 did_repair = 1;
1984 goto out;
1985 }
1986
1987 spin_lock(&BTRFS_I(inode)->io_tree.lock);
1988 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1989 failrec->start,
1990 EXTENT_LOCKED);
1991 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1992
1993 if (state && state->start == failrec->start) {
1994 map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
1995 num_copies = btrfs_num_copies(map_tree, failrec->logical,
1996 failrec->len);
1997 if (num_copies > 1) {
1998 ret = repair_io_failure(map_tree, start, failrec->len,
1999 failrec->logical, page,
2000 failrec->failed_mirror);
2001 did_repair = !ret;
2002 }
2003 }
2004
2005out:
2006 if (!ret)
2007 ret = free_io_failure(inode, failrec, did_repair);
2008
2009 return ret;
2010}
2011
2012/*
2013 * this is a generic handler for readpage errors (default
2014 * readpage_io_failed_hook). if other copies exist, read those and write back
2015 * good data to the failed position. does not try to remap the failed
2016 * extent elsewhere, hoping the device will be smart enough to do this as
2017 * needed
2018 */
2019
2020static int bio_readpage_error(struct bio *failed_bio, struct page *page,
2021 u64 start, u64 end, int failed_mirror,
2022 struct extent_state *state)
2023{
2024 struct io_failure_record *failrec = NULL;
2025 u64 private;
2026 struct extent_map *em;
2027 struct inode *inode = page->mapping->host;
2028 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2029 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2030 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2031 struct bio *bio;
2032 int num_copies;
2033 int ret;
2034 int read_mode;
2035 u64 logical;
2036
2037 BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2038
2039 ret = get_state_private(failure_tree, start, &private);
2040 if (ret) {
2041 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2042 if (!failrec)
2043 return -ENOMEM;
2044 failrec->start = start;
2045 failrec->len = end - start + 1;
2046 failrec->this_mirror = 0;
2047 failrec->bio_flags = 0;
2048 failrec->in_validation = 0;
2049
2050 read_lock(&em_tree->lock);
2051 em = lookup_extent_mapping(em_tree, start, failrec->len);
2052 if (!em) {
2053 read_unlock(&em_tree->lock);
2054 kfree(failrec);
2055 return -EIO;
2056 }
2057
2058 if (em->start > start || em->start + em->len < start) {
2059 free_extent_map(em);
2060 em = NULL;
2061 }
2062 read_unlock(&em_tree->lock);
2063
2064 if (!em || IS_ERR(em)) {
2065 kfree(failrec);
2066 return -EIO;
2067 }
2068 logical = start - em->start;
2069 logical = em->block_start + logical;
2070 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2071 logical = em->block_start;
2072 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2073 extent_set_compress_type(&failrec->bio_flags,
2074 em->compress_type);
2075 }
2076 pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
2077 "len=%llu\n", logical, start, failrec->len);
2078 failrec->logical = logical;
2079 free_extent_map(em);
2080
2081 /* set the bits in the private failure tree */
2082 ret = set_extent_bits(failure_tree, start, end,
2083 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2084 if (ret >= 0)
2085 ret = set_state_private(failure_tree, start,
2086 (u64)(unsigned long)failrec);
2087 /* set the bits in the inode's tree */
2088 if (ret >= 0)
2089 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2090 GFP_NOFS);
2091 if (ret < 0) {
2092 kfree(failrec);
2093 return ret;
2094 }
2095 } else {
2096 failrec = (struct io_failure_record *)(unsigned long)private;
2097 pr_debug("bio_readpage_error: (found) logical=%llu, "
2098 "start=%llu, len=%llu, validation=%d\n",
2099 failrec->logical, failrec->start, failrec->len,
2100 failrec->in_validation);
2101 /*
 * when the data can exist on disk in more than two copies, add to failrec here
2103 * (e.g. with a list for failed_mirror) to make
2104 * clean_io_failure() clean all those errors at once.
2105 */
2106 }
2107 num_copies = btrfs_num_copies(
2108 &BTRFS_I(inode)->root->fs_info->mapping_tree,
2109 failrec->logical, failrec->len);
2110 if (num_copies == 1) {
2111 /*
2112 * we only have a single copy of the data, so don't bother with
2113 * all the retry and error correction code that follows. no
2114 * matter what the error is, it is very likely to persist.
2115 */
2116 pr_debug("bio_readpage_error: cannot repair, num_copies == 1. "
2117 "state=%p, num_copies=%d, next_mirror %d, "
2118 "failed_mirror %d\n", state, num_copies,
2119 failrec->this_mirror, failed_mirror);
2120 free_io_failure(inode, failrec, 0);
2121 return -EIO;
2122 }
2123
2124 if (!state) {
2125 spin_lock(&tree->lock);
2126 state = find_first_extent_bit_state(tree, failrec->start,
2127 EXTENT_LOCKED);
2128 if (state && state->start != failrec->start)
2129 state = NULL;
2130 spin_unlock(&tree->lock);
2131 }
2132
2133 /*
 * there are two goals here:
2135 * a) deliver good data to the caller
2136 * b) correct the bad sectors on disk
2137 */
2138 if (failed_bio->bi_vcnt > 1) {
2139 /*
2140 * to fulfill b), we need to know the exact failing sectors, as
2141 * we don't want to rewrite any more than the failed ones. thus,
2142 * we need separate read requests for the failed bio
2143 *
2144 * if the following BUG_ON triggers, our validation request got
2145 * merged. we need separate requests for our algorithm to work.
2146 */
2147 BUG_ON(failrec->in_validation);
2148 failrec->in_validation = 1;
2149 failrec->this_mirror = failed_mirror;
2150 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2151 } else {
2152 /*
2153 * we're ready to fulfill a) and b) alongside. get a good copy
2154 * of the failed sector and if we succeed, we have setup
2155 * everything for repair_io_failure to do the rest for us.
2156 */
2157 if (failrec->in_validation) {
2158 BUG_ON(failrec->this_mirror != failed_mirror);
2159 failrec->in_validation = 0;
2160 failrec->this_mirror = 0;
2161 }
2162 failrec->failed_mirror = failed_mirror;
2163 failrec->this_mirror++;
2164 if (failrec->this_mirror == failed_mirror)
2165 failrec->this_mirror++;
2166 read_mode = READ_SYNC;
2167 }
2168
2169 if (!state || failrec->this_mirror > num_copies) {
2170 pr_debug("bio_readpage_error: (fail) state=%p, num_copies=%d, "
2171 "next_mirror %d, failed_mirror %d\n", state,
2172 num_copies, failrec->this_mirror, failed_mirror);
2173 free_io_failure(inode, failrec, 0);
2174 return -EIO;
2175 }
2176
2177 bio = bio_alloc(GFP_NOFS, 1);
2178 if (!bio) {
2179 free_io_failure(inode, failrec, 0);
2180 return -EIO;
2181 }
2182 bio->bi_private = state;
2183 bio->bi_end_io = failed_bio->bi_end_io;
2184 bio->bi_sector = failrec->logical >> 9;
2185 bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2186 bio->bi_size = 0;
2187
2188 bio_add_page(bio, page, failrec->len, start - page_offset(page));
2189
2190 pr_debug("bio_readpage_error: submitting new read[%#x] to "
2191 "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
2192 failrec->this_mirror, num_copies, failrec->in_validation);
2193
2194 ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2195 failrec->this_mirror,
2196 failrec->bio_flags, 0);
2197 return ret;
2198}
2199
2200/* lots and lots of room for performance fixes in the end_bio funcs */
2201
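/*
 * finish writeback on a single page: give the writepage_end_io_hook a
 * chance to veto the uptodate status and flag the page with an error if
 * the IO (or the hook) failed.
 */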
2202int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2203{
2204 int uptodate = (err == 0);
2205 struct extent_io_tree *tree;
2206 int ret;
2207
2208 tree = &BTRFS_I(page->mapping->host)->io_tree;
2209
2210 if (tree->ops && tree->ops->writepage_end_io_hook) {
2211 ret = tree->ops->writepage_end_io_hook(page, start,
2212 end, NULL, uptodate);
2213 if (ret)
2214 uptodate = 0;
2215 }
2216
2217 if (!uptodate) {
2218 ClearPageUptodate(page);
2219 SetPageError(page);
2220 }
2221 return 0;
2222}
2223
2224/*
2225 * after a writepage IO is done, we need to:
2226 * clear the uptodate bits on error
2227 * clear the writeback bits in the extent tree for this IO
2228 * end_page_writeback if the page has no more pending IO
2229 *
2230 * Scheduling is not allowed, so the extent state tree is expected
2231 * to have one and only one object corresponding to this IO.
2232 */
2233static void end_bio_extent_writepage(struct bio *bio, int err)
2234{
2235 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2236 struct extent_io_tree *tree;
2237 u64 start;
2238 u64 end;
2239 int whole_page;
2240
2241 do {
2242 struct page *page = bvec->bv_page;
2243 tree = &BTRFS_I(page->mapping->host)->io_tree;
2244
2245 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2246 bvec->bv_offset;
2247 end = start + bvec->bv_len - 1;
2248
2249 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2250 whole_page = 1;
2251 else
2252 whole_page = 0;
2253
2254 if (--bvec >= bio->bi_io_vec)
2255 prefetchw(&bvec->bv_page->flags);
2256
2257 if (end_extent_writepage(page, err, start, end))
2258 continue;
2259
2260 if (whole_page)
2261 end_page_writeback(page);
2262 else
2263 check_page_writeback(tree, page);
2264 } while (bvec >= bio->bi_io_vec);
2265
2266 bio_put(bio);
2267}
2268
2269/*
2270 * after a readpage IO is done, we need to:
2271 * clear the uptodate bits on error
2272 * set the uptodate bits if things worked
2273 * set the page up to date if all extents in the tree are uptodate
2274 * clear the lock bit in the extent tree
2275 * unlock the page if there are no other extents locked for it
2276 *
2277 * Scheduling is not allowed, so the extent state tree is expected
2278 * to have one and only one object corresponding to this IO.
2279 */
2280static void end_bio_extent_readpage(struct bio *bio, int err)
2281{
2282 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
2283 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
2284 struct bio_vec *bvec = bio->bi_io_vec;
2285 struct extent_io_tree *tree;
2286 u64 start;
2287 u64 end;
2288 int whole_page;
2289 int mirror;
2290 int ret;
2291
2292 if (err)
2293 uptodate = 0;
2294
2295 do {
2296 struct page *page = bvec->bv_page;
2297 struct extent_state *cached = NULL;
2298 struct extent_state *state;
2299
2300 pr_debug("end_bio_extent_readpage: bi_vcnt=%d, idx=%d, err=%d, "
2301 "mirror=%ld\n", bio->bi_vcnt, bio->bi_idx, err,
2302 (long int)bio->bi_bdev);
2303 tree = &BTRFS_I(page->mapping->host)->io_tree;
2304
2305 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
2306 bvec->bv_offset;
2307 end = start + bvec->bv_len - 1;
2308
2309 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
2310 whole_page = 1;
2311 else
2312 whole_page = 0;
2313
2314 if (++bvec <= bvec_end)
2315 prefetchw(&bvec->bv_page->flags);
2316
2317 spin_lock(&tree->lock);
2318 state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
2319 if (state && state->start == start) {
2320 /*
2321 * take a reference on the state, unlock will drop
2322 * the ref
2323 */
2324 cache_state(state, &cached);
2325 }
2326 spin_unlock(&tree->lock);
2327
2328 mirror = (int)(unsigned long)bio->bi_bdev;
2329 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
2330 ret = tree->ops->readpage_end_io_hook(page, start, end,
2331 state, mirror);
2332 if (ret)
2333 uptodate = 0;
2334 else
2335 clean_io_failure(start, page);
2336 }
2337
2338 if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
2339 ret = tree->ops->readpage_io_failed_hook(page, mirror);
2340 if (!ret && !err &&
2341 test_bit(BIO_UPTODATE, &bio->bi_flags))
2342 uptodate = 1;
2343 } else if (!uptodate) {
2344 /*
2345 * The generic bio_readpage_error handles errors the
2346 * following way: If possible, new read requests are
2347 * created and submitted and will end up in
2348 * end_bio_extent_readpage as well (if we're lucky, not
2349 * in the !uptodate case). In that case it returns 0 and
2350 * we just go on with the next page in our bio. If it
2351 * can't handle the error it will return -EIO and we
2352 * remain responsible for that page.
2353 */
2354 ret = bio_readpage_error(bio, page, start, end, mirror, NULL);
2355 if (ret == 0) {
2356 uptodate =
2357 test_bit(BIO_UPTODATE, &bio->bi_flags);
2358 if (err)
2359 uptodate = 0;
2360 uncache_state(&cached);
2361 continue;
2362 }
2363 }
2364
2365 if (uptodate && tree->track_uptodate) {
2366 set_extent_uptodate(tree, start, end, &cached,
2367 GFP_ATOMIC);
2368 }
2369 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
2370
2371 if (whole_page) {
2372 if (uptodate) {
2373 SetPageUptodate(page);
2374 } else {
2375 ClearPageUptodate(page);
2376 SetPageError(page);
2377 }
2378 unlock_page(page);
2379 } else {
2380 if (uptodate) {
2381 check_page_uptodate(tree, page);
2382 } else {
2383 ClearPageUptodate(page);
2384 SetPageError(page);
2385 }
2386 check_page_locked(tree, page);
2387 }
2388 } while (bvec <= bvec_end);
2389
2390 bio_put(bio);
2391}
2392
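/*
 * allocate a bio aimed at 'bdev' starting at 'first_sector'.  If the
 * caller is a memalloc task and the allocation fails, retry with fewer
 * vecs before giving up.  A minimal sketch of the expected use, where
 * my_end_io is just a placeholder for the caller's completion handler:
 *
 *	bio = btrfs_bio_alloc(bdev, sector, nr_vecs, GFP_NOFS);
 *	if (bio) {
 *		bio_add_page(bio, page, len, offset);
 *		bio->bi_end_io = my_end_io;
 *		submit_bio(READ, bio);
 *	}
 */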
2393struct bio *
2394btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2395 gfp_t gfp_flags)
2396{
2397 struct bio *bio;
2398
2399 bio = bio_alloc(gfp_flags, nr_vecs);
2400
2401 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
2402 while (!bio && (nr_vecs /= 2))
2403 bio = bio_alloc(gfp_flags, nr_vecs);
2404 }
2405
2406 if (bio) {
2407 bio->bi_size = 0;
2408 bio->bi_bdev = bdev;
2409 bio->bi_sector = first_sector;
2410 }
2411 return bio;
2412}
2413
2414/*
2415 * Since writes are async, they will only return -ENOMEM.
2416 * Reads can return the full range of I/O error conditions.
2417 */
2418static int __must_check submit_one_bio(int rw, struct bio *bio,
2419 int mirror_num, unsigned long bio_flags)
2420{
2421 int ret = 0;
2422 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2423 struct page *page = bvec->bv_page;
2424 struct extent_io_tree *tree = bio->bi_private;
2425 u64 start;
2426
2427 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
2428
2429 bio->bi_private = NULL;
2430
2431 bio_get(bio);
2432
2433 if (tree->ops && tree->ops->submit_bio_hook)
2434 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
2435 mirror_num, bio_flags, start);
2436 else
2437 btrfsic_submit_bio(rw, bio);
2438
2439 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2440 ret = -EOPNOTSUPP;
2441 bio_put(bio);
2442 return ret;
2443}
2444
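/*
 * ask the tree's merge_bio_hook (if any) whether this page may be added
 * to the current bio; a non-zero return tells the caller to submit the
 * bio it has and start a new one.
 */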
2445static int merge_bio(struct extent_io_tree *tree, struct page *page,
2446 unsigned long offset, size_t size, struct bio *bio,
2447 unsigned long bio_flags)
2448{
2449 int ret = 0;
2450 if (tree->ops && tree->ops->merge_bio_hook)
2451 ret = tree->ops->merge_bio_hook(page, offset, size, bio,
2452 bio_flags);
2453 BUG_ON(ret < 0);
 return ret;
}
2457
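/*
 * try to add a page to the bio in *bio_ret.  If the page is not
 * contiguous with the existing bio, the bio flags differ, the merge hook
 * refuses, or the bio is simply full, the old bio is submitted and a new
 * one is allocated for this page.  When bio_ret is NULL the new bio is
 * submitted immediately instead of being handed back to the caller.
 */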
2458static int submit_extent_page(int rw, struct extent_io_tree *tree,
2459 struct page *page, sector_t sector,
2460 size_t size, unsigned long offset,
2461 struct block_device *bdev,
2462 struct bio **bio_ret,
2463 unsigned long max_pages,
2464 bio_end_io_t end_io_func,
2465 int mirror_num,
2466 unsigned long prev_bio_flags,
2467 unsigned long bio_flags)
2468{
2469 int ret = 0;
2470 struct bio *bio;
2471 int nr;
2472 int contig = 0;
2473 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
2474 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
2475 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
2476
2477 if (bio_ret && *bio_ret) {
2478 bio = *bio_ret;
2479 if (old_compressed)
2480 contig = bio->bi_sector == sector;
2481 else
2482 contig = bio->bi_sector + (bio->bi_size >> 9) ==
2483 sector;
2484
2485 if (prev_bio_flags != bio_flags || !contig ||
2486 merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
2487 bio_add_page(bio, page, page_size, offset) < page_size) {
2488 ret = submit_one_bio(rw, bio, mirror_num,
2489 prev_bio_flags);
2490 if (ret < 0)
2491 return ret;
2492 bio = NULL;
2493 } else {
2494 return 0;
2495 }
2496 }
2497 if (this_compressed)
2498 nr = BIO_MAX_PAGES;
2499 else
2500 nr = bio_get_nr_vecs(bdev);
2501
2502 bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
2503 if (!bio)
2504 return -ENOMEM;
2505
2506 bio_add_page(bio, page, page_size, offset);
2507 bio->bi_end_io = end_io_func;
2508 bio->bi_private = tree;
2509
2510 if (bio_ret)
2511 *bio_ret = bio;
2512 else
2513 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
2514
2515 return ret;
2516}
2517
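/*
 * attach an extent_buffer to a page via page->private, taking a page
 * reference the first time around; if the page already points at an eb
 * it had better be this one.
 */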
2518void attach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
2519{
2520 if (!PagePrivate(page)) {
2521 SetPagePrivate(page);
2522 page_cache_get(page);
2523 set_page_private(page, (unsigned long)eb);
2524 } else {
2525 WARN_ON(page->private != (unsigned long)eb);
2526 }
2527}
2528
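/*
 * mark a data page as belonging to the extent io code by setting
 * page->private to EXTENT_PAGE_PRIVATE (and pinning the page) if nothing
 * has claimed it yet.
 */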
2529void set_page_extent_mapped(struct page *page)
2530{
2531 if (!PagePrivate(page)) {
2532 SetPagePrivate(page);
2533 page_cache_get(page);
2534 set_page_private(page, EXTENT_PAGE_PRIVATE);
2535 }
2536}
2537
2538/*
2539 * basic readpage implementation. Locked extent state structs are inserted
2540 * into the tree that are removed when the IO is done (by the end_io
2541 * handlers)
2542 * XXX JDM: This needs looking at to ensure proper page locking
2543 */
2544static int __extent_read_full_page(struct extent_io_tree *tree,
2545 struct page *page,
2546 get_extent_t *get_extent,
2547 struct bio **bio, int mirror_num,
2548 unsigned long *bio_flags)
2549{
2550 struct inode *inode = page->mapping->host;
2551 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2552 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2553 u64 end;
2554 u64 cur = start;
2555 u64 extent_offset;
2556 u64 last_byte = i_size_read(inode);
2557 u64 block_start;
2558 u64 cur_end;
2559 sector_t sector;
2560 struct extent_map *em;
2561 struct block_device *bdev;
2562 struct btrfs_ordered_extent *ordered;
2563 int ret;
2564 int nr = 0;
2565 size_t pg_offset = 0;
2566 size_t iosize;
2567 size_t disk_io_size;
2568 size_t blocksize = inode->i_sb->s_blocksize;
2569 unsigned long this_bio_flag = 0;
2570
2571 set_page_extent_mapped(page);
2572
2573 if (!PageUptodate(page)) {
2574 if (cleancache_get_page(page) == 0) {
2575 BUG_ON(blocksize != PAGE_SIZE);
2576 goto out;
2577 }
2578 }
2579
2580 end = page_end;
2581 while (1) {
2582 lock_extent(tree, start, end);
2583 ordered = btrfs_lookup_ordered_extent(inode, start);
2584 if (!ordered)
2585 break;
2586 unlock_extent(tree, start, end);
2587 btrfs_start_ordered_extent(inode, ordered, 1);
2588 btrfs_put_ordered_extent(ordered);
2589 }
2590
2591 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2592 char *userpage;
2593 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2594
2595 if (zero_offset) {
2596 iosize = PAGE_CACHE_SIZE - zero_offset;
2597 userpage = kmap_atomic(page);
2598 memset(userpage + zero_offset, 0, iosize);
2599 flush_dcache_page(page);
2600 kunmap_atomic(userpage);
2601 }
2602 }
2603 while (cur <= end) {
2604 if (cur >= last_byte) {
2605 char *userpage;
2606 struct extent_state *cached = NULL;
2607
2608 iosize = PAGE_CACHE_SIZE - pg_offset;
2609 userpage = kmap_atomic(page);
2610 memset(userpage + pg_offset, 0, iosize);
2611 flush_dcache_page(page);
2612 kunmap_atomic(userpage);
2613 set_extent_uptodate(tree, cur, cur + iosize - 1,
2614 &cached, GFP_NOFS);
2615 unlock_extent_cached(tree, cur, cur + iosize - 1,
2616 &cached, GFP_NOFS);
2617 break;
2618 }
2619 em = get_extent(inode, page, pg_offset, cur,
2620 end - cur + 1, 0);
2621 if (IS_ERR_OR_NULL(em)) {
2622 SetPageError(page);
2623 unlock_extent(tree, cur, end);
2624 break;
2625 }
2626 extent_offset = cur - em->start;
2627 BUG_ON(extent_map_end(em) <= cur);
2628 BUG_ON(end < cur);
2629
2630 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2631 this_bio_flag = EXTENT_BIO_COMPRESSED;
2632 extent_set_compress_type(&this_bio_flag,
2633 em->compress_type);
2634 }
2635
2636 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2637 cur_end = min(extent_map_end(em) - 1, end);
2638 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2639 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2640 disk_io_size = em->block_len;
2641 sector = em->block_start >> 9;
2642 } else {
2643 sector = (em->block_start + extent_offset) >> 9;
2644 disk_io_size = iosize;
2645 }
2646 bdev = em->bdev;
2647 block_start = em->block_start;
2648 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2649 block_start = EXTENT_MAP_HOLE;
2650 free_extent_map(em);
2651 em = NULL;
2652
2653 /* we've found a hole, just zero and go on */
2654 if (block_start == EXTENT_MAP_HOLE) {
2655 char *userpage;
2656 struct extent_state *cached = NULL;
2657
2658 userpage = kmap_atomic(page);
2659 memset(userpage + pg_offset, 0, iosize);
2660 flush_dcache_page(page);
2661 kunmap_atomic(userpage);
2662
2663 set_extent_uptodate(tree, cur, cur + iosize - 1,
2664 &cached, GFP_NOFS);
2665 unlock_extent_cached(tree, cur, cur + iosize - 1,
2666 &cached, GFP_NOFS);
2667 cur = cur + iosize;
2668 pg_offset += iosize;
2669 continue;
2670 }
2671 /* the get_extent function already copied into the page */
2672 if (test_range_bit(tree, cur, cur_end,
2673 EXTENT_UPTODATE, 1, NULL)) {
2674 check_page_uptodate(tree, page);
2675 unlock_extent(tree, cur, cur + iosize - 1);
2676 cur = cur + iosize;
2677 pg_offset += iosize;
2678 continue;
2679 }
2680 /* we have an inline extent but it didn't get marked up
2681 * to date. Error out
2682 */
2683 if (block_start == EXTENT_MAP_INLINE) {
2684 SetPageError(page);
2685 unlock_extent(tree, cur, cur + iosize - 1);
2686 cur = cur + iosize;
2687 pg_offset += iosize;
2688 continue;
2689 }
2690
2691 ret = 0;
2692 if (tree->ops && tree->ops->readpage_io_hook) {
2693 ret = tree->ops->readpage_io_hook(page, cur,
2694 cur + iosize - 1);
2695 }
2696 if (!ret) {
2697 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2698 pnr -= page->index;
2699 ret = submit_extent_page(READ, tree, page,
2700 sector, disk_io_size, pg_offset,
2701 bdev, bio, pnr,
2702 end_bio_extent_readpage, mirror_num,
2703 *bio_flags,
2704 this_bio_flag);
2705 BUG_ON(ret == -ENOMEM);
2706 nr++;
2707 *bio_flags = this_bio_flag;
2708 }
2709 if (ret)
2710 SetPageError(page);
2711 cur = cur + iosize;
2712 pg_offset += iosize;
2713 }
2714out:
2715 if (!nr) {
2716 if (!PageError(page))
2717 SetPageUptodate(page);
2718 unlock_page(page);
2719 }
2720 return 0;
2721}
2722
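/*
 * entry point for reading a single page: run __extent_read_full_page and
 * submit whatever bio it built up before returning.
 */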
2723int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2724 get_extent_t *get_extent, int mirror_num)
2725{
2726 struct bio *bio = NULL;
2727 unsigned long bio_flags = 0;
2728 int ret;
2729
2730 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
2731 &bio_flags);
2732 if (bio)
2733 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
2734 return ret;
2735}
2736
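/*
 * charge nr_written pages against wbc->nr_to_write and, for sweeping
 * (cyclic or whole-file) writeback, advance the mapping's
 * writeback_index past the pages we just wrote.
 */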
2737static noinline void update_nr_written(struct page *page,
2738 struct writeback_control *wbc,
2739 unsigned long nr_written)
2740{
2741 wbc->nr_to_write -= nr_written;
2742 if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2743 wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2744 page->mapping->writeback_index = page->index + nr_written;
2745}
2746
2747/*
2748 * the writepage semantics are similar to regular writepage. extent
2749 * records are inserted to lock ranges in the tree, and as dirty areas
2750 * are found, they are marked writeback. Then the lock bits are removed
2751 * and the end_io handler clears the writeback ranges
2752 */
2753static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2754 void *data)
2755{
2756 struct inode *inode = page->mapping->host;
2757 struct extent_page_data *epd = data;
2758 struct extent_io_tree *tree = epd->tree;
2759 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2760 u64 delalloc_start;
2761 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2762 u64 end;
2763 u64 cur = start;
2764 u64 extent_offset;
2765 u64 last_byte = i_size_read(inode);
2766 u64 block_start;
2767 u64 iosize;
2768 sector_t sector;
2769 struct extent_state *cached_state = NULL;
2770 struct extent_map *em;
2771 struct block_device *bdev;
2772 int ret;
2773 int nr = 0;
2774 size_t pg_offset = 0;
2775 size_t blocksize;
2776 loff_t i_size = i_size_read(inode);
2777 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2778 u64 nr_delalloc;
2779 u64 delalloc_end;
2780 int page_started;
2781 int compressed;
2782 int write_flags;
2783 unsigned long nr_written = 0;
2784 bool fill_delalloc = true;
2785
2786 if (wbc->sync_mode == WB_SYNC_ALL)
2787 write_flags = WRITE_SYNC;
2788 else
2789 write_flags = WRITE;
2790
2791 trace___extent_writepage(page, inode, wbc);
2792
2793 WARN_ON(!PageLocked(page));
2794
2795 ClearPageError(page);
2796
2797 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2798 if (page->index > end_index ||
2799 (page->index == end_index && !pg_offset)) {
2800 page->mapping->a_ops->invalidatepage(page, 0);
2801 unlock_page(page);
2802 return 0;
2803 }
2804
2805 if (page->index == end_index) {
2806 char *userpage;
2807
2808 userpage = kmap_atomic(page);
2809 memset(userpage + pg_offset, 0,
2810 PAGE_CACHE_SIZE - pg_offset);
2811 kunmap_atomic(userpage);
2812 flush_dcache_page(page);
2813 }
2814 pg_offset = 0;
2815
2816 set_page_extent_mapped(page);
2817
2818 if (!tree->ops || !tree->ops->fill_delalloc)
2819 fill_delalloc = false;
2820
2821 delalloc_start = start;
2822 delalloc_end = 0;
2823 page_started = 0;
2824 if (!epd->extent_locked && fill_delalloc) {
2825 u64 delalloc_to_write = 0;
2826 /*
2827 * make sure the wbc mapping index is at least updated
2828 * to this page.
2829 */
2830 update_nr_written(page, wbc, 0);
2831
2832 while (delalloc_end < page_end) {
2833 nr_delalloc = find_lock_delalloc_range(inode, tree,
2834 page,
2835 &delalloc_start,
2836 &delalloc_end,
2837 128 * 1024 * 1024);
2838 if (nr_delalloc == 0) {
2839 delalloc_start = delalloc_end + 1;
2840 continue;
2841 }
2842 ret = tree->ops->fill_delalloc(inode, page,
2843 delalloc_start,
2844 delalloc_end,
2845 &page_started,
2846 &nr_written);
2847 /* File system has been set read-only */
2848 if (ret) {
2849 SetPageError(page);
2850 goto done;
2851 }
2852 /*
2853 * delalloc_end is already one less than the total
2854 * length, so we don't subtract one from
2855 * PAGE_CACHE_SIZE
2856 */
2857 delalloc_to_write += (delalloc_end - delalloc_start +
2858 PAGE_CACHE_SIZE) >>
2859 PAGE_CACHE_SHIFT;
2860 delalloc_start = delalloc_end + 1;
2861 }
2862 if (wbc->nr_to_write < delalloc_to_write) {
2863 int thresh = 8192;
2864
2865 if (delalloc_to_write < thresh * 2)
2866 thresh = delalloc_to_write;
2867 wbc->nr_to_write = min_t(u64, delalloc_to_write,
2868 thresh);
2869 }
2870
2871 /* did the fill delalloc function already unlock and start
2872 * the IO?
2873 */
2874 if (page_started) {
2875 ret = 0;
2876 /*
2877 * we've unlocked the page, so we can't update
2878 * the mapping's writeback index, just update
2879 * nr_to_write.
2880 */
2881 wbc->nr_to_write -= nr_written;
2882 goto done_unlocked;
2883 }
2884 }
2885 if (tree->ops && tree->ops->writepage_start_hook) {
2886 ret = tree->ops->writepage_start_hook(page, start,
2887 page_end);
2888 if (ret) {
2889 /* Fixup worker will requeue */
2890 if (ret == -EBUSY)
2891 wbc->pages_skipped++;
2892 else
2893 redirty_page_for_writepage(wbc, page);
2894 update_nr_written(page, wbc, nr_written);
2895 unlock_page(page);
2896 ret = 0;
2897 goto done_unlocked;
2898 }
2899 }
2900
2901 /*
2902 * we don't want to touch the inode after unlocking the page,
2903 * so we update the mapping writeback index now
2904 */
2905 update_nr_written(page, wbc, nr_written + 1);
2906
2907 end = page_end;
2908 if (last_byte <= start) {
2909 if (tree->ops && tree->ops->writepage_end_io_hook)
2910 tree->ops->writepage_end_io_hook(page, start,
2911 page_end, NULL, 1);
2912 goto done;
2913 }
2914
2915 blocksize = inode->i_sb->s_blocksize;
2916
2917 while (cur <= end) {
2918 if (cur >= last_byte) {
2919 if (tree->ops && tree->ops->writepage_end_io_hook)
2920 tree->ops->writepage_end_io_hook(page, cur,
2921 page_end, NULL, 1);
2922 break;
2923 }
2924 em = epd->get_extent(inode, page, pg_offset, cur,
2925 end - cur + 1, 1);
2926 if (IS_ERR_OR_NULL(em)) {
2927 SetPageError(page);
2928 break;
2929 }
2930
2931 extent_offset = cur - em->start;
2932 BUG_ON(extent_map_end(em) <= cur);
2933 BUG_ON(end < cur);
2934 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2935 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2936 sector = (em->block_start + extent_offset) >> 9;
2937 bdev = em->bdev;
2938 block_start = em->block_start;
2939 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2940 free_extent_map(em);
2941 em = NULL;
2942
2943 /*
2944 * compressed and inline extents are written through other
2945 * paths in the FS
2946 */
2947 if (compressed || block_start == EXTENT_MAP_HOLE ||
2948 block_start == EXTENT_MAP_INLINE) {
2949 /*
2950 * end_io notification does not happen here for
2951 * compressed extents
2952 */
2953 if (!compressed && tree->ops &&
2954 tree->ops->writepage_end_io_hook)
2955 tree->ops->writepage_end_io_hook(page, cur,
2956 cur + iosize - 1,
2957 NULL, 1);
2958 else if (compressed) {
2959 /* we don't want to end_page_writeback on
2960 * a compressed extent. this happens
2961 * elsewhere
2962 */
2963 nr++;
2964 }
2965
2966 cur += iosize;
2967 pg_offset += iosize;
2968 continue;
2969 }
2970 /* leave this out until we have a page_mkwrite call */
2971 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2972 EXTENT_DIRTY, 0, NULL)) {
2973 cur = cur + iosize;
2974 pg_offset += iosize;
2975 continue;
2976 }
2977
2978 if (tree->ops && tree->ops->writepage_io_hook) {
2979 ret = tree->ops->writepage_io_hook(page, cur,
2980 cur + iosize - 1);
2981 } else {
2982 ret = 0;
2983 }
2984 if (ret) {
2985 SetPageError(page);
2986 } else {
2987 unsigned long max_nr = end_index + 1;
2988
2989 set_range_writeback(tree, cur, cur + iosize - 1);
2990 if (!PageWriteback(page)) {
2991 printk(KERN_ERR "btrfs warning page %lu not "
2992 "writeback, cur %llu end %llu\n",
2993 page->index, (unsigned long long)cur,
2994 (unsigned long long)end);
2995 }
2996
2997 ret = submit_extent_page(write_flags, tree, page,
2998 sector, iosize, pg_offset,
2999 bdev, &epd->bio, max_nr,
3000 end_bio_extent_writepage,
3001 0, 0, 0);
3002 if (ret)
3003 SetPageError(page);
3004 }
3005 cur = cur + iosize;
3006 pg_offset += iosize;
3007 nr++;
3008 }
3009done:
3010 if (nr == 0) {
3011 /* make sure the mapping tag for page dirty gets cleared */
3012 set_page_writeback(page);
3013 end_page_writeback(page);
3014 }
3015 unlock_page(page);
3016
3017done_unlocked:
3018
3019 /* drop our reference on any cached states */
3020 free_extent_state(cached_state);
3021 return 0;
3022}
3023
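/*
 * helpers for waiting on EXTENT_BUFFER_WRITEBACK to clear; eb_wait is
 * the wait_on_bit action that simply lets the scheduler run.
 */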
3024static int eb_wait(void *word)
3025{
3026 io_schedule();
3027 return 0;
3028}
3029
3030static void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3031{
3032 wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
3033 TASK_UNINTERRUPTIBLE);
3034}
3035
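/*
 * prepare an extent buffer for writeback: take the eb's tree write lock
 * (flushing our pending bio if we have to block), for synchronous writes
 * wait out any writeback already in flight, and transfer the DIRTY bit
 * into WRITEBACK.  Returns 1 with all of the eb's pages locked when the
 * caller should write the buffer out, 0 when there is nothing to do.
 */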
3036static int lock_extent_buffer_for_io(struct extent_buffer *eb,
3037 struct btrfs_fs_info *fs_info,
3038 struct extent_page_data *epd)
3039{
3040 unsigned long i, num_pages;
3041 int flush = 0;
3042 int ret = 0;
3043
3044 if (!btrfs_try_tree_write_lock(eb)) {
3045 flush = 1;
3046 flush_write_bio(epd);
3047 btrfs_tree_lock(eb);
3048 }
3049
3050 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3051 btrfs_tree_unlock(eb);
3052 if (!epd->sync_io)
3053 return 0;
3054 if (!flush) {
3055 flush_write_bio(epd);
3056 flush = 1;
3057 }
3058 while (1) {
3059 wait_on_extent_buffer_writeback(eb);
3060 btrfs_tree_lock(eb);
3061 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3062 break;
3063 btrfs_tree_unlock(eb);
3064 }
3065 }
3066
3067 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3068 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3069 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3070 spin_lock(&fs_info->delalloc_lock);
3071 if (fs_info->dirty_metadata_bytes >= eb->len)
3072 fs_info->dirty_metadata_bytes -= eb->len;
3073 else
3074 WARN_ON(1);
3075 spin_unlock(&fs_info->delalloc_lock);
3076 ret = 1;
3077 }
3078
3079 btrfs_tree_unlock(eb);
3080
3081 if (!ret)
3082 return ret;
3083
3084 num_pages = num_extent_pages(eb->start, eb->len);
3085 for (i = 0; i < num_pages; i++) {
3086 struct page *p = extent_buffer_page(eb, i);
3087
3088 if (!trylock_page(p)) {
3089 if (!flush) {
3090 flush_write_bio(epd);
3091 flush = 1;
3092 }
3093 lock_page(p);
3094 }
3095 }
3096
3097 return ret;
3098}
3099
3100static void end_extent_buffer_writeback(struct extent_buffer *eb)
3101{
3102 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3103 smp_mb__after_clear_bit();
3104 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3105}
3106
3107static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
3108{
3109 int uptodate = err == 0;
3110 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
3111 struct extent_buffer *eb;
3112 int done;
3113
3114 do {
3115 struct page *page = bvec->bv_page;
3116
3117 bvec--;
3118 eb = (struct extent_buffer *)page->private;
3119 BUG_ON(!eb);
3120 done = atomic_dec_and_test(&eb->io_pages);
3121
3122 if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
3123 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3124 ClearPageUptodate(page);
3125 SetPageError(page);
3126 }
3127
3128 end_page_writeback(page);
3129
3130 if (!done)
3131 continue;
3132
3133 end_extent_buffer_writeback(eb);
3134 } while (bvec >= bio->bi_io_vec);
3135
 bio_put(bio);
}
3139
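/*
 * write a single extent buffer: mark each of its pages writeback and
 * feed them to submit_extent_page.  On error the buffer is flagged with
 * EXTENT_BUFFER_IOERR and any pages not yet submitted are unlocked.
 */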
3140static int write_one_eb(struct extent_buffer *eb,
3141 struct btrfs_fs_info *fs_info,
3142 struct writeback_control *wbc,
3143 struct extent_page_data *epd)
3144{
3145 struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3146 u64 offset = eb->start;
3147 unsigned long i, num_pages;
3148 int rw = (epd->sync_io ? WRITE_SYNC : WRITE);
3149 int ret = 0;
3150
3151 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3152 num_pages = num_extent_pages(eb->start, eb->len);
3153 atomic_set(&eb->io_pages, num_pages);
3154 for (i = 0; i < num_pages; i++) {
3155 struct page *p = extent_buffer_page(eb, i);
3156
3157 clear_page_dirty_for_io(p);
3158 set_page_writeback(p);
3159 ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
3160 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
3161 -1, end_bio_extent_buffer_writepage,
3162 0, 0, 0);
3163 if (ret) {
3164 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3165 SetPageError(p);
3166 if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3167 end_extent_buffer_writeback(eb);
3168 ret = -EIO;
3169 break;
3170 }
3171 offset += PAGE_CACHE_SIZE;
3172 update_nr_written(p, wbc, 1);
3173 unlock_page(p);
3174 }
3175
3176 if (unlikely(ret)) {
3177 for (; i < num_pages; i++) {
3178 struct page *p = extent_buffer_page(eb, i);
3179 unlock_page(p);
3180 }
3181 }
3182
3183 return ret;
3184}
3185
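/*
 * btree equivalent of write_cache_pages: walk the dirty pages of the
 * btree inode, find the extent buffer attached to each one and write
 * whole buffers out via write_one_eb.
 */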
3186int btree_write_cache_pages(struct address_space *mapping,
3187 struct writeback_control *wbc)
3188{
3189 struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3190 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3191 struct extent_buffer *eb, *prev_eb = NULL;
3192 struct extent_page_data epd = {
3193 .bio = NULL,
3194 .tree = tree,
3195 .extent_locked = 0,
3196 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3197 };
3198 int ret = 0;
3199 int done = 0;
3200 int nr_to_write_done = 0;
3201 struct pagevec pvec;
3202 int nr_pages;
3203 pgoff_t index;
3204 pgoff_t end; /* Inclusive */
3205 int scanned = 0;
3206 int tag;
3207
3208 pagevec_init(&pvec, 0);
3209 if (wbc->range_cyclic) {
3210 index = mapping->writeback_index; /* Start from prev offset */
3211 end = -1;
3212 } else {
3213 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3214 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3215 scanned = 1;
3216 }
3217 if (wbc->sync_mode == WB_SYNC_ALL)
3218 tag = PAGECACHE_TAG_TOWRITE;
3219 else
3220 tag = PAGECACHE_TAG_DIRTY;
3221retry:
3222 if (wbc->sync_mode == WB_SYNC_ALL)
3223 tag_pages_for_writeback(mapping, index, end);
3224 while (!done && !nr_to_write_done && (index <= end) &&
3225 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3226 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3227 unsigned i;
3228
3229 scanned = 1;
3230 for (i = 0; i < nr_pages; i++) {
3231 struct page *page = pvec.pages[i];
3232
3233 if (!PagePrivate(page))
3234 continue;
3235
3236 if (!wbc->range_cyclic && page->index > end) {
3237 done = 1;
3238 break;
3239 }
3240
3241 eb = (struct extent_buffer *)page->private;
3242 if (!eb) {
3243 WARN_ON(1);
3244 continue;
3245 }
3246
3247 if (eb == prev_eb)
3248 continue;
3249
3250 if (!atomic_inc_not_zero(&eb->refs)) {
3251 WARN_ON(1);
3252 continue;
3253 }
3254
3255 prev_eb = eb;
3256 ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3257 if (!ret) {
3258 free_extent_buffer(eb);
3259 continue;
3260 }
3261
3262 ret = write_one_eb(eb, fs_info, wbc, &epd);
3263 if (ret) {
3264 done = 1;
3265 free_extent_buffer(eb);
3266 break;
3267 }
3268 free_extent_buffer(eb);
3269
3270 /*
3271 * the filesystem may choose to bump up nr_to_write.
3272 * We have to make sure to honor the new nr_to_write
3273 * at any time
3274 */
3275 nr_to_write_done = wbc->nr_to_write <= 0;
3276 }
3277 pagevec_release(&pvec);
3278 cond_resched();
3279 }
3280 if (!scanned && !done) {
3281 /*
3282 * We hit the last page and there is more work to be done: wrap
3283 * back to the start of the file
3284 */
3285 scanned = 1;
3286 index = 0;
3287 goto retry;
3288 }
3289 flush_write_bio(&epd);
3290 return ret;
3291}
3292
3293/**
 * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
 * @tree: the extent_io_tree the pages belong to
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 * @flush_fn: called to submit any bio built up so far before we block
3299 *
3300 * If a page is already under I/O, write_cache_pages() skips it, even
3301 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
3302 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
3303 * and msync() need to guarantee that all the data which was dirty at the time
3304 * the call was made get new I/O started against them. If wbc->sync_mode is
3305 * WB_SYNC_ALL then we were called for data integrity and we must wait for
3306 * existing IO to complete.
3307 */
3308static int extent_write_cache_pages(struct extent_io_tree *tree,
3309 struct address_space *mapping,
3310 struct writeback_control *wbc,
3311 writepage_t writepage, void *data,
3312 void (*flush_fn)(void *))
3313{
3314 struct inode *inode = mapping->host;
3315 int ret = 0;
3316 int done = 0;
3317 int nr_to_write_done = 0;
3318 struct pagevec pvec;
3319 int nr_pages;
3320 pgoff_t index;
3321 pgoff_t end; /* Inclusive */
3322 int scanned = 0;
3323 int tag;
3324
3325 /*
3326 * We have to hold onto the inode so that ordered extents can do their
3327 * work when the IO finishes. The alternative to this is failing to add
3328 * an ordered extent if the igrab() fails there and that is a huge pain
3329 * to deal with, so instead just hold onto the inode throughout the
3330 * writepages operation. If it fails here we are freeing up the inode
 * anyway and we'd rather not waste our time writing out stuff that is
 * just going to be truncated.
3333 */
3334 if (!igrab(inode))
3335 return 0;
3336
3337 pagevec_init(&pvec, 0);
3338 if (wbc->range_cyclic) {
3339 index = mapping->writeback_index; /* Start from prev offset */
3340 end = -1;
3341 } else {
3342 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3343 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3344 scanned = 1;
3345 }
3346 if (wbc->sync_mode == WB_SYNC_ALL)
3347 tag = PAGECACHE_TAG_TOWRITE;
3348 else
3349 tag = PAGECACHE_TAG_DIRTY;
3350retry:
3351 if (wbc->sync_mode == WB_SYNC_ALL)
3352 tag_pages_for_writeback(mapping, index, end);
3353 while (!done && !nr_to_write_done && (index <= end) &&
3354 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3355 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3356 unsigned i;
3357
3358 scanned = 1;
3359 for (i = 0; i < nr_pages; i++) {
3360 struct page *page = pvec.pages[i];
3361
3362 /*
3363 * At this point we hold neither mapping->tree_lock nor
3364 * lock on the page itself: the page may be truncated or
3365 * invalidated (changing page->mapping to NULL), or even
3366 * swizzled back from swapper_space to tmpfs file
3367 * mapping
3368 */
3369 if (tree->ops &&
3370 tree->ops->write_cache_pages_lock_hook) {
3371 tree->ops->write_cache_pages_lock_hook(page,
3372 data, flush_fn);
3373 } else {
3374 if (!trylock_page(page)) {
3375 flush_fn(data);
3376 lock_page(page);
3377 }
3378 }
3379
3380 if (unlikely(page->mapping != mapping)) {
3381 unlock_page(page);
3382 continue;
3383 }
3384
3385 if (!wbc->range_cyclic && page->index > end) {
3386 done = 1;
3387 unlock_page(page);
3388 continue;
3389 }
3390
3391 if (wbc->sync_mode != WB_SYNC_NONE) {
3392 if (PageWriteback(page))
3393 flush_fn(data);
3394 wait_on_page_writeback(page);
3395 }
3396
3397 if (PageWriteback(page) ||
3398 !clear_page_dirty_for_io(page)) {
3399 unlock_page(page);
3400 continue;
3401 }
3402
3403 ret = (*writepage)(page, wbc, data);
3404
3405 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
3406 unlock_page(page);
3407 ret = 0;
3408 }
3409 if (ret)
3410 done = 1;
3411
3412 /*
3413 * the filesystem may choose to bump up nr_to_write.
3414 * We have to make sure to honor the new nr_to_write
3415 * at any time
3416 */
3417 nr_to_write_done = wbc->nr_to_write <= 0;
3418 }
3419 pagevec_release(&pvec);
3420 cond_resched();
3421 }
3422 if (!scanned && !done) {
3423 /*
3424 * We hit the last page and there is more work to be done: wrap
3425 * back to the start of the file
3426 */
3427 scanned = 1;
3428 index = 0;
3429 goto retry;
3430 }
3431 btrfs_add_delayed_iput(inode);
3432 return ret;
3433}
3434
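/*
 * submit whatever bio has been built up in the extent_page_data,
 * upgrading to WRITE_SYNC when the caller asked for synchronous IO.
 */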
3435static void flush_epd_write_bio(struct extent_page_data *epd)
3436{
3437 if (epd->bio) {
3438 int rw = WRITE;
3439 int ret;
3440
3441 if (epd->sync_io)
3442 rw = WRITE_SYNC;
3443
3444 ret = submit_one_bio(rw, epd->bio, 0, 0);
3445 BUG_ON(ret < 0); /* -ENOMEM */
3446 epd->bio = NULL;
3447 }
3448}
3449
3450static noinline void flush_write_bio(void *data)
3451{
3452 struct extent_page_data *epd = data;
3453 flush_epd_write_bio(epd);
3454}
3455
3456int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
3457 get_extent_t *get_extent,
3458 struct writeback_control *wbc)
3459{
3460 int ret;
3461 struct extent_page_data epd = {
3462 .bio = NULL,
3463 .tree = tree,
3464 .get_extent = get_extent,
3465 .extent_locked = 0,
3466 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3467 };
3468
3469 ret = __extent_writepage(page, wbc, &epd);
3470
3471 flush_epd_write_bio(&epd);
3472 return ret;
3473}
3474
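/*
 * write out a range of pages for which the caller already holds the
 * extent state locks (epd.extent_locked is set, so __extent_writepage
 * only does the unlocking); pages that are no longer dirty just get
 * their end_io hook called.
 */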
3475int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
3476 u64 start, u64 end, get_extent_t *get_extent,
3477 int mode)
3478{
3479 int ret = 0;
3480 struct address_space *mapping = inode->i_mapping;
3481 struct page *page;
3482 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
3483 PAGE_CACHE_SHIFT;
3484
3485 struct extent_page_data epd = {
3486 .bio = NULL,
3487 .tree = tree,
3488 .get_extent = get_extent,
3489 .extent_locked = 1,
3490 .sync_io = mode == WB_SYNC_ALL,
3491 };
3492 struct writeback_control wbc_writepages = {
3493 .sync_mode = mode,
3494 .nr_to_write = nr_pages * 2,
3495 .range_start = start,
3496 .range_end = end + 1,
3497 };
3498
3499 while (start <= end) {
3500 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
3501 if (clear_page_dirty_for_io(page))
3502 ret = __extent_writepage(page, &wbc_writepages, &epd);
3503 else {
3504 if (tree->ops && tree->ops->writepage_end_io_hook)
3505 tree->ops->writepage_end_io_hook(page, start,
3506 start + PAGE_CACHE_SIZE - 1,
3507 NULL, 1);
3508 unlock_page(page);
3509 }
3510 page_cache_release(page);
3511 start += PAGE_CACHE_SIZE;
3512 }
3513
3514 flush_epd_write_bio(&epd);
3515 return ret;
3516}
3517
3518int extent_writepages(struct extent_io_tree *tree,
3519 struct address_space *mapping,
3520 get_extent_t *get_extent,
3521 struct writeback_control *wbc)
3522{
3523 int ret = 0;
3524 struct extent_page_data epd = {
3525 .bio = NULL,
3526 .tree = tree,
3527 .get_extent = get_extent,
3528 .extent_locked = 0,
3529 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3530 };
3531
3532 ret = extent_write_cache_pages(tree, mapping, wbc,
3533 __extent_writepage, &epd,
3534 flush_write_bio);
3535 flush_epd_write_bio(&epd);
3536 return ret;
3537}
3538
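/*
 * readahead entry point: add each page to the page cache and read it
 * through __extent_read_full_page, batching everything into a single bio
 * where possible.
 */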
3539int extent_readpages(struct extent_io_tree *tree,
3540 struct address_space *mapping,
3541 struct list_head *pages, unsigned nr_pages,
3542 get_extent_t get_extent)
3543{
3544 struct bio *bio = NULL;
3545 unsigned page_idx;
3546 unsigned long bio_flags = 0;
3547
3548 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
3549 struct page *page = list_entry(pages->prev, struct page, lru);
3550
3551 prefetchw(&page->flags);
3552 list_del(&page->lru);
3553 if (!add_to_page_cache_lru(page, mapping,
3554 page->index, GFP_NOFS)) {
3555 __extent_read_full_page(tree, page, get_extent,
3556 &bio, 0, &bio_flags);
3557 }
3558 page_cache_release(page);
3559 }
3560 BUG_ON(!list_empty(pages));
3561 if (bio)
3562 return submit_one_bio(READ, bio, 0, bio_flags);
3563 return 0;
3564}
3565
3566/*
3567 * basic invalidatepage code, this waits on any locked or writeback
3568 * ranges corresponding to the page, and then deletes any extent state
3569 * records from the tree
3570 */
3571int extent_invalidatepage(struct extent_io_tree *tree,
3572 struct page *page, unsigned long offset)
3573{
3574 struct extent_state *cached_state = NULL;
3575 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
3576 u64 end = start + PAGE_CACHE_SIZE - 1;
3577 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
3578
3579 start += (offset + blocksize - 1) & ~(blocksize - 1);
3580 if (start > end)
3581 return 0;
3582
3583 lock_extent_bits(tree, start, end, 0, &cached_state);
3584 wait_on_page_writeback(page);
3585 clear_extent_bit(tree, start, end,
3586 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3587 EXTENT_DO_ACCOUNTING,
3588 1, 1, &cached_state, GFP_NOFS);
3589 return 0;
3590}
3591
3592/*
3593 * a helper for releasepage, this tests for areas of the page that
3594 * are locked or under IO and drops the related state bits if it is safe
3595 * to drop the page.
3596 */
3597int try_release_extent_state(struct extent_map_tree *map,
3598 struct extent_io_tree *tree, struct page *page,
3599 gfp_t mask)
3600{
3601 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3602 u64 end = start + PAGE_CACHE_SIZE - 1;
3603 int ret = 1;
3604
3605 if (test_range_bit(tree, start, end,
3606 EXTENT_IOBITS, 0, NULL))
3607 ret = 0;
3608 else {
3609 if ((mask & GFP_NOFS) == GFP_NOFS)
3610 mask = GFP_NOFS;
3611 /*
3612 * at this point we can safely clear everything except the
3613 * locked bit and the nodatasum bit
3614 */
3615 ret = clear_extent_bit(tree, start, end,
3616 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
3617 0, 0, NULL, mask);
3618
3619 /* if clear_extent_bit failed for enomem reasons,
3620 * we can't allow the release to continue.
3621 */
3622 if (ret < 0)
3623 ret = 0;
3624 else
3625 ret = 1;
3626 }
3627 return ret;
3628}
3629
3630/*
3631 * a helper for releasepage. As long as there are no locked extents
3632 * in the range corresponding to the page, both state records and extent
3633 * map records are removed
3634 */
3635int try_release_extent_mapping(struct extent_map_tree *map,
3636 struct extent_io_tree *tree, struct page *page,
3637 gfp_t mask)
3638{
3639 struct extent_map *em;
3640 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
3641 u64 end = start + PAGE_CACHE_SIZE - 1;
3642
3643 if ((mask & __GFP_WAIT) &&
3644 page->mapping->host->i_size > 16 * 1024 * 1024) {
3645 u64 len;
3646 while (start <= end) {
3647 len = end - start + 1;
3648 write_lock(&map->lock);
3649 em = lookup_extent_mapping(map, start, len);
3650 if (!em) {
3651 write_unlock(&map->lock);
3652 break;
3653 }
3654 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
3655 em->start != start) {
3656 write_unlock(&map->lock);
3657 free_extent_map(em);
3658 break;
3659 }
3660 if (!test_range_bit(tree, em->start,
3661 extent_map_end(em) - 1,
3662 EXTENT_LOCKED | EXTENT_WRITEBACK,
3663 0, NULL)) {
3664 remove_extent_mapping(map, em);
3665 /* once for the rb tree */
3666 free_extent_map(em);
3667 }
3668 start = extent_map_end(em);
3669 write_unlock(&map->lock);
3670
3671 /* once for us */
3672 free_extent_map(em);
3673 }
3674 }
3675 return try_release_extent_state(map, tree, page, mask);
3676}
3677
3678/*
3679 * helper function for fiemap, which doesn't want to see any holes.
3680 * This maps until we find something past 'last'
3681 */
3682static struct extent_map *get_extent_skip_holes(struct inode *inode,
3683 u64 offset,
3684 u64 last,
3685 get_extent_t *get_extent)
3686{
3687 u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
3688 struct extent_map *em;
3689 u64 len;
3690
3691 if (offset >= last)
3692 return NULL;
3693
 while (1) {
3695 len = last - offset;
3696 if (len == 0)
3697 break;
3698 len = (len + sectorsize - 1) & ~(sectorsize - 1);
3699 em = get_extent(inode, NULL, 0, offset, len, 0);
3700 if (IS_ERR_OR_NULL(em))
3701 return em;
3702
3703 /* if this isn't a hole return it */
3704 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
3705 em->block_start != EXTENT_MAP_HOLE) {
3706 return em;
3707 }
3708
3709 /* this is a hole, advance to the next extent */
3710 offset = extent_map_end(em);
3711 free_extent_map(em);
3712 if (offset >= last)
3713 break;
3714 }
3715 return NULL;
3716}
3717
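/*
 * fiemap implementation: walk the extent maps for the requested range
 * (skipping holes), translate each one into FIEMAP_EXTENT_* flags and
 * hand it to fiemap_fill_next_extent, looking one extent ahead so the
 * last one can be flagged with FIEMAP_EXTENT_LAST.
 */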
3718int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3719 __u64 start, __u64 len, get_extent_t *get_extent)
3720{
3721 int ret = 0;
3722 u64 off = start;
3723 u64 max = start + len;
3724 u32 flags = 0;
3725 u32 found_type;
3726 u64 last;
3727 u64 last_for_get_extent = 0;
3728 u64 disko = 0;
3729 u64 isize = i_size_read(inode);
3730 struct btrfs_key found_key;
3731 struct extent_map *em = NULL;
3732 struct extent_state *cached_state = NULL;
3733 struct btrfs_path *path;
3734 struct btrfs_file_extent_item *item;
3735 int end = 0;
3736 u64 em_start = 0;
3737 u64 em_len = 0;
3738 u64 em_end = 0;
3739 unsigned long emflags;
3740
3741 if (len == 0)
3742 return -EINVAL;
3743
3744 path = btrfs_alloc_path();
3745 if (!path)
3746 return -ENOMEM;
3747 path->leave_spinning = 1;
3748
3749 start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
3750 len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
3751
3752 /*
3753 * lookup the last file extent. We're not using i_size here
3754 * because there might be preallocation past i_size
3755 */
3756 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
3757 path, btrfs_ino(inode), -1, 0);
3758 if (ret < 0) {
3759 btrfs_free_path(path);
3760 return ret;
3761 }
3762 WARN_ON(!ret);
3763 path->slots[0]--;
3764 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3765 struct btrfs_file_extent_item);
3766 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
3767 found_type = btrfs_key_type(&found_key);
3768
3769 /* No extents, but there might be delalloc bits */
3770 if (found_key.objectid != btrfs_ino(inode) ||
3771 found_type != BTRFS_EXTENT_DATA_KEY) {
3772 /* have to trust i_size as the end */
3773 last = (u64)-1;
3774 last_for_get_extent = isize;
3775 } else {
3776 /*
3777 * remember the start of the last extent. There are a
3778 * bunch of different factors that go into the length of the
3779 * extent, so its much less complex to remember where it started
3780 */
3781 last = found_key.offset;
3782 last_for_get_extent = last + 1;
3783 }
3784 btrfs_free_path(path);
3785
3786 /*
3787 * we might have some extents allocated but more delalloc past those
3788 * extents. so, we trust isize unless the start of the last extent is
3789 * beyond isize
3790 */
3791 if (last < isize) {
3792 last = (u64)-1;
3793 last_for_get_extent = isize;
3794 }
3795
3796 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
3797 &cached_state);
3798
3799 em = get_extent_skip_holes(inode, start, last_for_get_extent,
3800 get_extent);
3801 if (!em)
3802 goto out;
3803 if (IS_ERR(em)) {
3804 ret = PTR_ERR(em);
3805 goto out;
3806 }
3807
3808 while (!end) {
3809 u64 offset_in_extent;
3810
3811 /* break if the extent we found is outside the range */
3812 if (em->start >= max || extent_map_end(em) < off)
3813 break;
3814
3815 /*
3816 * get_extent may return an extent that starts before our
3817 * requested range. We have to make sure the ranges
3818 * we return to fiemap always move forward and don't
3819 * overlap, so adjust the offsets here
3820 */
3821 em_start = max(em->start, off);
3822
3823 /*
3824 * record the offset from the start of the extent
3825 * for adjusting the disk offset below
3826 */
3827 offset_in_extent = em_start - em->start;
3828 em_end = extent_map_end(em);
3829 em_len = em_end - em_start;
3830 emflags = em->flags;
3831 disko = 0;
3832 flags = 0;
3833
3834 /*
3835 * bump off for our next call to get_extent
3836 */
3837 off = extent_map_end(em);
3838 if (off >= max)
3839 end = 1;
3840
3841 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
3842 end = 1;
3843 flags |= FIEMAP_EXTENT_LAST;
3844 } else if (em->block_start == EXTENT_MAP_INLINE) {
3845 flags |= (FIEMAP_EXTENT_DATA_INLINE |
3846 FIEMAP_EXTENT_NOT_ALIGNED);
3847 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
3848 flags |= (FIEMAP_EXTENT_DELALLOC |
3849 FIEMAP_EXTENT_UNKNOWN);
3850 } else {
3851 disko = em->block_start + offset_in_extent;
3852 }
3853 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
3854 flags |= FIEMAP_EXTENT_ENCODED;
3855
3856 free_extent_map(em);
3857 em = NULL;
3858 if ((em_start >= last) || em_len == (u64)-1 ||
3859 (last == (u64)-1 && isize <= em_end)) {
3860 flags |= FIEMAP_EXTENT_LAST;
3861 end = 1;
3862 }
3863
3864 /* now scan forward to see if this is really the last extent. */
3865 em = get_extent_skip_holes(inode, off, last_for_get_extent,
3866 get_extent);
3867 if (IS_ERR(em)) {
3868 ret = PTR_ERR(em);
3869 goto out;
3870 }
3871 if (!em) {
3872 flags |= FIEMAP_EXTENT_LAST;
3873 end = 1;
3874 }
3875 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
3876 em_len, flags);
3877 if (ret)
3878 goto out_free;
3879 }
3880out_free:
3881 free_extent_map(em);
3882out:
3883 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
3884 &cached_state, GFP_NOFS);
3885 return ret;
3886}
3887
3888inline struct page *extent_buffer_page(struct extent_buffer *eb,
3889 unsigned long i)
3890{
3891 return eb->pages[i];
3892}
3893
3894inline unsigned long num_extent_pages(u64 start, u64 len)
3895{
3896 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
3897 (start >> PAGE_CACHE_SHIFT);
3898}
3899
3900static void __free_extent_buffer(struct extent_buffer *eb)
3901{
3902#if LEAK_DEBUG
3903 unsigned long flags;
3904 spin_lock_irqsave(&leak_lock, flags);
3905 list_del(&eb->leak_list);
3906 spin_unlock_irqrestore(&leak_lock, flags);
3907#endif
3908 if (eb->pages && eb->pages != eb->inline_pages)
3909 kfree(eb->pages);
3910 kmem_cache_free(extent_buffer_cache, eb);
3911}
3912
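/*
 * allocate and initialize a bare extent_buffer; the page array is either
 * the inline one embedded in the buffer or, for larger buffers, a
 * separately allocated array.  The caller is responsible for filling in
 * the pages.
 */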
3913static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3914 u64 start,
3915 unsigned long len,
3916 gfp_t mask)
3917{
3918 struct extent_buffer *eb = NULL;
3919#if LEAK_DEBUG
3920 unsigned long flags;
3921#endif
3922
3923 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
3924 if (eb == NULL)
3925 return NULL;
3926 eb->start = start;
3927 eb->len = len;
3928 eb->tree = tree;
3929 eb->bflags = 0;
3930 rwlock_init(&eb->lock);
3931 atomic_set(&eb->write_locks, 0);
3932 atomic_set(&eb->read_locks, 0);
3933 atomic_set(&eb->blocking_readers, 0);
3934 atomic_set(&eb->blocking_writers, 0);
3935 atomic_set(&eb->spinning_readers, 0);
3936 atomic_set(&eb->spinning_writers, 0);
3937 eb->lock_nested = 0;
3938 init_waitqueue_head(&eb->write_lock_wq);
3939 init_waitqueue_head(&eb->read_lock_wq);
3940
3941#if LEAK_DEBUG
3942 spin_lock_irqsave(&leak_lock, flags);
3943 list_add(&eb->leak_list, &buffers);
3944 spin_unlock_irqrestore(&leak_lock, flags);
3945#endif
3946 spin_lock_init(&eb->refs_lock);
3947 atomic_set(&eb->refs, 1);
3948 atomic_set(&eb->io_pages, 0);
3949
3950 if (len > MAX_INLINE_EXTENT_BUFFER_SIZE) {
3951 struct page **pages;
3952 int num_pages = (len + PAGE_CACHE_SIZE - 1) >>
3953 PAGE_CACHE_SHIFT;
 pages = kcalloc(num_pages, sizeof(struct page *), mask);
3955 if (!pages) {
3956 __free_extent_buffer(eb);
3957 return NULL;
3958 }
3959 eb->pages = pages;
3960 } else {
3961 eb->pages = eb->inline_pages;
3962 }
3963
3964 return eb;
3965}
3966
3967struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
3968{
3969 unsigned long i;
3970 struct page *p;
3971 struct extent_buffer *new;
3972 unsigned long num_pages = num_extent_pages(src->start, src->len);
3973
3974 new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_ATOMIC);
3975 if (new == NULL)
3976 return NULL;
3977
3978 for (i = 0; i < num_pages; i++) {
3979 p = alloc_page(GFP_ATOMIC);
3980 BUG_ON(!p);
3981 attach_extent_buffer_page(new, p);
3982 WARN_ON(PageDirty(p));
3983 SetPageUptodate(p);
3984 new->pages[i] = p;
3985 }
3986
3987 copy_extent_buffer(new, src, 0, 0, src->len);
3988 set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
3989 set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
3990
3991 return new;
3992}
3993
3994struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
3995{
3996 struct extent_buffer *eb;
3997 unsigned long num_pages = num_extent_pages(0, len);
3998 unsigned long i;
3999
4000 eb = __alloc_extent_buffer(NULL, start, len, GFP_ATOMIC);
4001 if (!eb)
4002 return NULL;
4003
4004 for (i = 0; i < num_pages; i++) {
4005 eb->pages[i] = alloc_page(GFP_ATOMIC);
4006 if (!eb->pages[i])
4007 goto err;
4008 }
4009 set_extent_buffer_uptodate(eb);
4010 btrfs_set_header_nritems(eb, 0);
4011 set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4012
4013 return eb;
4014err:
 for (; i > 0; i--)
 __free_page(eb->pages[i - 1]);
4017 __free_extent_buffer(eb);
4018 return NULL;
4019}
4020
4021static int extent_buffer_under_io(struct extent_buffer *eb)
4022{
4023 return (atomic_read(&eb->io_pages) ||
4024 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4025 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4026}
4027
4028/*
4029 * Helper for releasing extent buffer page.
4030 */
4031static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
4032 unsigned long start_idx)
4033{
4034 unsigned long index;
4035 unsigned long num_pages;
4036 struct page *page;
4037 int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4038
4039 BUG_ON(extent_buffer_under_io(eb));
4040
4041 num_pages = num_extent_pages(eb->start, eb->len);
4042 index = start_idx + num_pages;
4043 if (start_idx >= index)
4044 return;
4045
4046 do {
4047 index--;
4048 page = extent_buffer_page(eb, index);
4049 if (page && mapped) {
4050 spin_lock(&page->mapping->private_lock);
4051 /*
4052 * We do this since we'll remove the pages after we've
4053 * removed the eb from the radix tree, so we could race
4054 * and have this page now attached to the new eb. So
4055 * only clear page_private if it's still connected to
4056 * this eb.
4057 */
4058 if (PagePrivate(page) &&
4059 page->private == (unsigned long)eb) {
4060 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4061 BUG_ON(PageDirty(page));
4062 BUG_ON(PageWriteback(page));
4063 /*
 * We need to make sure we haven't been attached
4065 * to a new eb.
4066 */
4067 ClearPagePrivate(page);
4068 set_page_private(page, 0);
4069 /* One for the page private */
4070 page_cache_release(page);
4071 }
4072 spin_unlock(&page->mapping->private_lock);
4073
4074 }
4075 if (page) {
 /* One for when we allocated the page */
4077 page_cache_release(page);
4078 }
4079 } while (index != start_idx);
4080}
4081
4082/*
4083 * Helper for releasing the extent buffer.
4084 */
4085static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4086{
4087 btrfs_release_extent_buffer_page(eb, 0);
4088 __free_extent_buffer(eb);
4089}
4090
4091static void check_buffer_tree_ref(struct extent_buffer *eb)
4092{
4093 /* the ref bit is tricky. We have to make sure it is set
4094 * if we have the buffer dirty. Otherwise the
4095 * code to free a buffer can end up dropping a dirty
4096 * page
4097 *
4098 * Once the ref bit is set, it won't go away while the
4099 * buffer is dirty or in writeback, and it also won't
4100 * go away while we have the reference count on the
4101 * eb bumped.
4102 *
4103 * We can't just set the ref bit without bumping the
4104 * ref on the eb because free_extent_buffer might
4105 * see the ref bit and try to clear it. If this happens
4106 * free_extent_buffer might end up dropping our original
4107 * ref by mistake and freeing the page before we are able
4108 * to add one more ref.
4109 *
4110 * So bump the ref count first, then set the bit. If someone
4111 * beat us to it, drop the ref we added.
4112 */
4113 if (!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4114 atomic_inc(&eb->refs);
4115 if (test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4116 atomic_dec(&eb->refs);
4117 }
4118}
4119
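/*
 * Make sure the buffer holds a tree ref and tell the VM that every
 * backing page was recently used, so this metadata is reclaimed later.
 */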
4120static void mark_extent_buffer_accessed(struct extent_buffer *eb)
4121{
4122 unsigned long num_pages, i;
4123
4124 check_buffer_tree_ref(eb);
4125
4126 num_pages = num_extent_pages(eb->start, eb->len);
4127 for (i = 0; i < num_pages; i++) {
4128 struct page *p = extent_buffer_page(eb, i);
4129 mark_page_accessed(p);
4130 }
4131}
4132
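/*
 * Find or create the extent buffer for [start, start + len) in this tree.
 * Pages come from the inode's page cache; if another task races us on a
 * page or on the radix tree slot, we drop our allocation and return the
 * buffer that won instead.
 */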
4133struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
4134 u64 start, unsigned long len)
4135{
4136 unsigned long num_pages = num_extent_pages(start, len);
4137 unsigned long i;
4138 unsigned long index = start >> PAGE_CACHE_SHIFT;
4139 struct extent_buffer *eb;
4140 struct extent_buffer *exists = NULL;
4141 struct page *p;
4142 struct address_space *mapping = tree->mapping;
4143 int uptodate = 1;
4144 int ret;
4145
4146 rcu_read_lock();
4147 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4148 if (eb && atomic_inc_not_zero(&eb->refs)) {
4149 rcu_read_unlock();
4150 mark_extent_buffer_accessed(eb);
4151 return eb;
4152 }
4153 rcu_read_unlock();
4154
4155 eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
4156 if (!eb)
4157 return NULL;
4158
4159 for (i = 0; i < num_pages; i++, index++) {
4160 p = find_or_create_page(mapping, index, GFP_NOFS);
4161 if (!p) {
4162 WARN_ON(1);
4163 goto free_eb;
4164 }
4165
4166 spin_lock(&mapping->private_lock);
4167 if (PagePrivate(p)) {
4168 /*
4169 * We could have already allocated an eb for this page
4170 * and attached one, so let's see if we can get a ref on
4171 * the existing eb, and if we can we know it's good and
4172 * we can just return that one, else we know we can just
4173 * overwrite page->private.
4174 */
4175 exists = (struct extent_buffer *)p->private;
4176 if (atomic_inc_not_zero(&exists->refs)) {
4177 spin_unlock(&mapping->private_lock);
4178 unlock_page(p);
4179 page_cache_release(p);
4180 mark_extent_buffer_accessed(exists);
4181 goto free_eb;
4182 }
4183
4184 /*
4185 * Do this so attach doesn't complain, and so we drop
4186 * the page ref the old eb was holding.
4187 */
4188 ClearPagePrivate(p);
4189 WARN_ON(PageDirty(p));
4190 page_cache_release(p);
4191 }
4192 attach_extent_buffer_page(eb, p);
4193 spin_unlock(&mapping->private_lock);
4194 WARN_ON(PageDirty(p));
4195 mark_page_accessed(p);
4196 eb->pages[i] = p;
4197 if (!PageUptodate(p))
4198 uptodate = 0;
4199
4200 /*
4201 * see below about how we avoid a nasty race with release page
4202 * and why we unlock later
4203 */
4204 }
4205 if (uptodate)
4206 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4207again:
4208 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4209 if (ret)
4210 goto free_eb;
4211
4212 spin_lock(&tree->buffer_lock);
4213 ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
4214 if (ret == -EEXIST) {
4215 exists = radix_tree_lookup(&tree->buffer,
4216 start >> PAGE_CACHE_SHIFT);
4217 if (!atomic_inc_not_zero(&exists->refs)) {
4218 spin_unlock(&tree->buffer_lock);
4219 radix_tree_preload_end();
4220 exists = NULL;
4221 goto again;
4222 }
4223 spin_unlock(&tree->buffer_lock);
4224 radix_tree_preload_end();
4225 mark_extent_buffer_accessed(exists);
4226 goto free_eb;
4227 }
4228 /* add one reference for the tree */
4229 spin_lock(&eb->refs_lock);
4230 check_buffer_tree_ref(eb);
4231 spin_unlock(&eb->refs_lock);
4232 spin_unlock(&tree->buffer_lock);
4233 radix_tree_preload_end();
4234
4235 /*
4236 * there is a race where release page may have
4237 * tried to find this extent buffer in the radix
4238 * but failed. It will tell the VM it is safe to
4239 * reclaim the page, and it will clear the page private bit.
4240 * We must make sure to set the page private bit properly
4241 * after the extent buffer is in the radix tree so
4242 * it doesn't get lost
4243 */
4244 SetPageChecked(eb->pages[0]);
4245 for (i = 1; i < num_pages; i++) {
4246 p = extent_buffer_page(eb, i);
4247 ClearPageChecked(p);
4248 unlock_page(p);
4249 }
4250 unlock_page(eb->pages[0]);
4251 return eb;
4252
4253free_eb:
4254 for (i = 0; i < num_pages; i++) {
4255 if (eb->pages[i])
4256 unlock_page(eb->pages[i]);
4257 }
4258
4259 WARN_ON(!atomic_dec_and_test(&eb->refs));
4260 btrfs_release_extent_buffer(eb);
4261 return exists;
4262}
4263
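/*
 * Look up an extent buffer in the radix tree and grab a reference on it,
 * or return NULL if it isn't there (or is already on its way to being freed).
 */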
4264struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
4265 u64 start, unsigned long len)
4266{
4267 struct extent_buffer *eb;
4268
4269 rcu_read_lock();
4270 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4271 if (eb && atomic_inc_not_zero(&eb->refs)) {
4272 rcu_read_unlock();
4273 mark_extent_buffer_accessed(eb);
4274 return eb;
4275 }
4276 rcu_read_unlock();
4277
4278 return NULL;
4279}
4280
4281static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4282{
4283 struct extent_buffer *eb =
4284 container_of(head, struct extent_buffer, rcu_head);
4285
4286 __free_extent_buffer(eb);
4287}
4288
4289/* Expects to have eb->refs_lock already held */
4290static void release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
4291{
4292 WARN_ON(atomic_read(&eb->refs) == 0);
4293 if (atomic_dec_and_test(&eb->refs)) {
4294 if (test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags)) {
4295 spin_unlock(&eb->refs_lock);
4296 } else {
4297 struct extent_io_tree *tree = eb->tree;
4298
4299 spin_unlock(&eb->refs_lock);
4300
4301 spin_lock(&tree->buffer_lock);
4302 radix_tree_delete(&tree->buffer,
4303 eb->start >> PAGE_CACHE_SHIFT);
4304 spin_unlock(&tree->buffer_lock);
4305 }
4306
4307 /* Should be safe to release our pages at this point */
4308 btrfs_release_extent_buffer_page(eb, 0);
4309
4310 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
4311 return;
4312 }
4313 spin_unlock(&eb->refs_lock);
4314}
4315
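/*
 * Drop the caller's reference on an extent buffer.  For dummy buffers, or
 * stale buffers with no IO pending, this also drops the tree ref so
 * release_extent_buffer() can actually free the buffer.
 */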
4316void free_extent_buffer(struct extent_buffer *eb)
4317{
4318 if (!eb)
4319 return;
4320
4321 spin_lock(&eb->refs_lock);
4322 if (atomic_read(&eb->refs) == 2 &&
4323 test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
4324 atomic_dec(&eb->refs);
4325
4326 if (atomic_read(&eb->refs) == 2 &&
4327 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
4328 !extent_buffer_under_io(eb) &&
4329 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4330 atomic_dec(&eb->refs);
4331
4332 /*
4333 * I know this is terrible, but it's temporary until we stop tracking
4334 * the uptodate bits and such for the extent buffers.
4335 */
4336 release_extent_buffer(eb, GFP_ATOMIC);
4337}
4338
4339void free_extent_buffer_stale(struct extent_buffer *eb)
4340{
4341 if (!eb)
4342 return;
4343
4344 spin_lock(&eb->refs_lock);
4345 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
4346
4347 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
4348 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4349 atomic_dec(&eb->refs);
4350 release_extent_buffer(eb, GFP_NOFS);
4351}
4352
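/*
 * Clear the dirty state on every page backing the buffer, including the
 * PAGECACHE_TAG_DIRTY tag in the mapping's radix tree, so writeback no
 * longer considers these pages.
 */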
4353void clear_extent_buffer_dirty(struct extent_buffer *eb)
4354{
4355 unsigned long i;
4356 unsigned long num_pages;
4357 struct page *page;
4358
4359 num_pages = num_extent_pages(eb->start, eb->len);
4360
4361 for (i = 0; i < num_pages; i++) {
4362 page = extent_buffer_page(eb, i);
4363 if (!PageDirty(page))
4364 continue;
4365
4366 lock_page(page);
4367 WARN_ON(!PagePrivate(page));
4368
4369 clear_page_dirty_for_io(page);
4370 spin_lock_irq(&page->mapping->tree_lock);
4371 if (!PageDirty(page)) {
4372 radix_tree_tag_clear(&page->mapping->page_tree,
4373 page_index(page),
4374 PAGECACHE_TAG_DIRTY);
4375 }
4376 spin_unlock_irq(&page->mapping->tree_lock);
4377 ClearPageError(page);
4378 unlock_page(page);
4379 }
4380 WARN_ON(atomic_read(&eb->refs) == 0);
4381}
4382
4383int set_extent_buffer_dirty(struct extent_buffer *eb)
4384{
4385 unsigned long i;
4386 unsigned long num_pages;
4387 int was_dirty = 0;
4388
4389 check_buffer_tree_ref(eb);
4390
4391 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4392
4393 num_pages = num_extent_pages(eb->start, eb->len);
4394 WARN_ON(atomic_read(&eb->refs) == 0);
4395 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
4396
4397 for (i = 0; i < num_pages; i++)
4398 set_page_dirty(extent_buffer_page(eb, i));
4399 return was_dirty;
4400}
4401
4402static int range_straddles_pages(u64 start, u64 len)
4403{
4404 if (len < PAGE_CACHE_SIZE)
4405 return 1;
4406 if (start & (PAGE_CACHE_SIZE - 1))
4407 return 1;
4408 if ((start + len) & (PAGE_CACHE_SIZE - 1))
4409 return 1;
4410 return 0;
4411}
4412
4413int clear_extent_buffer_uptodate(struct extent_buffer *eb)
4414{
4415 unsigned long i;
4416 struct page *page;
4417 unsigned long num_pages;
4418
4419 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4420 num_pages = num_extent_pages(eb->start, eb->len);
4421 for (i = 0; i < num_pages; i++) {
4422 page = extent_buffer_page(eb, i);
4423 if (page)
4424 ClearPageUptodate(page);
4425 }
4426 return 0;
4427}
4428
4429int set_extent_buffer_uptodate(struct extent_buffer *eb)
4430{
4431 unsigned long i;
4432 struct page *page;
4433 unsigned long num_pages;
4434
4435 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4436 num_pages = num_extent_pages(eb->start, eb->len);
4437 for (i = 0; i < num_pages; i++) {
4438 page = extent_buffer_page(eb, i);
4439 SetPageUptodate(page);
4440 }
4441 return 0;
4442}
4443
4444int extent_range_uptodate(struct extent_io_tree *tree,
4445 u64 start, u64 end)
4446{
4447 struct page *page;
4448 int ret;
4449 int pg_uptodate = 1;
4450 int uptodate;
4451 unsigned long index;
4452
4453 if (range_straddles_pages(start, end - start + 1)) {
4454 ret = test_range_bit(tree, start, end,
4455 EXTENT_UPTODATE, 1, NULL);
4456 if (ret)
4457 return 1;
4458 }
4459 while (start <= end) {
4460 index = start >> PAGE_CACHE_SHIFT;
4461 page = find_get_page(tree->mapping, index);
4462 if (!page)
4463 return 1;
4464 uptodate = PageUptodate(page);
4465 page_cache_release(page);
4466 if (!uptodate) {
4467 pg_uptodate = 0;
4468 break;
4469 }
4470 start += PAGE_CACHE_SIZE;
4471 }
4472 return pg_uptodate;
4473}
4474
4475int extent_buffer_uptodate(struct extent_buffer *eb)
4476{
4477 return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4478}
4479
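/*
 * Read the buffer's pages from disk unless they are already uptodate.
 * With WAIT_COMPLETE we wait for the IO and return -EIO if any page ended
 * up not uptodate; with WAIT_NONE we give up as soon as a page cannot be
 * locked without blocking.
 */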
4480int read_extent_buffer_pages(struct extent_io_tree *tree,
4481 struct extent_buffer *eb, u64 start, int wait,
4482 get_extent_t *get_extent, int mirror_num)
4483{
4484 unsigned long i;
4485 unsigned long start_i;
4486 struct page *page;
4487 int err;
4488 int ret = 0;
4489 int locked_pages = 0;
4490 int all_uptodate = 1;
4491 unsigned long num_pages;
4492 unsigned long num_reads = 0;
4493 struct bio *bio = NULL;
4494 unsigned long bio_flags = 0;
4495
4496 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4497 return 0;
4498
4499 if (start) {
4500 WARN_ON(start < eb->start);
4501 start_i = (start >> PAGE_CACHE_SHIFT) -
4502 (eb->start >> PAGE_CACHE_SHIFT);
4503 } else {
4504 start_i = 0;
4505 }
4506
4507 num_pages = num_extent_pages(eb->start, eb->len);
4508 for (i = start_i; i < num_pages; i++) {
4509 page = extent_buffer_page(eb, i);
4510 if (wait == WAIT_NONE) {
4511 if (!trylock_page(page))
4512 goto unlock_exit;
4513 } else {
4514 lock_page(page);
4515 }
4516 locked_pages++;
4517 if (!PageUptodate(page)) {
4518 num_reads++;
4519 all_uptodate = 0;
4520 }
4521 }
4522 if (all_uptodate) {
4523 if (start_i == 0)
4524 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4525 goto unlock_exit;
4526 }
4527
4528 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
4529 eb->read_mirror = 0;
4530 atomic_set(&eb->io_pages, num_reads);
4531 for (i = start_i; i < num_pages; i++) {
4532 page = extent_buffer_page(eb, i);
4533 if (!PageUptodate(page)) {
4534 ClearPageError(page);
4535 err = __extent_read_full_page(tree, page,
4536 get_extent, &bio,
4537 mirror_num, &bio_flags);
4538 if (err)
4539 ret = err;
4540 } else {
4541 unlock_page(page);
4542 }
4543 }
4544
4545 if (bio) {
4546 err = submit_one_bio(READ, bio, mirror_num, bio_flags);
4547 if (err)
4548 return err;
4549 }
4550
4551 if (ret || wait != WAIT_COMPLETE)
4552 return ret;
4553
4554 for (i = start_i; i < num_pages; i++) {
4555 page = extent_buffer_page(eb, i);
4556 wait_on_page_locked(page);
4557 if (!PageUptodate(page))
4558 ret = -EIO;
4559 }
4560
4561 return ret;
4562
4563unlock_exit:
4564 i = start_i;
4565 while (locked_pages > 0) {
4566 page = extent_buffer_page(eb, i);
4567 i++;
4568 unlock_page(page);
4569 locked_pages--;
4570 }
4571 return ret;
4572}
4573
4574void read_extent_buffer(struct extent_buffer *eb, void *dstv,
4575 unsigned long start,
4576 unsigned long len)
4577{
4578 size_t cur;
4579 size_t offset;
4580 struct page *page;
4581 char *kaddr;
4582 char *dst = (char *)dstv;
4583 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4584 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4585
4586 WARN_ON(start > eb->len);
4587 WARN_ON(start + len > eb->start + eb->len);
4588
4589 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4590
4591 while (len > 0) {
4592 page = extent_buffer_page(eb, i);
4593
4594 cur = min(len, (PAGE_CACHE_SIZE - offset));
4595 kaddr = page_address(page);
4596 memcpy(dst, kaddr + offset, cur);
4597
4598 dst += cur;
4599 len -= cur;
4600 offset = 0;
4601 i++;
4602 }
4603}
4604
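/*
 * Map [start, start + min_len) inside the buffer to a kernel address.
 * Fails with -EINVAL if the range would cross a page boundary or runs
 * past the end of the buffer.
 */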
4605int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
4606 unsigned long min_len, char **map,
4607 unsigned long *map_start,
4608 unsigned long *map_len)
4609{
4610 size_t offset = start & (PAGE_CACHE_SIZE - 1);
4611 char *kaddr;
4612 struct page *p;
4613 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4614 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4615 unsigned long end_i = (start_offset + start + min_len - 1) >>
4616 PAGE_CACHE_SHIFT;
4617
4618 if (i != end_i)
4619 return -EINVAL;
4620
4621 if (i == 0) {
4622 offset = start_offset;
4623 *map_start = 0;
4624 } else {
4625 offset = 0;
4626 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
4627 }
4628
4629 if (start + min_len > eb->len) {
4630 printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
4631 "wanted %lu %lu\n", (unsigned long long)eb->start,
4632 eb->len, start, min_len);
4633 WARN_ON(1);
4634 return -EINVAL;
4635 }
4636
4637 p = extent_buffer_page(eb, i);
4638 kaddr = page_address(p);
4639 *map = kaddr + offset;
4640 *map_len = PAGE_CACHE_SIZE - offset;
4641 return 0;
4642}
4643
4644int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
4645 unsigned long start,
4646 unsigned long len)
4647{
4648 size_t cur;
4649 size_t offset;
4650 struct page *page;
4651 char *kaddr;
4652 char *ptr = (char *)ptrv;
4653 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4654 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4655 int ret = 0;
4656
4657 WARN_ON(start > eb->len);
4658 WARN_ON(start + len > eb->start + eb->len);
4659
4660 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4661
4662 while (len > 0) {
4663 page = extent_buffer_page(eb, i);
4664
4665 cur = min(len, (PAGE_CACHE_SIZE - offset));
4666
4667 kaddr = page_address(page);
4668 ret = memcmp(ptr, kaddr + offset, cur);
4669 if (ret)
4670 break;
4671
4672 ptr += cur;
4673 len -= cur;
4674 offset = 0;
4675 i++;
4676 }
4677 return ret;
4678}
4679
4680void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
4681 unsigned long start, unsigned long len)
4682{
4683 size_t cur;
4684 size_t offset;
4685 struct page *page;
4686 char *kaddr;
4687 char *src = (char *)srcv;
4688 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4689 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4690
4691 WARN_ON(start > eb->len);
4692 WARN_ON(start + len > eb->start + eb->len);
4693
4694 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4695
4696 while (len > 0) {
4697 page = extent_buffer_page(eb, i);
4698 WARN_ON(!PageUptodate(page));
4699
4700 cur = min(len, PAGE_CACHE_SIZE - offset);
4701 kaddr = page_address(page);
4702 memcpy(kaddr + offset, src, cur);
4703
4704 src += cur;
4705 len -= cur;
4706 offset = 0;
4707 i++;
4708 }
4709}
4710
4711void memset_extent_buffer(struct extent_buffer *eb, char c,
4712 unsigned long start, unsigned long len)
4713{
4714 size_t cur;
4715 size_t offset;
4716 struct page *page;
4717 char *kaddr;
4718 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4719 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4720
4721 WARN_ON(start > eb->len);
4722 WARN_ON(start + len > eb->start + eb->len);
4723
4724 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
4725
4726 while (len > 0) {
4727 page = extent_buffer_page(eb, i);
4728 WARN_ON(!PageUptodate(page));
4729
4730 cur = min(len, PAGE_CACHE_SIZE - offset);
4731 kaddr = page_address(page);
4732 memset(kaddr + offset, c, cur);
4733
4734 len -= cur;
4735 offset = 0;
4736 i++;
4737 }
4738}
4739
4740void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
4741 unsigned long dst_offset, unsigned long src_offset,
4742 unsigned long len)
4743{
4744 u64 dst_len = dst->len;
4745 size_t cur;
4746 size_t offset;
4747 struct page *page;
4748 char *kaddr;
4749 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4750 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4751
4752 WARN_ON(src->len != dst_len);
4753
4754 offset = (start_offset + dst_offset) &
4755 ((unsigned long)PAGE_CACHE_SIZE - 1);
4756
4757 while (len > 0) {
4758 page = extent_buffer_page(dst, i);
4759 WARN_ON(!PageUptodate(page));
4760
4761 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
4762
4763 kaddr = page_address(page);
4764 read_extent_buffer(src, kaddr + offset, src_offset, cur);
4765
4766 src_offset += cur;
4767 len -= cur;
4768 offset = 0;
4769 i++;
4770 }
4771}
4772
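/*
 * Copy len bytes from src_page/src_off to dst_page/dst_off.  The
 * same-page case is a plain memmove(); otherwise the bytes are copied one
 * at a time from the end of the range, matching the backwards walk done
 * by memmove_extent_buffer().
 */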
4773static void move_pages(struct page *dst_page, struct page *src_page,
4774 unsigned long dst_off, unsigned long src_off,
4775 unsigned long len)
4776{
4777 char *dst_kaddr = page_address(dst_page);
4778 if (dst_page == src_page) {
4779 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
4780 } else {
4781 char *src_kaddr = page_address(src_page);
4782 char *p = dst_kaddr + dst_off + len;
4783 char *s = src_kaddr + src_off + len;
4784
4785 while (len--)
4786 *--p = *--s;
4787 }
4788}
4789
4790static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4791{
4792 unsigned long distance = (src > dst) ? src - dst : dst - src;
4793 return distance < len;
4794}
4795
4796static void copy_pages(struct page *dst_page, struct page *src_page,
4797 unsigned long dst_off, unsigned long src_off,
4798 unsigned long len)
4799{
4800 char *dst_kaddr = page_address(dst_page);
4801 char *src_kaddr;
4802 int must_memmove = 0;
4803
4804 if (dst_page != src_page) {
4805 src_kaddr = page_address(src_page);
4806 } else {
4807 src_kaddr = dst_kaddr;
4808 if (areas_overlap(src_off, dst_off, len))
4809 must_memmove = 1;
4810 }
4811
4812 if (must_memmove)
4813 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
4814 else
4815 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
4816}
4817
4818void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4819 unsigned long src_offset, unsigned long len)
4820{
4821 size_t cur;
4822 size_t dst_off_in_page;
4823 size_t src_off_in_page;
4824 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4825 unsigned long dst_i;
4826 unsigned long src_i;
4827
4828 if (src_offset + len > dst->len) {
4829 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
4830 "len %lu dst len %lu\n", src_offset, len, dst->len);
4831 BUG_ON(1);
4832 }
4833 if (dst_offset + len > dst->len) {
4834 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
4835 "len %lu dst len %lu\n", dst_offset, len, dst->len);
4836 BUG_ON(1);
4837 }
4838
4839 while (len > 0) {
4840 dst_off_in_page = (start_offset + dst_offset) &
4841 ((unsigned long)PAGE_CACHE_SIZE - 1);
4842 src_off_in_page = (start_offset + src_offset) &
4843 ((unsigned long)PAGE_CACHE_SIZE - 1);
4844
4845 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
4846 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
4847
4848 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
4849 src_off_in_page));
4850 cur = min_t(unsigned long, cur,
4851 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
4852
4853 copy_pages(extent_buffer_page(dst, dst_i),
4854 extent_buffer_page(dst, src_i),
4855 dst_off_in_page, src_off_in_page, cur);
4856
4857 src_offset += cur;
4858 dst_offset += cur;
4859 len -= cur;
4860 }
4861}
4862
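/*
 * memmove() for ranges within a single extent buffer.  When dst_offset is
 * below src_offset a forward copy is safe and we defer to
 * memcpy_extent_buffer(); otherwise the range is copied page by page from
 * the end so overlapping regions stay intact.
 */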
4863void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4864 unsigned long src_offset, unsigned long len)
4865{
4866 size_t cur;
4867 size_t dst_off_in_page;
4868 size_t src_off_in_page;
4869 unsigned long dst_end = dst_offset + len - 1;
4870 unsigned long src_end = src_offset + len - 1;
4871 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
4872 unsigned long dst_i;
4873 unsigned long src_i;
4874
4875 if (src_offset + len > dst->len) {
4876 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
4877 "len %lu len %lu\n", src_offset, len, dst->len);
4878 BUG_ON(1);
4879 }
4880 if (dst_offset + len > dst->len) {
4881 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
4882 "len %lu len %lu\n", dst_offset, len, dst->len);
4883 BUG_ON(1);
4884 }
4885 if (dst_offset < src_offset) {
4886 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4887 return;
4888 }
4889 while (len > 0) {
4890 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
4891 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
4892
4893 dst_off_in_page = (start_offset + dst_end) &
4894 ((unsigned long)PAGE_CACHE_SIZE - 1);
4895 src_off_in_page = (start_offset + src_end) &
4896 ((unsigned long)PAGE_CACHE_SIZE - 1);
4897
4898 cur = min_t(unsigned long, len, src_off_in_page + 1);
4899 cur = min(cur, dst_off_in_page + 1);
4900 move_pages(extent_buffer_page(dst, dst_i),
4901 extent_buffer_page(dst, src_i),
4902 dst_off_in_page - cur + 1,
4903 src_off_in_page - cur + 1, cur);
4904
4905 dst_end -= cur;
4906 src_end -= cur;
4907 len -= cur;
4908 }
4909}
4910
4911int try_release_extent_buffer(struct page *page, gfp_t mask)
4912{
4913 struct extent_buffer *eb;
4914
4915 /*
4916 * We need to make sure nobody is attaching this page to an eb right
4917 * now.
4918 */
4919 spin_lock(&page->mapping->private_lock);
4920 if (!PagePrivate(page)) {
4921 spin_unlock(&page->mapping->private_lock);
4922 return 1;
4923 }
4924
4925 eb = (struct extent_buffer *)page->private;
4926 BUG_ON(!eb);
4927
4928 /*
4929 * This is a little awful but should be ok, we need to make sure that
4930 * the eb doesn't disappear out from under us while we're looking at
4931 * this page.
4932 */
4933 spin_lock(&eb->refs_lock);
4934 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4935 spin_unlock(&eb->refs_lock);
4936 spin_unlock(&page->mapping->private_lock);
4937 return 0;
4938 }
4939 spin_unlock(&page->mapping->private_lock);
4940
4941 if ((mask & GFP_NOFS) == GFP_NOFS)
4942 mask = GFP_NOFS;
4943
4944 /*
4945 * If tree ref isn't set then we know the ref on this eb is a real ref,
4946 * so just return, this page will likely be freed soon anyway.
4947 */
4948 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4949 spin_unlock(&eb->refs_lock);
4950 return 0;
4951 }
4952 release_extent_buffer(eb, mask);
4953
4954 return 1;
4955}
1#include <linux/bitops.h>
2#include <linux/slab.h>
3#include <linux/bio.h>
4#include <linux/mm.h>
5#include <linux/pagemap.h>
6#include <linux/page-flags.h>
7#include <linux/spinlock.h>
8#include <linux/blkdev.h>
9#include <linux/swap.h>
10#include <linux/writeback.h>
11#include <linux/pagevec.h>
12#include <linux/prefetch.h>
13#include <linux/cleancache.h>
14#include "extent_io.h"
15#include "extent_map.h"
16#include "ctree.h"
17#include "btrfs_inode.h"
18#include "volumes.h"
19#include "check-integrity.h"
20#include "locking.h"
21#include "rcu-string.h"
22#include "backref.h"
23
24static struct kmem_cache *extent_state_cache;
25static struct kmem_cache *extent_buffer_cache;
26static struct bio_set *btrfs_bioset;
27
28static inline bool extent_state_in_tree(const struct extent_state *state)
29{
30 return !RB_EMPTY_NODE(&state->rb_node);
31}
32
33#ifdef CONFIG_BTRFS_DEBUG
34static LIST_HEAD(buffers);
35static LIST_HEAD(states);
36
37static DEFINE_SPINLOCK(leak_lock);
38
39static inline
40void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
41{
42 unsigned long flags;
43
44 spin_lock_irqsave(&leak_lock, flags);
45 list_add(new, head);
46 spin_unlock_irqrestore(&leak_lock, flags);
47}
48
49static inline
50void btrfs_leak_debug_del(struct list_head *entry)
51{
52 unsigned long flags;
53
54 spin_lock_irqsave(&leak_lock, flags);
55 list_del(entry);
56 spin_unlock_irqrestore(&leak_lock, flags);
57}
58
59static inline
60void btrfs_leak_debug_check(void)
61{
62 struct extent_state *state;
63 struct extent_buffer *eb;
64
65 while (!list_empty(&states)) {
66 state = list_entry(states.next, struct extent_state, leak_list);
67 pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
68 state->start, state->end, state->state,
69 extent_state_in_tree(state),
70 atomic_read(&state->refs));
71 list_del(&state->leak_list);
72 kmem_cache_free(extent_state_cache, state);
73 }
74
75 while (!list_empty(&buffers)) {
76 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
77 printk(KERN_ERR "BTRFS: buffer leak start %llu len %lu "
78 "refs %d\n",
79 eb->start, eb->len, atomic_read(&eb->refs));
80 list_del(&eb->leak_list);
81 kmem_cache_free(extent_buffer_cache, eb);
82 }
83}
84
85#define btrfs_debug_check_extent_io_range(tree, start, end) \
86 __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
87static inline void __btrfs_debug_check_extent_io_range(const char *caller,
88 struct extent_io_tree *tree, u64 start, u64 end)
89{
90 struct inode *inode;
91 u64 isize;
92
93 if (!tree->mapping)
94 return;
95
96 inode = tree->mapping->host;
97 isize = i_size_read(inode);
98 if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
99 btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
100 "%s: ino %llu isize %llu odd range [%llu,%llu]",
101 caller, btrfs_ino(inode), isize, start, end);
102 }
103}
104#else
105#define btrfs_leak_debug_add(new, head) do {} while (0)
106#define btrfs_leak_debug_del(entry) do {} while (0)
107#define btrfs_leak_debug_check() do {} while (0)
108#define btrfs_debug_check_extent_io_range(c, s, e) do {} while (0)
109#endif
110
111#define BUFFER_LRU_MAX 64
112
113struct tree_entry {
114 u64 start;
115 u64 end;
116 struct rb_node rb_node;
117};
118
119struct extent_page_data {
120 struct bio *bio;
121 struct extent_io_tree *tree;
122 get_extent_t *get_extent;
123 unsigned long bio_flags;
124
125 /* tells writepage not to lock the state bits for this range
126 * it still does the unlocking
127 */
128 unsigned int extent_locked:1;
129
130 /* tells the submit_bio code to use a WRITE_SYNC */
131 unsigned int sync_io:1;
132};
133
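/*
 * Record a state change in the caller's changeset: account the bytes
 * affected and remember the range, but only when the bits actually flip
 * for this extent state.
 */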
134static void add_extent_changeset(struct extent_state *state, unsigned bits,
135 struct extent_changeset *changeset,
136 int set)
137{
138 int ret;
139
140 if (!changeset)
141 return;
142 if (set && (state->state & bits) == bits)
143 return;
144 if (!set && (state->state & bits) == 0)
145 return;
146 changeset->bytes_changed += state->end - state->start + 1;
147 ret = ulist_add(changeset->range_changed, state->start, state->end,
148 GFP_ATOMIC);
149 /* ENOMEM */
150 BUG_ON(ret < 0);
151}
152
153static noinline void flush_write_bio(void *data);
154static inline struct btrfs_fs_info *
155tree_fs_info(struct extent_io_tree *tree)
156{
157 if (!tree->mapping)
158 return NULL;
159 return btrfs_sb(tree->mapping->host->i_sb);
160}
161
162int __init extent_io_init(void)
163{
164 extent_state_cache = kmem_cache_create("btrfs_extent_state",
165 sizeof(struct extent_state), 0,
166 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
167 if (!extent_state_cache)
168 return -ENOMEM;
169
170 extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
171 sizeof(struct extent_buffer), 0,
172 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
173 if (!extent_buffer_cache)
174 goto free_state_cache;
175
176 btrfs_bioset = bioset_create(BIO_POOL_SIZE,
177 offsetof(struct btrfs_io_bio, bio));
178 if (!btrfs_bioset)
179 goto free_buffer_cache;
180
181 if (bioset_integrity_create(btrfs_bioset, BIO_POOL_SIZE))
182 goto free_bioset;
183
184 return 0;
185
186free_bioset:
187 bioset_free(btrfs_bioset);
188 btrfs_bioset = NULL;
189
190free_buffer_cache:
191 kmem_cache_destroy(extent_buffer_cache);
192 extent_buffer_cache = NULL;
193
194free_state_cache:
195 kmem_cache_destroy(extent_state_cache);
196 extent_state_cache = NULL;
197 return -ENOMEM;
198}
199
200void extent_io_exit(void)
201{
202 btrfs_leak_debug_check();
203
204 /*
205 * Make sure all delayed RCU frees are flushed before we
206 * destroy the caches.
207 */
208 rcu_barrier();
209 kmem_cache_destroy(extent_state_cache);
210 kmem_cache_destroy(extent_buffer_cache);
211 if (btrfs_bioset)
212 bioset_free(btrfs_bioset);
213}
214
215void extent_io_tree_init(struct extent_io_tree *tree,
216 struct address_space *mapping)
217{
218 tree->state = RB_ROOT;
219 tree->ops = NULL;
220 tree->dirty_bytes = 0;
221 spin_lock_init(&tree->lock);
222 tree->mapping = mapping;
223}
224
225static struct extent_state *alloc_extent_state(gfp_t mask)
226{
227 struct extent_state *state;
228
229 state = kmem_cache_alloc(extent_state_cache, mask);
230 if (!state)
231 return state;
232 state->state = 0;
233 state->failrec = NULL;
234 RB_CLEAR_NODE(&state->rb_node);
235 btrfs_leak_debug_add(&state->leak_list, &states);
236 atomic_set(&state->refs, 1);
237 init_waitqueue_head(&state->wq);
238 trace_alloc_extent_state(state, mask, _RET_IP_);
239 return state;
240}
241
242void free_extent_state(struct extent_state *state)
243{
244 if (!state)
245 return;
246 if (atomic_dec_and_test(&state->refs)) {
247 WARN_ON(extent_state_in_tree(state));
248 btrfs_leak_debug_del(&state->leak_list);
249 trace_free_extent_state(state, _RET_IP_);
250 kmem_cache_free(extent_state_cache, state);
251 }
252}
253
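/*
 * Insert 'node' into the extent state rb tree.  Callers may pass a
 * precomputed insertion point (p_in/parent_in) to skip the search.
 * Returns the existing node if 'offset' is already covered by one.
 */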
254static struct rb_node *tree_insert(struct rb_root *root,
255 struct rb_node *search_start,
256 u64 offset,
257 struct rb_node *node,
258 struct rb_node ***p_in,
259 struct rb_node **parent_in)
260{
261 struct rb_node **p;
262 struct rb_node *parent = NULL;
263 struct tree_entry *entry;
264
265 if (p_in && parent_in) {
266 p = *p_in;
267 parent = *parent_in;
268 goto do_insert;
269 }
270
271 p = search_start ? &search_start : &root->rb_node;
272 while (*p) {
273 parent = *p;
274 entry = rb_entry(parent, struct tree_entry, rb_node);
275
276 if (offset < entry->start)
277 p = &(*p)->rb_left;
278 else if (offset > entry->end)
279 p = &(*p)->rb_right;
280 else
281 return parent;
282 }
283
284do_insert:
285 rb_link_node(node, parent, p);
286 rb_insert_color(node, root);
287 return NULL;
288}
289
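/*
 * Search for the extent state containing 'offset'.  With no exact match,
 * return NULL and optionally report the neighbouring nodes and the
 * insertion point a new node would use.
 */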
290static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
291 struct rb_node **prev_ret,
292 struct rb_node **next_ret,
293 struct rb_node ***p_ret,
294 struct rb_node **parent_ret)
295{
296 struct rb_root *root = &tree->state;
297 struct rb_node **n = &root->rb_node;
298 struct rb_node *prev = NULL;
299 struct rb_node *orig_prev = NULL;
300 struct tree_entry *entry;
301 struct tree_entry *prev_entry = NULL;
302
303 while (*n) {
304 prev = *n;
305 entry = rb_entry(prev, struct tree_entry, rb_node);
306 prev_entry = entry;
307
308 if (offset < entry->start)
309 n = &(*n)->rb_left;
310 else if (offset > entry->end)
311 n = &(*n)->rb_right;
312 else
313 return *n;
314 }
315
316 if (p_ret)
317 *p_ret = n;
318 if (parent_ret)
319 *parent_ret = prev;
320
321 if (prev_ret) {
322 orig_prev = prev;
323 while (prev && offset > prev_entry->end) {
324 prev = rb_next(prev);
325 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
326 }
327 *prev_ret = prev;
328 prev = orig_prev;
329 }
330
331 if (next_ret) {
332 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
333 while (prev && offset < prev_entry->start) {
334 prev = rb_prev(prev);
335 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
336 }
337 *next_ret = prev;
338 }
339 return NULL;
340}
341
342static inline struct rb_node *
343tree_search_for_insert(struct extent_io_tree *tree,
344 u64 offset,
345 struct rb_node ***p_ret,
346 struct rb_node **parent_ret)
347{
348 struct rb_node *prev = NULL;
349 struct rb_node *ret;
350
351 ret = __etree_search(tree, offset, &prev, NULL, p_ret, parent_ret);
352 if (!ret)
353 return prev;
354 return ret;
355}
356
357static inline struct rb_node *tree_search(struct extent_io_tree *tree,
358 u64 offset)
359{
360 return tree_search_for_insert(tree, offset, NULL, NULL);
361}
362
363static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
364 struct extent_state *other)
365{
366 if (tree->ops && tree->ops->merge_extent_hook)
367 tree->ops->merge_extent_hook(tree->mapping->host, new,
368 other);
369}
370
371/*
372 * utility function to look for merge candidates inside a given range.
373 * Any extents with matching state are merged together into a single
374 * extent in the tree. Extents with EXTENT_IO in their state field
375 * are not merged because the end_io handlers need to be able to do
376 * operations on them without sleeping (or doing allocations/splits).
377 *
378 * This should be called with the tree lock held.
379 */
380static void merge_state(struct extent_io_tree *tree,
381 struct extent_state *state)
382{
383 struct extent_state *other;
384 struct rb_node *other_node;
385
386 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
387 return;
388
389 other_node = rb_prev(&state->rb_node);
390 if (other_node) {
391 other = rb_entry(other_node, struct extent_state, rb_node);
392 if (other->end == state->start - 1 &&
393 other->state == state->state) {
394 merge_cb(tree, state, other);
395 state->start = other->start;
396 rb_erase(&other->rb_node, &tree->state);
397 RB_CLEAR_NODE(&other->rb_node);
398 free_extent_state(other);
399 }
400 }
401 other_node = rb_next(&state->rb_node);
402 if (other_node) {
403 other = rb_entry(other_node, struct extent_state, rb_node);
404 if (other->start == state->end + 1 &&
405 other->state == state->state) {
406 merge_cb(tree, state, other);
407 state->end = other->end;
408 rb_erase(&other->rb_node, &tree->state);
409 RB_CLEAR_NODE(&other->rb_node);
410 free_extent_state(other);
411 }
412 }
413}
414
415static void set_state_cb(struct extent_io_tree *tree,
416 struct extent_state *state, unsigned *bits)
417{
418 if (tree->ops && tree->ops->set_bit_hook)
419 tree->ops->set_bit_hook(tree->mapping->host, state, bits);
420}
421
422static void clear_state_cb(struct extent_io_tree *tree,
423 struct extent_state *state, unsigned *bits)
424{
425 if (tree->ops && tree->ops->clear_bit_hook)
426 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
427}
428
429static void set_state_bits(struct extent_io_tree *tree,
430 struct extent_state *state, unsigned *bits,
431 struct extent_changeset *changeset);
432
433/*
434 * insert an extent_state struct into the tree. 'bits' are set on the
435 * struct before it is inserted.
436 *
437 * This may return -EEXIST if the extent is already there, in which case the
438 * state struct is freed.
439 *
440 * The tree lock is not taken internally. This is a utility function and
441 * probably isn't what you want to call (see set/clear_extent_bit).
442 */
443static int insert_state(struct extent_io_tree *tree,
444 struct extent_state *state, u64 start, u64 end,
445 struct rb_node ***p,
446 struct rb_node **parent,
447 unsigned *bits, struct extent_changeset *changeset)
448{
449 struct rb_node *node;
450
451 if (end < start)
452 WARN(1, KERN_ERR "BTRFS: end < start %llu %llu\n",
453 end, start);
454 state->start = start;
455 state->end = end;
456
457 set_state_bits(tree, state, bits, changeset);
458
459 node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
460 if (node) {
461 struct extent_state *found;
462 found = rb_entry(node, struct extent_state, rb_node);
463 printk(KERN_ERR "BTRFS: found node %llu %llu on insert of "
464 "%llu %llu\n",
465 found->start, found->end, start, end);
466 return -EEXIST;
467 }
468 merge_state(tree, state);
469 return 0;
470}
471
472static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
473 u64 split)
474{
475 if (tree->ops && tree->ops->split_extent_hook)
476 tree->ops->split_extent_hook(tree->mapping->host, orig, split);
477}
478
479/*
480 * split a given extent state struct in two, inserting the preallocated
481 * struct 'prealloc' as the newly created second half. 'split' indicates an
482 * offset inside 'orig' where it should be split.
483 *
484 * Before calling,
485 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
486 * are two extent state structs in the tree:
487 * prealloc: [orig->start, split - 1]
488 * orig: [ split, orig->end ]
489 *
490 * The tree locks are not taken by this function. They need to be held
491 * by the caller.
492 */
493static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
494 struct extent_state *prealloc, u64 split)
495{
496 struct rb_node *node;
497
498 split_cb(tree, orig, split);
499
500 prealloc->start = orig->start;
501 prealloc->end = split - 1;
502 prealloc->state = orig->state;
503 orig->start = split;
504
505 node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
506 &prealloc->rb_node, NULL, NULL);
507 if (node) {
508 free_extent_state(prealloc);
509 return -EEXIST;
510 }
511 return 0;
512}
513
514static struct extent_state *next_state(struct extent_state *state)
515{
516 struct rb_node *next = rb_next(&state->rb_node);
517 if (next)
518 return rb_entry(next, struct extent_state, rb_node);
519 else
520 return NULL;
521}
522
523/*
524 * utility function to clear some bits in an extent state struct.
525 * it will optionally wake up any one waiting on this state (wake == 1).
526 *
527 * If no bits are set on the state struct after clearing things, the
528 * struct is freed and removed from the tree
529 */
530static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
531 struct extent_state *state,
532 unsigned *bits, int wake,
533 struct extent_changeset *changeset)
534{
535 struct extent_state *next;
536 unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS;
537
538 if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
539 u64 range = state->end - state->start + 1;
540 WARN_ON(range > tree->dirty_bytes);
541 tree->dirty_bytes -= range;
542 }
543 clear_state_cb(tree, state, bits);
544 add_extent_changeset(state, bits_to_clear, changeset, 0);
545 state->state &= ~bits_to_clear;
546 if (wake)
547 wake_up(&state->wq);
548 if (state->state == 0) {
549 next = next_state(state);
550 if (extent_state_in_tree(state)) {
551 rb_erase(&state->rb_node, &tree->state);
552 RB_CLEAR_NODE(&state->rb_node);
553 free_extent_state(state);
554 } else {
555 WARN_ON(1);
556 }
557 } else {
558 merge_state(tree, state);
559 next = next_state(state);
560 }
561 return next;
562}
563
564static struct extent_state *
565alloc_extent_state_atomic(struct extent_state *prealloc)
566{
567 if (!prealloc)
568 prealloc = alloc_extent_state(GFP_ATOMIC);
569
570 return prealloc;
571}
572
573static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
574{
575 btrfs_panic(tree_fs_info(tree), err, "Locking error: "
576 "Extent tree was modified by another "
577 "thread while locked.");
578}
579
580/*
581 * clear some bits on a range in the tree. This may require splitting
582 * or inserting elements in the tree, so the gfp mask is used to
583 * indicate which allocations or sleeping are allowed.
584 *
585 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
586 * the given range from the tree regardless of state (ie for truncate).
587 *
588 * the range [start, end] is inclusive.
589 *
590 * This takes the tree lock, and returns 0 on success and < 0 on error.
591 */
592static int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
593 unsigned bits, int wake, int delete,
594 struct extent_state **cached_state,
595 gfp_t mask, struct extent_changeset *changeset)
596{
597 struct extent_state *state;
598 struct extent_state *cached;
599 struct extent_state *prealloc = NULL;
600 struct rb_node *node;
601 u64 last_end;
602 int err;
603 int clear = 0;
604
605 btrfs_debug_check_extent_io_range(tree, start, end);
606
607 if (bits & EXTENT_DELALLOC)
608 bits |= EXTENT_NORESERVE;
609
610 if (delete)
611 bits |= ~EXTENT_CTLBITS;
612 bits |= EXTENT_FIRST_DELALLOC;
613
614 if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
615 clear = 1;
616again:
617 if (!prealloc && gfpflags_allow_blocking(mask)) {
618 /*
619 * Don't care for allocation failure here because we might end
620 * up not needing the pre-allocated extent state at all, which
621 * is the case if the only extent states in the tree are ones that
622 * cover our input range and don't cover any other range.
623 * If we end up needing a new extent state we allocate it later.
624 */
625 prealloc = alloc_extent_state(mask);
626 }
627
628 spin_lock(&tree->lock);
629 if (cached_state) {
630 cached = *cached_state;
631
632 if (clear) {
633 *cached_state = NULL;
634 cached_state = NULL;
635 }
636
637 if (cached && extent_state_in_tree(cached) &&
638 cached->start <= start && cached->end > start) {
639 if (clear)
640 atomic_dec(&cached->refs);
641 state = cached;
642 goto hit_next;
643 }
644 if (clear)
645 free_extent_state(cached);
646 }
647 /*
648 * this search will find the extents that end after
649 * our range starts
650 */
651 node = tree_search(tree, start);
652 if (!node)
653 goto out;
654 state = rb_entry(node, struct extent_state, rb_node);
655hit_next:
656 if (state->start > end)
657 goto out;
658 WARN_ON(state->end < start);
659 last_end = state->end;
660
661 /* the state doesn't have the wanted bits, go ahead */
662 if (!(state->state & bits)) {
663 state = next_state(state);
664 goto next;
665 }
666
667 /*
668 * | ---- desired range ---- |
669 * | state | or
670 * | ------------- state -------------- |
671 *
672 * We need to split the extent we found, and may flip
673 * bits on second half.
674 *
675 * If the extent we found extends past our range, we
676 * just split and search again. It'll get split again
677 * the next time though.
678 *
679 * If the extent we found is inside our range, we clear
680 * the desired bit on it.
681 */
682
683 if (state->start < start) {
684 prealloc = alloc_extent_state_atomic(prealloc);
685 BUG_ON(!prealloc);
686 err = split_state(tree, state, prealloc, start);
687 if (err)
688 extent_io_tree_panic(tree, err);
689
690 prealloc = NULL;
691 if (err)
692 goto out;
693 if (state->end <= end) {
694 state = clear_state_bit(tree, state, &bits, wake,
695 changeset);
696 goto next;
697 }
698 goto search_again;
699 }
700 /*
701 * | ---- desired range ---- |
702 * | state |
703 * We need to split the extent, and clear the bit
704 * on the first half
705 */
706 if (state->start <= end && state->end > end) {
707 prealloc = alloc_extent_state_atomic(prealloc);
708 BUG_ON(!prealloc);
709 err = split_state(tree, state, prealloc, end + 1);
710 if (err)
711 extent_io_tree_panic(tree, err);
712
713 if (wake)
714 wake_up(&state->wq);
715
716 clear_state_bit(tree, prealloc, &bits, wake, changeset);
717
718 prealloc = NULL;
719 goto out;
720 }
721
722 state = clear_state_bit(tree, state, &bits, wake, changeset);
723next:
724 if (last_end == (u64)-1)
725 goto out;
726 start = last_end + 1;
727 if (start <= end && state && !need_resched())
728 goto hit_next;
729 goto search_again;
730
731out:
732 spin_unlock(&tree->lock);
733 if (prealloc)
734 free_extent_state(prealloc);
735
736 return 0;
737
738search_again:
739 if (start > end)
740 goto out;
741 spin_unlock(&tree->lock);
742 if (gfpflags_allow_blocking(mask))
743 cond_resched();
744 goto again;
745}
746
747static void wait_on_state(struct extent_io_tree *tree,
748 struct extent_state *state)
749 __releases(tree->lock)
750 __acquires(tree->lock)
751{
752 DEFINE_WAIT(wait);
753 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
754 spin_unlock(&tree->lock);
755 schedule();
756 spin_lock(&tree->lock);
757 finish_wait(&state->wq, &wait);
758}
759
760/*
761 * waits for one or more bits to clear on a range in the state tree.
762 * The range [start, end] is inclusive.
763 * The tree lock is taken by this function
764 */
765static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
766 unsigned long bits)
767{
768 struct extent_state *state;
769 struct rb_node *node;
770
771 btrfs_debug_check_extent_io_range(tree, start, end);
772
773 spin_lock(&tree->lock);
774again:
775 while (1) {
776 /*
777 * this search will find all the extents that end after
778 * our range starts
779 */
780 node = tree_search(tree, start);
781process_node:
782 if (!node)
783 break;
784
785 state = rb_entry(node, struct extent_state, rb_node);
786
787 if (state->start > end)
788 goto out;
789
790 if (state->state & bits) {
791 start = state->start;
792 atomic_inc(&state->refs);
793 wait_on_state(tree, state);
794 free_extent_state(state);
795 goto again;
796 }
797 start = state->end + 1;
798
799 if (start > end)
800 break;
801
802 if (!cond_resched_lock(&tree->lock)) {
803 node = rb_next(node);
804 goto process_node;
805 }
806 }
807out:
808 spin_unlock(&tree->lock);
809}
810
811static void set_state_bits(struct extent_io_tree *tree,
812 struct extent_state *state,
813 unsigned *bits, struct extent_changeset *changeset)
814{
815 unsigned bits_to_set = *bits & ~EXTENT_CTLBITS;
816
817 set_state_cb(tree, state, bits);
818 if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
819 u64 range = state->end - state->start + 1;
820 tree->dirty_bytes += range;
821 }
822 add_extent_changeset(state, bits_to_set, changeset, 1);
823 state->state |= bits_to_set;
824}
825
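/*
 * Stash a reference to 'state' in the caller-supplied cache pointer, but
 * only if nothing is cached yet and the state carries one of 'flags' (or
 * no flags were requested).
 */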
826static void cache_state_if_flags(struct extent_state *state,
827 struct extent_state **cached_ptr,
828 unsigned flags)
829{
830 if (cached_ptr && !(*cached_ptr)) {
831 if (!flags || (state->state & flags)) {
832 *cached_ptr = state;
833 atomic_inc(&state->refs);
834 }
835 }
836}
837
838static void cache_state(struct extent_state *state,
839 struct extent_state **cached_ptr)
840{
841 return cache_state_if_flags(state, cached_ptr,
842 EXTENT_IOBITS | EXTENT_BOUNDARY);
843}
844
845/*
846 * set some bits on a range in the tree. This may require allocations or
847 * sleeping, so the gfp mask is used to indicate what is allowed.
848 *
849 * If any of the exclusive bits are set, this will fail with -EEXIST if some
850 * part of the range already has the desired bits set. The start of the
851 * existing range is returned in failed_start in this case.
852 *
853 * [start, end] is inclusive. This takes the tree lock.
854 */
855
856static int __must_check
857__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
858 unsigned bits, unsigned exclusive_bits,
859 u64 *failed_start, struct extent_state **cached_state,
860 gfp_t mask, struct extent_changeset *changeset)
861{
862 struct extent_state *state;
863 struct extent_state *prealloc = NULL;
864 struct rb_node *node;
865 struct rb_node **p;
866 struct rb_node *parent;
867 int err = 0;
868 u64 last_start;
869 u64 last_end;
870
871 btrfs_debug_check_extent_io_range(tree, start, end);
872
873 bits |= EXTENT_FIRST_DELALLOC;
874again:
875 if (!prealloc && gfpflags_allow_blocking(mask)) {
876 prealloc = alloc_extent_state(mask);
877 BUG_ON(!prealloc);
878 }
879
880 spin_lock(&tree->lock);
881 if (cached_state && *cached_state) {
882 state = *cached_state;
883 if (state->start <= start && state->end > start &&
884 extent_state_in_tree(state)) {
885 node = &state->rb_node;
886 goto hit_next;
887 }
888 }
889 /*
890 * this search will find all the extents that end after
891 * our range starts.
892 */
893 node = tree_search_for_insert(tree, start, &p, &parent);
894 if (!node) {
895 prealloc = alloc_extent_state_atomic(prealloc);
896 BUG_ON(!prealloc);
897 err = insert_state(tree, prealloc, start, end,
898 &p, &parent, &bits, changeset);
899 if (err)
900 extent_io_tree_panic(tree, err);
901
902 cache_state(prealloc, cached_state);
903 prealloc = NULL;
904 goto out;
905 }
906 state = rb_entry(node, struct extent_state, rb_node);
907hit_next:
908 last_start = state->start;
909 last_end = state->end;
910
911 /*
912 * | ---- desired range ---- |
913 * | state |
914 *
915 * Just lock what we found and keep going
916 */
917 if (state->start == start && state->end <= end) {
918 if (state->state & exclusive_bits) {
919 *failed_start = state->start;
920 err = -EEXIST;
921 goto out;
922 }
923
924 set_state_bits(tree, state, &bits, changeset);
925 cache_state(state, cached_state);
926 merge_state(tree, state);
927 if (last_end == (u64)-1)
928 goto out;
929 start = last_end + 1;
930 state = next_state(state);
931 if (start < end && state && state->start == start &&
932 !need_resched())
933 goto hit_next;
934 goto search_again;
935 }
936
937 /*
938 * | ---- desired range ---- |
939 * | state |
940 * or
941 * | ------------- state -------------- |
942 *
943 * We need to split the extent we found, and may flip bits on
944 * second half.
945 *
946 * If the extent we found extends past our
947 * range, we just split and search again. It'll get split
948 * again the next time though.
949 *
950 * If the extent we found is inside our range, we set the
951 * desired bit on it.
952 */
953 if (state->start < start) {
954 if (state->state & exclusive_bits) {
955 *failed_start = start;
956 err = -EEXIST;
957 goto out;
958 }
959
960 prealloc = alloc_extent_state_atomic(prealloc);
961 BUG_ON(!prealloc);
962 err = split_state(tree, state, prealloc, start);
963 if (err)
964 extent_io_tree_panic(tree, err);
965
966 prealloc = NULL;
967 if (err)
968 goto out;
969 if (state->end <= end) {
970 set_state_bits(tree, state, &bits, changeset);
971 cache_state(state, cached_state);
972 merge_state(tree, state);
973 if (last_end == (u64)-1)
974 goto out;
975 start = last_end + 1;
976 state = next_state(state);
977 if (start < end && state && state->start == start &&
978 !need_resched())
979 goto hit_next;
980 }
981 goto search_again;
982 }
983 /*
984 * | ---- desired range ---- |
985 * | state | or | state |
986 *
987 * There's a hole, we need to insert something in it and
988 * ignore the extent we found.
989 */
990 if (state->start > start) {
991 u64 this_end;
992 if (end < last_start)
993 this_end = end;
994 else
995 this_end = last_start - 1;
996
997 prealloc = alloc_extent_state_atomic(prealloc);
998 BUG_ON(!prealloc);
999
1000 /*
1001 * Avoid freeing 'prealloc' if it can be merged with
1002 * the later extent.
1003 */
1004 err = insert_state(tree, prealloc, start, this_end,
1005 NULL, NULL, &bits, changeset);
1006 if (err)
1007 extent_io_tree_panic(tree, err);
1008
1009 cache_state(prealloc, cached_state);
1010 prealloc = NULL;
1011 start = this_end + 1;
1012 goto search_again;
1013 }
1014 /*
1015 * | ---- desired range ---- |
1016 * | state |
1017 * We need to split the extent, and set the bit
1018 * on the first half
1019 */
1020 if (state->start <= end && state->end > end) {
1021 if (state->state & exclusive_bits) {
1022 *failed_start = start;
1023 err = -EEXIST;
1024 goto out;
1025 }
1026
1027 prealloc = alloc_extent_state_atomic(prealloc);
1028 BUG_ON(!prealloc);
1029 err = split_state(tree, state, prealloc, end + 1);
1030 if (err)
1031 extent_io_tree_panic(tree, err);
1032
1033 set_state_bits(tree, prealloc, &bits, changeset);
1034 cache_state(prealloc, cached_state);
1035 merge_state(tree, prealloc);
1036 prealloc = NULL;
1037 goto out;
1038 }
1039
1040 goto search_again;
1041
1042out:
1043 spin_unlock(&tree->lock);
1044 if (prealloc)
1045 free_extent_state(prealloc);
1046
1047 return err;
1048
1049search_again:
1050 if (start > end)
1051 goto out;
1052 spin_unlock(&tree->lock);
1053 if (gfpflags_allow_blocking(mask))
1054 cond_resched();
1055 goto again;
1056}
1057
1058int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1059 unsigned bits, u64 * failed_start,
1060 struct extent_state **cached_state, gfp_t mask)
1061{
1062 return __set_extent_bit(tree, start, end, bits, 0, failed_start,
1063 cached_state, mask, NULL);
1064}
1065
1066
1067/**
1068 * convert_extent_bit - convert all bits in a given range from one bit to
1069 * another
1070 * @tree: the io tree to search
1071 * @start: the start offset in bytes
1072 * @end: the end offset in bytes (inclusive)
1073 * @bits: the bits to set in this range
1074 * @clear_bits: the bits to clear in this range
1075 * @cached_state: state that we're going to cache
1076 * @mask: the allocation mask
1077 *
1078 * This will go through and set bits for the given range. If any states exist
1079 * already in this range they are set with the given bit and cleared of the
1080 * clear_bits. This is only meant to be used by things that are mergeable, ie
1081 * converting from say DELALLOC to DIRTY. This is not meant to be used with
1082 * boundary bits like LOCK.
1083 */
1084int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1085 unsigned bits, unsigned clear_bits,
1086 struct extent_state **cached_state, gfp_t mask)
1087{
1088 struct extent_state *state;
1089 struct extent_state *prealloc = NULL;
1090 struct rb_node *node;
1091 struct rb_node **p;
1092 struct rb_node *parent;
1093 int err = 0;
1094 u64 last_start;
1095 u64 last_end;
1096 bool first_iteration = true;
1097
1098 btrfs_debug_check_extent_io_range(tree, start, end);
1099
1100again:
1101 if (!prealloc && gfpflags_allow_blocking(mask)) {
1102 /*
1103 * Best effort, don't worry if extent state allocation fails
1104 * here for the first iteration. We might have a cached state
1105 * that matches exactly the target range, in which case no
1106 * extent state allocations are needed. We'll only know this
1107 * after locking the tree.
1108 */
1109 prealloc = alloc_extent_state(mask);
1110 if (!prealloc && !first_iteration)
1111 return -ENOMEM;
1112 }
1113
1114 spin_lock(&tree->lock);
1115 if (cached_state && *cached_state) {
1116 state = *cached_state;
1117 if (state->start <= start && state->end > start &&
1118 extent_state_in_tree(state)) {
1119 node = &state->rb_node;
1120 goto hit_next;
1121 }
1122 }
1123
1124 /*
1125 * this search will find all the extents that end after
1126 * our range starts.
1127 */
1128 node = tree_search_for_insert(tree, start, &p, &parent);
1129 if (!node) {
1130 prealloc = alloc_extent_state_atomic(prealloc);
1131 if (!prealloc) {
1132 err = -ENOMEM;
1133 goto out;
1134 }
1135 err = insert_state(tree, prealloc, start, end,
1136 &p, &parent, &bits, NULL);
1137 if (err)
1138 extent_io_tree_panic(tree, err);
1139 cache_state(prealloc, cached_state);
1140 prealloc = NULL;
1141 goto out;
1142 }
1143 state = rb_entry(node, struct extent_state, rb_node);
1144hit_next:
1145 last_start = state->start;
1146 last_end = state->end;
1147
1148 /*
1149 * | ---- desired range ---- |
1150 * | state |
1151 *
1152 * Just lock what we found and keep going
1153 */
1154 if (state->start == start && state->end <= end) {
1155 set_state_bits(tree, state, &bits, NULL);
1156 cache_state(state, cached_state);
1157 state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
1158 if (last_end == (u64)-1)
1159 goto out;
1160 start = last_end + 1;
1161 if (start < end && state && state->start == start &&
1162 !need_resched())
1163 goto hit_next;
1164 goto search_again;
1165 }
1166
1167 /*
1168 * | ---- desired range ---- |
1169 * | state |
1170 * or
1171 * | ------------- state -------------- |
1172 *
1173 * We need to split the extent we found, and may flip bits on the
1174 * second half.
1175 *
1176 * If the extent we found extends past our
1177 * range, we just split and search again. It'll get split
1178 * again the next time though.
1179 *
1180 * If the extent we found is inside our range, we set the
1181 * desired bit on it.
1182 */
1183 if (state->start < start) {
1184 prealloc = alloc_extent_state_atomic(prealloc);
1185 if (!prealloc) {
1186 err = -ENOMEM;
1187 goto out;
1188 }
1189 err = split_state(tree, state, prealloc, start);
1190 if (err)
1191 extent_io_tree_panic(tree, err);
1192 prealloc = NULL;
1193 if (err)
1194 goto out;
1195 if (state->end <= end) {
1196 set_state_bits(tree, state, &bits, NULL);
1197 cache_state(state, cached_state);
1198 state = clear_state_bit(tree, state, &clear_bits, 0,
1199 NULL);
1200 if (last_end == (u64)-1)
1201 goto out;
1202 start = last_end + 1;
1203 if (start < end && state && state->start == start &&
1204 !need_resched())
1205 goto hit_next;
1206 }
1207 goto search_again;
1208 }
1209 /*
1210 * | ---- desired range ---- |
1211 * | state | or | state |
1212 *
1213 * There's a hole, we need to insert something in it and
1214 * ignore the extent we found.
1215 */
1216 if (state->start > start) {
1217 u64 this_end;
1218 if (end < last_start)
1219 this_end = end;
1220 else
1221 this_end = last_start - 1;
1222
1223 prealloc = alloc_extent_state_atomic(prealloc);
1224 if (!prealloc) {
1225 err = -ENOMEM;
1226 goto out;
1227 }
1228
1229 /*
1230 * Avoid freeing 'prealloc' if it can be merged with
1231 * the later extent.
1232 */
1233 err = insert_state(tree, prealloc, start, this_end,
1234 NULL, NULL, &bits, NULL);
1235 if (err)
1236 extent_io_tree_panic(tree, err);
1237 cache_state(prealloc, cached_state);
1238 prealloc = NULL;
1239 start = this_end + 1;
1240 goto search_again;
1241 }
1242 /*
1243 * | ---- desired range ---- |
1244 * | state |
1245 * We need to split the extent, and set the bit
1246 * on the first half
1247 */
1248 if (state->start <= end && state->end > end) {
1249 prealloc = alloc_extent_state_atomic(prealloc);
1250 if (!prealloc) {
1251 err = -ENOMEM;
1252 goto out;
1253 }
1254
1255 err = split_state(tree, state, prealloc, end + 1);
1256 if (err)
1257 extent_io_tree_panic(tree, err);
1258
1259 set_state_bits(tree, prealloc, &bits, NULL);
1260 cache_state(prealloc, cached_state);
1261 clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
1262 prealloc = NULL;
1263 goto out;
1264 }
1265
1266 goto search_again;
1267
1268out:
1269 spin_unlock(&tree->lock);
1270 if (prealloc)
1271 free_extent_state(prealloc);
1272
1273 return err;
1274
1275search_again:
1276 if (start > end)
1277 goto out;
1278 spin_unlock(&tree->lock);
1279 if (gfpflags_allow_blocking(mask))
1280 cond_resched();
1281 first_iteration = false;
1282 goto again;
1283}
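
/*
 * Illustrative sketch (not a caller that exists in this file): converting a
 * range from DELALLOC to DIRTY, as described in the comment above, could look
 * roughly like the following, where 'tree', 'start' and 'end' are assumed to
 * come from the caller's context:
 *
 *	struct extent_state *cached = NULL;
 *	int ret;
 *
 *	ret = convert_extent_bit(tree, start, end, EXTENT_DIRTY,
 *				 EXTENT_DELALLOC, &cached, GFP_NOFS);
 *
 * The bit combination here is only an example of a mergeable conversion.
 */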
1284
1285/* wrappers around set/clear extent bit */
1286int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1287 unsigned bits, gfp_t mask,
1288 struct extent_changeset *changeset)
1289{
1290 /*
1291 * We don't support EXTENT_LOCKED yet, as current changeset will
1292 * record any bits changed, so for EXTENT_LOCKED case, it will
1293 * either fail with -EEXIST or changeset will record the whole
1294 * range.
1295 */
1296 BUG_ON(bits & EXTENT_LOCKED);
1297
1298 return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, mask,
1299 changeset);
1300}
1301
1302int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1303 unsigned bits, int wake, int delete,
1304 struct extent_state **cached, gfp_t mask)
1305{
1306 return __clear_extent_bit(tree, start, end, bits, wake, delete,
1307 cached, mask, NULL);
1308}
1309
1310int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1311 unsigned bits, gfp_t mask,
1312 struct extent_changeset *changeset)
1313{
1314 /*
1315 * Don't support EXTENT_LOCKED case, same reason as
1316 * set_record_extent_bits().
1317 */
1318 BUG_ON(bits & EXTENT_LOCKED);
1319
1320 return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask,
1321 changeset);
1322}
1323
1324/*
1325 * Either insert or lock the state struct between start and end. If the
1326 * range is already locked, wait for it to be unlocked and then retry.
1327 */
1328int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1329 struct extent_state **cached_state)
1330{
1331 int err;
1332 u64 failed_start;
1333
1334 while (1) {
1335 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
1336 EXTENT_LOCKED, &failed_start,
1337 cached_state, GFP_NOFS, NULL);
1338 if (err == -EEXIST) {
1339 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1340 start = failed_start;
1341 } else
1342 break;
1343 WARN_ON(start > end);
1344 }
1345 return err;
1346}
1347
1348int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1349{
1350 int err;
1351 u64 failed_start;
1352
1353 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1354 &failed_start, NULL, GFP_NOFS, NULL);
1355 if (err == -EEXIST) {
1356 if (failed_start > start)
1357 clear_extent_bit(tree, start, failed_start - 1,
1358 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
1359 return 0;
1360 }
1361 return 1;
1362}
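
/*
 * Illustrative sketch of how these helpers pair up (assumed caller, not code
 * from this file): a blocking path takes the range lock with
 * lock_extent_bits() and drops it later with unlock_extent_cached(), while a
 * non-blocking path can call try_lock_extent() and back off when it
 * returns 0:
 *
 *	if (!try_lock_extent(tree, start, end))
 *		return -EAGAIN;		(hypothetical fallback)
 *	...
 *	unlock_extent(tree, start, end);
 */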
1363
1364void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
1365{
1366 unsigned long index = start >> PAGE_SHIFT;
1367 unsigned long end_index = end >> PAGE_SHIFT;
1368 struct page *page;
1369
1370 while (index <= end_index) {
1371 page = find_get_page(inode->i_mapping, index);
1372 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1373 clear_page_dirty_for_io(page);
1374 put_page(page);
1375 index++;
1376 }
1377}
1378
1379void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1380{
1381 unsigned long index = start >> PAGE_SHIFT;
1382 unsigned long end_index = end >> PAGE_SHIFT;
1383 struct page *page;
1384
1385 while (index <= end_index) {
1386 page = find_get_page(inode->i_mapping, index);
1387 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1388 __set_page_dirty_nobuffers(page);
1389 account_page_redirty(page);
1390 put_page(page);
1391 index++;
1392 }
1393}
1394
1395/*
1396 * helper function to set both the pages and the extents in the tree to writeback
1397 */
1398static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1399{
1400 unsigned long index = start >> PAGE_SHIFT;
1401 unsigned long end_index = end >> PAGE_SHIFT;
1402 struct page *page;
1403
1404 while (index <= end_index) {
1405 page = find_get_page(tree->mapping, index);
1406 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1407 set_page_writeback(page);
1408 put_page(page);
1409 index++;
1410 }
1411}
1412
1413/* find the first state struct with 'bits' set after 'start', and
1414 * return it. tree->lock must be held. NULL will be returned if
1415 * nothing was found after 'start'.
1416 */
1417static struct extent_state *
1418find_first_extent_bit_state(struct extent_io_tree *tree,
1419 u64 start, unsigned bits)
1420{
1421 struct rb_node *node;
1422 struct extent_state *state;
1423
1424 /*
1425 * this search will find all the extents that end after
1426 * our range starts.
1427 */
1428 node = tree_search(tree, start);
1429 if (!node)
1430 goto out;
1431
1432 while (1) {
1433 state = rb_entry(node, struct extent_state, rb_node);
1434 if (state->end >= start && (state->state & bits))
1435 return state;
1436
1437 node = rb_next(node);
1438 if (!node)
1439 break;
1440 }
1441out:
1442 return NULL;
1443}
1444
1445/*
1446 * find the first offset in the io tree with 'bits' set. Zero is
1447 * returned if we find something, and *start_ret and *end_ret are
1448 * set to reflect the state struct that was found.
1449 *
1450 * If nothing was found, 1 is returned.
1451 */
1452int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1453 u64 *start_ret, u64 *end_ret, unsigned bits,
1454 struct extent_state **cached_state)
1455{
1456 struct extent_state *state;
1457 struct rb_node *n;
1458 int ret = 1;
1459
1460 spin_lock(&tree->lock);
1461 if (cached_state && *cached_state) {
1462 state = *cached_state;
1463 if (state->end == start - 1 && extent_state_in_tree(state)) {
1464 n = rb_next(&state->rb_node);
1465 while (n) {
1466 state = rb_entry(n, struct extent_state,
1467 rb_node);
1468 if (state->state & bits)
1469 goto got_it;
1470 n = rb_next(n);
1471 }
1472 free_extent_state(*cached_state);
1473 *cached_state = NULL;
1474 goto out;
1475 }
1476 free_extent_state(*cached_state);
1477 *cached_state = NULL;
1478 }
1479
1480 state = find_first_extent_bit_state(tree, start, bits);
1481got_it:
1482 if (state) {
1483 cache_state_if_flags(state, cached_state, 0);
1484 *start_ret = state->start;
1485 *end_ret = state->end;
1486 ret = 0;
1487 }
1488out:
1489 spin_unlock(&tree->lock);
1490 return ret;
1491}
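
/*
 * Illustrative sketch (assumed caller): walking every range that has a given
 * bit set can be done by calling find_first_extent_bit() in a loop and
 * advancing 'start' past each returned range:
 *
 *	u64 found_start, found_end;
 *
 *	while (!find_first_extent_bit(tree, start, &found_start, &found_end,
 *				      EXTENT_DIRTY, NULL)) {
 *		... process [found_start, found_end] ...
 *		start = found_end + 1;
 *	}
 */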
1492
1493/*
1494 * find a contiguous range of bytes in the file marked as delalloc, not
1495 * more than 'max_bytes'. *start and *end are used to return the range.
1496 *
1497 * Non-zero is returned if we find something, 0 if nothing was in the tree.
1498 */
1499static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1500 u64 *start, u64 *end, u64 max_bytes,
1501 struct extent_state **cached_state)
1502{
1503 struct rb_node *node;
1504 struct extent_state *state;
1505 u64 cur_start = *start;
1506 u64 found = 0;
1507 u64 total_bytes = 0;
1508
1509 spin_lock(&tree->lock);
1510
1511 /*
1512 * this search will find all the extents that end after
1513 * our range starts.
1514 */
1515 node = tree_search(tree, cur_start);
1516 if (!node) {
1517 if (!found)
1518 *end = (u64)-1;
1519 goto out;
1520 }
1521
1522 while (1) {
1523 state = rb_entry(node, struct extent_state, rb_node);
1524 if (found && (state->start != cur_start ||
1525 (state->state & EXTENT_BOUNDARY))) {
1526 goto out;
1527 }
1528 if (!(state->state & EXTENT_DELALLOC)) {
1529 if (!found)
1530 *end = state->end;
1531 goto out;
1532 }
1533 if (!found) {
1534 *start = state->start;
1535 *cached_state = state;
1536 atomic_inc(&state->refs);
1537 }
1538 found++;
1539 *end = state->end;
1540 cur_start = state->end + 1;
1541 node = rb_next(node);
1542 total_bytes += state->end - state->start + 1;
1543 if (total_bytes >= max_bytes)
1544 break;
1545 if (!node)
1546 break;
1547 }
1548out:
1549 spin_unlock(&tree->lock);
1550 return found;
1551}
1552
1553static noinline void __unlock_for_delalloc(struct inode *inode,
1554 struct page *locked_page,
1555 u64 start, u64 end)
1556{
1557 int ret;
1558 struct page *pages[16];
1559 unsigned long index = start >> PAGE_SHIFT;
1560 unsigned long end_index = end >> PAGE_SHIFT;
1561 unsigned long nr_pages = end_index - index + 1;
1562 int i;
1563
1564 if (index == locked_page->index && end_index == index)
1565 return;
1566
1567 while (nr_pages > 0) {
1568 ret = find_get_pages_contig(inode->i_mapping, index,
1569 min_t(unsigned long, nr_pages,
1570 ARRAY_SIZE(pages)), pages);
1571 for (i = 0; i < ret; i++) {
1572 if (pages[i] != locked_page)
1573 unlock_page(pages[i]);
1574 put_page(pages[i]);
1575 }
1576 nr_pages -= ret;
1577 index += ret;
1578 cond_resched();
1579 }
1580}
1581
1582static noinline int lock_delalloc_pages(struct inode *inode,
1583 struct page *locked_page,
1584 u64 delalloc_start,
1585 u64 delalloc_end)
1586{
1587 unsigned long index = delalloc_start >> PAGE_SHIFT;
1588 unsigned long start_index = index;
1589 unsigned long end_index = delalloc_end >> PAGE_SHIFT;
1590 unsigned long pages_locked = 0;
1591 struct page *pages[16];
1592 unsigned long nrpages;
1593 int ret;
1594 int i;
1595
1596 /* the caller is responsible for locking the start index */
1597 if (index == locked_page->index && index == end_index)
1598 return 0;
1599
1600 /* skip the page at the start index */
1601 nrpages = end_index - index + 1;
1602 while (nrpages > 0) {
1603 ret = find_get_pages_contig(inode->i_mapping, index,
1604 min_t(unsigned long,
1605 nrpages, ARRAY_SIZE(pages)), pages);
1606 if (ret == 0) {
1607 ret = -EAGAIN;
1608 goto done;
1609 }
1610 /* now we have an array of pages, lock them all */
1611 for (i = 0; i < ret; i++) {
1612 /*
1613 * the caller is taking responsibility for
1614 * locked_page
1615 */
1616 if (pages[i] != locked_page) {
1617 lock_page(pages[i]);
1618 if (!PageDirty(pages[i]) ||
1619 pages[i]->mapping != inode->i_mapping) {
1620 ret = -EAGAIN;
1621 unlock_page(pages[i]);
1622 put_page(pages[i]);
1623 goto done;
1624 }
1625 }
1626 put_page(pages[i]);
1627 pages_locked++;
1628 }
1629 nrpages -= ret;
1630 index += ret;
1631 cond_resched();
1632 }
1633 ret = 0;
1634done:
1635 if (ret && pages_locked) {
1636 __unlock_for_delalloc(inode, locked_page,
1637 delalloc_start,
1638 ((u64)(start_index + pages_locked - 1)) <<
1639 PAGE_SHIFT);
1640 }
1641 return ret;
1642}
1643
1644/*
1645 * find a contiguous range of bytes in the file marked as delalloc, not
1646 * more than 'max_bytes'. *start and *end are used to return the range.
1647 *
1648 * Non-zero is returned if we find something, 0 if nothing was in the tree.
1649 */
1650STATIC u64 find_lock_delalloc_range(struct inode *inode,
1651 struct extent_io_tree *tree,
1652 struct page *locked_page, u64 *start,
1653 u64 *end, u64 max_bytes)
1654{
1655 u64 delalloc_start;
1656 u64 delalloc_end;
1657 u64 found;
1658 struct extent_state *cached_state = NULL;
1659 int ret;
1660 int loops = 0;
1661
1662again:
1663 /* step one, find a bunch of delalloc bytes starting at start */
1664 delalloc_start = *start;
1665 delalloc_end = 0;
1666 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1667 max_bytes, &cached_state);
1668 if (!found || delalloc_end <= *start) {
1669 *start = delalloc_start;
1670 *end = delalloc_end;
1671 free_extent_state(cached_state);
1672 return 0;
1673 }
1674
1675 /*
1676 * start comes from the offset of locked_page. We have to lock
1677 * pages in order, so we can't process delalloc bytes before
1678 * locked_page
1679 */
1680 if (delalloc_start < *start)
1681 delalloc_start = *start;
1682
1683 /*
1684 * make sure to limit the number of pages we try to lock down
1685 */
1686 if (delalloc_end + 1 - delalloc_start > max_bytes)
1687 delalloc_end = delalloc_start + max_bytes - 1;
1688
1689 /* step two, lock all the pages after the page that has start */
1690 ret = lock_delalloc_pages(inode, locked_page,
1691 delalloc_start, delalloc_end);
1692 if (ret == -EAGAIN) {
1693 /* some of the pages are gone, lets avoid looping by
1694 * shortening the size of the delalloc range we're searching
1695 */
1696 free_extent_state(cached_state);
1697 cached_state = NULL;
1698 if (!loops) {
1699 max_bytes = PAGE_SIZE;
1700 loops = 1;
1701 goto again;
1702 } else {
1703 found = 0;
1704 goto out_failed;
1705 }
1706 }
1707 BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
1708
1709 /* step three, lock the state bits for the whole range */
1710 lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
1711
1712 /* then test to make sure it is all still delalloc */
1713 ret = test_range_bit(tree, delalloc_start, delalloc_end,
1714 EXTENT_DELALLOC, 1, cached_state);
1715 if (!ret) {
1716 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1717 &cached_state, GFP_NOFS);
1718 __unlock_for_delalloc(inode, locked_page,
1719 delalloc_start, delalloc_end);
1720 cond_resched();
1721 goto again;
1722 }
1723 free_extent_state(cached_state);
1724 *start = delalloc_start;
1725 *end = delalloc_end;
1726out_failed:
1727 return found;
1728}
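
/*
 * Illustrative sketch (assumed caller): the writeback path drives this in a
 * loop until the whole page range has been covered, where 'max_bytes' is a
 * caller-chosen cap on how much delalloc to grab at once:
 *
 *	while (delalloc_end < page_end) {
 *		found = find_lock_delalloc_range(inode, tree, locked_page,
 *						 &delalloc_start,
 *						 &delalloc_end, max_bytes);
 *		if (!found) {
 *			delalloc_start = delalloc_end + 1;
 *			continue;
 *		}
 *		... write out [delalloc_start, delalloc_end] ...
 *		delalloc_start = delalloc_end + 1;
 *	}
 */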
1729
1730void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1731 struct page *locked_page,
1732 unsigned clear_bits,
1733 unsigned long page_ops)
1734{
1735 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1736 int ret;
1737 struct page *pages[16];
1738 unsigned long index = start >> PAGE_SHIFT;
1739 unsigned long end_index = end >> PAGE_SHIFT;
1740 unsigned long nr_pages = end_index - index + 1;
1741 int i;
1742
1743 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1744 if (page_ops == 0)
1745 return;
1746
1747 if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
1748 mapping_set_error(inode->i_mapping, -EIO);
1749
1750 while (nr_pages > 0) {
1751 ret = find_get_pages_contig(inode->i_mapping, index,
1752 min_t(unsigned long,
1753 nr_pages, ARRAY_SIZE(pages)), pages);
1754 for (i = 0; i < ret; i++) {
1755
1756 if (page_ops & PAGE_SET_PRIVATE2)
1757 SetPagePrivate2(pages[i]);
1758
1759 if (pages[i] == locked_page) {
1760 put_page(pages[i]);
1761 continue;
1762 }
1763 if (page_ops & PAGE_CLEAR_DIRTY)
1764 clear_page_dirty_for_io(pages[i]);
1765 if (page_ops & PAGE_SET_WRITEBACK)
1766 set_page_writeback(pages[i]);
1767 if (page_ops & PAGE_SET_ERROR)
1768 SetPageError(pages[i]);
1769 if (page_ops & PAGE_END_WRITEBACK)
1770 end_page_writeback(pages[i]);
1771 if (page_ops & PAGE_UNLOCK)
1772 unlock_page(pages[i]);
1773 put_page(pages[i]);
1774 }
1775 nr_pages -= ret;
1776 index += ret;
1777 cond_resched();
1778 }
1779}
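
/*
 * Illustrative sketch (assumed caller): error paths in the delalloc writeback
 * code typically combine several of the PAGE_* ops in one call, for example:
 *
 *	extent_clear_unlock_delalloc(inode, start, end, locked_page,
 *				     clear_bits,
 *				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
 *				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
 *
 * The exact bit combination above is an assumption for illustration only.
 */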
1780
1781/*
1782 * count the number of bytes in the tree that have the given bit(s)
1783 * set. This can be fairly slow, except for EXTENT_DIRTY which is
1784 * cached. The total number of bytes found is returned.
1785 */
1786u64 count_range_bits(struct extent_io_tree *tree,
1787 u64 *start, u64 search_end, u64 max_bytes,
1788 unsigned bits, int contig)
1789{
1790 struct rb_node *node;
1791 struct extent_state *state;
1792 u64 cur_start = *start;
1793 u64 total_bytes = 0;
1794 u64 last = 0;
1795 int found = 0;
1796
1797 if (WARN_ON(search_end <= cur_start))
1798 return 0;
1799
1800 spin_lock(&tree->lock);
1801 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1802 total_bytes = tree->dirty_bytes;
1803 goto out;
1804 }
1805 /*
1806 * this search will find all the extents that end after
1807 * our range starts.
1808 */
1809 node = tree_search(tree, cur_start);
1810 if (!node)
1811 goto out;
1812
1813 while (1) {
1814 state = rb_entry(node, struct extent_state, rb_node);
1815 if (state->start > search_end)
1816 break;
1817 if (contig && found && state->start > last + 1)
1818 break;
1819 if (state->end >= cur_start && (state->state & bits) == bits) {
1820 total_bytes += min(search_end, state->end) + 1 -
1821 max(cur_start, state->start);
1822 if (total_bytes >= max_bytes)
1823 break;
1824 if (!found) {
1825 *start = max(cur_start, state->start);
1826 found = 1;
1827 }
1828 last = state->end;
1829 } else if (contig && found) {
1830 break;
1831 }
1832 node = rb_next(node);
1833 if (!node)
1834 break;
1835 }
1836out:
1837 spin_unlock(&tree->lock);
1838 return total_bytes;
1839}
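
/*
 * Illustrative sketch (assumed caller): counting all dirty bytes from the
 * start of the tree could be done with:
 *
 *	u64 off = 0;
 *	u64 dirty = count_range_bits(tree, &off, (u64)-1, (u64)-1,
 *				     EXTENT_DIRTY, 0);
 *
 * With cur_start == 0 and bits == EXTENT_DIRTY this takes the cached
 * tree->dirty_bytes shortcut at the top of the function.
 */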
1840
1841/*
1842 * set the failrec field for a given byte offset in the tree. If there isn't
1843 * an extent_state starting at that exact offset, -ENOENT is returned.
1844 */
1845static noinline int set_state_failrec(struct extent_io_tree *tree, u64 start,
1846 struct io_failure_record *failrec)
1847{
1848 struct rb_node *node;
1849 struct extent_state *state;
1850 int ret = 0;
1851
1852 spin_lock(&tree->lock);
1853 /*
1854 * this search will find all the extents that end after
1855 * our range starts.
1856 */
1857 node = tree_search(tree, start);
1858 if (!node) {
1859 ret = -ENOENT;
1860 goto out;
1861 }
1862 state = rb_entry(node, struct extent_state, rb_node);
1863 if (state->start != start) {
1864 ret = -ENOENT;
1865 goto out;
1866 }
1867 state->failrec = failrec;
1868out:
1869 spin_unlock(&tree->lock);
1870 return ret;
1871}
1872
1873static noinline int get_state_failrec(struct extent_io_tree *tree, u64 start,
1874 struct io_failure_record **failrec)
1875{
1876 struct rb_node *node;
1877 struct extent_state *state;
1878 int ret = 0;
1879
1880 spin_lock(&tree->lock);
1881 /*
1882 * this search will find all the extents that end after
1883 * our range starts.
1884 */
1885 node = tree_search(tree, start);
1886 if (!node) {
1887 ret = -ENOENT;
1888 goto out;
1889 }
1890 state = rb_entry(node, struct extent_state, rb_node);
1891 if (state->start != start) {
1892 ret = -ENOENT;
1893 goto out;
1894 }
1895 *failrec = state->failrec;
1896out:
1897 spin_unlock(&tree->lock);
1898 return ret;
1899}
1900
1901/*
1902 * searches a range in the state tree for a given mask.
1903 * If 'filled' == 1, this returns 1 only if every extent in the range
1904 * has the bits set. Otherwise, 1 is returned if any bit in the
1905 * range is found set.
1906 */
1907int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1908 unsigned bits, int filled, struct extent_state *cached)
1909{
1910 struct extent_state *state = NULL;
1911 struct rb_node *node;
1912 int bitset = 0;
1913
1914 spin_lock(&tree->lock);
1915 if (cached && extent_state_in_tree(cached) && cached->start <= start &&
1916 cached->end > start)
1917 node = &cached->rb_node;
1918 else
1919 node = tree_search(tree, start);
1920 while (node && start <= end) {
1921 state = rb_entry(node, struct extent_state, rb_node);
1922
1923 if (filled && state->start > start) {
1924 bitset = 0;
1925 break;
1926 }
1927
1928 if (state->start > end)
1929 break;
1930
1931 if (state->state & bits) {
1932 bitset = 1;
1933 if (!filled)
1934 break;
1935 } else if (filled) {
1936 bitset = 0;
1937 break;
1938 }
1939
1940 if (state->end == (u64)-1)
1941 break;
1942
1943 start = state->end + 1;
1944 if (start > end)
1945 break;
1946 node = rb_next(node);
1947 if (!node) {
1948 if (filled)
1949 bitset = 0;
1950 break;
1951 }
1952 }
1953 spin_unlock(&tree->lock);
1954 return bitset;
1955}
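
/*
 * Illustrative sketch (assumed caller): 'filled' selects between "the whole
 * range must carry the bits" and "any overlap is enough":
 *
 *	all_uptodate = test_range_bit(tree, start, end, EXTENT_UPTODATE,
 *				      1, NULL);
 *	any_damaged  = test_range_bit(tree, start, end, EXTENT_DAMAGED,
 *				      0, NULL);
 */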
1956
1957/*
1958 * helper function to set a given page up to date if all the
1959 * extents in the tree for that page are up to date
1960 */
1961static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
1962{
1963 u64 start = page_offset(page);
1964 u64 end = start + PAGE_SIZE - 1;
1965 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1966 SetPageUptodate(page);
1967}
1968
1969int free_io_failure(struct inode *inode, struct io_failure_record *rec)
1970{
1971 int ret;
1972 int err = 0;
1973 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1974
1975 set_state_failrec(failure_tree, rec->start, NULL);
1976 ret = clear_extent_bits(failure_tree, rec->start,
1977 rec->start + rec->len - 1,
1978 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1979 if (ret)
1980 err = ret;
1981
1982 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1983 rec->start + rec->len - 1,
1984 EXTENT_DAMAGED, GFP_NOFS);
1985 if (ret && !err)
1986 err = ret;
1987
1988 kfree(rec);
1989 return err;
1990}
1991
1992/*
1993 * this bypasses the standard btrfs submit functions deliberately, as
1994 * the standard behavior is to write all copies in a raid setup. here we only
1995 * want to write the one bad copy. so we do the mapping for ourselves and issue
1996 * submit_bio directly.
1997 * to avoid any synchronization issues, wait for the data after writing, which
1998 * actually prevents the read that triggered the error from finishing.
1999 * currently, there can be no more than two copies of every data bit. thus,
2000 * exactly one rewrite is required.
2001 */
2002int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
2003 struct page *page, unsigned int pg_offset, int mirror_num)
2004{
2005 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2006 struct bio *bio;
2007 struct btrfs_device *dev;
2008 u64 map_length = 0;
2009 u64 sector;
2010 struct btrfs_bio *bbio = NULL;
2011 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
2012 int ret;
2013
2014 ASSERT(!(fs_info->sb->s_flags & MS_RDONLY));
2015 BUG_ON(!mirror_num);
2016
2017 /* we can't repair anything in raid56 yet */
2018 if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
2019 return 0;
2020
2021 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
2022 if (!bio)
2023 return -EIO;
2024 bio->bi_iter.bi_size = 0;
2025 map_length = length;
2026
2027 ret = btrfs_map_block(fs_info, WRITE, logical,
2028 &map_length, &bbio, mirror_num);
2029 if (ret) {
2030 bio_put(bio);
2031 return -EIO;
2032 }
2033 BUG_ON(mirror_num != bbio->mirror_num);
2034 sector = bbio->stripes[mirror_num-1].physical >> 9;
2035 bio->bi_iter.bi_sector = sector;
2036 dev = bbio->stripes[mirror_num-1].dev;
2037 btrfs_put_bbio(bbio);
2038 if (!dev || !dev->bdev || !dev->writeable) {
2039 bio_put(bio);
2040 return -EIO;
2041 }
2042 bio->bi_bdev = dev->bdev;
2043 bio_add_page(bio, page, length, pg_offset);
2044
2045 if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
2046 /* try to remap that extent elsewhere? */
2047 bio_put(bio);
2048 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
2049 return -EIO;
2050 }
2051
2052 btrfs_info_rl_in_rcu(fs_info,
2053 "read error corrected: ino %llu off %llu (dev %s sector %llu)",
2054 btrfs_ino(inode), start,
2055 rcu_str_deref(dev->name), sector);
2056 bio_put(bio);
2057 return 0;
2058}
2059
2060int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
2061 int mirror_num)
2062{
2063 u64 start = eb->start;
2064 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
2065 int ret = 0;
2066
2067 if (root->fs_info->sb->s_flags & MS_RDONLY)
2068 return -EROFS;
2069
2070 for (i = 0; i < num_pages; i++) {
2071 struct page *p = eb->pages[i];
2072
2073 ret = repair_io_failure(root->fs_info->btree_inode, start,
2074 PAGE_SIZE, start, p,
2075 start - page_offset(p), mirror_num);
2076 if (ret)
2077 break;
2078 start += PAGE_SIZE;
2079 }
2080
2081 return ret;
2082}
2083
2084/*
2085 * each time an IO finishes, we do a fast check in the IO failure tree
2086 * to see if we need to process or clean up an io_failure_record
2087 */
2088int clean_io_failure(struct inode *inode, u64 start, struct page *page,
2089 unsigned int pg_offset)
2090{
2091 u64 private;
2092 struct io_failure_record *failrec;
2093 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2094 struct extent_state *state;
2095 int num_copies;
2096 int ret;
2097
2098 private = 0;
2099 ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
2100 (u64)-1, 1, EXTENT_DIRTY, 0);
2101 if (!ret)
2102 return 0;
2103
2104 ret = get_state_failrec(&BTRFS_I(inode)->io_failure_tree, start,
2105 &failrec);
2106 if (ret)
2107 return 0;
2108
2109 BUG_ON(!failrec->this_mirror);
2110
2111 if (failrec->in_validation) {
2112 /* there was no real error, just free the record */
2113 pr_debug("clean_io_failure: freeing dummy error at %llu\n",
2114 failrec->start);
2115 goto out;
2116 }
2117 if (fs_info->sb->s_flags & MS_RDONLY)
2118 goto out;
2119
2120 spin_lock(&BTRFS_I(inode)->io_tree.lock);
2121 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
2122 failrec->start,
2123 EXTENT_LOCKED);
2124 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
2125
2126 if (state && state->start <= failrec->start &&
2127 state->end >= failrec->start + failrec->len - 1) {
2128 num_copies = btrfs_num_copies(fs_info, failrec->logical,
2129 failrec->len);
2130 if (num_copies > 1) {
2131 repair_io_failure(inode, start, failrec->len,
2132 failrec->logical, page,
2133 pg_offset, failrec->failed_mirror);
2134 }
2135 }
2136
2137out:
2138 free_io_failure(inode, failrec);
2139
2140 return 0;
2141}
2142
2143/*
2144 * Can be called when
2145 * - holding the extent lock
2146 * - under an ordered extent
2147 * - the inode is being freed
2148 */
2149void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end)
2150{
2151 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2152 struct io_failure_record *failrec;
2153 struct extent_state *state, *next;
2154
2155 if (RB_EMPTY_ROOT(&failure_tree->state))
2156 return;
2157
2158 spin_lock(&failure_tree->lock);
2159 state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
2160 while (state) {
2161 if (state->start > end)
2162 break;
2163
2164 ASSERT(state->end <= end);
2165
2166 next = next_state(state);
2167
2168 failrec = state->failrec;
2169 free_extent_state(state);
2170 kfree(failrec);
2171
2172 state = next;
2173 }
2174 spin_unlock(&failure_tree->lock);
2175}
2176
2177int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
2178 struct io_failure_record **failrec_ret)
2179{
2180 struct io_failure_record *failrec;
2181 struct extent_map *em;
2182 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2183 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2184 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2185 int ret;
2186 u64 logical;
2187
2188 ret = get_state_failrec(failure_tree, start, &failrec);
2189 if (ret) {
2190 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2191 if (!failrec)
2192 return -ENOMEM;
2193
2194 failrec->start = start;
2195 failrec->len = end - start + 1;
2196 failrec->this_mirror = 0;
2197 failrec->bio_flags = 0;
2198 failrec->in_validation = 0;
2199
2200 read_lock(&em_tree->lock);
2201 em = lookup_extent_mapping(em_tree, start, failrec->len);
2202 if (!em) {
2203 read_unlock(&em_tree->lock);
2204 kfree(failrec);
2205 return -EIO;
2206 }
2207
2208 if (em->start > start || em->start + em->len <= start) {
2209 free_extent_map(em);
2210 em = NULL;
2211 }
2212 read_unlock(&em_tree->lock);
2213 if (!em) {
2214 kfree(failrec);
2215 return -EIO;
2216 }
2217
2218 logical = start - em->start;
2219 logical = em->block_start + logical;
2220 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2221 logical = em->block_start;
2222 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2223 extent_set_compress_type(&failrec->bio_flags,
2224 em->compress_type);
2225 }
2226
2227 pr_debug("Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu\n",
2228 logical, start, failrec->len);
2229
2230 failrec->logical = logical;
2231 free_extent_map(em);
2232
2233 /* set the bits in the private failure tree */
2234 ret = set_extent_bits(failure_tree, start, end,
2235 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2236 if (ret >= 0)
2237 ret = set_state_failrec(failure_tree, start, failrec);
2238 /* set the bits in the inode's tree */
2239 if (ret >= 0)
2240 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2241 GFP_NOFS);
2242 if (ret < 0) {
2243 kfree(failrec);
2244 return ret;
2245 }
2246 } else {
2247 pr_debug("Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d\n",
2248 failrec->logical, failrec->start, failrec->len,
2249 failrec->in_validation);
2250 /*
2251 * when data can be on disk more than twice, add to failrec here
2252 * (e.g. with a list for failed_mirror) to make
2253 * clean_io_failure() clean all those errors at once.
2254 */
2255 }
2256
2257 *failrec_ret = failrec;
2258
2259 return 0;
2260}
2261
2262int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
2263 struct io_failure_record *failrec, int failed_mirror)
2264{
2265 int num_copies;
2266
2267 num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
2268 failrec->logical, failrec->len);
2269 if (num_copies == 1) {
2270 /*
2271 * we only have a single copy of the data, so don't bother with
2272 * all the retry and error correction code that follows. no
2273 * matter what the error is, it is very likely to persist.
2274 */
2275 pr_debug("Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
2276 num_copies, failrec->this_mirror, failed_mirror);
2277 return 0;
2278 }
2279
2280 /*
2281 * there are two premises:
2282 * a) deliver good data to the caller
2283 * b) correct the bad sectors on disk
2284 */
2285 if (failed_bio->bi_vcnt > 1) {
2286 /*
2287 * to fulfill b), we need to know the exact failing sectors, as
2288 * we don't want to rewrite any more than the failed ones. thus,
2289 * we need separate read requests for the failed bio
2290 *
2291 * if the following BUG_ON triggers, our validation request got
2292 * merged. we need separate requests for our algorithm to work.
2293 */
2294 BUG_ON(failrec->in_validation);
2295 failrec->in_validation = 1;
2296 failrec->this_mirror = failed_mirror;
2297 } else {
2298 /*
2299 * we're ready to fulfill a) and b) alongside. get a good copy
2300 * of the failed sector and if we succeed, we have setup
2301 * everything for repair_io_failure to do the rest for us.
2302 */
2303 if (failrec->in_validation) {
2304 BUG_ON(failrec->this_mirror != failed_mirror);
2305 failrec->in_validation = 0;
2306 failrec->this_mirror = 0;
2307 }
2308 failrec->failed_mirror = failed_mirror;
2309 failrec->this_mirror++;
2310 if (failrec->this_mirror == failed_mirror)
2311 failrec->this_mirror++;
2312 }
2313
2314 if (failrec->this_mirror > num_copies) {
2315 pr_debug("Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
2316 num_copies, failrec->this_mirror, failed_mirror);
2317 return 0;
2318 }
2319
2320 return 1;
2321}
2322
2323
2324struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
2325 struct io_failure_record *failrec,
2326 struct page *page, int pg_offset, int icsum,
2327 bio_end_io_t *endio_func, void *data)
2328{
2329 struct bio *bio;
2330 struct btrfs_io_bio *btrfs_failed_bio;
2331 struct btrfs_io_bio *btrfs_bio;
2332
2333 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
2334 if (!bio)
2335 return NULL;
2336
2337 bio->bi_end_io = endio_func;
2338 bio->bi_iter.bi_sector = failrec->logical >> 9;
2339 bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2340 bio->bi_iter.bi_size = 0;
2341 bio->bi_private = data;
2342
2343 btrfs_failed_bio = btrfs_io_bio(failed_bio);
2344 if (btrfs_failed_bio->csum) {
2345 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2346 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
2347
2348 btrfs_bio = btrfs_io_bio(bio);
2349 btrfs_bio->csum = btrfs_bio->csum_inline;
2350 icsum *= csum_size;
2351 memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + icsum,
2352 csum_size);
2353 }
2354
2355 bio_add_page(bio, page, failrec->len, pg_offset);
2356
2357 return bio;
2358}
2359
2360/*
2361 * this is a generic handler for readpage errors (default
2362 * readpage_io_failed_hook). if other copies exist, read those and write back
2363 * good data to the failed position. It does not try to remap the failed
2364 * extent elsewhere, hoping the device will be smart enough to do this as
2365 * needed.
2366 */
2367
2368static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
2369 struct page *page, u64 start, u64 end,
2370 int failed_mirror)
2371{
2372 struct io_failure_record *failrec;
2373 struct inode *inode = page->mapping->host;
2374 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2375 struct bio *bio;
2376 int read_mode;
2377 int ret;
2378
2379 BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2380
2381 ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
2382 if (ret)
2383 return ret;
2384
2385 ret = btrfs_check_repairable(inode, failed_bio, failrec, failed_mirror);
2386 if (!ret) {
2387 free_io_failure(inode, failrec);
2388 return -EIO;
2389 }
2390
2391 if (failed_bio->bi_vcnt > 1)
2392 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2393 else
2394 read_mode = READ_SYNC;
2395
2396 phy_offset >>= inode->i_sb->s_blocksize_bits;
2397 bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
2398 start - page_offset(page),
2399 (int)phy_offset, failed_bio->bi_end_io,
2400 NULL);
2401 if (!bio) {
2402 free_io_failure(inode, failrec);
2403 return -EIO;
2404 }
2405
2406 pr_debug("Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d\n",
2407 read_mode, failrec->this_mirror, failrec->in_validation);
2408
2409 ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2410 failrec->this_mirror,
2411 failrec->bio_flags, 0);
2412 if (ret) {
2413 free_io_failure(inode, failrec);
2414 bio_put(bio);
2415 }
2416
2417 return ret;
2418}
2419
2420/* lots and lots of room for performance fixes in the end_bio funcs */
2421
2422void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2423{
2424 int uptodate = (err == 0);
2425 struct extent_io_tree *tree;
2426 int ret = 0;
2427
2428 tree = &BTRFS_I(page->mapping->host)->io_tree;
2429
2430 if (tree->ops && tree->ops->writepage_end_io_hook) {
2431 ret = tree->ops->writepage_end_io_hook(page, start,
2432 end, NULL, uptodate);
2433 if (ret)
2434 uptodate = 0;
2435 }
2436
2437 if (!uptodate) {
2438 ClearPageUptodate(page);
2439 SetPageError(page);
2440 ret = ret < 0 ? ret : -EIO;
2441 mapping_set_error(page->mapping, ret);
2442 }
2443}
2444
2445/*
2446 * after a writepage IO is done, we need to:
2447 * clear the uptodate bits on error
2448 * clear the writeback bits in the extent tree for this IO
2449 * end_page_writeback if the page has no more pending IO
2450 *
2451 * Scheduling is not allowed, so the extent state tree is expected
2452 * to have one and only one object corresponding to this IO.
2453 */
2454static void end_bio_extent_writepage(struct bio *bio)
2455{
2456 struct bio_vec *bvec;
2457 u64 start;
2458 u64 end;
2459 int i;
2460
2461 bio_for_each_segment_all(bvec, bio, i) {
2462 struct page *page = bvec->bv_page;
2463
2464 /* We always issue full-page writes, but if some block
2465 * in a page fails to write, blk_update_request() will
2466 * advance bv_offset and adjust bv_len to compensate.
2467 * Print a warning for nonzero offsets, and an error
2468 * if they don't add up to a full page. */
2469 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2470 if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2471 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
2472 "partial page write in btrfs with offset %u and length %u",
2473 bvec->bv_offset, bvec->bv_len);
2474 else
2475 btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
2476 "incomplete page write in btrfs with offset %u and "
2477 "length %u",
2478 bvec->bv_offset, bvec->bv_len);
2479 }
2480
2481 start = page_offset(page);
2482 end = start + bvec->bv_offset + bvec->bv_len - 1;
2483
2484 end_extent_writepage(page, bio->bi_error, start, end);
2485 end_page_writeback(page);
2486 }
2487
2488 bio_put(bio);
2489}
2490
2491static void
2492endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2493 int uptodate)
2494{
2495 struct extent_state *cached = NULL;
2496 u64 end = start + len - 1;
2497
2498 if (uptodate && tree->track_uptodate)
2499 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
2500 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
2501}
2502
2503/*
2504 * after a readpage IO is done, we need to:
2505 * clear the uptodate bits on error
2506 * set the uptodate bits if things worked
2507 * set the page up to date if all extents in the tree are uptodate
2508 * clear the lock bit in the extent tree
2509 * unlock the page if there are no other extents locked for it
2510 *
2511 * Scheduling is not allowed, so the extent state tree is expected
2512 * to have one and only one object corresponding to this IO.
2513 */
2514static void end_bio_extent_readpage(struct bio *bio)
2515{
2516 struct bio_vec *bvec;
2517 int uptodate = !bio->bi_error;
2518 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
2519 struct extent_io_tree *tree;
2520 u64 offset = 0;
2521 u64 start;
2522 u64 end;
2523 u64 len;
2524 u64 extent_start = 0;
2525 u64 extent_len = 0;
2526 int mirror;
2527 int ret;
2528 int i;
2529
2530 bio_for_each_segment_all(bvec, bio, i) {
2531 struct page *page = bvec->bv_page;
2532 struct inode *inode = page->mapping->host;
2533
2534 pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
2535 "mirror=%u\n", (u64)bio->bi_iter.bi_sector,
2536 bio->bi_error, io_bio->mirror_num);
2537 tree = &BTRFS_I(inode)->io_tree;
2538
2539 /* We always issue full-page reads, but if some block
2540 * in a page fails to read, blk_update_request() will
2541 * advance bv_offset and adjust bv_len to compensate.
2542 * Print a warning for nonzero offsets, and an error
2543 * if they don't add up to a full page. */
2544 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2545 if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2546 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
2547 "partial page read in btrfs with offset %u and length %u",
2548 bvec->bv_offset, bvec->bv_len);
2549 else
2550 btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
2551 "incomplete page read in btrfs with offset %u and "
2552 "length %u",
2553 bvec->bv_offset, bvec->bv_len);
2554 }
2555
2556 start = page_offset(page);
2557 end = start + bvec->bv_offset + bvec->bv_len - 1;
2558 len = bvec->bv_len;
2559
2560 mirror = io_bio->mirror_num;
2561 if (likely(uptodate && tree->ops &&
2562 tree->ops->readpage_end_io_hook)) {
2563 ret = tree->ops->readpage_end_io_hook(io_bio, offset,
2564 page, start, end,
2565 mirror);
2566 if (ret)
2567 uptodate = 0;
2568 else
2569 clean_io_failure(inode, start, page, 0);
2570 }
2571
2572 if (likely(uptodate))
2573 goto readpage_ok;
2574
2575 if (tree->ops && tree->ops->readpage_io_failed_hook) {
2576 ret = tree->ops->readpage_io_failed_hook(page, mirror);
2577 if (!ret && !bio->bi_error)
2578 uptodate = 1;
2579 } else {
2580 /*
2581 * The generic bio_readpage_error handles errors the
2582 * following way: If possible, new read requests are
2583 * created and submitted and will end up in
2584 * end_bio_extent_readpage as well (if we're lucky, not
2585 * in the !uptodate case). In that case it returns 0 and
2586 * we just go on with the next page in our bio. If it
2587 * can't handle the error it will return -EIO and we
2588 * remain responsible for that page.
2589 */
2590 ret = bio_readpage_error(bio, offset, page, start, end,
2591 mirror);
2592 if (ret == 0) {
2593 uptodate = !bio->bi_error;
2594 offset += len;
2595 continue;
2596 }
2597 }
2598readpage_ok:
2599 if (likely(uptodate)) {
2600 loff_t i_size = i_size_read(inode);
2601 pgoff_t end_index = i_size >> PAGE_SHIFT;
2602 unsigned off;
2603
2604 /* Zero out the end if this page straddles i_size */
2605 off = i_size & (PAGE_SIZE-1);
2606 if (page->index == end_index && off)
2607 zero_user_segment(page, off, PAGE_SIZE);
2608 SetPageUptodate(page);
2609 } else {
2610 ClearPageUptodate(page);
2611 SetPageError(page);
2612 }
2613 unlock_page(page);
2614 offset += len;
2615
2616 if (unlikely(!uptodate)) {
2617 if (extent_len) {
2618 endio_readpage_release_extent(tree,
2619 extent_start,
2620 extent_len, 1);
2621 extent_start = 0;
2622 extent_len = 0;
2623 }
2624 endio_readpage_release_extent(tree, start,
2625 end - start + 1, 0);
2626 } else if (!extent_len) {
2627 extent_start = start;
2628 extent_len = end + 1 - start;
2629 } else if (extent_start + extent_len == start) {
2630 extent_len += end + 1 - start;
2631 } else {
2632 endio_readpage_release_extent(tree, extent_start,
2633 extent_len, uptodate);
2634 extent_start = start;
2635 extent_len = end + 1 - start;
2636 }
2637 }
2638
2639 if (extent_len)
2640 endio_readpage_release_extent(tree, extent_start, extent_len,
2641 uptodate);
2642 if (io_bio->end_io)
2643 io_bio->end_io(io_bio, bio->bi_error);
2644 bio_put(bio);
2645}
2646
2647/*
2648 * this allocates from the btrfs_bioset. We're returning a bio right now
2649 * but you can call btrfs_io_bio for the appropriate container_of magic
2650 */
2651struct bio *
2652btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2653 gfp_t gfp_flags)
2654{
2655 struct btrfs_io_bio *btrfs_bio;
2656 struct bio *bio;
2657
2658 bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset);
2659
2660 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
2661 while (!bio && (nr_vecs /= 2)) {
2662 bio = bio_alloc_bioset(gfp_flags,
2663 nr_vecs, btrfs_bioset);
2664 }
2665 }
2666
2667 if (bio) {
2668 bio->bi_bdev = bdev;
2669 bio->bi_iter.bi_sector = first_sector;
2670 btrfs_bio = btrfs_io_bio(bio);
2671 btrfs_bio->csum = NULL;
2672 btrfs_bio->csum_allocated = NULL;
2673 btrfs_bio->end_io = NULL;
2674 }
2675 return bio;
2676}
2677
2678struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
2679{
2680 struct btrfs_io_bio *btrfs_bio;
2681 struct bio *new;
2682
2683 new = bio_clone_bioset(bio, gfp_mask, btrfs_bioset);
2684 if (new) {
2685 btrfs_bio = btrfs_io_bio(new);
2686 btrfs_bio->csum = NULL;
2687 btrfs_bio->csum_allocated = NULL;
2688 btrfs_bio->end_io = NULL;
2689
2690#ifdef CONFIG_BLK_CGROUP
2691 /* FIXME, put this into bio_clone_bioset */
2692 if (bio->bi_css)
2693 bio_associate_blkcg(new, bio->bi_css);
2694#endif
2695 }
2696 return new;
2697}
2698
2699/* this also allocates from the btrfs_bioset */
2700struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
2701{
2702 struct btrfs_io_bio *btrfs_bio;
2703 struct bio *bio;
2704
2705 bio = bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset);
2706 if (bio) {
2707 btrfs_bio = btrfs_io_bio(bio);
2708 btrfs_bio->csum = NULL;
2709 btrfs_bio->csum_allocated = NULL;
2710 btrfs_bio->end_io = NULL;
2711 }
2712 return bio;
2713}
2714
2715
2716static int __must_check submit_one_bio(int rw, struct bio *bio,
2717 int mirror_num, unsigned long bio_flags)
2718{
2719 int ret = 0;
2720 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2721 struct page *page = bvec->bv_page;
2722 struct extent_io_tree *tree = bio->bi_private;
2723 u64 start;
2724
2725 start = page_offset(page) + bvec->bv_offset;
2726
2727 bio->bi_private = NULL;
2728
2729 bio_get(bio);
2730
2731 if (tree->ops && tree->ops->submit_bio_hook)
2732 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
2733 mirror_num, bio_flags, start);
2734 else
2735 btrfsic_submit_bio(rw, bio);
2736
2737 bio_put(bio);
2738 return ret;
2739}
2740
2741static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
2742 unsigned long offset, size_t size, struct bio *bio,
2743 unsigned long bio_flags)
2744{
2745 int ret = 0;
2746 if (tree->ops && tree->ops->merge_bio_hook)
2747 ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
2748 bio_flags);
2749 BUG_ON(ret < 0);
2750 return ret;
2751
2752}
2753
2754static int submit_extent_page(int rw, struct extent_io_tree *tree,
2755 struct writeback_control *wbc,
2756 struct page *page, sector_t sector,
2757 size_t size, unsigned long offset,
2758 struct block_device *bdev,
2759 struct bio **bio_ret,
2760 unsigned long max_pages,
2761 bio_end_io_t end_io_func,
2762 int mirror_num,
2763 unsigned long prev_bio_flags,
2764 unsigned long bio_flags,
2765 bool force_bio_submit)
2766{
2767 int ret = 0;
2768 struct bio *bio;
2769 int contig = 0;
2770 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
2771 size_t page_size = min_t(size_t, size, PAGE_SIZE);
2772
2773 if (bio_ret && *bio_ret) {
2774 bio = *bio_ret;
2775 if (old_compressed)
2776 contig = bio->bi_iter.bi_sector == sector;
2777 else
2778 contig = bio_end_sector(bio) == sector;
2779
2780 if (prev_bio_flags != bio_flags || !contig ||
2781 force_bio_submit ||
2782 merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
2783 bio_add_page(bio, page, page_size, offset) < page_size) {
2784 ret = submit_one_bio(rw, bio, mirror_num,
2785 prev_bio_flags);
2786 if (ret < 0) {
2787 *bio_ret = NULL;
2788 return ret;
2789 }
2790 bio = NULL;
2791 } else {
2792 if (wbc)
2793 wbc_account_io(wbc, page, page_size);
2794 return 0;
2795 }
2796 }
2797
2798 bio = btrfs_bio_alloc(bdev, sector, BIO_MAX_PAGES,
2799 GFP_NOFS | __GFP_HIGH);
2800 if (!bio)
2801 return -ENOMEM;
2802
2803 bio_add_page(bio, page, page_size, offset);
2804 bio->bi_end_io = end_io_func;
2805 bio->bi_private = tree;
2806 if (wbc) {
2807 wbc_init_bio(wbc, bio);
2808 wbc_account_io(wbc, page, page_size);
2809 }
2810
2811 if (bio_ret)
2812 *bio_ret = bio;
2813 else
2814 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
2815
2816 return ret;
2817}
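
/*
 * Illustrative sketch (assumed caller): submit_extent_page() batches
 * contiguous pages into a single bio through *bio_ret, so whoever owns the
 * bio pointer must submit the leftover bio at the end, in the same way
 * extent_read_full_page() below does:
 *
 *	struct bio *bio = NULL;
 *	unsigned long bio_flags = 0;
 *
 *	... repeated submit_extent_page(..., &bio, ...) calls ...
 *	if (bio)
 *		ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
 */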
2818
2819static void attach_extent_buffer_page(struct extent_buffer *eb,
2820 struct page *page)
2821{
2822 if (!PagePrivate(page)) {
2823 SetPagePrivate(page);
2824 get_page(page);
2825 set_page_private(page, (unsigned long)eb);
2826 } else {
2827 WARN_ON(page->private != (unsigned long)eb);
2828 }
2829}
2830
2831void set_page_extent_mapped(struct page *page)
2832{
2833 if (!PagePrivate(page)) {
2834 SetPagePrivate(page);
2835 get_page(page);
2836 set_page_private(page, EXTENT_PAGE_PRIVATE);
2837 }
2838}
2839
2840static struct extent_map *
2841__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
2842 u64 start, u64 len, get_extent_t *get_extent,
2843 struct extent_map **em_cached)
2844{
2845 struct extent_map *em;
2846
2847 if (em_cached && *em_cached) {
2848 em = *em_cached;
2849 if (extent_map_in_tree(em) && start >= em->start &&
2850 start < extent_map_end(em)) {
2851 atomic_inc(&em->refs);
2852 return em;
2853 }
2854
2855 free_extent_map(em);
2856 *em_cached = NULL;
2857 }
2858
2859 em = get_extent(inode, page, pg_offset, start, len, 0);
2860 if (em_cached && !IS_ERR_OR_NULL(em)) {
2861 BUG_ON(*em_cached);
2862 atomic_inc(&em->refs);
2863 *em_cached = em;
2864 }
2865 return em;
2866}
2867/*
2868 * basic readpage implementation. Locked extent state structs are inserted
2869 * into the tree; they are removed when the IO is done (by the end_io
2870 * handlers).
2871 * XXX JDM: This needs looking at to ensure proper page locking
2872 */
2873static int __do_readpage(struct extent_io_tree *tree,
2874 struct page *page,
2875 get_extent_t *get_extent,
2876 struct extent_map **em_cached,
2877 struct bio **bio, int mirror_num,
2878 unsigned long *bio_flags, int rw,
2879 u64 *prev_em_start)
2880{
2881 struct inode *inode = page->mapping->host;
2882 u64 start = page_offset(page);
2883 u64 page_end = start + PAGE_SIZE - 1;
2884 u64 end;
2885 u64 cur = start;
2886 u64 extent_offset;
2887 u64 last_byte = i_size_read(inode);
2888 u64 block_start;
2889 u64 cur_end;
2890 sector_t sector;
2891 struct extent_map *em;
2892 struct block_device *bdev;
2893 int ret;
2894 int nr = 0;
2895 size_t pg_offset = 0;
2896 size_t iosize;
2897 size_t disk_io_size;
2898 size_t blocksize = inode->i_sb->s_blocksize;
2899 unsigned long this_bio_flag = 0;
2900
2901 set_page_extent_mapped(page);
2902
2903 end = page_end;
2904 if (!PageUptodate(page)) {
2905 if (cleancache_get_page(page) == 0) {
2906 BUG_ON(blocksize != PAGE_SIZE);
2907 unlock_extent(tree, start, end);
2908 goto out;
2909 }
2910 }
2911
2912 if (page->index == last_byte >> PAGE_SHIFT) {
2913 char *userpage;
2914 size_t zero_offset = last_byte & (PAGE_SIZE - 1);
2915
2916 if (zero_offset) {
2917 iosize = PAGE_SIZE - zero_offset;
2918 userpage = kmap_atomic(page);
2919 memset(userpage + zero_offset, 0, iosize);
2920 flush_dcache_page(page);
2921 kunmap_atomic(userpage);
2922 }
2923 }
2924 while (cur <= end) {
2925 unsigned long pnr = (last_byte >> PAGE_SHIFT) + 1;
2926 bool force_bio_submit = false;
2927
2928 if (cur >= last_byte) {
2929 char *userpage;
2930 struct extent_state *cached = NULL;
2931
2932 iosize = PAGE_SIZE - pg_offset;
2933 userpage = kmap_atomic(page);
2934 memset(userpage + pg_offset, 0, iosize);
2935 flush_dcache_page(page);
2936 kunmap_atomic(userpage);
2937 set_extent_uptodate(tree, cur, cur + iosize - 1,
2938 &cached, GFP_NOFS);
2939 unlock_extent_cached(tree, cur,
2940 cur + iosize - 1,
2941 &cached, GFP_NOFS);
2942 break;
2943 }
2944 em = __get_extent_map(inode, page, pg_offset, cur,
2945 end - cur + 1, get_extent, em_cached);
2946 if (IS_ERR_OR_NULL(em)) {
2947 SetPageError(page);
2948 unlock_extent(tree, cur, end);
2949 break;
2950 }
2951 extent_offset = cur - em->start;
2952 BUG_ON(extent_map_end(em) <= cur);
2953 BUG_ON(end < cur);
2954
2955 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2956 this_bio_flag |= EXTENT_BIO_COMPRESSED;
2957 extent_set_compress_type(&this_bio_flag,
2958 em->compress_type);
2959 }
2960
2961 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2962 cur_end = min(extent_map_end(em) - 1, end);
2963 iosize = ALIGN(iosize, blocksize);
2964 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2965 disk_io_size = em->block_len;
2966 sector = em->block_start >> 9;
2967 } else {
2968 sector = (em->block_start + extent_offset) >> 9;
2969 disk_io_size = iosize;
2970 }
2971 bdev = em->bdev;
2972 block_start = em->block_start;
2973 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2974 block_start = EXTENT_MAP_HOLE;
2975
2976 /*
2977 * If we have a file range that points to a compressed extent
2978 * and it's followed by a consecutive file range that points to
2979 * the same compressed extent (possibly with a different
2980 * offset and/or length, so it either points to the whole extent
2981 * or only part of it), we must make sure we do not submit a
2982 * single bio to populate the pages for the 2 ranges because
2983 * this makes the compressed extent read zero out the pages
2984 * belonging to the 2nd range. Imagine the following scenario:
2985 *
2986 * File layout
2987 * [0 - 8K] [8K - 24K]
2988 * | |
2989 * | |
2990 * points to extent X, points to extent X,
2991 * offset 4K, length of 8K offset 0, length 16K
2992 *
2993 * [extent X, compressed length = 4K uncompressed length = 16K]
2994 *
2995 * If the bio to read the compressed extent covers both ranges,
2996 * it will decompress extent X into the pages belonging to the
2997 * first range and then it will stop, zeroing out the remaining
2998 * pages that belong to the other range that points to extent X.
2999 * So here we make sure we submit 2 bios, one for the first
3000 * range and another one for the second range. Both will target
3001 * the same physical extent from disk, but we can't currently
3002 * make the compressed bio endio callback populate the pages
3003 * for both ranges because each compressed bio is tightly
3004 * coupled with a single extent map, and each range can have
3005 * an extent map with a different offset value relative to the
3006 * uncompressed data of our extent and different lengths. This
3007 * is a corner case so we prioritize correctness over
3008 * non-optimal behavior (submitting 2 bios for the same extent).
3009 */
3010 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
3011 prev_em_start && *prev_em_start != (u64)-1 &&
3012 *prev_em_start != em->orig_start)
3013 force_bio_submit = true;
3014
3015 if (prev_em_start)
3016 *prev_em_start = em->orig_start;
3017
3018 free_extent_map(em);
3019 em = NULL;
3020
3021 /* we've found a hole, just zero and go on */
3022 if (block_start == EXTENT_MAP_HOLE) {
3023 char *userpage;
3024 struct extent_state *cached = NULL;
3025
3026 userpage = kmap_atomic(page);
3027 memset(userpage + pg_offset, 0, iosize);
3028 flush_dcache_page(page);
3029 kunmap_atomic(userpage);
3030
3031 set_extent_uptodate(tree, cur, cur + iosize - 1,
3032 &cached, GFP_NOFS);
3033 unlock_extent_cached(tree, cur,
3034 cur + iosize - 1,
3035 &cached, GFP_NOFS);
3036 cur = cur + iosize;
3037 pg_offset += iosize;
3038 continue;
3039 }
3040 /* the get_extent function already copied into the page */
3041 if (test_range_bit(tree, cur, cur_end,
3042 EXTENT_UPTODATE, 1, NULL)) {
3043 check_page_uptodate(tree, page);
3044 unlock_extent(tree, cur, cur + iosize - 1);
3045 cur = cur + iosize;
3046 pg_offset += iosize;
3047 continue;
3048 }
3049 /* we have an inline extent but it didn't get marked up
3050 * to date. Error out
3051 */
3052 if (block_start == EXTENT_MAP_INLINE) {
3053 SetPageError(page);
3054 unlock_extent(tree, cur, cur + iosize - 1);
3055 cur = cur + iosize;
3056 pg_offset += iosize;
3057 continue;
3058 }
3059
3060 pnr -= page->index;
3061 ret = submit_extent_page(rw, tree, NULL, page,
3062 sector, disk_io_size, pg_offset,
3063 bdev, bio, pnr,
3064 end_bio_extent_readpage, mirror_num,
3065 *bio_flags,
3066 this_bio_flag,
3067 force_bio_submit);
3068 if (!ret) {
3069 nr++;
3070 *bio_flags = this_bio_flag;
3071 } else {
3072 SetPageError(page);
3073 unlock_extent(tree, cur, cur + iosize - 1);
3074 }
3075 cur = cur + iosize;
3076 pg_offset += iosize;
3077 }
3078out:
3079 if (!nr) {
3080 if (!PageError(page))
3081 SetPageUptodate(page);
3082 unlock_page(page);
3083 }
3084 return 0;
3085}
3086
3087static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
3088 struct page *pages[], int nr_pages,
3089 u64 start, u64 end,
3090 get_extent_t *get_extent,
3091 struct extent_map **em_cached,
3092 struct bio **bio, int mirror_num,
3093 unsigned long *bio_flags, int rw,
3094 u64 *prev_em_start)
3095{
3096 struct inode *inode;
3097 struct btrfs_ordered_extent *ordered;
3098 int index;
3099
3100 inode = pages[0]->mapping->host;
3101 while (1) {
3102 lock_extent(tree, start, end);
3103 ordered = btrfs_lookup_ordered_range(inode, start,
3104 end - start + 1);
3105 if (!ordered)
3106 break;
3107 unlock_extent(tree, start, end);
3108 btrfs_start_ordered_extent(inode, ordered, 1);
3109 btrfs_put_ordered_extent(ordered);
3110 }
3111
3112 for (index = 0; index < nr_pages; index++) {
3113 __do_readpage(tree, pages[index], get_extent, em_cached, bio,
3114 mirror_num, bio_flags, rw, prev_em_start);
3115 put_page(pages[index]);
3116 }
3117}
3118
3119static void __extent_readpages(struct extent_io_tree *tree,
3120 struct page *pages[],
3121 int nr_pages, get_extent_t *get_extent,
3122 struct extent_map **em_cached,
3123 struct bio **bio, int mirror_num,
3124 unsigned long *bio_flags, int rw,
3125 u64 *prev_em_start)
3126{
3127 u64 start = 0;
3128 u64 end = 0;
3129 u64 page_start;
3130 int index;
3131 int first_index = 0;
3132
3133 for (index = 0; index < nr_pages; index++) {
3134 page_start = page_offset(pages[index]);
3135 if (!end) {
3136 start = page_start;
3137 end = start + PAGE_SIZE - 1;
3138 first_index = index;
3139 } else if (end + 1 == page_start) {
3140 end += PAGE_SIZE;
3141 } else {
3142 __do_contiguous_readpages(tree, &pages[first_index],
3143 index - first_index, start,
3144 end, get_extent, em_cached,
3145 bio, mirror_num, bio_flags,
3146 rw, prev_em_start);
3147 start = page_start;
3148 end = start + PAGE_SIZE - 1;
3149 first_index = index;
3150 }
3151 }
3152
3153 if (end)
3154 __do_contiguous_readpages(tree, &pages[first_index],
3155 index - first_index, start,
3156 end, get_extent, em_cached, bio,
3157 mirror_num, bio_flags, rw,
3158 prev_em_start);
3159}
3160
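/*
 * Single-page variant of the readpage path: lock the extent range covering
 * the page, waiting for any overlapping ordered extent to finish first, then
 * read the page with __do_readpage().  Any bio being built is left in *bio
 * for the caller to submit or to chain more pages onto.
 */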
3161static int __extent_read_full_page(struct extent_io_tree *tree,
3162 struct page *page,
3163 get_extent_t *get_extent,
3164 struct bio **bio, int mirror_num,
3165 unsigned long *bio_flags, int rw)
3166{
3167 struct inode *inode = page->mapping->host;
3168 struct btrfs_ordered_extent *ordered;
3169 u64 start = page_offset(page);
3170 u64 end = start + PAGE_SIZE - 1;
3171 int ret;
3172
3173 while (1) {
3174 lock_extent(tree, start, end);
3175 ordered = btrfs_lookup_ordered_range(inode, start,
3176 PAGE_SIZE);
3177 if (!ordered)
3178 break;
3179 unlock_extent(tree, start, end);
3180 btrfs_start_ordered_extent(inode, ordered, 1);
3181 btrfs_put_ordered_extent(ordered);
3182 }
3183
3184 ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
3185 bio_flags, rw, NULL);
3186 return ret;
3187}
3188
3189int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
3190 get_extent_t *get_extent, int mirror_num)
3191{
3192 struct bio *bio = NULL;
3193 unsigned long bio_flags = 0;
3194 int ret;
3195
3196 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
3197 &bio_flags, READ);
3198 if (bio)
3199 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
3200 return ret;
3201}
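
/*
 * Illustrative only: a filesystem ->readpage implementation is expected to
 * be a thin wrapper around extent_read_full_page(), roughly like the sketch
 * below.  The name example_readpage is made up for illustration; btrfs' real
 * wrapper lives in inode.c.
 *
 *	static int example_readpage(struct file *file, struct page *page)
 *	{
 *		struct extent_io_tree *tree;
 *
 *		tree = &BTRFS_I(page->mapping->host)->io_tree;
 *		return extent_read_full_page(tree, page, btrfs_get_extent, 0);
 *	}
 */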
3202
3203static noinline void update_nr_written(struct page *page,
3204 struct writeback_control *wbc,
3205 unsigned long nr_written)
3206{
3207 wbc->nr_to_write -= nr_written;
3208 if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
3209 wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
3210 page->mapping->writeback_index = page->index + nr_written;
3211}
3212
3213/*
3214 * helper for __extent_writepage, doing all of the delayed allocation setup.
3215 *
3216 * This returns 1 if our fill_delalloc function did all the work required
3217 * to write the page (copy into inline extent). In this case the IO has
3218 * been started and the page is already unlocked.
3219 *
3220 * This returns 0 if all went well (page still locked)
3221 * This returns < 0 if there were errors (page still locked)
3222 */
3223static noinline_for_stack int writepage_delalloc(struct inode *inode,
3224 struct page *page, struct writeback_control *wbc,
3225 struct extent_page_data *epd,
3226 u64 delalloc_start,
3227 unsigned long *nr_written)
3228{
3229 struct extent_io_tree *tree = epd->tree;
3230 u64 page_end = delalloc_start + PAGE_SIZE - 1;
3231 u64 nr_delalloc;
3232 u64 delalloc_to_write = 0;
3233 u64 delalloc_end = 0;
3234 int ret;
3235 int page_started = 0;
3236
3237 if (epd->extent_locked || !tree->ops || !tree->ops->fill_delalloc)
3238 return 0;
3239
3240 while (delalloc_end < page_end) {
3241 nr_delalloc = find_lock_delalloc_range(inode, tree,
3242 page,
3243 &delalloc_start,
3244 &delalloc_end,
3245 BTRFS_MAX_EXTENT_SIZE);
3246 if (nr_delalloc == 0) {
3247 delalloc_start = delalloc_end + 1;
3248 continue;
3249 }
3250 ret = tree->ops->fill_delalloc(inode, page,
3251 delalloc_start,
3252 delalloc_end,
3253 &page_started,
3254 nr_written);
3255 /* File system has been set read-only */
3256 if (ret) {
3257 SetPageError(page);
3258			/* fill_delalloc should return < 0 for error
3259 * but just in case, we use > 0 here meaning the
3260 * IO is started, so we don't want to return > 0
3261 * unless things are going well.
3262 */
3263 ret = ret < 0 ? ret : -EIO;
3264 goto done;
3265 }
3266 /*
3267 * delalloc_end is already one less than the total length, so
3268 * we don't subtract one from PAGE_SIZE
3269 */
3270 delalloc_to_write += (delalloc_end - delalloc_start +
3271 PAGE_SIZE) >> PAGE_SHIFT;
3272 delalloc_start = delalloc_end + 1;
3273 }
3274 if (wbc->nr_to_write < delalloc_to_write) {
3275 int thresh = 8192;
3276
3277 if (delalloc_to_write < thresh * 2)
3278 thresh = delalloc_to_write;
3279 wbc->nr_to_write = min_t(u64, delalloc_to_write,
3280 thresh);
3281 }
3282
3283 /* did the fill delalloc function already unlock and start
3284 * the IO?
3285 */
3286 if (page_started) {
3287 /*
3288 * we've unlocked the page, so we can't update
3289 * the mapping's writeback index, just update
3290 * nr_to_write.
3291 */
3292 wbc->nr_to_write -= *nr_written;
3293 return 1;
3294 }
3295
3296 ret = 0;
3297
3298done:
3299 return ret;
3300}
3301
3302/*
3303 * helper for __extent_writepage. This calls the writepage start hooks,
3304 * and does the loop to map the page into extents and bios.
3305 *
3306 * We return 1 if the IO is started and the page is unlocked,
3307 * 0 if all went well (page still locked)
3308 * < 0 if there were errors (page still locked)
3309 */
3310static noinline_for_stack int __extent_writepage_io(struct inode *inode,
3311 struct page *page,
3312 struct writeback_control *wbc,
3313 struct extent_page_data *epd,
3314 loff_t i_size,
3315 unsigned long nr_written,
3316 int write_flags, int *nr_ret)
3317{
3318 struct extent_io_tree *tree = epd->tree;
3319 u64 start = page_offset(page);
3320 u64 page_end = start + PAGE_SIZE - 1;
3321 u64 end;
3322 u64 cur = start;
3323 u64 extent_offset;
3324 u64 block_start;
3325 u64 iosize;
3326 sector_t sector;
3327 struct extent_state *cached_state = NULL;
3328 struct extent_map *em;
3329 struct block_device *bdev;
3330 size_t pg_offset = 0;
3331 size_t blocksize;
3332 int ret = 0;
3333 int nr = 0;
3334 bool compressed;
3335
3336 if (tree->ops && tree->ops->writepage_start_hook) {
3337 ret = tree->ops->writepage_start_hook(page, start,
3338 page_end);
3339 if (ret) {
3340 /* Fixup worker will requeue */
3341 if (ret == -EBUSY)
3342 wbc->pages_skipped++;
3343 else
3344 redirty_page_for_writepage(wbc, page);
3345
3346 update_nr_written(page, wbc, nr_written);
3347 unlock_page(page);
3348 ret = 1;
3349 goto done_unlocked;
3350 }
3351 }
3352
3353 /*
3354 * we don't want to touch the inode after unlocking the page,
3355 * so we update the mapping writeback index now
3356 */
3357 update_nr_written(page, wbc, nr_written + 1);
3358
3359 end = page_end;
3360 if (i_size <= start) {
3361 if (tree->ops && tree->ops->writepage_end_io_hook)
3362 tree->ops->writepage_end_io_hook(page, start,
3363 page_end, NULL, 1);
3364 goto done;
3365 }
3366
3367 blocksize = inode->i_sb->s_blocksize;
3368
3369 while (cur <= end) {
3370 u64 em_end;
3371 if (cur >= i_size) {
3372 if (tree->ops && tree->ops->writepage_end_io_hook)
3373 tree->ops->writepage_end_io_hook(page, cur,
3374 page_end, NULL, 1);
3375 break;
3376 }
3377 em = epd->get_extent(inode, page, pg_offset, cur,
3378 end - cur + 1, 1);
3379 if (IS_ERR_OR_NULL(em)) {
3380 SetPageError(page);
3381 ret = PTR_ERR_OR_ZERO(em);
3382 break;
3383 }
3384
3385 extent_offset = cur - em->start;
3386 em_end = extent_map_end(em);
3387 BUG_ON(em_end <= cur);
3388 BUG_ON(end < cur);
3389 iosize = min(em_end - cur, end - cur + 1);
3390 iosize = ALIGN(iosize, blocksize);
3391 sector = (em->block_start + extent_offset) >> 9;
3392 bdev = em->bdev;
3393 block_start = em->block_start;
3394 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3395 free_extent_map(em);
3396 em = NULL;
3397
3398 /*
3399 * compressed and inline extents are written through other
3400 * paths in the FS
3401 */
3402 if (compressed || block_start == EXTENT_MAP_HOLE ||
3403 block_start == EXTENT_MAP_INLINE) {
3404 /*
3405 * end_io notification does not happen here for
3406 * compressed extents
3407 */
3408 if (!compressed && tree->ops &&
3409 tree->ops->writepage_end_io_hook)
3410 tree->ops->writepage_end_io_hook(page, cur,
3411 cur + iosize - 1,
3412 NULL, 1);
3413 else if (compressed) {
3414 /* we don't want to end_page_writeback on
3415 * a compressed extent. this happens
3416 * elsewhere
3417 */
3418 nr++;
3419 }
3420
3421 cur += iosize;
3422 pg_offset += iosize;
3423 continue;
3424 }
3425
3426 if (tree->ops && tree->ops->writepage_io_hook) {
3427 ret = tree->ops->writepage_io_hook(page, cur,
3428 cur + iosize - 1);
3429 } else {
3430 ret = 0;
3431 }
3432 if (ret) {
3433 SetPageError(page);
3434 } else {
3435 unsigned long max_nr = (i_size >> PAGE_SHIFT) + 1;
3436
3437 set_range_writeback(tree, cur, cur + iosize - 1);
3438 if (!PageWriteback(page)) {
3439 btrfs_err(BTRFS_I(inode)->root->fs_info,
3440 "page %lu not writeback, cur %llu end %llu",
3441 page->index, cur, end);
3442 }
3443
3444 ret = submit_extent_page(write_flags, tree, wbc, page,
3445 sector, iosize, pg_offset,
3446 bdev, &epd->bio, max_nr,
3447 end_bio_extent_writepage,
3448 0, 0, 0, false);
3449 if (ret)
3450 SetPageError(page);
3451 }
3452 cur = cur + iosize;
3453 pg_offset += iosize;
3454 nr++;
3455 }
3456done:
3457 *nr_ret = nr;
3458
3459done_unlocked:
3460
3461 /* drop our reference on any cached states */
3462 free_extent_state(cached_state);
3463 return ret;
3464}
3465
3466/*
3467 * the writepage semantics are similar to regular writepage. extent
3468 * records are inserted to lock ranges in the tree, and as dirty areas
3469 * are found, they are marked writeback. Then the lock bits are removed
3470 * and the end_io handler clears the writeback ranges
3471 */
3472static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3473 void *data)
3474{
3475 struct inode *inode = page->mapping->host;
3476 struct extent_page_data *epd = data;
3477 u64 start = page_offset(page);
3478 u64 page_end = start + PAGE_SIZE - 1;
3479 int ret;
3480 int nr = 0;
3481 size_t pg_offset = 0;
3482 loff_t i_size = i_size_read(inode);
3483 unsigned long end_index = i_size >> PAGE_SHIFT;
3484 int write_flags;
3485 unsigned long nr_written = 0;
3486
3487 if (wbc->sync_mode == WB_SYNC_ALL)
3488 write_flags = WRITE_SYNC;
3489 else
3490 write_flags = WRITE;
3491
3492 trace___extent_writepage(page, inode, wbc);
3493
3494 WARN_ON(!PageLocked(page));
3495
3496 ClearPageError(page);
3497
3498 pg_offset = i_size & (PAGE_SIZE - 1);
3499 if (page->index > end_index ||
3500 (page->index == end_index && !pg_offset)) {
3501 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
3502 unlock_page(page);
3503 return 0;
3504 }
3505
3506 if (page->index == end_index) {
3507 char *userpage;
3508
3509 userpage = kmap_atomic(page);
3510 memset(userpage + pg_offset, 0,
3511 PAGE_SIZE - pg_offset);
3512 kunmap_atomic(userpage);
3513 flush_dcache_page(page);
3514 }
3515
3516 pg_offset = 0;
3517
3518 set_page_extent_mapped(page);
3519
3520 ret = writepage_delalloc(inode, page, wbc, epd, start, &nr_written);
3521 if (ret == 1)
3522 goto done_unlocked;
3523 if (ret)
3524 goto done;
3525
3526 ret = __extent_writepage_io(inode, page, wbc, epd,
3527 i_size, nr_written, write_flags, &nr);
3528 if (ret == 1)
3529 goto done_unlocked;
3530
3531done:
3532 if (nr == 0) {
3533 /* make sure the mapping tag for page dirty gets cleared */
3534 set_page_writeback(page);
3535 end_page_writeback(page);
3536 }
3537 if (PageError(page)) {
3538 ret = ret < 0 ? ret : -EIO;
3539 end_extent_writepage(page, ret, start, page_end);
3540 }
3541 unlock_page(page);
3542 return ret;
3543
3544done_unlocked:
3545 return 0;
3546}
3547
3548void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3549{
3550 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
3551 TASK_UNINTERRUPTIBLE);
3552}
3553
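/*
 * Prepare an extent buffer for writeback: briefly take the tree write lock
 * (flushing our pending bio first if the trylock fails), wait for any
 * writeback that is already running if this is a synchronous writeout, and
 * transition the buffer from dirty to writeback state while adjusting the
 * dirty metadata counters.  The tree lock is dropped again before returning.
 *
 * Returns 1 if the buffer was dirty and all of its pages are now locked and
 * ready to be submitted by the caller, 0 if there is nothing to write.
 */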
3554static noinline_for_stack int
3555lock_extent_buffer_for_io(struct extent_buffer *eb,
3556 struct btrfs_fs_info *fs_info,
3557 struct extent_page_data *epd)
3558{
3559 unsigned long i, num_pages;
3560 int flush = 0;
3561 int ret = 0;
3562
3563 if (!btrfs_try_tree_write_lock(eb)) {
3564 flush = 1;
3565 flush_write_bio(epd);
3566 btrfs_tree_lock(eb);
3567 }
3568
3569 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3570 btrfs_tree_unlock(eb);
3571 if (!epd->sync_io)
3572 return 0;
3573 if (!flush) {
3574 flush_write_bio(epd);
3575 flush = 1;
3576 }
3577 while (1) {
3578 wait_on_extent_buffer_writeback(eb);
3579 btrfs_tree_lock(eb);
3580 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3581 break;
3582 btrfs_tree_unlock(eb);
3583 }
3584 }
3585
3586 /*
3587	 * We need to do this to prevent races with anyone checking whether the
3588	 * eb is under IO, since we can end up having no IO bits set for a short
3589	 * period of time.
3590 */
3591 spin_lock(&eb->refs_lock);
3592 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3593 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3594 spin_unlock(&eb->refs_lock);
3595 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3596 __percpu_counter_add(&fs_info->dirty_metadata_bytes,
3597 -eb->len,
3598 fs_info->dirty_metadata_batch);
3599 ret = 1;
3600 } else {
3601 spin_unlock(&eb->refs_lock);
3602 }
3603
3604 btrfs_tree_unlock(eb);
3605
3606 if (!ret)
3607 return ret;
3608
3609 num_pages = num_extent_pages(eb->start, eb->len);
3610 for (i = 0; i < num_pages; i++) {
3611 struct page *p = eb->pages[i];
3612
3613 if (!trylock_page(p)) {
3614 if (!flush) {
3615 flush_write_bio(epd);
3616 flush = 1;
3617 }
3618 lock_page(p);
3619 }
3620 }
3621
3622 return ret;
3623}
3624
3625static void end_extent_buffer_writeback(struct extent_buffer *eb)
3626{
3627 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3628 smp_mb__after_atomic();
3629 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3630}
3631
3632static void set_btree_ioerr(struct page *page)
3633{
3634 struct extent_buffer *eb = (struct extent_buffer *)page->private;
3635 struct btrfs_inode *btree_ino = BTRFS_I(eb->fs_info->btree_inode);
3636
3637 SetPageError(page);
3638 if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
3639 return;
3640
3641 /*
3642 * If writeback for a btree extent that doesn't belong to a log tree
3643 * failed, increment the counter transaction->eb_write_errors.
3644 * We do this because while the transaction is running and before it's
3645 * committing (when we call filemap_fdata[write|wait]_range against
3646 * the btree inode), we might have
3647 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
3648 * returns an error or an error happens during writeback, when we're
3649 * committing the transaction we wouldn't know about it, since the pages
3650	 * may no longer be dirty nor marked for writeback (if a
3651 * subsequent modification to the extent buffer didn't happen before the
3652 * transaction commit), which makes filemap_fdata[write|wait]_range not
3653 * able to find the pages tagged with SetPageError at transaction
3654 * commit time. So if this happens we must abort the transaction,
3655 * otherwise we commit a super block with btree roots that point to
3656 * btree nodes/leafs whose content on disk is invalid - either garbage
3657 * or the content of some node/leaf from a past generation that got
3658 * cowed or deleted and is no longer valid.
3659 *
3660 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
3661 * not be enough - we need to distinguish between log tree extents vs
3662 * non-log tree extents, and the next filemap_fdatawait_range() call
3663 * will catch and clear such errors in the mapping - and that call might
3664 * be from a log sync and not from a transaction commit. Also, checking
3665 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
3666 * not done and would not be reliable - the eb might have been released
3667 * from memory and reading it back again means that flag would not be
3668 * set (since it's a runtime flag, not persisted on disk).
3669 *
3670	 * Using the flags below in the btree inode also covers the case where
3671	 * writepages() returns success, writeback was started for all dirty
3672	 * pages and, before filemap_fdatawait_range() is called, the writeback
3673	 * for all dirty pages had already finished with errors - because we
3674	 * were not using AS_EIO/AS_ENOSPC,
3675 * filemap_fdatawait_range() would return success, as it could not know
3676 * that writeback errors happened (the pages were no longer tagged for
3677 * writeback).
3678 */
3679 switch (eb->log_index) {
3680 case -1:
3681 set_bit(BTRFS_INODE_BTREE_ERR, &btree_ino->runtime_flags);
3682 break;
3683 case 0:
3684 set_bit(BTRFS_INODE_BTREE_LOG1_ERR, &btree_ino->runtime_flags);
3685 break;
3686 case 1:
3687 set_bit(BTRFS_INODE_BTREE_LOG2_ERR, &btree_ino->runtime_flags);
3688 break;
3689 default:
3690 BUG(); /* unexpected, logic error */
3691 }
3692}
3693
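/*
 * Bio completion handler for extent buffer (metadata) writeback.  For every
 * page in the bio we drop one eb->io_pages reference, record an error on the
 * page and in the btree inode if the write failed, and end the page
 * writeback; once the last page of a buffer completes we clear the buffer's
 * writeback bit and wake any waiters.
 */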
3694static void end_bio_extent_buffer_writepage(struct bio *bio)
3695{
3696 struct bio_vec *bvec;
3697 struct extent_buffer *eb;
3698 int i, done;
3699
3700 bio_for_each_segment_all(bvec, bio, i) {
3701 struct page *page = bvec->bv_page;
3702
3703 eb = (struct extent_buffer *)page->private;
3704 BUG_ON(!eb);
3705 done = atomic_dec_and_test(&eb->io_pages);
3706
3707 if (bio->bi_error ||
3708 test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
3709 ClearPageUptodate(page);
3710 set_btree_ioerr(page);
3711 }
3712
3713 end_page_writeback(page);
3714
3715 if (!done)
3716 continue;
3717
3718 end_extent_buffer_writeback(eb);
3719 }
3720
3721 bio_put(bio);
3722}
3723
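/*
 * Submit all pages of a single extent buffer for writeback.  io_pages is
 * primed with the page count so the end_io handler knows when the whole
 * buffer is done, log tree blocks are tagged with EXTENT_BIO_TREE_LOG, and
 * on a submission failure the remaining pages are cleaned up and the buffer
 * writeback is ended early with -EIO.
 */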
3724static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
3725 struct btrfs_fs_info *fs_info,
3726 struct writeback_control *wbc,
3727 struct extent_page_data *epd)
3728{
3729 struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3730 struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
3731 u64 offset = eb->start;
3732 unsigned long i, num_pages;
3733 unsigned long bio_flags = 0;
3734 int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META;
3735 int ret = 0;
3736
3737 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
3738 num_pages = num_extent_pages(eb->start, eb->len);
3739 atomic_set(&eb->io_pages, num_pages);
3740 if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
3741 bio_flags = EXTENT_BIO_TREE_LOG;
3742
3743 for (i = 0; i < num_pages; i++) {
3744 struct page *p = eb->pages[i];
3745
3746 clear_page_dirty_for_io(p);
3747 set_page_writeback(p);
3748 ret = submit_extent_page(rw, tree, wbc, p, offset >> 9,
3749 PAGE_SIZE, 0, bdev, &epd->bio,
3750 -1, end_bio_extent_buffer_writepage,
3751 0, epd->bio_flags, bio_flags, false);
3752 epd->bio_flags = bio_flags;
3753 if (ret) {
3754 set_btree_ioerr(p);
3755 end_page_writeback(p);
3756 if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3757 end_extent_buffer_writeback(eb);
3758 ret = -EIO;
3759 break;
3760 }
3761 offset += PAGE_SIZE;
3762 update_nr_written(p, wbc, 1);
3763 unlock_page(p);
3764 }
3765
3766 if (unlikely(ret)) {
3767 for (; i < num_pages; i++) {
3768 struct page *p = eb->pages[i];
3769 clear_page_dirty_for_io(p);
3770 unlock_page(p);
3771 }
3772 }
3773
3774 return ret;
3775}
3776
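/*
 * Write back dirty extent buffers (btree metadata) for the btree inode's
 * address space.  We walk the dirty/towrite tagged pages, resolve each page
 * back to its extent buffer, and submit whole buffers at a time via
 * lock_extent_buffer_for_io() + write_one_eb(), skipping buffers we already
 * handled in this pass (prev_eb).
 */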
3777int btree_write_cache_pages(struct address_space *mapping,
3778 struct writeback_control *wbc)
3779{
3780 struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3781 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3782 struct extent_buffer *eb, *prev_eb = NULL;
3783 struct extent_page_data epd = {
3784 .bio = NULL,
3785 .tree = tree,
3786 .extent_locked = 0,
3787 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3788 .bio_flags = 0,
3789 };
3790 int ret = 0;
3791 int done = 0;
3792 int nr_to_write_done = 0;
3793 struct pagevec pvec;
3794 int nr_pages;
3795 pgoff_t index;
3796 pgoff_t end; /* Inclusive */
3797 int scanned = 0;
3798 int tag;
3799
3800 pagevec_init(&pvec, 0);
3801 if (wbc->range_cyclic) {
3802 index = mapping->writeback_index; /* Start from prev offset */
3803 end = -1;
3804 } else {
3805 index = wbc->range_start >> PAGE_SHIFT;
3806 end = wbc->range_end >> PAGE_SHIFT;
3807 scanned = 1;
3808 }
3809 if (wbc->sync_mode == WB_SYNC_ALL)
3810 tag = PAGECACHE_TAG_TOWRITE;
3811 else
3812 tag = PAGECACHE_TAG_DIRTY;
3813retry:
3814 if (wbc->sync_mode == WB_SYNC_ALL)
3815 tag_pages_for_writeback(mapping, index, end);
3816 while (!done && !nr_to_write_done && (index <= end) &&
3817 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3818 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3819 unsigned i;
3820
3821 scanned = 1;
3822 for (i = 0; i < nr_pages; i++) {
3823 struct page *page = pvec.pages[i];
3824
3825 if (!PagePrivate(page))
3826 continue;
3827
3828 if (!wbc->range_cyclic && page->index > end) {
3829 done = 1;
3830 break;
3831 }
3832
3833 spin_lock(&mapping->private_lock);
3834 if (!PagePrivate(page)) {
3835 spin_unlock(&mapping->private_lock);
3836 continue;
3837 }
3838
3839 eb = (struct extent_buffer *)page->private;
3840
3841 /*
3842 * Shouldn't happen and normally this would be a BUG_ON
3843			 * but no sense in crashing the user's box for something
3844 * we can survive anyway.
3845 */
3846 if (WARN_ON(!eb)) {
3847 spin_unlock(&mapping->private_lock);
3848 continue;
3849 }
3850
3851 if (eb == prev_eb) {
3852 spin_unlock(&mapping->private_lock);
3853 continue;
3854 }
3855
3856 ret = atomic_inc_not_zero(&eb->refs);
3857 spin_unlock(&mapping->private_lock);
3858 if (!ret)
3859 continue;
3860
3861 prev_eb = eb;
3862 ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3863 if (!ret) {
3864 free_extent_buffer(eb);
3865 continue;
3866 }
3867
3868 ret = write_one_eb(eb, fs_info, wbc, &epd);
3869 if (ret) {
3870 done = 1;
3871 free_extent_buffer(eb);
3872 break;
3873 }
3874 free_extent_buffer(eb);
3875
3876 /*
3877 * the filesystem may choose to bump up nr_to_write.
3878 * We have to make sure to honor the new nr_to_write
3879 * at any time
3880 */
3881 nr_to_write_done = wbc->nr_to_write <= 0;
3882 }
3883 pagevec_release(&pvec);
3884 cond_resched();
3885 }
3886 if (!scanned && !done) {
3887 /*
3888 * We hit the last page and there is more work to be done: wrap
3889 * back to the start of the file
3890 */
3891 scanned = 1;
3892 index = 0;
3893 goto retry;
3894 }
3895 flush_write_bio(&epd);
3896 return ret;
3897}
3898
3899/**
3900 * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
3901 * @mapping: address space structure to write
3902 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3903 * @writepage: function called for each page
3904 * @data: data passed to writepage function
3905 *
3906 * If a page is already under I/O, write_cache_pages() skips it, even
3907 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
3908 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
3909 * and msync() need to guarantee that all the data which was dirty at the time
3910 * the call was made get new I/O started against them. If wbc->sync_mode is
3911 * WB_SYNC_ALL then we were called for data integrity and we must wait for
3912 * existing IO to complete.
3913 */
3914static int extent_write_cache_pages(struct extent_io_tree *tree,
3915 struct address_space *mapping,
3916 struct writeback_control *wbc,
3917 writepage_t writepage, void *data,
3918 void (*flush_fn)(void *))
3919{
3920 struct inode *inode = mapping->host;
3921 int ret = 0;
3922 int done = 0;
3923 int err = 0;
3924 int nr_to_write_done = 0;
3925 struct pagevec pvec;
3926 int nr_pages;
3927 pgoff_t index;
3928 pgoff_t end; /* Inclusive */
3929 int scanned = 0;
3930 int tag;
3931
3932 /*
3933 * We have to hold onto the inode so that ordered extents can do their
3934 * work when the IO finishes. The alternative to this is failing to add
3935 * an ordered extent if the igrab() fails there and that is a huge pain
3936 * to deal with, so instead just hold onto the inode throughout the
3937 * writepages operation. If it fails here we are freeing up the inode
3938 * anyway and we'd rather not waste our time writing out stuff that is
3939 * going to be truncated anyway.
3940 */
3941 if (!igrab(inode))
3942 return 0;
3943
3944 pagevec_init(&pvec, 0);
3945 if (wbc->range_cyclic) {
3946 index = mapping->writeback_index; /* Start from prev offset */
3947 end = -1;
3948 } else {
3949 index = wbc->range_start >> PAGE_SHIFT;
3950 end = wbc->range_end >> PAGE_SHIFT;
3951 scanned = 1;
3952 }
3953 if (wbc->sync_mode == WB_SYNC_ALL)
3954 tag = PAGECACHE_TAG_TOWRITE;
3955 else
3956 tag = PAGECACHE_TAG_DIRTY;
3957retry:
3958 if (wbc->sync_mode == WB_SYNC_ALL)
3959 tag_pages_for_writeback(mapping, index, end);
3960 while (!done && !nr_to_write_done && (index <= end) &&
3961 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3962 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3963 unsigned i;
3964
3965 scanned = 1;
3966 for (i = 0; i < nr_pages; i++) {
3967 struct page *page = pvec.pages[i];
3968
3969 /*
3970 * At this point we hold neither mapping->tree_lock nor
3971 * lock on the page itself: the page may be truncated or
3972 * invalidated (changing page->mapping to NULL), or even
3973 * swizzled back from swapper_space to tmpfs file
3974 * mapping
3975 */
3976 if (!trylock_page(page)) {
3977 flush_fn(data);
3978 lock_page(page);
3979 }
3980
3981 if (unlikely(page->mapping != mapping)) {
3982 unlock_page(page);
3983 continue;
3984 }
3985
3986 if (!wbc->range_cyclic && page->index > end) {
3987 done = 1;
3988 unlock_page(page);
3989 continue;
3990 }
3991
3992 if (wbc->sync_mode != WB_SYNC_NONE) {
3993 if (PageWriteback(page))
3994 flush_fn(data);
3995 wait_on_page_writeback(page);
3996 }
3997
3998 if (PageWriteback(page) ||
3999 !clear_page_dirty_for_io(page)) {
4000 unlock_page(page);
4001 continue;
4002 }
4003
4004 ret = (*writepage)(page, wbc, data);
4005
4006 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
4007 unlock_page(page);
4008 ret = 0;
4009 }
4010 if (!err && ret < 0)
4011 err = ret;
4012
4013 /*
4014 * the filesystem may choose to bump up nr_to_write.
4015 * We have to make sure to honor the new nr_to_write
4016 * at any time
4017 */
4018 nr_to_write_done = wbc->nr_to_write <= 0;
4019 }
4020 pagevec_release(&pvec);
4021 cond_resched();
4022 }
4023 if (!scanned && !done && !err) {
4024 /*
4025 * We hit the last page and there is more work to be done: wrap
4026 * back to the start of the file
4027 */
4028 scanned = 1;
4029 index = 0;
4030 goto retry;
4031 }
4032 btrfs_add_delayed_iput(inode);
4033 return err;
4034}
4035
4036static void flush_epd_write_bio(struct extent_page_data *epd)
4037{
4038 if (epd->bio) {
4039 int rw = WRITE;
4040 int ret;
4041
4042 if (epd->sync_io)
4043 rw = WRITE_SYNC;
4044
4045 ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags);
4046 BUG_ON(ret < 0); /* -ENOMEM */
4047 epd->bio = NULL;
4048 }
4049}
4050
4051static noinline void flush_write_bio(void *data)
4052{
4053 struct extent_page_data *epd = data;
4054 flush_epd_write_bio(epd);
4055}
4056
4057int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
4058 get_extent_t *get_extent,
4059 struct writeback_control *wbc)
4060{
4061 int ret;
4062 struct extent_page_data epd = {
4063 .bio = NULL,
4064 .tree = tree,
4065 .get_extent = get_extent,
4066 .extent_locked = 0,
4067 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
4068 .bio_flags = 0,
4069 };
4070
4071 ret = __extent_writepage(page, wbc, &epd);
4072
4073 flush_epd_write_bio(&epd);
4074 return ret;
4075}
4076
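/*
 * Write out a range of pages whose extent state the caller has already
 * locked (extent_locked is set so __extent_writepage does not try to lock
 * the state bits again).  Pages that are no longer dirty still get the
 * writepage end_io hook called before being unlocked.
 */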
4077int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
4078 u64 start, u64 end, get_extent_t *get_extent,
4079 int mode)
4080{
4081 int ret = 0;
4082 struct address_space *mapping = inode->i_mapping;
4083 struct page *page;
4084 unsigned long nr_pages = (end - start + PAGE_SIZE) >>
4085 PAGE_SHIFT;
4086
4087 struct extent_page_data epd = {
4088 .bio = NULL,
4089 .tree = tree,
4090 .get_extent = get_extent,
4091 .extent_locked = 1,
4092 .sync_io = mode == WB_SYNC_ALL,
4093 .bio_flags = 0,
4094 };
4095 struct writeback_control wbc_writepages = {
4096 .sync_mode = mode,
4097 .nr_to_write = nr_pages * 2,
4098 .range_start = start,
4099 .range_end = end + 1,
4100 };
4101
4102 while (start <= end) {
4103 page = find_get_page(mapping, start >> PAGE_SHIFT);
4104 if (clear_page_dirty_for_io(page))
4105 ret = __extent_writepage(page, &wbc_writepages, &epd);
4106 else {
4107 if (tree->ops && tree->ops->writepage_end_io_hook)
4108 tree->ops->writepage_end_io_hook(page, start,
4109 start + PAGE_SIZE - 1,
4110 NULL, 1);
4111 unlock_page(page);
4112 }
4113 put_page(page);
4114 start += PAGE_SIZE;
4115 }
4116
4117 flush_epd_write_bio(&epd);
4118 return ret;
4119}
4120
4121int extent_writepages(struct extent_io_tree *tree,
4122 struct address_space *mapping,
4123 get_extent_t *get_extent,
4124 struct writeback_control *wbc)
4125{
4126 int ret = 0;
4127 struct extent_page_data epd = {
4128 .bio = NULL,
4129 .tree = tree,
4130 .get_extent = get_extent,
4131 .extent_locked = 0,
4132 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
4133 .bio_flags = 0,
4134 };
4135
4136 ret = extent_write_cache_pages(tree, mapping, wbc,
4137 __extent_writepage, &epd,
4138 flush_write_bio);
4139 flush_epd_write_bio(&epd);
4140 return ret;
4141}
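
/*
 * Illustrative only: the ->writepages address space operation is expected to
 * wrap extent_writepages() roughly as sketched below.  The name
 * example_writepages is made up for illustration; btrfs' real wrapper lives
 * in inode.c.
 *
 *	static int example_writepages(struct address_space *mapping,
 *				      struct writeback_control *wbc)
 *	{
 *		struct extent_io_tree *tree;
 *
 *		tree = &BTRFS_I(mapping->host)->io_tree;
 *		return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
 *	}
 */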
4142
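/*
 * ->readpages style entry point: pull pages off the readahead list, insert
 * them into the page cache, and batch them (16 at a time here) into
 * __extent_readpages() so contiguous runs can share extent locking and a
 * single extent map cached in em_cached.  Any bio still being built at the
 * end is submitted before returning.
 */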
4143int extent_readpages(struct extent_io_tree *tree,
4144 struct address_space *mapping,
4145 struct list_head *pages, unsigned nr_pages,
4146 get_extent_t get_extent)
4147{
4148 struct bio *bio = NULL;
4149 unsigned page_idx;
4150 unsigned long bio_flags = 0;
4151 struct page *pagepool[16];
4152 struct page *page;
4153 struct extent_map *em_cached = NULL;
4154 int nr = 0;
4155 u64 prev_em_start = (u64)-1;
4156
4157 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
4158 page = list_entry(pages->prev, struct page, lru);
4159
4160 prefetchw(&page->flags);
4161 list_del(&page->lru);
4162 if (add_to_page_cache_lru(page, mapping,
4163 page->index, GFP_NOFS)) {
4164 put_page(page);
4165 continue;
4166 }
4167
4168 pagepool[nr++] = page;
4169 if (nr < ARRAY_SIZE(pagepool))
4170 continue;
4171 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
4172 &bio, 0, &bio_flags, READ, &prev_em_start);
4173 nr = 0;
4174 }
4175 if (nr)
4176 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
4177 &bio, 0, &bio_flags, READ, &prev_em_start);
4178
4179 if (em_cached)
4180 free_extent_map(em_cached);
4181
4182 BUG_ON(!list_empty(pages));
4183 if (bio)
4184 return submit_one_bio(READ, bio, 0, bio_flags);
4185 return 0;
4186}
4187
4188/*
4189 * basic invalidatepage code, this waits on any locked or writeback
4190 * ranges corresponding to the page, and then deletes any extent state
4191 * records from the tree
4192 */
4193int extent_invalidatepage(struct extent_io_tree *tree,
4194 struct page *page, unsigned long offset)
4195{
4196 struct extent_state *cached_state = NULL;
4197 u64 start = page_offset(page);
4198 u64 end = start + PAGE_SIZE - 1;
4199 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
4200
4201 start += ALIGN(offset, blocksize);
4202 if (start > end)
4203 return 0;
4204
4205 lock_extent_bits(tree, start, end, &cached_state);
4206 wait_on_page_writeback(page);
4207 clear_extent_bit(tree, start, end,
4208 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4209 EXTENT_DO_ACCOUNTING,
4210 1, 1, &cached_state, GFP_NOFS);
4211 return 0;
4212}
4213
4214/*
4215 * a helper for releasepage, this tests for areas of the page that
4216 * are locked or under IO and drops the related state bits if it is safe
4217 * to drop the page.
4218 */
4219static int try_release_extent_state(struct extent_map_tree *map,
4220 struct extent_io_tree *tree,
4221 struct page *page, gfp_t mask)
4222{
4223 u64 start = page_offset(page);
4224 u64 end = start + PAGE_SIZE - 1;
4225 int ret = 1;
4226
4227 if (test_range_bit(tree, start, end,
4228 EXTENT_IOBITS, 0, NULL))
4229 ret = 0;
4230 else {
4231 if ((mask & GFP_NOFS) == GFP_NOFS)
4232 mask = GFP_NOFS;
4233 /*
4234 * at this point we can safely clear everything except the
4235 * locked bit and the nodatasum bit
4236 */
4237 ret = clear_extent_bit(tree, start, end,
4238 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
4239 0, 0, NULL, mask);
4240
4241		/* if clear_extent_bit failed for ENOMEM reasons,
4242 * we can't allow the release to continue.
4243 */
4244 if (ret < 0)
4245 ret = 0;
4246 else
4247 ret = 1;
4248 }
4249 return ret;
4250}
4251
4252/*
4253 * a helper for releasepage. As long as there are no locked extents
4254 * in the range corresponding to the page, both state records and extent
4255 * map records are removed
4256 */
4257int try_release_extent_mapping(struct extent_map_tree *map,
4258 struct extent_io_tree *tree, struct page *page,
4259 gfp_t mask)
4260{
4261 struct extent_map *em;
4262 u64 start = page_offset(page);
4263 u64 end = start + PAGE_SIZE - 1;
4264
4265 if (gfpflags_allow_blocking(mask) &&
4266 page->mapping->host->i_size > SZ_16M) {
4267 u64 len;
4268 while (start <= end) {
4269 len = end - start + 1;
4270 write_lock(&map->lock);
4271 em = lookup_extent_mapping(map, start, len);
4272 if (!em) {
4273 write_unlock(&map->lock);
4274 break;
4275 }
4276 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
4277 em->start != start) {
4278 write_unlock(&map->lock);
4279 free_extent_map(em);
4280 break;
4281 }
4282 if (!test_range_bit(tree, em->start,
4283 extent_map_end(em) - 1,
4284 EXTENT_LOCKED | EXTENT_WRITEBACK,
4285 0, NULL)) {
4286 remove_extent_mapping(map, em);
4287 /* once for the rb tree */
4288 free_extent_map(em);
4289 }
4290 start = extent_map_end(em);
4291 write_unlock(&map->lock);
4292
4293 /* once for us */
4294 free_extent_map(em);
4295 }
4296 }
4297 return try_release_extent_state(map, tree, page, mask);
4298}
4299
4300/*
4301 * helper function for fiemap, which doesn't want to see any holes.
4302 * This maps until we find something past 'last'
4303 */
4304static struct extent_map *get_extent_skip_holes(struct inode *inode,
4305 u64 offset,
4306 u64 last,
4307 get_extent_t *get_extent)
4308{
4309 u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
4310 struct extent_map *em;
4311 u64 len;
4312
4313 if (offset >= last)
4314 return NULL;
4315
4316 while (1) {
4317 len = last - offset;
4318 if (len == 0)
4319 break;
4320 len = ALIGN(len, sectorsize);
4321 em = get_extent(inode, NULL, 0, offset, len, 0);
4322 if (IS_ERR_OR_NULL(em))
4323 return em;
4324
4325 /* if this isn't a hole return it */
4326 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
4327 em->block_start != EXTENT_MAP_HOLE) {
4328 return em;
4329 }
4330
4331 /* this is a hole, advance to the next extent */
4332 offset = extent_map_end(em);
4333 free_extent_map(em);
4334 if (offset >= last)
4335 break;
4336 }
4337 return NULL;
4338}
4339
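/*
 * fiemap implementation: walk the extent maps for the requested range
 * (skipping holes via get_extent_skip_holes) and report each extent to
 * userspace with fiemap_fill_next_extent(), translating extent map flags
 * into FIEMAP_EXTENT_* flags and checking whether data extents are shared.
 */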
4340int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4341 __u64 start, __u64 len, get_extent_t *get_extent)
4342{
4343 int ret = 0;
4344 u64 off = start;
4345 u64 max = start + len;
4346 u32 flags = 0;
4347 u32 found_type;
4348 u64 last;
4349 u64 last_for_get_extent = 0;
4350 u64 disko = 0;
4351 u64 isize = i_size_read(inode);
4352 struct btrfs_key found_key;
4353 struct extent_map *em = NULL;
4354 struct extent_state *cached_state = NULL;
4355 struct btrfs_path *path;
4356 struct btrfs_root *root = BTRFS_I(inode)->root;
4357 int end = 0;
4358 u64 em_start = 0;
4359 u64 em_len = 0;
4360 u64 em_end = 0;
4361
4362 if (len == 0)
4363 return -EINVAL;
4364
4365 path = btrfs_alloc_path();
4366 if (!path)
4367 return -ENOMEM;
4368 path->leave_spinning = 1;
4369
4370 start = round_down(start, BTRFS_I(inode)->root->sectorsize);
4371 len = round_up(max, BTRFS_I(inode)->root->sectorsize) - start;
4372
4373 /*
4374 * lookup the last file extent. We're not using i_size here
4375 * because there might be preallocation past i_size
4376 */
4377 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), -1,
4378 0);
4379 if (ret < 0) {
4380 btrfs_free_path(path);
4381 return ret;
4382 }
4383 WARN_ON(!ret);
4384 path->slots[0]--;
4385 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
4386 found_type = found_key.type;
4387
4388 /* No extents, but there might be delalloc bits */
4389 if (found_key.objectid != btrfs_ino(inode) ||
4390 found_type != BTRFS_EXTENT_DATA_KEY) {
4391 /* have to trust i_size as the end */
4392 last = (u64)-1;
4393 last_for_get_extent = isize;
4394 } else {
4395 /*
4396 * remember the start of the last extent. There are a
4397 * bunch of different factors that go into the length of the
4398		 * extent, so it's much less complex to remember where it started
4399 */
4400 last = found_key.offset;
4401 last_for_get_extent = last + 1;
4402 }
4403 btrfs_release_path(path);
4404
4405 /*
4406 * we might have some extents allocated but more delalloc past those
4407 * extents. so, we trust isize unless the start of the last extent is
4408 * beyond isize
4409 */
4410 if (last < isize) {
4411 last = (u64)-1;
4412 last_for_get_extent = isize;
4413 }
4414
4415 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4416 &cached_state);
4417
4418 em = get_extent_skip_holes(inode, start, last_for_get_extent,
4419 get_extent);
4420 if (!em)
4421 goto out;
4422 if (IS_ERR(em)) {
4423 ret = PTR_ERR(em);
4424 goto out;
4425 }
4426
4427 while (!end) {
4428 u64 offset_in_extent = 0;
4429
4430 /* break if the extent we found is outside the range */
4431 if (em->start >= max || extent_map_end(em) < off)
4432 break;
4433
4434 /*
4435 * get_extent may return an extent that starts before our
4436 * requested range. We have to make sure the ranges
4437 * we return to fiemap always move forward and don't
4438 * overlap, so adjust the offsets here
4439 */
4440 em_start = max(em->start, off);
4441
4442 /*
4443 * record the offset from the start of the extent
4444 * for adjusting the disk offset below. Only do this if the
4445 * extent isn't compressed since our in ram offset may be past
4446 * what we have actually allocated on disk.
4447 */
4448 if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4449 offset_in_extent = em_start - em->start;
4450 em_end = extent_map_end(em);
4451 em_len = em_end - em_start;
4452 disko = 0;
4453 flags = 0;
4454
4455 /*
4456 * bump off for our next call to get_extent
4457 */
4458 off = extent_map_end(em);
4459 if (off >= max)
4460 end = 1;
4461
4462 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
4463 end = 1;
4464 flags |= FIEMAP_EXTENT_LAST;
4465 } else if (em->block_start == EXTENT_MAP_INLINE) {
4466 flags |= (FIEMAP_EXTENT_DATA_INLINE |
4467 FIEMAP_EXTENT_NOT_ALIGNED);
4468 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
4469 flags |= (FIEMAP_EXTENT_DELALLOC |
4470 FIEMAP_EXTENT_UNKNOWN);
4471 } else if (fieinfo->fi_extents_max) {
4472 u64 bytenr = em->block_start -
4473 (em->start - em->orig_start);
4474
4475 disko = em->block_start + offset_in_extent;
4476
4477 /*
4478 * As btrfs supports shared space, this information
4479 * can be exported to userspace tools via
4480 * flag FIEMAP_EXTENT_SHARED. If fi_extents_max == 0
4481 * then we're just getting a count and we can skip the
4482 * lookup stuff.
4483 */
4484 ret = btrfs_check_shared(NULL, root->fs_info,
4485 root->objectid,
4486 btrfs_ino(inode), bytenr);
4487 if (ret < 0)
4488 goto out_free;
4489 if (ret)
4490 flags |= FIEMAP_EXTENT_SHARED;
4491 ret = 0;
4492 }
4493 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4494 flags |= FIEMAP_EXTENT_ENCODED;
4495 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4496 flags |= FIEMAP_EXTENT_UNWRITTEN;
4497
4498 free_extent_map(em);
4499 em = NULL;
4500 if ((em_start >= last) || em_len == (u64)-1 ||
4501 (last == (u64)-1 && isize <= em_end)) {
4502 flags |= FIEMAP_EXTENT_LAST;
4503 end = 1;
4504 }
4505
4506 /* now scan forward to see if this is really the last extent. */
4507 em = get_extent_skip_holes(inode, off, last_for_get_extent,
4508 get_extent);
4509 if (IS_ERR(em)) {
4510 ret = PTR_ERR(em);
4511 goto out;
4512 }
4513 if (!em) {
4514 flags |= FIEMAP_EXTENT_LAST;
4515 end = 1;
4516 }
4517 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
4518 em_len, flags);
4519 if (ret) {
4520 if (ret == 1)
4521 ret = 0;
4522 goto out_free;
4523 }
4524 }
4525out_free:
4526 free_extent_map(em);
4527out:
4528 btrfs_free_path(path);
4529 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4530 &cached_state, GFP_NOFS);
4531 return ret;
4532}
4533
4534static void __free_extent_buffer(struct extent_buffer *eb)
4535{
4536 btrfs_leak_debug_del(&eb->leak_list);
4537 kmem_cache_free(extent_buffer_cache, eb);
4538}
4539
4540int extent_buffer_under_io(struct extent_buffer *eb)
4541{
4542 return (atomic_read(&eb->io_pages) ||
4543 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4544 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4545}
4546
4547/*
4548 * Helper for releasing extent buffer page.
4549 */
4550static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
4551{
4552 unsigned long index;
4553 struct page *page;
4554 int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4555
4556 BUG_ON(extent_buffer_under_io(eb));
4557
4558 index = num_extent_pages(eb->start, eb->len);
4559 if (index == 0)
4560 return;
4561
4562 do {
4563 index--;
4564 page = eb->pages[index];
4565 if (!page)
4566 continue;
4567 if (mapped)
4568 spin_lock(&page->mapping->private_lock);
4569 /*
4570 * We do this since we'll remove the pages after we've
4571 * removed the eb from the radix tree, so we could race
4572 * and have this page now attached to the new eb. So
4573 * only clear page_private if it's still connected to
4574 * this eb.
4575 */
4576 if (PagePrivate(page) &&
4577 page->private == (unsigned long)eb) {
4578 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4579 BUG_ON(PageDirty(page));
4580 BUG_ON(PageWriteback(page));
4581 /*
4582			 * We need to make sure we haven't been attached
4583 * to a new eb.
4584 */
4585 ClearPagePrivate(page);
4586 set_page_private(page, 0);
4587 /* One for the page private */
4588 put_page(page);
4589 }
4590
4591 if (mapped)
4592 spin_unlock(&page->mapping->private_lock);
4593
4594 /* One for when we alloced the page */
4595 put_page(page);
4596 } while (index != 0);
4597}
4598
4599/*
4600 * Helper for releasing the extent buffer.
4601 */
4602static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4603{
4604 btrfs_release_extent_buffer_page(eb);
4605 __free_extent_buffer(eb);
4606}
4607
4608static struct extent_buffer *
4609__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
4610 unsigned long len)
4611{
4612 struct extent_buffer *eb = NULL;
4613
4614 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
4615 eb->start = start;
4616 eb->len = len;
4617 eb->fs_info = fs_info;
4618 eb->bflags = 0;
4619 rwlock_init(&eb->lock);
4620 atomic_set(&eb->write_locks, 0);
4621 atomic_set(&eb->read_locks, 0);
4622 atomic_set(&eb->blocking_readers, 0);
4623 atomic_set(&eb->blocking_writers, 0);
4624 atomic_set(&eb->spinning_readers, 0);
4625 atomic_set(&eb->spinning_writers, 0);
4626 eb->lock_nested = 0;
4627 init_waitqueue_head(&eb->write_lock_wq);
4628 init_waitqueue_head(&eb->read_lock_wq);
4629
4630 btrfs_leak_debug_add(&eb->leak_list, &buffers);
4631
4632 spin_lock_init(&eb->refs_lock);
4633 atomic_set(&eb->refs, 1);
4634 atomic_set(&eb->io_pages, 0);
4635
4636 /*
4637 * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
4638 */
4639 BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
4640 > MAX_INLINE_EXTENT_BUFFER_SIZE);
4641 BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
4642
4643 return eb;
4644}
4645
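/*
 * Allocate a new dummy extent buffer and copy the contents of src into it.
 * The clone shares nothing with the original: it gets freshly allocated
 * pages, is marked uptodate and DUMMY, and is not inserted into the buffer
 * radix tree.
 */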
4646struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
4647{
4648 unsigned long i;
4649 struct page *p;
4650 struct extent_buffer *new;
4651 unsigned long num_pages = num_extent_pages(src->start, src->len);
4652
4653 new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
4654 if (new == NULL)
4655 return NULL;
4656
4657 for (i = 0; i < num_pages; i++) {
4658 p = alloc_page(GFP_NOFS);
4659 if (!p) {
4660 btrfs_release_extent_buffer(new);
4661 return NULL;
4662 }
4663 attach_extent_buffer_page(new, p);
4664 WARN_ON(PageDirty(p));
4665 SetPageUptodate(p);
4666 new->pages[i] = p;
4667 }
4668
4669 copy_extent_buffer(new, src, 0, 0, src->len);
4670 set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
4671 set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
4672
4673 return new;
4674}
4675
4676struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
4677 u64 start, unsigned long len)
4678{
4679 struct extent_buffer *eb;
4680 unsigned long num_pages;
4681 unsigned long i;
4682
4683 num_pages = num_extent_pages(start, len);
4684
4685 eb = __alloc_extent_buffer(fs_info, start, len);
4686 if (!eb)
4687 return NULL;
4688
4689 for (i = 0; i < num_pages; i++) {
4690 eb->pages[i] = alloc_page(GFP_NOFS);
4691 if (!eb->pages[i])
4692 goto err;
4693 }
4694 set_extent_buffer_uptodate(eb);
4695 btrfs_set_header_nritems(eb, 0);
4696 set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4697
4698 return eb;
4699err:
4700 for (; i > 0; i--)
4701 __free_page(eb->pages[i - 1]);
4702 __free_extent_buffer(eb);
4703 return NULL;
4704}
4705
4706struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
4707 u64 start)
4708{
4709 unsigned long len;
4710
4711 if (!fs_info) {
4712 /*
4713 * Called only from tests that don't always have a fs_info
4714 * available, but we know that nodesize is 4096
4715 */
4716 len = 4096;
4717 } else {
4718 len = fs_info->tree_root->nodesize;
4719 }
4720
4721 return __alloc_dummy_extent_buffer(fs_info, start, len);
4722}
4723
4724static void check_buffer_tree_ref(struct extent_buffer *eb)
4725{
4726 int refs;
4727 /* the ref bit is tricky. We have to make sure it is set
4728 * if we have the buffer dirty. Otherwise the
4729 * code to free a buffer can end up dropping a dirty
4730 * page
4731 *
4732 * Once the ref bit is set, it won't go away while the
4733 * buffer is dirty or in writeback, and it also won't
4734 * go away while we have the reference count on the
4735 * eb bumped.
4736 *
4737 * We can't just set the ref bit without bumping the
4738 * ref on the eb because free_extent_buffer might
4739 * see the ref bit and try to clear it. If this happens
4740 * free_extent_buffer might end up dropping our original
4741 * ref by mistake and freeing the page before we are able
4742 * to add one more ref.
4743 *
4744 * So bump the ref count first, then set the bit. If someone
4745 * beat us to it, drop the ref we added.
4746 */
4747 refs = atomic_read(&eb->refs);
4748 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4749 return;
4750
4751 spin_lock(&eb->refs_lock);
4752 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4753 atomic_inc(&eb->refs);
4754 spin_unlock(&eb->refs_lock);
4755}
4756
4757static void mark_extent_buffer_accessed(struct extent_buffer *eb,
4758 struct page *accessed)
4759{
4760 unsigned long num_pages, i;
4761
4762 check_buffer_tree_ref(eb);
4763
4764 num_pages = num_extent_pages(eb->start, eb->len);
4765 for (i = 0; i < num_pages; i++) {
4766 struct page *p = eb->pages[i];
4767
4768 if (p != accessed)
4769 mark_page_accessed(p);
4770 }
4771}
4772
4773struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
4774 u64 start)
4775{
4776 struct extent_buffer *eb;
4777
4778 rcu_read_lock();
4779 eb = radix_tree_lookup(&fs_info->buffer_radix,
4780 start >> PAGE_SHIFT);
4781 if (eb && atomic_inc_not_zero(&eb->refs)) {
4782 rcu_read_unlock();
4783 /*
4784 * Lock our eb's refs_lock to avoid races with
4785 * free_extent_buffer. When we get our eb it might be flagged
4786 * with EXTENT_BUFFER_STALE and another task running
4787 * free_extent_buffer might have seen that flag set,
4788 * eb->refs == 2, that the buffer isn't under IO (dirty and
4789 * writeback flags not set) and it's still in the tree (flag
4790 * EXTENT_BUFFER_TREE_REF set), therefore being in the process
4791 * of decrementing the extent buffer's reference count twice.
4792 * So here we could race and increment the eb's reference count,
4793 * clear its stale flag, mark it as dirty and drop our reference
4794 * before the other task finishes executing free_extent_buffer,
4795 * which would later result in an attempt to free an extent
4796 * buffer that is dirty.
4797 */
4798 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
4799 spin_lock(&eb->refs_lock);
4800 spin_unlock(&eb->refs_lock);
4801 }
4802 mark_extent_buffer_accessed(eb, NULL);
4803 return eb;
4804 }
4805 rcu_read_unlock();
4806
4807 return NULL;
4808}
4809
4810#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4811struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
4812 u64 start)
4813{
4814 struct extent_buffer *eb, *exists = NULL;
4815 int ret;
4816
4817 eb = find_extent_buffer(fs_info, start);
4818 if (eb)
4819 return eb;
4820 eb = alloc_dummy_extent_buffer(fs_info, start);
4821 if (!eb)
4822 return NULL;
4823 eb->fs_info = fs_info;
4824again:
4825 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4826 if (ret)
4827 goto free_eb;
4828 spin_lock(&fs_info->buffer_lock);
4829 ret = radix_tree_insert(&fs_info->buffer_radix,
4830 start >> PAGE_SHIFT, eb);
4831 spin_unlock(&fs_info->buffer_lock);
4832 radix_tree_preload_end();
4833 if (ret == -EEXIST) {
4834 exists = find_extent_buffer(fs_info, start);
4835 if (exists)
4836 goto free_eb;
4837 else
4838 goto again;
4839 }
4840 check_buffer_tree_ref(eb);
4841 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
4842
4843 /*
4844	 * We will free dummy extent buffers if they come into
4845 * free_extent_buffer with a ref count of 2, but if we are using this we
4846 * want the buffers to stay in memory until we're done with them, so
4847 * bump the ref count again.
4848 */
4849 atomic_inc(&eb->refs);
4850 return eb;
4851free_eb:
4852 btrfs_release_extent_buffer(eb);
4853 return exists;
4854}
4855#endif
4856
4857struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
4858 u64 start)
4859{
4860 unsigned long len = fs_info->tree_root->nodesize;
4861 unsigned long num_pages = num_extent_pages(start, len);
4862 unsigned long i;
4863 unsigned long index = start >> PAGE_SHIFT;
4864 struct extent_buffer *eb;
4865 struct extent_buffer *exists = NULL;
4866 struct page *p;
4867 struct address_space *mapping = fs_info->btree_inode->i_mapping;
4868 int uptodate = 1;
4869 int ret;
4870
4871 eb = find_extent_buffer(fs_info, start);
4872 if (eb)
4873 return eb;
4874
4875 eb = __alloc_extent_buffer(fs_info, start, len);
4876 if (!eb)
4877 return NULL;
4878
4879 for (i = 0; i < num_pages; i++, index++) {
4880 p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
4881 if (!p)
4882 goto free_eb;
4883
4884 spin_lock(&mapping->private_lock);
4885 if (PagePrivate(p)) {
4886 /*
4887 * We could have already allocated an eb for this page
4888			 * and attached one so let's see if we can get a ref on
4889 * the existing eb, and if we can we know it's good and
4890 * we can just return that one, else we know we can just
4891 * overwrite page->private.
4892 */
4893 exists = (struct extent_buffer *)p->private;
4894 if (atomic_inc_not_zero(&exists->refs)) {
4895 spin_unlock(&mapping->private_lock);
4896 unlock_page(p);
4897 put_page(p);
4898 mark_extent_buffer_accessed(exists, p);
4899 goto free_eb;
4900 }
4901 exists = NULL;
4902
4903 /*
4904 * Do this so attach doesn't complain and we need to
4905 * drop the ref the old guy had.
4906 */
4907 ClearPagePrivate(p);
4908 WARN_ON(PageDirty(p));
4909 put_page(p);
4910 }
4911 attach_extent_buffer_page(eb, p);
4912 spin_unlock(&mapping->private_lock);
4913 WARN_ON(PageDirty(p));
4914 eb->pages[i] = p;
4915 if (!PageUptodate(p))
4916 uptodate = 0;
4917
4918 /*
4919 * see below about how we avoid a nasty race with release page
4920 * and why we unlock later
4921 */
4922 }
4923 if (uptodate)
4924 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4925again:
4926 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4927 if (ret)
4928 goto free_eb;
4929
4930 spin_lock(&fs_info->buffer_lock);
4931 ret = radix_tree_insert(&fs_info->buffer_radix,
4932 start >> PAGE_SHIFT, eb);
4933 spin_unlock(&fs_info->buffer_lock);
4934 radix_tree_preload_end();
4935 if (ret == -EEXIST) {
4936 exists = find_extent_buffer(fs_info, start);
4937 if (exists)
4938 goto free_eb;
4939 else
4940 goto again;
4941 }
4942 /* add one reference for the tree */
4943 check_buffer_tree_ref(eb);
4944 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
4945
4946 /*
4947 * there is a race where release page may have
4948 * tried to find this extent buffer in the radix
4949 * but failed. It will tell the VM it is safe to
4950	 * reclaim the page, and it will clear the page private bit.
4951 * We must make sure to set the page private bit properly
4952 * after the extent buffer is in the radix tree so
4953 * it doesn't get lost
4954 */
4955 SetPageChecked(eb->pages[0]);
4956 for (i = 1; i < num_pages; i++) {
4957 p = eb->pages[i];
4958 ClearPageChecked(p);
4959 unlock_page(p);
4960 }
4961 unlock_page(eb->pages[0]);
4962 return eb;
4963
4964free_eb:
4965 WARN_ON(!atomic_dec_and_test(&eb->refs));
4966 for (i = 0; i < num_pages; i++) {
4967 if (eb->pages[i])
4968 unlock_page(eb->pages[i]);
4969 }
4970
4971 btrfs_release_extent_buffer(eb);
4972 return exists;
4973}
4974
4975static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4976{
4977 struct extent_buffer *eb =
4978 container_of(head, struct extent_buffer, rcu_head);
4979
4980 __free_extent_buffer(eb);
4981}
4982
4983/* Expects to have eb->eb_lock already held */
4984static int release_extent_buffer(struct extent_buffer *eb)
4985{
4986 WARN_ON(atomic_read(&eb->refs) == 0);
4987 if (atomic_dec_and_test(&eb->refs)) {
4988 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
4989 struct btrfs_fs_info *fs_info = eb->fs_info;
4990
4991 spin_unlock(&eb->refs_lock);
4992
4993 spin_lock(&fs_info->buffer_lock);
4994 radix_tree_delete(&fs_info->buffer_radix,
4995 eb->start >> PAGE_SHIFT);
4996 spin_unlock(&fs_info->buffer_lock);
4997 } else {
4998 spin_unlock(&eb->refs_lock);
4999 }
5000
5001 /* Should be safe to release our pages at this point */
5002 btrfs_release_extent_buffer_page(eb);
5003#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
5004 if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))) {
5005 __free_extent_buffer(eb);
5006 return 1;
5007 }
5008#endif
5009 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
5010 return 1;
5011 }
5012 spin_unlock(&eb->refs_lock);
5013
5014 return 0;
5015}
5016
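/*
 * Drop one reference on an extent buffer.  While the count is above 3 we can
 * decrement it locklessly with a cmpxchg loop; once it gets low we take
 * refs_lock and also drop the extra reference that dummy or stale buffers
 * which are no longer in use keep around, so release_extent_buffer() can
 * tear the buffer down when the last reference goes away.
 */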
5017void free_extent_buffer(struct extent_buffer *eb)
5018{
5019 int refs;
5020 int old;
5021 if (!eb)
5022 return;
5023
5024 while (1) {
5025 refs = atomic_read(&eb->refs);
5026 if (refs <= 3)
5027 break;
5028 old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
5029 if (old == refs)
5030 return;
5031 }
5032
5033 spin_lock(&eb->refs_lock);
5034 if (atomic_read(&eb->refs) == 2 &&
5035 test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
5036 atomic_dec(&eb->refs);
5037
5038 if (atomic_read(&eb->refs) == 2 &&
5039 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
5040 !extent_buffer_under_io(eb) &&
5041 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5042 atomic_dec(&eb->refs);
5043
5044 /*
5045 * I know this is terrible, but it's temporary until we stop tracking
5046 * the uptodate bits and such for the extent buffers.
5047 */
5048 release_extent_buffer(eb);
5049}
5050
5051void free_extent_buffer_stale(struct extent_buffer *eb)
5052{
5053 if (!eb)
5054 return;
5055
5056 spin_lock(&eb->refs_lock);
5057 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
5058
5059 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
5060 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5061 atomic_dec(&eb->refs);
5062 release_extent_buffer(eb);
5063}
5064
5065void clear_extent_buffer_dirty(struct extent_buffer *eb)
5066{
5067 unsigned long i;
5068 unsigned long num_pages;
5069 struct page *page;
5070
5071 num_pages = num_extent_pages(eb->start, eb->len);
5072
5073 for (i = 0; i < num_pages; i++) {
5074 page = eb->pages[i];
5075 if (!PageDirty(page))
5076 continue;
5077
5078 lock_page(page);
5079 WARN_ON(!PagePrivate(page));
5080
5081 clear_page_dirty_for_io(page);
5082 spin_lock_irq(&page->mapping->tree_lock);
5083 if (!PageDirty(page)) {
5084 radix_tree_tag_clear(&page->mapping->page_tree,
5085 page_index(page),
5086 PAGECACHE_TAG_DIRTY);
5087 }
5088 spin_unlock_irq(&page->mapping->tree_lock);
5089 ClearPageError(page);
5090 unlock_page(page);
5091 }
5092 WARN_ON(atomic_read(&eb->refs) == 0);
5093}
5094
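/*
 * Mark every page of an extent buffer dirty and make sure the tree reference
 * bit is set so the buffer cannot be freed while dirty.  Returns whether the
 * buffer was already dirty before this call.
 */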
int set_extent_buffer_dirty(struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;
	int was_dirty = 0;

	check_buffer_tree_ref(eb);

	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);

	num_pages = num_extent_pages(eb->start, eb->len);
	WARN_ON(atomic_read(&eb->refs) == 0);
	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));

	for (i = 0; i < num_pages; i++)
		set_page_dirty(eb->pages[i]);
	return was_dirty;
}

void clear_extent_buffer_uptodate(struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		page = eb->pages[i];
		if (page)
			ClearPageUptodate(page);
	}
}

void set_extent_buffer_uptodate(struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		page = eb->pages[i];
		SetPageUptodate(page);
	}
}

int extent_buffer_uptodate(struct extent_buffer *eb)
{
	return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
}

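/*
 * Read the pages backing an extent buffer from disk.  Pages that are
 * already uptodate are skipped; the rest are submitted in one bio chain.
 * With WAIT_COMPLETE the function waits for the reads to finish and
 * returns -EIO if any page failed; with WAIT_NONE it only starts IO on
 * pages it could trylock.
 */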
int read_extent_buffer_pages(struct extent_io_tree *tree,
			     struct extent_buffer *eb, u64 start, int wait,
			     get_extent_t *get_extent, int mirror_num)
{
	unsigned long i;
	unsigned long start_i;
	struct page *page;
	int err;
	int ret = 0;
	int locked_pages = 0;
	int all_uptodate = 1;
	unsigned long num_pages;
	unsigned long num_reads = 0;
	struct bio *bio = NULL;
	unsigned long bio_flags = 0;

	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
		return 0;

	if (start) {
		WARN_ON(start < eb->start);
		start_i = (start >> PAGE_SHIFT) -
			(eb->start >> PAGE_SHIFT);
	} else {
		start_i = 0;
	}

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = start_i; i < num_pages; i++) {
		page = eb->pages[i];
		if (wait == WAIT_NONE) {
			if (!trylock_page(page))
				goto unlock_exit;
		} else {
			lock_page(page);
		}
		locked_pages++;
		if (!PageUptodate(page)) {
			num_reads++;
			all_uptodate = 0;
		}
	}
	if (all_uptodate) {
		if (start_i == 0)
			set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
		goto unlock_exit;
	}

	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
	eb->read_mirror = 0;
	atomic_set(&eb->io_pages, num_reads);
	for (i = start_i; i < num_pages; i++) {
		page = eb->pages[i];
		if (!PageUptodate(page)) {
			ClearPageError(page);
			err = __extent_read_full_page(tree, page,
						      get_extent, &bio,
						      mirror_num, &bio_flags,
						      READ | REQ_META);
			if (err)
				ret = err;
		} else {
			unlock_page(page);
		}
	}

	if (bio) {
		err = submit_one_bio(READ | REQ_META, bio, mirror_num,
				     bio_flags);
		if (err)
			return err;
	}

	if (ret || wait != WAIT_COMPLETE)
		return ret;

	for (i = start_i; i < num_pages; i++) {
		page = eb->pages[i];
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			ret = -EIO;
	}

	return ret;

unlock_exit:
	i = start_i;
	while (locked_pages > 0) {
		page = eb->pages[i];
		i++;
		unlock_page(page);
		locked_pages--;
	}
	return ret;
}

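/*
 * Copy a range of bytes out of the extent buffer into a kernel buffer,
 * crossing page boundaries as needed.
 */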
void read_extent_buffer(struct extent_buffer *eb, void *dstv,
			unsigned long start,
			unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & (PAGE_SIZE - 1);

	while (len > 0) {
		page = eb->pages[i];

		cur = min(len, (PAGE_SIZE - offset));
		kaddr = page_address(page);
		memcpy(dst, kaddr + offset, cur);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

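/*
 * Same as read_extent_buffer(), but copies into a user-space buffer and
 * returns -EFAULT if any copy_to_user() fails.
 */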
int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
			       unsigned long start,
			       unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char __user *dst = (char __user *)dstv;
	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & (PAGE_SIZE - 1);

	while (len > 0) {
		page = eb->pages[i];

		cur = min(len, (PAGE_SIZE - offset));
		kaddr = page_address(page);
		if (copy_to_user(dst, kaddr + offset, cur)) {
			ret = -EFAULT;
			break;
		}

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}

	return ret;
}

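/*
 * Return a direct kernel mapping for a range of the extent buffer.
 * Returns -EINVAL if the range crosses a page boundary or runs past the
 * end of the buffer.
 */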
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
			      unsigned long min_len, char **map,
			      unsigned long *map_start,
			      unsigned long *map_len)
{
	size_t offset = start & (PAGE_SIZE - 1);
	char *kaddr;
	struct page *p;
	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
	unsigned long end_i = (start_offset + start + min_len - 1) >>
		PAGE_SHIFT;

	if (i != end_i)
		return -EINVAL;

	if (i == 0) {
		offset = start_offset;
		*map_start = 0;
	} else {
		offset = 0;
		*map_start = ((u64)i << PAGE_SHIFT) - start_offset;
	}

	if (start + min_len > eb->len) {
		WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
		     "wanted %lu %lu\n",
		     eb->start, eb->len, start, min_len);
		return -EINVAL;
	}

	p = eb->pages[i];
	kaddr = page_address(p);
	*map = kaddr + offset;
	*map_len = PAGE_SIZE - offset;
	return 0;
}

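/*
 * Compare a range of the extent buffer against a kernel buffer,
 * memcmp()-style: returns 0 on a match, non-zero otherwise.
 */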
int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start,
			 unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & (PAGE_SIZE - 1);

	while (len > 0) {
		page = eb->pages[i];

		cur = min(len, (PAGE_SIZE - offset));

		kaddr = page_address(page);
		ret = memcmp(ptr, kaddr + offset, cur);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}

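/*
 * Copy bytes from a kernel buffer into the extent buffer, page by page.
 * The destination pages are expected to be uptodate.
 */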
void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & (PAGE_SIZE - 1);

	while (len > 0) {
		page = eb->pages[i];
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_SIZE - offset);
		kaddr = page_address(page);
		memcpy(kaddr + offset, src, cur);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & (PAGE_SIZE - 1);

	while (len > 0) {
		page = eb->pages[i];
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_SIZE - offset);
		kaddr = page_address(page);
		memset(kaddr + offset, c, cur);

		len -= cur;
		offset = 0;
		i++;
	}
}

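/*
 * Copy a range from one extent buffer into another, using
 * read_extent_buffer() to pull from the source so that page boundaries on
 * both sides are handled.
 */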
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
	unsigned long i = (start_offset + dst_offset) >> PAGE_SHIFT;

	WARN_ON(src->len != dst_len);

	offset = (start_offset + dst_offset) &
		(PAGE_SIZE - 1);

	while (len > 0) {
		page = dst->pages[i];
		WARN_ON(!PageUptodate(page));

		cur = min(len, (unsigned long)(PAGE_SIZE - offset));

		kaddr = page_address(page);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}

/*
 * The extent buffer bitmap operations are done with byte granularity because
 * bitmap items are not guaranteed to be aligned to a word and therefore a
 * single word in a bitmap may straddle two pages in the extent buffer.
 */
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))

/*
 * eb_bitmap_offset() - calculate the page and offset of the byte containing the
 * given bit number
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @nr: bit number
 * @page_index: return index of the page in the extent buffer that contains the
 * given bit number
 * @page_offset: return offset into the page given by page_index
 *
 * This helper hides the ugliness of finding the byte in an extent buffer which
 * contains a given bit.
 */
static inline void eb_bitmap_offset(struct extent_buffer *eb,
				    unsigned long start, unsigned long nr,
				    unsigned long *page_index,
				    size_t *page_offset)
{
	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
	size_t byte_offset = BIT_BYTE(nr);
	size_t offset;

	/*
	 * The byte we want is the offset of the extent buffer + the offset of
	 * the bitmap item in the extent buffer + the offset of the byte in the
	 * bitmap item.
	 */
	offset = start_offset + start + byte_offset;

	*page_index = offset >> PAGE_SHIFT;
	*page_offset = offset & (PAGE_SIZE - 1);
}

/**
 * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @nr: bit number to test
 */
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long nr)
{
	char *kaddr;
	struct page *page;
	unsigned long i;
	size_t offset;

	eb_bitmap_offset(eb, start, nr, &i, &offset);
	page = eb->pages[i];
	WARN_ON(!PageUptodate(page));
	kaddr = page_address(page);
	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
}

/**
 * extent_buffer_bitmap_set - set an area of a bitmap
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @pos: bit number of the first bit
 * @len: number of bits to set
 */
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len)
{
	char *kaddr;
	struct page *page;
	unsigned long i;
	size_t offset;
	const unsigned int size = pos + len;
	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	unsigned int mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);

	eb_bitmap_offset(eb, start, pos, &i, &offset);
	page = eb->pages[i];
	WARN_ON(!PageUptodate(page));
	kaddr = page_address(page);

	while (len >= bits_to_set) {
		kaddr[offset] |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = ~0U;
		if (++offset >= PAGE_SIZE && len > 0) {
			offset = 0;
			page = eb->pages[++i];
			WARN_ON(!PageUptodate(page));
			kaddr = page_address(page);
		}
	}
	if (len) {
		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
		kaddr[offset] |= mask_to_set;
	}
}

/**
 * extent_buffer_bitmap_clear - clear an area of a bitmap
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @pos: bit number of the first bit
 * @len: number of bits to clear
 */
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
				unsigned long pos, unsigned long len)
{
	char *kaddr;
	struct page *page;
	unsigned long i;
	size_t offset;
	const unsigned int size = pos + len;
	int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	unsigned int mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);

	eb_bitmap_offset(eb, start, pos, &i, &offset);
	page = eb->pages[i];
	WARN_ON(!PageUptodate(page));
	kaddr = page_address(page);

	while (len >= bits_to_clear) {
		kaddr[offset] &= ~mask_to_clear;
		len -= bits_to_clear;
		bits_to_clear = BITS_PER_BYTE;
		mask_to_clear = ~0U;
		if (++offset >= PAGE_SIZE && len > 0) {
			offset = 0;
			page = eb->pages[++i];
			WARN_ON(!PageUptodate(page));
			kaddr = page_address(page);
		}
	}
	if (len) {
		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
		kaddr[offset] &= ~mask_to_clear;
	}
}

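/*
 * Helpers for moving data between (possibly the same) pages of extent
 * buffers: copy_pages() picks memmove() over memcpy() when the source and
 * destination ranges overlap within one page.
 */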
static inline bool areas_overlap(unsigned long src, unsigned long dst,
				 unsigned long len)
{
	unsigned long distance = (src > dst) ? src - dst : dst - src;
	return distance < len;
}

static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = page_address(dst_page);
	char *src_kaddr;
	int must_memmove = 0;

	if (dst_page != src_page) {
		src_kaddr = page_address(src_page);
	} else {
		src_kaddr = dst_kaddr;
		if (areas_overlap(src_off, dst_off, len))
			must_memmove = 1;
	}

	if (must_memmove)
		memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
	else
		memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
}

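/*
 * Copy a range within a single extent buffer, walking forward a page at a
 * time.  The offsets are validated against the buffer length before any
 * data is moved.
 */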
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		btrfs_err(dst->fs_info,
			  "memmove bogus src_offset %lu move "
			  "len %lu dst len %lu", src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		btrfs_err(dst->fs_info,
			  "memmove bogus dst_offset %lu move "
			  "len %lu dst len %lu", dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = (start_offset + dst_offset) &
			(PAGE_SIZE - 1);
		src_off_in_page = (start_offset + src_offset) &
			(PAGE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_SHIFT;

		cur = min(len, (unsigned long)(PAGE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			    (unsigned long)(PAGE_SIZE - dst_off_in_page));

		copy_pages(dst->pages[dst_i], dst->pages[src_i],
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}

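/*
 * memmove() equivalent for ranges within one extent buffer: when the
 * destination is above the source, the copy is done backwards, page by
 * page, so overlapping ranges are handled correctly.
 */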
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		btrfs_err(dst->fs_info, "memmove bogus src_offset %lu move "
			  "len %lu dst len %lu", src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		btrfs_err(dst->fs_info, "memmove bogus dst_offset %lu move "
			  "len %lu dst len %lu", dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_SHIFT;

		dst_off_in_page = (start_offset + dst_end) &
			(PAGE_SIZE - 1);
		src_off_in_page = (start_offset + src_end) &
			(PAGE_SIZE - 1);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		copy_pages(dst->pages[dst_i], dst->pages[src_i],
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}

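/*
 * Called when the page cache wants to release a page backed by an extent
 * buffer.  Returns 1 if the buffer could be freed (only the tree
 * reference was left and no IO was in flight), 0 if it is still in use.
 */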
int try_release_extent_buffer(struct page *page)
{
	struct extent_buffer *eb;

	/*
	 * We need to make sure nobody is attaching this page to an eb right
	 * now.
	 */
	spin_lock(&page->mapping->private_lock);
	if (!PagePrivate(page)) {
		spin_unlock(&page->mapping->private_lock);
		return 1;
	}

	eb = (struct extent_buffer *)page->private;
	BUG_ON(!eb);

	/*
	 * This is a little awful but should be ok, we need to make sure that
	 * the eb doesn't disappear out from under us while we're looking at
	 * this page.
	 */
	spin_lock(&eb->refs_lock);
	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
		spin_unlock(&eb->refs_lock);
		spin_unlock(&page->mapping->private_lock);
		return 0;
	}
	spin_unlock(&page->mapping->private_lock);

	/*
	 * If tree ref isn't set then we know the ref on this eb is a real ref,
	 * so just return, this page will likely be freed soon anyway.
	 */
	if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
		spin_unlock(&eb->refs_lock);
		return 0;
	}

	return release_extent_buffer(eb);
}