/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

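/*
 * Hedged illustration (not part of the original file): the two helpers
 * above treat an entry as the half-open byte range
 * [file_offset, file_offset + len).  The hypothetical function below
 * would return 1 for an entry covering 4096..8191: offsets 4096 and
 * 8191 are inside it, 8192 is not, and a range starting at 8192 does
 * not overlap it.
 */
static int __maybe_unused ordered_range_semantics_example(
					struct btrfs_ordered_extent *entry)
{
	/* assume entry->file_offset == 4096 and entry->len == 4096 */
	return offset_in_entry(entry, 4096) &&
	       offset_in_entry(entry, 8191) &&
	       !offset_in_entry(entry, 8192) &&
	       range_overlaps(entry, 8191, 2) &&
	       !range_overlaps(entry, 8192, 2);
}
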
/*
 * look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kzalloc(sizeof(*entry), GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = inode;
	entry->compress_type = compress_type;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	BUG_ON(node);
	spin_unlock(&tree->lock);

	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &BTRFS_I(inode)->root->fs_info->ordered_extents);
	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

	return 0;
}

int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

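/*
 * Hypothetical usage sketch (illustrative values, no real caller):
 * record an ordered extent for a plain COW write of 64K at file offset
 * 0, backed by 64K reserved on disk at byte 1M.  Type 0 is
 * BTRFS_ORDERED_IO_DONE, which the helper above deliberately skips when
 * setting flag bits, so no extra flag is set for a regular write.
 */
static int __maybe_unused add_ordered_extent_example(struct inode *inode)
{
	return btrfs_add_ordered_extent(inode, 0, 1024 * 1024,
					64 * 1024, 64 * 1024, 0);
}
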
int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
int btrfs_add_ordered_sum(struct inode *inode,
			  struct btrfs_ordered_extent *entry,
			  struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock(&tree->lock);
	return 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
		       (unsigned long long)dec_start,
		       (unsigned long long)dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)to_dec);
	}
	entry->bytes_left -= to_dec;
	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock(&tree->lock);
	return ret == 0;
}

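/*
 * Hedged sketch of the forward walk the comment above describes
 * (hypothetical caller, not in the original file): each successful call
 * advances cur one byte past the accounted range, so a completed IO
 * that spans several ordered extents is retired one extent at a time.
 */
static void __maybe_unused dec_test_first_walk_example(struct inode *inode,
						       u64 start, u64 len)
{
	u64 cur = start;
	const u64 end = start + len;

	while (cur < end) {
		struct btrfs_ordered_extent *ordered = NULL;
		u64 prev = cur;

		if (btrfs_dec_test_first_ordered_pending(inode, &ordered,
							 &cur, end - cur)) {
			/* this ordered extent is fully written; drop ref */
			btrfs_put_ordered_extent(ordered);
		}
		if (cur == prev)
			break;	/* no ordered extent found, stop */
	}
}
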
/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
		       (unsigned long long)entry->bytes_left,
		       (unsigned long long)io_size);
	}
	entry->bytes_left -= io_size;
	if (entry->bytes_left == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
	else
		ret = 1;
out:
	if (!ret && cached && entry) {
		*cached = entry;
		atomic_inc(&entry->refs);
	}
	spin_unlock(&tree->lock);
	return ret == 0;
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (atomic_dec_and_test(&entry->refs)) {
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kfree(entry);
	}
	return 0;
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and you must wake_up entry->wait.  You must hold the tree lock
 * while you call this function.
 */
static int __btrfs_remove_ordered_extent(struct inode *inode,
					 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);

	spin_lock(&root->fs_info->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);

	trace_btrfs_ordered_extent_remove(inode, entry);

	/*
	 * we have no more ordered extents for this inode and
	 * no dirty pages.  We can safely remove it from the
	 * list of ordered extents
	 */
	if (RB_EMPTY_ROOT(&tree->tree) &&
	    !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		list_del_init(&BTRFS_I(inode)->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);

	return 0;
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * but any waiters are woken.
 */
int btrfs_remove_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	ret = __btrfs_remove_ordered_extent(inode, entry);
	spin_unlock(&tree->lock);
	wake_up(&entry->wait);

	return ret;
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
int btrfs_wait_ordered_extents(struct btrfs_root *root,
			       int nocow_only, int delay_iput)
{
	struct list_head splice;
	struct list_head *cur;
	struct btrfs_ordered_extent *ordered;
	struct inode *inode;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->fs_info->ordered_extent_lock);
	list_splice_init(&root->fs_info->ordered_extents, &splice);
	while (!list_empty(&splice)) {
		cur = splice.next;
		ordered = list_entry(cur, struct btrfs_ordered_extent,
				     root_extent_list);
		if (nocow_only &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
			list_move(&ordered->root_extent_list,
				  &root->fs_info->ordered_extents);
			cond_resched_lock(&root->fs_info->ordered_extent_lock);
			continue;
		}

		list_del_init(&ordered->root_extent_list);
		atomic_inc(&ordered->refs);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(ordered->inode);

		spin_unlock(&root->fs_info->ordered_extent_lock);

		if (inode) {
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			if (delay_iput)
				btrfs_add_delayed_iput(inode);
			else
				iput(inode);
		} else {
			btrfs_put_ordered_extent(ordered);
		}

		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);
	return 0;
}

/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the IO.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
{
	struct btrfs_inode *btrfs_inode;
	struct inode *inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->fs_info->ordered_extent_lock);
again:
	list_splice_init(&root->fs_info->ordered_operations, &splice);

	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);

		inode = &btrfs_inode->vfs_inode;

		list_del_init(&btrfs_inode->ordered_operations);

		/*
		 * the inode may be getting freed (in sys_unlink path).
		 */
		inode = igrab(inode);

		if (!wait && inode) {
			list_add_tail(&BTRFS_I(inode)->ordered_operations,
				      &root->fs_info->ordered_operations);
		}
		spin_unlock(&root->fs_info->ordered_extent_lock);

		if (inode) {
			if (wait)
				btrfs_wait_ordered_range(inode, 0, (u64)-1);
			else
				filemap_flush(inode->i_mapping);
			btrfs_add_delayed_iput(inode);
		}

		cond_resched();
		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	if (wait && !list_empty(&root->fs_info->ordered_operations))
		goto again;

	spin_unlock(&root->fs_info->ordered_extent_lock);
	mutex_unlock(&root->fs_info->ordered_operations_mutex);

	return 0;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the IO completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for pdflush to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

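/*
 * Minimal usage sketch (hypothetical, mirrors what fsync-style callers
 * do): look up the ordered extent covering 'offset', force it to
 * completion, then drop the lookup reference.
 */
static void __maybe_unused wait_one_ordered_example(struct inode *inode,
						    u64 offset)
{
	struct btrfs_ordered_extent *ordered;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return;
	btrfs_start_ordered_extent(inode, ordered, 1);
	btrfs_put_ordered_extent(ordered);
}
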
/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;
	int found;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}
again:
	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	/* The compression code will leave pages locked but return from
	 * writepage without setting the page writeback.  Starting again
	 * with WB_SYNC_ALL will end up waiting for the IO to actually start.
	 */
	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

	filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	found = 0;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len < start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		found++;
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	if (found || test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
				    EXTENT_DELALLOC, 0, NULL)) {
		schedule_timeout(1);
		goto again;
	}
	return 0;
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	spin_unlock(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
							u64 file_offset,
							u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		atomic_inc(&entry->refs);
	spin_unlock(&tree->lock);
	return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	spin_unlock(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size_test;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;

	if (ordered)
		offset = entry_end(ordered);
	else
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);

	spin_lock(&tree->lock);
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/* truncate file */
	if (disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = i_size;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size || offset <= disk_i_size) {
		goto out;
	}

	/*
	 * we can't update the disk_isize if there are delalloc bytes
	 * between disk_i_size and this ordered extent
	 */
	if (test_range_bit(io_tree, disk_i_size, offset - 1,
			   EXTENT_DELALLOC, 0, NULL)) {
		goto out;
	}
	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving ordered struct,
		 * so there should be no ordered struct cover this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	while (node) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;
		if (test->file_offset >= disk_i_size)
			goto out;
		node = rb_prev(node);
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * at this point, we know we can safely update i_size to at least
	 * the offset from this ordered extent.  But, we need to
	 * walk forward and see if IOs from higher up in the file have
	 * finished.
	 */
	if (ordered) {
		node = rb_next(&ordered->rb_node);
	} else {
		if (prev)
			node = rb_next(prev);
		else
			node = rb_first(&tree->tree);
	}
	i_size_test = 0;
	if (node) {
		/*
		 * do we have an area where IO might have finished
		 * between our ordered extent and the next one.
		 */
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (test->file_offset > offset)
			i_size_test = test->file_offset;
	} else {
		i_size_test = i_size;
	}

	/*
	 * i_size_test is the end of a region after this ordered
	 * extent where there are no ordered extents.  As long as there
	 * are no delalloc bytes in this area, it is safe to update
	 * disk_i_size to the end of the region.
	 */
	if (i_size_test > offset &&
	    !test_range_bit(io_tree, offset, i_size_test - 1,
			    EXTENT_DELALLOC, 0, NULL)) {
		new_i_size = min_t(u64, i_size_test, i_size);
	}
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * we need to remove the ordered extent with the tree lock held
	 * so that other people calling this function don't find our fully
	 * processed ordered entry and skip updating the i_size
	 */
	if (ordered)
		__btrfs_remove_ordered_extent(inode, ordered);
	spin_unlock(&tree->lock);
	if (ordered)
		wake_up(&ordered->wait);
	return ret;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_sector_sum *sector_sums;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int ret = 1;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 1;

	spin_lock(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr) {
			num_sectors = ordered_sum->len / sectorsize;
			sector_sums = ordered_sum->sums;
			for (i = 0; i < num_sectors; i++) {
				if (sector_sums[i].bytenr == disk_bytenr) {
					*sum = sector_sums[i].sum;
					ret = 0;
					goto out;
				}
			}
		}
	}
out:
	spin_unlock(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return ret;
}

/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct inode *inode)
{
	u64 last_mod;

	last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

	/*
	 * if this file hasn't been changed since the last transaction
	 * commit, we can safely return without doing anything
	 */
	if (last_mod < root->fs_info->last_trans_committed)
		return 0;

	/*
	 * the transaction is already committing.  Just start the IO and
	 * don't bother with all of this list nonsense
	 */
	if (trans && root->fs_info->running_transaction->blocked) {
		btrfs_wait_ordered_range(inode, 0, (u64)-1);
		return 0;
	}

	spin_lock(&root->fs_info->ordered_extent_lock);
	if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
		list_add_tail(&BTRFS_I(inode)->ordered_operations,
			      &root->fs_info->ordered_operations);
	}
	spin_unlock(&root->fs_info->ordered_extent_lock);

	return 0;
}

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

/*
 * look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/*
 * Allocate and add a new ordered_extent into the per-inode tree.
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
				      u64 disk_bytenr, u64 num_bytes,
				      u64 disk_num_bytes, int type, int dio,
				      int compress_type)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
	int ret;

	if (type == BTRFS_ORDERED_NOCOW || type == BTRFS_ORDERED_PREALLOC) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
		if (ret < 0)
			return ret;
		ret = 0;
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
		if (ret < 0)
			return ret;
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->disk_bytenr = disk_bytenr;
	entry->num_bytes = num_bytes;
	entry->disk_num_bytes = disk_num_bytes;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(&inode->vfs_inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = ret;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio) {
		percpu_counter_add_batch(&fs_info->dio_bytes, num_bytes,
					 fs_info->delalloc_batch);
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
	}

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);

	trace_btrfs_ordered_extent_add(&inode->vfs_inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			    "inconsistency in ordered tree at offset %llu",
			    file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	return 0;
}

int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
			     u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes,
			     int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct btrfs_inode *inode, u64 file_offset,
				 u64 disk_bytenr, u64 num_bytes,
				 u64 disk_num_bytes, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct btrfs_inode *inode, u64 file_offset,
				      u64 disk_bytenr, u64 num_bytes,
				      u64 disk_num_bytes, int type,
				      int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 0,
					  compress_type);
}

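/*
 * Hedged sketch (illustrative values only, no real caller): a
 * compressed write where the on-disk footprint (disk_num_bytes) is
 * smaller than the file range it expands to (num_bytes).
 */
static int __maybe_unused add_compressed_example(struct btrfs_inode *inode,
						 u64 file_offset, u64 bytenr)
{
	return btrfs_add_ordered_extent_compress(inode, file_offset, bytenr,
						 128 * 1024, 16 * 1024,
						 BTRFS_ORDERED_COMPRESSED,
						 BTRFS_COMPRESS_ZLIB);
}
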
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(entry->inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size,
		      entry->file_offset + entry->num_bytes);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
			   dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

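/*
 * Hedged sketch of the cached fast path above (hypothetical caller):
 * a reference obtained from btrfs_lookup_ordered_extent can be passed
 * back in via *cached so the tree search is skipped.  On completion the
 * helper takes a second reference for the caller, hence the extra put.
 */
static void __maybe_unused dec_test_cached_example(struct inode *inode,
						   u64 offset, u64 bytes)
{
	struct btrfs_ordered_extent *ordered;

	ordered = btrfs_lookup_ordered_extent(BTRFS_I(inode), offset);
	if (!ordered)
		return;
	if (btrfs_dec_test_ordered_pending(inode, &ordered, offset, bytes, 1)) {
		/* completed: put the extra ref taken for us */
		btrfs_put_ordered_extent(ordered);
	}
	btrfs_put_ordered_extent(ordered);
}
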
/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	struct rb_node *node;

	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root)
		btrfs_delalloc_release_metadata(btrfs_inode, entry->num_bytes,
						false);

	if (test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		percpu_counter_add_batch(&fs_info->dio_bytes, -entry->num_bytes,
					 fs_info->delalloc_batch);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

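/*
 * Illustrative call (assumption: same pattern as the sync and balance
 * paths): flush and wait for every ordered extent in the root, with no
 * limit on the count and over the whole disk byte range.
 */
static u64 __maybe_unused wait_all_ordered_example(struct btrfs_root *root)
{
	return btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
}
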
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the IO completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately.  Wait first
	 * for any ordered extents that haven't completed yet.  This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

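/*
 * Hedged sketch (hypothetical helper): wait on the entire file.  A
 * length of (u64)-1 overflows start + len, which the function above
 * clamps to INT_LIMIT(loff_t).
 */
static int __maybe_unused wait_whole_file_example(struct inode *inode)
{
	return btrfs_wait_ordered_range(inode, 0, (u64)-1);
}
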
/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u8 *sum, int len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = btrfs_inode_sectorsize(inode);
	const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(BTRFS_I(inode), offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i * csum_size,
			       num_sectors * csum_size);

			index += (int)num_sectors * csum_size;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}

/*
 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
 * pending ordered extents in it are run to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range.  It's the caller's responsibility to free the
 *                cached state.
 *
 * This function always returns with the given range locked, ensuring that
 * after it's called no ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent_bits(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent_cached(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}
}

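/*
 * Hedged usage sketch (hypothetical caller, PAGE_SIZE range chosen for
 * illustration): lock a range with no ordered extents pending, operate
 * on the now-stable pages, then unlock using the cached state.
 */
static void __maybe_unused lock_flush_example(struct btrfs_inode *inode,
					      u64 start)
{
	struct extent_state *cached = NULL;
	const u64 end = start + PAGE_SIZE - 1;

	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached);
	/* read or otherwise use the range here */
	unlock_extent_cached(&inode->io_tree, start, end, &cached);
}
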
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
					sizeof(struct btrfs_ordered_extent), 0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}