// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	btrfs_panic(fs_info, errno,
		    "Inconsistency in ordered tree at offset %llu", offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

/*
 * find the first ordered struct that contains this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->trans_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&BTRFS_I(inode)->lock);
	btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
	spin_unlock(&BTRFS_I(inode)->lock);

	return 0;
}

int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

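/*
 * Illustrative caller (a sketch under assumptions, not code from this
 * file): a COW writeback path that has just reserved an extent would
 * typically record it with something like:
 *
 *	ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
 *				       ram_size, cur_alloc_size, 0);
 *
 * where ins/ram_size/cur_alloc_size come from the extent allocator and
 * type 0 means a plain (non-nocow, non-prealloc) write; the ordered
 * extent is then completed from the bio endio path.
 */
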
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished. If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file. The IO may span ordered extents. If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete. This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
					 struct btrfs_ordered_extent **cached,
					 u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
			   dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/*
		 * Implicit memory barrier after test_and_set_bit
		 */
		if (waitqueue_active(&entry->wait))
			wake_up(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

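/*
 * Illustrative caller (a sketch, not code from this file): because
 * *file_offset is advanced past each accounted range, a direct IO
 * completion path can walk an IO that spans several ordered extents,
 * assuming the whole range is covered by ordered extents:
 *
 *	u64 cur = offset;
 *
 *	while (cur < offset + bytes) {
 *		struct btrfs_ordered_extent *ordered = NULL;
 *
 *		if (btrfs_dec_test_first_ordered_pending(inode, &ordered,
 *				&cur, offset + bytes - cur, uptodate))
 *			finish_one(ordered);	-- hypothetical helper that
 *						-- also puts the cached ref
 *	}
 */
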
/*
 * this is used to account for finished IO across a given range
 * of the file. The IO should not span ordered extents. If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/*
		 * Implicit memory barrier after test_and_set_bit
		 */
		if (waitqueue_active(&entry->wait))
			wake_up(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/* Needs to either be called under a log transaction or the log_mutex */
void btrfs_get_logged_extents(struct btrfs_inode *inode,
			      struct list_head *logged_list,
			      const loff_t start,
			      const loff_t end)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct rb_node *n;
	struct rb_node *prev;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	n = __tree_search(&tree->tree, end, &prev);
	if (!n)
		n = prev;
	for (; n; n = rb_prev(n)) {
		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		if (ordered->file_offset > end)
			continue;
		if (entry_end(ordered) <= start)
			break;
		if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;
		list_add(&ordered->log_list, logged_list);
		refcount_inc(&ordered->refs);
	}
	spin_unlock_irq(&tree->lock);
}

void btrfs_put_logged_extents(struct list_head *logged_list)
{
	struct btrfs_ordered_extent *ordered;

	while (!list_empty(logged_list)) {
		ordered = list_first_entry(logged_list,
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		btrfs_put_ordered_extent(ordered);
	}
}

void btrfs_submit_logged_extents(struct list_head *logged_list,
				 struct btrfs_root *log)
{
	int index = log->log_transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	list_splice_tail(logged_list, &log->logged_list[index]);
	spin_unlock_irq(&log->log_extents_lock[index]);
}

void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
			       struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		struct inode *inode;
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		inode = ordered->inode;
		spin_unlock_irq(&log->log_extents_lock[index]);

		if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
			u64 start = ordered->file_offset;
			u64 end = ordered->file_offset + ordered->len - 1;

			WARN_ON(!inode);
			filemap_fdatawrite_range(inode->i_mapping, start, end);
		}
		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
						   &ordered->flags));

		/*
		 * In order to keep us from losing our ordered extent
		 * information when committing the transaction we have to make
		 * sure that any logged extents are completed when we go to
		 * commit the transaction. To do this we simply increase the
		 * current transaction's pending_ordered counter and decrement
		 * it when the ordered extent completes.
		 */
		if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
			struct btrfs_ordered_inode_tree *tree;

			tree = &BTRFS_I(inode)->ordered_tree;
			spin_lock_irq(&tree->lock);
			if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
				set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
				atomic_inc(&trans->transaction->pending_ordered);
			}
			spin_unlock_irq(&tree->lock);
		}
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

/*
 * used to drop a reference on an ordered extent. This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->log_list));
		ASSERT(list_empty(&entry->trans_list));
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * remove an ordered extent from the tree. No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	struct rb_node *node;
	bool dec_pending_ordered = false;

	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root)
		btrfs_delalloc_release_metadata(btrfs_inode, entry->len, false);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	if (test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags))
		dec_pending_ordered = true;
	spin_unlock_irq(&tree->lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (dec_pending_ordered) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans);
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

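/*
 * Lifecycle sketch (a summary comment; the finishing step is assumed to
 * live in inode.c as in mainline): a typical ordered extent flows through
 *
 *	btrfs_add_ordered_extent()		-- delalloc writeback starts
 *	btrfs_dec_test_ordered_pending()	-- per-bio completion accounting
 *	btrfs_finish_ordered_io()		-- inserts the file extent item
 *	btrfs_remove_ordered_extent()		-- unlinks it from the tree
 *	btrfs_put_ordered_extent()		-- drops the final reference
 */
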
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root. This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->start ||
		    ordered->start + ordered->disk_len <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_flush_delalloc_helper,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			     const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 total_done = 0;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_fs_root(root);
		total_done += done;

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX) {
			nr -= done;
		}
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);

	return total_done;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback. We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (ret || end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * find an ordered extent corresponding to file_offset. return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

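/*
 * Illustrative pairing (a sketch, not code from this file): a successful
 * lookup returns with a reference held, so a caller that wants to wait
 * out the extent would do something like:
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, pos);
 *	if (ordered) {
 *		btrfs_start_ordered_extent(inode, ordered, 1);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */
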
/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

bool btrfs_have_ordered_extents_in_range(struct inode *inode,
					 u64 file_offset,
					 u64 len)
{
	struct btrfs_ordered_extent *oe;

	oe = btrfs_lookup_ordered_range(BTRFS_I(inode), file_offset, len);
	if (oe) {
		btrfs_put_ordered_extent(oe);
		return true;
	}
	return false;
}

/*
 * lookup and return any extent before 'file_offset'. NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size. i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size = i_size_read(inode);
	struct rb_node *node;
	struct rb_node *prev = NULL;
	struct btrfs_ordered_extent *test;
	int ret = 1;
	u64 orig_offset = offset;

	spin_lock_irq(&tree->lock);
	if (ordered) {
		offset = entry_end(ordered);
		if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
			offset = min(offset,
				     ordered->file_offset +
				     ordered->truncated_len);
	} else {
		offset = ALIGN(offset, btrfs_inode_sectorsize(inode));
	}
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/*
	 * truncate file.
	 * If ordered is not NULL, then this is called from endio and
	 * disk_i_size will be updated by either truncate itself or any
	 * in-flight IOs which are inside the disk_i_size.
	 *
	 * Because btrfs_setsize() may set i_size with disk_i_size if truncate
	 * fails somehow, we need to make sure we have a precise disk_i_size by
	 * updating it as usual.
	 *
	 */
	if (!ordered && disk_i_size > i_size) {
		BTRFS_I(inode)->disk_i_size = orig_offset;
		ret = 0;
		goto out;
	}

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size == i_size)
		goto out;

	/*
	 * We still need to update disk_i_size if outstanding_isize is greater
	 * than disk_i_size.
	 */
	if (offset <= disk_i_size &&
	    (!ordered || ordered->outstanding_isize <= disk_i_size))
		goto out;

	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	if (ordered) {
		node = rb_prev(&ordered->rb_node);
	} else {
		prev = tree_search(tree, offset);
		/*
		 * we insert file extents without involving the ordered
		 * struct, so there should be no ordered struct covering
		 * this offset
		 */
		if (prev) {
			test = rb_entry(prev, struct btrfs_ordered_extent,
					rb_node);
			BUG_ON(offset_in_entry(test, offset));
		}
		node = prev;
	}
	for (; node; node = rb_prev(node)) {
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		/* We treat this entry as if it doesn't exist */
		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
			continue;

		if (entry_end(test) <= disk_i_size)
			break;
		if (test->file_offset >= i_size)
			break;

		/*
		 * We don't update disk_i_size now, so record this pending
		 * i_size; otherwise we would lose track of the real i_size.
		 */
		if (test->outstanding_isize < offset)
			test->outstanding_isize = offset;
		if (ordered &&
		    ordered->outstanding_isize > test->outstanding_isize)
			test->outstanding_isize = ordered->outstanding_isize;
		goto out;
	}
	new_i_size = min_t(u64, offset, i_size);

	/*
	 * Some ordered extents may have completed before the current one;
	 * in that case the real i_size is held in ->outstanding_isize.
	 */
	if (ordered && ordered->outstanding_isize > new_i_size)
		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
	BTRFS_I(inode)->disk_i_size = new_i_size;
	ret = 0;
out:
	/*
	 * We need to do this because we can't remove ordered extents until
	 * after the i_disk_size has been updated and then the inode has been
	 * updated to reflect the change, so we need to tell anybody who finds
	 * this ordered extent that we've already done all the real work, we
	 * just haven't completed all the other work.
	 */
	if (ordered)
		set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
	spin_unlock_irq(&tree->lock);
	return ret;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum. This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u32 *sum, int len)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = btrfs_inode_sectorsize(inode);
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i,
			       num_sectors);

			index += (int)num_sectors;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}

int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
					sizeof(struct btrfs_ordered_extent), 0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"
#include "subpage.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

/*
 * find the first ordered struct that contains this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (in_range(file_offset, entry->file_offset, entry->num_bytes))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/*
 * Allocate and add a new ordered_extent into the per-inode tree.
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
				      u64 disk_bytenr, u64 num_bytes,
				      u64 disk_num_bytes, int type, int dio,
				      int compress_type)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
	int ret;

	if (type == BTRFS_ORDERED_NOCOW || type == BTRFS_ORDERED_PREALLOC) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
		if (ret < 0)
			return ret;
		ret = 0;
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
		if (ret < 0)
			return ret;
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->disk_bytenr = disk_bytenr;
	entry->num_bytes = num_bytes;
	entry->disk_num_bytes = disk_num_bytes;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(&inode->vfs_inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = ret;
	entry->physical = (u64)-1;

	ASSERT(type == BTRFS_ORDERED_REGULAR ||
	       type == BTRFS_ORDERED_NOCOW ||
	       type == BTRFS_ORDERED_PREALLOC ||
	       type == BTRFS_ORDERED_COMPRESSED);
	set_bit(type, &entry->flags);

	percpu_counter_add_batch(&fs_info->ordered_bytes, num_bytes,
				 fs_info->delalloc_batch);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
				"inconsistency in ordered tree at offset %llu",
				file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	return 0;
}

int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
			     u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes,
			     int type)
{
	ASSERT(type == BTRFS_ORDERED_REGULAR ||
	       type == BTRFS_ORDERED_NOCOW ||
	       type == BTRFS_ORDERED_PREALLOC);
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct btrfs_inode *inode, u64 file_offset,
				 u64 disk_bytenr, u64 num_bytes,
				 u64 disk_num_bytes, int type)
{
	ASSERT(type == BTRFS_ORDERED_REGULAR ||
	       type == BTRFS_ORDERED_NOCOW ||
	       type == BTRFS_ORDERED_PREALLOC);
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct btrfs_inode *inode, u64 file_offset,
				      u64 disk_bytenr, u64 num_bytes,
				      u64 disk_num_bytes, int compress_type)
{
	ASSERT(compress_type != BTRFS_COMPRESS_NONE);
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes,
					  BTRFS_ORDERED_COMPRESSED, 0,
					  compress_type);
}

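/*
 * Illustrative caller (a sketch under assumptions, not code from this
 * file): a COW writeback path that just allocated an extent would
 * typically pin it with something like:
 *
 *	ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
 *				       ram_size, ins.offset,
 *				       BTRFS_ORDERED_REGULAR);
 *
 * where ins is the key returned by the extent allocator; the ordered
 * extent is then completed from the bio endio path.
 */
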
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished. If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(entry->inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

/*
 * Mark all ordered extent IO inside the specified range as finished.
 *
 * @page:	 The involved page for the operation.
 *		 For uncompressed buffered IO, the page status also needs to be
 *		 updated to indicate whether the pending ordered io is finished.
 *		 Can be NULL for direct IO and compressed write.
 *		 For these cases, callers are ensured they won't execute the
 *		 endio function twice.
 * @finish_func: The function to be executed when all the IO of an ordered
 *		 extent is finished.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
				    struct page *page, u64 file_offset,
				    u64 num_bytes, btrfs_func_t finish_func,
				    bool uptodate)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_workqueue *wq;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	u64 cur = file_offset;

	if (btrfs_is_free_space_inode(inode))
		wq = fs_info->endio_freespace_worker;
	else
		wq = fs_info->endio_write_workers;

	if (page)
		ASSERT(page->mapping && page_offset(page) <= file_offset &&
		       file_offset + num_bytes <= page_offset(page) + PAGE_SIZE);

	spin_lock_irqsave(&tree->lock, flags);
	while (cur < file_offset + num_bytes) {
		u64 entry_end;
		u64 end;
		u32 len;

		node = tree_search(tree, cur);
		/* No ordered extents at all */
		if (!node)
			break;

		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		entry_end = entry->file_offset + entry->num_bytes;
		/*
		 * |<-- OE --->|  |
		 *		  cur
		 * Go to next OE.
		 */
		if (cur >= entry_end) {
			node = rb_next(node);
			/* No more ordered extents, exit */
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_ordered_extent,
					 rb_node);

			/* Go to next ordered extent and continue */
			cur = entry->file_offset;
			continue;
		}
		/*
		 * |	|<--- OE --->|
		 * cur
		 * Go to the start of OE.
		 */
		if (cur < entry->file_offset) {
			cur = entry->file_offset;
			continue;
		}

		/*
		 * Now we are definitely inside one ordered extent.
		 *
		 * |<--- OE --->|
		 *	|
		 *	cur
		 */
		end = min(entry->file_offset + entry->num_bytes,
			  file_offset + num_bytes) - 1;
		ASSERT(end + 1 - cur < U32_MAX);
		len = end + 1 - cur;

		if (page) {
			/*
			 * Ordered (Private2) bit indicates whether we still
			 * have pending io unfinished for the ordered extent.
			 *
			 * If there's no such bit, we need to skip to next range.
			 */
			if (!btrfs_page_test_ordered(fs_info, page, cur, len)) {
				cur += len;
				continue;
			}
			btrfs_page_clear_ordered(fs_info, page, cur, len);
		}

		/* Now we're fine to update the accounting */
		if (unlikely(len > entry->bytes_left)) {
			WARN_ON(1);
			btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%u left=%llu",
				   inode->root->root_key.objectid,
				   btrfs_ino(inode),
				   entry->file_offset,
				   entry->num_bytes,
				   len, entry->bytes_left);
			entry->bytes_left = 0;
		} else {
			entry->bytes_left -= len;
		}

		if (!uptodate)
			set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

		/*
		 * All the IO of the ordered extent is finished, we need to queue
		 * the finish_func to be executed.
		 */
		if (entry->bytes_left == 0) {
			set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
			cond_wake_up(&entry->wait);
			refcount_inc(&entry->refs);
			spin_unlock_irqrestore(&tree->lock, flags);
			btrfs_init_work(&entry->work, finish_func, NULL, NULL);
			btrfs_queue_work(wq, &entry->work);
			spin_lock_irqsave(&tree->lock, flags);
		}
		cur += len;
	}
	spin_unlock_irqrestore(&tree->lock, flags);
}

/*
 * Finish IO for one ordered extent across a given range. The range can only
 * contain one ordered extent.
 *
 * @cached:	 The cached ordered extent. If not NULL, we can skip the tree
 *		 search and use the ordered extent directly.
 *		 Will also be used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO
 * @io_size:	 Length of the finished IO range
 * @uptodate:	 If the IO finishes without problem
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents.
 * Thus the caller should ensure the range doesn't cross ordered extents.
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				    struct btrfs_ordered_extent **cached,
				    u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	bool finished = false;

	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		goto out;

	if (io_size > entry->bytes_left)
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);

	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		/*
		 * Ensure only one caller can set the flag and finished_ret
		 * accordingly
		 */
		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	}
out:
	if (finished && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return finished;
}

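/*
 * Illustrative caller (an assumed pattern, not code from this file):
 * for a range known to sit inside a single ordered extent, an endio
 * path can finish it and pick up the reference stored in @cached:
 *
 *	struct btrfs_ordered_extent *ordered = NULL;
 *
 *	if (btrfs_dec_test_ordered_pending(inode, &ordered, file_offset,
 *					   io_size, uptodate)) {
 *		finish_ordered_io(ordered);	-- hypothetical finisher
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */
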
510/*
511 * used to drop a reference on an ordered extent. This will free
512 * the extent if the last reference is dropped
513 */
514void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
515{
516 struct list_head *cur;
517 struct btrfs_ordered_sum *sum;
518
519 trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);
520
521 if (refcount_dec_and_test(&entry->refs)) {
522 ASSERT(list_empty(&entry->root_extent_list));
523 ASSERT(list_empty(&entry->log_list));
524 ASSERT(RB_EMPTY_NODE(&entry->rb_node));
525 if (entry->inode)
526 btrfs_add_delayed_iput(entry->inode);
527 while (!list_empty(&entry->list)) {
528 cur = entry->list.next;
529 sum = list_entry(cur, struct btrfs_ordered_sum, list);
530 list_del(&sum->list);
531 kvfree(sum);
532 }
533 kmem_cache_free(btrfs_ordered_extent_cache, entry);
534 }
535}
536
537/*
538 * remove an ordered extent from the tree. No references are dropped
539 * and waiters are woken up.
540 */
541void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
542 struct btrfs_ordered_extent *entry)
543{
544 struct btrfs_ordered_inode_tree *tree;
545 struct btrfs_root *root = btrfs_inode->root;
546 struct btrfs_fs_info *fs_info = root->fs_info;
547 struct rb_node *node;
548 bool pending;
549
550 /* This is paired with btrfs_add_ordered_extent. */
551 spin_lock(&btrfs_inode->lock);
552 btrfs_mod_outstanding_extents(btrfs_inode, -1);
553 spin_unlock(&btrfs_inode->lock);
554 if (root != fs_info->tree_root)
555 btrfs_delalloc_release_metadata(btrfs_inode, entry->num_bytes,
556 false);
557
558 percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
559 fs_info->delalloc_batch);
560
561 tree = &btrfs_inode->ordered_tree;
562 spin_lock_irq(&tree->lock);
563 node = &entry->rb_node;
564 rb_erase(node, &tree->tree);
565 RB_CLEAR_NODE(node);
566 if (tree->last == node)
567 tree->last = NULL;
568 set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
569 pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
570 spin_unlock_irq(&tree->lock);
571
572 /*
573 * The current running transaction is waiting on us, we need to let it
574 * know that we're complete and wake it up.
575 */
576 if (pending) {
577 struct btrfs_transaction *trans;
578
579 /*
580 * The checks for trans are just a formality, it should be set,
581 * but if it isn't we don't want to deref/assert under the spin
582 * lock, so be nice and check if trans is set, but ASSERT() so
583 * if it isn't set a developer will notice.
584 */
585 spin_lock(&fs_info->trans_lock);
586 trans = fs_info->running_transaction;
587 if (trans)
588 refcount_inc(&trans->use_count);
589 spin_unlock(&fs_info->trans_lock);
590
591 ASSERT(trans);
592 if (trans) {
593 if (atomic_dec_and_test(&trans->pending_ordered))
594 wake_up(&trans->pending_wait);
595 btrfs_put_transaction(trans);
596 }
597 }
598
599 spin_lock(&root->ordered_extent_lock);
600 list_del_init(&entry->root_extent_list);
601 root->nr_ordered_extents--;
602
603 trace_btrfs_ordered_extent_remove(btrfs_inode, entry);
604
605 if (!root->nr_ordered_extents) {
606 spin_lock(&fs_info->ordered_root_lock);
607 BUG_ON(list_empty(&root->ordered_root));
608 list_del_init(&root->ordered_root);
609 spin_unlock(&fs_info->ordered_root_lock);
610 }
611 spin_unlock(&root->ordered_extent_lock);
612 wake_up(&entry->wait);
613}
614
615static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
616{
617 struct btrfs_ordered_extent *ordered;
618
619 ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
620 btrfs_start_ordered_extent(ordered, 1);
621 complete(&ordered->completion);
622}
623
624/*
625 * wait for all the ordered extents in a root. This is done when balancing
626 * space between drives.
627 */
628u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
629 const u64 range_start, const u64 range_len)
630{
631 struct btrfs_fs_info *fs_info = root->fs_info;
632 LIST_HEAD(splice);
633 LIST_HEAD(skipped);
634 LIST_HEAD(works);
635 struct btrfs_ordered_extent *ordered, *next;
636 u64 count = 0;
637 const u64 range_end = range_start + range_len;
638
639 mutex_lock(&root->ordered_extent_mutex);
640 spin_lock(&root->ordered_extent_lock);
641 list_splice_init(&root->ordered_extents, &splice);
642 while (!list_empty(&splice) && nr) {
643 ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
644 root_extent_list);
645
646 if (range_end <= ordered->disk_bytenr ||
647 ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
648 list_move_tail(&ordered->root_extent_list, &skipped);
649 cond_resched_lock(&root->ordered_extent_lock);
650 continue;
651 }
652
653 list_move_tail(&ordered->root_extent_list,
654 &root->ordered_extents);
655 refcount_inc(&ordered->refs);
656 spin_unlock(&root->ordered_extent_lock);
657
658 btrfs_init_work(&ordered->flush_work,
659 btrfs_run_ordered_extent_work, NULL, NULL);
660 list_add_tail(&ordered->work_list, &works);
661 btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);
662
663 cond_resched();
664 spin_lock(&root->ordered_extent_lock);
665 if (nr != U64_MAX)
666 nr--;
667 count++;
668 }
669 list_splice_tail(&skipped, &root->ordered_extents);
670 list_splice_tail(&splice, &root->ordered_extents);
671 spin_unlock(&root->ordered_extent_lock);
672
673 list_for_each_entry_safe(ordered, next, &works, work_list) {
674 list_del_init(&ordered->work_list);
675 wait_for_completion(&ordered->completion);
676 btrfs_put_ordered_extent(ordered);
677 cond_resched();
678 }
679 mutex_unlock(&root->ordered_extent_mutex);
680
681 return count;
682}
683
684void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
685 const u64 range_start, const u64 range_len)
686{
687 struct btrfs_root *root;
688 struct list_head splice;
689 u64 done;
690
691 INIT_LIST_HEAD(&splice);
692
693 mutex_lock(&fs_info->ordered_operations_mutex);
694 spin_lock(&fs_info->ordered_root_lock);
695 list_splice_init(&fs_info->ordered_roots, &splice);
696 while (!list_empty(&splice) && nr) {
697 root = list_first_entry(&splice, struct btrfs_root,
698 ordered_root);
699 root = btrfs_grab_root(root);
700 BUG_ON(!root);
701 list_move_tail(&root->ordered_root,
702 &fs_info->ordered_roots);
703 spin_unlock(&fs_info->ordered_root_lock);
704
705 done = btrfs_wait_ordered_extents(root, nr,
706 range_start, range_len);
707 btrfs_put_root(root);
708
709 spin_lock(&fs_info->ordered_root_lock);
710 if (nr != U64_MAX) {
711 nr -= done;
712 }
713 }
714 list_splice_tail(&splice, &fs_info->ordered_roots);
715 spin_unlock(&fs_info->ordered_root_lock);
716 mutex_unlock(&fs_info->ordered_operations_mutex);
717}
718
719/*
720 * Used to start IO or wait for a given ordered extent to finish.
721 *
722 * If wait is one, this effectively waits on page writeback for all the pages
723 * in the extent, and it waits on the io completion code to insert
724 * metadata into the btree corresponding to the extent
725 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;
	struct btrfs_inode *inode = BTRFS_I(entry->inode);

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * Pages in the range can be dirty, clean or under writeback. We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them.
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}
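
/*
 * Illustrative sketch, not part of the original file: the common
 * lookup/start/put pattern used by callers that must not proceed while
 * an ordered extent covers an offset (the same pattern appears in
 * btrfs_lock_and_flush_ordered_range() below). 'inode' and 'offset' are
 * hypothetical caller-supplied values:
 *
 *	struct btrfs_ordered_extent *ordered;
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, offset);
 *	if (ordered) {
 *		btrfs_start_ordered_extent(ordered, 1);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */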

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/*
	 * Start IO across the range first to instantiate any delalloc
	 * extents.
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(ordered, 1);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}
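
/*
 * Illustrative sketch, not part of the original file: an fsync-like
 * caller making sure a byte range is fully on disk before relying on it.
 * 'inode', 'pos' and 'count' are hypothetical caller values:
 *
 *	int err;
 *
 *	err = btrfs_wait_ordered_range(inode, pos, count);
 *	if (err)
 *		return err;
 *
 * A non-zero return means either writeback failed (ret_wb) or one of the
 * ordered extents in the range hit an IO error.
 */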

/*
 * Find an ordered extent that covers @file_offset. Return NULL if
 * nothing is found, otherwise take a reference on the extent and
 * return it.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;

	tree = &inode->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		entry = NULL;
	if (entry)
		refcount_inc(&entry->refs);
out:
	spin_unlock_irqrestore(&tree->lock, flags);
	return entry;
}

/*
 * Since the DIO code tries to lock a wide area we need to look for any
 * ordered extents that exist in the range, rather than just the start of
 * the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}
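
/*
 * Illustrative sketch, not part of the original file: a DIO-style caller
 * draining every ordered extent that conflicts with a range before
 * issuing its own IO. 'inode', 'pos' and 'len' are hypothetical caller
 * values:
 *
 *	struct btrfs_ordered_extent *ordered;
 *
 *	while ((ordered = btrfs_lookup_ordered_range(inode, pos, len))) {
 *		btrfs_start_ordered_extent(ordered, 1);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */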

/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *n;

	ASSERT(inode_is_locked(&inode->vfs_inode));

	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;

		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		refcount_inc(&ordered->refs);
	}
	spin_unlock_irq(&tree->lock);
}

/*
 * Lookup and return any extent before 'file_offset'. NULL is returned
 * if none is found.
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Lookup the first ordered extent that overlaps the range
 * [@file_offset, @file_offset + @len).
 *
 * Unlike btrfs_lookup_first_ordered_extent(), this one will never return
 * an ordered extent that does not overlap the range.
 * Unlike btrfs_lookup_ordered_extent(), it guarantees that the first
 * ordered extent overlapping the range is the one returned.
 */
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct rb_node *cur;
	struct rb_node *prev;
	struct rb_node *next;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&tree->lock);
	node = tree->tree.rb_node;
	/*
	 * We can't use tree_search() here, because it consults tree->last
	 * and would skew the search order, and __tree_search() can't return
	 * the adjacent ordered extents either, so do our own search.
	 */
	while (node) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset) {
			node = node->rb_left;
		} else if (file_offset >= entry_end(entry)) {
			node = node->rb_right;
		} else {
			/*
			 * Direct hit, got an ordered extent that covers
			 * @file_offset.
			 */
			goto out;
		}
	}
	if (!entry) {
		/* Empty tree */
		goto out;
	}

	cur = &entry->rb_node;
	/* We got an entry around @file_offset, check adjacent entries */
	if (entry->file_offset < file_offset) {
		prev = cur;
		next = rb_next(cur);
	} else {
		prev = rb_prev(cur);
		next = cur;
	}
	if (prev) {
		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	if (next) {
		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	/* No ordered extent in the range */
	entry = NULL;
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}
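
/*
 * Illustrative sketch, not part of the original file. Given two ordered
 * extents A and B and a search range:
 *
 *	range:     [===============)
 *	extents:       [ A )  [ B )
 *
 * btrfs_lookup_first_ordered_range() returns A, the first extent
 * overlapping the range, whereas btrfs_lookup_ordered_extent() called
 * with the range start would return NULL here, since no extent covers
 * that exact offset.
 */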

/*
 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure
 * all pending ordered extents in it are run to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * This function always returns with the given range locked, ensuring that
 * after it's called no ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent_bits(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function.
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent_cached(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}
}
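
/*
 * Illustrative sketch, not part of the original file: a read-side caller
 * that needs the range quiescent while it inspects extent state, and
 * must drop the lock itself afterwards. 'inode', 'start' and 'end' are
 * hypothetical caller values:
 *
 *	struct extent_state *cached = NULL;
 *
 *	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached);
 *	... inspect the now-stable range ...
 *	unlock_extent_cached(&inode->io_tree, start, end, &cached);
 */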

static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
				u64 len)
{
	struct inode *inode = ordered->inode;
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	u64 file_offset = ordered->file_offset + pos;
	u64 disk_bytenr = ordered->disk_bytenr + pos;
	u64 num_bytes = len;
	u64 disk_num_bytes = len;
	int type;
	unsigned long flags_masked = ordered->flags & ~(1 << BTRFS_ORDERED_DIRECT);
	int compress_type = ordered->compress_type;
	unsigned long weight;
	int ret;

	weight = hweight_long(flags_masked);
	WARN_ON_ONCE(weight > 1);
	if (!weight)
		type = 0;
	else
		type = __ffs(flags_masked);

	/*
	 * The extent being split is already counted and will be added again
	 * in btrfs_add_ordered_extent_*(). Subtract num_bytes to avoid
	 * double counting.
	 */
	percpu_counter_add_batch(&fs_info->ordered_bytes, -num_bytes,
				 fs_info->delalloc_batch);
	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered->flags)) {
		WARN_ON_ONCE(1);
		ret = btrfs_add_ordered_extent_compress(BTRFS_I(inode),
				file_offset, disk_bytenr, num_bytes,
				disk_num_bytes, compress_type);
	} else if (test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
		ret = btrfs_add_ordered_extent_dio(BTRFS_I(inode), file_offset,
				disk_bytenr, num_bytes, disk_num_bytes, type);
	} else {
		ret = btrfs_add_ordered_extent(BTRFS_I(inode), file_offset,
				disk_bytenr, num_bytes, disk_num_bytes, type);
	}

	return ret;
}

int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
			       u64 post)
{
	struct inode *inode = ordered->inode;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct rb_node *node;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret = 0;

	spin_lock_irq(&tree->lock);
	/* Remove from tree once */
	node = &ordered->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;

	ordered->file_offset += pre;
	ordered->disk_bytenr += pre;
	ordered->num_bytes -= (pre + post);
	ordered->disk_num_bytes -= (pre + post);
	ordered->bytes_left -= (pre + post);

	/* Re-insert the node */
	node = tree_insert(&tree->tree, ordered->file_offset, &ordered->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			"zoned: inconsistency in ordered tree at offset %llu",
			ordered->file_offset);

	spin_unlock_irq(&tree->lock);

	if (pre)
		ret = clone_ordered_extent(ordered, 0, pre);
	if (ret == 0 && post)
		ret = clone_ordered_extent(ordered, pre + ordered->disk_num_bytes,
					   post);

	return ret;
}
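
/*
 * Illustrative sketch, not part of the original file: how a split carves
 * an ordered extent into up to three pieces. The middle piece stays in
 * 'ordered' (shifted forward by 'pre' and shrunk by 'pre' + 'post'),
 * while the 'pre' and 'post' byte counts are cloned into fresh ordered
 * extents:
 *
 *	before:  [............ ordered ............]
 *	after:   [ pre ][ ordered (trimmed) ][ post ]
 *
 * As the "zoned:" panic message above suggests, this is used by the
 * zoned mode, where a write may have to be split.
 */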

int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
					sizeof(struct btrfs_ordered_extent), 0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}