// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "messages.h"
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"
#include "subpage.h"
#include "file.h"
#include "super.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

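/*
 * Return the exclusive end offset of the ordered extent, clamped to
 * (u64)-1 if file_offset + num_bytes would overflow a u64.
 */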
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}

/*
 * Return NULL if the insertion worked, otherwise return the existing node
 * found in the tree.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * Look for a given offset in the tree, and if it can't be found, return the
 * first lesser offset.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

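/*
 * Return 1 if the range [file_offset, file_offset + len) overlaps the given
 * ordered extent, 0 otherwise.
 */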
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

/*
 * Find the first ordered extent that covers this offset, otherwise the
 * first one less than this offset.
 */
static inline struct rb_node *ordered_tree_search(struct btrfs_inode *inode,
						  u64 file_offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (inode->ordered_tree_last) {
		entry = rb_entry(inode->ordered_tree_last, struct btrfs_ordered_extent,
				 rb_node);
		if (in_range(file_offset, entry->file_offset, entry->num_bytes))
			return inode->ordered_tree_last;
	}
	ret = __tree_search(&inode->ordered_tree, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		inode->ordered_tree_last = ret;
	return ret;
}

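/*
 * Allocate and initialize a new ordered extent for the given file range,
 * releasing the qgroup reservation for the range as appropriate for the
 * write type. The extent is not yet inserted into any tree.
 */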
static struct btrfs_ordered_extent *alloc_ordered_extent(
			struct btrfs_inode *inode, u64 file_offset, u64 num_bytes,
			u64 ram_bytes, u64 disk_bytenr, u64 disk_num_bytes,
			u64 offset, unsigned long flags, int compress_type)
{
	struct btrfs_ordered_extent *entry;
	int ret;
	u64 qgroup_rsv = 0;

	if (flags &
	    ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv);
		if (ret < 0)
			return ERR_PTR(ret);
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes, &qgroup_rsv);
		if (ret < 0)
			return ERR_PTR(ret);
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	entry->file_offset = file_offset;
	entry->num_bytes = num_bytes;
	entry->ram_bytes = ram_bytes;
	entry->disk_bytenr = disk_bytenr;
	entry->disk_num_bytes = disk_num_bytes;
	entry->offset = offset;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(&inode->vfs_inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = qgroup_rsv;
	entry->flags = flags;
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	INIT_LIST_HEAD(&entry->bioc_list);
	init_completion(&entry->completion);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	return entry;
}

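/*
 * Insert an allocated ordered extent into the per-inode tree and the
 * per-root list, taking the tree's own reference on it.
 */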
static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct btrfs_inode *inode = BTRFS_I(entry->inode);
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;

	trace_btrfs_ordered_extent_add(inode, entry);

	percpu_counter_add_batch(&fs_info->ordered_bytes, entry->num_bytes,
				 fs_info->delalloc_batch);

	/* One ref for the tree. */
	refcount_inc(&entry->refs);

	spin_lock_irq(&inode->ordered_tree_lock);
	node = tree_insert(&inode->ordered_tree, entry->file_offset,
			   &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			    "inconsistency in ordered tree at offset %llu",
			    entry->file_offset);
	spin_unlock_irq(&inode->ordered_tree_lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
}

/*
 * Add an ordered extent to the per-inode tree.
 *
 * @inode:           Inode that this extent is for.
 * @file_offset:     Logical offset in file where the extent starts.
 * @num_bytes:       Logical length of extent in file.
 * @ram_bytes:       Full length of unencoded data.
 * @disk_bytenr:     Offset of extent on disk.
 * @disk_num_bytes:  Size of extent on disk.
 * @offset:          Offset into unencoded data where file data starts.
 * @flags:           Flags specifying type of extent (1 << BTRFS_ORDERED_*).
 * @compress_type:   Compression algorithm used for data.
 *
 * Most of these parameters correspond to &struct btrfs_file_extent_item. The
 * tree is given a single reference on the ordered extent that was inserted,
 * and the returned pointer is given a second reference.
 *
 * Return: the new ordered extent or error pointer.
 */
struct btrfs_ordered_extent *btrfs_alloc_ordered_extent(
			struct btrfs_inode *inode, u64 file_offset,
			u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
			u64 disk_num_bytes, u64 offset, unsigned long flags,
			int compress_type)
{
	struct btrfs_ordered_extent *entry;

	ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);

	entry = alloc_ordered_extent(inode, file_offset, num_bytes, ram_bytes,
				     disk_bytenr, disk_num_bytes, offset, flags,
				     compress_type);
	if (!IS_ERR(entry))
		insert_ordered_extent(entry);
	return entry;
}

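/*
 * Illustrative sketch (not taken from this file): a plain uncompressed COW
 * write path would allocate an ordered extent for its delalloc range
 * roughly like this, where "start", "len" and "ins" are hypothetical
 * caller-side names:
 *
 *	ordered = btrfs_alloc_ordered_extent(inode, start, len, len,
 *					     ins.objectid, ins.offset, 0,
 *					     1 << BTRFS_ORDERED_REGULAR,
 *					     BTRFS_COMPRESS_NONE);
 *	if (IS_ERR(ordered))
 *		return PTR_ERR(ordered);
 *	...
 *	btrfs_put_ordered_extent(ordered);
 */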
/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished. If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_inode *inode = BTRFS_I(entry->inode);

	spin_lock_irq(&inode->ordered_tree_lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&inode->ordered_tree_lock);
}

static void finish_ordered_fn(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered_extent;

	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
	btrfs_finish_ordered_io(ordered_extent);
}

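/*
 * Account a finished IO range against @ordered with the inode's
 * ordered_tree_lock held. Return true if this completed the whole ordered
 * extent; in that case a reference is taken and the caller must queue the
 * ordered extent for completion.
 */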
static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
				      struct page *page, u64 file_offset,
				      u64 len, bool uptodate)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	lockdep_assert_held(&inode->ordered_tree_lock);

	if (page) {
		ASSERT(page->mapping);
		ASSERT(page_offset(page) <= file_offset);
		ASSERT(file_offset + len <= page_offset(page) + PAGE_SIZE);

		/*
		 * The ordered (Private2) bit indicates whether we still have
		 * pending IO unfinished for the ordered extent.
		 *
		 * If there's no such bit, we need to skip to the next range.
		 */
		if (!btrfs_folio_test_ordered(fs_info, page_folio(page),
					      file_offset, len))
			return false;
		btrfs_folio_clear_ordered(fs_info, page_folio(page), file_offset, len);
	}

	/* Now we're fine to update the accounting. */
	if (WARN_ON_ONCE(len > ordered->bytes_left)) {
		btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%llu left=%llu",
			   inode->root->root_key.objectid, btrfs_ino(inode),
			   ordered->file_offset, ordered->num_bytes,
			   len, ordered->bytes_left);
		ordered->bytes_left = 0;
	} else {
		ordered->bytes_left -= len;
	}

	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);

	if (ordered->bytes_left)
		return false;

	/*
	 * All the IO of the ordered extent is finished, we need to queue
	 * the finish_func to be executed.
	 */
	set_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags);
	cond_wake_up(&ordered->wait);
	refcount_inc(&ordered->refs);
	trace_btrfs_ordered_extent_mark_finished(inode, ordered);
	return true;
}

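/*
 * Queue the completion work for a fully written ordered extent on the
 * appropriate endio workqueue (free space inodes get their own workqueue).
 */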
static void btrfs_queue_ordered_fn(struct btrfs_ordered_extent *ordered)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_workqueue *wq = btrfs_is_free_space_inode(inode) ?
		fs_info->endio_freespace_worker : fs_info->endio_write_workers;

	btrfs_init_work(&ordered->work, finish_ordered_fn, NULL);
	btrfs_queue_work(wq, &ordered->work);
}

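/*
 * Mark a range of one ordered extent as finished and, if that completed the
 * whole extent, queue it for final processing. Return true if the completion
 * work was queued.
 */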
bool btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
				 struct page *page, u64 file_offset, u64 len,
				 bool uptodate)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	unsigned long flags;
	bool ret;

	trace_btrfs_finish_ordered_extent(inode, file_offset, len, uptodate);

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	ret = can_finish_ordered_extent(ordered, page, file_offset, len, uptodate);
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);

	if (ret)
		btrfs_queue_ordered_fn(ordered);
	return ret;
}

/*
 * Mark all ordered extent IO inside the specified range finished.
 *
 * @page:	 The involved page for the operation.
 *		 For uncompressed buffered IO, the page status also needs to be
 *		 updated to indicate whether the pending ordered IO is finished.
 *		 Can be NULL for direct IO and compressed writes.
 *		 For those cases, callers ensure they won't execute the endio
 *		 function twice.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
				    struct page *page, u64 file_offset,
				    u64 num_bytes, bool uptodate)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	u64 cur = file_offset;

	trace_btrfs_writepage_end_io_hook(inode, file_offset,
					  file_offset + num_bytes - 1,
					  uptodate);

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	while (cur < file_offset + num_bytes) {
		u64 entry_end;
		u64 end;
		u32 len;

		node = ordered_tree_search(inode, cur);
		/* No ordered extents at all */
		if (!node)
			break;

		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		entry_end = entry->file_offset + entry->num_bytes;
		/*
		 * |<-- OE --->|  |
		 *		  cur
		 * Go to next OE.
		 */
		if (cur >= entry_end) {
			node = rb_next(node);
			/* No more ordered extents, exit */
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_ordered_extent,
					 rb_node);

			/* Go to next ordered extent and continue */
			cur = entry->file_offset;
			continue;
		}
		/*
		 * |	|<--- OE --->|
		 * cur
		 * Go to the start of OE.
		 */
		if (cur < entry->file_offset) {
			cur = entry->file_offset;
			continue;
		}

		/*
		 * Now we are definitely inside one ordered extent.
		 *
		 * |<--- OE --->|
		 *	|
		 *	cur
		 */
		end = min(entry->file_offset + entry->num_bytes,
			  file_offset + num_bytes) - 1;
		ASSERT(end + 1 - cur < U32_MAX);
		len = end + 1 - cur;

		if (can_finish_ordered_extent(entry, page, cur, len, uptodate)) {
			spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
			btrfs_queue_ordered_fn(entry);
			spin_lock_irqsave(&inode->ordered_tree_lock, flags);
		}
		cur += len;
	}
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
}

/*
 * Finish IO for one ordered extent across a given range. The range can only
 * contain one ordered extent.
 *
 * @cached:	 The cached ordered extent. If not NULL, we can skip the tree
 *		 search and use the ordered extent directly.
 *		 Will also be used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO.
 * @io_size:	 Length of the finished IO range.
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents.
 * Thus the caller should ensure the range doesn't cross ordered extents.
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				    struct btrfs_ordered_extent **cached,
				    u64 file_offset, u64 io_size)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	bool finished = false;

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		goto out;

	if (io_size > entry->bytes_left)
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);

	entry->bytes_left -= io_size;

	if (entry->bytes_left == 0) {
		/*
		 * Ensure only one caller can set the flag and report the
		 * extent as finished.
		 */
		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	}
out:
	if (finished && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
	}
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
	return finished;
}

/*
 * Drop a reference on an ordered extent. This will free the extent if the
 * last reference is dropped.
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(list_empty(&entry->log_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(BTRFS_I(entry->inode));
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * Remove an ordered extent from the tree. No references are dropped and
 * waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_root *root = btrfs_inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	bool pending;
	bool freespace_inode;

	/*
	 * If this is a free space inode the thread has not acquired the ordered
	 * extents lockdep map.
	 */
	freespace_inode = btrfs_is_free_space_inode(btrfs_inode);

	btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
	/* This is paired with btrfs_alloc_ordered_extent(). */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root) {
		u64 release;

		if (test_bit(BTRFS_ORDERED_ENCODED, &entry->flags))
			release = entry->disk_num_bytes;
		else
			release = entry->num_bytes;
		btrfs_delalloc_release_metadata(btrfs_inode, release,
						test_bit(BTRFS_ORDERED_IOERR,
							 &entry->flags));
	}

	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
				 fs_info->delalloc_batch);

	spin_lock_irq(&btrfs_inode->ordered_tree_lock);
	node = &entry->rb_node;
	rb_erase(node, &btrfs_inode->ordered_tree);
	RB_CLEAR_NODE(node);
	if (btrfs_inode->ordered_tree_last == node)
		btrfs_inode->ordered_tree_last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
	spin_unlock_irq(&btrfs_inode->ordered_tree_lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (pending) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans || BTRFS_FS_ERROR(fs_info));
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	btrfs_lockdep_release(fs_info, btrfs_trans_pending_ordered);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
	if (!freespace_inode)
		btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
}

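/*
 * Work callback used by btrfs_wait_ordered_extents(): start one ordered
 * extent and wait for it, then signal anyone waiting on ->completion.
 */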
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered);
	complete(&ordered->completion);
}

/*
 * Wait for all the ordered extents in a root. This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	LIST_HEAD(splice);
	u64 done;

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Start IO and wait for a given ordered extent to finish.
 *
 * Wait on page writeback for all the pages in the extent and on the IO
 * completion code to insert metadata into the btree corresponding to the
 * extent.
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;
	struct btrfs_inode *inode = BTRFS_I(entry->inode);
	bool freespace_inode;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * If this is a free space inode do not take the ordered extents lockdep
	 * map.
	 */
	freespace_inode = btrfs_is_free_space_inode(inode);

	/*
	 * Pages in the range can be dirty, clean or writeback. We start IO on
	 * any dirty ones so the wait doesn't stall waiting for the flusher
	 * thread to find them.
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);

	if (!freespace_inode)
		btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
	wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags));
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = OFFSET_MAX;
	} else {
		orig_end = start + len - 1;
		if (orig_end > OFFSET_MAX)
			orig_end = OFFSET_MAX;
	}

	/*
	 * Start IO across the range first to instantiate any delalloc
	 * extents.
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(ordered);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * Find an ordered extent corresponding to file_offset. Return NULL if
 * nothing is found, otherwise take a reference on the extent and return it.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		entry = NULL;
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup(inode, entry);
	}
out:
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
	return entry;
}

/*
 * Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = ordered_tree_search(inode, file_offset);
	if (!node) {
		node = ordered_tree_search(inode, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_range(inode, entry);
	}
	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list)
{
	struct rb_node *n;

	ASSERT(inode_is_locked(&inode->vfs_inode));

	spin_lock_irq(&inode->ordered_tree_lock);
	for (n = rb_first(&inode->ordered_tree); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;

		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		refcount_inc(&ordered->refs);
		trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
	}
	spin_unlock_irq(&inode->ordered_tree_lock);
}

/*
 * Look up and return any ordered extent before 'file_offset'. NULL is
 * returned if none is found.
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
	trace_btrfs_ordered_extent_lookup_first(inode, entry);
out:
	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

/*
 * Lookup the first ordered extent that overlaps the range
 * [@file_offset, @file_offset + @len).
 *
 * The difference between this and btrfs_lookup_first_ordered_extent() is
 * that this one won't return any ordered extent that does not overlap the
 * range. And the difference against btrfs_lookup_ordered_extent() is that
 * this function ensures the first ordered extent gets returned.
 */
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
			struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct rb_node *node;
	struct rb_node *cur;
	struct rb_node *prev;
	struct rb_node *next;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = inode->ordered_tree.rb_node;
	/*
	 * Here we don't want to use ordered_tree_search(), which will use
	 * inode->ordered_tree_last and screw up the search order.
	 * And __tree_search() can't return the adjacent ordered extents
	 * either, thus here we do our own search.
	 */
	while (node) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset) {
			node = node->rb_left;
		} else if (file_offset >= entry_end(entry)) {
			node = node->rb_right;
		} else {
			/*
			 * Direct hit, got an ordered extent that covers
			 * @file_offset.
			 */
			goto out;
		}
	}
	if (!entry) {
		/* Empty tree */
		goto out;
	}

	cur = &entry->rb_node;
	/* We got an entry around @file_offset, check adjacent entries */
	if (entry->file_offset < file_offset) {
		prev = cur;
		next = rb_next(cur);
	} else {
		prev = rb_prev(cur);
		next = cur;
	}
	if (prev) {
		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	if (next) {
		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	/* No ordered extent in the range */
	entry = NULL;
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
	}

	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion.
 *
 * @inode:	  Inode whose ordered tree is to be searched
 * @start:	  Beginning of range to flush
 * @end:	  Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *		  locked range. It's the caller's responsibility to free the
 *		  cached state.
 *
 * Always return with the given range locked, ensuring that after it's called
 * no ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function.
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
}

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion in nowait mode.
 *
 * Return true if the range was locked and no ordered extents were pending in
 * it, otherwise false.
 */
bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
				  struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;

	if (!try_lock_extent(&inode->io_tree, start, end, cached_state))
		return false;

	ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
	if (!ordered)
		return true;

	btrfs_put_ordered_extent(ordered);
	unlock_extent(&inode->io_tree, start, end, cached_state);

	return false;
}

/* Split out a new ordered extent for the first @len bytes of @ordered. */
struct btrfs_ordered_extent *btrfs_split_ordered_extent(
			struct btrfs_ordered_extent *ordered, u64 len)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 file_offset = ordered->file_offset;
	u64 disk_bytenr = ordered->disk_bytenr;
	unsigned long flags = ordered->flags;
	struct btrfs_ordered_sum *sum, *tmpsum;
	struct btrfs_ordered_extent *new;
	struct rb_node *node;
	u64 offset = 0;

	trace_btrfs_ordered_extent_split(inode, ordered);

	ASSERT(!(flags & (1U << BTRFS_ORDERED_COMPRESSED)));

	/*
	 * The entire bio must be covered by the ordered extent, but we can't
	 * reduce the original extent to a zero length either.
	 */
	if (WARN_ON_ONCE(len >= ordered->num_bytes))
		return ERR_PTR(-EINVAL);
	/* We cannot split partially completed ordered extents. */
	if (ordered->bytes_left) {
		ASSERT(!(flags & ~BTRFS_ORDERED_TYPE_FLAGS));
		if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes))
			return ERR_PTR(-EINVAL);
	}
	/* We cannot split a compressed ordered extent. */
	if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes))
		return ERR_PTR(-EINVAL);

	new = alloc_ordered_extent(inode, file_offset, len, len, disk_bytenr,
				   len, 0, flags, ordered->compress_type);
	if (IS_ERR(new))
		return new;

	/* One ref for the tree. */
	refcount_inc(&new->refs);

	spin_lock_irq(&root->ordered_extent_lock);
	spin_lock(&inode->ordered_tree_lock);
	/* Remove from tree once */
	node = &ordered->rb_node;
	rb_erase(node, &inode->ordered_tree);
	RB_CLEAR_NODE(node);
	if (inode->ordered_tree_last == node)
		inode->ordered_tree_last = NULL;

	ordered->file_offset += len;
	ordered->disk_bytenr += len;
	ordered->num_bytes -= len;
	ordered->disk_num_bytes -= len;

	if (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)) {
		ASSERT(ordered->bytes_left == 0);
		new->bytes_left = 0;
	} else {
		ordered->bytes_left -= len;
	}

	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags)) {
		if (ordered->truncated_len > len) {
			ordered->truncated_len -= len;
		} else {
			new->truncated_len = ordered->truncated_len;
			ordered->truncated_len = 0;
		}
	}

	list_for_each_entry_safe(sum, tmpsum, &ordered->list, list) {
		if (offset == len)
			break;
		list_move_tail(&sum->list, &new->list);
		offset += sum->len;
	}

	/* Re-insert the node */
	node = tree_insert(&inode->ordered_tree, ordered->file_offset,
			   &ordered->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			    "zoned: inconsistency in ordered tree at offset %llu",
			    ordered->file_offset);

	node = tree_insert(&inode->ordered_tree, new->file_offset, &new->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			    "zoned: inconsistency in ordered tree at offset %llu",
			    new->file_offset);
	spin_unlock(&inode->ordered_tree_lock);

	list_add_tail(&new->root_extent_list, &root->ordered_extents);
	root->nr_ordered_extents++;
	spin_unlock_irq(&root->ordered_extent_lock);
	return new;
}

int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
					sizeof(struct btrfs_ordered_extent), 0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2007 Oracle. All rights reserved.
4 */
5
6#include <linux/slab.h>
7#include <linux/blkdev.h>
8#include <linux/writeback.h>
9#include <linux/sched/mm.h>
10#include "misc.h"
11#include "ctree.h"
12#include "transaction.h"
13#include "btrfs_inode.h"
14#include "extent_io.h"
15#include "disk-io.h"
16#include "compression.h"
17#include "delalloc-space.h"
18#include "qgroup.h"
19
20static struct kmem_cache *btrfs_ordered_extent_cache;
21
22static u64 entry_end(struct btrfs_ordered_extent *entry)
23{
24 if (entry->file_offset + entry->num_bytes < entry->file_offset)
25 return (u64)-1;
26 return entry->file_offset + entry->num_bytes;
27}
28
29/* returns NULL if the insertion worked, or it returns the node it did find
30 * in the tree
31 */
32static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
33 struct rb_node *node)
34{
35 struct rb_node **p = &root->rb_node;
36 struct rb_node *parent = NULL;
37 struct btrfs_ordered_extent *entry;
38
39 while (*p) {
40 parent = *p;
41 entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);
42
43 if (file_offset < entry->file_offset)
44 p = &(*p)->rb_left;
45 else if (file_offset >= entry_end(entry))
46 p = &(*p)->rb_right;
47 else
48 return parent;
49 }
50
51 rb_link_node(node, parent, p);
52 rb_insert_color(node, root);
53 return NULL;
54}
55
56/*
57 * look for a given offset in the tree, and if it can't be found return the
58 * first lesser offset
59 */
60static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
61 struct rb_node **prev_ret)
62{
63 struct rb_node *n = root->rb_node;
64 struct rb_node *prev = NULL;
65 struct rb_node *test;
66 struct btrfs_ordered_extent *entry;
67 struct btrfs_ordered_extent *prev_entry = NULL;
68
69 while (n) {
70 entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
71 prev = n;
72 prev_entry = entry;
73
74 if (file_offset < entry->file_offset)
75 n = n->rb_left;
76 else if (file_offset >= entry_end(entry))
77 n = n->rb_right;
78 else
79 return n;
80 }
81 if (!prev_ret)
82 return NULL;
83
84 while (prev && file_offset >= entry_end(prev_entry)) {
85 test = rb_next(prev);
86 if (!test)
87 break;
88 prev_entry = rb_entry(test, struct btrfs_ordered_extent,
89 rb_node);
90 if (file_offset < entry_end(prev_entry))
91 break;
92
93 prev = test;
94 }
95 if (prev)
96 prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
97 rb_node);
98 while (prev && file_offset < entry_end(prev_entry)) {
99 test = rb_prev(prev);
100 if (!test)
101 break;
102 prev_entry = rb_entry(test, struct btrfs_ordered_extent,
103 rb_node);
104 prev = test;
105 }
106 *prev_ret = prev;
107 return NULL;
108}
109
110/*
111 * helper to check if a given offset is inside a given entry
112 */
113static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
114{
115 if (file_offset < entry->file_offset ||
116 entry->file_offset + entry->num_bytes <= file_offset)
117 return 0;
118 return 1;
119}
120
121static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
122 u64 len)
123{
124 if (file_offset + len <= entry->file_offset ||
125 entry->file_offset + entry->num_bytes <= file_offset)
126 return 0;
127 return 1;
128}
129
130/*
131 * look find the first ordered struct that has this offset, otherwise
132 * the first one less than this offset
133 */
134static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
135 u64 file_offset)
136{
137 struct rb_root *root = &tree->tree;
138 struct rb_node *prev = NULL;
139 struct rb_node *ret;
140 struct btrfs_ordered_extent *entry;
141
142 if (tree->last) {
143 entry = rb_entry(tree->last, struct btrfs_ordered_extent,
144 rb_node);
145 if (offset_in_entry(entry, file_offset))
146 return tree->last;
147 }
148 ret = __tree_search(root, file_offset, &prev);
149 if (!ret)
150 ret = prev;
151 if (ret)
152 tree->last = ret;
153 return ret;
154}
155
156/*
157 * Allocate and add a new ordered_extent into the per-inode tree.
158 *
159 * The tree is given a single reference on the ordered extent that was
160 * inserted.
161 */
162static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
163 u64 disk_bytenr, u64 num_bytes,
164 u64 disk_num_bytes, int type, int dio,
165 int compress_type)
166{
167 struct btrfs_root *root = inode->root;
168 struct btrfs_fs_info *fs_info = root->fs_info;
169 struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
170 struct rb_node *node;
171 struct btrfs_ordered_extent *entry;
172 int ret;
173
174 if (type == BTRFS_ORDERED_NOCOW || type == BTRFS_ORDERED_PREALLOC) {
175 /* For nocow write, we can release the qgroup rsv right now */
176 ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
177 if (ret < 0)
178 return ret;
179 ret = 0;
180 } else {
181 /*
182 * The ordered extent has reserved qgroup space, release now
183 * and pass the reserved number for qgroup_record to free.
184 */
185 ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
186 if (ret < 0)
187 return ret;
188 }
189 entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
190 if (!entry)
191 return -ENOMEM;
192
193 entry->file_offset = file_offset;
194 entry->disk_bytenr = disk_bytenr;
195 entry->num_bytes = num_bytes;
196 entry->disk_num_bytes = disk_num_bytes;
197 entry->bytes_left = num_bytes;
198 entry->inode = igrab(&inode->vfs_inode);
199 entry->compress_type = compress_type;
200 entry->truncated_len = (u64)-1;
201 entry->qgroup_rsv = ret;
202 if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
203 set_bit(type, &entry->flags);
204
205 if (dio) {
206 percpu_counter_add_batch(&fs_info->dio_bytes, num_bytes,
207 fs_info->delalloc_batch);
208 set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
209 }
210
211 /* one ref for the tree */
212 refcount_set(&entry->refs, 1);
213 init_waitqueue_head(&entry->wait);
214 INIT_LIST_HEAD(&entry->list);
215 INIT_LIST_HEAD(&entry->root_extent_list);
216 INIT_LIST_HEAD(&entry->work_list);
217 init_completion(&entry->completion);
218
219 trace_btrfs_ordered_extent_add(&inode->vfs_inode, entry);
220
221 spin_lock_irq(&tree->lock);
222 node = tree_insert(&tree->tree, file_offset,
223 &entry->rb_node);
224 if (node)
225 btrfs_panic(fs_info, -EEXIST,
226 "inconsistency in ordered tree at offset %llu",
227 file_offset);
228 spin_unlock_irq(&tree->lock);
229
230 spin_lock(&root->ordered_extent_lock);
231 list_add_tail(&entry->root_extent_list,
232 &root->ordered_extents);
233 root->nr_ordered_extents++;
234 if (root->nr_ordered_extents == 1) {
235 spin_lock(&fs_info->ordered_root_lock);
236 BUG_ON(!list_empty(&root->ordered_root));
237 list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
238 spin_unlock(&fs_info->ordered_root_lock);
239 }
240 spin_unlock(&root->ordered_extent_lock);
241
242 /*
243 * We don't need the count_max_extents here, we can assume that all of
244 * that work has been done at higher layers, so this is truly the
245 * smallest the extent is going to get.
246 */
247 spin_lock(&inode->lock);
248 btrfs_mod_outstanding_extents(inode, 1);
249 spin_unlock(&inode->lock);
250
251 return 0;
252}
253
254int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
255 u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes,
256 int type)
257{
258 return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
259 num_bytes, disk_num_bytes, type, 0,
260 BTRFS_COMPRESS_NONE);
261}
262
263int btrfs_add_ordered_extent_dio(struct btrfs_inode *inode, u64 file_offset,
264 u64 disk_bytenr, u64 num_bytes,
265 u64 disk_num_bytes, int type)
266{
267 return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
268 num_bytes, disk_num_bytes, type, 1,
269 BTRFS_COMPRESS_NONE);
270}
271
272int btrfs_add_ordered_extent_compress(struct btrfs_inode *inode, u64 file_offset,
273 u64 disk_bytenr, u64 num_bytes,
274 u64 disk_num_bytes, int type,
275 int compress_type)
276{
277 return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
278 num_bytes, disk_num_bytes, type, 0,
279 compress_type);
280}
281
282/*
283 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
284 * when an ordered extent is finished. If the list covers more than one
285 * ordered extent, it is split across multiples.
286 */
287void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
288 struct btrfs_ordered_sum *sum)
289{
290 struct btrfs_ordered_inode_tree *tree;
291
292 tree = &BTRFS_I(entry->inode)->ordered_tree;
293 spin_lock_irq(&tree->lock);
294 list_add_tail(&sum->list, &entry->list);
295 spin_unlock_irq(&tree->lock);
296}
297
298/*
299 * this is used to account for finished IO across a given range
300 * of the file. The IO may span ordered extents. If
301 * a given ordered_extent is completely done, 1 is returned, otherwise
302 * 0.
303 *
304 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
305 * to make sure this function only returns 1 once for a given ordered extent.
306 *
307 * file_offset is updated to one byte past the range that is recorded as
308 * complete. This allows you to walk forward in the file.
309 */
310int btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
311 struct btrfs_ordered_extent **cached,
312 u64 *file_offset, u64 io_size, int uptodate)
313{
314 struct btrfs_fs_info *fs_info = inode->root->fs_info;
315 struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
316 struct rb_node *node;
317 struct btrfs_ordered_extent *entry = NULL;
318 int ret;
319 unsigned long flags;
320 u64 dec_end;
321 u64 dec_start;
322 u64 to_dec;
323
324 spin_lock_irqsave(&tree->lock, flags);
325 node = tree_search(tree, *file_offset);
326 if (!node) {
327 ret = 1;
328 goto out;
329 }
330
331 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
332 if (!offset_in_entry(entry, *file_offset)) {
333 ret = 1;
334 goto out;
335 }
336
337 dec_start = max(*file_offset, entry->file_offset);
338 dec_end = min(*file_offset + io_size,
339 entry->file_offset + entry->num_bytes);
340 *file_offset = dec_end;
341 if (dec_start > dec_end) {
342 btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
343 dec_start, dec_end);
344 }
345 to_dec = dec_end - dec_start;
346 if (to_dec > entry->bytes_left) {
347 btrfs_crit(fs_info,
348 "bad ordered accounting left %llu size %llu",
349 entry->bytes_left, to_dec);
350 }
351 entry->bytes_left -= to_dec;
352 if (!uptodate)
353 set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
354
355 if (entry->bytes_left == 0) {
356 ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
357 /* test_and_set_bit implies a barrier */
358 cond_wake_up_nomb(&entry->wait);
359 } else {
360 ret = 1;
361 }
362out:
363 if (!ret && cached && entry) {
364 *cached = entry;
365 refcount_inc(&entry->refs);
366 }
367 spin_unlock_irqrestore(&tree->lock, flags);
368 return ret == 0;
369}
370
371/*
372 * this is used to account for finished IO across a given range
373 * of the file. The IO should not span ordered extents. If
374 * a given ordered_extent is completely done, 1 is returned, otherwise
375 * 0.
376 *
377 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
378 * to make sure this function only returns 1 once for a given ordered extent.
379 */
380int btrfs_dec_test_ordered_pending(struct inode *inode,
381 struct btrfs_ordered_extent **cached,
382 u64 file_offset, u64 io_size, int uptodate)
383{
384 struct btrfs_ordered_inode_tree *tree;
385 struct rb_node *node;
386 struct btrfs_ordered_extent *entry = NULL;
387 unsigned long flags;
388 int ret;
389
390 tree = &BTRFS_I(inode)->ordered_tree;
391 spin_lock_irqsave(&tree->lock, flags);
392 if (cached && *cached) {
393 entry = *cached;
394 goto have_entry;
395 }
396
397 node = tree_search(tree, file_offset);
398 if (!node) {
399 ret = 1;
400 goto out;
401 }
402
403 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
404have_entry:
405 if (!offset_in_entry(entry, file_offset)) {
406 ret = 1;
407 goto out;
408 }
409
410 if (io_size > entry->bytes_left) {
411 btrfs_crit(BTRFS_I(inode)->root->fs_info,
412 "bad ordered accounting left %llu size %llu",
413 entry->bytes_left, io_size);
414 }
415 entry->bytes_left -= io_size;
416 if (!uptodate)
417 set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
418
419 if (entry->bytes_left == 0) {
420 ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
421 /* test_and_set_bit implies a barrier */
422 cond_wake_up_nomb(&entry->wait);
423 } else {
424 ret = 1;
425 }
426out:
427 if (!ret && cached && entry) {
428 *cached = entry;
429 refcount_inc(&entry->refs);
430 }
431 spin_unlock_irqrestore(&tree->lock, flags);
432 return ret == 0;
433}
434
435/*
436 * used to drop a reference on an ordered extent. This will free
437 * the extent if the last reference is dropped
438 */
439void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
440{
441 struct list_head *cur;
442 struct btrfs_ordered_sum *sum;
443
444 trace_btrfs_ordered_extent_put(entry->inode, entry);
445
446 if (refcount_dec_and_test(&entry->refs)) {
447 ASSERT(list_empty(&entry->root_extent_list));
448 ASSERT(RB_EMPTY_NODE(&entry->rb_node));
449 if (entry->inode)
450 btrfs_add_delayed_iput(entry->inode);
451 while (!list_empty(&entry->list)) {
452 cur = entry->list.next;
453 sum = list_entry(cur, struct btrfs_ordered_sum, list);
454 list_del(&sum->list);
455 kvfree(sum);
456 }
457 kmem_cache_free(btrfs_ordered_extent_cache, entry);
458 }
459}
460
461/*
462 * remove an ordered extent from the tree. No references are dropped
463 * and waiters are woken up.
464 */
465void btrfs_remove_ordered_extent(struct inode *inode,
466 struct btrfs_ordered_extent *entry)
467{
468 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
469 struct btrfs_ordered_inode_tree *tree;
470 struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
471 struct btrfs_root *root = btrfs_inode->root;
472 struct rb_node *node;
473
474 /* This is paired with btrfs_add_ordered_extent. */
475 spin_lock(&btrfs_inode->lock);
476 btrfs_mod_outstanding_extents(btrfs_inode, -1);
477 spin_unlock(&btrfs_inode->lock);
478 if (root != fs_info->tree_root)
479 btrfs_delalloc_release_metadata(btrfs_inode, entry->num_bytes,
480 false);
481
482 if (test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
483 percpu_counter_add_batch(&fs_info->dio_bytes, -entry->num_bytes,
484 fs_info->delalloc_batch);
485
486 tree = &btrfs_inode->ordered_tree;
487 spin_lock_irq(&tree->lock);
488 node = &entry->rb_node;
489 rb_erase(node, &tree->tree);
490 RB_CLEAR_NODE(node);
491 if (tree->last == node)
492 tree->last = NULL;
493 set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
494 spin_unlock_irq(&tree->lock);
495
496 spin_lock(&root->ordered_extent_lock);
497 list_del_init(&entry->root_extent_list);
498 root->nr_ordered_extents--;
499
500 trace_btrfs_ordered_extent_remove(inode, entry);
501
502 if (!root->nr_ordered_extents) {
503 spin_lock(&fs_info->ordered_root_lock);
504 BUG_ON(list_empty(&root->ordered_root));
505 list_del_init(&root->ordered_root);
506 spin_unlock(&fs_info->ordered_root_lock);
507 }
508 spin_unlock(&root->ordered_extent_lock);
509 wake_up(&entry->wait);
510}
511
512static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
513{
514 struct btrfs_ordered_extent *ordered;
515
516 ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
517 btrfs_start_ordered_extent(ordered->inode, ordered, 1);
518 complete(&ordered->completion);
519}
520
521/*
522 * wait for all the ordered extents in a root. This is done when balancing
523 * space between drives.
524 */
525u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
526 const u64 range_start, const u64 range_len)
527{
528 struct btrfs_fs_info *fs_info = root->fs_info;
529 LIST_HEAD(splice);
530 LIST_HEAD(skipped);
531 LIST_HEAD(works);
532 struct btrfs_ordered_extent *ordered, *next;
533 u64 count = 0;
534 const u64 range_end = range_start + range_len;
535
536 mutex_lock(&root->ordered_extent_mutex);
537 spin_lock(&root->ordered_extent_lock);
538 list_splice_init(&root->ordered_extents, &splice);
539 while (!list_empty(&splice) && nr) {
540 ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
541 root_extent_list);
542
543 if (range_end <= ordered->disk_bytenr ||
544 ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
545 list_move_tail(&ordered->root_extent_list, &skipped);
546 cond_resched_lock(&root->ordered_extent_lock);
547 continue;
548 }
549
550 list_move_tail(&ordered->root_extent_list,
551 &root->ordered_extents);
552 refcount_inc(&ordered->refs);
553 spin_unlock(&root->ordered_extent_lock);
554
555 btrfs_init_work(&ordered->flush_work,
556 btrfs_run_ordered_extent_work, NULL, NULL);
557 list_add_tail(&ordered->work_list, &works);
558 btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);
559
560 cond_resched();
561 spin_lock(&root->ordered_extent_lock);
562 if (nr != U64_MAX)
563 nr--;
564 count++;
565 }
566 list_splice_tail(&skipped, &root->ordered_extents);
567 list_splice_tail(&splice, &root->ordered_extents);
568 spin_unlock(&root->ordered_extent_lock);
569
570 list_for_each_entry_safe(ordered, next, &works, work_list) {
571 list_del_init(&ordered->work_list);
572 wait_for_completion(&ordered->completion);
573 btrfs_put_ordered_extent(ordered);
574 cond_resched();
575 }
576 mutex_unlock(&root->ordered_extent_mutex);
577
578 return count;
579}
580
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If @wait is set, this effectively waits on page writeback for all the
 * pages in the extent and on the IO completion code to insert the metadata
 * corresponding to the extent into the btree.
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * Pages in the range can be dirty, clean or writeback. We start IO
	 * on any dirty ones so the wait doesn't stall waiting for the
	 * flusher thread to find them.
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes. Writeback
 * is started on the range first; any writeback or ordered extent error is
 * returned.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/*
	 * Start IO across the range first to instantiate any delalloc
	 * extents.
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
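	/*
	 * Walk the range backwards from its end, waiting on each ordered
	 * extent that overlaps it in turn.
	 */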
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * Find an ordered extent that corresponds to @file_offset. Return NULL if
 * nothing is found, otherwise take a reference on the extent and return it.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Since the DIO code tries to lock a wide area we need to look for any
 * ordered extents that exist in the range, rather than just the start of
 * the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

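	/*
	 * Walk forward from the first candidate until we either find an
	 * extent that overlaps the range or pass the end of the range.
	 */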
	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Lookup and return any extent before 'file_offset'. NULL is returned
 * if none is found.
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Search the ordered extents for one corresponding to 'offset' and try to
 * find a checksum. This is used because we allow pages to be reclaimed
 * before their checksum is actually put into the btree.
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
			   u8 *sum, int len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = btrfs_inode_sectorsize(inode);
	const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(BTRFS_I(inode), offset);
	if (!ordered)
		return 0;

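	/*
	 * Walk the csum list attached to the ordered extent in reverse,
	 * copying out the checksums that cover disk_bytenr.
	 */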
	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >>
			    inode->i_sb->s_blocksize_bits;
			num_sectors = ordered_sum->len >>
				      inode->i_sb->s_blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i * csum_size,
			       num_sectors * csum_size);

			index += (int)num_sectors * csum_size;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}

/*
 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
 * pending ordered extents in it are run to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * Always returns with the given range locked, ensuring that after it's called
 * no ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

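	/*
	 * Lock the range, then if an ordered extent is still pending in it,
	 * unlock, wait for the extent to complete and try again.
	 */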
	while (1) {
		lock_extent_bits(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function.
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent_cached(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}
}

int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
				     sizeof(struct btrfs_ordered_extent), 0,
				     SLAB_MEM_SPREAD,
				     NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}