// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "messages.h"
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"
#include "subpage.h"
#include "file.h"
#include "block-group.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

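/* Return the end offset of an ordered extent in the file, saturating to (u64)-1 on overflow. */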
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}

/*
 * Return NULL if the insertion worked, or the node that was found to overlap
 * the given offset in the tree.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * Look for a given offset in the tree, and if it can't be found return the
 * entry at the first lesser offset.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

static int btrfs_range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
				u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

/*
 * Look for the first ordered struct that covers this offset, otherwise return
 * the first one at a lesser offset.
 */
static inline struct rb_node *ordered_tree_search(struct btrfs_inode *inode,
						  u64 file_offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (inode->ordered_tree_last) {
		entry = rb_entry(inode->ordered_tree_last, struct btrfs_ordered_extent,
				 rb_node);
		if (in_range(file_offset, entry->file_offset, entry->num_bytes))
			return inode->ordered_tree_last;
	}
	ret = __tree_search(&inode->ordered_tree, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		inode->ordered_tree_last = ret;
	return ret;
}

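/*
 * Allocate an ordered extent for the given file range and settle its qgroup
 * reservation: for NOCOW/PREALLOC writes the reserved data space is freed
 * right away, otherwise it is released and recorded for the qgroup_record.
 * The new entry carries a single reference for the caller, accounts one
 * outstanding extent, and is not yet inserted into any tree.
 */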
static struct btrfs_ordered_extent *alloc_ordered_extent(
			struct btrfs_inode *inode, u64 file_offset, u64 num_bytes,
			u64 ram_bytes, u64 disk_bytenr, u64 disk_num_bytes,
			u64 offset, unsigned long flags, int compress_type)
{
	struct btrfs_ordered_extent *entry;
	int ret;
	u64 qgroup_rsv = 0;

	if (flags &
	    ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv);
		if (ret < 0)
			return ERR_PTR(ret);
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes, &qgroup_rsv);
		if (ret < 0)
			return ERR_PTR(ret);
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	entry->file_offset = file_offset;
	entry->num_bytes = num_bytes;
	entry->ram_bytes = ram_bytes;
	entry->disk_bytenr = disk_bytenr;
	entry->disk_num_bytes = disk_num_bytes;
	entry->offset = offset;
	entry->bytes_left = num_bytes;
	entry->inode = BTRFS_I(igrab(&inode->vfs_inode));
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = qgroup_rsv;
	entry->flags = flags;
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	INIT_LIST_HEAD(&entry->bioc_list);
	init_completion(&entry->completion);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	return entry;
}

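/*
 * Insert a freshly allocated ordered extent into the inode's ordered tree and
 * the root's list of ordered extents, taking one extra reference for the tree.
 * An overlapping entry means double allocation and is treated as fatal.
 */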
static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct btrfs_inode *inode = entry->inode;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;

	trace_btrfs_ordered_extent_add(inode, entry);

	percpu_counter_add_batch(&fs_info->ordered_bytes, entry->num_bytes,
				 fs_info->delalloc_batch);

	/* One ref for the tree. */
	refcount_inc(&entry->refs);

	spin_lock_irq(&inode->ordered_tree_lock);
	node = tree_insert(&inode->ordered_tree, entry->file_offset,
			   &entry->rb_node);
	if (unlikely(node))
		btrfs_panic(fs_info, -EEXIST,
				"inconsistency in ordered tree at offset %llu",
				entry->file_offset);
	spin_unlock_irq(&inode->ordered_tree_lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
}

/*
 * Add an ordered extent to the per-inode tree.
 *
 * @inode:       Inode that this extent is for.
 * @file_offset: Logical offset in file where the extent starts.
 * @file_extent: Describes the extent to be written; most of its members
 *               correspond to &struct btrfs_file_extent_item (num_bytes,
 *               ram_bytes, disk_bytenr, disk_num_bytes, offset, compression).
 * @flags:       Flags specifying type of extent (1 << BTRFS_ORDERED_*).
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted, and the returned pointer is given a second reference.
 *
 * Return: the new ordered extent or error pointer.
 */
struct btrfs_ordered_extent *btrfs_alloc_ordered_extent(
			struct btrfs_inode *inode, u64 file_offset,
			const struct btrfs_file_extent *file_extent, unsigned long flags)
{
	struct btrfs_ordered_extent *entry;

	ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);

	/*
	 * For regular writes, we just use the members in @file_extent.
	 *
	 * For NOCOW, we don't really care about the numbers except @start and
	 * file_extent->num_bytes, as we won't insert a file extent item at all.
	 *
	 * For PREALLOC, we do not use ordered extent members, but
	 * btrfs_mark_extent_written() handles everything.
	 *
	 * So here we always pass 0 as offset for NOCOW/PREALLOC ordered extents,
	 * or btrfs_split_ordered_extent() cannot handle it correctly.
	 */
	if (flags & ((1U << BTRFS_ORDERED_NOCOW) | (1U << BTRFS_ORDERED_PREALLOC)))
		entry = alloc_ordered_extent(inode, file_offset,
					     file_extent->num_bytes,
					     file_extent->num_bytes,
					     file_extent->disk_bytenr + file_extent->offset,
					     file_extent->num_bytes, 0, flags,
					     file_extent->compression);
	else
		entry = alloc_ordered_extent(inode, file_offset,
					     file_extent->num_bytes,
					     file_extent->ram_bytes,
					     file_extent->disk_bytenr,
					     file_extent->disk_num_bytes,
					     file_extent->offset, flags,
					     file_extent->compression);
	if (!IS_ERR(entry))
		insert_ordered_extent(entry);
	return entry;
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_inode *inode = entry->inode;

	spin_lock_irq(&inode->ordered_tree_lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&inode->ordered_tree_lock);
}

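/*
 * Flag an ordered extent as failed: set BTRFS_ORDERED_IOERR and, only on the
 * first error, mark the inode's mapping with -EIO.
 */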
void btrfs_mark_ordered_extent_error(struct btrfs_ordered_extent *ordered)
{
	if (!test_and_set_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
		mapping_set_error(ordered->inode->vfs_inode.i_mapping, -EIO);
}

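/* Work queue callback to run the completion of one ordered extent. */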
static void finish_ordered_fn(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered_extent;

	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
	btrfs_finish_ordered_io(ordered_extent);
}

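/*
 * Account a finished IO range against an ordered extent, with the inode's
 * ordered_tree_lock held.  For folio based IO also clear the per-folio
 * ordered flag for the range, skipping ranges whose flag was already cleared.
 * Return true if this call finished the last pending byte of the ordered
 * extent, in which case a reference is taken and the caller must queue the
 * completion.
 */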
static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
				      struct folio *folio, u64 file_offset,
				      u64 len, bool uptodate)
{
	struct btrfs_inode *inode = ordered->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	lockdep_assert_held(&inode->ordered_tree_lock);

	if (folio) {
		ASSERT(folio->mapping);
		ASSERT(folio_pos(folio) <= file_offset);
		ASSERT(file_offset + len <= folio_pos(folio) + folio_size(folio));

		/*
		 * The ordered flag indicates whether we still have pending IO
		 * unfinished for the ordered extent.
		 *
		 * If it's not set, we need to skip to the next range.
		 */
		if (!btrfs_folio_test_ordered(fs_info, folio, file_offset, len))
			return false;
		btrfs_folio_clear_ordered(fs_info, folio, file_offset, len);
	}

	/* Now we're fine to update the accounting. */
	if (WARN_ON_ONCE(len > ordered->bytes_left)) {
		btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%llu left=%llu",
			   btrfs_root_id(inode->root), btrfs_ino(inode),
			   ordered->file_offset, ordered->num_bytes,
			   len, ordered->bytes_left);
		ordered->bytes_left = 0;
	} else {
		ordered->bytes_left -= len;
	}

	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);

	if (ordered->bytes_left)
		return false;

	/*
	 * All the IO of the ordered extent is finished, we need to queue
	 * the finish_func to be executed.
	 */
	set_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags);
	cond_wake_up(&ordered->wait);
	refcount_inc(&ordered->refs);
	trace_btrfs_ordered_extent_mark_finished(inode, ordered);
	return true;
}

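/*
 * Queue the completion of an ordered extent on the appropriate endio work
 * queue (free space inodes have a dedicated one).
 */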
static void btrfs_queue_ordered_fn(struct btrfs_ordered_extent *ordered)
{
	struct btrfs_inode *inode = ordered->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_workqueue *wq = btrfs_is_free_space_inode(inode) ?
		fs_info->endio_freespace_worker : fs_info->endio_write_workers;

	btrfs_init_work(&ordered->work, finish_ordered_fn, NULL);
	btrfs_queue_work(wq, &ordered->work);
}

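/*
 * Mark a range of an ordered extent as finished and queue the completion work
 * if that finished the whole ordered extent.  On error for a COW write, also
 * flag the inode so the next fast fsync waits for ordered extent completion
 * (see the comment in the body for the reasoning).
 */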
void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
				 struct folio *folio, u64 file_offset, u64 len,
				 bool uptodate)
{
	struct btrfs_inode *inode = ordered->inode;
	unsigned long flags;
	bool ret;

	trace_btrfs_finish_ordered_extent(inode, file_offset, len, uptodate);

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	ret = can_finish_ordered_extent(ordered, folio, file_offset, len,
					uptodate);
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);

	/*
	 * If this is a COW write it means we created new extent maps for the
	 * range and they point to unwritten locations if we got an error either
	 * before submitting a bio or during IO.
	 *
	 * We have marked the ordered extent with BTRFS_ORDERED_IOERR, and we
	 * are queuing its completion below. During completion, at
	 * btrfs_finish_one_ordered(), we will drop the extent maps for the
	 * unwritten extents.
	 *
	 * However because completion runs in a work queue we can end up having
	 * a fast fsync running before that. In the case of direct IO, once we
	 * unlock the inode the fsync might start, and we queue the completion
	 * before unlocking the inode. In the case of buffered IO when writeback
	 * finishes (end_bbio_data_write()) we queue the completion, so if the
	 * writeback was triggered by a fast fsync, the fsync might start
	 * logging before ordered extent completion runs in the work queue.
	 *
	 * The fast fsync will log file extent items based on the extent maps it
	 * finds, so if by the time it collects extent maps the ordered extent
	 * completion didn't happen yet, it will log file extent items that
	 * point to unwritten extents, resulting in a corruption if a crash
	 * happens and the log tree is replayed. Note that a fast fsync does not
	 * wait for completion of ordered extents in order to reduce latency.
	 *
	 * Set a flag in the inode so that the next fast fsync will wait for
	 * ordered extents to complete before starting to log.
	 */
	if (!uptodate && !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
		set_bit(BTRFS_INODE_COW_WRITE_ERROR, &inode->runtime_flags);

	if (ret)
		btrfs_queue_ordered_fn(ordered);
}

/*
 * Mark all the IO of the ordered extents inside the specified range as
 * finished.
 *
 * @folio:	 The involved folio for the operation.
 *		 For uncompressed buffered IO, the folio status also needs to be
 *		 updated to indicate whether the pending ordered IO is finished.
 *		 Can be NULL for direct IO and compressed writes.
 *		 For those cases, callers must ensure they won't execute the
 *		 endio function twice.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
				    struct folio *folio, u64 file_offset,
				    u64 num_bytes, bool uptodate)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	u64 cur = file_offset;

	trace_btrfs_writepage_end_io_hook(inode, file_offset,
					  file_offset + num_bytes - 1,
					  uptodate);

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	while (cur < file_offset + num_bytes) {
		u64 entry_end;
		u64 end;
		u32 len;

		node = ordered_tree_search(inode, cur);
		/* No ordered extents at all */
		if (!node)
			break;

		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		entry_end = entry->file_offset + entry->num_bytes;
		/*
		 * |<-- OE --->|  |
		 *		  cur
		 * Go to next OE.
		 */
		if (cur >= entry_end) {
			node = rb_next(node);
			/* No more ordered extents, exit */
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_ordered_extent,
					 rb_node);

			/* Go to next ordered extent and continue */
			cur = entry->file_offset;
			continue;
		}
		/*
		 * |	|<--- OE --->|
		 * cur
		 * Go to the start of OE.
		 */
		if (cur < entry->file_offset) {
			cur = entry->file_offset;
			continue;
		}

		/*
		 * Now we are definitely inside one ordered extent.
		 *
		 * |<--- OE --->|
		 *	|
		 *	cur
		 */
		end = min(entry->file_offset + entry->num_bytes,
			  file_offset + num_bytes) - 1;
		ASSERT(end + 1 - cur < U32_MAX);
		len = end + 1 - cur;

		if (can_finish_ordered_extent(entry, folio, cur, len, uptodate)) {
			spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
			btrfs_queue_ordered_fn(entry);
			spin_lock_irqsave(&inode->ordered_tree_lock, flags);
		}
		cur += len;
	}
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
}

/*
 * Finish IO for one ordered extent across a given range.  The range can only
 * contain one ordered extent.
 *
 * @cached:	 The cached ordered extent. If not NULL, we can skip the tree
 *		 search and use the ordered extent directly.
 *		 Will be also used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO
 * @io_size:	 Length of the finished IO range
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents.
 * Thus the caller should ensure the range doesn't cross ordered extents.
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				    struct btrfs_ordered_extent **cached,
				    u64 file_offset, u64 io_size)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	bool finished = false;

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		goto out;

	if (io_size > entry->bytes_left)
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);

	entry->bytes_left -= io_size;

	if (entry->bytes_left == 0) {
		/*
		 * Ensure only one caller can set the flag and finished_ret
		 * accordingly
		 */
		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	}
out:
	if (finished && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
	}
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
	return finished;
}

/*
 * Drop a reference on an ordered extent.  This will free the extent if the
 * last reference is dropped.
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(list_empty(&entry->log_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * Remove an ordered extent from the tree.  No references are dropped, but any
 * waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_root *root = btrfs_inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	bool pending;
	bool freespace_inode;

	/*
	 * If this is a free space inode the thread has not acquired the ordered
	 * extents lockdep map.
	 */
	freespace_inode = btrfs_is_free_space_inode(btrfs_inode);

	btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
	/* This is paired with alloc_ordered_extent(). */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root) {
		u64 release;

		if (test_bit(BTRFS_ORDERED_ENCODED, &entry->flags))
			release = entry->disk_num_bytes;
		else
			release = entry->num_bytes;
		btrfs_delalloc_release_metadata(btrfs_inode, release,
						test_bit(BTRFS_ORDERED_IOERR,
							 &entry->flags));
	}

	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
				 fs_info->delalloc_batch);

	spin_lock_irq(&btrfs_inode->ordered_tree_lock);
	node = &entry->rb_node;
	rb_erase(node, &btrfs_inode->ordered_tree);
	RB_CLEAR_NODE(node);
	if (btrfs_inode->ordered_tree_last == node)
		btrfs_inode->ordered_tree_last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
	spin_unlock_irq(&btrfs_inode->ordered_tree_lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (pending) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans || BTRFS_FS_ERROR(fs_info));
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	btrfs_lockdep_release(fs_info, btrfs_trans_pending_ordered);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
	if (!freespace_inode)
		btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
}

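/* Flush work callback: start one ordered extent and signal its completion. */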
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered);
	complete(&ordered->completion);
}

/*
 * Wait for up to @nr ordered extents in a root.  Use @bg as the range, or the
 * whole range if it's NULL.  Return the number of ordered extents waited on.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	u64 range_start, range_len;
	u64 range_end;

	if (bg) {
		range_start = bg->start;
		range_len = bg->length;
	} else {
		range_start = 0;
		range_len = U64_MAX;
	}
	range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		if (nr != U64_MAX)
			nr--;
		count++;
		spin_lock(&root->ordered_extent_lock);
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

/*
 * Wait for @nr ordered extents that intersect the @bg, or the whole range of
 * the filesystem if @bg is NULL.
 */
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const struct btrfs_block_group *bg)
{
	struct btrfs_root *root;
	LIST_HEAD(splice);
	u64 done;

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr, bg);
		btrfs_put_root(root);

		if (nr != U64_MAX)
			nr -= done;

		spin_lock(&fs_info->ordered_root_lock);
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Start IO and wait for a given ordered extent to finish.
 *
 * Wait on page writeback for all the pages in the extent and the IO completion
 * code to insert metadata into the btree corresponding to the extent.
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;
	struct btrfs_inode *inode = entry->inode;
	bool freespace_inode;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * If this is a free space inode do not take the ordered extents lockdep
	 * map.
	 */
	freespace_inode = btrfs_is_free_space_inode(inode);

	/*
	 * Pages in the range can be dirty, clean or writeback.  We start IO on
	 * any dirty ones so the wait doesn't stall waiting for the flusher
	 * thread to find them.
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);

	if (!freespace_inode)
		btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
	wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags));
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct btrfs_inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = OFFSET_MAX;
	} else {
		orig_end = start + len - 1;
		if (orig_end > OFFSET_MAX)
			orig_end = OFFSET_MAX;
	}

	/*
	 * Start IO across the range first to instantiate any delalloc
	 * extents.
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->vfs_inode.i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(ordered);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * Find an ordered extent corresponding to @file_offset.  Return NULL if
 * nothing is found, otherwise take a reference on the extent and return it.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		entry = NULL;
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup(inode, entry);
	}
out:
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
	return entry;
}

/*
 * Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = ordered_tree_search(inode, file_offset);
	if (!node) {
		node = ordered_tree_search(inode, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (btrfs_range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_range(inode, entry);
	}
	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list)
{
	struct rb_node *n;

	btrfs_assert_inode_locked(inode);

	spin_lock_irq(&inode->ordered_tree_lock);
	for (n = rb_first(&inode->ordered_tree); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;

		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		refcount_inc(&ordered->refs);
		trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
	}
	spin_unlock_irq(&inode->ordered_tree_lock);
}

/*
 * Lookup and return any ordered extent before @file_offset.  NULL is returned
 * if none is found.
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
	trace_btrfs_ordered_extent_lookup_first(inode, entry);
out:
	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

/*
 * Lookup the first ordered extent that overlaps the range
 * [@file_offset, @file_offset + @len).
 *
 * The difference between this and btrfs_lookup_first_ordered_extent() is
 * that this one won't return any ordered extent that does not overlap the
 * range.  And the difference against btrfs_lookup_ordered_extent() is that
 * this function ensures the first overlapping ordered extent gets returned.
 */
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
			struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct rb_node *node;
	struct rb_node *cur;
	struct rb_node *prev;
	struct rb_node *next;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = inode->ordered_tree.rb_node;
	/*
	 * Here we don't want to use ordered_tree_search(), which will use
	 * inode->ordered_tree_last and screw up the search order.
	 * And __tree_search() can't return the adjacent ordered extents
	 * either, thus here we do our own search.
	 */
	while (node) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset) {
			node = node->rb_left;
		} else if (file_offset >= entry_end(entry)) {
			node = node->rb_right;
		} else {
			/*
			 * Direct hit, got an ordered extent that starts at
			 * @file_offset
			 */
			goto out;
		}
	}
	if (!entry) {
		/* Empty tree */
		goto out;
	}

	cur = &entry->rb_node;
	/* We got an entry around @file_offset, check adjacent entries */
	if (entry->file_offset < file_offset) {
		prev = cur;
		next = rb_next(cur);
	} else {
		prev = rb_prev(cur);
		next = cur;
	}
	if (prev) {
		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
		if (btrfs_range_overlaps(entry, file_offset, len))
			goto out;
	}
	if (next) {
		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
		if (btrfs_range_overlaps(entry, file_offset, len))
			goto out;
	}
	/* No ordered extent in the range */
	entry = NULL;
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
	}

	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * Always return with the given range locked, ensuring that after it's called
 * no ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
}

/*
 * Try to lock the passed range in nowait mode, checking for pending ordered
 * extents in it.
 *
 * Return true if the range was locked and no pending ordered extents were
 * found in it, otherwise false (with the range left unlocked).
 */
bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
				  struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;

	if (!try_lock_extent(&inode->io_tree, start, end, cached_state))
		return false;

	ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
	if (!ordered)
		return true;

	btrfs_put_ordered_extent(ordered);
	unlock_extent(&inode->io_tree, start, end, cached_state);

	return false;
}

/* Split out a new ordered extent for the first @len bytes of @ordered. */
struct btrfs_ordered_extent *btrfs_split_ordered_extent(
			struct btrfs_ordered_extent *ordered, u64 len)
{
	struct btrfs_inode *inode = ordered->inode;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 file_offset = ordered->file_offset;
	u64 disk_bytenr = ordered->disk_bytenr;
	unsigned long flags = ordered->flags;
	struct btrfs_ordered_sum *sum, *tmpsum;
	struct btrfs_ordered_extent *new;
	struct rb_node *node;
	u64 offset = 0;

	trace_btrfs_ordered_extent_split(inode, ordered);

	ASSERT(!(flags & (1U << BTRFS_ORDERED_COMPRESSED)));

	/*
	 * The entire bio must be covered by the ordered extent, but we can't
	 * reduce the original extent to a zero length either.
	 */
	if (WARN_ON_ONCE(len >= ordered->num_bytes))
		return ERR_PTR(-EINVAL);
	/*
	 * If our ordered extent had an error there's no point in continuing.
	 * The error may have come from a transaction abort done either by this
	 * task or some other concurrent task, and the transaction abort path
	 * iterates over all existing ordered extents and sets the flag
	 * BTRFS_ORDERED_IOERR on them.
	 */
	if (unlikely(flags & (1U << BTRFS_ORDERED_IOERR))) {
		const int fs_error = BTRFS_FS_ERROR(fs_info);

		return fs_error ? ERR_PTR(fs_error) : ERR_PTR(-EIO);
	}
	/* We cannot split partially completed ordered extents. */
	if (ordered->bytes_left) {
		ASSERT(!(flags & ~BTRFS_ORDERED_TYPE_FLAGS));
		if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes))
			return ERR_PTR(-EINVAL);
	}
	/* We cannot split a compressed ordered extent. */
	if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes))
		return ERR_PTR(-EINVAL);

	new = alloc_ordered_extent(inode, file_offset, len, len, disk_bytenr,
				   len, 0, flags, ordered->compress_type);
	if (IS_ERR(new))
		return new;

	/* One ref for the tree. */
	refcount_inc(&new->refs);

	/*
	 * Take the root's ordered_extent_lock to avoid a race with
	 * btrfs_wait_ordered_extents() when updating the disk_bytenr and
	 * disk_num_bytes fields of the ordered extent below. And we disable
	 * IRQs because the inode's ordered_tree_lock is used in IRQ context
	 * elsewhere.
	 *
	 * There's no concern about a previous caller of
	 * btrfs_wait_ordered_extents() getting the trimmed ordered extent
	 * before we insert the new one, because even if it gets the ordered
	 * extent before it's trimmed and the new one inserted, right before it
	 * uses it or during its use, the ordered extent might have been
	 * trimmed in the meanwhile, and it missed the new ordered extent.
	 * There's no way around this and it's harmless for current use cases,
	 * so we take the root's ordered_extent_lock to fix that race during
	 * trimming and silence tools like KCSAN.
	 */
	spin_lock_irq(&root->ordered_extent_lock);
	spin_lock(&inode->ordered_tree_lock);

	/*
	 * We don't have overlapping ordered extents (that would imply double
	 * allocation of extents) and we checked above that the split length
	 * does not cross the ordered extent's num_bytes field, so there's
	 * no need to remove it and re-insert it in the tree.
	 */
	ordered->file_offset += len;
	ordered->disk_bytenr += len;
	ordered->num_bytes -= len;
	ordered->disk_num_bytes -= len;
	ordered->ram_bytes -= len;

	if (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)) {
		ASSERT(ordered->bytes_left == 0);
		new->bytes_left = 0;
	} else {
		ordered->bytes_left -= len;
	}

	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags)) {
		if (ordered->truncated_len > len) {
			ordered->truncated_len -= len;
		} else {
			new->truncated_len = ordered->truncated_len;
			ordered->truncated_len = 0;
		}
	}

	list_for_each_entry_safe(sum, tmpsum, &ordered->list, list) {
		if (offset == len)
			break;
		list_move_tail(&sum->list, &new->list);
		offset += sum->len;
	}

	node = tree_insert(&inode->ordered_tree, new->file_offset, &new->rb_node);
	if (unlikely(node))
		btrfs_panic(fs_info, -EEXIST,
			"inconsistency in ordered tree at offset %llu after split",
			new->file_offset);
	spin_unlock(&inode->ordered_tree_lock);

	list_add_tail(&new->root_extent_list, &root->ordered_extents);
	root->nr_ordered_extents++;
	spin_unlock_irq(&root->ordered_extent_lock);
	return new;
}

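/* Set up the slab cache for ordered extents at module load time. */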
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = KMEM_CACHE(btrfs_ordered_extent, 0);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2007 Oracle. All rights reserved.
4 */
5
6#include <linux/slab.h>
7#include <linux/blkdev.h>
8#include <linux/writeback.h>
9#include <linux/sched/mm.h>
10#include "misc.h"
11#include "ctree.h"
12#include "transaction.h"
13#include "btrfs_inode.h"
14#include "extent_io.h"
15#include "disk-io.h"
16#include "compression.h"
17#include "delalloc-space.h"
18#include "qgroup.h"
19#include "subpage.h"
20
21static struct kmem_cache *btrfs_ordered_extent_cache;
22
23static u64 entry_end(struct btrfs_ordered_extent *entry)
24{
25 if (entry->file_offset + entry->num_bytes < entry->file_offset)
26 return (u64)-1;
27 return entry->file_offset + entry->num_bytes;
28}
29
30/* returns NULL if the insertion worked, or it returns the node it did find
31 * in the tree
32 */
33static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
34 struct rb_node *node)
35{
36 struct rb_node **p = &root->rb_node;
37 struct rb_node *parent = NULL;
38 struct btrfs_ordered_extent *entry;
39
40 while (*p) {
41 parent = *p;
42 entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);
43
44 if (file_offset < entry->file_offset)
45 p = &(*p)->rb_left;
46 else if (file_offset >= entry_end(entry))
47 p = &(*p)->rb_right;
48 else
49 return parent;
50 }
51
52 rb_link_node(node, parent, p);
53 rb_insert_color(node, root);
54 return NULL;
55}
56
57/*
58 * look for a given offset in the tree, and if it can't be found return the
59 * first lesser offset
60 */
61static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
62 struct rb_node **prev_ret)
63{
64 struct rb_node *n = root->rb_node;
65 struct rb_node *prev = NULL;
66 struct rb_node *test;
67 struct btrfs_ordered_extent *entry;
68 struct btrfs_ordered_extent *prev_entry = NULL;
69
70 while (n) {
71 entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
72 prev = n;
73 prev_entry = entry;
74
75 if (file_offset < entry->file_offset)
76 n = n->rb_left;
77 else if (file_offset >= entry_end(entry))
78 n = n->rb_right;
79 else
80 return n;
81 }
82 if (!prev_ret)
83 return NULL;
84
85 while (prev && file_offset >= entry_end(prev_entry)) {
86 test = rb_next(prev);
87 if (!test)
88 break;
89 prev_entry = rb_entry(test, struct btrfs_ordered_extent,
90 rb_node);
91 if (file_offset < entry_end(prev_entry))
92 break;
93
94 prev = test;
95 }
96 if (prev)
97 prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
98 rb_node);
99 while (prev && file_offset < entry_end(prev_entry)) {
100 test = rb_prev(prev);
101 if (!test)
102 break;
103 prev_entry = rb_entry(test, struct btrfs_ordered_extent,
104 rb_node);
105 prev = test;
106 }
107 *prev_ret = prev;
108 return NULL;
109}
110
111static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
112 u64 len)
113{
114 if (file_offset + len <= entry->file_offset ||
115 entry->file_offset + entry->num_bytes <= file_offset)
116 return 0;
117 return 1;
118}
119
120/*
121 * look find the first ordered struct that has this offset, otherwise
122 * the first one less than this offset
123 */
124static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
125 u64 file_offset)
126{
127 struct rb_root *root = &tree->tree;
128 struct rb_node *prev = NULL;
129 struct rb_node *ret;
130 struct btrfs_ordered_extent *entry;
131
132 if (tree->last) {
133 entry = rb_entry(tree->last, struct btrfs_ordered_extent,
134 rb_node);
135 if (in_range(file_offset, entry->file_offset, entry->num_bytes))
136 return tree->last;
137 }
138 ret = __tree_search(root, file_offset, &prev);
139 if (!ret)
140 ret = prev;
141 if (ret)
142 tree->last = ret;
143 return ret;
144}
145
146/*
147 * Allocate and add a new ordered_extent into the per-inode tree.
148 *
149 * The tree is given a single reference on the ordered extent that was
150 * inserted.
151 */
152static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
153 u64 disk_bytenr, u64 num_bytes,
154 u64 disk_num_bytes, int type, int dio,
155 int compress_type)
156{
157 struct btrfs_root *root = inode->root;
158 struct btrfs_fs_info *fs_info = root->fs_info;
159 struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
160 struct rb_node *node;
161 struct btrfs_ordered_extent *entry;
162 int ret;
163
164 if (type == BTRFS_ORDERED_NOCOW || type == BTRFS_ORDERED_PREALLOC) {
165 /* For nocow write, we can release the qgroup rsv right now */
166 ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
167 if (ret < 0)
168 return ret;
169 ret = 0;
170 } else {
171 /*
172 * The ordered extent has reserved qgroup space, release now
173 * and pass the reserved number for qgroup_record to free.
174 */
175 ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
176 if (ret < 0)
177 return ret;
178 }
179 entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
180 if (!entry)
181 return -ENOMEM;
182
183 entry->file_offset = file_offset;
184 entry->disk_bytenr = disk_bytenr;
185 entry->num_bytes = num_bytes;
186 entry->disk_num_bytes = disk_num_bytes;
187 entry->bytes_left = num_bytes;
188 entry->inode = igrab(&inode->vfs_inode);
189 entry->compress_type = compress_type;
190 entry->truncated_len = (u64)-1;
191 entry->qgroup_rsv = ret;
192 entry->physical = (u64)-1;
193
194 ASSERT(type == BTRFS_ORDERED_REGULAR ||
195 type == BTRFS_ORDERED_NOCOW ||
196 type == BTRFS_ORDERED_PREALLOC ||
197 type == BTRFS_ORDERED_COMPRESSED);
198 set_bit(type, &entry->flags);
199
200 percpu_counter_add_batch(&fs_info->ordered_bytes, num_bytes,
201 fs_info->delalloc_batch);
202
203 if (dio)
204 set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
205
206 /* one ref for the tree */
207 refcount_set(&entry->refs, 1);
208 init_waitqueue_head(&entry->wait);
209 INIT_LIST_HEAD(&entry->list);
210 INIT_LIST_HEAD(&entry->log_list);
211 INIT_LIST_HEAD(&entry->root_extent_list);
212 INIT_LIST_HEAD(&entry->work_list);
213 init_completion(&entry->completion);
214
215 trace_btrfs_ordered_extent_add(inode, entry);
216
217 spin_lock_irq(&tree->lock);
218 node = tree_insert(&tree->tree, file_offset,
219 &entry->rb_node);
220 if (node)
221 btrfs_panic(fs_info, -EEXIST,
222 "inconsistency in ordered tree at offset %llu",
223 file_offset);
224 spin_unlock_irq(&tree->lock);
225
226 spin_lock(&root->ordered_extent_lock);
227 list_add_tail(&entry->root_extent_list,
228 &root->ordered_extents);
229 root->nr_ordered_extents++;
230 if (root->nr_ordered_extents == 1) {
231 spin_lock(&fs_info->ordered_root_lock);
232 BUG_ON(!list_empty(&root->ordered_root));
233 list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
234 spin_unlock(&fs_info->ordered_root_lock);
235 }
236 spin_unlock(&root->ordered_extent_lock);
237
238 /*
239 * We don't need the count_max_extents here, we can assume that all of
240 * that work has been done at higher layers, so this is truly the
241 * smallest the extent is going to get.
242 */
243 spin_lock(&inode->lock);
244 btrfs_mod_outstanding_extents(inode, 1);
245 spin_unlock(&inode->lock);
246
247 return 0;
248}
249
250int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
251 u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes,
252 int type)
253{
254 ASSERT(type == BTRFS_ORDERED_REGULAR ||
255 type == BTRFS_ORDERED_NOCOW ||
256 type == BTRFS_ORDERED_PREALLOC);
257 return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
258 num_bytes, disk_num_bytes, type, 0,
259 BTRFS_COMPRESS_NONE);
260}
261
262int btrfs_add_ordered_extent_dio(struct btrfs_inode *inode, u64 file_offset,
263 u64 disk_bytenr, u64 num_bytes,
264 u64 disk_num_bytes, int type)
265{
266 ASSERT(type == BTRFS_ORDERED_REGULAR ||
267 type == BTRFS_ORDERED_NOCOW ||
268 type == BTRFS_ORDERED_PREALLOC);
269 return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
270 num_bytes, disk_num_bytes, type, 1,
271 BTRFS_COMPRESS_NONE);
272}
273
274int btrfs_add_ordered_extent_compress(struct btrfs_inode *inode, u64 file_offset,
275 u64 disk_bytenr, u64 num_bytes,
276 u64 disk_num_bytes, int compress_type)
277{
278 ASSERT(compress_type != BTRFS_COMPRESS_NONE);
279 return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
280 num_bytes, disk_num_bytes,
281 BTRFS_ORDERED_COMPRESSED, 0,
282 compress_type);
283}
284
285/*
286 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
287 * when an ordered extent is finished. If the list covers more than one
288 * ordered extent, it is split across multiples.
289 */
290void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
291 struct btrfs_ordered_sum *sum)
292{
293 struct btrfs_ordered_inode_tree *tree;
294
295 tree = &BTRFS_I(entry->inode)->ordered_tree;
296 spin_lock_irq(&tree->lock);
297 list_add_tail(&sum->list, &entry->list);
298 spin_unlock_irq(&tree->lock);
299}
300
301/*
302 * Mark all ordered extents io inside the specified range finished.
303 *
304 * @page: The invovled page for the opeartion.
305 * For uncompressed buffered IO, the page status also needs to be
306 * updated to indicate whether the pending ordered io is finished.
307 * Can be NULL for direct IO and compressed write.
308 * For these cases, callers are ensured they won't execute the
309 * endio function twice.
310 * @finish_func: The function to be executed when all the IO of an ordered
311 * extent are finished.
312 *
313 * This function is called for endio, thus the range must have ordered
314 * extent(s) coveri it.
315 */
316void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
317 struct page *page, u64 file_offset,
318 u64 num_bytes, btrfs_func_t finish_func,
319 bool uptodate)
320{
321 struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
322 struct btrfs_fs_info *fs_info = inode->root->fs_info;
323 struct btrfs_workqueue *wq;
324 struct rb_node *node;
325 struct btrfs_ordered_extent *entry = NULL;
326 unsigned long flags;
327 u64 cur = file_offset;
328
329 if (btrfs_is_free_space_inode(inode))
330 wq = fs_info->endio_freespace_worker;
331 else
332 wq = fs_info->endio_write_workers;
333
334 if (page)
335 ASSERT(page->mapping && page_offset(page) <= file_offset &&
336 file_offset + num_bytes <= page_offset(page) + PAGE_SIZE);
337
338 spin_lock_irqsave(&tree->lock, flags);
339 while (cur < file_offset + num_bytes) {
340 u64 entry_end;
341 u64 end;
342 u32 len;
343
344 node = tree_search(tree, cur);
345 /* No ordered extents at all */
346 if (!node)
347 break;
348
349 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
350 entry_end = entry->file_offset + entry->num_bytes;
351 /*
352 * |<-- OE --->| |
353 * cur
354 * Go to next OE.
355 */
356 if (cur >= entry_end) {
357 node = rb_next(node);
358 /* No more ordered extents, exit */
359 if (!node)
360 break;
361 entry = rb_entry(node, struct btrfs_ordered_extent,
362 rb_node);
363
364 /* Go to next ordered extent and continue */
365 cur = entry->file_offset;
366 continue;
367 }
368 /*
369 * | |<--- OE --->|
370 * cur
371 * Go to the start of OE.
372 */
373 if (cur < entry->file_offset) {
374 cur = entry->file_offset;
375 continue;
376 }
377
378 /*
379 * Now we are definitely inside one ordered extent.
380 *
381 * |<--- OE --->|
382 * |
383 * cur
384 */
385 end = min(entry->file_offset + entry->num_bytes,
386 file_offset + num_bytes) - 1;
387 ASSERT(end + 1 - cur < U32_MAX);
388 len = end + 1 - cur;
389
390 if (page) {
391 /*
392 * Ordered (Private2) bit indicates whether we still
393 * have pending io unfinished for the ordered extent.
394 *
395 * If there's no such bit, we need to skip to next range.
396 */
397 if (!btrfs_page_test_ordered(fs_info, page, cur, len)) {
398 cur += len;
399 continue;
400 }
401 btrfs_page_clear_ordered(fs_info, page, cur, len);
402 }
403
404 /* Now we're fine to update the accounting */
405 if (unlikely(len > entry->bytes_left)) {
406 WARN_ON(1);
407 btrfs_crit(fs_info,
408"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%u left=%llu",
409 inode->root->root_key.objectid,
410 btrfs_ino(inode),
411 entry->file_offset,
412 entry->num_bytes,
413 len, entry->bytes_left);
414 entry->bytes_left = 0;
415 } else {
416 entry->bytes_left -= len;
417 }
418
419 if (!uptodate)
420 set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
421
422 /*
423 * All the IO of the ordered extent is finished, we need to queue
424 * the finish_func to be executed.
425 */
426 if (entry->bytes_left == 0) {
427 set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
428 cond_wake_up(&entry->wait);
429 refcount_inc(&entry->refs);
430 spin_unlock_irqrestore(&tree->lock, flags);
431 btrfs_init_work(&entry->work, finish_func, NULL, NULL);
432 btrfs_queue_work(wq, &entry->work);
433 spin_lock_irqsave(&tree->lock, flags);
434 }
435 cur += len;
436 }
437 spin_unlock_irqrestore(&tree->lock, flags);
438}
439
440/*
441 * Finish IO for one ordered extent across a given range. The range can only
442 * contain one ordered extent.
443 *
444 * @cached: The cached ordered extent. If not NULL, we can skip the tree
445 * search and use the ordered extent directly.
446 * Will be also used to store the finished ordered extent.
447 * @file_offset: File offset for the finished IO
448 * @io_size: Length of the finish IO range
449 * @uptodate: If the IO finishes without problem
450 *
451 * Return true if the ordered extent is finished in the range, and update
452 * @cached.
453 * Return false otherwise.
454 *
455 * NOTE: The range can NOT cross multiple ordered extents.
456 * Thus caller should ensure the range doesn't cross ordered extents.
457 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				    struct btrfs_ordered_extent **cached,
				    u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	bool finished = false;

	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		goto out;

	if (io_size > entry->bytes_left)
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);

	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		/*
		 * Ensure only one caller can set the flag and report the
		 * extent as finished.
		 */
		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	}
out:
	if (finished && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return finished;
}

/*
 * Drop a reference on an ordered extent. This frees the extent if the last
 * reference is dropped.
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(list_empty(&entry->log_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * Remove an ordered extent from the tree. No references are dropped, but any
 * waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = btrfs_inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	bool pending;

	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root)
		btrfs_delalloc_release_metadata(btrfs_inode, entry->num_bytes,
						false);

	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
				 fs_info->delalloc_batch);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
	spin_unlock_irq(&tree->lock);

	/*
	 * The current running transaction is waiting on us; we need to let it
	 * know that we're complete and wake it up.
	 */
	if (pending) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality: it should be
		 * set, but if it isn't we don't want to deref/assert under
		 * the spin lock. So be nice and check if trans is set, but
		 * ASSERT() so that a developer will notice if it isn't.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans);
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

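/*
 * Flush worker callback: start IO for one ordered extent, wait for it to
 * complete, then signal anyone blocked on its completion.
 */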
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered, 1);
	complete(&ordered->completion);
}

/*
 * Wait for all the ordered extents in a root. This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
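	/*
	 * Splice all entries onto a private list first, so new ordered
	 * extents can be added to the root while we iterate.
	 */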
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

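/*
 * Wait for ordered extents in all roots, limited to at most @nr extents and
 * to those overlapping the given range of disk bytenrs.
 */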
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If @wait is set, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the IO completion code to insert metadata
 * into the btree corresponding to the extent.
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;
	struct btrfs_inode *inode = BTRFS_I(entry->inode);

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * Pages in the range can be dirty, clean or under writeback. We start
	 * IO on any dirty ones so the wait doesn't stall waiting for the
	 * flusher thread to find them.
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/*
	 * Start IO across the range first to instantiate any delalloc
	 * extents.
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
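	/*
	 * Walk the range backwards: wait for the ordered extent found around
	 * @end, then continue from just below its start offset.
	 */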
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(ordered, 1);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error, save the error but
		 * don't exit without first waiting for all other ordered
		 * extents in the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * Find an ordered extent corresponding to @file_offset. Return NULL if
 * nothing is found, otherwise take a reference on the extent and return it.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;

	tree = &inode->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		entry = NULL;
	if (entry)
		refcount_inc(&entry->refs);
out:
	spin_unlock_irqrestore(&tree->lock, flags);
	return entry;
}

/*
 * Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (btrfs_range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *n;

	ASSERT(inode_is_locked(&inode->vfs_inode));

	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;

		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		refcount_inc(&ordered->refs);
	}
	spin_unlock_irq(&tree->lock);
}

/*
 * Lookup and return any extent before 'file_offset'. NULL is returned if
 * none is found.
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Lookup the first ordered extent that overlaps the range
 * [@file_offset, @file_offset + @len).
 *
 * The difference between this and btrfs_lookup_first_ordered_extent() is
 * that this one won't return any ordered extent that does not overlap the
 * range. And the difference from btrfs_lookup_ordered_extent() is that this
 * function ensures the first ordered extent in the range gets returned.
 */
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct rb_node *cur;
	struct rb_node *prev;
	struct rb_node *next;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&tree->lock);
	node = tree->tree.rb_node;
	/*
	 * Here we don't want to use tree_search(), which would use tree->last
	 * and screw up the search order. And __tree_search() can't return
	 * adjacent ordered extents either, thus we do our own search here.
	 */
	while (node) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset) {
			node = node->rb_left;
		} else if (file_offset >= entry_end(entry)) {
			node = node->rb_right;
		} else {
			/*
			 * Direct hit, got an ordered extent that covers
			 * @file_offset.
			 */
			goto out;
		}
	}
	if (!entry) {
		/* Empty tree */
		goto out;
	}

	cur = &entry->rb_node;
	/* We got an entry around @file_offset, check adjacent entries */
	if (entry->file_offset < file_offset) {
		prev = cur;
		next = rb_next(cur);
	} else {
		prev = rb_prev(cur);
		next = cur;
	}
	if (prev) {
		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
		if (btrfs_range_overlaps(entry, file_offset, len))
			goto out;
	}
	if (next) {
		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
		if (btrfs_range_overlaps(entry, file_offset, len))
			goto out;
	}
	/* No ordered extent in the range */
	entry = NULL;
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
 * pending ordered extents in it are run to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * This function always returns with the given range locked, ensuring that
 * after it's called no ordered extent can be pending.
 */
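/*
 * Illustrative caller-side sketch (not an actual caller in this file):
 *
 *	struct extent_state *cached = NULL;
 *
 *	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached);
 *	... operate on [start, end] with no ordered extents pending ...
 *	unlock_extent_cached(&inode->io_tree, start, end, &cached);
 */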
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent_bits(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent_cached(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}
}

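/*
 * Clone the sub-range [@pos, @pos + @len) of @ordered as a new ordered
 * extent of the same type (regular, direct IO or compressed).
 */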
static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
				u64 len)
{
	struct inode *inode = ordered->inode;
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	u64 file_offset = ordered->file_offset + pos;
	u64 disk_bytenr = ordered->disk_bytenr + pos;
	u64 num_bytes = len;
	u64 disk_num_bytes = len;
	int type;
	unsigned long flags_masked = ordered->flags & ~(1 << BTRFS_ORDERED_DIRECT);
	int compress_type = ordered->compress_type;
	unsigned long weight;
	int ret;

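	/* At most one type bit may be set; derive the type from the lowest one. */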
	weight = hweight_long(flags_masked);
	WARN_ON_ONCE(weight > 1);
	if (!weight)
		type = 0;
	else
		type = __ffs(flags_masked);

	/*
	 * The splitting extent is already counted and will be added again in
	 * btrfs_add_ordered_extent_*(). Subtract num_bytes to avoid double
	 * counting.
	 */
	percpu_counter_add_batch(&fs_info->ordered_bytes, -num_bytes,
				 fs_info->delalloc_batch);
	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered->flags)) {
		WARN_ON_ONCE(1);
		ret = btrfs_add_ordered_extent_compress(BTRFS_I(inode),
				file_offset, disk_bytenr, num_bytes,
				disk_num_bytes, compress_type);
	} else if (test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
		ret = btrfs_add_ordered_extent_dio(BTRFS_I(inode), file_offset,
				disk_bytenr, num_bytes, disk_num_bytes, type);
	} else {
		ret = btrfs_add_ordered_extent(BTRFS_I(inode), file_offset,
				disk_bytenr, num_bytes, disk_num_bytes, type);
	}

	return ret;
}

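/*
 * Split @ordered into up to three pieces: trim @pre bytes from the front and
 * @post bytes from the tail, shrink the original in place and clone the
 * trimmed ranges as new ordered extents.
 */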
int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
			       u64 post)
{
	struct inode *inode = ordered->inode;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct rb_node *node;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret = 0;

	spin_lock_irq(&tree->lock);
	/* Remove from tree once */
	node = &ordered->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;

	ordered->file_offset += pre;
	ordered->disk_bytenr += pre;
	ordered->num_bytes -= (pre + post);
	ordered->disk_num_bytes -= (pre + post);
	ordered->bytes_left -= (pre + post);

	/* Re-insert the node */
	node = tree_insert(&tree->tree, ordered->file_offset, &ordered->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			"zoned: inconsistency in ordered tree at offset %llu",
			ordered->file_offset);

	spin_unlock_irq(&tree->lock);

	if (pre)
		ret = clone_ordered_extent(ordered, 0, pre);
	if (ret == 0 && post)
		ret = clone_ordered_extent(ordered, pre + ordered->disk_num_bytes,
					   post);

	return ret;
}

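/* Create the slab cache for ordered extents; called once at module init. */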
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
					sizeof(struct btrfs_ordered_extent), 0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}