1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2011 Fujitsu. All rights reserved.
4 * Written by Miao Xie <miaox@cn.fujitsu.com>
5 */
6
7#include <linux/slab.h>
8#include <linux/iversion.h>
9#include "ctree.h"
10#include "fs.h"
11#include "messages.h"
12#include "misc.h"
13#include "delayed-inode.h"
14#include "disk-io.h"
15#include "transaction.h"
16#include "qgroup.h"
17#include "locking.h"
18#include "inode-item.h"
19#include "space-info.h"
20#include "accessors.h"
21#include "file-item.h"
22
23#define BTRFS_DELAYED_WRITEBACK 512
24#define BTRFS_DELAYED_BACKGROUND 128
25#define BTRFS_DELAYED_BATCH 16
26
27static struct kmem_cache *delayed_node_cache;
28
29int __init btrfs_delayed_inode_init(void)
30{
31 delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
32 sizeof(struct btrfs_delayed_node),
33 0,
34 SLAB_MEM_SPREAD,
35 NULL);
36 if (!delayed_node_cache)
37 return -ENOMEM;
38 return 0;
39}
40
41void __cold btrfs_delayed_inode_exit(void)
42{
43 kmem_cache_destroy(delayed_node_cache);
44}
45
46static inline void btrfs_init_delayed_node(
47 struct btrfs_delayed_node *delayed_node,
48 struct btrfs_root *root, u64 inode_id)
49{
50 delayed_node->root = root;
51 delayed_node->inode_id = inode_id;
52 refcount_set(&delayed_node->refs, 0);
53 delayed_node->ins_root = RB_ROOT_CACHED;
54 delayed_node->del_root = RB_ROOT_CACHED;
55 mutex_init(&delayed_node->mutex);
56 INIT_LIST_HEAD(&delayed_node->n_list);
57 INIT_LIST_HEAD(&delayed_node->p_list);
58}
59
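/*
 * Look up and grab a reference on the delayed node of an inode. Returns the
 * node with its reference count increased, or NULL if the inode has no
 * delayed node (or the node is in the process of being freed).
 */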
60static struct btrfs_delayed_node *btrfs_get_delayed_node(
61 struct btrfs_inode *btrfs_inode)
62{
63 struct btrfs_root *root = btrfs_inode->root;
64 u64 ino = btrfs_ino(btrfs_inode);
65 struct btrfs_delayed_node *node;
66
67 node = READ_ONCE(btrfs_inode->delayed_node);
68 if (node) {
69 refcount_inc(&node->refs);
70 return node;
71 }
72
73 spin_lock(&root->inode_lock);
74 node = xa_load(&root->delayed_nodes, ino);
75
76 if (node) {
77 if (btrfs_inode->delayed_node) {
78 refcount_inc(&node->refs); /* can be accessed */
79 BUG_ON(btrfs_inode->delayed_node != node);
80 spin_unlock(&root->inode_lock);
81 return node;
82 }
83
84 /*
85 * It's possible that we're racing into the middle of removing
86 * this node from the xarray. In this case, the refcount
87 * was zero and it should never go back to one. Just return
88 * NULL like it was never in the xarray at all; our release
89 * function is in the process of removing it.
90 *
91 * Some implementations of refcount_inc refuse to bump the
92 * refcount once it has hit zero. If we don't do this dance
93 * here, refcount_inc() may decide to just WARN_ONCE() instead
94 * of actually bumping the refcount.
95 *
96 * If this node is properly in the xarray, we want to bump the
97 * refcount twice, once for the inode and once for this get
98 * operation.
99 */
100 if (refcount_inc_not_zero(&node->refs)) {
101 refcount_inc(&node->refs);
102 btrfs_inode->delayed_node = node;
103 } else {
104 node = NULL;
105 }
106
107 spin_unlock(&root->inode_lock);
108 return node;
109 }
110 spin_unlock(&root->inode_lock);
111
112 return NULL;
113}
114
115/* Will return either the node or PTR_ERR(-ENOMEM) */
116static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
117 struct btrfs_inode *btrfs_inode)
118{
119 struct btrfs_delayed_node *node;
120 struct btrfs_root *root = btrfs_inode->root;
121 u64 ino = btrfs_ino(btrfs_inode);
122 int ret;
123 void *ptr;
124
125again:
126 node = btrfs_get_delayed_node(btrfs_inode);
127 if (node)
128 return node;
129
130 node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
131 if (!node)
132 return ERR_PTR(-ENOMEM);
133 btrfs_init_delayed_node(node, root, ino);
134
135 /* Cached in the inode and can be accessed. */
136 refcount_set(&node->refs, 2);
137
138 /* Allocate and reserve the slot; a reserved slot still reads back as NULL from xa_load(). */
139 ret = xa_reserve(&root->delayed_nodes, ino, GFP_NOFS);
140 if (ret == -ENOMEM) {
141 kmem_cache_free(delayed_node_cache, node);
142 return ERR_PTR(-ENOMEM);
143 }
144 spin_lock(&root->inode_lock);
145 ptr = xa_load(&root->delayed_nodes, ino);
146 if (ptr) {
147 /* Somebody inserted it, go back and read it. */
148 spin_unlock(&root->inode_lock);
149 kmem_cache_free(delayed_node_cache, node);
150 node = NULL;
151 goto again;
152 }
153 ptr = xa_store(&root->delayed_nodes, ino, node, GFP_ATOMIC);
154 ASSERT(xa_err(ptr) != -EINVAL);
155 ASSERT(xa_err(ptr) != -ENOMEM);
156 ASSERT(ptr == NULL);
157 btrfs_inode->delayed_node = node;
158 spin_unlock(&root->inode_lock);
159
160 return node;
161}
162
163/*
164 * Call it when holding delayed_node->mutex
165 *
166 * If mod = 1, add this node into the prepared list.
167 */
168static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
169 struct btrfs_delayed_node *node,
170 int mod)
171{
172 spin_lock(&root->lock);
173 if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
174 if (!list_empty(&node->p_list))
175 list_move_tail(&node->p_list, &root->prepare_list);
176 else if (mod)
177 list_add_tail(&node->p_list, &root->prepare_list);
178 } else {
179 list_add_tail(&node->n_list, &root->node_list);
180 list_add_tail(&node->p_list, &root->prepare_list);
181 refcount_inc(&node->refs); /* inserted into list */
182 root->nodes++;
183 set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
184 }
185 spin_unlock(&root->lock);
186}
187
188/* Call it when holding delayed_node->mutex */
189static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
190 struct btrfs_delayed_node *node)
191{
192 spin_lock(&root->lock);
193 if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
194 root->nodes--;
195 refcount_dec(&node->refs); /* not in the list */
196 list_del_init(&node->n_list);
197 if (!list_empty(&node->p_list))
198 list_del_init(&node->p_list);
199 clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
200 }
201 spin_unlock(&root->lock);
202}
203
204static struct btrfs_delayed_node *btrfs_first_delayed_node(
205 struct btrfs_delayed_root *delayed_root)
206{
207 struct list_head *p;
208 struct btrfs_delayed_node *node = NULL;
209
210 spin_lock(&delayed_root->lock);
211 if (list_empty(&delayed_root->node_list))
212 goto out;
213
214 p = delayed_root->node_list.next;
215 node = list_entry(p, struct btrfs_delayed_node, n_list);
216 refcount_inc(&node->refs);
217out:
218 spin_unlock(&delayed_root->lock);
219
220 return node;
221}
222
223static struct btrfs_delayed_node *btrfs_next_delayed_node(
224 struct btrfs_delayed_node *node)
225{
226 struct btrfs_delayed_root *delayed_root;
227 struct list_head *p;
228 struct btrfs_delayed_node *next = NULL;
229
230 delayed_root = node->root->fs_info->delayed_root;
231 spin_lock(&delayed_root->lock);
232 if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
233 /* not in the list */
234 if (list_empty(&delayed_root->node_list))
235 goto out;
236 p = delayed_root->node_list.next;
237 } else if (list_is_last(&node->n_list, &delayed_root->node_list))
238 goto out;
239 else
240 p = node->n_list.next;
241
242 next = list_entry(p, struct btrfs_delayed_node, n_list);
243 refcount_inc(&next->refs);
244out:
245 spin_unlock(&delayed_root->lock);
246
247 return next;
248}
249
250static void __btrfs_release_delayed_node(
251 struct btrfs_delayed_node *delayed_node,
252 int mod)
253{
254 struct btrfs_delayed_root *delayed_root;
255
256 if (!delayed_node)
257 return;
258
259 delayed_root = delayed_node->root->fs_info->delayed_root;
260
261 mutex_lock(&delayed_node->mutex);
262 if (delayed_node->count)
263 btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
264 else
265 btrfs_dequeue_delayed_node(delayed_root, delayed_node);
266 mutex_unlock(&delayed_node->mutex);
267
268 if (refcount_dec_and_test(&delayed_node->refs)) {
269 struct btrfs_root *root = delayed_node->root;
270
271 spin_lock(&root->inode_lock);
272 /*
273 * Once our refcount goes to zero, nobody is allowed to bump it
274 * back up. We can delete it now.
275 */
276 ASSERT(refcount_read(&delayed_node->refs) == 0);
277 xa_erase(&root->delayed_nodes, delayed_node->inode_id);
278 spin_unlock(&root->inode_lock);
279 kmem_cache_free(delayed_node_cache, delayed_node);
280 }
281}
282
283static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
284{
285 __btrfs_release_delayed_node(node, 0);
286}
287
288static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
289 struct btrfs_delayed_root *delayed_root)
290{
291 struct list_head *p;
292 struct btrfs_delayed_node *node = NULL;
293
294 spin_lock(&delayed_root->lock);
295 if (list_empty(&delayed_root->prepare_list))
296 goto out;
297
298 p = delayed_root->prepare_list.next;
299 list_del_init(p);
300 node = list_entry(p, struct btrfs_delayed_node, p_list);
301 refcount_inc(&node->refs);
302out:
303 spin_unlock(&delayed_root->lock);
304
305 return node;
306}
307
308static inline void btrfs_release_prepared_delayed_node(
309 struct btrfs_delayed_node *node)
310{
311 __btrfs_release_delayed_node(node, 1);
312}
313
314static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u16 data_len,
315 struct btrfs_delayed_node *node,
316 enum btrfs_delayed_item_type type)
317{
318 struct btrfs_delayed_item *item;
319
320 item = kmalloc(struct_size(item, data, data_len), GFP_NOFS);
321 if (item) {
322 item->data_len = data_len;
323 item->type = type;
324 item->bytes_reserved = 0;
325 item->delayed_node = node;
326 RB_CLEAR_NODE(&item->rb_node);
327 INIT_LIST_HEAD(&item->log_list);
328 item->logged = false;
329 refcount_set(&item->refs, 1);
330 }
331 return item;
332}
333
334/*
335 * Look up the delayed item by key.
336 *
337 * @root:  the rbtree root of the delayed node (insertion or deletion tree)
338 * @index: the dir index value to lookup (offset of a dir index key)
339 *
340 * Return the delayed item with the given index, or NULL if there is no
341 * delayed item with such an index in the tree.
342 */
343static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
344 struct rb_root *root,
345 u64 index)
346{
347 struct rb_node *node = root->rb_node;
348 struct btrfs_delayed_item *delayed_item = NULL;
349
350 while (node) {
351 delayed_item = rb_entry(node, struct btrfs_delayed_item,
352 rb_node);
353 if (delayed_item->index < index)
354 node = node->rb_right;
355 else if (delayed_item->index > index)
356 node = node->rb_left;
357 else
358 return delayed_item;
359 }
360
361 return NULL;
362}
363
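/*
 * Insert a delayed item into the insertion or deletion rbtree of its delayed
 * node, keyed by the dir index. Returns 0 on success or -EEXIST if an item
 * with the same index already exists in that tree.
 */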
364static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
365 struct btrfs_delayed_item *ins)
366{
367 struct rb_node **p, *node;
368 struct rb_node *parent_node = NULL;
369 struct rb_root_cached *root;
370 struct btrfs_delayed_item *item;
371 bool leftmost = true;
372
373 if (ins->type == BTRFS_DELAYED_INSERTION_ITEM)
374 root = &delayed_node->ins_root;
375 else
376 root = &delayed_node->del_root;
377
378 p = &root->rb_root.rb_node;
379 node = &ins->rb_node;
380
381 while (*p) {
382 parent_node = *p;
383 item = rb_entry(parent_node, struct btrfs_delayed_item,
384 rb_node);
385
386 if (item->index < ins->index) {
387 p = &(*p)->rb_right;
388 leftmost = false;
389 } else if (item->index > ins->index) {
390 p = &(*p)->rb_left;
391 } else {
392 return -EEXIST;
393 }
394 }
395
396 rb_link_node(node, parent_node, p);
397 rb_insert_color_cached(node, root, leftmost);
398
399 if (ins->type == BTRFS_DELAYED_INSERTION_ITEM &&
400 ins->index >= delayed_node->index_cnt)
401 delayed_node->index_cnt = ins->index + 1;
402
403 delayed_node->count++;
404 atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
405 return 0;
406}
407
408static void finish_one_item(struct btrfs_delayed_root *delayed_root)
409{
410 int seq = atomic_inc_return(&delayed_root->items_seq);
411
412 /* atomic_dec_return implies a barrier */
413 if ((atomic_dec_return(&delayed_root->items) <
414 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
415 cond_wake_up_nomb(&delayed_root->wait);
416}
417
418static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
419{
420 struct btrfs_delayed_node *delayed_node = delayed_item->delayed_node;
421 struct rb_root_cached *root;
422 struct btrfs_delayed_root *delayed_root;
423
424 /* Not inserted, ignore it. */
425 if (RB_EMPTY_NODE(&delayed_item->rb_node))
426 return;
427
428 /* If it's in an rbtree, then we need to have the delayed node locked. */
429 lockdep_assert_held(&delayed_node->mutex);
430
431 delayed_root = delayed_node->root->fs_info->delayed_root;
432
433 BUG_ON(!delayed_root);
434
435 if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM)
436 root = &delayed_node->ins_root;
437 else
438 root = &delayed_node->del_root;
439
440 rb_erase_cached(&delayed_item->rb_node, root);
441 RB_CLEAR_NODE(&delayed_item->rb_node);
442 delayed_node->count--;
443
444 finish_one_item(delayed_root);
445}
446
447static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
448{
449 if (item) {
450 __btrfs_remove_delayed_item(item);
451 if (refcount_dec_and_test(&item->refs))
452 kfree(item);
453 }
454}
455
456static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
457 struct btrfs_delayed_node *delayed_node)
458{
459 struct rb_node *p;
460 struct btrfs_delayed_item *item = NULL;
461
462 p = rb_first_cached(&delayed_node->ins_root);
463 if (p)
464 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
465
466 return item;
467}
468
469static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
470 struct btrfs_delayed_node *delayed_node)
471{
472 struct rb_node *p;
473 struct btrfs_delayed_item *item = NULL;
474
475 p = rb_first_cached(&delayed_node->del_root);
476 if (p)
477 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
478
479 return item;
480}
481
482static struct btrfs_delayed_item *__btrfs_next_delayed_item(
483 struct btrfs_delayed_item *item)
484{
485 struct rb_node *p;
486 struct btrfs_delayed_item *next = NULL;
487
488 p = rb_next(&item->rb_node);
489 if (p)
490 next = rb_entry(p, struct btrfs_delayed_item, rb_node);
491
492 return next;
493}
494
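/*
 * Reserve metadata space for a delayed item by migrating it from the
 * transaction's block reserve into the global delayed_block_rsv. Only delayed
 * deletion items record the reserved bytes in item->bytes_reserved, since for
 * insertion items the space is tracked per leaf on the delayed node instead.
 */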
495static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
496 struct btrfs_delayed_item *item)
497{
498 struct btrfs_block_rsv *src_rsv;
499 struct btrfs_block_rsv *dst_rsv;
500 struct btrfs_fs_info *fs_info = trans->fs_info;
501 u64 num_bytes;
502 int ret;
503
504 if (!trans->bytes_reserved)
505 return 0;
506
507 src_rsv = trans->block_rsv;
508 dst_rsv = &fs_info->delayed_block_rsv;
509
510 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
511
512 /*
513 * Here we migrate space rsv from the transaction rsv, since we have
514 * already reserved space when starting a transaction. So there is no
515 * need to reserve qgroup space here.
516 */
517 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
518 if (!ret) {
519 trace_btrfs_space_reservation(fs_info, "delayed_item",
520 item->delayed_node->inode_id,
521 num_bytes, 1);
522 /*
523 * For insertions we track reserved metadata space by accounting
524 * for the number of leaves that will be used, based on the delayed
525 * node's curr_index_batch_size and index_item_leaves fields.
526 */
527 if (item->type == BTRFS_DELAYED_DELETION_ITEM)
528 item->bytes_reserved = num_bytes;
529 }
530
531 return ret;
532}
533
534static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
535 struct btrfs_delayed_item *item)
536{
537 struct btrfs_block_rsv *rsv;
538 struct btrfs_fs_info *fs_info = root->fs_info;
539
540 if (!item->bytes_reserved)
541 return;
542
543 rsv = &fs_info->delayed_block_rsv;
544 /*
545 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
546 * to release/reserve qgroup space.
547 */
548 trace_btrfs_space_reservation(fs_info, "delayed_item",
549 item->delayed_node->inode_id,
550 item->bytes_reserved, 0);
551 btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
552}
553
554static void btrfs_delayed_item_release_leaves(struct btrfs_delayed_node *node,
555 unsigned int num_leaves)
556{
557 struct btrfs_fs_info *fs_info = node->root->fs_info;
558 const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, num_leaves);
559
560 /* There are no space reservations during log replay, bail out. */
561 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
562 return;
563
564 trace_btrfs_space_reservation(fs_info, "delayed_item", node->inode_id,
565 bytes, 0);
566 btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv, bytes, NULL);
567}
568
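/*
 * Reserve metadata space for a delayed inode update, either by migrating it
 * from the transaction's block reserve or, when the transaction was joined
 * without a reservation, by reserving new space (including qgroup metadata).
 */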
569static int btrfs_delayed_inode_reserve_metadata(
570 struct btrfs_trans_handle *trans,
571 struct btrfs_root *root,
572 struct btrfs_delayed_node *node)
573{
574 struct btrfs_fs_info *fs_info = root->fs_info;
575 struct btrfs_block_rsv *src_rsv;
576 struct btrfs_block_rsv *dst_rsv;
577 u64 num_bytes;
578 int ret;
579
580 src_rsv = trans->block_rsv;
581 dst_rsv = &fs_info->delayed_block_rsv;
582
583 num_bytes = btrfs_calc_metadata_size(fs_info, 1);
584
585 /*
586 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
587 * which doesn't reserve space for speed. This is a problem since we
588 * still need to reserve space for this update, so try to reserve the
589 * space.
590 *
591 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
592 * we always reserve enough to update the inode item.
593 */
594 if (!src_rsv || (!trans->bytes_reserved &&
595 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
596 ret = btrfs_qgroup_reserve_meta(root, num_bytes,
597 BTRFS_QGROUP_RSV_META_PREALLOC, true);
598 if (ret < 0)
599 return ret;
600 ret = btrfs_block_rsv_add(fs_info, dst_rsv, num_bytes,
601 BTRFS_RESERVE_NO_FLUSH);
602 /* NO_FLUSH could only fail with -ENOSPC */
603 ASSERT(ret == 0 || ret == -ENOSPC);
604 if (ret)
605 btrfs_qgroup_free_meta_prealloc(root, num_bytes);
606 } else {
607 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
608 }
609
610 if (!ret) {
611 trace_btrfs_space_reservation(fs_info, "delayed_inode",
612 node->inode_id, num_bytes, 1);
613 node->bytes_reserved = num_bytes;
614 }
615
616 return ret;
617}
618
619static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
620 struct btrfs_delayed_node *node,
621 bool qgroup_free)
622{
623 struct btrfs_block_rsv *rsv;
624
625 if (!node->bytes_reserved)
626 return;
627
628 rsv = &fs_info->delayed_block_rsv;
629 trace_btrfs_space_reservation(fs_info, "delayed_inode",
630 node->inode_id, node->bytes_reserved, 0);
631 btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
632 if (qgroup_free)
633 btrfs_qgroup_free_meta_prealloc(node->root,
634 node->bytes_reserved);
635 else
636 btrfs_qgroup_convert_reserved_meta(node->root,
637 node->bytes_reserved);
638 node->bytes_reserved = 0;
639}
640
641/*
642 * Insert a single delayed item or a batch of delayed items, as many as possible
643 * that fit in a leaf. The delayed items (dir index keys) are sorted by their key
644 * in the rbtree, and if there's a gap between two consecutive dir index items,
645 * then it means at some point we had delayed dir indexes to add but they got
646 * removed (by btrfs_delete_delayed_dir_index()) before we attempted to flush them
647 * into the subvolume tree. Dir index keys also have their offsets coming from a
648 * monotonically increasing counter, so we can't get new keys with an offset that
649 * fits within a gap between delayed dir index items.
650 */
651static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
652 struct btrfs_root *root,
653 struct btrfs_path *path,
654 struct btrfs_delayed_item *first_item)
655{
656 struct btrfs_fs_info *fs_info = root->fs_info;
657 struct btrfs_delayed_node *node = first_item->delayed_node;
658 LIST_HEAD(item_list);
659 struct btrfs_delayed_item *curr;
660 struct btrfs_delayed_item *next;
661 const int max_size = BTRFS_LEAF_DATA_SIZE(fs_info);
662 struct btrfs_item_batch batch;
663 struct btrfs_key first_key;
664 const u32 first_data_size = first_item->data_len;
665 int total_size;
666 char *ins_data = NULL;
667 int ret;
668 bool continuous_keys_only = false;
669
670 lockdep_assert_held(&node->mutex);
671
672 /*
673 * During normal operation the delayed index offset is continuously
674 * increasing, so we can batch insert all items as there will not be any
675 * overlapping keys in the tree.
676 *
677 * The exception to this is log replay, where we may have interleaved
678 * offsets in the tree, so our batch needs to be continuous keys only in
679 * order to ensure we do not end up with out of order items in our leaf.
680 */
681 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
682 continuous_keys_only = true;
683
684 /*
685 * For delayed items to insert, we track reserved metadata bytes based
686 * on the number of leaves that we will use.
687 * See btrfs_insert_delayed_dir_index() and
688 * btrfs_delayed_item_reserve_metadata()).
689 */
690 ASSERT(first_item->bytes_reserved == 0);
691
692 list_add_tail(&first_item->tree_list, &item_list);
693 batch.total_data_size = first_data_size;
694 batch.nr = 1;
695 total_size = first_data_size + sizeof(struct btrfs_item);
696 curr = first_item;
697
698 while (true) {
699 int next_size;
700
701 next = __btrfs_next_delayed_item(curr);
702 if (!next)
703 break;
704
705 /*
706 * We cannot allow gaps in the key space if we're doing log
707 * replay.
708 */
709 if (continuous_keys_only && (next->index != curr->index + 1))
710 break;
711
712 ASSERT(next->bytes_reserved == 0);
713
714 next_size = next->data_len + sizeof(struct btrfs_item);
715 if (total_size + next_size > max_size)
716 break;
717
718 list_add_tail(&next->tree_list, &item_list);
719 batch.nr++;
720 total_size += next_size;
721 batch.total_data_size += next->data_len;
722 curr = next;
723 }
724
725 if (batch.nr == 1) {
726 first_key.objectid = node->inode_id;
727 first_key.type = BTRFS_DIR_INDEX_KEY;
728 first_key.offset = first_item->index;
729 batch.keys = &first_key;
730 batch.data_sizes = &first_data_size;
731 } else {
732 struct btrfs_key *ins_keys;
733 u32 *ins_sizes;
734 int i = 0;
735
736 ins_data = kmalloc(batch.nr * sizeof(u32) +
737 batch.nr * sizeof(struct btrfs_key), GFP_NOFS);
738 if (!ins_data) {
739 ret = -ENOMEM;
740 goto out;
741 }
742 ins_sizes = (u32 *)ins_data;
743 ins_keys = (struct btrfs_key *)(ins_data + batch.nr * sizeof(u32));
744 batch.keys = ins_keys;
745 batch.data_sizes = ins_sizes;
746 list_for_each_entry(curr, &item_list, tree_list) {
747 ins_keys[i].objectid = node->inode_id;
748 ins_keys[i].type = BTRFS_DIR_INDEX_KEY;
749 ins_keys[i].offset = curr->index;
750 ins_sizes[i] = curr->data_len;
751 i++;
752 }
753 }
754
755 ret = btrfs_insert_empty_items(trans, root, path, &batch);
756 if (ret)
757 goto out;
758
759 list_for_each_entry(curr, &item_list, tree_list) {
760 char *data_ptr;
761
762 data_ptr = btrfs_item_ptr(path->nodes[0], path->slots[0], char);
763 write_extent_buffer(path->nodes[0], &curr->data,
764 (unsigned long)data_ptr, curr->data_len);
765 path->slots[0]++;
766 }
767
768 /*
769 * Now release our path before releasing the delayed items and their
770 * metadata reservations, so that we don't block other tasks for more
771 * time than needed.
772 */
773 btrfs_release_path(path);
774
775 ASSERT(node->index_item_leaves > 0);
776
777 /*
778 * For normal operations we will batch an entire leaf's worth of delayed
779 * items, so if there are more items to process we can decrement
780 * index_item_leaves by 1 as we inserted 1 leaf's worth of items.
781 *
782 * However for log replay we may not have inserted an entire leaf's
783 * worth of items, we may have not had continuous items, so decrementing
784 * here would mess up the index_item_leaves accounting. For this case
785 * only clean up the accounting when there are no items left.
786 */
787 if (next && !continuous_keys_only) {
788 /*
789 * We inserted one batch of items into a leaf and there are more
790 * items to flush in a future batch, so now release one unit of
791 * metadata space from the delayed block reserve, corresponding
792 * to the leaf we just flushed to.
793 */
794 btrfs_delayed_item_release_leaves(node, 1);
795 node->index_item_leaves--;
796 } else if (!next) {
797 /*
798 * There are no more items to insert. We can have a number of
799 * reserved leaves > 1 here - this happens when many dir index
800 * items are added and then removed before they are flushed (file
801 * names with a very short life, never span a transaction). So
802 * release all remaining leaves.
803 */
804 btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
805 node->index_item_leaves = 0;
806 }
807
808 list_for_each_entry_safe(curr, next, &item_list, tree_list) {
809 list_del(&curr->tree_list);
810 btrfs_release_delayed_item(curr);
811 }
812out:
813 kfree(ins_data);
814 return ret;
815}
816
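/*
 * Flush all delayed insertion items of a delayed node into the subvolume
 * tree, one leaf-sized batch at a time, dropping the node's mutex between
 * batches so that other tasks are not blocked for too long.
 */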
817static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
818 struct btrfs_path *path,
819 struct btrfs_root *root,
820 struct btrfs_delayed_node *node)
821{
822 int ret = 0;
823
824 while (ret == 0) {
825 struct btrfs_delayed_item *curr;
826
827 mutex_lock(&node->mutex);
828 curr = __btrfs_first_delayed_insertion_item(node);
829 if (!curr) {
830 mutex_unlock(&node->mutex);
831 break;
832 }
833 ret = btrfs_insert_delayed_item(trans, root, path, curr);
834 mutex_unlock(&node->mutex);
835 }
836
837 return ret;
838}
839
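/*
 * Delete a batch of consecutive dir index items from a leaf, starting at the
 * slot the path points to, for as long as the following leaf slots match the
 * next delayed deletion items of the node.
 */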
840static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
841 struct btrfs_root *root,
842 struct btrfs_path *path,
843 struct btrfs_delayed_item *item)
844{
845 const u64 ino = item->delayed_node->inode_id;
846 struct btrfs_fs_info *fs_info = root->fs_info;
847 struct btrfs_delayed_item *curr, *next;
848 struct extent_buffer *leaf = path->nodes[0];
849 LIST_HEAD(batch_list);
850 int nitems, slot, last_slot;
851 int ret;
852 u64 total_reserved_size = item->bytes_reserved;
853
854 ASSERT(leaf != NULL);
855
856 slot = path->slots[0];
857 last_slot = btrfs_header_nritems(leaf) - 1;
858 /*
859 * Our caller always gives us a path pointing to an existing item, so
860 * this can not happen.
861 */
862 ASSERT(slot <= last_slot);
863 if (WARN_ON(slot > last_slot))
864 return -ENOENT;
865
866 nitems = 1;
867 curr = item;
868 list_add_tail(&curr->tree_list, &batch_list);
869
870 /*
871 * Keep checking if the next delayed item matches the next item in the
872 * leaf - if so, we can add it to the batch of items to delete from the
873 * leaf.
874 */
875 while (slot < last_slot) {
876 struct btrfs_key key;
877
878 next = __btrfs_next_delayed_item(curr);
879 if (!next)
880 break;
881
882 slot++;
883 btrfs_item_key_to_cpu(leaf, &key, slot);
884 if (key.objectid != ino ||
885 key.type != BTRFS_DIR_INDEX_KEY ||
886 key.offset != next->index)
887 break;
888 nitems++;
889 curr = next;
890 list_add_tail(&curr->tree_list, &batch_list);
891 total_reserved_size += curr->bytes_reserved;
892 }
893
894 ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
895 if (ret)
896 return ret;
897
898 /* In case of BTRFS_FS_LOG_RECOVERING items won't have reserved space */
899 if (total_reserved_size > 0) {
900 /*
901 * Check btrfs_delayed_item_reserve_metadata() to see why we
902 * don't need to release/reserve qgroup space.
903 */
904 trace_btrfs_space_reservation(fs_info, "delayed_item", ino,
905 total_reserved_size, 0);
906 btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv,
907 total_reserved_size, NULL);
908 }
909
910 list_for_each_entry_safe(curr, next, &batch_list, tree_list) {
911 list_del(&curr->tree_list);
912 btrfs_release_delayed_item(curr);
913 }
914
915 return 0;
916}
917
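/*
 * Run all delayed deletion items of a delayed node: for each one, search for
 * the matching dir index item in the subvolume tree and delete it, batching
 * consecutive items that live in the same leaf.
 */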
918static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
919 struct btrfs_path *path,
920 struct btrfs_root *root,
921 struct btrfs_delayed_node *node)
922{
923 struct btrfs_key key;
924 int ret = 0;
925
926 key.objectid = node->inode_id;
927 key.type = BTRFS_DIR_INDEX_KEY;
928
929 while (ret == 0) {
930 struct btrfs_delayed_item *item;
931
932 mutex_lock(&node->mutex);
933 item = __btrfs_first_delayed_deletion_item(node);
934 if (!item) {
935 mutex_unlock(&node->mutex);
936 break;
937 }
938
939 key.offset = item->index;
940 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
941 if (ret > 0) {
942 /*
943 * There's no matching item in the leaf. This means we
944 * have already deleted this item in a past run of the
945 * delayed items. We ignore errors when running delayed
946 * items from an async context, through a work queue job
947 * running btrfs_async_run_delayed_root(), and don't
948 * release delayed items that failed to complete. This
949 * is because we will retry later, and at transaction
950 * commit time we always run delayed items and will
951 * then deal with errors if they fail to run again.
952 *
953 * So just release delayed items for which we can't find
954 * an item in the tree, and move to the next item.
955 */
956 btrfs_release_path(path);
957 btrfs_release_delayed_item(item);
958 ret = 0;
959 } else if (ret == 0) {
960 ret = btrfs_batch_delete_items(trans, root, path, item);
961 btrfs_release_path(path);
962 }
963
964 /*
965 * We unlock and relock on each iteration, this is to prevent
966 * blocking other tasks for too long while we are being run from
967 * the async context (work queue job). Those tasks are typically
968 * running system calls like creat/mkdir/rename/unlink/etc which
969 * need to add delayed items to this delayed node.
970 */
971 mutex_unlock(&node->mutex);
972 }
973
974 return ret;
975}
976
977static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
978{
979 struct btrfs_delayed_root *delayed_root;
980
981 if (delayed_node &&
982 test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
983 BUG_ON(!delayed_node->root);
984 clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
985 delayed_node->count--;
986
987 delayed_root = delayed_node->root->fs_info->delayed_root;
988 finish_one_item(delayed_root);
989 }
990}
991
992static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
993{
995 if (test_and_clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
996 struct btrfs_delayed_root *delayed_root;
997
998 ASSERT(delayed_node->root);
999 delayed_node->count--;
1000
1001 delayed_root = delayed_node->root->fs_info->delayed_root;
1002 finish_one_item(delayed_root);
1003 }
1004}
1005
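/*
 * Copy the delayed node's in-memory inode item into the subvolume tree and,
 * if BTRFS_DELAYED_NODE_DEL_IREF is set, also delete the inode's last
 * INODE_REF/EXTREF item. Called with the delayed node's mutex held.
 */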
1006static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1007 struct btrfs_root *root,
1008 struct btrfs_path *path,
1009 struct btrfs_delayed_node *node)
1010{
1011 struct btrfs_fs_info *fs_info = root->fs_info;
1012 struct btrfs_key key;
1013 struct btrfs_inode_item *inode_item;
1014 struct extent_buffer *leaf;
1015 int mod;
1016 int ret;
1017
1018 key.objectid = node->inode_id;
1019 key.type = BTRFS_INODE_ITEM_KEY;
1020 key.offset = 0;
1021
1022 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1023 mod = -1;
1024 else
1025 mod = 1;
1026
1027 ret = btrfs_lookup_inode(trans, root, path, &key, mod);
1028 if (ret > 0)
1029 ret = -ENOENT;
1030 if (ret < 0)
1031 goto out;
1032
1033 leaf = path->nodes[0];
1034 inode_item = btrfs_item_ptr(leaf, path->slots[0],
1035 struct btrfs_inode_item);
1036 write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1037 sizeof(struct btrfs_inode_item));
1038 btrfs_mark_buffer_dirty(trans, leaf);
1039
1040 if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1041 goto out;
1042
1043 /*
1044 * Now we're going to delete the INODE_REF/EXTREF, which should be the
1045 * only ref left. Check if the next item is an INODE_REF/EXTREF.
1046 *
1047 * But if we're the last item already, release and search for the last
1048 * INODE_REF/EXTREF.
1049 */
1050 if (path->slots[0] + 1 >= btrfs_header_nritems(leaf)) {
1051 key.objectid = node->inode_id;
1052 key.type = BTRFS_INODE_EXTREF_KEY;
1053 key.offset = (u64)-1;
1054
1055 btrfs_release_path(path);
1056 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1057 if (ret < 0)
1058 goto err_out;
1059 ASSERT(ret > 0);
1060 ASSERT(path->slots[0] > 0);
1061 ret = 0;
1062 path->slots[0]--;
1063 leaf = path->nodes[0];
1064 } else {
1065 path->slots[0]++;
1066 }
1067 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1068 if (key.objectid != node->inode_id)
1069 goto out;
1070 if (key.type != BTRFS_INODE_REF_KEY &&
1071 key.type != BTRFS_INODE_EXTREF_KEY)
1072 goto out;
1073
1074 /*
1075 * Delayed iref deletion is only used for an inode that has a single
1076 * link, so there is only one iref. The case where several irefs share
1077 * the same item does not exist.
1078 */
1079 ret = btrfs_del_item(trans, root, path);
1080out:
1081 btrfs_release_delayed_iref(node);
1082 btrfs_release_path(path);
1083err_out:
1084 btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
1085 btrfs_release_delayed_inode(node);
1086
1087 /*
1088 * If we fail to update the delayed inode we need to abort the
1089 * transaction, because we could leave the inode with the improper
1090 * counts behind.
1091 */
1092 if (ret && ret != -ENOENT)
1093 btrfs_abort_transaction(trans, ret);
1094
1095 return ret;
1096}
1097
1098static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1099 struct btrfs_root *root,
1100 struct btrfs_path *path,
1101 struct btrfs_delayed_node *node)
1102{
1103 int ret;
1104
1105 mutex_lock(&node->mutex);
1106 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
1107 mutex_unlock(&node->mutex);
1108 return 0;
1109 }
1110
1111 ret = __btrfs_update_delayed_inode(trans, root, path, node);
1112 mutex_unlock(&node->mutex);
1113 return ret;
1114}
1115
1116static inline int
1117__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1118 struct btrfs_path *path,
1119 struct btrfs_delayed_node *node)
1120{
1121 int ret;
1122
1123 ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1124 if (ret)
1125 return ret;
1126
1127 ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1128 if (ret)
1129 return ret;
1130
1131 ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1132 return ret;
1133}
1134
1135/*
1136 * Called when committing the transaction.
1137 * Returns 0 on success.
1138 * Returns < 0 on error and returns with an aborted transaction with any
1139 * outstanding delayed items cleaned up.
1140 */
1141static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
1142{
1143 struct btrfs_fs_info *fs_info = trans->fs_info;
1144 struct btrfs_delayed_root *delayed_root;
1145 struct btrfs_delayed_node *curr_node, *prev_node;
1146 struct btrfs_path *path;
1147 struct btrfs_block_rsv *block_rsv;
1148 int ret = 0;
1149 bool count = (nr > 0);
1150
1151 if (TRANS_ABORTED(trans))
1152 return -EIO;
1153
1154 path = btrfs_alloc_path();
1155 if (!path)
1156 return -ENOMEM;
1157
1158 block_rsv = trans->block_rsv;
1159 trans->block_rsv = &fs_info->delayed_block_rsv;
1160
1161 delayed_root = fs_info->delayed_root;
1162
1163 curr_node = btrfs_first_delayed_node(delayed_root);
1164 while (curr_node && (!count || nr--)) {
1165 ret = __btrfs_commit_inode_delayed_items(trans, path,
1166 curr_node);
1167 if (ret) {
1168 btrfs_abort_transaction(trans, ret);
1169 break;
1170 }
1171
1172 prev_node = curr_node;
1173 curr_node = btrfs_next_delayed_node(curr_node);
1174 /*
1175 * See the comment below about releasing path before releasing
1176 * node. If the commit of delayed items was successful the path
1177 * should always be released, but in case of an error, it may
1178 * point to locked extent buffers (a leaf at the very least).
1179 */
1180 ASSERT(path->nodes[0] == NULL);
1181 btrfs_release_delayed_node(prev_node);
1182 }
1183
1184 /*
1185 * Release the path to avoid a potential deadlock and lockdep splat when
1186 * releasing the delayed node, as that requires taking the delayed node's
1187 * mutex. If another task starts running delayed items before we take
1188 * the mutex, it will first lock the mutex and then it may try to lock
1189 * the same btree path (leaf).
1190 */
1191 btrfs_free_path(path);
1192
1193 if (curr_node)
1194 btrfs_release_delayed_node(curr_node);
1195 trans->block_rsv = block_rsv;
1196
1197 return ret;
1198}
1199
1200int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
1201{
1202 return __btrfs_run_delayed_items(trans, -1);
1203}
1204
1205int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
1206{
1207 return __btrfs_run_delayed_items(trans, nr);
1208}
1209
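/*
 * Flush all delayed items (insertions, deletions and the inode item update)
 * of a single inode in the context of the given transaction.
 */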
1210int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1211 struct btrfs_inode *inode)
1212{
1213 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1214 struct btrfs_path *path;
1215 struct btrfs_block_rsv *block_rsv;
1216 int ret;
1217
1218 if (!delayed_node)
1219 return 0;
1220
1221 mutex_lock(&delayed_node->mutex);
1222 if (!delayed_node->count) {
1223 mutex_unlock(&delayed_node->mutex);
1224 btrfs_release_delayed_node(delayed_node);
1225 return 0;
1226 }
1227 mutex_unlock(&delayed_node->mutex);
1228
1229 path = btrfs_alloc_path();
1230 if (!path) {
1231 btrfs_release_delayed_node(delayed_node);
1232 return -ENOMEM;
1233 }
1234
1235 block_rsv = trans->block_rsv;
1236 trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1237
1238 ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1239
1240 btrfs_release_delayed_node(delayed_node);
1241 btrfs_free_path(path);
1242 trans->block_rsv = block_rsv;
1243
1244 return ret;
1245}
1246
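/*
 * Flush only the delayed inode item update of an inode, joining a transaction
 * for that purpose; the inode's delayed dir index items are left untouched.
 */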
1247int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
1248{
1249 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1250 struct btrfs_trans_handle *trans;
1251 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1252 struct btrfs_path *path;
1253 struct btrfs_block_rsv *block_rsv;
1254 int ret;
1255
1256 if (!delayed_node)
1257 return 0;
1258
1259 mutex_lock(&delayed_node->mutex);
1260 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1261 mutex_unlock(&delayed_node->mutex);
1262 btrfs_release_delayed_node(delayed_node);
1263 return 0;
1264 }
1265 mutex_unlock(&delayed_node->mutex);
1266
1267 trans = btrfs_join_transaction(delayed_node->root);
1268 if (IS_ERR(trans)) {
1269 ret = PTR_ERR(trans);
1270 goto out;
1271 }
1272
1273 path = btrfs_alloc_path();
1274 if (!path) {
1275 ret = -ENOMEM;
1276 goto trans_out;
1277 }
1278
1279 block_rsv = trans->block_rsv;
1280 trans->block_rsv = &fs_info->delayed_block_rsv;
1281
1282 mutex_lock(&delayed_node->mutex);
1283 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
1284 ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1285 path, delayed_node);
1286 else
1287 ret = 0;
1288 mutex_unlock(&delayed_node->mutex);
1289
1290 btrfs_free_path(path);
1291 trans->block_rsv = block_rsv;
1292trans_out:
1293 btrfs_end_transaction(trans);
1294 btrfs_btree_balance_dirty(fs_info);
1295out:
1296 btrfs_release_delayed_node(delayed_node);
1297
1298 return ret;
1299}
1300
1301void btrfs_remove_delayed_node(struct btrfs_inode *inode)
1302{
1303 struct btrfs_delayed_node *delayed_node;
1304
1305 delayed_node = READ_ONCE(inode->delayed_node);
1306 if (!delayed_node)
1307 return;
1308
1309 inode->delayed_node = NULL;
1310 btrfs_release_delayed_node(delayed_node);
1311}
1312
1313struct btrfs_async_delayed_work {
1314 struct btrfs_delayed_root *delayed_root;
1315 int nr;
1316 struct btrfs_work work;
1317};
1318
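/*
 * Work queue callback that flushes prepared delayed nodes in the background
 * until the number of delayed items drops below half of
 * BTRFS_DELAYED_BACKGROUND or the requested amount of work has been done.
 */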
1319static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1320{
1321 struct btrfs_async_delayed_work *async_work;
1322 struct btrfs_delayed_root *delayed_root;
1323 struct btrfs_trans_handle *trans;
1324 struct btrfs_path *path;
1325 struct btrfs_delayed_node *delayed_node = NULL;
1326 struct btrfs_root *root;
1327 struct btrfs_block_rsv *block_rsv;
1328 int total_done = 0;
1329
1330 async_work = container_of(work, struct btrfs_async_delayed_work, work);
1331 delayed_root = async_work->delayed_root;
1332
1333 path = btrfs_alloc_path();
1334 if (!path)
1335 goto out;
1336
1337 do {
1338 if (atomic_read(&delayed_root->items) <
1339 BTRFS_DELAYED_BACKGROUND / 2)
1340 break;
1341
1342 delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1343 if (!delayed_node)
1344 break;
1345
1346 root = delayed_node->root;
1347
1348 trans = btrfs_join_transaction(root);
1349 if (IS_ERR(trans)) {
1350 btrfs_release_path(path);
1351 btrfs_release_prepared_delayed_node(delayed_node);
1352 total_done++;
1353 continue;
1354 }
1355
1356 block_rsv = trans->block_rsv;
1357 trans->block_rsv = &root->fs_info->delayed_block_rsv;
1358
1359 __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1360
1361 trans->block_rsv = block_rsv;
1362 btrfs_end_transaction(trans);
1363 btrfs_btree_balance_dirty_nodelay(root->fs_info);
1364
1365 btrfs_release_path(path);
1366 btrfs_release_prepared_delayed_node(delayed_node);
1367 total_done++;
1368
1369 } while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
1370 || total_done < async_work->nr);
1371
1372 btrfs_free_path(path);
1373out:
1374 wake_up(&delayed_root->wait);
1375 kfree(async_work);
1376}
1377
1378
1379static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1380 struct btrfs_fs_info *fs_info, int nr)
1381{
1382 struct btrfs_async_delayed_work *async_work;
1383
1384 async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1385 if (!async_work)
1386 return -ENOMEM;
1387
1388 async_work->delayed_root = delayed_root;
1389 btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL);
1390 async_work->nr = nr;
1391
1392 btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
1393 return 0;
1394}
1395
1396void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
1397{
1398 WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
1399}
1400
1401static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1402{
1403 int val = atomic_read(&delayed_root->items_seq);
1404
1405 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
1406 return 1;
1407
1408 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1409 return 1;
1410
1411 return 0;
1412}
1413
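/*
 * Kick off background flushing of delayed items when too many of them have
 * accumulated. If we are over the BTRFS_DELAYED_WRITEBACK threshold, also
 * wait until the backlog has been reduced.
 */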
1414void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
1415{
1416 struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
1417
1418 if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1419 btrfs_workqueue_normal_congested(fs_info->delayed_workers))
1420 return;
1421
1422 if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1423 int seq;
1424 int ret;
1425
1426 seq = atomic_read(&delayed_root->items_seq);
1427
1428 ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
1429 if (ret)
1430 return;
1431
1432 wait_event_interruptible(delayed_root->wait,
1433 could_end_wait(delayed_root, seq));
1434 return;
1435 }
1436
1437 btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
1438}
1439
1440static void btrfs_release_dir_index_item_space(struct btrfs_trans_handle *trans)
1441{
1442 struct btrfs_fs_info *fs_info = trans->fs_info;
1443 const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
1444
1445 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1446 return;
1447
1448 /*
1449 * Adding the new dir index item does not require touching another
1450 * leaf, so we can release 1 unit of metadata that was previously
1451 * reserved when starting the transaction. This applies only to
1452 * the case where we had a transaction start and excludes the
1453 * transaction join case (when replaying log trees).
1454 */
1455 trace_btrfs_space_reservation(fs_info, "transaction",
1456 trans->transid, bytes, 0);
1457 btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
1458 ASSERT(trans->bytes_reserved >= bytes);
1459 trans->bytes_reserved -= bytes;
1460}
1461
1462/* Will return 0, -ENOMEM or -EEXIST (index number collision, unexpected). */
1463int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1464 const char *name, int name_len,
1465 struct btrfs_inode *dir,
1466 struct btrfs_disk_key *disk_key, u8 flags,
1467 u64 index)
1468{
1469 struct btrfs_fs_info *fs_info = trans->fs_info;
1470 const unsigned int leaf_data_size = BTRFS_LEAF_DATA_SIZE(fs_info);
1471 struct btrfs_delayed_node *delayed_node;
1472 struct btrfs_delayed_item *delayed_item;
1473 struct btrfs_dir_item *dir_item;
1474 bool reserve_leaf_space;
1475 u32 data_len;
1476 int ret;
1477
1478 delayed_node = btrfs_get_or_create_delayed_node(dir);
1479 if (IS_ERR(delayed_node))
1480 return PTR_ERR(delayed_node);
1481
1482 delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len,
1483 delayed_node,
1484 BTRFS_DELAYED_INSERTION_ITEM);
1485 if (!delayed_item) {
1486 ret = -ENOMEM;
1487 goto release_node;
1488 }
1489
1490 delayed_item->index = index;
1491
1492 dir_item = (struct btrfs_dir_item *)delayed_item->data;
1493 dir_item->location = *disk_key;
1494 btrfs_set_stack_dir_transid(dir_item, trans->transid);
1495 btrfs_set_stack_dir_data_len(dir_item, 0);
1496 btrfs_set_stack_dir_name_len(dir_item, name_len);
1497 btrfs_set_stack_dir_flags(dir_item, flags);
1498 memcpy((char *)(dir_item + 1), name, name_len);
1499
1500 data_len = delayed_item->data_len + sizeof(struct btrfs_item);
1501
1502 mutex_lock(&delayed_node->mutex);
1503
1504 /*
1505 * First attempt to insert the delayed item. This is to make the error
1506 * handling path simpler in case we fail (-EEXIST). There's no risk of
1507 * any other task coming in and running the delayed item before we do
1508 * the metadata space reservation below, because we are holding the
1509 * delayed node's mutex and that mutex must also be locked before the
1510 * node's delayed items can be run.
1511 */
1512 ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
1513 if (unlikely(ret)) {
1514 btrfs_err(trans->fs_info,
1515"error adding delayed dir index item, name: %.*s, index: %llu, root: %llu, dir: %llu, dir->index_cnt: %llu, delayed_node->index_cnt: %llu, error: %d",
1516 name_len, name, index, btrfs_root_id(delayed_node->root),
1517 delayed_node->inode_id, dir->index_cnt,
1518 delayed_node->index_cnt, ret);
1519 btrfs_release_delayed_item(delayed_item);
1520 btrfs_release_dir_index_item_space(trans);
1521 mutex_unlock(&delayed_node->mutex);
1522 goto release_node;
1523 }
1524
1525 if (delayed_node->index_item_leaves == 0 ||
1526 delayed_node->curr_index_batch_size + data_len > leaf_data_size) {
1527 delayed_node->curr_index_batch_size = data_len;
1528 reserve_leaf_space = true;
1529 } else {
1530 delayed_node->curr_index_batch_size += data_len;
1531 reserve_leaf_space = false;
1532 }
1533
1534 if (reserve_leaf_space) {
1535 ret = btrfs_delayed_item_reserve_metadata(trans, delayed_item);
1536 /*
1537 * Space was reserved for a dir index item insertion when we
1538 * started the transaction, so getting a failure here should be
1539 * impossible.
1540 */
1541 if (WARN_ON(ret)) {
1542 btrfs_release_delayed_item(delayed_item);
1543 mutex_unlock(&delayed_node->mutex);
1544 goto release_node;
1545 }
1546
1547 delayed_node->index_item_leaves++;
1548 } else {
1549 btrfs_release_dir_index_item_space(trans);
1550 }
1551 mutex_unlock(&delayed_node->mutex);
1552
1553release_node:
1554 btrfs_release_delayed_node(delayed_node);
1555 return ret;
1556}
1557
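/*
 * If a delayed insertion item with the given index still exists, drop it (the
 * corresponding dir index item never has to reach the subvolume tree) and
 * adjust the reserved leaf space accounting. Returns 0 if such an item was
 * found and removed, 1 otherwise.
 */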
1558static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
1559 struct btrfs_delayed_node *node,
1560 u64 index)
1561{
1562 struct btrfs_delayed_item *item;
1563
1564 mutex_lock(&node->mutex);
1565 item = __btrfs_lookup_delayed_item(&node->ins_root.rb_root, index);
1566 if (!item) {
1567 mutex_unlock(&node->mutex);
1568 return 1;
1569 }
1570
1571 /*
1572 * For delayed items to insert, we track reserved metadata bytes based
1573 * on the number of leaves that we will use.
1574 * See btrfs_insert_delayed_dir_index() and
1575 * btrfs_delayed_item_reserve_metadata()).
1576 */
1577 ASSERT(item->bytes_reserved == 0);
1578 ASSERT(node->index_item_leaves > 0);
1579
1580 /*
1581 * If there's only one leaf reserved, we can subtract this item's size
1582 * from the current batch, otherwise we cannot because we don't know which leaf
1583 * it belongs to. With the current limit on delayed items, we rarely
1584 * accumulate enough dir index items to fill more than one leaf (even
1585 * when using a leaf size of 4K).
1586 */
1587 if (node->index_item_leaves == 1) {
1588 const u32 data_len = item->data_len + sizeof(struct btrfs_item);
1589
1590 ASSERT(node->curr_index_batch_size >= data_len);
1591 node->curr_index_batch_size -= data_len;
1592 }
1593
1594 btrfs_release_delayed_item(item);
1595
1596 /* If we now have no more dir index items, we can release all leaves. */
1597 if (RB_EMPTY_ROOT(&node->ins_root.rb_root)) {
1598 btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
1599 node->index_item_leaves = 0;
1600 }
1601
1602 mutex_unlock(&node->mutex);
1603 return 0;
1604}
1605
1606int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1607 struct btrfs_inode *dir, u64 index)
1608{
1609 struct btrfs_delayed_node *node;
1610 struct btrfs_delayed_item *item;
1611 int ret;
1612
1613 node = btrfs_get_or_create_delayed_node(dir);
1614 if (IS_ERR(node))
1615 return PTR_ERR(node);
1616
1617 ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node, index);
1618 if (!ret)
1619 goto end;
1620
1621 item = btrfs_alloc_delayed_item(0, node, BTRFS_DELAYED_DELETION_ITEM);
1622 if (!item) {
1623 ret = -ENOMEM;
1624 goto end;
1625 }
1626
1627 item->index = index;
1628
1629 ret = btrfs_delayed_item_reserve_metadata(trans, item);
1630 /*
1631 * We have reserved enough space when starting a new transaction, so a
1632 * metadata reservation failure here should be impossible.
1633 */
1634 if (ret < 0) {
1635 btrfs_err(trans->fs_info,
1636"metadata reservation failed for delayed dir item deltiona, should have been reserved");
1637 btrfs_release_delayed_item(item);
1638 goto end;
1639 }
1640
1641 mutex_lock(&node->mutex);
1642 ret = __btrfs_add_delayed_item(node, item);
1643 if (unlikely(ret)) {
1644 btrfs_err(trans->fs_info,
1645 "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1646 index, node->root->root_key.objectid,
1647 node->inode_id, ret);
1648 btrfs_delayed_item_release_metadata(dir->root, item);
1649 btrfs_release_delayed_item(item);
1650 }
1651 mutex_unlock(&node->mutex);
1652end:
1653 btrfs_release_delayed_node(node);
1654 return ret;
1655}
1656
1657int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
1658{
1659 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1660
1661 if (!delayed_node)
1662 return -ENOENT;
1663
1664 /*
1665 * Since we hold the i_mutex of this directory, it is impossible that
1666 * a new directory index is added into the delayed node and index_cnt
1667 * is updated now, so we don't need to lock the delayed node.
1668 */
1669 if (!delayed_node->index_cnt) {
1670 btrfs_release_delayed_node(delayed_node);
1671 return -EINVAL;
1672 }
1673
1674 inode->index_cnt = delayed_node->index_cnt;
1675 btrfs_release_delayed_node(delayed_node);
1676 return 0;
1677}
1678
1679bool btrfs_readdir_get_delayed_items(struct inode *inode,
1680 u64 last_index,
1681 struct list_head *ins_list,
1682 struct list_head *del_list)
1683{
1684 struct btrfs_delayed_node *delayed_node;
1685 struct btrfs_delayed_item *item;
1686
1687 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1688 if (!delayed_node)
1689 return false;
1690
1691 /*
1692 * We can only do one readdir with delayed items at a time because of
1693 * item->readdir_list.
1694 */
1695 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
1696 btrfs_inode_lock(BTRFS_I(inode), 0);
1697
1698 mutex_lock(&delayed_node->mutex);
1699 item = __btrfs_first_delayed_insertion_item(delayed_node);
1700 while (item && item->index <= last_index) {
1701 refcount_inc(&item->refs);
1702 list_add_tail(&item->readdir_list, ins_list);
1703 item = __btrfs_next_delayed_item(item);
1704 }
1705
1706 item = __btrfs_first_delayed_deletion_item(delayed_node);
1707 while (item && item->index <= last_index) {
1708 refcount_inc(&item->refs);
1709 list_add_tail(&item->readdir_list, del_list);
1710 item = __btrfs_next_delayed_item(item);
1711 }
1712 mutex_unlock(&delayed_node->mutex);
1713 /*
1714 * This delayed node is still cached in the btrfs inode, so refs
1715 * must be > 1 now, and we don't need to check whether it is going
1716 * to be freed or not.
1717 *
1718 * Besides that, this function is used to read the directory, and we do
1719 * not insert/delete delayed items during this period, so we also don't
1720 * need to requeue or dequeue this delayed node.
1721 */
1722 refcount_dec(&delayed_node->refs);
1723
1724 return true;
1725}
1726
1727void btrfs_readdir_put_delayed_items(struct inode *inode,
1728 struct list_head *ins_list,
1729 struct list_head *del_list)
1730{
1731 struct btrfs_delayed_item *curr, *next;
1732
1733 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1734 list_del(&curr->readdir_list);
1735 if (refcount_dec_and_test(&curr->refs))
1736 kfree(curr);
1737 }
1738
1739 list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1740 list_del(&curr->readdir_list);
1741 if (refcount_dec_and_test(&curr->refs))
1742 kfree(curr);
1743 }
1744
1745 /*
1746 * The VFS is going to do up_read(), so we need to downgrade back to a
1747 * read lock.
1748 */
1749 downgrade_write(&inode->i_rwsem);
1750}
1751
1752int btrfs_should_delete_dir_index(struct list_head *del_list,
1753 u64 index)
1754{
1755 struct btrfs_delayed_item *curr;
1756 int ret = 0;
1757
1758 list_for_each_entry(curr, del_list, readdir_list) {
1759 if (curr->index > index)
1760 break;
1761 if (curr->index == index) {
1762 ret = 1;
1763 break;
1764 }
1765 }
1766 return ret;
1767}
1768
1769/*
1770 * Read dir info stored in the delayed tree.
1771 */
1772int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1773 struct list_head *ins_list)
1774{
1775 struct btrfs_dir_item *di;
1776 struct btrfs_delayed_item *curr, *next;
1777 struct btrfs_key location;
1778 char *name;
1779 int name_len;
1780 int over = 0;
1781 unsigned char d_type;
1782
1783 /*
1784 * Changing the data of the delayed item is impossible, so we
1785 * don't need to lock them. And since we hold the i_mutex of the
1786 * directory, nobody can delete any directory indexes now.
1787 */
1788 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1789 list_del(&curr->readdir_list);
1790
1791 if (curr->index < ctx->pos) {
1792 if (refcount_dec_and_test(&curr->refs))
1793 kfree(curr);
1794 continue;
1795 }
1796
1797 ctx->pos = curr->index;
1798
1799 di = (struct btrfs_dir_item *)curr->data;
1800 name = (char *)(di + 1);
1801 name_len = btrfs_stack_dir_name_len(di);
1802
1803 d_type = fs_ftype_to_dtype(btrfs_dir_flags_to_ftype(di->type));
1804 btrfs_disk_key_to_cpu(&location, &di->location);
1805
1806 over = !dir_emit(ctx, name, name_len,
1807 location.objectid, d_type);
1808
1809 if (refcount_dec_and_test(&curr->refs))
1810 kfree(curr);
1811
1812 if (over)
1813 return 1;
1814 ctx->pos++;
1815 }
1816 return 0;
1817}
1818
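/*
 * Copy the current VFS inode state into a stack btrfs_inode_item, ready to be
 * stored in the delayed node and later written to the subvolume tree.
 */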
1819static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1820 struct btrfs_inode_item *inode_item,
1821 struct inode *inode)
1822{
1823 u64 flags;
1824
1825 btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1826 btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1827 btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1828 btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1829 btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1830 btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1831 btrfs_set_stack_inode_generation(inode_item,
1832 BTRFS_I(inode)->generation);
1833 btrfs_set_stack_inode_sequence(inode_item,
1834 inode_peek_iversion(inode));
1835 btrfs_set_stack_inode_transid(inode_item, trans->transid);
1836 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1837 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
1838 BTRFS_I(inode)->ro_flags);
1839 btrfs_set_stack_inode_flags(inode_item, flags);
1840 btrfs_set_stack_inode_block_group(inode_item, 0);
1841
1842 btrfs_set_stack_timespec_sec(&inode_item->atime,
1843 inode_get_atime_sec(inode));
1844 btrfs_set_stack_timespec_nsec(&inode_item->atime,
1845 inode_get_atime_nsec(inode));
1846
1847 btrfs_set_stack_timespec_sec(&inode_item->mtime,
1848 inode_get_mtime_sec(inode));
1849 btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1850 inode_get_mtime_nsec(inode));
1851
1852 btrfs_set_stack_timespec_sec(&inode_item->ctime,
1853 inode_get_ctime_sec(inode));
1854 btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1855 inode_get_ctime_nsec(inode));
1856
1857 btrfs_set_stack_timespec_sec(&inode_item->otime, BTRFS_I(inode)->i_otime_sec);
1858 btrfs_set_stack_timespec_nsec(&inode_item->otime, BTRFS_I(inode)->i_otime_nsec);
1859}
1860
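/*
 * Fill a VFS inode from the inode item stored in its delayed node, if that
 * node has a pending inode update. Returns -ENOENT when there is no delayed
 * node or no dirty inode item to read from.
 */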
1861int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1862{
1863 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
1864 struct btrfs_delayed_node *delayed_node;
1865 struct btrfs_inode_item *inode_item;
1866
1867 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1868 if (!delayed_node)
1869 return -ENOENT;
1870
1871 mutex_lock(&delayed_node->mutex);
1872 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1873 mutex_unlock(&delayed_node->mutex);
1874 btrfs_release_delayed_node(delayed_node);
1875 return -ENOENT;
1876 }
1877
1878 inode_item = &delayed_node->inode_item;
1879
1880 i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1881 i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1882 btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
1883 btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
1884 round_up(i_size_read(inode), fs_info->sectorsize));
1885 inode->i_mode = btrfs_stack_inode_mode(inode_item);
1886 set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1887 inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1888 BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1889 BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1890
1891 inode_set_iversion_queried(inode,
1892 btrfs_stack_inode_sequence(inode_item));
1893 inode->i_rdev = 0;
1894 *rdev = btrfs_stack_inode_rdev(inode_item);
1895 btrfs_inode_split_flags(btrfs_stack_inode_flags(inode_item),
1896 &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
1897
1898 inode_set_atime(inode, btrfs_stack_timespec_sec(&inode_item->atime),
1899 btrfs_stack_timespec_nsec(&inode_item->atime));
1900
1901 inode_set_mtime(inode, btrfs_stack_timespec_sec(&inode_item->mtime),
1902 btrfs_stack_timespec_nsec(&inode_item->mtime));
1903
1904 inode_set_ctime(inode, btrfs_stack_timespec_sec(&inode_item->ctime),
1905 btrfs_stack_timespec_nsec(&inode_item->ctime));
1906
1907 BTRFS_I(inode)->i_otime_sec = btrfs_stack_timespec_sec(&inode_item->otime);
1908 BTRFS_I(inode)->i_otime_nsec = btrfs_stack_timespec_nsec(&inode_item->otime);
1909
1910 inode->i_generation = BTRFS_I(inode)->generation;
1911 BTRFS_I(inode)->index_cnt = (u64)-1;
1912
1913 mutex_unlock(&delayed_node->mutex);
1914 btrfs_release_delayed_node(delayed_node);
1915 return 0;
1916}
1917
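/*
 * Record an inode update in the inode's delayed node instead of updating the
 * inode item in the subvolume tree right away. Metadata space is reserved
 * only the first time the delayed inode item is dirtied.
 */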
1918int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1919 struct btrfs_inode *inode)
1920{
1921 struct btrfs_root *root = inode->root;
1922 struct btrfs_delayed_node *delayed_node;
1923 int ret = 0;
1924
1925 delayed_node = btrfs_get_or_create_delayed_node(inode);
1926 if (IS_ERR(delayed_node))
1927 return PTR_ERR(delayed_node);
1928
1929 mutex_lock(&delayed_node->mutex);
1930 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1931 fill_stack_inode_item(trans, &delayed_node->inode_item,
1932 &inode->vfs_inode);
1933 goto release_node;
1934 }
1935
1936 ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
1937 if (ret)
1938 goto release_node;
1939
1940 fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
1941 set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1942 delayed_node->count++;
1943 atomic_inc(&root->fs_info->delayed_root->items);
1944release_node:
1945 mutex_unlock(&delayed_node->mutex);
1946 btrfs_release_delayed_node(delayed_node);
1947 return ret;
1948}
1949
1950int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
1951{
1952 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1953 struct btrfs_delayed_node *delayed_node;
1954
1955 /*
1956	 * We don't do delayed inode updates during log recovery because it
1957	 * leads to enospc problems. This means we also can't do
1958	 * delayed inode refs.
1959 */
1960 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1961 return -EAGAIN;
1962
1963 delayed_node = btrfs_get_or_create_delayed_node(inode);
1964 if (IS_ERR(delayed_node))
1965 return PTR_ERR(delayed_node);
1966
1967 /*
1968	 * We don't reserve space for inode ref deletion because:
1969	 * - We ONLY do async inode ref deletion for an inode that has only
1970	 *   one link (i_nlink == 1), which means there is only one inode ref.
1971	 *   And in most cases, the inode ref and the inode item are in the
1972	 *   same leaf, and we will deal with them at the same time.
1973	 *   Since we are sure we will reserve space for the inode item,
1974	 *   it is unnecessary to reserve space for inode ref deletion.
1975	 * - If the inode ref and the inode item are not in the same leaf,
1976	 *   we also needn't worry about an enospc problem, because we reserve
1977	 *   much more space for the inode update than it needs.
1978	 * - In the worst case, we can steal some space from the global
1979	 *   reservation, but that is very rare.
1980 */
1981 mutex_lock(&delayed_node->mutex);
1982 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1983 goto release_node;
1984
1985 set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1986 delayed_node->count++;
1987 atomic_inc(&fs_info->delayed_root->items);
1988release_node:
1989 mutex_unlock(&delayed_node->mutex);
1990 btrfs_release_delayed_node(delayed_node);
1991 return 0;
1992}
1993
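/*
 * Drop all pending work attached to a delayed node: insertion and deletion
 * items, reserved index item leaves, a pending iref deletion and a dirty
 * inode item, releasing the metadata reservations held for them.
 */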
1994static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1995{
1996 struct btrfs_root *root = delayed_node->root;
1997 struct btrfs_fs_info *fs_info = root->fs_info;
1998 struct btrfs_delayed_item *curr_item, *prev_item;
1999
2000 mutex_lock(&delayed_node->mutex);
2001 curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
2002 while (curr_item) {
2003 prev_item = curr_item;
2004 curr_item = __btrfs_next_delayed_item(prev_item);
2005 btrfs_release_delayed_item(prev_item);
2006 }
2007
2008 if (delayed_node->index_item_leaves > 0) {
2009 btrfs_delayed_item_release_leaves(delayed_node,
2010 delayed_node->index_item_leaves);
2011 delayed_node->index_item_leaves = 0;
2012 }
2013
2014 curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
2015 while (curr_item) {
2016 btrfs_delayed_item_release_metadata(root, curr_item);
2017 prev_item = curr_item;
2018 curr_item = __btrfs_next_delayed_item(prev_item);
2019 btrfs_release_delayed_item(prev_item);
2020 }
2021
2022 btrfs_release_delayed_iref(delayed_node);
2023
2024 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
2025 btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
2026 btrfs_release_delayed_inode(delayed_node);
2027 }
2028 mutex_unlock(&delayed_node->mutex);
2029}
2030
2031void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
2032{
2033 struct btrfs_delayed_node *delayed_node;
2034
2035 delayed_node = btrfs_get_delayed_node(inode);
2036 if (!delayed_node)
2037 return;
2038
2039 __btrfs_kill_delayed_node(delayed_node);
2040 btrfs_release_delayed_node(delayed_node);
2041}
2042
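/*
 * Kill every delayed node of a root, collecting references in small batches
 * under the root's inode_lock and then killing and releasing each node.
 */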
2043void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
2044{
2045 unsigned long index = 0;
2046 struct btrfs_delayed_node *delayed_nodes[8];
2047
2048 while (1) {
2049 struct btrfs_delayed_node *node;
2050 int count;
2051
2052 spin_lock(&root->inode_lock);
2053 if (xa_empty(&root->delayed_nodes)) {
2054 spin_unlock(&root->inode_lock);
2055 return;
2056 }
2057
2058 count = 0;
2059 xa_for_each_start(&root->delayed_nodes, index, node, index) {
2060 /*
2061 * Don't increase refs in case the node is dead and
2062	 * about to be removed from the xarray in the loop below.
2063 */
2064 if (refcount_inc_not_zero(&node->refs)) {
2065 delayed_nodes[count] = node;
2066 count++;
2067 }
2068 if (count >= ARRAY_SIZE(delayed_nodes))
2069 break;
2070 }
2071 spin_unlock(&root->inode_lock);
2072 index++;
2073
2074 for (int i = 0; i < count; i++) {
2075 __btrfs_kill_delayed_node(delayed_nodes[i]);
2076 btrfs_release_delayed_node(delayed_nodes[i]);
2077 }
2078 }
2079}
2080
2081void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
2082{
2083 struct btrfs_delayed_node *curr_node, *prev_node;
2084
2085 curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
2086 while (curr_node) {
2087 __btrfs_kill_delayed_node(curr_node);
2088
2089 prev_node = curr_node;
2090 curr_node = btrfs_next_delayed_node(curr_node);
2091 btrfs_release_delayed_node(prev_node);
2092 }
2093}
2094
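/*
 * Collect the current delayed insertion and deletion items of an inode into
 * the given lists for logging, taking an extra reference on each item.
 * Items that are already on a log list (added by another logging task) are
 * skipped, see the comments below.
 */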
2095void btrfs_log_get_delayed_items(struct btrfs_inode *inode,
2096 struct list_head *ins_list,
2097 struct list_head *del_list)
2098{
2099 struct btrfs_delayed_node *node;
2100 struct btrfs_delayed_item *item;
2101
2102 node = btrfs_get_delayed_node(inode);
2103 if (!node)
2104 return;
2105
2106 mutex_lock(&node->mutex);
2107 item = __btrfs_first_delayed_insertion_item(node);
2108 while (item) {
2109 /*
2110 * It's possible that the item is already in a log list. This
2111 * can happen in case two tasks are trying to log the same
2112	 * directory. For example, if we have task A and task B:
2113 *
2114 * Task A collected the delayed items into a log list while
2115 * under the inode's log_mutex (at btrfs_log_inode()), but it
2116 * only releases the items after logging the inodes they point
2117 * to (if they are new inodes), which happens after unlocking
2118 * the log mutex;
2119 *
2120 * Task B enters btrfs_log_inode() and acquires the log_mutex
2121	 * of the same directory inode, before task A releases the
2122 * delayed items. This can happen for example when logging some
2123 * inode we need to trigger logging of its parent directory, so
2124 * logging two files that have the same parent directory can
2125 * lead to this.
2126 *
2127 * If this happens, just ignore delayed items already in a log
2128 * list. All the tasks logging the directory are under a log
2129 * transaction and whichever finishes first can not sync the log
2130 * before the other completes and leaves the log transaction.
2131 */
2132 if (!item->logged && list_empty(&item->log_list)) {
2133 refcount_inc(&item->refs);
2134 list_add_tail(&item->log_list, ins_list);
2135 }
2136 item = __btrfs_next_delayed_item(item);
2137 }
2138
2139 item = __btrfs_first_delayed_deletion_item(node);
2140 while (item) {
2141 /* It may be non-empty, for the same reason mentioned above. */
2142 if (!item->logged && list_empty(&item->log_list)) {
2143 refcount_inc(&item->refs);
2144 list_add_tail(&item->log_list, del_list);
2145 }
2146 item = __btrfs_next_delayed_item(item);
2147 }
2148 mutex_unlock(&node->mutex);
2149
2150 /*
2151 * We are called during inode logging, which means the inode is in use
2152	 * and cannot be evicted before we finish logging the inode. So we never
2153	 * have the last reference on the delayed inode.
2154	 * Also, we don't use btrfs_release_delayed_node() because that would
2155	 * requeue the delayed inode (change its order in the list of prepared
2156	 * nodes) and we don't want to make such a change because we don't create or
2157 * delete delayed items.
2158 */
2159 ASSERT(refcount_read(&node->refs) > 1);
2160 refcount_dec(&node->refs);
2161}
2162
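/*
 * Mark the previously collected delayed items as logged and drop the extra
 * references taken by btrfs_log_get_delayed_items().
 */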
2163void btrfs_log_put_delayed_items(struct btrfs_inode *inode,
2164 struct list_head *ins_list,
2165 struct list_head *del_list)
2166{
2167 struct btrfs_delayed_node *node;
2168 struct btrfs_delayed_item *item;
2169 struct btrfs_delayed_item *next;
2170
2171 node = btrfs_get_delayed_node(inode);
2172 if (!node)
2173 return;
2174
2175 mutex_lock(&node->mutex);
2176
2177 list_for_each_entry_safe(item, next, ins_list, log_list) {
2178 item->logged = true;
2179 list_del_init(&item->log_list);
2180 if (refcount_dec_and_test(&item->refs))
2181 kfree(item);
2182 }
2183
2184 list_for_each_entry_safe(item, next, del_list, log_list) {
2185 item->logged = true;
2186 list_del_init(&item->log_list);
2187 if (refcount_dec_and_test(&item->refs))
2188 kfree(item);
2189 }
2190
2191 mutex_unlock(&node->mutex);
2192
2193 /*
2194 * We are called during inode logging, which means the inode is in use
2195	 * and cannot be evicted before we finish logging the inode. So we never
2196	 * have the last reference on the delayed inode.
2197	 * Also, we don't use btrfs_release_delayed_node() because that would
2198	 * requeue the delayed inode (change its order in the list of prepared
2199	 * nodes) and we don't want to make such a change because we don't create or
2200 * delete delayed items.
2201 */
2202 ASSERT(refcount_read(&node->refs) > 1);
2203 refcount_dec(&node->refs);
2204}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2011 Fujitsu. All rights reserved.
4 * Written by Miao Xie <miaox@cn.fujitsu.com>
5 */
6
7#include <linux/slab.h>
8#include <linux/iversion.h>
9#include "ctree.h"
10#include "fs.h"
11#include "messages.h"
12#include "misc.h"
13#include "delayed-inode.h"
14#include "disk-io.h"
15#include "transaction.h"
16#include "qgroup.h"
17#include "locking.h"
18#include "inode-item.h"
19#include "space-info.h"
20#include "accessors.h"
21#include "file-item.h"
22
23#define BTRFS_DELAYED_WRITEBACK 512
24#define BTRFS_DELAYED_BACKGROUND 128
25#define BTRFS_DELAYED_BATCH 16
26
27static struct kmem_cache *delayed_node_cache;
28
29int __init btrfs_delayed_inode_init(void)
30{
31 delayed_node_cache = KMEM_CACHE(btrfs_delayed_node, 0);
32 if (!delayed_node_cache)
33 return -ENOMEM;
34 return 0;
35}
36
37void __cold btrfs_delayed_inode_exit(void)
38{
39 kmem_cache_destroy(delayed_node_cache);
40}
41
42void btrfs_init_delayed_root(struct btrfs_delayed_root *delayed_root)
43{
44 atomic_set(&delayed_root->items, 0);
45 atomic_set(&delayed_root->items_seq, 0);
46 delayed_root->nodes = 0;
47 spin_lock_init(&delayed_root->lock);
48 init_waitqueue_head(&delayed_root->wait);
49 INIT_LIST_HEAD(&delayed_root->node_list);
50 INIT_LIST_HEAD(&delayed_root->prepare_list);
51}
52
53static inline void btrfs_init_delayed_node(
54 struct btrfs_delayed_node *delayed_node,
55 struct btrfs_root *root, u64 inode_id)
56{
57 delayed_node->root = root;
58 delayed_node->inode_id = inode_id;
59 refcount_set(&delayed_node->refs, 0);
60 delayed_node->ins_root = RB_ROOT_CACHED;
61 delayed_node->del_root = RB_ROOT_CACHED;
62 mutex_init(&delayed_node->mutex);
63 INIT_LIST_HEAD(&delayed_node->n_list);
64 INIT_LIST_HEAD(&delayed_node->p_list);
65}
66
67static struct btrfs_delayed_node *btrfs_get_delayed_node(
68 struct btrfs_inode *btrfs_inode)
69{
70 struct btrfs_root *root = btrfs_inode->root;
71 u64 ino = btrfs_ino(btrfs_inode);
72 struct btrfs_delayed_node *node;
73
74 node = READ_ONCE(btrfs_inode->delayed_node);
75 if (node) {
76 refcount_inc(&node->refs);
77 return node;
78 }
79
80 xa_lock(&root->delayed_nodes);
81 node = xa_load(&root->delayed_nodes, ino);
82
83 if (node) {
84 if (btrfs_inode->delayed_node) {
85 refcount_inc(&node->refs); /* can be accessed */
86 BUG_ON(btrfs_inode->delayed_node != node);
87 xa_unlock(&root->delayed_nodes);
88 return node;
89 }
90
91 /*
92 * It's possible that we're racing into the middle of removing
93 * this node from the xarray. In this case, the refcount
94 * was zero and it should never go back to one. Just return
95 * NULL like it was never in the xarray at all; our release
96 * function is in the process of removing it.
97 *
98 * Some implementations of refcount_inc refuse to bump the
99 * refcount once it has hit zero. If we don't do this dance
100 * here, refcount_inc() may decide to just WARN_ONCE() instead
101 * of actually bumping the refcount.
102 *
103 * If this node is properly in the xarray, we want to bump the
104 * refcount twice, once for the inode and once for this get
105 * operation.
106 */
107 if (refcount_inc_not_zero(&node->refs)) {
108 refcount_inc(&node->refs);
109 btrfs_inode->delayed_node = node;
110 } else {
111 node = NULL;
112 }
113
114 xa_unlock(&root->delayed_nodes);
115 return node;
116 }
117 xa_unlock(&root->delayed_nodes);
118
119 return NULL;
120}
121
122/* Will return either the node or PTR_ERR(-ENOMEM) */
123static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
124 struct btrfs_inode *btrfs_inode)
125{
126 struct btrfs_delayed_node *node;
127 struct btrfs_root *root = btrfs_inode->root;
128 u64 ino = btrfs_ino(btrfs_inode);
129 int ret;
130 void *ptr;
131
132again:
133 node = btrfs_get_delayed_node(btrfs_inode);
134 if (node)
135 return node;
136
137 node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
138 if (!node)
139 return ERR_PTR(-ENOMEM);
140 btrfs_init_delayed_node(node, root, ino);
141
142 /* Cached in the inode and can be accessed. */
143 refcount_set(&node->refs, 2);
144
145	/* Allocate and reserve the slot; from now on, xa_load() can return NULL for it. */
146 ret = xa_reserve(&root->delayed_nodes, ino, GFP_NOFS);
147 if (ret == -ENOMEM) {
148 kmem_cache_free(delayed_node_cache, node);
149 return ERR_PTR(-ENOMEM);
150 }
151 xa_lock(&root->delayed_nodes);
152 ptr = xa_load(&root->delayed_nodes, ino);
153 if (ptr) {
154 /* Somebody inserted it, go back and read it. */
155 xa_unlock(&root->delayed_nodes);
156 kmem_cache_free(delayed_node_cache, node);
157 node = NULL;
158 goto again;
159 }
160 ptr = __xa_store(&root->delayed_nodes, ino, node, GFP_ATOMIC);
161 ASSERT(xa_err(ptr) != -EINVAL);
162 ASSERT(xa_err(ptr) != -ENOMEM);
163 ASSERT(ptr == NULL);
164 btrfs_inode->delayed_node = node;
165 xa_unlock(&root->delayed_nodes);
166
167 return node;
168}
169
170/*
171 * Call it when holding delayed_node->mutex
172 *
173 * If mod = 1, add this node into the prepared list.
174 */
175static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
176 struct btrfs_delayed_node *node,
177 int mod)
178{
179 spin_lock(&root->lock);
180 if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
181 if (!list_empty(&node->p_list))
182 list_move_tail(&node->p_list, &root->prepare_list);
183 else if (mod)
184 list_add_tail(&node->p_list, &root->prepare_list);
185 } else {
186 list_add_tail(&node->n_list, &root->node_list);
187 list_add_tail(&node->p_list, &root->prepare_list);
188 refcount_inc(&node->refs); /* inserted into list */
189 root->nodes++;
190 set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
191 }
192 spin_unlock(&root->lock);
193}
194
195/* Must be called with delayed_node->mutex held. */
196static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
197 struct btrfs_delayed_node *node)
198{
199 spin_lock(&root->lock);
200 if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
201 root->nodes--;
202 refcount_dec(&node->refs); /* not in the list */
203 list_del_init(&node->n_list);
204 if (!list_empty(&node->p_list))
205 list_del_init(&node->p_list);
206 clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
207 }
208 spin_unlock(&root->lock);
209}
210
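/* Get the first node in the delayed root's node list, with an extra reference. */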
211static struct btrfs_delayed_node *btrfs_first_delayed_node(
212 struct btrfs_delayed_root *delayed_root)
213{
214 struct list_head *p;
215 struct btrfs_delayed_node *node = NULL;
216
217 spin_lock(&delayed_root->lock);
218 if (list_empty(&delayed_root->node_list))
219 goto out;
220
221 p = delayed_root->node_list.next;
222 node = list_entry(p, struct btrfs_delayed_node, n_list);
223 refcount_inc(&node->refs);
224out:
225 spin_unlock(&delayed_root->lock);
226
227 return node;
228}
229
230static struct btrfs_delayed_node *btrfs_next_delayed_node(
231 struct btrfs_delayed_node *node)
232{
233 struct btrfs_delayed_root *delayed_root;
234 struct list_head *p;
235 struct btrfs_delayed_node *next = NULL;
236
237 delayed_root = node->root->fs_info->delayed_root;
238 spin_lock(&delayed_root->lock);
239 if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
240 /* not in the list */
241 if (list_empty(&delayed_root->node_list))
242 goto out;
243 p = delayed_root->node_list.next;
244 } else if (list_is_last(&node->n_list, &delayed_root->node_list))
245 goto out;
246 else
247 p = node->n_list.next;
248
249 next = list_entry(p, struct btrfs_delayed_node, n_list);
250 refcount_inc(&next->refs);
251out:
252 spin_unlock(&delayed_root->lock);
253
254 return next;
255}
256
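/*
 * Drop one reference on a delayed node. The node is requeued (or dequeued if
 * it has no pending items) first; when the last reference is dropped the node
 * is removed from the root's xarray and freed.
 */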
257static void __btrfs_release_delayed_node(
258 struct btrfs_delayed_node *delayed_node,
259 int mod)
260{
261 struct btrfs_delayed_root *delayed_root;
262
263 if (!delayed_node)
264 return;
265
266 delayed_root = delayed_node->root->fs_info->delayed_root;
267
268 mutex_lock(&delayed_node->mutex);
269 if (delayed_node->count)
270 btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
271 else
272 btrfs_dequeue_delayed_node(delayed_root, delayed_node);
273 mutex_unlock(&delayed_node->mutex);
274
275 if (refcount_dec_and_test(&delayed_node->refs)) {
276 struct btrfs_root *root = delayed_node->root;
277
278 xa_erase(&root->delayed_nodes, delayed_node->inode_id);
279 /*
280 * Once our refcount goes to zero, nobody is allowed to bump it
281 * back up. We can delete it now.
282 */
283 ASSERT(refcount_read(&delayed_node->refs) == 0);
284 kmem_cache_free(delayed_node_cache, delayed_node);
285 }
286}
287
288static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
289{
290 __btrfs_release_delayed_node(node, 0);
291}
292
293static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
294 struct btrfs_delayed_root *delayed_root)
295{
296 struct list_head *p;
297 struct btrfs_delayed_node *node = NULL;
298
299 spin_lock(&delayed_root->lock);
300 if (list_empty(&delayed_root->prepare_list))
301 goto out;
302
303 p = delayed_root->prepare_list.next;
304 list_del_init(p);
305 node = list_entry(p, struct btrfs_delayed_node, p_list);
306 refcount_inc(&node->refs);
307out:
308 spin_unlock(&delayed_root->lock);
309
310 return node;
311}
312
313static inline void btrfs_release_prepared_delayed_node(
314 struct btrfs_delayed_node *node)
315{
316 __btrfs_release_delayed_node(node, 1);
317}
318
319static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u16 data_len,
320 struct btrfs_delayed_node *node,
321 enum btrfs_delayed_item_type type)
322{
323 struct btrfs_delayed_item *item;
324
325 item = kmalloc(struct_size(item, data, data_len), GFP_NOFS);
326 if (item) {
327 item->data_len = data_len;
328 item->type = type;
329 item->bytes_reserved = 0;
330 item->delayed_node = node;
331 RB_CLEAR_NODE(&item->rb_node);
332 INIT_LIST_HEAD(&item->log_list);
333 item->logged = false;
334 refcount_set(&item->refs, 1);
335 }
336 return item;
337}
338
339/*
340 * Look up the delayed item by key.
341 *
342 * @delayed_node: pointer to the delayed node
343 * @index: the dir index value to lookup (offset of a dir index key)
344 *
345 * Note: if an item with the given index is not found, NULL is
346 * returned.
347 */
348static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
349 struct rb_root *root,
350 u64 index)
351{
352 struct rb_node *node = root->rb_node;
353 struct btrfs_delayed_item *delayed_item = NULL;
354
355 while (node) {
356 delayed_item = rb_entry(node, struct btrfs_delayed_item,
357 rb_node);
358 if (delayed_item->index < index)
359 node = node->rb_right;
360 else if (delayed_item->index > index)
361 node = node->rb_left;
362 else
363 return delayed_item;
364 }
365
366 return NULL;
367}
368
369static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
370 struct btrfs_delayed_item *ins)
371{
372 struct rb_node **p, *node;
373 struct rb_node *parent_node = NULL;
374 struct rb_root_cached *root;
375 struct btrfs_delayed_item *item;
376 bool leftmost = true;
377
378 if (ins->type == BTRFS_DELAYED_INSERTION_ITEM)
379 root = &delayed_node->ins_root;
380 else
381 root = &delayed_node->del_root;
382
383 p = &root->rb_root.rb_node;
384 node = &ins->rb_node;
385
386 while (*p) {
387 parent_node = *p;
388 item = rb_entry(parent_node, struct btrfs_delayed_item,
389 rb_node);
390
391 if (item->index < ins->index) {
392 p = &(*p)->rb_right;
393 leftmost = false;
394 } else if (item->index > ins->index) {
395 p = &(*p)->rb_left;
396 } else {
397 return -EEXIST;
398 }
399 }
400
401 rb_link_node(node, parent_node, p);
402 rb_insert_color_cached(node, root, leftmost);
403
404 if (ins->type == BTRFS_DELAYED_INSERTION_ITEM &&
405 ins->index >= delayed_node->index_cnt)
406 delayed_node->index_cnt = ins->index + 1;
407
408 delayed_node->count++;
409 atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
410 return 0;
411}
412
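/*
 * Account the completion of one delayed item and wake up waiters (see
 * btrfs_balance_delayed_items()) once the backlog drops below the background
 * threshold or a batch boundary (BTRFS_DELAYED_BATCH) is crossed.
 */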
413static void finish_one_item(struct btrfs_delayed_root *delayed_root)
414{
415 int seq = atomic_inc_return(&delayed_root->items_seq);
416
417 /* atomic_dec_return implies a barrier */
418 if ((atomic_dec_return(&delayed_root->items) <
419 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
420 cond_wake_up_nomb(&delayed_root->wait);
421}
422
423static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
424{
425 struct btrfs_delayed_node *delayed_node = delayed_item->delayed_node;
426 struct rb_root_cached *root;
427 struct btrfs_delayed_root *delayed_root;
428
429 /* Not inserted, ignore it. */
430 if (RB_EMPTY_NODE(&delayed_item->rb_node))
431 return;
432
433	/* If it's in an rbtree, the delayed node must be locked. */
434 lockdep_assert_held(&delayed_node->mutex);
435
436 delayed_root = delayed_node->root->fs_info->delayed_root;
437
438 if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM)
439 root = &delayed_node->ins_root;
440 else
441 root = &delayed_node->del_root;
442
443 rb_erase_cached(&delayed_item->rb_node, root);
444 RB_CLEAR_NODE(&delayed_item->rb_node);
445 delayed_node->count--;
446
447 finish_one_item(delayed_root);
448}
449
450static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
451{
452 if (item) {
453 __btrfs_remove_delayed_item(item);
454 if (refcount_dec_and_test(&item->refs))
455 kfree(item);
456 }
457}
458
459static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
460 struct btrfs_delayed_node *delayed_node)
461{
462 struct rb_node *p;
463 struct btrfs_delayed_item *item = NULL;
464
465 p = rb_first_cached(&delayed_node->ins_root);
466 if (p)
467 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
468
469 return item;
470}
471
472static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
473 struct btrfs_delayed_node *delayed_node)
474{
475 struct rb_node *p;
476 struct btrfs_delayed_item *item = NULL;
477
478 p = rb_first_cached(&delayed_node->del_root);
479 if (p)
480 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
481
482 return item;
483}
484
485static struct btrfs_delayed_item *__btrfs_next_delayed_item(
486 struct btrfs_delayed_item *item)
487{
488 struct rb_node *p;
489 struct btrfs_delayed_item *next = NULL;
490
491 p = rb_next(&item->rb_node);
492 if (p)
493 next = rb_entry(p, struct btrfs_delayed_item, rb_node);
494
495 return next;
496}
497
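/*
 * Migrate metadata space for one delayed item from the transaction's block
 * reserve into the delayed block reserve. Only deletion items record the
 * reserved bytes here; insertion items are accounted per leaf instead.
 */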
498static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
499 struct btrfs_delayed_item *item)
500{
501 struct btrfs_block_rsv *src_rsv;
502 struct btrfs_block_rsv *dst_rsv;
503 struct btrfs_fs_info *fs_info = trans->fs_info;
504 u64 num_bytes;
505 int ret;
506
507 if (!trans->bytes_reserved)
508 return 0;
509
510 src_rsv = trans->block_rsv;
511 dst_rsv = &fs_info->delayed_block_rsv;
512
513 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
514
515 /*
516	 * Here we migrate the space rsv from the transaction rsv, since we have
517	 * already reserved space when starting the transaction. So there is no
518	 * need to reserve qgroup space here.
519 */
520 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
521 if (!ret) {
522 trace_btrfs_space_reservation(fs_info, "delayed_item",
523 item->delayed_node->inode_id,
524 num_bytes, 1);
525 /*
526 * For insertions we track reserved metadata space by accounting
527 * for the number of leaves that will be used, based on the delayed
528 * node's curr_index_batch_size and index_item_leaves fields.
529 */
530 if (item->type == BTRFS_DELAYED_DELETION_ITEM)
531 item->bytes_reserved = num_bytes;
532 }
533
534 return ret;
535}
536
537static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
538 struct btrfs_delayed_item *item)
539{
540 struct btrfs_block_rsv *rsv;
541 struct btrfs_fs_info *fs_info = root->fs_info;
542
543 if (!item->bytes_reserved)
544 return;
545
546 rsv = &fs_info->delayed_block_rsv;
547 /*
548 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
549 * to release/reserve qgroup space.
550 */
551 trace_btrfs_space_reservation(fs_info, "delayed_item",
552 item->delayed_node->inode_id,
553 item->bytes_reserved, 0);
554 btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
555}
556
557static void btrfs_delayed_item_release_leaves(struct btrfs_delayed_node *node,
558 unsigned int num_leaves)
559{
560 struct btrfs_fs_info *fs_info = node->root->fs_info;
561 const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, num_leaves);
562
563 /* There are no space reservations during log replay, bail out. */
564 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
565 return;
566
567 trace_btrfs_space_reservation(fs_info, "delayed_item", node->inode_id,
568 bytes, 0);
569 btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv, bytes, NULL);
570}
571
572static int btrfs_delayed_inode_reserve_metadata(
573 struct btrfs_trans_handle *trans,
574 struct btrfs_root *root,
575 struct btrfs_delayed_node *node)
576{
577 struct btrfs_fs_info *fs_info = root->fs_info;
578 struct btrfs_block_rsv *src_rsv;
579 struct btrfs_block_rsv *dst_rsv;
580 u64 num_bytes;
581 int ret;
582
583 src_rsv = trans->block_rsv;
584 dst_rsv = &fs_info->delayed_block_rsv;
585
586 num_bytes = btrfs_calc_metadata_size(fs_info, 1);
587
588 /*
589 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
590 * which doesn't reserve space for speed. This is a problem since we
591 * still need to reserve space for this update, so try to reserve the
592 * space.
593 *
594 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
595 * we always reserve enough to update the inode item.
596 */
597 if (!src_rsv || (!trans->bytes_reserved &&
598 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
599 ret = btrfs_qgroup_reserve_meta(root, num_bytes,
600 BTRFS_QGROUP_RSV_META_PREALLOC, true);
601 if (ret < 0)
602 return ret;
603 ret = btrfs_block_rsv_add(fs_info, dst_rsv, num_bytes,
604 BTRFS_RESERVE_NO_FLUSH);
605	/* NO_FLUSH can only fail with -ENOSPC. */
606 ASSERT(ret == 0 || ret == -ENOSPC);
607 if (ret)
608 btrfs_qgroup_free_meta_prealloc(root, num_bytes);
609 } else {
610 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
611 }
612
613 if (!ret) {
614 trace_btrfs_space_reservation(fs_info, "delayed_inode",
615 node->inode_id, num_bytes, 1);
616 node->bytes_reserved = num_bytes;
617 }
618
619 return ret;
620}
621
622static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
623 struct btrfs_delayed_node *node,
624 bool qgroup_free)
625{
626 struct btrfs_block_rsv *rsv;
627
628 if (!node->bytes_reserved)
629 return;
630
631 rsv = &fs_info->delayed_block_rsv;
632 trace_btrfs_space_reservation(fs_info, "delayed_inode",
633 node->inode_id, node->bytes_reserved, 0);
634 btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
635 if (qgroup_free)
636 btrfs_qgroup_free_meta_prealloc(node->root,
637 node->bytes_reserved);
638 else
639 btrfs_qgroup_convert_reserved_meta(node->root,
640 node->bytes_reserved);
641 node->bytes_reserved = 0;
642}
643
644/*
645 * Insert a single delayed item or a batch of delayed items, as many as possible
646 * that fit in a leaf. The delayed items (dir index keys) are sorted by their key
647 * in the rbtree, and if there's a gap between two consecutive dir index items,
648 * then it means at some point we had delayed dir indexes to add but they got
649 * removed (by btrfs_delete_delayed_dir_index()) before we attempted to flush them
650 * into the subvolume tree. Dir index keys also have their offsets coming from a
651 * monotonically increasing counter, so we can't get new keys with an offset that
652 * fits within a gap between delayed dir index items.
653 */
654static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
655 struct btrfs_root *root,
656 struct btrfs_path *path,
657 struct btrfs_delayed_item *first_item)
658{
659 struct btrfs_fs_info *fs_info = root->fs_info;
660 struct btrfs_delayed_node *node = first_item->delayed_node;
661 LIST_HEAD(item_list);
662 struct btrfs_delayed_item *curr;
663 struct btrfs_delayed_item *next;
664 const int max_size = BTRFS_LEAF_DATA_SIZE(fs_info);
665 struct btrfs_item_batch batch;
666 struct btrfs_key first_key;
667 const u32 first_data_size = first_item->data_len;
668 int total_size;
669 char *ins_data = NULL;
670 int ret;
671 bool continuous_keys_only = false;
672
673 lockdep_assert_held(&node->mutex);
674
675 /*
676 * During normal operation the delayed index offset is continuously
677 * increasing, so we can batch insert all items as there will not be any
678 * overlapping keys in the tree.
679 *
680 * The exception to this is log replay, where we may have interleaved
681 * offsets in the tree, so our batch needs to be continuous keys only in
682 * order to ensure we do not end up with out of order items in our leaf.
683 */
684 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
685 continuous_keys_only = true;
686
687 /*
688 * For delayed items to insert, we track reserved metadata bytes based
689 * on the number of leaves that we will use.
690 * See btrfs_insert_delayed_dir_index() and
691	 * btrfs_delayed_item_reserve_metadata().
692 */
693 ASSERT(first_item->bytes_reserved == 0);
694
695 list_add_tail(&first_item->tree_list, &item_list);
696 batch.total_data_size = first_data_size;
697 batch.nr = 1;
698 total_size = first_data_size + sizeof(struct btrfs_item);
699 curr = first_item;
700
701 while (true) {
702 int next_size;
703
704 next = __btrfs_next_delayed_item(curr);
705 if (!next)
706 break;
707
708 /*
709 * We cannot allow gaps in the key space if we're doing log
710 * replay.
711 */
712 if (continuous_keys_only && (next->index != curr->index + 1))
713 break;
714
715 ASSERT(next->bytes_reserved == 0);
716
717 next_size = next->data_len + sizeof(struct btrfs_item);
718 if (total_size + next_size > max_size)
719 break;
720
721 list_add_tail(&next->tree_list, &item_list);
722 batch.nr++;
723 total_size += next_size;
724 batch.total_data_size += next->data_len;
725 curr = next;
726 }
727
728 if (batch.nr == 1) {
729 first_key.objectid = node->inode_id;
730 first_key.type = BTRFS_DIR_INDEX_KEY;
731 first_key.offset = first_item->index;
732 batch.keys = &first_key;
733 batch.data_sizes = &first_data_size;
734 } else {
735 struct btrfs_key *ins_keys;
736 u32 *ins_sizes;
737 int i = 0;
738
739 ins_data = kmalloc(batch.nr * sizeof(u32) +
740 batch.nr * sizeof(struct btrfs_key), GFP_NOFS);
741 if (!ins_data) {
742 ret = -ENOMEM;
743 goto out;
744 }
745 ins_sizes = (u32 *)ins_data;
746 ins_keys = (struct btrfs_key *)(ins_data + batch.nr * sizeof(u32));
747 batch.keys = ins_keys;
748 batch.data_sizes = ins_sizes;
749 list_for_each_entry(curr, &item_list, tree_list) {
750 ins_keys[i].objectid = node->inode_id;
751 ins_keys[i].type = BTRFS_DIR_INDEX_KEY;
752 ins_keys[i].offset = curr->index;
753 ins_sizes[i] = curr->data_len;
754 i++;
755 }
756 }
757
758 ret = btrfs_insert_empty_items(trans, root, path, &batch);
759 if (ret)
760 goto out;
761
762 list_for_each_entry(curr, &item_list, tree_list) {
763 char *data_ptr;
764
765 data_ptr = btrfs_item_ptr(path->nodes[0], path->slots[0], char);
766 write_extent_buffer(path->nodes[0], &curr->data,
767 (unsigned long)data_ptr, curr->data_len);
768 path->slots[0]++;
769 }
770
771 /*
772 * Now release our path before releasing the delayed items and their
773 * metadata reservations, so that we don't block other tasks for more
774 * time than needed.
775 */
776 btrfs_release_path(path);
777
778 ASSERT(node->index_item_leaves > 0);
779
780 /*
781 * For normal operations we will batch an entire leaf's worth of delayed
782 * items, so if there are more items to process we can decrement
783 * index_item_leaves by 1 as we inserted 1 leaf's worth of items.
784 *
785 * However for log replay we may not have inserted an entire leaf's
786	 * worth of items, we may not have had continuous items, so decrementing
787 * here would mess up the index_item_leaves accounting. For this case
788 * only clean up the accounting when there are no items left.
789 */
790 if (next && !continuous_keys_only) {
791 /*
792	 * We inserted one batch of items into a leaf and there are more
793	 * items to flush in a future batch, so release one unit of
794	 * metadata space from the delayed block reserve, corresponding
795	 * to the leaf we just flushed.
796 */
797 btrfs_delayed_item_release_leaves(node, 1);
798 node->index_item_leaves--;
799 } else if (!next) {
800 /*
801 * There are no more items to insert. We can have a number of
802 * reserved leaves > 1 here - this happens when many dir index
803 * items are added and then removed before they are flushed (file
804	 * names with a very short life that never span a transaction). So
805 * release all remaining leaves.
806 */
807 btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
808 node->index_item_leaves = 0;
809 }
810
811 list_for_each_entry_safe(curr, next, &item_list, tree_list) {
812 list_del(&curr->tree_list);
813 btrfs_release_delayed_item(curr);
814 }
815out:
816 kfree(ins_data);
817 return ret;
818}
819
820static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
821 struct btrfs_path *path,
822 struct btrfs_root *root,
823 struct btrfs_delayed_node *node)
824{
825 int ret = 0;
826
827 while (ret == 0) {
828 struct btrfs_delayed_item *curr;
829
830 mutex_lock(&node->mutex);
831 curr = __btrfs_first_delayed_insertion_item(node);
832 if (!curr) {
833 mutex_unlock(&node->mutex);
834 break;
835 }
836 ret = btrfs_insert_delayed_item(trans, root, path, curr);
837 mutex_unlock(&node->mutex);
838 }
839
840 return ret;
841}
842
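/*
 * Delete from the leaf the dir index item the path points at, plus any
 * following leaf items that match the next delayed deletion items, then
 * release the metadata reserved for them and free the delayed items.
 */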
843static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
844 struct btrfs_root *root,
845 struct btrfs_path *path,
846 struct btrfs_delayed_item *item)
847{
848 const u64 ino = item->delayed_node->inode_id;
849 struct btrfs_fs_info *fs_info = root->fs_info;
850 struct btrfs_delayed_item *curr, *next;
851 struct extent_buffer *leaf = path->nodes[0];
852 LIST_HEAD(batch_list);
853 int nitems, slot, last_slot;
854 int ret;
855 u64 total_reserved_size = item->bytes_reserved;
856
857 ASSERT(leaf != NULL);
858
859 slot = path->slots[0];
860 last_slot = btrfs_header_nritems(leaf) - 1;
861 /*
862 * Our caller always gives us a path pointing to an existing item, so
863 * this can not happen.
864 */
865 ASSERT(slot <= last_slot);
866 if (WARN_ON(slot > last_slot))
867 return -ENOENT;
868
869 nitems = 1;
870 curr = item;
871 list_add_tail(&curr->tree_list, &batch_list);
872
873 /*
874 * Keep checking if the next delayed item matches the next item in the
875 * leaf - if so, we can add it to the batch of items to delete from the
876 * leaf.
877 */
878 while (slot < last_slot) {
879 struct btrfs_key key;
880
881 next = __btrfs_next_delayed_item(curr);
882 if (!next)
883 break;
884
885 slot++;
886 btrfs_item_key_to_cpu(leaf, &key, slot);
887 if (key.objectid != ino ||
888 key.type != BTRFS_DIR_INDEX_KEY ||
889 key.offset != next->index)
890 break;
891 nitems++;
892 curr = next;
893 list_add_tail(&curr->tree_list, &batch_list);
894 total_reserved_size += curr->bytes_reserved;
895 }
896
897 ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
898 if (ret)
899 return ret;
900
901	/* In case of BTRFS_FS_LOG_RECOVERING, items won't have reserved space. */
902 if (total_reserved_size > 0) {
903 /*
904 * Check btrfs_delayed_item_reserve_metadata() to see why we
905 * don't need to release/reserve qgroup space.
906 */
907 trace_btrfs_space_reservation(fs_info, "delayed_item", ino,
908 total_reserved_size, 0);
909 btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv,
910 total_reserved_size, NULL);
911 }
912
913 list_for_each_entry_safe(curr, next, &batch_list, tree_list) {
914 list_del(&curr->tree_list);
915 btrfs_release_delayed_item(curr);
916 }
917
918 return 0;
919}
920
921static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
922 struct btrfs_path *path,
923 struct btrfs_root *root,
924 struct btrfs_delayed_node *node)
925{
926 struct btrfs_key key;
927 int ret = 0;
928
929 key.objectid = node->inode_id;
930 key.type = BTRFS_DIR_INDEX_KEY;
931
932 while (ret == 0) {
933 struct btrfs_delayed_item *item;
934
935 mutex_lock(&node->mutex);
936 item = __btrfs_first_delayed_deletion_item(node);
937 if (!item) {
938 mutex_unlock(&node->mutex);
939 break;
940 }
941
942 key.offset = item->index;
943 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
944 if (ret > 0) {
945 /*
946 * There's no matching item in the leaf. This means we
947 * have already deleted this item in a past run of the
948 * delayed items. We ignore errors when running delayed
949 * items from an async context, through a work queue job
950 * running btrfs_async_run_delayed_root(), and don't
951 * release delayed items that failed to complete. This
952 * is because we will retry later, and at transaction
953 * commit time we always run delayed items and will
954 * then deal with errors if they fail to run again.
955 *
956 * So just release delayed items for which we can't find
957 * an item in the tree, and move to the next item.
958 */
959 btrfs_release_path(path);
960 btrfs_release_delayed_item(item);
961 ret = 0;
962 } else if (ret == 0) {
963 ret = btrfs_batch_delete_items(trans, root, path, item);
964 btrfs_release_path(path);
965 }
966
967 /*
968	 * We unlock and relock on each iteration to avoid
969 * blocking other tasks for too long while we are being run from
970 * the async context (work queue job). Those tasks are typically
971 * running system calls like creat/mkdir/rename/unlink/etc which
972 * need to add delayed items to this delayed node.
973 */
974 mutex_unlock(&node->mutex);
975 }
976
977 return ret;
978}
979
980static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
981{
982 struct btrfs_delayed_root *delayed_root;
983
984 if (delayed_node &&
985 test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
986 ASSERT(delayed_node->root);
987 clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
988 delayed_node->count--;
989
990 delayed_root = delayed_node->root->fs_info->delayed_root;
991 finish_one_item(delayed_root);
992 }
993}
994
995static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
996{
997
998 if (test_and_clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
999 struct btrfs_delayed_root *delayed_root;
1000
1001 ASSERT(delayed_node->root);
1002 delayed_node->count--;
1003
1004 delayed_root = delayed_node->root->fs_info->delayed_root;
1005 finish_one_item(delayed_root);
1006 }
1007}
1008
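/*
 * Copy the delayed node's cached inode item over the inode item in the
 * subvolume tree and, if a delayed iref deletion is pending, delete the
 * INODE_REF/EXTREF item as well. Any failure other than -ENOENT aborts the
 * transaction.
 */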
1009static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1010 struct btrfs_root *root,
1011 struct btrfs_path *path,
1012 struct btrfs_delayed_node *node)
1013{
1014 struct btrfs_fs_info *fs_info = root->fs_info;
1015 struct btrfs_key key;
1016 struct btrfs_inode_item *inode_item;
1017 struct extent_buffer *leaf;
1018 int mod;
1019 int ret;
1020
1021 key.objectid = node->inode_id;
1022 key.type = BTRFS_INODE_ITEM_KEY;
1023 key.offset = 0;
1024
1025 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1026 mod = -1;
1027 else
1028 mod = 1;
1029
1030 ret = btrfs_lookup_inode(trans, root, path, &key, mod);
1031 if (ret > 0)
1032 ret = -ENOENT;
1033 if (ret < 0)
1034 goto out;
1035
1036 leaf = path->nodes[0];
1037 inode_item = btrfs_item_ptr(leaf, path->slots[0],
1038 struct btrfs_inode_item);
1039 write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1040 sizeof(struct btrfs_inode_item));
1041 btrfs_mark_buffer_dirty(trans, leaf);
1042
1043 if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1044 goto out;
1045
1046 /*
1047 * Now we're going to delete the INODE_REF/EXTREF, which should be the
1048	 * only ref left. Check if the next item is an INODE_REF/EXTREF.
1049	 *
1050	 * But if we're already at the last item, release the path and search for the last
1051 * INODE_REF/EXTREF.
1052 */
1053 if (path->slots[0] + 1 >= btrfs_header_nritems(leaf)) {
1054 key.objectid = node->inode_id;
1055 key.type = BTRFS_INODE_EXTREF_KEY;
1056 key.offset = (u64)-1;
1057
1058 btrfs_release_path(path);
1059 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1060 if (ret < 0)
1061 goto err_out;
1062 ASSERT(ret > 0);
1063 ASSERT(path->slots[0] > 0);
1064 ret = 0;
1065 path->slots[0]--;
1066 leaf = path->nodes[0];
1067 } else {
1068 path->slots[0]++;
1069 }
1070 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1071 if (key.objectid != node->inode_id)
1072 goto out;
1073 if (key.type != BTRFS_INODE_REF_KEY &&
1074 key.type != BTRFS_INODE_EXTREF_KEY)
1075 goto out;
1076
1077 /*
1078	 * Delayed iref deletion is only done for an inode that has a single
1079	 * link, so there is only one iref. The case where several irefs are
1080	 * stored in the same item doesn't exist.
1081 */
1082 ret = btrfs_del_item(trans, root, path);
1083out:
1084 btrfs_release_delayed_iref(node);
1085 btrfs_release_path(path);
1086err_out:
1087 btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
1088 btrfs_release_delayed_inode(node);
1089
1090 /*
1091 * If we fail to update the delayed inode we need to abort the
1092	 * transaction, because we could leave the inode behind with improper
1093	 * counts.
1094 */
1095 if (ret && ret != -ENOENT)
1096 btrfs_abort_transaction(trans, ret);
1097
1098 return ret;
1099}
1100
1101static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1102 struct btrfs_root *root,
1103 struct btrfs_path *path,
1104 struct btrfs_delayed_node *node)
1105{
1106 int ret;
1107
1108 mutex_lock(&node->mutex);
1109 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
1110 mutex_unlock(&node->mutex);
1111 return 0;
1112 }
1113
1114 ret = __btrfs_update_delayed_inode(trans, root, path, node);
1115 mutex_unlock(&node->mutex);
1116 return ret;
1117}
1118
1119static inline int
1120__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1121 struct btrfs_path *path,
1122 struct btrfs_delayed_node *node)
1123{
1124 int ret;
1125
1126 ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1127 if (ret)
1128 return ret;
1129
1130 ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1131 if (ret)
1132 return ret;
1133
1134 ret = btrfs_record_root_in_trans(trans, node->root);
1135 if (ret)
1136 return ret;
1137 ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1138 return ret;
1139}
1140
1141/*
1142 * Called when committing the transaction.
1143 * Returns 0 on success.
1144 * Returns < 0 on error and returns with an aborted transaction with any
1145 * outstanding delayed items cleaned up.
1146 */
1147static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
1148{
1149 struct btrfs_fs_info *fs_info = trans->fs_info;
1150 struct btrfs_delayed_root *delayed_root;
1151 struct btrfs_delayed_node *curr_node, *prev_node;
1152 struct btrfs_path *path;
1153 struct btrfs_block_rsv *block_rsv;
1154 int ret = 0;
1155 bool count = (nr > 0);
1156
1157 if (TRANS_ABORTED(trans))
1158 return -EIO;
1159
1160 path = btrfs_alloc_path();
1161 if (!path)
1162 return -ENOMEM;
1163
1164 block_rsv = trans->block_rsv;
1165 trans->block_rsv = &fs_info->delayed_block_rsv;
1166
1167 delayed_root = fs_info->delayed_root;
1168
1169 curr_node = btrfs_first_delayed_node(delayed_root);
1170 while (curr_node && (!count || nr--)) {
1171 ret = __btrfs_commit_inode_delayed_items(trans, path,
1172 curr_node);
1173 if (ret) {
1174 btrfs_abort_transaction(trans, ret);
1175 break;
1176 }
1177
1178 prev_node = curr_node;
1179 curr_node = btrfs_next_delayed_node(curr_node);
1180 /*
1181 * See the comment below about releasing path before releasing
1182 * node. If the commit of delayed items was successful the path
1183 * should always be released, but in case of an error, it may
1184 * point to locked extent buffers (a leaf at the very least).
1185 */
1186 ASSERT(path->nodes[0] == NULL);
1187 btrfs_release_delayed_node(prev_node);
1188 }
1189
1190 /*
1191 * Release the path to avoid a potential deadlock and lockdep splat when
1192 * releasing the delayed node, as that requires taking the delayed node's
1193 * mutex. If another task starts running delayed items before we take
1194 * the mutex, it will first lock the mutex and then it may try to lock
1195 * the same btree path (leaf).
1196 */
1197 btrfs_free_path(path);
1198
1199 if (curr_node)
1200 btrfs_release_delayed_node(curr_node);
1201 trans->block_rsv = block_rsv;
1202
1203 return ret;
1204}
1205
1206int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
1207{
1208 return __btrfs_run_delayed_items(trans, -1);
1209}
1210
1211int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
1212{
1213 return __btrfs_run_delayed_items(trans, nr);
1214}
1215
1216int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1217 struct btrfs_inode *inode)
1218{
1219 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1220 struct btrfs_path *path;
1221 struct btrfs_block_rsv *block_rsv;
1222 int ret;
1223
1224 if (!delayed_node)
1225 return 0;
1226
1227 mutex_lock(&delayed_node->mutex);
1228 if (!delayed_node->count) {
1229 mutex_unlock(&delayed_node->mutex);
1230 btrfs_release_delayed_node(delayed_node);
1231 return 0;
1232 }
1233 mutex_unlock(&delayed_node->mutex);
1234
1235 path = btrfs_alloc_path();
1236 if (!path) {
1237 btrfs_release_delayed_node(delayed_node);
1238 return -ENOMEM;
1239 }
1240
1241 block_rsv = trans->block_rsv;
1242 trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1243
1244 ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1245
1246 btrfs_release_delayed_node(delayed_node);
1247 btrfs_free_path(path);
1248 trans->block_rsv = block_rsv;
1249
1250 return ret;
1251}
1252
1253int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
1254{
1255 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1256 struct btrfs_trans_handle *trans;
1257 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1258 struct btrfs_path *path;
1259 struct btrfs_block_rsv *block_rsv;
1260 int ret;
1261
1262 if (!delayed_node)
1263 return 0;
1264
1265 mutex_lock(&delayed_node->mutex);
1266 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1267 mutex_unlock(&delayed_node->mutex);
1268 btrfs_release_delayed_node(delayed_node);
1269 return 0;
1270 }
1271 mutex_unlock(&delayed_node->mutex);
1272
1273 trans = btrfs_join_transaction(delayed_node->root);
1274 if (IS_ERR(trans)) {
1275 ret = PTR_ERR(trans);
1276 goto out;
1277 }
1278
1279 path = btrfs_alloc_path();
1280 if (!path) {
1281 ret = -ENOMEM;
1282 goto trans_out;
1283 }
1284
1285 block_rsv = trans->block_rsv;
1286 trans->block_rsv = &fs_info->delayed_block_rsv;
1287
1288 mutex_lock(&delayed_node->mutex);
1289 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
1290 ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1291 path, delayed_node);
1292 else
1293 ret = 0;
1294 mutex_unlock(&delayed_node->mutex);
1295
1296 btrfs_free_path(path);
1297 trans->block_rsv = block_rsv;
1298trans_out:
1299 btrfs_end_transaction(trans);
1300 btrfs_btree_balance_dirty(fs_info);
1301out:
1302 btrfs_release_delayed_node(delayed_node);
1303
1304 return ret;
1305}
1306
1307void btrfs_remove_delayed_node(struct btrfs_inode *inode)
1308{
1309 struct btrfs_delayed_node *delayed_node;
1310
1311 delayed_node = READ_ONCE(inode->delayed_node);
1312 if (!delayed_node)
1313 return;
1314
1315 inode->delayed_node = NULL;
1316 btrfs_release_delayed_node(delayed_node);
1317}
1318
1319struct btrfs_async_delayed_work {
1320 struct btrfs_delayed_root *delayed_root;
1321 int nr;
1322 struct btrfs_work work;
1323};
1324
1325static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1326{
1327 struct btrfs_async_delayed_work *async_work;
1328 struct btrfs_delayed_root *delayed_root;
1329 struct btrfs_trans_handle *trans;
1330 struct btrfs_path *path;
1331 struct btrfs_delayed_node *delayed_node = NULL;
1332 struct btrfs_root *root;
1333 struct btrfs_block_rsv *block_rsv;
1334 int total_done = 0;
1335
1336 async_work = container_of(work, struct btrfs_async_delayed_work, work);
1337 delayed_root = async_work->delayed_root;
1338
1339 path = btrfs_alloc_path();
1340 if (!path)
1341 goto out;
1342
1343 do {
1344 if (atomic_read(&delayed_root->items) <
1345 BTRFS_DELAYED_BACKGROUND / 2)
1346 break;
1347
1348 delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1349 if (!delayed_node)
1350 break;
1351
1352 root = delayed_node->root;
1353
1354 trans = btrfs_join_transaction(root);
1355 if (IS_ERR(trans)) {
1356 btrfs_release_path(path);
1357 btrfs_release_prepared_delayed_node(delayed_node);
1358 total_done++;
1359 continue;
1360 }
1361
1362 block_rsv = trans->block_rsv;
1363 trans->block_rsv = &root->fs_info->delayed_block_rsv;
1364
1365 __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1366
1367 trans->block_rsv = block_rsv;
1368 btrfs_end_transaction(trans);
1369 btrfs_btree_balance_dirty_nodelay(root->fs_info);
1370
1371 btrfs_release_path(path);
1372 btrfs_release_prepared_delayed_node(delayed_node);
1373 total_done++;
1374
1375 } while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
1376 || total_done < async_work->nr);
1377
1378 btrfs_free_path(path);
1379out:
1380 wake_up(&delayed_root->wait);
1381 kfree(async_work);
1382}
1383
1384
1385static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1386 struct btrfs_fs_info *fs_info, int nr)
1387{
1388 struct btrfs_async_delayed_work *async_work;
1389
1390 async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1391 if (!async_work)
1392 return -ENOMEM;
1393
1394 async_work->delayed_root = delayed_root;
1395 btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL);
1396 async_work->nr = nr;
1397
1398 btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
1399 return 0;
1400}
1401
1402void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
1403{
1404 WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
1405}
1406
1407static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1408{
1409 int val = atomic_read(&delayed_root->items_seq);
1410
1411 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
1412 return 1;
1413
1414 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1415 return 1;
1416
1417 return 0;
1418}
1419
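/*
 * Throttle producers of delayed items: do nothing while the backlog is small
 * or the workqueue is congested, otherwise kick the async worker, and once the
 * backlog reaches the writeback threshold also wait until enough items have
 * been processed.
 */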
1420void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
1421{
1422 struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
1423
1424 if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1425 btrfs_workqueue_normal_congested(fs_info->delayed_workers))
1426 return;
1427
1428 if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1429 int seq;
1430 int ret;
1431
1432 seq = atomic_read(&delayed_root->items_seq);
1433
1434 ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
1435 if (ret)
1436 return;
1437
1438 wait_event_interruptible(delayed_root->wait,
1439 could_end_wait(delayed_root, seq));
1440 return;
1441 }
1442
1443 btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
1444}
1445
1446static void btrfs_release_dir_index_item_space(struct btrfs_trans_handle *trans)
1447{
1448 struct btrfs_fs_info *fs_info = trans->fs_info;
1449 const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
1450
1451 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1452 return;
1453
1454 /*
1455 * Adding the new dir index item does not require touching another
1456 * leaf, so we can release 1 unit of metadata that was previously
1457 * reserved when starting the transaction. This applies only to
1458 * the case where we had a transaction start and excludes the
1459 * transaction join case (when replaying log trees).
1460 */
1461 trace_btrfs_space_reservation(fs_info, "transaction",
1462 trans->transid, bytes, 0);
1463 btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
1464 ASSERT(trans->bytes_reserved >= bytes);
1465 trans->bytes_reserved -= bytes;
1466}
1467
1468/* Will return 0, -ENOMEM or -EEXIST (index number collision, unexpected). */
1469int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1470 const char *name, int name_len,
1471 struct btrfs_inode *dir,
1472 const struct btrfs_disk_key *disk_key, u8 flags,
1473 u64 index)
1474{
1475 struct btrfs_fs_info *fs_info = trans->fs_info;
1476 const unsigned int leaf_data_size = BTRFS_LEAF_DATA_SIZE(fs_info);
1477 struct btrfs_delayed_node *delayed_node;
1478 struct btrfs_delayed_item *delayed_item;
1479 struct btrfs_dir_item *dir_item;
1480 bool reserve_leaf_space;
1481 u32 data_len;
1482 int ret;
1483
1484 delayed_node = btrfs_get_or_create_delayed_node(dir);
1485 if (IS_ERR(delayed_node))
1486 return PTR_ERR(delayed_node);
1487
1488 delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len,
1489 delayed_node,
1490 BTRFS_DELAYED_INSERTION_ITEM);
1491 if (!delayed_item) {
1492 ret = -ENOMEM;
1493 goto release_node;
1494 }
1495
1496 delayed_item->index = index;
1497
1498 dir_item = (struct btrfs_dir_item *)delayed_item->data;
1499 dir_item->location = *disk_key;
1500 btrfs_set_stack_dir_transid(dir_item, trans->transid);
1501 btrfs_set_stack_dir_data_len(dir_item, 0);
1502 btrfs_set_stack_dir_name_len(dir_item, name_len);
1503 btrfs_set_stack_dir_flags(dir_item, flags);
1504 memcpy((char *)(dir_item + 1), name, name_len);
1505
1506 data_len = delayed_item->data_len + sizeof(struct btrfs_item);
1507
1508 mutex_lock(&delayed_node->mutex);
1509
1510 /*
1511 * First attempt to insert the delayed item. This is to make the error
1512 * handling path simpler in case we fail (-EEXIST). There's no risk of
1513 * any other task coming in and running the delayed item before we do
1514 * the metadata space reservation below, because we are holding the
1515 * delayed node's mutex and that mutex must also be locked before the
1516 * node's delayed items can be run.
1517 */
1518 ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
1519 if (unlikely(ret)) {
1520 btrfs_err(trans->fs_info,
1521"error adding delayed dir index item, name: %.*s, index: %llu, root: %llu, dir: %llu, dir->index_cnt: %llu, delayed_node->index_cnt: %llu, error: %d",
1522 name_len, name, index, btrfs_root_id(delayed_node->root),
1523 delayed_node->inode_id, dir->index_cnt,
1524 delayed_node->index_cnt, ret);
1525 btrfs_release_delayed_item(delayed_item);
1526 btrfs_release_dir_index_item_space(trans);
1527 mutex_unlock(&delayed_node->mutex);
1528 goto release_node;
1529 }
1530
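 /*
 * Dir index items are accounted for in batches of tree leaves: the new
 * item joins the current batch if the batch still fits in one leaf,
 * otherwise it starts a new batch and one more leaf worth of metadata
 * must be reserved. As an illustration (rough numbers only), with a 16K
 * node size a leaf holds roughly 16K of item data, so many small dir
 * index items end up sharing a single reserved leaf.
 */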
1531 if (delayed_node->index_item_leaves == 0 ||
1532 delayed_node->curr_index_batch_size + data_len > leaf_data_size) {
1533 delayed_node->curr_index_batch_size = data_len;
1534 reserve_leaf_space = true;
1535 } else {
1536 delayed_node->curr_index_batch_size += data_len;
1537 reserve_leaf_space = false;
1538 }
1539
1540 if (reserve_leaf_space) {
1541 ret = btrfs_delayed_item_reserve_metadata(trans, delayed_item);
1542 /*
1543 * Space was reserved for a dir index item insertion when we
1544 * started the transaction, so getting a failure here should be
1545 * impossible.
1546 */
1547 if (WARN_ON(ret)) {
1548 btrfs_release_delayed_item(delayed_item);
1549 mutex_unlock(&delayed_node->mutex);
1550 goto release_node;
1551 }
1552
1553 delayed_node->index_item_leaves++;
1554 } else {
1555 btrfs_release_dir_index_item_space(trans);
1556 }
1557 mutex_unlock(&delayed_node->mutex);
1558
1559release_node:
1560 btrfs_release_delayed_node(delayed_node);
1561 return ret;
1562}
1563
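/*
 * Try to cancel a not-yet-run insertion item for the given index instead of
 * queueing a deletion item. Returns 0 if a matching insertion item was found
 * and dropped, 1 if there was none and the caller must queue a real deletion.
 */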
1564static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
1565 struct btrfs_delayed_node *node,
1566 u64 index)
1567{
1568 struct btrfs_delayed_item *item;
1569
1570 mutex_lock(&node->mutex);
1571 item = __btrfs_lookup_delayed_item(&node->ins_root.rb_root, index);
1572 if (!item) {
1573 mutex_unlock(&node->mutex);
1574 return 1;
1575 }
1576
1577 /*
1578 * For delayed items to insert, we track reserved metadata bytes based
1579 * on the number of leaves that we will use.
1580 * See btrfs_insert_delayed_dir_index() and
1581 * btrfs_delayed_item_reserve_metadata().
1582 */
1583 ASSERT(item->bytes_reserved == 0);
1584 ASSERT(node->index_item_leaves > 0);
1585
1586 /*
1587 * If there's only one leaf reserved, we can subtract this item's size
1588 * from the current batch, otherwise we cannot because we don't know which leaf
1589 * it belongs to. With the current limit on delayed items, we rarely
1590 * accumulate enough dir index items to fill more than one leaf (even
1591 * when using a leaf size of 4K).
1592 */
1593 if (node->index_item_leaves == 1) {
1594 const u32 data_len = item->data_len + sizeof(struct btrfs_item);
1595
1596 ASSERT(node->curr_index_batch_size >= data_len);
1597 node->curr_index_batch_size -= data_len;
1598 }
1599
1600 btrfs_release_delayed_item(item);
1601
1602 /* If we now have no more dir index items, we can release all leaves. */
1603 if (RB_EMPTY_ROOT(&node->ins_root.rb_root)) {
1604 btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
1605 node->index_item_leaves = 0;
1606 }
1607
1608 mutex_unlock(&node->mutex);
1609 return 0;
1610}
1611
1612int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1613 struct btrfs_inode *dir, u64 index)
1614{
1615 struct btrfs_delayed_node *node;
1616 struct btrfs_delayed_item *item;
1617 int ret;
1618
1619 node = btrfs_get_or_create_delayed_node(dir);
1620 if (IS_ERR(node))
1621 return PTR_ERR(node);
1622
1623 ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node, index);
1624 if (!ret)
1625 goto end;
1626
1627 item = btrfs_alloc_delayed_item(0, node, BTRFS_DELAYED_DELETION_ITEM);
1628 if (!item) {
1629 ret = -ENOMEM;
1630 goto end;
1631 }
1632
1633 item->index = index;
1634
1635 ret = btrfs_delayed_item_reserve_metadata(trans, item);
1636 /*
1637 * We reserved enough space when the transaction was started, so a
1638 * metadata reservation failure here should be impossible.
1639 */
1640 if (ret < 0) {
1641 btrfs_err(trans->fs_info,
1642"metadata reservation failed for delayed dir item deltiona, should have been reserved");
1643 btrfs_release_delayed_item(item);
1644 goto end;
1645 }
1646
1647 mutex_lock(&node->mutex);
1648 ret = __btrfs_add_delayed_item(node, item);
1649 if (unlikely(ret)) {
1650 btrfs_err(trans->fs_info,
1651 "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1652 index, btrfs_root_id(node->root),
1653 node->inode_id, ret);
1654 btrfs_delayed_item_release_metadata(dir->root, item);
1655 btrfs_release_delayed_item(item);
1656 }
1657 mutex_unlock(&node->mutex);
1658end:
1659 btrfs_release_delayed_node(node);
1660 return ret;
1661}
1662
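/*
 * Copy the next directory index number cached in the delayed node into the
 * in-memory inode. Returns -ENOENT if the directory has no delayed node and
 * -EINVAL if the delayed node does not have an index count cached, in which
 * case the caller must determine it some other way (typically by searching
 * the subvolume tree).
 */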
1663int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
1664{
1665 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1666
1667 if (!delayed_node)
1668 return -ENOENT;
1669
1670 /*
1671 * Since we hold the i_mutex of this directory, no new directory index
1672 * item can be added to the delayed node and index_cnt cannot be
1673 * updated right now, so we don't need to lock the delayed node.
1674 */
1675 if (!delayed_node->index_cnt) {
1676 btrfs_release_delayed_node(delayed_node);
1677 return -EINVAL;
1678 }
1679
1680 inode->index_cnt = delayed_node->index_cnt;
1681 btrfs_release_delayed_node(delayed_node);
1682 return 0;
1683}
1684
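/*
 * Collect the delayed insertion and deletion items with an index up to
 * @last_index so that readdir can merge them with what is on disk. An
 * extra reference is taken on every collected item; the caller must pass
 * both lists to btrfs_readdir_put_delayed_items() once it is done. Note
 * that the shared inode lock is dropped and re-taken in exclusive mode
 * here, and downgraded back by btrfs_readdir_put_delayed_items().
 *
 * A rough usage sketch (illustrative only; @dir is the directory's
 * struct btrfs_inode, error handling omitted):
 *
 *	LIST_HEAD(ins_list);
 *	LIST_HEAD(del_list);
 *
 *	if (btrfs_readdir_get_delayed_items(dir, last_index,
 *					    &ins_list, &del_list)) {
 *		... emit directory entries, consulting both lists ...
 *		btrfs_readdir_put_delayed_items(dir, &ins_list, &del_list);
 *	}
 */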
1685bool btrfs_readdir_get_delayed_items(struct btrfs_inode *inode,
1686 u64 last_index,
1687 struct list_head *ins_list,
1688 struct list_head *del_list)
1689{
1690 struct btrfs_delayed_node *delayed_node;
1691 struct btrfs_delayed_item *item;
1692
1693 delayed_node = btrfs_get_delayed_node(inode);
1694 if (!delayed_node)
1695 return false;
1696
1697 /*
1698 * We can only do one readdir with delayed items at a time because of
1699 * item->readdir_list.
1700 */
1701 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
1702 btrfs_inode_lock(inode, 0);
1703
1704 mutex_lock(&delayed_node->mutex);
1705 item = __btrfs_first_delayed_insertion_item(delayed_node);
1706 while (item && item->index <= last_index) {
1707 refcount_inc(&item->refs);
1708 list_add_tail(&item->readdir_list, ins_list);
1709 item = __btrfs_next_delayed_item(item);
1710 }
1711
1712 item = __btrfs_first_delayed_deletion_item(delayed_node);
1713 while (item && item->index <= last_index) {
1714 refcount_inc(&item->refs);
1715 list_add_tail(&item->readdir_list, del_list);
1716 item = __btrfs_next_delayed_item(item);
1717 }
1718 mutex_unlock(&delayed_node->mutex);
1719 /*
1720 * This delayed node is still cached in the btrfs inode, so its ref
1721 * count must be > 1 here, and we don't need to check whether it is
1722 * about to be freed.
1723 *
1724 * Besides that, this function is only used for readdir and we do not
1725 * insert or delete delayed items during that period, so we don't need
1726 * to requeue or dequeue this delayed node either.
1727 */
1728 refcount_dec(&delayed_node->refs);
1729
1730 return true;
1731}
1732
1733void btrfs_readdir_put_delayed_items(struct btrfs_inode *inode,
1734 struct list_head *ins_list,
1735 struct list_head *del_list)
1736{
1737 struct btrfs_delayed_item *curr, *next;
1738
1739 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1740 list_del(&curr->readdir_list);
1741 if (refcount_dec_and_test(&curr->refs))
1742 kfree(curr);
1743 }
1744
1745 list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1746 list_del(&curr->readdir_list);
1747 if (refcount_dec_and_test(&curr->refs))
1748 kfree(curr);
1749 }
1750
1751 /*
1752 * The VFS is going to do up_read(), so we need to downgrade back to a
1753 * read lock.
1754 */
1755 downgrade_write(&inode->vfs_inode.i_rwsem);
1756}
1757
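/*
 * Check whether @index is covered by a pending delayed deletion item. The
 * list was built in ascending index order, so the walk can stop as soon as
 * an item with a larger index is seen.
 */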
1758int btrfs_should_delete_dir_index(const struct list_head *del_list,
1759 u64 index)
1760{
1761 struct btrfs_delayed_item *curr;
1762 int ret = 0;
1763
1764 list_for_each_entry(curr, del_list, readdir_list) {
1765 if (curr->index > index)
1766 break;
1767 if (curr->index == index) {
1768 ret = 1;
1769 break;
1770 }
1771 }
1772 return ret;
1773}
1774
1775/*
1776 * Read dir info stored in the delayed tree.
1777 */
1778int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1779 const struct list_head *ins_list)
1780{
1781 struct btrfs_dir_item *di;
1782 struct btrfs_delayed_item *curr, *next;
1783 struct btrfs_key location;
1784 char *name;
1785 int name_len;
1786 int over = 0;
1787 unsigned char d_type;
1788
1789 /*
1790 * The data of a delayed item never changes after it is created, so we
1791 * don't need to lock the items. And since we hold the i_mutex of the
1792 * directory, nobody can delete any directory index right now.
1793 */
1794 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1795 list_del(&curr->readdir_list);
1796
1797 if (curr->index < ctx->pos) {
1798 if (refcount_dec_and_test(&curr->refs))
1799 kfree(curr);
1800 continue;
1801 }
1802
1803 ctx->pos = curr->index;
1804
1805 di = (struct btrfs_dir_item *)curr->data;
1806 name = (char *)(di + 1);
1807 name_len = btrfs_stack_dir_name_len(di);
1808
1809 d_type = fs_ftype_to_dtype(btrfs_dir_flags_to_ftype(di->type));
1810 btrfs_disk_key_to_cpu(&location, &di->location);
1811
1812 over = !dir_emit(ctx, name, name_len,
1813 location.objectid, d_type);
1814
1815 if (refcount_dec_and_test(&curr->refs))
1816 kfree(curr);
1817
1818 if (over)
1819 return 1;
1820 ctx->pos++;
1821 }
1822 return 0;
1823}
1824
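/*
 * Snapshot the in-memory inode into a stack btrfs_inode_item. The copy kept
 * in the delayed node is later written into the inode item in the subvolume
 * tree when the delayed inode update is run.
 */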
1825static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1826 struct btrfs_inode_item *inode_item,
1827 struct inode *inode)
1828{
1829 u64 flags;
1830
1831 btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1832 btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1833 btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1834 btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1835 btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1836 btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1837 btrfs_set_stack_inode_generation(inode_item,
1838 BTRFS_I(inode)->generation);
1839 btrfs_set_stack_inode_sequence(inode_item,
1840 inode_peek_iversion(inode));
1841 btrfs_set_stack_inode_transid(inode_item, trans->transid);
1842 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1843 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
1844 BTRFS_I(inode)->ro_flags);
1845 btrfs_set_stack_inode_flags(inode_item, flags);
1846 btrfs_set_stack_inode_block_group(inode_item, 0);
1847
1848 btrfs_set_stack_timespec_sec(&inode_item->atime,
1849 inode_get_atime_sec(inode));
1850 btrfs_set_stack_timespec_nsec(&inode_item->atime,
1851 inode_get_atime_nsec(inode));
1852
1853 btrfs_set_stack_timespec_sec(&inode_item->mtime,
1854 inode_get_mtime_sec(inode));
1855 btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1856 inode_get_mtime_nsec(inode));
1857
1858 btrfs_set_stack_timespec_sec(&inode_item->ctime,
1859 inode_get_ctime_sec(inode));
1860 btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1861 inode_get_ctime_nsec(inode));
1862
1863 btrfs_set_stack_timespec_sec(&inode_item->otime, BTRFS_I(inode)->i_otime_sec);
1864 btrfs_set_stack_timespec_nsec(&inode_item->otime, BTRFS_I(inode)->i_otime_nsec);
1865}
1866
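/*
 * The counterpart of fill_stack_inode_item() for the read side: if the
 * delayed node still holds a dirty copy of the inode item, use it to fill
 * the VFS inode, since it is newer than what is in the subvolume tree.
 * Returns -ENOENT when nothing is cached, in which case the caller reads
 * the inode item from the tree as usual.
 */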
1867int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1868{
1869 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
1870 struct btrfs_delayed_node *delayed_node;
1871 struct btrfs_inode_item *inode_item;
1872
1873 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1874 if (!delayed_node)
1875 return -ENOENT;
1876
1877 mutex_lock(&delayed_node->mutex);
1878 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1879 mutex_unlock(&delayed_node->mutex);
1880 btrfs_release_delayed_node(delayed_node);
1881 return -ENOENT;
1882 }
1883
1884 inode_item = &delayed_node->inode_item;
1885
1886 i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1887 i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1888 btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
1889 btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
1890 round_up(i_size_read(inode), fs_info->sectorsize));
1891 inode->i_mode = btrfs_stack_inode_mode(inode_item);
1892 set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1893 inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1894 BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1895 BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1896
1897 inode_set_iversion_queried(inode,
1898 btrfs_stack_inode_sequence(inode_item));
1899 inode->i_rdev = 0;
1900 *rdev = btrfs_stack_inode_rdev(inode_item);
1901 btrfs_inode_split_flags(btrfs_stack_inode_flags(inode_item),
1902 &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
1903
1904 inode_set_atime(inode, btrfs_stack_timespec_sec(&inode_item->atime),
1905 btrfs_stack_timespec_nsec(&inode_item->atime));
1906
1907 inode_set_mtime(inode, btrfs_stack_timespec_sec(&inode_item->mtime),
1908 btrfs_stack_timespec_nsec(&inode_item->mtime));
1909
1910 inode_set_ctime(inode, btrfs_stack_timespec_sec(&inode_item->ctime),
1911 btrfs_stack_timespec_nsec(&inode_item->ctime));
1912
1913 BTRFS_I(inode)->i_otime_sec = btrfs_stack_timespec_sec(&inode_item->otime);
1914 BTRFS_I(inode)->i_otime_nsec = btrfs_stack_timespec_nsec(&inode_item->otime);
1915
1916 inode->i_generation = BTRFS_I(inode)->generation;
1917 if (S_ISDIR(inode->i_mode))
1918 BTRFS_I(inode)->index_cnt = (u64)-1;
1919
1920 mutex_unlock(&delayed_node->mutex);
1921 btrfs_release_delayed_node(delayed_node);
1922 return 0;
1923}
1924
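/*
 * Record an inode update in the delayed node instead of updating the inode
 * item in the subvolume tree right away. Only the first update needs to
 * reserve metadata; while the BTRFS_DELAYED_NODE_INODE_DIRTY bit is set,
 * later updates just refresh the stacked copy.
 */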
1925int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1926 struct btrfs_inode *inode)
1927{
1928 struct btrfs_root *root = inode->root;
1929 struct btrfs_delayed_node *delayed_node;
1930 int ret = 0;
1931
1932 delayed_node = btrfs_get_or_create_delayed_node(inode);
1933 if (IS_ERR(delayed_node))
1934 return PTR_ERR(delayed_node);
1935
1936 mutex_lock(&delayed_node->mutex);
1937 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1938 fill_stack_inode_item(trans, &delayed_node->inode_item,
1939 &inode->vfs_inode);
1940 goto release_node;
1941 }
1942
1943 ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
1944 if (ret)
1945 goto release_node;
1946
1947 fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
1948 set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1949 delayed_node->count++;
1950 atomic_inc(&root->fs_info->delayed_root->items);
1951release_node:
1952 mutex_unlock(&delayed_node->mutex);
1953 btrfs_release_delayed_node(delayed_node);
1954 return ret;
1955}
1956
1957int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
1958{
1959 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1960 struct btrfs_delayed_node *delayed_node;
1961
1962 /*
1963 * We don't do delayed inode updates during log recovery because it
1964 * leads to ENOSPC problems. This means we also can't do delayed
1965 * inode ref deletions.
1966 */
1967 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1968 return -EAGAIN;
1969
1970 delayed_node = btrfs_get_or_create_delayed_node(inode);
1971 if (IS_ERR(delayed_node))
1972 return PTR_ERR(delayed_node);
1973
1974 /*
1975 * We don't reserve space for the inode ref deletion because:
1976 * - We ONLY do async inode ref deletion for inodes with a single link
1977 * (i_nlink == 1), which means there is only one inode ref. In most
1978 * cases the inode ref and the inode item are in the same leaf, and
1979 * we will deal with them at the same time. Since we are sure we
1980 * will reserve space for the inode item, it is unnecessary to also
1981 * reserve space for the inode ref deletion.
1982 * - If the inode ref and the inode item are not in the same leaf,
1983 * we still needn't worry about an ENOSPC problem, because we reserve
1984 * much more space for the inode update than it needs.
1985 * - At worst, we can steal some space from the global reservation.
1986 * That is very rare.
1987 */
1988 mutex_lock(&delayed_node->mutex);
1989 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1990 goto release_node;
1991
1992 set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1993 delayed_node->count++;
1994 atomic_inc(&fs_info->delayed_root->items);
1995release_node:
1996 mutex_unlock(&delayed_node->mutex);
1997 btrfs_release_delayed_node(delayed_node);
1998 return 0;
1999}
2000
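/*
 * Throw away all pending work of a delayed node without running it: drop
 * every queued insertion and deletion item, the pending inode ref deletion
 * (if any) and the dirty inode item (if any), and return the metadata that
 * was reserved for them. Used on inode eviction and on error/unmount paths.
 */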
2001static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
2002{
2003 struct btrfs_root *root = delayed_node->root;
2004 struct btrfs_fs_info *fs_info = root->fs_info;
2005 struct btrfs_delayed_item *curr_item, *prev_item;
2006
2007 mutex_lock(&delayed_node->mutex);
2008 curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
2009 while (curr_item) {
2010 prev_item = curr_item;
2011 curr_item = __btrfs_next_delayed_item(prev_item);
2012 btrfs_release_delayed_item(prev_item);
2013 }
2014
2015 if (delayed_node->index_item_leaves > 0) {
2016 btrfs_delayed_item_release_leaves(delayed_node,
2017 delayed_node->index_item_leaves);
2018 delayed_node->index_item_leaves = 0;
2019 }
2020
2021 curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
2022 while (curr_item) {
2023 btrfs_delayed_item_release_metadata(root, curr_item);
2024 prev_item = curr_item;
2025 curr_item = __btrfs_next_delayed_item(prev_item);
2026 btrfs_release_delayed_item(prev_item);
2027 }
2028
2029 btrfs_release_delayed_iref(delayed_node);
2030
2031 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
2032 btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
2033 btrfs_release_delayed_inode(delayed_node);
2034 }
2035 mutex_unlock(&delayed_node->mutex);
2036}
2037
2038void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
2039{
2040 struct btrfs_delayed_node *delayed_node;
2041
2042 delayed_node = btrfs_get_delayed_node(inode);
2043 if (!delayed_node)
2044 return;
2045
2046 __btrfs_kill_delayed_node(delayed_node);
2047 btrfs_release_delayed_node(delayed_node);
2048}
2049
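/*
 * Kill the delayed nodes of every inode in this root, grabbing up to 8 nodes
 * from the xarray at a time so the xarray lock is not held while the nodes
 * are being torn down.
 */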
2050void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
2051{
2052 unsigned long index = 0;
2053 struct btrfs_delayed_node *delayed_nodes[8];
2054
2055 while (1) {
2056 struct btrfs_delayed_node *node;
2057 int count;
2058
2059 xa_lock(&root->delayed_nodes);
2060 if (xa_empty(&root->delayed_nodes)) {
2061 xa_unlock(&root->delayed_nodes);
2062 return;
2063 }
2064
2065 count = 0;
2066 xa_for_each_start(&root->delayed_nodes, index, node, index) {
2067 /*
2068 * Don't increase refs in case the node is dead and
2069 * about to be removed from the tree in the loop below
2070 */
2071 if (refcount_inc_not_zero(&node->refs)) {
2072 delayed_nodes[count] = node;
2073 count++;
2074 }
2075 if (count >= ARRAY_SIZE(delayed_nodes))
2076 break;
2077 }
2078 xa_unlock(&root->delayed_nodes);
2079 index++;
2080
2081 for (int i = 0; i < count; i++) {
2082 __btrfs_kill_delayed_node(delayed_nodes[i]);
2083 btrfs_release_delayed_node(delayed_nodes[i]);
2084 }
2085 }
2086}
2087
2088void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
2089{
2090 struct btrfs_delayed_node *curr_node, *prev_node;
2091
2092 curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
2093 while (curr_node) {
2094 __btrfs_kill_delayed_node(curr_node);
2095
2096 prev_node = curr_node;
2097 curr_node = btrfs_next_delayed_node(curr_node);
2098 btrfs_release_delayed_node(prev_node);
2099 }
2100}
2101
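/*
 * The logging counterpart of btrfs_readdir_get_delayed_items(): collect all
 * pending insertion and deletion items on the given lists so that directory
 * logging can copy them into the log tree. Items that another logging task
 * already placed on a log list are skipped (see the detailed comment in the
 * loop below).
 */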
2102void btrfs_log_get_delayed_items(struct btrfs_inode *inode,
2103 struct list_head *ins_list,
2104 struct list_head *del_list)
2105{
2106 struct btrfs_delayed_node *node;
2107 struct btrfs_delayed_item *item;
2108
2109 node = btrfs_get_delayed_node(inode);
2110 if (!node)
2111 return;
2112
2113 mutex_lock(&node->mutex);
2114 item = __btrfs_first_delayed_insertion_item(node);
2115 while (item) {
2116 /*
2117 * It's possible that the item is already in a log list. This
2118 * can happen in case two tasks are trying to log the same
2119 * directory. For example, if we have two tasks, A and B:
2120 *
2121 * Task A collected the delayed items into a log list while
2122 * under the inode's log_mutex (at btrfs_log_inode()), but it
2123 * only releases the items after logging the inodes they point
2124 * to (if they are new inodes), which happens after unlocking
2125 * the log mutex;
2126 *
2127 * Task B enters btrfs_log_inode() and acquires the log_mutex
2128 * of the same directory inode, before task A releases the
2129 * delayed items. This can happen for example when logging some
2130 * inode we need to trigger logging of its parent directory, so
2131 * logging two files that have the same parent directory can
2132 * lead to this.
2133 *
2134 * If this happens, just ignore delayed items already in a log
2135 * list. All the tasks logging the directory are under a log
2136 * transaction and whichever finishes first can not sync the log
2137 * before the other completes and leaves the log transaction.
2138 */
2139 if (!item->logged && list_empty(&item->log_list)) {
2140 refcount_inc(&item->refs);
2141 list_add_tail(&item->log_list, ins_list);
2142 }
2143 item = __btrfs_next_delayed_item(item);
2144 }
2145
2146 item = __btrfs_first_delayed_deletion_item(node);
2147 while (item) {
2148 /* It may be non-empty, for the same reason mentioned above. */
2149 if (!item->logged && list_empty(&item->log_list)) {
2150 refcount_inc(&item->refs);
2151 list_add_tail(&item->log_list, del_list);
2152 }
2153 item = __btrfs_next_delayed_item(item);
2154 }
2155 mutex_unlock(&node->mutex);
2156
2157 /*
2158 * We are called during inode logging, which means the inode is in use
2159 * and cannot be evicted before we finish logging the inode. So we never
2160 * have the last reference on the delayed inode.
2161 * Also, we don't use btrfs_release_delayed_node() because that would
2162 * requeue the delayed inode (change its order in the list of prepared
2163 * nodes) and we don't want to do such change because we don't create or
2164 * delete delayed items.
2165 */
2166 ASSERT(refcount_read(&node->refs) > 1);
2167 refcount_dec(&node->refs);
2168}
2169
2170void btrfs_log_put_delayed_items(struct btrfs_inode *inode,
2171 struct list_head *ins_list,
2172 struct list_head *del_list)
2173{
2174 struct btrfs_delayed_node *node;
2175 struct btrfs_delayed_item *item;
2176 struct btrfs_delayed_item *next;
2177
2178 node = btrfs_get_delayed_node(inode);
2179 if (!node)
2180 return;
2181
2182 mutex_lock(&node->mutex);
2183
2184 list_for_each_entry_safe(item, next, ins_list, log_list) {
2185 item->logged = true;
2186 list_del_init(&item->log_list);
2187 if (refcount_dec_and_test(&item->refs))
2188 kfree(item);
2189 }
2190
2191 list_for_each_entry_safe(item, next, del_list, log_list) {
2192 item->logged = true;
2193 list_del_init(&item->log_list);
2194 if (refcount_dec_and_test(&item->refs))
2195 kfree(item);
2196 }
2197
2198 mutex_unlock(&node->mutex);
2199
2200 /*
2201 * We are called during inode logging, which means the inode is in use
2202 * and cannot be evicted before we finish logging the inode. So we never
2203 * have the last reference on the delayed inode.
2204 * Also, we don't use btrfs_release_delayed_node() because that would
2205 * requeue the delayed inode (change its order in the list of prepared
2206 * nodes) and we don't want to do such change because we don't create or
2207 * delete delayed items.
2208 */
2209 ASSERT(refcount_read(&node->refs) > 1);
2210 refcount_dec(&node->refs);
2211}