/*
 * Copyright (C) 2011 Fujitsu. All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"

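/*
 * Thresholds for the number of queued delayed items:
 * - once it passes BTRFS_DELAYED_BACKGROUND, background flushing is
 *   kicked off;
 * - once it passes BTRFS_DELAYED_WRITEBACK, callers of
 *   btrfs_balance_delayed_items() block until enough items are flushed;
 * - BTRFS_DELAYED_BATCH is the granularity at which waiters are woken.
 */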
#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	atomic_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

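/*
 * Look up the delayed node cached in the btrfs inode, falling back to the
 * root's radix tree, and take a reference on it. A hit in the radix tree
 * also caches the node in the inode. Returns NULL if the inode has no
 * delayed node.
 */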
static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
{
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	struct btrfs_delayed_node *node;

	node = ACCESS_ONCE(btrfs_inode->delayed_node);
	if (node) {
		atomic_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			atomic_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		btrfs_inode->delayed_node = node;
		/* can be accessed and cached in the inode */
		atomic_add(2, &node->refs);
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
							struct inode *inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	int ret;

again:
	node = btrfs_get_delayed_node(inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	atomic_add(2, &node->refs);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		atomic_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		atomic_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

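/*
 * Drop a reference on a delayed node. While the node still has queued
 * items it is kept (or re-inserted) on the work lists, otherwise it is
 * dequeued. When the last reference goes away, the node is removed from
 * the radix tree and freed; the refcount is re-checked under inode_lock
 * to close the race with btrfs_get_delayed_node().
 */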
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (atomic_dec_and_test(&delayed_node->refs)) {
		bool free = false;
		struct btrfs_root *root = delayed_node->root;
		spin_lock(&root->inode_lock);
		if (atomic_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			free = true;
		}
		spin_unlock(&root->inode_lock);
		if (free)
			kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

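/*
 * Allocate a delayed item with @data_len bytes of item data placed right
 * after the structure, holding a single reference for the caller.
 */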
static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		atomic_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @root: the rb-tree root of the delayed node to search
 * @key: the key to look up
 * @prev: used to store the prev item if the right item isn't found
 * @next: used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
}

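/*
 * Link a delayed item into the insertion or deletion rb-tree of its
 * delayed node, ordered by the item key. A dir index insertion also
 * pushes the node's index_cnt past the new index.
 */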
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

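/*
 * Account one finished delayed item and wake up any task throttled in
 * btrfs_balance_delayed_items(), either when the number of queued items
 * drops below the background threshold or at every batch boundary.
 */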
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (atomic_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
					struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

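/*
 * Move the metadata reservation for one leaf item from the transaction's
 * block reserve into the global delayed block reserve, so the space stays
 * reserved until the delayed item is actually written out.
 */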
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_fs_info *fs_info,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv,
				item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;
	bool release = false;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	/*
	 * If our block_rsv is the delalloc block reserve then check and see if
	 * we have our extra reservation for updating the inode. If not fall
	 * through and try to reserve space quickly.
	 *
	 * We used to try and steal from the delalloc block rsv or the global
	 * reserve, but we'd steal a full reservation, which isn't kind. We are
	 * here through delalloc which means we've likely just cowed down close
	 * to the leaf that contains the inode, so we would steal less just
	 * doing the fallback inode update, so if we do end up having to steal
	 * from the global block rsv we hopefully only steal one or two blocks
	 * worth which is less likely to hurt us.
	 */
	if (src_rsv && src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
		spin_lock(&BTRFS_I(inode)->lock);
		if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
				       &BTRFS_I(inode)->runtime_flags))
			release = true;
		else
			src_rsv = NULL;
		spin_unlock(&BTRFS_I(inode)->lock);
	}

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed. This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we're accounted for.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);

	/*
	 * Migrate only takes a reservation, it doesn't touch the size of the
	 * block_rsv. This is to simplify people who don't normally have things
	 * migrated from their block rsv. If they go to release their
	 * reservation, that will decrease the size as well, so if migrate
	 * reduced size we'd end up with a negative size. But for the
	 * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
	 * but we could in fact do this reserve/migrate dance several times
	 * between the time we did the original reservation and we'd clean it
	 * up. So to take care of this, release the space for the meta
	 * reservation here. I think it may be time for a documentation page on
	 * how block rsvs work.
	 */
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	if (release) {
		trace_btrfs_space_reservation(fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 0);
		btrfs_block_rsv_release(fs_info, src_rsv, num_bytes);
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						 struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(fs_info, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of continuous items that we can insert in a batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory, but that might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(fs_info, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

/*
 * This helper handles simple insertions that don't need to extend the item
 * for new data, such as directory name index insertion and inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(fs_info, delayed_item);
	return 0;
}

/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

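/*
 * Delete a batch of continuous dir index items from the leaf @path points
 * at, pairing each leaf slot against the matching queued deletion item.
 */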
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of dir index items that we can delete in a batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(fs_info, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}

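/*
 * Copy the in-memory inode item of the delayed node into the inode item
 * in the fs tree, and delete the delayed inode ref if DEL_IREF is set.
 * The iref normally sits right next to the inode item in the same leaf;
 * if it doesn't, fall back to a full tree search. Call this with
 * delayed_node->mutex held.
 */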
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for an inode that has only one link,
	 * so there is only one iref. The case where several irefs are
	 * in the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node);
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

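/*
 * Flush everything queued on one delayed node: first the insertion items,
 * then the deletion items, and finally the inode item update itself.
 */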
static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info)
{
	return __btrfs_run_delayed_items(trans, fs_info, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info, int nr)
{
	return __btrfs_run_delayed_items(trans, fs_info, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
	if (!delayed_node)
		return;

	BTRFS_I(inode)->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

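/*
 * Work item handed to the delayed_workers workqueue. @nr is the number of
 * delayed nodes to process; 0 means flush until the queue is nearly empty
 * (bounded by BTRFS_DELAYED_WRITEBACK nodes).
 */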
struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

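/*
 * Worker callback: pull prepared delayed nodes off the list and commit
 * their delayed items in a joined transaction, stopping once the requested
 * number of nodes is done or the number of queued items falls below half
 * of the background threshold.
 */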
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

again:
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
		goto free_path;

	delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
	if (!delayed_node)
		goto free_path;

	path->leave_spinning = 1;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto release_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	trans->block_rsv = block_rsv;
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty_nodelay(root->fs_info);

release_path:
	btrfs_release_path(path);
	total_done++;

	btrfs_release_prepared_delayed_node(delayed_node);
	if ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK) ||
	    total_done < async_work->nr)
		goto again;

free_path:
	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND ||
	    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return 0;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
			btrfs_async_run_delayed_root, NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}

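/*
 * Throttle producers of delayed items: above the background threshold,
 * kick the async worker for a small batch; above the writeback threshold,
 * flush everything asynchronously and wait until the item count drops or
 * a batch completes.
 */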
void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}

/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   const char *name, int name_len,
				   struct inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, delayed_item);
	/*
	 * we have reserved enough space when we started the transaction,
	 * so a metadata reservation failure here is impossible
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(fs_info,
			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

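/*
 * If the dir index item to delete is still queued as a delayed insertion,
 * the two cancel out: drop the queued insertion instead of queueing a
 * deletion. Returns 1 if no matching insertion item was found.
 */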
static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(fs_info, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct inode *dir, u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	item_key.type = BTRFS_DIR_INDEX_KEY;
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, item);
	/*
	 * we have reserved enough space when we started the transaction,
	 * so a metadata reservation failure here is impossible.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(fs_info,
			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->objectid, node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}

int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we hold the i_mutex of this directory, it is impossible for
	 * a new directory index to be added into the delayed node and for
	 * index_cnt to be updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

bool btrfs_readdir_get_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return false;

	/*
	 * We can only do one readdir with delayed items at a time because of
	 * item->readdir_list.
	 */
	inode_unlock_shared(inode);
	inode_lock(inode);

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check whether it is about to be
	 * freed or not.
	 *
	 * Besides that, this function is used to read the dir, and we do
	 * not insert/delete delayed items during this period. So we also
	 * needn't requeue or dequeue this delayed node.
	 */
	atomic_dec(&delayed_node->refs);

	return true;
}

void btrfs_readdir_put_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}

	/*
	 * The VFS is going to do up_read(), so we need to downgrade back to a
	 * read lock.
	 */
	downgrade_write(&inode->i_rwsem);
}

int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr, *next;
	int ret;

	if (list_empty(del_list))
		return 0;

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;

		list_del(&curr->readdir_list);
		ret = (curr->key.offset == index);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (ret)
			return 1;
		else
			continue;
	}
	return 0;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * The data of the delayed items can't change, so we needn't lock
	 * them. And we hold the i_mutex of the directory, so nobody can
	 * delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (atomic_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
				 location.objectid, d_type);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
	}
	return 0;
}

static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(&inode_item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);
}

int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

	inode->i_version = btrfs_stack_inode_sequence(inode_item);
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_stack_timespec_sec(&inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_stack_timespec_nsec(&inode_item->otime);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

int btrfs_delayed_delete_inode_ref(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_delayed_node *delayed_node;

	/*
	 * we don't do delayed inode updates during log recovery because it
	 * leads to enospc problems. This means we also can't do
	 * delayed inode refs.
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for inode ref deletion because:
	 * - We ONLY do async inode ref deletion for an inode that has only
	 *   one link (i_nlink == 1), which means there is only one inode ref.
	 *   And in most cases, the inode ref and the inode item are in the
	 *   same leaf, so we will deal with them at the same time.
	 *   Since we are sure we will reserve the space for the inode item,
	 *   it is unnecessary to reserve space for inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   we also needn't worry about the enospc problem, because we
	 *   reserve much more space for the inode update than it needs.
	 * - At the worst, we can steal some space from the global reservation.
	 *   That is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(fs_info, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(fs_info, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(fs_info, delayed_node);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}

1948void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1949{
1950 u64 inode_id = 0;
1951 struct btrfs_delayed_node *delayed_nodes[8];
1952 int i, n;
1953
1954 while (1) {
1955 spin_lock(&root->inode_lock);
1956 n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1957 (void **)delayed_nodes, inode_id,
1958 ARRAY_SIZE(delayed_nodes));
1959 if (!n) {
1960 spin_unlock(&root->inode_lock);
1961 break;
1962 }
1963
1964 inode_id = delayed_nodes[n - 1]->inode_id + 1;
1965
1966 for (i = 0; i < n; i++)
1967 atomic_inc(&delayed_nodes[i]->refs);
1968 spin_unlock(&root->inode_lock);
1969
1970 for (i = 0; i < n; i++) {
1971 __btrfs_kill_delayed_node(delayed_nodes[i]);
1972 btrfs_release_delayed_node(delayed_nodes[i]);
1973 }
1974 }
1975}
1976
1977void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
1978{
1979 struct btrfs_delayed_node *curr_node, *prev_node;
1980
1981 curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
1982 while (curr_node) {
1983 __btrfs_kill_delayed_node(curr_node);
1984
1985 prev_node = curr_node;
1986 curr_node = btrfs_next_delayed_node(curr_node);
1987 btrfs_release_delayed_node(prev_node);
1988 }
1989}
1990
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2011 Fujitsu. All rights reserved.
4 * Written by Miao Xie <miaox@cn.fujitsu.com>
5 */
6
7#include <linux/slab.h>
8#include <linux/iversion.h>
9#include <linux/sched/mm.h>
10#include "misc.h"
11#include "delayed-inode.h"
12#include "disk-io.h"
13#include "transaction.h"
14#include "ctree.h"
15#include "qgroup.h"
16#include "locking.h"
17
18#define BTRFS_DELAYED_WRITEBACK 512
19#define BTRFS_DELAYED_BACKGROUND 128
20#define BTRFS_DELAYED_BATCH 16
21
22static struct kmem_cache *delayed_node_cache;
23
24int __init btrfs_delayed_inode_init(void)
25{
26 delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
27 sizeof(struct btrfs_delayed_node),
28 0,
29 SLAB_MEM_SPREAD,
30 NULL);
31 if (!delayed_node_cache)
32 return -ENOMEM;
33 return 0;
34}
35
36void __cold btrfs_delayed_inode_exit(void)
37{
38 kmem_cache_destroy(delayed_node_cache);
39}
40
41static inline void btrfs_init_delayed_node(
42 struct btrfs_delayed_node *delayed_node,
43 struct btrfs_root *root, u64 inode_id)
44{
45 delayed_node->root = root;
46 delayed_node->inode_id = inode_id;
47 refcount_set(&delayed_node->refs, 0);
48 delayed_node->ins_root = RB_ROOT_CACHED;
49 delayed_node->del_root = RB_ROOT_CACHED;
50 mutex_init(&delayed_node->mutex);
51 INIT_LIST_HEAD(&delayed_node->n_list);
52 INIT_LIST_HEAD(&delayed_node->p_list);
53}
54
55static inline int btrfs_is_continuous_delayed_item(
56 struct btrfs_delayed_item *item1,
57 struct btrfs_delayed_item *item2)
58{
59 if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
60 item1->key.objectid == item2->key.objectid &&
61 item1->key.type == item2->key.type &&
62 item1->key.offset + 1 == item2->key.offset)
63 return 1;
64 return 0;
65}
66
67static struct btrfs_delayed_node *btrfs_get_delayed_node(
68 struct btrfs_inode *btrfs_inode)
69{
70 struct btrfs_root *root = btrfs_inode->root;
71 u64 ino = btrfs_ino(btrfs_inode);
72 struct btrfs_delayed_node *node;
73
74 node = READ_ONCE(btrfs_inode->delayed_node);
75 if (node) {
76 refcount_inc(&node->refs);
77 return node;
78 }
79
80 spin_lock(&root->inode_lock);
81 node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
82
83 if (node) {
84 if (btrfs_inode->delayed_node) {
85 refcount_inc(&node->refs); /* can be accessed */
86 BUG_ON(btrfs_inode->delayed_node != node);
87 spin_unlock(&root->inode_lock);
88 return node;
89 }
90
91 /*
92 * It's possible that we're racing into the middle of removing
93 * this node from the radix tree. In this case, the refcount
94 * was zero and it should never go back to one. Just return
95 * NULL like it was never in the radix at all; our release
96 * function is in the process of removing it.
97 *
98 * Some implementations of refcount_inc refuse to bump the
99 * refcount once it has hit zero. If we don't do this dance
100 * here, refcount_inc() may decide to just WARN_ONCE() instead
101 * of actually bumping the refcount.
102 *
103 * If this node is properly in the radix, we want to bump the
104 * refcount twice, once for the inode and once for this get
105 * operation.
106 */
107 if (refcount_inc_not_zero(&node->refs)) {
108 refcount_inc(&node->refs);
109 btrfs_inode->delayed_node = node;
110 } else {
111 node = NULL;
112 }
113
114 spin_unlock(&root->inode_lock);
115 return node;
116 }
117 spin_unlock(&root->inode_lock);
118
119 return NULL;
120}
121
122/* Will return either the node or ERR_PTR(-ENOMEM) */
123static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
124 struct btrfs_inode *btrfs_inode)
125{
126 struct btrfs_delayed_node *node;
127 struct btrfs_root *root = btrfs_inode->root;
128 u64 ino = btrfs_ino(btrfs_inode);
129 int ret;
130
131again:
132 node = btrfs_get_delayed_node(btrfs_inode);
133 if (node)
134 return node;
135
136 node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
137 if (!node)
138 return ERR_PTR(-ENOMEM);
139 btrfs_init_delayed_node(node, root, ino);
140
141 /* cached in the btrfs inode and can be accessed */
142 refcount_set(&node->refs, 2);
143
144 ret = radix_tree_preload(GFP_NOFS);
145 if (ret) {
146 kmem_cache_free(delayed_node_cache, node);
147 return ERR_PTR(ret);
148 }
149
150 spin_lock(&root->inode_lock);
151 ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
152 if (ret == -EEXIST) {
153 spin_unlock(&root->inode_lock);
154 kmem_cache_free(delayed_node_cache, node);
155 radix_tree_preload_end();
156 goto again;
157 }
158 btrfs_inode->delayed_node = node;
159 spin_unlock(&root->inode_lock);
160 radix_tree_preload_end();
161
162 return node;
163}
164
165/*
166 * Call this while holding delayed_node->mutex.
167 *
168 * If mod = 1, also add this node to the prepare list.
169 */
170static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
171 struct btrfs_delayed_node *node,
172 int mod)
173{
174 spin_lock(&root->lock);
175 if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
176 if (!list_empty(&node->p_list))
177 list_move_tail(&node->p_list, &root->prepare_list);
178 else if (mod)
179 list_add_tail(&node->p_list, &root->prepare_list);
180 } else {
181 list_add_tail(&node->n_list, &root->node_list);
182 list_add_tail(&node->p_list, &root->prepare_list);
183 refcount_inc(&node->refs); /* inserted into list */
184 root->nodes++;
185 set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
186 }
187 spin_unlock(&root->lock);
188}
189
190/* Call it when holding delayed_node->mutex */
191static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
192 struct btrfs_delayed_node *node)
193{
194 spin_lock(&root->lock);
195 if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
196 root->nodes--;
197 refcount_dec(&node->refs); /* not in the list */
198 list_del_init(&node->n_list);
199 if (!list_empty(&node->p_list))
200 list_del_init(&node->p_list);
201 clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
202 }
203 spin_unlock(&root->lock);
204}
205
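/*
 * Return the first node on the delayed root's node list, holding an extra
 * reference on it, or NULL if the list is empty.
 */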
206static struct btrfs_delayed_node *btrfs_first_delayed_node(
207 struct btrfs_delayed_root *delayed_root)
208{
209 struct list_head *p;
210 struct btrfs_delayed_node *node = NULL;
211
212 spin_lock(&delayed_root->lock);
213 if (list_empty(&delayed_root->node_list))
214 goto out;
215
216 p = delayed_root->node_list.next;
217 node = list_entry(p, struct btrfs_delayed_node, n_list);
218 refcount_inc(&node->refs);
219out:
220 spin_unlock(&delayed_root->lock);
221
222 return node;
223}
224
225static struct btrfs_delayed_node *btrfs_next_delayed_node(
226 struct btrfs_delayed_node *node)
227{
228 struct btrfs_delayed_root *delayed_root;
229 struct list_head *p;
230 struct btrfs_delayed_node *next = NULL;
231
232 delayed_root = node->root->fs_info->delayed_root;
233 spin_lock(&delayed_root->lock);
234 if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
235 /* not in the list */
236 if (list_empty(&delayed_root->node_list))
237 goto out;
238 p = delayed_root->node_list.next;
239 } else if (list_is_last(&node->n_list, &delayed_root->node_list))
240 goto out;
241 else
242 p = node->n_list.next;
243
244 next = list_entry(p, struct btrfs_delayed_node, n_list);
245 refcount_inc(&next->refs);
246out:
247 spin_unlock(&delayed_root->lock);
248
249 return next;
250}
251
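/*
 * Drop one reference on a delayed node. While still holding the node's
 * mutex, requeue the node if it has pending items (also putting it on the
 * prepare list when @mod is set), otherwise dequeue it. If the reference
 * we drop turns out to be the last one, remove the node from the radix
 * tree and free it.
 */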
252static void __btrfs_release_delayed_node(
253 struct btrfs_delayed_node *delayed_node,
254 int mod)
255{
256 struct btrfs_delayed_root *delayed_root;
257
258 if (!delayed_node)
259 return;
260
261 delayed_root = delayed_node->root->fs_info->delayed_root;
262
263 mutex_lock(&delayed_node->mutex);
264 if (delayed_node->count)
265 btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
266 else
267 btrfs_dequeue_delayed_node(delayed_root, delayed_node);
268 mutex_unlock(&delayed_node->mutex);
269
270 if (refcount_dec_and_test(&delayed_node->refs)) {
271 struct btrfs_root *root = delayed_node->root;
272
273 spin_lock(&root->inode_lock);
274 /*
275 * Once our refcount goes to zero, nobody is allowed to bump it
276 * back up. We can delete it now.
277 */
278 ASSERT(refcount_read(&delayed_node->refs) == 0);
279 radix_tree_delete(&root->delayed_nodes_tree,
280 delayed_node->inode_id);
281 spin_unlock(&root->inode_lock);
282 kmem_cache_free(delayed_node_cache, delayed_node);
283 }
284}
285
286static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
287{
288 __btrfs_release_delayed_node(node, 0);
289}
290
291static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
292 struct btrfs_delayed_root *delayed_root)
293{
294 struct list_head *p;
295 struct btrfs_delayed_node *node = NULL;
296
297 spin_lock(&delayed_root->lock);
298 if (list_empty(&delayed_root->prepare_list))
299 goto out;
300
301 p = delayed_root->prepare_list.next;
302 list_del_init(p);
303 node = list_entry(p, struct btrfs_delayed_node, p_list);
304 refcount_inc(&node->refs);
305out:
306 spin_unlock(&delayed_root->lock);
307
308 return node;
309}
310
311static inline void btrfs_release_prepared_delayed_node(
312 struct btrfs_delayed_node *node)
313{
314 __btrfs_release_delayed_node(node, 1);
315}
316
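/*
 * Allocate a delayed item with @data_len bytes of trailing payload. The
 * item starts with a single reference and is not yet attached to any
 * delayed node.
 */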
317static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
318{
319 struct btrfs_delayed_item *item;
320 item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
321 if (item) {
322 item->data_len = data_len;
323 item->ins_or_del = 0;
324 item->bytes_reserved = 0;
325 item->delayed_node = NULL;
326 refcount_set(&item->refs, 1);
327 }
328 return item;
329}
330
331/*
332 * __btrfs_lookup_delayed_item - look up a delayed item by key
333 * @root:	  the rbtree (of a delayed node) to search
334 * @key:	  the key to look up
335 * @prev:	  used to store the prev item if the right item isn't found
336 * @next:	  used to store the next item if the right item isn't found
337 *
338 * Note: if the right item isn't found, NULL is returned and the previous
339 * and next items are stored via @prev and @next.
340 */
341static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
342 struct rb_root *root,
343 struct btrfs_key *key,
344 struct btrfs_delayed_item **prev,
345 struct btrfs_delayed_item **next)
346{
347 struct rb_node *node, *prev_node = NULL;
348 struct btrfs_delayed_item *delayed_item = NULL;
349 int ret = 0;
350
351 node = root->rb_node;
352
353 while (node) {
354 delayed_item = rb_entry(node, struct btrfs_delayed_item,
355 rb_node);
356 prev_node = node;
357 ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
358 if (ret < 0)
359 node = node->rb_right;
360 else if (ret > 0)
361 node = node->rb_left;
362 else
363 return delayed_item;
364 }
365
366 if (prev) {
367 if (!prev_node)
368 *prev = NULL;
369 else if (ret < 0)
370 *prev = delayed_item;
371 else if ((node = rb_prev(prev_node)) != NULL) {
372 *prev = rb_entry(node, struct btrfs_delayed_item,
373 rb_node);
374 } else
375 *prev = NULL;
376 }
377
378 if (next) {
379 if (!prev_node)
380 *next = NULL;
381 else if (ret > 0)
382 *next = delayed_item;
383 else if ((node = rb_next(prev_node)) != NULL) {
384 *next = rb_entry(node, struct btrfs_delayed_item,
385 rb_node);
386 } else
387 *next = NULL;
388 }
389 return NULL;
390}
391
392static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
393 struct btrfs_delayed_node *delayed_node,
394 struct btrfs_key *key)
395{
396 return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
397 NULL, NULL);
398}
399
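/*
 * Link a delayed item into the insertion or deletion rbtree of
 * @delayed_node, keyed by the item's btrfs key. Returns -EEXIST if an
 * item with the same key is already queued. For dir index insertions this
 * also advances the node's index_cnt past the new index.
 */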
400static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
401 struct btrfs_delayed_item *ins,
402 int action)
403{
404 struct rb_node **p, *node;
405 struct rb_node *parent_node = NULL;
406 struct rb_root_cached *root;
407 struct btrfs_delayed_item *item;
408 int cmp;
409 bool leftmost = true;
410
411 if (action == BTRFS_DELAYED_INSERTION_ITEM)
412 root = &delayed_node->ins_root;
413 else if (action == BTRFS_DELAYED_DELETION_ITEM)
414 root = &delayed_node->del_root;
415 else
416 BUG();
417 p = &root->rb_root.rb_node;
418 node = &ins->rb_node;
419
420 while (*p) {
421 parent_node = *p;
422 item = rb_entry(parent_node, struct btrfs_delayed_item,
423 rb_node);
424
425 cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
426 if (cmp < 0) {
427 p = &(*p)->rb_right;
428 leftmost = false;
429 } else if (cmp > 0) {
430 p = &(*p)->rb_left;
431 } else {
432 return -EEXIST;
433 }
434 }
435
436 rb_link_node(node, parent_node, p);
437 rb_insert_color_cached(node, root, leftmost);
438 ins->delayed_node = delayed_node;
439 ins->ins_or_del = action;
440
441 if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
442 action == BTRFS_DELAYED_INSERTION_ITEM &&
443 ins->key.offset >= delayed_node->index_cnt)
444 delayed_node->index_cnt = ins->key.offset + 1;
445
446 delayed_node->count++;
447 atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
448 return 0;
449}
450
451static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
452 struct btrfs_delayed_item *item)
453{
454 return __btrfs_add_delayed_item(node, item,
455 BTRFS_DELAYED_INSERTION_ITEM);
456}
457
458static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
459 struct btrfs_delayed_item *item)
460{
461 return __btrfs_add_delayed_item(node, item,
462 BTRFS_DELAYED_DELETION_ITEM);
463}
464
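/*
 * Account one finished delayed item: bump the completion sequence, drop
 * the pending item count, and wake waiters on delayed_root->wait once we
 * fall below the background threshold or complete another batch of
 * BTRFS_DELAYED_BATCH items.
 */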
465static void finish_one_item(struct btrfs_delayed_root *delayed_root)
466{
467 int seq = atomic_inc_return(&delayed_root->items_seq);
468
469 /* atomic_dec_return implies a barrier */
470 if ((atomic_dec_return(&delayed_root->items) <
471 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
472 cond_wake_up_nomb(&delayed_root->wait);
473}
474
475static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
476{
477 struct rb_root_cached *root;
478 struct btrfs_delayed_root *delayed_root;
479
480 /* Not associated with any delayed_node */
481 if (!delayed_item->delayed_node)
482 return;
483 delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
484
485 BUG_ON(!delayed_root);
486 BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
487 delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
488
489 if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
490 root = &delayed_item->delayed_node->ins_root;
491 else
492 root = &delayed_item->delayed_node->del_root;
493
494 rb_erase_cached(&delayed_item->rb_node, root);
495 delayed_item->delayed_node->count--;
496
497 finish_one_item(delayed_root);
498}
499
500static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
501{
502 if (item) {
503 __btrfs_remove_delayed_item(item);
504 if (refcount_dec_and_test(&item->refs))
505 kfree(item);
506 }
507}
508
509static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
510 struct btrfs_delayed_node *delayed_node)
511{
512 struct rb_node *p;
513 struct btrfs_delayed_item *item = NULL;
514
515 p = rb_first_cached(&delayed_node->ins_root);
516 if (p)
517 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
518
519 return item;
520}
521
522static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
523 struct btrfs_delayed_node *delayed_node)
524{
525 struct rb_node *p;
526 struct btrfs_delayed_item *item = NULL;
527
528 p = rb_first_cached(&delayed_node->del_root);
529 if (p)
530 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
531
532 return item;
533}
534
535static struct btrfs_delayed_item *__btrfs_next_delayed_item(
536 struct btrfs_delayed_item *item)
537{
538 struct rb_node *p;
539 struct btrfs_delayed_item *next = NULL;
540
541 p = rb_next(&item->rb_node);
542 if (p)
543 next = rb_entry(p, struct btrfs_delayed_item, rb_node);
544
545 return next;
546}
547
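/*
 * Migrate the metadata reservation for one tree item from the
 * transaction's block reservation into the global delayed_block_rsv, so
 * that the space stays reserved until the delayed item is flushed. A
 * no-op if the transaction reserved no bytes (e.g. a joined transaction).
 */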
548static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
549 struct btrfs_root *root,
550 struct btrfs_delayed_item *item)
551{
552 struct btrfs_block_rsv *src_rsv;
553 struct btrfs_block_rsv *dst_rsv;
554 struct btrfs_fs_info *fs_info = root->fs_info;
555 u64 num_bytes;
556 int ret;
557
558 if (!trans->bytes_reserved)
559 return 0;
560
561 src_rsv = trans->block_rsv;
562 dst_rsv = &fs_info->delayed_block_rsv;
563
564 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
565
566	/*
567	 * Here we migrate space rsv from the transaction rsv, since we have
568	 * already reserved space when starting the transaction. So there is
569	 * no need to reserve qgroup space here.
570	 */
571 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
572 if (!ret) {
573 trace_btrfs_space_reservation(fs_info, "delayed_item",
574 item->key.objectid,
575 num_bytes, 1);
576 item->bytes_reserved = num_bytes;
577 }
578
579 return ret;
580}
581
582static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
583 struct btrfs_delayed_item *item)
584{
585 struct btrfs_block_rsv *rsv;
586 struct btrfs_fs_info *fs_info = root->fs_info;
587
588 if (!item->bytes_reserved)
589 return;
590
591 rsv = &fs_info->delayed_block_rsv;
592 /*
593 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
594 * to release/reserve qgroup space.
595 */
596 trace_btrfs_space_reservation(fs_info, "delayed_item",
597 item->key.objectid, item->bytes_reserved,
598 0);
599 btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
600}
601
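/*
 * Reserve metadata space for a delayed inode update. Normally this
 * migrates space out of the transaction's reservation; if the caller
 * didn't reserve any (btrfs_dirty_inode() under a joined transaction),
 * try a fresh NO_FLUSH reservation instead and translate -EAGAIN into
 * -ENOSPC for the caller.
 */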
602static int btrfs_delayed_inode_reserve_metadata(
603 struct btrfs_trans_handle *trans,
604 struct btrfs_root *root,
605 struct btrfs_inode *inode,
606 struct btrfs_delayed_node *node)
607{
608 struct btrfs_fs_info *fs_info = root->fs_info;
609 struct btrfs_block_rsv *src_rsv;
610 struct btrfs_block_rsv *dst_rsv;
611 u64 num_bytes;
612 int ret;
613
614 src_rsv = trans->block_rsv;
615 dst_rsv = &fs_info->delayed_block_rsv;
616
617 num_bytes = btrfs_calc_metadata_size(fs_info, 1);
618
619	/*
620	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction,
621	 * which doesn't reserve space, for speed.  This is a problem since we
622	 * still need to reserve space for this update, so try to reserve the
623	 * space.
624	 *
625	 * Now if src_rsv is the delalloc rsv we'll let it just steal from it,
626	 * since we always reserve enough to update the inode item.
627	 */
628 if (!src_rsv || (!trans->bytes_reserved &&
629 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
630 ret = btrfs_qgroup_reserve_meta_prealloc(root,
631 fs_info->nodesize, true);
632 if (ret < 0)
633 return ret;
634 ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
635 BTRFS_RESERVE_NO_FLUSH);
636		/*
637		 * Since we're under a transaction, reserve_metadata_bytes could
638		 * try to commit the transaction, which would make it return
639		 * -EAGAIN to make us stop the transaction we have. Return
640		 * -ENOSPC instead so that btrfs_dirty_inode knows what to do.
641		 * Don't free the qgroup reservation here: the error branch
642		 * below already frees it exactly once for every failure.
643		 */
644		if (ret == -EAGAIN)
645			ret = -ENOSPC;
646 if (!ret) {
647 node->bytes_reserved = num_bytes;
648 trace_btrfs_space_reservation(fs_info,
649 "delayed_inode",
650 btrfs_ino(inode),
651 num_bytes, 1);
652 } else {
653 btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize);
654 }
655 return ret;
656 }
657
658 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
659 if (!ret) {
660 trace_btrfs_space_reservation(fs_info, "delayed_inode",
661 btrfs_ino(inode), num_bytes, 1);
662 node->bytes_reserved = num_bytes;
663 }
664
665 return ret;
666}
667
668static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
669 struct btrfs_delayed_node *node,
670 bool qgroup_free)
671{
672 struct btrfs_block_rsv *rsv;
673
674 if (!node->bytes_reserved)
675 return;
676
677 rsv = &fs_info->delayed_block_rsv;
678 trace_btrfs_space_reservation(fs_info, "delayed_inode",
679 node->inode_id, node->bytes_reserved, 0);
680 btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
681 if (qgroup_free)
682 btrfs_qgroup_free_meta_prealloc(node->root,
683 node->bytes_reserved);
684 else
685 btrfs_qgroup_convert_reserved_meta(node->root,
686 node->bytes_reserved);
687 node->bytes_reserved = 0;
688}
689
690/*
691 * This helper inserts a run of continuous items into the same leaf, as
692 * many as the leaf's free space allows.
693 */
694static int btrfs_batch_insert_items(struct btrfs_root *root,
695 struct btrfs_path *path,
696 struct btrfs_delayed_item *item)
697{
698 struct btrfs_delayed_item *curr, *next;
699 int free_space;
700 int total_data_size = 0, total_size = 0;
701 struct extent_buffer *leaf;
702 char *data_ptr;
703 struct btrfs_key *keys;
704 u32 *data_size;
705 struct list_head head;
706 int slot;
707 int nitems;
708 int i;
709 int ret = 0;
710
711 BUG_ON(!path->nodes[0]);
712
713 leaf = path->nodes[0];
714 free_space = btrfs_leaf_free_space(leaf);
715 INIT_LIST_HEAD(&head);
716
717 next = item;
718 nitems = 0;
719
720	/*
721	 * Count the number of continuous items that we can insert in a batch.
722	 */
723 while (total_size + next->data_len + sizeof(struct btrfs_item) <=
724 free_space) {
725 total_data_size += next->data_len;
726 total_size += next->data_len + sizeof(struct btrfs_item);
727 list_add_tail(&next->tree_list, &head);
728 nitems++;
729
730 curr = next;
731 next = __btrfs_next_delayed_item(curr);
732 if (!next)
733 break;
734
735 if (!btrfs_is_continuous_delayed_item(curr, next))
736 break;
737 }
738
739 if (!nitems) {
740 ret = 0;
741 goto out;
742 }
743
744	/*
745	 * We need to allocate some memory, which might cause the task to
746	 * sleep, so set all locked nodes in the path to blocking locks
747	 * first.
748	 */
749 btrfs_set_path_blocking(path);
750
751 keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
752 if (!keys) {
753 ret = -ENOMEM;
754 goto out;
755 }
756
757 data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
758 if (!data_size) {
759 ret = -ENOMEM;
760 goto error;
761 }
762
763 /* get keys of all the delayed items */
764 i = 0;
765 list_for_each_entry(next, &head, tree_list) {
766 keys[i] = next->key;
767 data_size[i] = next->data_len;
768 i++;
769 }
770
771 /* insert the keys of the items */
772 setup_items_for_insert(root, path, keys, data_size,
773 total_data_size, total_size, nitems);
774
775 /* insert the dir index items */
776 slot = path->slots[0];
777 list_for_each_entry_safe(curr, next, &head, tree_list) {
778 data_ptr = btrfs_item_ptr(leaf, slot, char);
779 write_extent_buffer(leaf, &curr->data,
780 (unsigned long)data_ptr,
781 curr->data_len);
782 slot++;
783
784 btrfs_delayed_item_release_metadata(root, curr);
785
786 list_del(&curr->tree_list);
787 btrfs_release_delayed_item(curr);
788 }
789
790error:
791 kfree(data_size);
792 kfree(keys);
793out:
794 return ret;
795}
796
797/*
798 * This helper handles simple insertions that needn't extend an existing
799 * item for new data, e.g. directory name index and inode insertion.
800 */
801static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
802 struct btrfs_root *root,
803 struct btrfs_path *path,
804 struct btrfs_delayed_item *delayed_item)
805{
806 struct extent_buffer *leaf;
807 unsigned int nofs_flag;
808 char *ptr;
809 int ret;
810
811 nofs_flag = memalloc_nofs_save();
812 ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
813 delayed_item->data_len);
814 memalloc_nofs_restore(nofs_flag);
815 if (ret < 0 && ret != -EEXIST)
816 return ret;
817
818 leaf = path->nodes[0];
819
820 ptr = btrfs_item_ptr(leaf, path->slots[0], char);
821
822 write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
823 delayed_item->data_len);
824 btrfs_mark_buffer_dirty(leaf);
825
826 btrfs_delayed_item_release_metadata(root, delayed_item);
827 return 0;
828}
829
830/*
831 * We insert one item first, and then, if there are continuous items, we
832 * try to insert them into the same leaf.
833 */
834static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
835 struct btrfs_path *path,
836 struct btrfs_root *root,
837 struct btrfs_delayed_node *node)
838{
839 struct btrfs_delayed_item *curr, *prev;
840 int ret = 0;
841
842do_again:
843 mutex_lock(&node->mutex);
844 curr = __btrfs_first_delayed_insertion_item(node);
845 if (!curr)
846 goto insert_end;
847
848 ret = btrfs_insert_delayed_item(trans, root, path, curr);
849 if (ret < 0) {
850 btrfs_release_path(path);
851 goto insert_end;
852 }
853
854 prev = curr;
855 curr = __btrfs_next_delayed_item(prev);
856 if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
857 /* insert the continuous items into the same leaf */
858 path->slots[0]++;
859 btrfs_batch_insert_items(root, path, curr);
860 }
861 btrfs_release_delayed_item(prev);
862 btrfs_mark_buffer_dirty(path->nodes[0]);
863
864 btrfs_release_path(path);
865 mutex_unlock(&node->mutex);
866 goto do_again;
867
868insert_end:
869 mutex_unlock(&node->mutex);
870 return ret;
871}
872
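/*
 * Delete a batch of contiguous dir index items from the leaf that @path
 * points to: walk forward while the delayed deletion items match the keys
 * in the leaf, remove them all with one btrfs_del_items() call, and then
 * release their metadata reservations.
 */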
873static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
874 struct btrfs_root *root,
875 struct btrfs_path *path,
876 struct btrfs_delayed_item *item)
877{
878 struct btrfs_delayed_item *curr, *next;
879 struct extent_buffer *leaf;
880 struct btrfs_key key;
881 struct list_head head;
882 int nitems, i, last_item;
883 int ret = 0;
884
885 BUG_ON(!path->nodes[0]);
886
887 leaf = path->nodes[0];
888
889 i = path->slots[0];
890 last_item = btrfs_header_nritems(leaf) - 1;
891 if (i > last_item)
892 return -ENOENT; /* FIXME: Is errno suitable? */
893
894 next = item;
895 INIT_LIST_HEAD(&head);
896 btrfs_item_key_to_cpu(leaf, &key, i);
897 nitems = 0;
898	/*
899	 * Count the number of dir index items that we can delete in a batch.
900	 */
901 while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
902 list_add_tail(&next->tree_list, &head);
903 nitems++;
904
905 curr = next;
906 next = __btrfs_next_delayed_item(curr);
907 if (!next)
908 break;
909
910 if (!btrfs_is_continuous_delayed_item(curr, next))
911 break;
912
913 i++;
914 if (i > last_item)
915 break;
916 btrfs_item_key_to_cpu(leaf, &key, i);
917 }
918
919 if (!nitems)
920 return 0;
921
922 ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
923 if (ret)
924 goto out;
925
926 list_for_each_entry_safe(curr, next, &head, tree_list) {
927 btrfs_delayed_item_release_metadata(root, curr);
928 list_del(&curr->tree_list);
929 btrfs_release_delayed_item(curr);
930 }
931
932out:
933 return ret;
934}
935
936static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
937 struct btrfs_path *path,
938 struct btrfs_root *root,
939 struct btrfs_delayed_node *node)
940{
941 struct btrfs_delayed_item *curr, *prev;
942 unsigned int nofs_flag;
943 int ret = 0;
944
945do_again:
946 mutex_lock(&node->mutex);
947 curr = __btrfs_first_delayed_deletion_item(node);
948 if (!curr)
949 goto delete_fail;
950
951 nofs_flag = memalloc_nofs_save();
952 ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
953 memalloc_nofs_restore(nofs_flag);
954 if (ret < 0)
955 goto delete_fail;
956 else if (ret > 0) {
957		/*
958		 * We can't find the tree item this delayed item points to,
959		 * so the delayed item is stale; just drop it.
960		 */
961 prev = curr;
962 curr = __btrfs_next_delayed_item(prev);
963 btrfs_release_delayed_item(prev);
964 ret = 0;
965 btrfs_release_path(path);
966 if (curr) {
967 mutex_unlock(&node->mutex);
968 goto do_again;
969 } else
970 goto delete_fail;
971 }
972
973 btrfs_batch_delete_items(trans, root, path, curr);
974 btrfs_release_path(path);
975 mutex_unlock(&node->mutex);
976 goto do_again;
977
978delete_fail:
979 btrfs_release_path(path);
980 mutex_unlock(&node->mutex);
981 return ret;
982}
983
984static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
985{
986 struct btrfs_delayed_root *delayed_root;
987
988 if (delayed_node &&
989 test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
990 BUG_ON(!delayed_node->root);
991 clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
992 delayed_node->count--;
993
994 delayed_root = delayed_node->root->fs_info->delayed_root;
995 finish_one_item(delayed_root);
996 }
997}
998
999static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
1000{
1001 struct btrfs_delayed_root *delayed_root;
1002
1003 ASSERT(delayed_node->root);
1004 clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1005 delayed_node->count--;
1006
1007 delayed_root = delayed_node->root->fs_info->delayed_root;
1008 finish_one_item(delayed_root);
1009}
1010
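/*
 * Copy the delayed inode_item over the INODE_ITEM in the tree and, if a
 * delayed iref deletion is pending, delete the INODE_REF/EXTREF item as
 * well; it is usually found right after the inode item in the same leaf,
 * with a full tree search as the fallback.
 */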
1011static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1012 struct btrfs_root *root,
1013 struct btrfs_path *path,
1014 struct btrfs_delayed_node *node)
1015{
1016 struct btrfs_fs_info *fs_info = root->fs_info;
1017 struct btrfs_key key;
1018 struct btrfs_inode_item *inode_item;
1019 struct extent_buffer *leaf;
1020 unsigned int nofs_flag;
1021 int mod;
1022 int ret;
1023
1024 key.objectid = node->inode_id;
1025 key.type = BTRFS_INODE_ITEM_KEY;
1026 key.offset = 0;
1027
1028 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1029 mod = -1;
1030 else
1031 mod = 1;
1032
1033 nofs_flag = memalloc_nofs_save();
1034 ret = btrfs_lookup_inode(trans, root, path, &key, mod);
1035 memalloc_nofs_restore(nofs_flag);
1036 if (ret > 0) {
1037 btrfs_release_path(path);
1038 return -ENOENT;
1039 } else if (ret < 0) {
1040 return ret;
1041 }
1042
1043 leaf = path->nodes[0];
1044 inode_item = btrfs_item_ptr(leaf, path->slots[0],
1045 struct btrfs_inode_item);
1046 write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1047 sizeof(struct btrfs_inode_item));
1048 btrfs_mark_buffer_dirty(leaf);
1049
1050 if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1051 goto no_iref;
1052
1053 path->slots[0]++;
1054 if (path->slots[0] >= btrfs_header_nritems(leaf))
1055 goto search;
1056again:
1057 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1058 if (key.objectid != node->inode_id)
1059 goto out;
1060
1061 if (key.type != BTRFS_INODE_REF_KEY &&
1062 key.type != BTRFS_INODE_EXTREF_KEY)
1063 goto out;
1064
1065	/*
1066	 * Delayed iref deletion is only used for inodes with a single
1067	 * link, so there is exactly one iref; several irefs in the same
1068	 * item cannot happen.
1069	 */
1070	ret = btrfs_del_item(trans, root, path);
1071out:
1072 btrfs_release_delayed_iref(node);
1073no_iref:
1074 btrfs_release_path(path);
1075err_out:
1076 btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
1077 btrfs_release_delayed_inode(node);
1078
1079 return ret;
1080
1081search:
1082 btrfs_release_path(path);
1083
1084 key.type = BTRFS_INODE_EXTREF_KEY;
1085 key.offset = -1;
1086
1087 nofs_flag = memalloc_nofs_save();
1088 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1089 memalloc_nofs_restore(nofs_flag);
1090 if (ret < 0)
1091 goto err_out;
1092 ASSERT(ret);
1093
1094 ret = 0;
1095 leaf = path->nodes[0];
1096 path->slots[0]--;
1097 goto again;
1098}
1099
1100static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1101 struct btrfs_root *root,
1102 struct btrfs_path *path,
1103 struct btrfs_delayed_node *node)
1104{
1105 int ret;
1106
1107 mutex_lock(&node->mutex);
1108 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
1109 mutex_unlock(&node->mutex);
1110 return 0;
1111 }
1112
1113 ret = __btrfs_update_delayed_inode(trans, root, path, node);
1114 mutex_unlock(&node->mutex);
1115 return ret;
1116}
1117
1118static inline int
1119__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1120 struct btrfs_path *path,
1121 struct btrfs_delayed_node *node)
1122{
1123 int ret;
1124
1125 ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1126 if (ret)
1127 return ret;
1128
1129 ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1130 if (ret)
1131 return ret;
1132
1133 ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1134 return ret;
1135}
1136
1137/*
1138 * Called when committing the transaction.
1139 * Returns 0 on success.
1140 * Returns < 0 on error; in that case the transaction has been aborted
1141 * and any outstanding delayed items have been cleaned up.
1142 */
1143static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
1144{
1145 struct btrfs_fs_info *fs_info = trans->fs_info;
1146 struct btrfs_delayed_root *delayed_root;
1147 struct btrfs_delayed_node *curr_node, *prev_node;
1148 struct btrfs_path *path;
1149 struct btrfs_block_rsv *block_rsv;
1150 int ret = 0;
1151 bool count = (nr > 0);
1152
1153 if (TRANS_ABORTED(trans))
1154 return -EIO;
1155
1156 path = btrfs_alloc_path();
1157 if (!path)
1158 return -ENOMEM;
1159 path->leave_spinning = 1;
1160
1161 block_rsv = trans->block_rsv;
1162 trans->block_rsv = &fs_info->delayed_block_rsv;
1163
1164 delayed_root = fs_info->delayed_root;
1165
1166 curr_node = btrfs_first_delayed_node(delayed_root);
1167 while (curr_node && (!count || (count && nr--))) {
1168 ret = __btrfs_commit_inode_delayed_items(trans, path,
1169 curr_node);
1170 if (ret) {
1171 btrfs_release_delayed_node(curr_node);
1172 curr_node = NULL;
1173 btrfs_abort_transaction(trans, ret);
1174 break;
1175 }
1176
1177 prev_node = curr_node;
1178 curr_node = btrfs_next_delayed_node(curr_node);
1179 btrfs_release_delayed_node(prev_node);
1180 }
1181
1182 if (curr_node)
1183 btrfs_release_delayed_node(curr_node);
1184 btrfs_free_path(path);
1185 trans->block_rsv = block_rsv;
1186
1187 return ret;
1188}
1189
1190int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
1191{
1192 return __btrfs_run_delayed_items(trans, -1);
1193}
1194
1195int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
1196{
1197 return __btrfs_run_delayed_items(trans, nr);
1198}
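
/*
 * A sketch of how the two wrappers above are typically driven (error
 * handling elided; the real call sites live in the transaction code):
 *
 *	ret = btrfs_run_delayed_items(trans);	flush everything
 *	ret = btrfs_run_delayed_items_nr(trans, BTRFS_DELAYED_BATCH);
 *						flush only one batch
 */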
1199
1200int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1201 struct btrfs_inode *inode)
1202{
1203 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1204 struct btrfs_path *path;
1205 struct btrfs_block_rsv *block_rsv;
1206 int ret;
1207
1208 if (!delayed_node)
1209 return 0;
1210
1211 mutex_lock(&delayed_node->mutex);
1212 if (!delayed_node->count) {
1213 mutex_unlock(&delayed_node->mutex);
1214 btrfs_release_delayed_node(delayed_node);
1215 return 0;
1216 }
1217 mutex_unlock(&delayed_node->mutex);
1218
1219 path = btrfs_alloc_path();
1220 if (!path) {
1221 btrfs_release_delayed_node(delayed_node);
1222 return -ENOMEM;
1223 }
1224 path->leave_spinning = 1;
1225
1226 block_rsv = trans->block_rsv;
1227 trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1228
1229 ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1230
1231 btrfs_release_delayed_node(delayed_node);
1232 btrfs_free_path(path);
1233 trans->block_rsv = block_rsv;
1234
1235 return ret;
1236}
1237
1238int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
1239{
1240 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1241 struct btrfs_trans_handle *trans;
1242 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1243 struct btrfs_path *path;
1244 struct btrfs_block_rsv *block_rsv;
1245 int ret;
1246
1247 if (!delayed_node)
1248 return 0;
1249
1250 mutex_lock(&delayed_node->mutex);
1251 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1252 mutex_unlock(&delayed_node->mutex);
1253 btrfs_release_delayed_node(delayed_node);
1254 return 0;
1255 }
1256 mutex_unlock(&delayed_node->mutex);
1257
1258 trans = btrfs_join_transaction(delayed_node->root);
1259 if (IS_ERR(trans)) {
1260 ret = PTR_ERR(trans);
1261 goto out;
1262 }
1263
1264 path = btrfs_alloc_path();
1265 if (!path) {
1266 ret = -ENOMEM;
1267 goto trans_out;
1268 }
1269 path->leave_spinning = 1;
1270
1271 block_rsv = trans->block_rsv;
1272 trans->block_rsv = &fs_info->delayed_block_rsv;
1273
1274 mutex_lock(&delayed_node->mutex);
1275 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
1276 ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1277 path, delayed_node);
1278 else
1279 ret = 0;
1280 mutex_unlock(&delayed_node->mutex);
1281
1282 btrfs_free_path(path);
1283 trans->block_rsv = block_rsv;
1284trans_out:
1285 btrfs_end_transaction(trans);
1286 btrfs_btree_balance_dirty(fs_info);
1287out:
1288 btrfs_release_delayed_node(delayed_node);
1289
1290 return ret;
1291}
1292
1293void btrfs_remove_delayed_node(struct btrfs_inode *inode)
1294{
1295 struct btrfs_delayed_node *delayed_node;
1296
1297 delayed_node = READ_ONCE(inode->delayed_node);
1298 if (!delayed_node)
1299 return;
1300
1301 inode->delayed_node = NULL;
1302 btrfs_release_delayed_node(delayed_node);
1303}
1304
1305struct btrfs_async_delayed_work {
1306 struct btrfs_delayed_root *delayed_root;
1307 int nr;
1308 struct btrfs_work work;
1309};
1310
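/*
 * Worker body for the delayed-item workqueue: repeatedly take a prepared
 * delayed node, join a transaction and flush the node's items, until the
 * backlog drops below half of BTRFS_DELAYED_BACKGROUND, the prepare list
 * empties, or the requested number of nodes (everything up to
 * BTRFS_DELAYED_WRITEBACK when ->nr is 0) has been processed.
 */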
1311static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1312{
1313 struct btrfs_async_delayed_work *async_work;
1314 struct btrfs_delayed_root *delayed_root;
1315 struct btrfs_trans_handle *trans;
1316 struct btrfs_path *path;
1317 struct btrfs_delayed_node *delayed_node = NULL;
1318 struct btrfs_root *root;
1319 struct btrfs_block_rsv *block_rsv;
1320 int total_done = 0;
1321
1322 async_work = container_of(work, struct btrfs_async_delayed_work, work);
1323 delayed_root = async_work->delayed_root;
1324
1325 path = btrfs_alloc_path();
1326 if (!path)
1327 goto out;
1328
1329 do {
1330 if (atomic_read(&delayed_root->items) <
1331 BTRFS_DELAYED_BACKGROUND / 2)
1332 break;
1333
1334 delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1335 if (!delayed_node)
1336 break;
1337
1338 path->leave_spinning = 1;
1339 root = delayed_node->root;
1340
1341 trans = btrfs_join_transaction(root);
1342 if (IS_ERR(trans)) {
1343 btrfs_release_path(path);
1344 btrfs_release_prepared_delayed_node(delayed_node);
1345 total_done++;
1346 continue;
1347 }
1348
1349 block_rsv = trans->block_rsv;
1350 trans->block_rsv = &root->fs_info->delayed_block_rsv;
1351
1352 __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1353
1354 trans->block_rsv = block_rsv;
1355 btrfs_end_transaction(trans);
1356 btrfs_btree_balance_dirty_nodelay(root->fs_info);
1357
1358 btrfs_release_path(path);
1359 btrfs_release_prepared_delayed_node(delayed_node);
1360 total_done++;
1361
1362 } while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
1363 || total_done < async_work->nr);
1364
1365 btrfs_free_path(path);
1366out:
1367 wake_up(&delayed_root->wait);
1368 kfree(async_work);
1369}
1370
1372static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1373 struct btrfs_fs_info *fs_info, int nr)
1374{
1375 struct btrfs_async_delayed_work *async_work;
1376
1377 async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1378 if (!async_work)
1379 return -ENOMEM;
1380
1381 async_work->delayed_root = delayed_root;
1382 btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
1383 NULL);
1384 async_work->nr = nr;
1385
1386 btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
1387 return 0;
1388}
1389
1390void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
1391{
1392 WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
1393}
1394
1395static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1396{
1397 int val = atomic_read(&delayed_root->items_seq);
1398
1399 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
1400 return 1;
1401
1402 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1403 return 1;
1404
1405 return 0;
1406}
1407
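/*
 * Throttle delayed-item producers: kick the async worker once the backlog
 * exceeds BTRFS_DELAYED_BACKGROUND, and when it grows past
 * BTRFS_DELAYED_WRITEBACK also block until could_end_wait() says enough
 * progress has been made.
 */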
1408void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
1409{
1410 struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
1411
1412 if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1413 btrfs_workqueue_normal_congested(fs_info->delayed_workers))
1414 return;
1415
1416 if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1417 int seq;
1418 int ret;
1419
1420 seq = atomic_read(&delayed_root->items_seq);
1421
1422 ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
1423 if (ret)
1424 return;
1425
1426 wait_event_interruptible(delayed_root->wait,
1427 could_end_wait(delayed_root, seq));
1428 return;
1429 }
1430
1431 btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
1432}
1433
1434/* Will return 0 or -ENOMEM */
1435int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1436 const char *name, int name_len,
1437 struct btrfs_inode *dir,
1438 struct btrfs_disk_key *disk_key, u8 type,
1439 u64 index)
1440{
1441 struct btrfs_delayed_node *delayed_node;
1442 struct btrfs_delayed_item *delayed_item;
1443 struct btrfs_dir_item *dir_item;
1444 int ret;
1445
1446 delayed_node = btrfs_get_or_create_delayed_node(dir);
1447 if (IS_ERR(delayed_node))
1448 return PTR_ERR(delayed_node);
1449
1450 delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1451 if (!delayed_item) {
1452 ret = -ENOMEM;
1453 goto release_node;
1454 }
1455
1456 delayed_item->key.objectid = btrfs_ino(dir);
1457 delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
1458 delayed_item->key.offset = index;
1459
1460 dir_item = (struct btrfs_dir_item *)delayed_item->data;
1461 dir_item->location = *disk_key;
1462 btrfs_set_stack_dir_transid(dir_item, trans->transid);
1463 btrfs_set_stack_dir_data_len(dir_item, 0);
1464 btrfs_set_stack_dir_name_len(dir_item, name_len);
1465 btrfs_set_stack_dir_type(dir_item, type);
1466 memcpy((char *)(dir_item + 1), name, name_len);
1467
1468 ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
1469	/*
1470	 * We have reserved enough space when we started the transaction,
1471	 * so a metadata reservation failure here is impossible.
1472	 */
1473 BUG_ON(ret);
1474
1475 mutex_lock(&delayed_node->mutex);
1476 ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1477 if (unlikely(ret)) {
1478 btrfs_err(trans->fs_info,
1479			  "error adding delayed dir index item (name: %.*s) into the insertion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
1480 name_len, name, delayed_node->root->root_key.objectid,
1481 delayed_node->inode_id, ret);
1482 BUG();
1483 }
1484 mutex_unlock(&delayed_node->mutex);
1485
1486release_node:
1487 btrfs_release_delayed_node(delayed_node);
1488 return ret;
1489}
1490
1491static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
1492 struct btrfs_delayed_node *node,
1493 struct btrfs_key *key)
1494{
1495 struct btrfs_delayed_item *item;
1496
1497 mutex_lock(&node->mutex);
1498 item = __btrfs_lookup_delayed_insertion_item(node, key);
1499 if (!item) {
1500 mutex_unlock(&node->mutex);
1501 return 1;
1502 }
1503
1504 btrfs_delayed_item_release_metadata(node->root, item);
1505 btrfs_release_delayed_item(item);
1506 mutex_unlock(&node->mutex);
1507 return 0;
1508}
1509
1510int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1511 struct btrfs_inode *dir, u64 index)
1512{
1513 struct btrfs_delayed_node *node;
1514 struct btrfs_delayed_item *item;
1515 struct btrfs_key item_key;
1516 int ret;
1517
1518 node = btrfs_get_or_create_delayed_node(dir);
1519 if (IS_ERR(node))
1520 return PTR_ERR(node);
1521
1522 item_key.objectid = btrfs_ino(dir);
1523 item_key.type = BTRFS_DIR_INDEX_KEY;
1524 item_key.offset = index;
1525
1526 ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
1527 &item_key);
1528 if (!ret)
1529 goto end;
1530
1531 item = btrfs_alloc_delayed_item(0);
1532 if (!item) {
1533 ret = -ENOMEM;
1534 goto end;
1535 }
1536
1537 item->key = item_key;
1538
1539 ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
1540	/*
1541	 * We have reserved enough space when we started the transaction, so
1542	 * a metadata reservation failure here should be impossible.
1543	 */
1544 if (ret < 0) {
1545 btrfs_err(trans->fs_info,
1546"metadata reservation failed for delayed dir item deltiona, should have been reserved");
1547 btrfs_release_delayed_item(item);
1548 goto end;
1549 }
1550
1551 mutex_lock(&node->mutex);
1552 ret = __btrfs_add_delayed_deletion_item(node, item);
1553 if (unlikely(ret)) {
1554 btrfs_err(trans->fs_info,
1555			  "error adding delayed dir index item (index: %llu) into the deletion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
1556 index, node->root->root_key.objectid,
1557 node->inode_id, ret);
1558 btrfs_delayed_item_release_metadata(dir->root, item);
1559 btrfs_release_delayed_item(item);
1560 }
1561 mutex_unlock(&node->mutex);
1562end:
1563 btrfs_release_delayed_node(node);
1564 return ret;
1565}
1566
1567int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
1568{
1569 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1570
1571 if (!delayed_node)
1572 return -ENOENT;
1573
1574	/*
1575	 * Since we hold the i_mutex of this directory, no new directory
1576	 * index can be added to the delayed node and index_cnt cannot be
1577	 * updated now, so we needn't lock the delayed node.
1578	 */
1579 if (!delayed_node->index_cnt) {
1580 btrfs_release_delayed_node(delayed_node);
1581 return -EINVAL;
1582 }
1583
1584 inode->index_cnt = delayed_node->index_cnt;
1585 btrfs_release_delayed_node(delayed_node);
1586 return 0;
1587}
1588
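/*
 * Snapshot the pending delayed insertion and deletion items for readdir,
 * taking a reference on each item and linking it into @ins_list or
 * @del_list. Also trades the shared inode lock for an exclusive one,
 * since only one readdir may walk the delayed items at a time.
 */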
1589bool btrfs_readdir_get_delayed_items(struct inode *inode,
1590 struct list_head *ins_list,
1591 struct list_head *del_list)
1592{
1593 struct btrfs_delayed_node *delayed_node;
1594 struct btrfs_delayed_item *item;
1595
1596 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1597 if (!delayed_node)
1598 return false;
1599
1600 /*
1601 * We can only do one readdir with delayed items at a time because of
1602 * item->readdir_list.
1603 */
1604 inode_unlock_shared(inode);
1605 inode_lock(inode);
1606
1607 mutex_lock(&delayed_node->mutex);
1608 item = __btrfs_first_delayed_insertion_item(delayed_node);
1609 while (item) {
1610 refcount_inc(&item->refs);
1611 list_add_tail(&item->readdir_list, ins_list);
1612 item = __btrfs_next_delayed_item(item);
1613 }
1614
1615 item = __btrfs_first_delayed_deletion_item(delayed_node);
1616 while (item) {
1617 refcount_inc(&item->refs);
1618 list_add_tail(&item->readdir_list, del_list);
1619 item = __btrfs_next_delayed_item(item);
1620 }
1621 mutex_unlock(&delayed_node->mutex);
1622	/*
1623	 * This delayed node is still cached in the btrfs inode, so refs
1624	 * must be > 1 now, and we needn't check whether it is about to be
1625	 * freed.
1626	 *
1627	 * Besides that, this function is only used to read a dir; we don't
1628	 * insert/delete delayed items during that period, so we needn't
1629	 * requeue or dequeue this delayed node either.
1630	 */
1631 refcount_dec(&delayed_node->refs);
1632
1633 return true;
1634}
1635
1636void btrfs_readdir_put_delayed_items(struct inode *inode,
1637 struct list_head *ins_list,
1638 struct list_head *del_list)
1639{
1640 struct btrfs_delayed_item *curr, *next;
1641
1642 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1643 list_del(&curr->readdir_list);
1644 if (refcount_dec_and_test(&curr->refs))
1645 kfree(curr);
1646 }
1647
1648 list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1649 list_del(&curr->readdir_list);
1650 if (refcount_dec_and_test(&curr->refs))
1651 kfree(curr);
1652 }
1653
1654 /*
1655 * The VFS is going to do up_read(), so we need to downgrade back to a
1656 * read lock.
1657 */
1658 downgrade_write(&inode->i_rwsem);
1659}
1660
1661int btrfs_should_delete_dir_index(struct list_head *del_list,
1662 u64 index)
1663{
1664 struct btrfs_delayed_item *curr;
1665 int ret = 0;
1666
1667 list_for_each_entry(curr, del_list, readdir_list) {
1668 if (curr->key.offset > index)
1669 break;
1670 if (curr->key.offset == index) {
1671 ret = 1;
1672 break;
1673 }
1674 }
1675 return ret;
1676}
1677
1678/*
1679 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
1680 * and emit the entries at or after ctx->pos into @ctx.
1681 */
1682int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1683 struct list_head *ins_list)
1684{
1685 struct btrfs_dir_item *di;
1686 struct btrfs_delayed_item *curr, *next;
1687 struct btrfs_key location;
1688 char *name;
1689 int name_len;
1690 int over = 0;
1691 unsigned char d_type;
1692
1693 if (list_empty(ins_list))
1694 return 0;
1695
1696	/*
1697	 * The data of the delayed items never changes, so we needn't lock
1698	 * them. And since we hold the i_mutex of the directory, nobody can
1699	 * delete any directory index now.
1700	 */
1701 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1702 list_del(&curr->readdir_list);
1703
1704 if (curr->key.offset < ctx->pos) {
1705 if (refcount_dec_and_test(&curr->refs))
1706 kfree(curr);
1707 continue;
1708 }
1709
1710 ctx->pos = curr->key.offset;
1711
1712 di = (struct btrfs_dir_item *)curr->data;
1713 name = (char *)(di + 1);
1714 name_len = btrfs_stack_dir_name_len(di);
1715
1716 d_type = fs_ftype_to_dtype(di->type);
1717 btrfs_disk_key_to_cpu(&location, &di->location);
1718
1719 over = !dir_emit(ctx, name, name_len,
1720 location.objectid, d_type);
1721
1722 if (refcount_dec_and_test(&curr->refs))
1723 kfree(curr);
1724
1725 if (over)
1726 return 1;
1727 ctx->pos++;
1728 }
1729 return 0;
1730}
1731
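/*
 * Serialize the in-memory VFS inode into the on-stack btrfs_inode_item
 * that is kept in the delayed node until the update reaches the btree.
 */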
1732static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1733 struct btrfs_inode_item *inode_item,
1734 struct inode *inode)
1735{
1736 btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1737 btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1738 btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1739 btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1740 btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1741 btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1742 btrfs_set_stack_inode_generation(inode_item,
1743 BTRFS_I(inode)->generation);
1744 btrfs_set_stack_inode_sequence(inode_item,
1745 inode_peek_iversion(inode));
1746 btrfs_set_stack_inode_transid(inode_item, trans->transid);
1747 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1748 btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1749 btrfs_set_stack_inode_block_group(inode_item, 0);
1750
1751 btrfs_set_stack_timespec_sec(&inode_item->atime,
1752 inode->i_atime.tv_sec);
1753 btrfs_set_stack_timespec_nsec(&inode_item->atime,
1754 inode->i_atime.tv_nsec);
1755
1756 btrfs_set_stack_timespec_sec(&inode_item->mtime,
1757 inode->i_mtime.tv_sec);
1758 btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1759 inode->i_mtime.tv_nsec);
1760
1761 btrfs_set_stack_timespec_sec(&inode_item->ctime,
1762 inode->i_ctime.tv_sec);
1763 btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1764 inode->i_ctime.tv_nsec);
1765
1766 btrfs_set_stack_timespec_sec(&inode_item->otime,
1767 BTRFS_I(inode)->i_otime.tv_sec);
1768 btrfs_set_stack_timespec_nsec(&inode_item->otime,
1769 BTRFS_I(inode)->i_otime.tv_nsec);
1770}
1771
1772int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1773{
1774 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
1775 struct btrfs_delayed_node *delayed_node;
1776 struct btrfs_inode_item *inode_item;
1777
1778 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1779 if (!delayed_node)
1780 return -ENOENT;
1781
1782 mutex_lock(&delayed_node->mutex);
1783 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1784 mutex_unlock(&delayed_node->mutex);
1785 btrfs_release_delayed_node(delayed_node);
1786 return -ENOENT;
1787 }
1788
1789 inode_item = &delayed_node->inode_item;
1790
1791 i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1792 i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1793 btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
1794 btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
1795 round_up(i_size_read(inode), fs_info->sectorsize));
1796 inode->i_mode = btrfs_stack_inode_mode(inode_item);
1797 set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1798 inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1799 BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1800 BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1801
1802 inode_set_iversion_queried(inode,
1803 btrfs_stack_inode_sequence(inode_item));
1804 inode->i_rdev = 0;
1805 *rdev = btrfs_stack_inode_rdev(inode_item);
1806 BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1807
1808 inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1809 inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
1810
1811 inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1812 inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
1813
1814 inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1815 inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
1816
1817 BTRFS_I(inode)->i_otime.tv_sec =
1818 btrfs_stack_timespec_sec(&inode_item->otime);
1819 BTRFS_I(inode)->i_otime.tv_nsec =
1820 btrfs_stack_timespec_nsec(&inode_item->otime);
1821
1822 inode->i_generation = BTRFS_I(inode)->generation;
1823 BTRFS_I(inode)->index_cnt = (u64)-1;
1824
1825 mutex_unlock(&delayed_node->mutex);
1826 btrfs_release_delayed_node(delayed_node);
1827 return 0;
1828}
1829
1830int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1831 struct btrfs_root *root, struct inode *inode)
1832{
1833 struct btrfs_delayed_node *delayed_node;
1834 int ret = 0;
1835
1836 delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
1837 if (IS_ERR(delayed_node))
1838 return PTR_ERR(delayed_node);
1839
1840 mutex_lock(&delayed_node->mutex);
1841 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1842 fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1843 goto release_node;
1844 }
1845
1846 ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
1847 delayed_node);
1848 if (ret)
1849 goto release_node;
1850
1851 fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1852 set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1853 delayed_node->count++;
1854 atomic_inc(&root->fs_info->delayed_root->items);
1855release_node:
1856 mutex_unlock(&delayed_node->mutex);
1857 btrfs_release_delayed_node(delayed_node);
1858 return ret;
1859}
1860
1861int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
1862{
1863 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1864 struct btrfs_delayed_node *delayed_node;
1865
1866	/*
1867	 * We don't do delayed inode updates during log recovery because it
1868	 * leads to enospc problems.  This means we also can't do delayed
1869	 * inode refs.
1870	 */
1871 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1872 return -EAGAIN;
1873
1874 delayed_node = btrfs_get_or_create_delayed_node(inode);
1875 if (IS_ERR(delayed_node))
1876 return PTR_ERR(delayed_node);
1877
1878	/*
1879	 * We don't reserve space for inode ref deletion because:
1880	 * - We ONLY do async inode ref deletion for inodes with a single
1881	 *   link (i_nlink == 1), which means there is only one inode ref.
1882	 *   In most cases the inode ref and the inode item are in the same
1883	 *   leaf, so we deal with them at the same time. Since we are sure
1884	 *   we will reserve space for the inode item, it is unnecessary to
1885	 *   reserve space for the inode ref deletion.
1886	 * - If the inode ref and the inode item are not in the same leaf,
1887	 *   we needn't worry about enospc either, because we reserve much
1888	 *   more space for the inode update than it needs.
1889	 * - At worst, we can steal some space from the global reservation;
1890	 *   that is very rare.
1891	 */
1892 mutex_lock(&delayed_node->mutex);
1893 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1894 goto release_node;
1895
1896 set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1897 delayed_node->count++;
1898 atomic_inc(&fs_info->delayed_root->items);
1899release_node:
1900 mutex_unlock(&delayed_node->mutex);
1901 btrfs_release_delayed_node(delayed_node);
1902 return 0;
1903}
1904
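/*
 * Throw away everything a delayed node still holds: release all pending
 * insertion and deletion items together with their reservations, drop a
 * pending iref deletion, and drop a dirty inode update along with its
 * reservation. Used when the inode is evicted or the fs is torn down.
 */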
1905static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1906{
1907 struct btrfs_root *root = delayed_node->root;
1908 struct btrfs_fs_info *fs_info = root->fs_info;
1909 struct btrfs_delayed_item *curr_item, *prev_item;
1910
1911 mutex_lock(&delayed_node->mutex);
1912 curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1913 while (curr_item) {
1914 btrfs_delayed_item_release_metadata(root, curr_item);
1915 prev_item = curr_item;
1916 curr_item = __btrfs_next_delayed_item(prev_item);
1917 btrfs_release_delayed_item(prev_item);
1918 }
1919
1920 curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1921 while (curr_item) {
1922 btrfs_delayed_item_release_metadata(root, curr_item);
1923 prev_item = curr_item;
1924 curr_item = __btrfs_next_delayed_item(prev_item);
1925 btrfs_release_delayed_item(prev_item);
1926 }
1927
1928 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1929 btrfs_release_delayed_iref(delayed_node);
1930
1931 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1932 btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
1933 btrfs_release_delayed_inode(delayed_node);
1934 }
1935 mutex_unlock(&delayed_node->mutex);
1936}
1937
1938void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
1939{
1940 struct btrfs_delayed_node *delayed_node;
1941
1942 delayed_node = btrfs_get_delayed_node(inode);
1943 if (!delayed_node)
1944 return;
1945
1946 __btrfs_kill_delayed_node(delayed_node);
1947 btrfs_release_delayed_node(delayed_node);
1948}
1949
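/*
 * Kill the delayed nodes of every inode of @root: gang-look them up from
 * the radix tree in batches of 8, take a reference on each node that is
 * still alive while holding inode_lock, and kill them outside the lock.
 */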
1950void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1951{
1952 u64 inode_id = 0;
1953 struct btrfs_delayed_node *delayed_nodes[8];
1954 int i, n;
1955
1956 while (1) {
1957 spin_lock(&root->inode_lock);
1958 n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1959 (void **)delayed_nodes, inode_id,
1960 ARRAY_SIZE(delayed_nodes));
1961 if (!n) {
1962 spin_unlock(&root->inode_lock);
1963 break;
1964 }
1965
1966 inode_id = delayed_nodes[n - 1]->inode_id + 1;
1967 for (i = 0; i < n; i++) {
1968 /*
1969 * Don't increase refs in case the node is dead and
1970 * about to be removed from the tree in the loop below
1971 */
1972 if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
1973 delayed_nodes[i] = NULL;
1974 }
1975 spin_unlock(&root->inode_lock);
1976
1977 for (i = 0; i < n; i++) {
1978 if (!delayed_nodes[i])
1979 continue;
1980 __btrfs_kill_delayed_node(delayed_nodes[i]);
1981 btrfs_release_delayed_node(delayed_nodes[i]);
1982 }
1983 }
1984}
1985
1986void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
1987{
1988 struct btrfs_delayed_node *curr_node, *prev_node;
1989
1990 curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
1991 while (curr_node) {
1992 __btrfs_kill_delayed_node(curr_node);
1993
1994 prev_node = curr_node;
1995 curr_node = btrfs_next_delayed_node(curr_node);
1996 btrfs_release_delayed_node(prev_node);
1997 }
1998}
1999