/*
 * Copyright (C) 2011 Fujitsu. All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"

#define BTRFS_DELAYED_WRITEBACK		400
#define BTRFS_DELAYED_BACKGROUND	100

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	if (delayed_node_cache)
		kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	atomic_set(&delayed_node->refs, 0);
	delayed_node->count = 0;
	delayed_node->in_list = 0;
	delayed_node->inode_dirty = 0;
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	delayed_node->index_cnt = 0;
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
	delayed_node->bytes_reserved = 0;
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
						struct btrfs_root *root)
{
	return root->fs_info->delayed_root;
}

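/*
 * Look up the delayed node of an inode: first try the pointer cached in
 * the btrfs inode, then fall back to the per-root radix tree. On success
 * the node is returned with an extra reference held; if the inode has no
 * delayed node yet, NULL is returned.
 */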
static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
{
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	struct btrfs_delayed_node *node;

	node = ACCESS_ONCE(btrfs_inode->delayed_node);
	if (node) {
		atomic_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			atomic_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		btrfs_inode->delayed_node = node;
		atomic_inc(&node->refs);	/* can be accessed */
		atomic_inc(&node->refs);	/* cached in the inode */
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

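/*
 * Like btrfs_get_delayed_node(), but allocate a new delayed node and
 * insert it into the root's radix tree if the inode does not have one
 * yet. Two references are taken up front: one for the pointer cached in
 * the btrfs inode and one for the caller. A racing insertion (-EEXIST)
 * frees our copy and retries the lookup. Returns the node or
 * ERR_PTR(-ENOMEM).
 */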
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
							struct inode *inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	int ret;

again:
	node = btrfs_get_delayed_node(inode);
	if (node)
		return node;

	node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	atomic_inc(&node->refs);	/* cached in the btrfs inode */
	atomic_inc(&node->refs);	/* can be accessed */

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		kmem_cache_free(delayed_node_cache, node);
		spin_unlock(&root->inode_lock);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (node->in_list) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		atomic_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		node->in_list = 1;
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (node->in_list) {
		root->nodes--;
		atomic_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		node->in_list = 0;
	}
	spin_unlock(&root->lock);
}

struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!node->in_list) {	/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

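/*
 * Drop one reference on a delayed node, requeueing it first if it still
 * has pending items. When the last reference goes away, the refcount is
 * rechecked under root->inode_lock before the node is deleted from the
 * radix tree and freed, so that a concurrent lookup that has just taken
 * a new reference does not have the node freed out from under it.
 */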
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (atomic_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;
		spin_lock(&root->inode_lock);
		if (atomic_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			kmem_cache_free(delayed_node_cache, delayed_node);
		}
		spin_unlock(&root->inode_lock);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		atomic_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @root: the rb-root (insertion or deletion tree) of the delayed node
 * @key: the key to look up
 * @prev: used to store the prev item if the right item isn't found
 * @next: used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
	return item;
}

struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
					   NULL, NULL);
	return item;
}

struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item, *next;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, &next);
	if (!item)
		item = next;

	return item;
}

struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item, *next;

	item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
					   NULL, &next);
	if (!item)
		item = next;

	return item;
}

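/*
 * Link a delayed item into the insertion or deletion rb-tree of its
 * delayed node, ordered by btrfs_key. Also bumps index_cnt past any new
 * dir index key so future index allocations stay unique. Returns -EEXIST
 * if an item with the same key is already queued.
 */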
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;
	atomic_dec(&delayed_root->items);
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (atomic_dec_and_test(&item->refs))
			kfree(item);
	}
}

struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
						   u64 root_id)
{
	struct btrfs_key root_key;

	if (root->objectid == root_id)
		return root;

	root_key.objectid = root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	return btrfs_read_fs_root_no_name(root->fs_info, &root_key);
}

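/*
 * The helpers below move the metadata reservation for one tree operation
 * from the transaction's block reservation into the global block reserve,
 * so that the space stays accounted for after the transaction handle is
 * gone and the delayed item or inode is written back later.
 */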
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->global_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
	if (!ret)
		item->bytes_reserved = num_bytes;

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &root->fs_info->global_block_rsv;
	btrfs_block_rsv_release(root, rsv,
				item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->global_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
	if (!ret)
		node->bytes_reserved = num_bytes;

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
						 struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &root->fs_info->global_block_rsv;
	btrfs_block_rsv_release(root, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(root, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory, but that might cause the task
	 * to sleep, so we set all the locked nodes in the path to blocking
	 * locks first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	ret = setup_items_for_insert(trans, root, path, keys, data_size,
				     total_data_size, total_size, nitems);
	if (ret)
		goto error;

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

/*
 * This helper handles the simple insertions that don't need to extend an
 * existing item for the new data, such as directory name index insertion
 * and inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	item = btrfs_item_nr(leaf, path->slots[0]);
	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}

/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(trans, root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

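/*
 * Delete a run of adjacent dir index items from the leaf that the path
 * currently points to, matching them against the continuous delayed
 * deletion items that start at @item.
 */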
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr)
			goto do_again;
		else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node && delayed_node->inode_dirty) {
		BUG_ON(!delayed_node->root);
		delayed_node->inode_dirty = 0;
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		atomic_dec(&delayed_root->items);
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND &&
		    waitqueue_active(&delayed_root->wait))
			wake_up(&delayed_root->wait);
	}
}

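/*
 * Write the in-memory copy of the inode item cached in the delayed node
 * back into the fs tree, then release the node's metadata reservation
 * and its "dirty inode" count.
 */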
static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int ret;

	mutex_lock(&node->mutex);
	if (!node->inode_dirty) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	key.objectid = node->inode_id;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;
	ret = btrfs_lookup_inode(trans, root, path, &key, 1);
	if (ret > 0) {
		btrfs_release_path(path);
		mutex_unlock(&node->mutex);
		return -ENOENT;
	} else if (ret < 0) {
		mutex_unlock(&node->mutex);
		return ret;
	}

	btrfs_unlock_up_safe(path, 1);
	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	btrfs_delayed_inode_release_metadata(root, node);
	btrfs_release_delayed_inode(node);
	mutex_unlock(&node->mutex);

	return 0;
}

/* Called when committing the transaction. */
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->global_block_rsv;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node) {
		root = curr_node->root;
		ret = btrfs_insert_delayed_items(trans, path, root,
						 curr_node);
		if (!ret)
			ret = btrfs_delete_delayed_items(trans, path, root,
							 curr_node);
		if (!ret)
			ret = btrfs_update_delayed_inode(trans, root, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
	return ret;
}

static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
					      struct btrfs_delayed_node *node)
{
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &node->root->fs_info->global_block_rsv;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (!ret)
		ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (!ret)
		ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	btrfs_free_path(path);

	trans->block_rsv = block_rsv;
	return ret;
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	ret = __btrfs_commit_inode_delayed_items(trans, delayed_node);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

void btrfs_remove_delayed_node(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
	if (!delayed_node)
		return;

	BTRFS_I(inode)->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_node {
	struct btrfs_root *root;
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_work work;
};

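/*
 * Worker callback: flush one prepared delayed node (insertions, deletions
 * and the inode item) inside a joined transaction.
 */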
static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
{
	struct btrfs_async_delayed_node *async_node;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	unsigned long nr = 0;
	int need_requeue = 0;
	int ret;

	async_node = container_of(work, struct btrfs_async_delayed_node, work);

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	path->leave_spinning = 1;

	delayed_node = async_node->delayed_node;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto free_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->global_block_rsv;

	ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
	if (!ret)
		ret = btrfs_delete_delayed_items(trans, path, root,
						 delayed_node);

	if (!ret)
		btrfs_update_delayed_inode(trans, root, path, delayed_node);

	/*
	 * Maybe new delayed items have been inserted, so we need to requeue
	 * the work. Besides that, we must dequeue the empty delayed nodes
	 * to avoid the race between the delayed-items balance and the worker.
	 * The race looks like this:
	 *	Task1				Worker thread
	 *					count == 0, needn't requeue
	 *					  also needn't insert the
	 *					  delayed node into the prepare
	 *					  list again.
	 *	add lots of delayed items
	 *	queue the delayed node
	 *	  already in the list,
	 *	  and not in the prepare
	 *	  list, it means the delayed
	 *	  node is being dealt with
	 *	  by the worker.
	 *	do delayed items balance
	 *	  the delayed node is being
	 *	  dealt with by the worker
	 *	  now, just wait.
	 *					the worker goes idle.
	 * Task1 will sleep until the transaction is committed.
	 */
	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		need_requeue = 1;
	else
		btrfs_dequeue_delayed_node(root->fs_info->delayed_root,
					   delayed_node);
	mutex_unlock(&delayed_node->mutex);

	nr = trans->blocks_used;

	trans->block_rsv = block_rsv;
	btrfs_end_transaction_dmeta(trans, root);
	__btrfs_btree_balance_dirty(root, nr);
free_path:
	btrfs_free_path(path);
out:
	if (need_requeue)
		btrfs_requeue_work(&async_node->work);
	else {
		btrfs_release_prepared_delayed_node(delayed_node);
		kfree(async_node);
	}
}

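/*
 * Kick background workers for the prepared delayed nodes. If @all is set,
 * keep queueing work until the prepare list is empty; otherwise stop
 * after a few nodes.
 */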
static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_root *root, int all)
{
	struct btrfs_async_delayed_node *async_node;
	struct btrfs_delayed_node *curr;
	int count = 0;

again:
	curr = btrfs_first_prepared_delayed_node(delayed_root);
	if (!curr)
		return 0;

	async_node = kmalloc(sizeof(*async_node), GFP_NOFS);
	if (!async_node) {
		btrfs_release_prepared_delayed_node(curr);
		return -ENOMEM;
	}

	async_node->root = root;
	async_node->delayed_node = curr;

	async_node->work.func = btrfs_async_run_delayed_node_done;
	async_node->work.flags = 0;

	btrfs_queue_worker(&root->fs_info->delayed_workers, &async_node->work);
	count++;

	if (all || count < 4)
		goto again;

	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	delayed_root = btrfs_get_delayed_root(root);
	WARN_ON(btrfs_first_delayed_node(delayed_root));
}

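/*
 * Throttle the delayed items. Below BTRFS_DELAYED_BACKGROUND nothing is
 * done; at or above BTRFS_DELAYED_WRITEBACK all prepared nodes are
 * flushed and the caller waits (with a timeout) until the item count
 * drops back below the background limit.
 */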
void btrfs_balance_delayed_items(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;

	delayed_root = btrfs_get_delayed_root(root);

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int ret;
		ret = btrfs_wq_run_delayed_node(delayed_root, root, 1);
		if (ret)
			return;

		wait_event_interruptible_timeout(
				delayed_root->wait,
				(atomic_read(&delayed_root->items) <
				 BTRFS_DELAYED_BACKGROUND),
				HZ);
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, root, 0);
}

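/*
 * Queue a dir index insertion in the delayed node of @dir instead of
 * touching the b-tree directly. The dir item is built in the delayed
 * item's data area and linked into the node's insertion tree.
 */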
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, const char *name,
				   int name_len, struct inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
	/*
	 * we have reserved enough space when we started the transaction,
	 * so a metadata reservation failure here is impossible
	 */
	BUG_ON(ret);

	delayed_item->key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	dir_item->transid = cpu_to_le64(trans->transid);
	dir_item->data_len = 0;
	dir_item->name_len = cpu_to_le16(name_len);
	dir_item->type = type;
	memcpy((char *)(dir_item + 1), name, name_len);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		printk(KERN_ERR "err add delayed dir index item(name: %s) into "
				"the insertion tree of the delayed node"
				"(root id: %llu, inode id: %llu, errno: %d)\n",
				name,
				(unsigned long long)delayed_node->root->objectid,
				(unsigned long long)delayed_node->inode_id,
				ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}

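/*
 * Queue the deletion of a dir index item. If a matching insertion is
 * still pending in the delayed node, the two simply cancel out;
 * otherwise a deletion item is added to the node's deletion tree.
 */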
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, struct inode *dir,
				   u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
	/*
	 * we have reserved enough space when we started the transaction,
	 * so a metadata reservation failure here is impossible.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		printk(KERN_ERR "err add delayed dir index item(index: %llu) "
				"into the deletion tree of the delayed node"
				"(root id: %llu, inode id: %llu, errno: %d)\n",
				(unsigned long long)index,
				(unsigned long long)node->root->objectid,
				(unsigned long long)node->inode_id,
				ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}

int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we hold the i_mutex of this directory, it is impossible for
	 * a new directory index to be added into the delayed node and for
	 * index_cnt to be updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check whether it is about to be
	 * freed.
	 *
	 * Besides that, this function is used to read the dir, and we do
	 * not insert/delete delayed items in this period. So we also
	 * needn't requeue or dequeue this delayed node.
	 */
	atomic_dec(&delayed_node->refs);
}

void btrfs_put_delayed_items(struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}
}

int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr, *next;
	int ret;

	if (list_empty(del_list))
		return 0;

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;

		list_del(&curr->readdir_list);
		ret = (curr->key.offset == index);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (ret)
			return 1;
		else
			continue;
	}
	return 0;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
				    filldir_t filldir,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed item is impossible, so
	 * we needn't lock them. And we have held the i_mutex of the
	 * directory, so nobody can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < filp->f_pos) {
			if (atomic_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		filp->f_pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = le16_to_cpu(di->name_len);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = filldir(dirent, name, name_len, curr->key.offset,
			       location.objectid, d_type);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
	}
	return 0;
}

BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item,
			 generation, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item,
			 sequence, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item,
			 transid, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item,
			 nbytes, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item,
			 block_group, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64);

BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);

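/*
 * Copy the VFS inode fields into the on-stack (little-endian) inode item
 * stored in the delayed node.
 */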
static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, inode->i_uid);
	btrfs_set_stack_inode_gid(inode_item, inode->i_gid);
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence);
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
				      inode->i_ctime.tv_nsec);
}

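/*
 * The reverse of fill_stack_inode_item(): populate a VFS inode from the
 * inode item cached in its delayed node, if that copy is dirty (newer
 * than what is on disk). Returns -ENOENT if there is no dirty delayed
 * copy to use.
 */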
int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;
	struct btrfs_timespec *tspec;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->inode_dirty) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	inode->i_uid = btrfs_stack_inode_uid(inode_item);
	inode->i_gid = btrfs_stack_inode_gid(inode_item);
	btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	inode->i_nlink = btrfs_stack_inode_nlink(inode_item);
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->sequence = btrfs_stack_inode_sequence(inode_item);
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	tspec = btrfs_inode_atime(inode_item);
	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	tspec = btrfs_inode_mtime(inode_item);
	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	tspec = btrfs_inode_ctime(inode_item);
	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

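/*
 * Record an inode update in the delayed node instead of writing the inode
 * item immediately. The first update reserves metadata space and marks
 * the node's inode dirty; later updates just refresh the cached copy.
 */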
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->inode_dirty) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
	/*
	 * we must have reserved enough space when we started the
	 * transaction, so a metadata reservation failure here is impossible
	 */
	BUG_ON(ret);

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	delayed_node->inode_dirty = 1;
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

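/*
 * Throw away everything queued on a delayed node: release the metadata
 * reservations of all pending insertion and deletion items and of the
 * dirty inode, without writing any of it back.
 */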
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (delayed_node->inode_dirty) {
		btrfs_delayed_inode_release_metadata(root, delayed_node);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}

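/*
 * Called when a root is being deleted: walk the radix tree in batches of
 * eight nodes, take a reference on each, and kill them all.
 */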
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;

		for (i = 0; i < n; i++)
			atomic_inc(&delayed_nodes[i]->refs);
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}
1/*
2 * Copyright (C) 2011 Fujitsu. All rights reserved.
3 * Written by Miao Xie <miaox@cn.fujitsu.com>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public
7 * License v2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public
15 * License along with this program; if not, write to the
16 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
17 * Boston, MA 021110-1307, USA.
18 */
19
20#include <linux/slab.h>
21#include "delayed-inode.h"
22#include "disk-io.h"
23#include "transaction.h"
24#include "ctree.h"
25
26#define BTRFS_DELAYED_WRITEBACK 512
27#define BTRFS_DELAYED_BACKGROUND 128
28#define BTRFS_DELAYED_BATCH 16
29
30static struct kmem_cache *delayed_node_cache;
31
32int __init btrfs_delayed_inode_init(void)
33{
34 delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
35 sizeof(struct btrfs_delayed_node),
36 0,
37 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
38 NULL);
39 if (!delayed_node_cache)
40 return -ENOMEM;
41 return 0;
42}
43
44void btrfs_delayed_inode_exit(void)
45{
46 kmem_cache_destroy(delayed_node_cache);
47}
48
49static inline void btrfs_init_delayed_node(
50 struct btrfs_delayed_node *delayed_node,
51 struct btrfs_root *root, u64 inode_id)
52{
53 delayed_node->root = root;
54 delayed_node->inode_id = inode_id;
55 atomic_set(&delayed_node->refs, 0);
56 delayed_node->ins_root = RB_ROOT;
57 delayed_node->del_root = RB_ROOT;
58 mutex_init(&delayed_node->mutex);
59 INIT_LIST_HEAD(&delayed_node->n_list);
60 INIT_LIST_HEAD(&delayed_node->p_list);
61}
62
63static inline int btrfs_is_continuous_delayed_item(
64 struct btrfs_delayed_item *item1,
65 struct btrfs_delayed_item *item2)
66{
67 if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
68 item1->key.objectid == item2->key.objectid &&
69 item1->key.type == item2->key.type &&
70 item1->key.offset + 1 == item2->key.offset)
71 return 1;
72 return 0;
73}
74
75static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
76 struct btrfs_root *root)
77{
78 return root->fs_info->delayed_root;
79}
80
81static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
82{
83 struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
84 struct btrfs_root *root = btrfs_inode->root;
85 u64 ino = btrfs_ino(inode);
86 struct btrfs_delayed_node *node;
87
88 node = ACCESS_ONCE(btrfs_inode->delayed_node);
89 if (node) {
90 atomic_inc(&node->refs);
91 return node;
92 }
93
94 spin_lock(&root->inode_lock);
95 node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
96 if (node) {
97 if (btrfs_inode->delayed_node) {
98 atomic_inc(&node->refs); /* can be accessed */
99 BUG_ON(btrfs_inode->delayed_node != node);
100 spin_unlock(&root->inode_lock);
101 return node;
102 }
103 btrfs_inode->delayed_node = node;
104 /* can be accessed and cached in the inode */
105 atomic_add(2, &node->refs);
106 spin_unlock(&root->inode_lock);
107 return node;
108 }
109 spin_unlock(&root->inode_lock);
110
111 return NULL;
112}
113
114/* Will return either the node or PTR_ERR(-ENOMEM) */
115static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
116 struct inode *inode)
117{
118 struct btrfs_delayed_node *node;
119 struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
120 struct btrfs_root *root = btrfs_inode->root;
121 u64 ino = btrfs_ino(inode);
122 int ret;
123
124again:
125 node = btrfs_get_delayed_node(inode);
126 if (node)
127 return node;
128
129 node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
130 if (!node)
131 return ERR_PTR(-ENOMEM);
132 btrfs_init_delayed_node(node, root, ino);
133
134 /* cached in the btrfs inode and can be accessed */
135 atomic_add(2, &node->refs);
136
137 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
138 if (ret) {
139 kmem_cache_free(delayed_node_cache, node);
140 return ERR_PTR(ret);
141 }
142
143 spin_lock(&root->inode_lock);
144 ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
145 if (ret == -EEXIST) {
146 spin_unlock(&root->inode_lock);
147 kmem_cache_free(delayed_node_cache, node);
148 radix_tree_preload_end();
149 goto again;
150 }
151 btrfs_inode->delayed_node = node;
152 spin_unlock(&root->inode_lock);
153 radix_tree_preload_end();
154
155 return node;
156}
157
158/*
159 * Call it when holding delayed_node->mutex
160 *
161 * If mod = 1, add this node into the prepared list.
162 */
163static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
164 struct btrfs_delayed_node *node,
165 int mod)
166{
167 spin_lock(&root->lock);
168 if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
169 if (!list_empty(&node->p_list))
170 list_move_tail(&node->p_list, &root->prepare_list);
171 else if (mod)
172 list_add_tail(&node->p_list, &root->prepare_list);
173 } else {
174 list_add_tail(&node->n_list, &root->node_list);
175 list_add_tail(&node->p_list, &root->prepare_list);
176 atomic_inc(&node->refs); /* inserted into list */
177 root->nodes++;
178 set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
179 }
180 spin_unlock(&root->lock);
181}
182
183/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		atomic_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (atomic_dec_and_test(&delayed_node->refs)) {
		bool free = false;
		struct btrfs_root *root = delayed_node->root;
		spin_lock(&root->inode_lock);
		if (atomic_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			free = true;
		}
		spin_unlock(&root->inode_lock);
		if (free)
			kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		atomic_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up a delayed item by key
 * @root: the rb-root (insertion or deletion tree) of a delayed node
 * @key: the key to look up
 * @prev: used to store the previous item if the exact item isn't found
 * @next: used to store the next item if the exact item isn't found
 *
 * Note: if we don't find the exact item, we return the previous and the
 * next items via @prev and @next.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

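/*
 * Illustrative example (not part of the original source): with dir
 * index offsets 3 and 7 in the tree, looking up offset 5 returns NULL
 * and fills the out parameters with the neighbours:
 *
 *	struct btrfs_delayed_item *prev, *next;
 *
 *	item = __btrfs_lookup_delayed_item(&node->ins_root, &key,
 *					   &prev, &next);
 *	// item == NULL, prev->key.offset == 3, next->key.offset == 7
 */
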
static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
	return item;
}

static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

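/*
 * Illustrative sketch (not part of the original source): building a
 * delayed item and adding it to a node's insertion tree; dir_ino and
 * index are placeholders:
 *
 *	struct btrfs_delayed_item *item;
 *
 *	item = btrfs_alloc_delayed_item(data_len);
 *	if (!item)
 *		return -ENOMEM;
 *	item->key.objectid = dir_ino;
 *	item->key.type = BTRFS_DIR_INDEX_KEY;
 *	item->key.offset = index;
 *	mutex_lock(&node->mutex);
 *	ret = __btrfs_add_delayed_insertion_item(node, item);
 *	mutex_unlock(&node->mutex);
 */
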
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (atomic_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(root, rsv,
				item->bytes_reserved);
}

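/*
 * Illustrative note (not part of the original source): every delayed
 * item that reserved metadata space above must release it exactly
 * once, either when the item reaches disk (see
 * btrfs_insert_delayed_item() and btrfs_batch_delete_items() below)
 * or when the item is dropped:
 *
 *	ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
 *	...
 *	btrfs_delayed_item_release_metadata(root, item);
 *	btrfs_release_delayed_item(item);
 */
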
static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;
	bool release = false;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	/*
	 * btrfs_dirty_inode will update the inode under
	 * btrfs_join_transaction which, for speed reasons, doesn't reserve
	 * space. This is a problem since we still need to reserve space for
	 * this update, so try to reserve the space here.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we're already accounted for.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction, reserve_metadata_bytes()
		 * could try to commit the transaction, which would make it
		 * return EAGAIN to make us stop the transaction we have, so
		 * return ENOSPC instead so that btrfs_dirty_inode knows what
		 * to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(root->fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	} else if (src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
		spin_lock(&BTRFS_I(inode)->lock);
		if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
				       &BTRFS_I(inode)->runtime_flags)) {
			spin_unlock(&BTRFS_I(inode)->lock);
			release = true;
			goto migrate;
		}
		spin_unlock(&BTRFS_I(inode)->lock);

		/*
		 * OK, we didn't have space pre-reserved. This shouldn't
		 * happen too often, but it can happen if we do delalloc to
		 * an existing inode which gets dirtied because of the time
		 * update, and then isn't touched again until after the
		 * transaction commits and we then try to write out the data.
		 * First try to be nice and reserve something strictly for
		 * us. If that fails, be a pain and try to steal from the
		 * delalloc block rsv.
		 */
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		if (!ret)
			goto out;

		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
		if (!ret)
			goto out;

		if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
			btrfs_debug(root->fs_info,
				    "block rsv migrate returned %d", ret);
			WARN_ON(1);
		}
		/*
		 * OK, this is a problem. Let's just steal from the global
		 * rsv, since this really shouldn't happen that often.
		 */
		ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
					      dst_rsv, num_bytes);
		goto out;
	}

migrate:
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);

out:
	/*
	 * Migrate only takes a reservation; it doesn't touch the size of the
	 * block_rsv. This is to simplify things for callers that don't
	 * normally have things migrated from their block rsv: if they go to
	 * release their reservation, that will decrease the size as well, so
	 * if migrate reduced the size we'd end up with a negative size. But
	 * for the delalloc_meta_reserved stuff we will only know to drop 1
	 * reservation, and we could in fact do this reserve/migrate dance
	 * several times between the time we did the original reservation and
	 * the time we'd clean it up. So, to take care of this, release the
	 * space for the meta reservation here. It may be time for a
	 * documentation page on how block rsvs work.
	 */
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	if (release) {
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 0);
		btrfs_block_rsv_release(root, src_rsv, num_bytes);
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
						 struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(root, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper inserts as many consecutive items as fit into the same
 * leaf, based on the leaf's free space.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(root, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count how many consecutive items we can insert in one batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * We need to allocate some memory, which might cause the task to
	 * sleep, so set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

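/*
 * Illustrative arithmetic (not part of the original source): each
 * batched entry consumes data_len plus sizeof(struct btrfs_item) (the
 * on-disk item header, assumed here to be 25 bytes) of leaf space.
 * With, say, free_space = 200 and consecutive dir index items of
 * data_len 40, the loop above accounts 65 bytes per item and batches
 * three of them (195 <= 200); a fourth would not fit and ends the
 * batch.
 */
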
/*
 * This helper handles simple insertions that don't need to extend an
 * existing item for new data, such as directory name index insertion
 * and inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}

/*
 * We insert one item first, and then, if there are consecutive items,
 * we try to insert them into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count how many consecutive dir index items we can delete in one
	 * batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * We can't find the item this delayed item points to, so
		 * the delayed item is stale; just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}

static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for an inode that has only one link,
	 * so there is only one iref. The case where several irefs are
	 * in the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(root, node);
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error; in that case the transaction is aborted and any
 * outstanding delayed items are cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, int nr)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || nr--)) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, root, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	return __btrfs_run_delayed_items(trans, root, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, int nr)
{
	return __btrfs_run_delayed_items(trans, root, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct inode *inode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans, delayed_node->root);
	btrfs_btree_balance_dirty(delayed_node->root);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
	if (!delayed_node)
		return;

	BTRFS_I(inode)->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

again:
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
		goto free_path;

	delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
	if (!delayed_node)
		goto free_path;

	path->leave_spinning = 1;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto release_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	trans->block_rsv = block_rsv;
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty_nodelay(root);

release_path:
	btrfs_release_path(path);
	total_done++;

	btrfs_release_prepared_delayed_node(delayed_node);
	if (async_work->nr == 0 || total_done < async_work->nr)
		goto again;

free_path:
	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 0;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
			btrfs_async_run_delayed_root, NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	delayed_root = btrfs_get_delayed_root(root);
	WARN_ON(btrfs_first_delayed_node(delayed_root));
}

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}

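/*
 * Illustrative example (not part of the original source): a waiter
 * samples items_seq before sleeping. With seq = 100, the wait ends
 * once items_seq leaves the window [100, 100 + BTRFS_DELAYED_BATCH),
 * i.e. after a batch worth of items has been finished since the
 * sample, or once the backlog drops below BTRFS_DELAYED_BACKGROUND.
 */
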
void btrfs_balance_delayed_items(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_fs_info *fs_info = root->fs_info;

	delayed_root = btrfs_get_delayed_root(root);

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}

/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, const char *name,
				   int name_len, struct inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
	/*
	 * We reserved enough space when we started a new transaction,
	 * so a metadata reservation failure is impossible here.
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(root->fs_info,
			  "failed to add delayed dir index item (name: %.*s) into the insertion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

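/*
 * Illustrative sketch (not part of the original source): a directory
 * entry creation path calls this to defer the DIR_INDEX item; the
 * variables below are placeholders:
 *
 *	ret = btrfs_insert_delayed_dir_index(trans, root, name, name_len,
 *					     dir, &disk_key, type, index);
 *	if (ret)
 *		return ret;
 */
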
static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, struct inode *dir,
				   u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	item_key.type = BTRFS_DIR_INDEX_KEY;
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
	/*
	 * We reserved enough space when we started a new transaction,
	 * so a metadata reservation failure is impossible here.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(root->fs_info,
			  "failed to add delayed dir index item (index: %llu) into the deletion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->objectid, node->inode_id,
			  ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}

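/*
 * Illustrative note (not part of the original source): if a dir index
 * was created and then unlinked before ever reaching disk, the call
 * above to btrfs_delete_delayed_insertion_item() simply drops the
 * pending insertion item and no deletion item is queued at all, so
 * the two operations cancel out in memory.
 */
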
int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we hold i_mutex of this directory, no new directory index
	 * can be added to the delayed node and index_cnt can't be updated
	 * right now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check whether it is going to be
	 * freed or not.
	 *
	 * Besides that, this function is used for reading a directory, and
	 * we do not insert/delete delayed items during that period. So we
	 * also needn't requeue or dequeue this delayed node.
	 */
	atomic_dec(&delayed_node->refs);
}

void btrfs_put_delayed_items(struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}
}

int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr, *next;
	int ret;

	if (list_empty(del_list))
		return 0;

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;

		list_del(&curr->readdir_list);
		ret = (curr->key.offset == index);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (ret)
			return 1;
	}
	return 0;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list, bool *emitted)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of a delayed item is impossible, so we don't
	 * need to lock them. And we hold i_mutex of the directory, so
	 * nobody can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (atomic_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
				 location.objectid, d_type);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
		*emitted = true;
	}
	return 0;
}

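/*
 * Illustrative sketch (not part of the original source): a readdir
 * implementation glues the helpers above together roughly like this:
 *
 *	LIST_HEAD(ins_list);
 *	LIST_HEAD(del_list);
 *
 *	btrfs_get_delayed_items(inode, &ins_list, &del_list);
 *	... walk the on-disk dir indexes, skipping any index for which
 *	    btrfs_should_delete_dir_index(&del_list, index) returns 1 ...
 *	btrfs_readdir_delayed_dir_index(ctx, &ins_list, &emitted);
 *	btrfs_put_delayed_items(&ins_list, &del_list);
 */
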
static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(&inode_item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);
}

int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

	inode->i_version = btrfs_stack_inode_sequence(inode_item);
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_stack_timespec_sec(&inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_stack_timespec_nsec(&inode_item->otime);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

int btrfs_delayed_delete_inode_ref(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	/*
	 * We don't do delayed inode updates during log recovery because it
	 * leads to ENOSPC problems. This means we also can't do delayed
	 * inode refs.
	 */
	if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for the inode ref deletion because:
	 * - We ONLY do async inode ref deletion for an inode that has only
	 *   one link (i_nlink == 1), which means there is only one inode ref.
	 *   In most cases, the inode ref and the inode item are in the same
	 *   leaf, and we will deal with them at the same time.
	 *   Since we are sure we will reserve the space for the inode item,
	 *   it is unnecessary to reserve space for the inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   we also needn't worry about ENOSPC, because we reserve much more
	 *   space for the inode update than it needs.
	 * - At worst, we can steal some space from the global reservation.
	 *   That is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&BTRFS_I(inode)->root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(root, delayed_node);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}

void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;

		for (i = 0; i < n; i++)
			atomic_inc(&delayed_nodes[i]->refs);
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}

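/*
 * Illustrative example (not part of the original source): the loop
 * above is the standard batched radix-tree walk. With delayed nodes
 * at inode ids 5, 9 and 300 and a batch size of 8, the first gang
 * lookup returns all three (n = 3), the next lookup starts at
 * inode_id = 301 and returns 0, ending the loop.
 */
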
void btrfs_destroy_delayed_inodes(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}
