/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int level, int slot,
		    int tree_mod_log);
static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb);
struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr,
					  u32 blocksize, u64 parent_transid,
					  u64 time_seq);
struct extent_buffer *btrfs_find_old_tree_block(struct btrfs_root *root,
						u64 bytenr, u32 blocksize,
						u64 time_seq);

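/*
 * allocate a new path from the slab cache; the path comes back zeroed.
 * Returns NULL when the allocation fails.  Paths handed out here are
 * released again with btrfs_free_path().
 */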
struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	return path;
}

/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}
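
/*
 * typical caller pattern (illustrative sketch only, not a fixed API
 * contract): flip the held spinning locks to blocking before doing
 * anything that may sleep, then flip them back afterwards:
 *
 *	btrfs_set_path_blocking(path);
 *	eb = read_tree_block(root, blocknr, blocksize, gen);	(may schedule)
 *	btrfs_clear_path_blocking(path, NULL, 0);
 */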

/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy.  When lockdep is enabled, we set
 * held to a blocking lock before we go around and retake all the
 * spinlocks in the path.  You can safely pass NULL for held.
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order.  If any of the locks in the path are not
	 * currently blocking, it is going to complain.  So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);
#endif

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
#endif
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers
 * held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here: we could free up the root node
		 * because it was cow'ed, but we may not get the new root
		 * node yet.  So do the inc_not_zero dance, and if that
		 * doesn't work then synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}

/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* loop around taking references on and read locking the root node of
 * the tree until you end up with a read lock on the root.  A read
 * locked buffer is returned, with a reference held.
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* cow-only roots (everything that is not a reference counted cow
 * subvolume) just get put onto a simple dirty list.  transaction.c
 * walks this list to make sure they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	if (root->track_dirty && list_empty(&root->dirty_list)) {
		list_add(&root->dirty_list,
			 &root->fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&root->fs_info->trans_lock);
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
				     new_root_objectid, &disk_key, level,
				     buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0, 1);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

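/*
 * the kinds of modifications recorded in the tree mod log.  the rewind
 * code undoes them, newest first, to recreate an old version of a block.
 */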
enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_move {
	int dst_slot;
	int nr_items;
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 index;		/* shifted logical */
	struct seq_list elem;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct tree_mod_move move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};

static inline void
__get_tree_mod_seq(struct btrfs_fs_info *fs_info, struct seq_list *elem)
{
	elem->seq = atomic_inc_return(&fs_info->tree_mod_seq);
	list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
}

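/*
 * btrfs_get_tree_mod_seq and btrfs_put_tree_mod_seq come in pairs.  An
 * illustrative caller (sketch only): take a sequence number to keep the
 * log from being pruned, walk old state, then drop it again:
 *
 *	struct seq_list elem;
 *
 *	btrfs_get_tree_mod_seq(fs_info, &elem);
 *	... use elem.seq as the time_seq for rewinding ...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 */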
void btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	elem->flags = 1;
	spin_lock(&fs_info->tree_mod_seq_lock);
	__get_tree_mod_seq(fs_info, elem);
	spin_unlock(&fs_info->tree_mod_seq_lock);
}

void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	BUG_ON(!(elem->flags & 1));
	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if ((cur_elem->flags & 1) && cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				goto out;
			}
			min_seq = cur_elem->seq;
		}
	}

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	write_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = container_of(node, struct tree_mod_elem, node);
		if (tm->elem.seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		list_del(&tm->elem.list);
		kfree(tm);
	}
	write_unlock(&fs_info->tree_mod_log_lock);
out:
	spin_unlock(&fs_info->tree_mod_seq_lock);
}

/*
 * key order of the log:
 *       index -> sequence
 *
 * the index is the shifted logical of the *new* root node for root replace
 * operations, or the shifted logical of the affected block for all other
 * operations.
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;
	int ret = 0;

	BUG_ON(!tm || !tm->elem.seq);

	write_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = container_of(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->index < tm->index)
			new = &((*new)->rb_left);
		else if (cur->index > tm->index)
			new = &((*new)->rb_right);
		else if (cur->elem.seq < tm->elem.seq)
			new = &((*new)->rb_left);
		else if (cur->elem.seq > tm->elem.seq)
			new = &((*new)->rb_right);
		else {
			kfree(tm);
			ret = -EEXIST;
			goto unlock;
		}
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
unlock:
	write_unlock(&fs_info->tree_mod_log_lock);
	return ret;
}

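/*
 * decide whether the current modification needs to be logged at all:
 * nothing is logged while nobody holds a tree mod sequence number, and
 * leaf blocks (level 0) are never logged.  Returns 1 when logging can
 * be skipped.
 */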
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&fs_info->tree_mod_seq_list))
		return 1;
	if (!eb)
		return 0;
	if (btrfs_header_level(eb) == 0)
		return 1;
	return 0;
}

/*
 * This allocates memory and gets a tree modification sequence number when
 * needed.
 *
 * Returns 0 when no sequence number is needed, < 0 on error.
 * Returns > 0 (the added sequence number) when one was taken.  In this case,
 * fs_info->tree_mod_seq_lock was acquired and must be released by the caller
 * after inserting into the rb tree.
 */
static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
				 struct tree_mod_elem **tm_ret)
{
	struct tree_mod_elem *tm;
	int seq;

	if (tree_mod_dont_log(fs_info, NULL))
		return 0;

	tm = *tm_ret = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return -ENOMEM;

	tm->elem.flags = 0;
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (list_empty(&fs_info->tree_mod_seq_list)) {
		/*
		 * someone emptied the list while we were waiting for the lock.
		 * we must not add to the list, because no blocker exists.
		 * items are removed from the list only when the existing
		 * blocker is removed from the list.
		 */
		kfree(tm);
		seq = 0;
		spin_unlock(&fs_info->tree_mod_seq_lock);
	} else {
		__get_tree_mod_seq(fs_info, &tm->elem);
		seq = tm->elem.seq;
	}

	return seq;
}

static noinline int
tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
			     struct extent_buffer *eb, int slot,
			     enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret <= 0)
		return ret;

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);

	ret = __tree_mod_log_insert(fs_info, tm);
	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

static noinline int
tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
			int slot, enum mod_log_op op)
{
	return tree_mod_log_insert_key_mask(fs_info, eb, slot, op, GFP_NOFS);
}

static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int dst_slot, int src_slot,
			 int nr_items, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;
	int i;

	if (tree_mod_dont_log(fs_info, eb))
		return 0;

	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = tree_mod_log_insert_key(fs_info, eb, i + dst_slot,
					      MOD_LOG_KEY_REMOVE_WHILE_MOVING);
		BUG_ON(ret < 0);
	}

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret <= 0)
		return ret;

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	ret = __tree_mod_log_insert(fs_info, tm);
	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *old_root,
			 struct extent_buffer *new_root, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret <= 0)
		return ret;

	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	ret = __tree_mod_log_insert(fs_info, tm);
	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

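/*
 * look up a logged operation for the block at 'start'.  With 'smallest'
 * set, the match with the lowest sequence >= min_seq is returned;
 * otherwise the match with the highest sequence is returned.
 */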
static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;
	u64 index = start >> PAGE_CACHE_SHIFT;

	read_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = container_of(node, struct tree_mod_elem, node);
		if (cur->index < index) {
			node = node->rb_left;
		} else if (cur->index > index) {
			node = node->rb_right;
		} else if (cur->elem.seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->elem.seq > cur->elem.seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->elem.seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->elem.seq < cur->elem.seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	read_unlock(&fs_info->tree_mod_log_lock);

	return found;
}

/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}

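/*
 * log that nr_items key/pointer pairs were copied from src to dst: each
 * copied slot is recorded as a remove from src plus an add to dst.
 */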
static inline void
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret;
	int i;

	if (tree_mod_dont_log(fs_info, NULL))
		return;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return;

	/* speed this up by using a single seq for all operations? */
	for (i = 0; i < nr_items; i++) {
		ret = tree_mod_log_insert_key(fs_info, src, i + src_offset,
					      MOD_LOG_KEY_REMOVE);
		BUG_ON(ret < 0);
		ret = tree_mod_log_insert_key(fs_info, dst, i + dst_offset,
					      MOD_LOG_KEY_ADD);
		BUG_ON(ret < 0);
	}
}

static inline void
tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     int dst_offset, int src_offset, int nr_items)
{
	int ret;
	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
				       nr_items, GFP_NOFS);
	BUG_ON(ret < 0);
}

static inline void
tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb,
			  struct btrfs_disk_key *disk_key, int slot, int atomic)
{
	int ret;

	ret = tree_mod_log_insert_key_mask(fs_info, eb, slot,
					   MOD_LOG_KEY_REPLACE,
					   atomic ? GFP_ATOMIC : GFP_NOFS);
	BUG_ON(ret < 0);
}

static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb)
{
	int i;
	int ret;
	u32 nritems;

	if (tree_mod_dont_log(fs_info, eb))
		return;

	nritems = btrfs_header_nritems(eb);
	for (i = nritems - 1; i >= 0; i--) {
		ret = tree_mod_log_insert_key(fs_info, eb, i,
					      MOD_LOG_KEY_REMOVE_WHILE_FREEING);
		BUG_ON(ret < 0);
	}
}

static inline void
tree_mod_log_set_root_pointer(struct btrfs_root *root,
			      struct extent_buffer *new_root_node)
{
	int ret;
	tree_mod_log_free_eb(root->fs_info, root->node);
	ret = tree_mod_log_insert_root(root->fs_info, root->node,
				       new_root_node, GFP_NOFS);
	BUG_ON(ret < 0);
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (root->ref_cows &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (root->ref_cows &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}

static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree blocks
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is that some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       buf->len, &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0, 1);
				BUG_ON(ret); /* -ENOMEM */
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
				BUG_ON(ret); /* -ENOMEM */
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		if (new_flags != 0) {
			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_dec_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		/*
		 * don't log freeing in case we're freeing the root node, this
		 * is done by tree_mod_log_set_root_pointer later
		 */
		if (buf != root->node && btrfs_header_level(buf) != 0)
			tree_mod_log_free_eb(root->fs_info, buf);
		clean_tree_block(trans, root, buf);
		*last_ref = 1;
	}
	return 0;
}

/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
				     root->root_key.objectid, &disk_key,
				     level, search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	if (root->ref_cows)
		btrfs_reloc_cow_block(trans, root, buf, cow);

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		extent_buffer_get(cow);
		tree_mod_log_set_root_pointer(root, cow);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
					MOD_LOG_KEY_REPLACE);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *
__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
			   struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = root->node->start;
	int looped = 0;

	if (!time_seq)
		return NULL;

	/*
	 * the very last operation that's logged for a root is the replacement
	 * operation (if it is replaced at all). this has the index of the *new*
	 * root, making it the very first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return NULL;
		/*
		 * if there are no tree operations for the oldest root, we
		 * simply return it. this should only happen if that (old)
		 * root is at level 0.
		 */
		if (!tm)
			break;

		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root. normally, we'll find a
		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		BUG_ON(root_logical == root->node->start);
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}

/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewound (until we reach something older than
 * time_seq).
 */
static void
__tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
		      struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	while (tm && tm->elem.seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			/* fall through */
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is not a root. we simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = container_of(next, struct tree_mod_elem, node);
		if (tm->index != first_tm->index)
			break;
	}
	btrfs_set_header_nritems(eb, n);
}

static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		    u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(eb->start,
						fs_info->tree_root->nodesize);
		BUG_ON(!eb_rewin);
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
		BUG_ON(!eb_rewin);
	}

	extent_buffer_get(eb_rewin);
	free_extent_buffer(eb);

	__tree_mod_log_rewind(eb_rewin, time_seq, tm);

	return eb_rewin;
}

/*
 * get_old_root() rewinds the state of @root's root node to the given @time_seq
 * value. If there are no changes, the current root->node is returned. If
 * anything changed in between, there's a fresh buffer allocated on which the
 * rewind operations are done. In any case, the returned buffer is read locked.
 * Returns NULL on error (with no locks held).
 */
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct extent_buffer *eb;
	struct tree_mod_root *old_root = NULL;
	u64 old_generation = 0;
	u64 logical;

	eb = btrfs_read_lock_root_node(root);
	tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
	if (!tm)
		return root->node;

	if (tm->op == MOD_LOG_ROOT_REPLACE) {
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
	} else {
		logical = root->node->start;
	}

	tm = tree_mod_log_search(root->fs_info, logical, time_seq);
	if (old_root)
		eb = alloc_dummy_extent_buffer(logical, root->nodesize);
	else
		eb = btrfs_clone_extent_buffer(root->node);
	btrfs_tree_read_unlock(root->node);
	free_extent_buffer(root->node);
	if (!eb)
		return NULL;
	btrfs_tree_read_lock(eb);
	if (old_root) {
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, root->root_key.objectid);
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
	if (tm)
		__tree_mod_log_rewind(eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
	extent_buffer_get(eb);

	return eb;
}

static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	/* ensure we can see the force_cow */
	smp_rmb();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create a snapshot during committing the transaction,
	 *    after we've finished copying the src root, we must COW the
	 *    shared block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !root->force_cow)
		return 0;
	return 1;
}

/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	if (trans->transaction != root->fs_info->running_transaction) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)
		       root->fs_info->running_transaction->transid);
		WARN_ON(1);
	}
	if (trans->transid != root->fs_info->generation) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)root->fs_info->generation);
		WARN_ON(1);
	}

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}

/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
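/*
 * e.g. with 16k blocks at logicals 0 and 40k, the gap between them is
 * 40k - 16k = 24k, which is under the 32k limit below, so defrag
 * considers them close and leaves them in place.
 */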
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}

/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
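
/*
 * keys sort by (objectid, type, offset), in that order.  e.g. the key
 * (257, BTRFS_EXTENT_DATA_KEY, 0) sorts before
 * (257, BTRFS_EXTENT_DATA_KEY, 4096): objectid and type compare equal,
 * so the offset decides and btrfs_comp_cpu_keys returns -1.
 */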

/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, int cache_only, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);
	if (cache_only && parent_level != 1)
		return 0;

	if (trans->transaction != root->fs_info->running_transaction)
		WARN_ON(1);
	if (trans->transid != root->fs_info->generation)
		WARN_ON(1);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = btrfs_level_size(root, parent_level - 1);
	end_slot = parent_nritems;

	if (parent_nritems == 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i < end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot - 2) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_find_tree_block(root, blocknr, blocksize);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (cache_only) {
				free_extent_buffer(cur);
				continue;
			}
			if (!cur) {
				cur = read_tree_block(root, blocknr,
							 blocksize, gen);
				if (!cur)
					return -EIO;
			} else if (!uptodate) {
				err = btrfs_read_buffer(cur, gen);
				if (err) {
					free_extent_buffer(cur);
					return err;
				}
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}

/*
 * The leaf data grows from end-to-front in the node.  This returns the
 * address of the start of the last item, which is the stop of the leaf
 * data stack.
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}
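
/*
 * leaf layout for reference: item headers grow from the front of the
 * block towards the end, item data grows from the end towards the
 * front, and free space is whatever is left in the middle:
 *
 *	[header][item 0][item 1]...[free space]...[data 1][data 0]
 */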


/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
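/*
 * returns 0 when the key is found and *slot is set to its position;
 * returns 1 when it is not found and *slot is set to the insertion point.
 */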
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}

/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}

int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}

static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}

/* given a node and slot number, this reads the block it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
				   struct extent_buffer *parent, int slot)
{
	int level = btrfs_header_level(parent);
	if (slot < 0)
		return NULL;
	if (slot >= btrfs_header_nritems(parent))
		return NULL;

	BUG_ON(level == 0);

	return read_tree_block(root, btrfs_node_blockptr(parent, slot),
		       btrfs_level_size(root, level - 1),
		       btrfs_node_ptr_generation(parent, slot));
}

/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	if (level == 0)
		return 0;

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = read_node_slot(root, mid, 0);
		if (!child) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			goto enospc;
		}

		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		tree_mod_log_set_root_pointer(root, child);
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
		return 0;

	left = read_node_slot(root, parent, pslot - 1);
	if (left) {
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}
	right = read_node_slot(root, parent, pslot + 1);
	if (right) {
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, root, left, mid, 1);
		if (wret < 0)
			ret = wret;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, root, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			clean_tree_block(trans, root, right);
			btrfs_tree_unlock(right);
			del_ptr(trans, root, path, level + 1, pslot + 1, 1);
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, root, right, 0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  &right_key, pslot + 1, 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		if (!left) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			goto enospc;
		}
		wret = balance_node_right(trans, root, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, root, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		del_ptr(trans, root, path, level + 1, pslot, 1);
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		tree_mod_log_set_node_key(root->fs_info, parent, &mid_key,
					  pslot, 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			extent_buffer_get(left);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}

/* Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	if (!parent)
		return 1;

	left = read_node_slot(root, parent, pslot - 1);

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;

		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, root,
						      left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  &disk_key, pslot, 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = read_node_slot(root, parent, pslot + 1);

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;

		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, root,
							  right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  &disk_key, pslot + 1, 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}
1956
1957/*
1958 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering readahead on them.
1960 */
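/*
 * The loop below walks outward from 'slot' in the readahead direction
 * and stops once it has queued more than 64KiB (nread > 65536) or
 * scanned more than 32 slots, e.g. roughly 16 leaves per call with a
 * 4KiB leaf size.
 */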
1961static void reada_for_search(struct btrfs_root *root,
1962 struct btrfs_path *path,
1963 int level, int slot, u64 objectid)
1964{
1965 struct extent_buffer *node;
1966 struct btrfs_disk_key disk_key;
1967 u32 nritems;
1968 u64 search;
1969 u64 target;
1970 u64 nread = 0;
1971 u64 gen;
1972 int direction = path->reada;
1973 struct extent_buffer *eb;
1974 u32 nr;
1975 u32 blocksize;
1976 u32 nscan = 0;
1977
1978 if (level != 1)
1979 return;
1980
1981 if (!path->nodes[level])
1982 return;
1983
1984 node = path->nodes[level];
1985
1986 search = btrfs_node_blockptr(node, slot);
1987 blocksize = btrfs_level_size(root, level - 1);
1988 eb = btrfs_find_tree_block(root, search, blocksize);
1989 if (eb) {
1990 free_extent_buffer(eb);
1991 return;
1992 }
1993
1994 target = search;
1995
1996 nritems = btrfs_header_nritems(node);
1997 nr = slot;
1998
1999 while (1) {
2000 if (direction < 0) {
2001 if (nr == 0)
2002 break;
2003 nr--;
2004 } else if (direction > 0) {
2005 nr++;
2006 if (nr >= nritems)
2007 break;
2008 }
2009 if (path->reada < 0 && objectid) {
2010 btrfs_node_key(node, &disk_key, nr);
2011 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2012 break;
2013 }
2014 search = btrfs_node_blockptr(node, nr);
2015 if ((search <= target && target - search <= 65536) ||
2016 (search > target && search - target <= 65536)) {
2017 gen = btrfs_node_ptr_generation(node, nr);
2018 readahead_tree_block(root, search, blocksize, gen);
2019 nread += blocksize;
2020 }
2021 nscan++;
		if (nread > 65536 || nscan > 32)
2023 break;
2024 }
2025}
2026
2027/*
2028 * returns -EAGAIN if it had to drop the path, or zero if everything was in
2029 * cache
2030 */
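/*
 * The caller, setup_nodes_for_search() below, treats any nonzero return
 * as a request to restart the whole search:
 *
 *	sret = reada_for_balance(root, p, level);
 *	if (sret)
 *		goto again;
 */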
2031static noinline int reada_for_balance(struct btrfs_root *root,
2032 struct btrfs_path *path, int level)
2033{
2034 int slot;
2035 int nritems;
2036 struct extent_buffer *parent;
2037 struct extent_buffer *eb;
2038 u64 gen;
2039 u64 block1 = 0;
2040 u64 block2 = 0;
2041 int ret = 0;
2042 int blocksize;
2043
2044 parent = path->nodes[level + 1];
2045 if (!parent)
2046 return 0;
2047
2048 nritems = btrfs_header_nritems(parent);
2049 slot = path->slots[level + 1];
2050 blocksize = btrfs_level_size(root, level);
2051
2052 if (slot > 0) {
2053 block1 = btrfs_node_blockptr(parent, slot - 1);
2054 gen = btrfs_node_ptr_generation(parent, slot - 1);
2055 eb = btrfs_find_tree_block(root, block1, blocksize);
		/*
		 * if we get -EAGAIN from btrfs_buffer_uptodate, we
		 * don't want to return -EAGAIN here.  That will loop
		 * forever
		 */
2061 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2062 block1 = 0;
2063 free_extent_buffer(eb);
2064 }
2065 if (slot + 1 < nritems) {
2066 block2 = btrfs_node_blockptr(parent, slot + 1);
2067 gen = btrfs_node_ptr_generation(parent, slot + 1);
2068 eb = btrfs_find_tree_block(root, block2, blocksize);
2069 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2070 block2 = 0;
2071 free_extent_buffer(eb);
2072 }
2073 if (block1 || block2) {
2074 ret = -EAGAIN;
2075
2076 /* release the whole path */
2077 btrfs_release_path(path);
2078
2079 /* read the blocks */
2080 if (block1)
2081 readahead_tree_block(root, block1, blocksize, 0);
2082 if (block2)
2083 readahead_tree_block(root, block2, blocksize, 0);
2084
2085 if (block1) {
2086 eb = read_tree_block(root, block1, blocksize, 0);
2087 free_extent_buffer(eb);
2088 }
2089 if (block2) {
2090 eb = read_tree_block(root, block2, blocksize, 0);
2091 free_extent_buffer(eb);
2092 }
2093 }
2094 return ret;
2095}
2096
2098/*
2099 * when we walk down the tree, it is usually safe to unlock the higher layers
2100 * in the tree. The exceptions are when our path goes through slot 0, because
2101 * operations on the tree might require changing key pointers higher up in the
2102 * tree.
2103 *
2104 * callers might also have set path->keep_locks, which tells this code to keep
2105 * the lock if the path points to the last slot in the block. This is part of
2106 * walking through the tree, and selecting the next slot in the higher block.
2107 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.
 * So if lowest_unlock is 1, level 0 won't be unlocked.
2110 */
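/*
 * Example: if path->slots[0] == 0, a key change in the leaf may have to
 * be mirrored into its parent, so level 1 stays locked; if
 * path->slots[1] is also 0 the update can propagate again and level 2
 * stays locked too.  The first level with a nonzero slot stops the
 * propagation, and the levels above it can be dropped (subject to
 * lowest_unlock and keep_locks).
 */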
2111static noinline void unlock_up(struct btrfs_path *path, int level,
2112 int lowest_unlock, int min_write_lock_level,
2113 int *write_lock_level)
2114{
2115 int i;
2116 int skip_level = level;
2117 int no_skips = 0;
2118 struct extent_buffer *t;
2119
2120 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2121 if (!path->nodes[i])
2122 break;
2123 if (!path->locks[i])
2124 break;
2125 if (!no_skips && path->slots[i] == 0) {
2126 skip_level = i + 1;
2127 continue;
2128 }
2129 if (!no_skips && path->keep_locks) {
2130 u32 nritems;
2131 t = path->nodes[i];
2132 nritems = btrfs_header_nritems(t);
2133 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2134 skip_level = i + 1;
2135 continue;
2136 }
2137 }
2138 if (skip_level < i && i >= lowest_unlock)
2139 no_skips = 1;
2140
2141 t = path->nodes[i];
2142 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2143 btrfs_tree_unlock_rw(t, path->locks[i]);
2144 path->locks[i] = 0;
2145 if (write_lock_level &&
2146 i > min_write_lock_level &&
2147 i <= *write_lock_level) {
2148 *write_lock_level = i - 1;
2149 }
2150 }
2151 }
2152}
2153
2154/*
2155 * This releases any locks held in the path starting at level and
2156 * going all the way up to the root.
2157 *
2158 * btrfs_search_slot will keep the lock held on higher nodes in a few
2159 * corner cases, such as COW of the block at slot zero in the node. This
2160 * ignores those rules, and it should only be called when there are no
2161 * more updates to be done higher up in the tree.
2162 */
2163noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2164{
2165 int i;
2166
2167 if (path->keep_locks)
2168 return;
2169
2170 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2171 if (!path->nodes[i])
2172 continue;
2173 if (!path->locks[i])
2174 continue;
2175 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2176 path->locks[i] = 0;
2177 }
2178}
2179
2180/*
2181 * helper function for btrfs_search_slot. The goal is to find a block
2182 * in cache without setting the path to blocking. If we find the block
2183 * we return zero and the path is unchanged.
2184 *
2185 * If we can't find the block, we set the path blocking and do some
2186 * reada. -EAGAIN is returned and the search must be repeated.
2187 */
2188static int
2189read_block_for_search(struct btrfs_trans_handle *trans,
2190 struct btrfs_root *root, struct btrfs_path *p,
2191 struct extent_buffer **eb_ret, int level, int slot,
2192 struct btrfs_key *key, u64 time_seq)
2193{
2194 u64 blocknr;
2195 u64 gen;
2196 u32 blocksize;
2197 struct extent_buffer *b = *eb_ret;
2198 struct extent_buffer *tmp;
2199 int ret;
2200
2201 blocknr = btrfs_node_blockptr(b, slot);
2202 gen = btrfs_node_ptr_generation(b, slot);
2203 blocksize = btrfs_level_size(root, level - 1);
2204
2205 tmp = btrfs_find_tree_block(root, blocknr, blocksize);
2206 if (tmp) {
2207 /* first we do an atomic uptodate check */
2208 if (btrfs_buffer_uptodate(tmp, 0, 1) > 0) {
2209 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2210 /*
			 * we found an up to date block without sleeping,
			 * return right away
2214 */
2215 *eb_ret = tmp;
2216 return 0;
2217 }
2218 /* the pages were up to date, but we failed
2219 * the generation number check. Do a full
2220 * read for the generation number that is correct.
2221 * We must do this without dropping locks so
2222 * we can trust our generation number
2223 */
2224 free_extent_buffer(tmp);
2225 btrfs_set_path_blocking(p);
2226
2227 /* now we're allowed to do a blocking uptodate check */
2228 tmp = read_tree_block(root, blocknr, blocksize, gen);
2229 if (tmp && btrfs_buffer_uptodate(tmp, gen, 0) > 0) {
2230 *eb_ret = tmp;
2231 return 0;
2232 }
2233 free_extent_buffer(tmp);
2234 btrfs_release_path(p);
2235 return -EIO;
2236 }
2237 }
2238
2239 /*
2240 * reduce lock contention at high levels
2241 * of the btree by dropping locks before
2242 * we read. Don't release the lock on the current
2243 * level because we need to walk this node to figure
2244 * out which blocks to read.
2245 */
2246 btrfs_unlock_up_safe(p, level + 1);
2247 btrfs_set_path_blocking(p);
2248
2249 free_extent_buffer(tmp);
2250 if (p->reada)
2251 reada_for_search(root, p, level, slot, key->objectid);
2252
2253 btrfs_release_path(p);
2254
2255 ret = -EAGAIN;
2256 tmp = read_tree_block(root, blocknr, blocksize, 0);
2257 if (tmp) {
2258 /*
2259 * If the read above didn't mark this buffer up to date,
2260 * it will never end up being up to date. Set ret to EIO now
2261 * and give up so that our caller doesn't loop forever
2262 * on our EAGAINs.
2263 */
2264 if (!btrfs_buffer_uptodate(tmp, 0, 0))
2265 ret = -EIO;
2266 free_extent_buffer(tmp);
2267 }
2268 return ret;
2269}
2270
2271/*
2272 * helper function for btrfs_search_slot. This does all of the checks
2273 * for node-level blocks and does any balancing required based on
2274 * the ins_len.
2275 *
2276 * If no extra work was required, zero is returned. If we had to
2277 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2278 * start over
2279 */
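/*
 * Note the asymmetric thresholds below: inserts (ins_len > 0, or
 * search_for_split) split a node once it is within 3 pointers of full,
 * while deletes (ins_len < 0) only trigger a rebalance once the node
 * drops below half of BTRFS_NODEPTRS_PER_BLOCK().
 */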
2280static int
2281setup_nodes_for_search(struct btrfs_trans_handle *trans,
2282 struct btrfs_root *root, struct btrfs_path *p,
2283 struct extent_buffer *b, int level, int ins_len,
2284 int *write_lock_level)
2285{
2286 int ret;
2287 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2288 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2289 int sret;
2290
2291 if (*write_lock_level < level + 1) {
2292 *write_lock_level = level + 1;
2293 btrfs_release_path(p);
2294 goto again;
2295 }
2296
2297 sret = reada_for_balance(root, p, level);
2298 if (sret)
2299 goto again;
2300
2301 btrfs_set_path_blocking(p);
2302 sret = split_node(trans, root, p, level);
2303 btrfs_clear_path_blocking(p, NULL, 0);
2304
2305 BUG_ON(sret > 0);
2306 if (sret) {
2307 ret = sret;
2308 goto done;
2309 }
2310 b = p->nodes[level];
2311 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2312 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
2313 int sret;
2314
2315 if (*write_lock_level < level + 1) {
2316 *write_lock_level = level + 1;
2317 btrfs_release_path(p);
2318 goto again;
2319 }
2320
2321 sret = reada_for_balance(root, p, level);
2322 if (sret)
2323 goto again;
2324
2325 btrfs_set_path_blocking(p);
2326 sret = balance_level(trans, root, p, level);
2327 btrfs_clear_path_blocking(p, NULL, 0);
2328
2329 if (sret) {
2330 ret = sret;
2331 goto done;
2332 }
2333 b = p->nodes[level];
2334 if (!b) {
2335 btrfs_release_path(p);
2336 goto again;
2337 }
2338 BUG_ON(btrfs_header_nritems(b) == 1);
2339 }
2340 return 0;
2341
2342again:
2343 ret = -EAGAIN;
2344done:
2345 return ret;
2346}
2347
2348/*
 * look for key in the tree. The path is filled in with nodes along the
 * way. If the key is found, we return zero and you can find the item in
 * the leaf level of the path (level 0).
 *
 * If the key isn't found, the path points to the slot where it should
 * be inserted, and 1 is returned. If there are other errors during the
 * search a negative error number is returned.
 *
 * If ins_len > 0, nodes and leaves will be split as we walk down the
 * tree. If ins_len < 0, nodes will be merged as we walk down the tree
 * (if possible).
2360 */
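/*
 * Minimal usage sketch (illustrative only; the key values are made up):
 *
 *	struct btrfs_path *path;
 *	struct btrfs_key key;
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	key.objectid = ino;
 *	key.type = BTRFS_EXTENT_DATA_KEY;
 *	key.offset = 0;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		... the item is at path->nodes[0], path->slots[0] ...
 *	btrfs_free_path(path);
 *
 * With a NULL trans handle and ins_len == cow == 0 this is a plain
 * read-only lookup and never modifies the tree.
 */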
2361int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2362 *root, struct btrfs_key *key, struct btrfs_path *p, int
2363 ins_len, int cow)
2364{
2365 struct extent_buffer *b;
2366 int slot;
2367 int ret;
2368 int err;
2369 int level;
2370 int lowest_unlock = 1;
2371 int root_lock;
2372 /* everything at write_lock_level or lower must be write locked */
2373 int write_lock_level = 0;
2374 u8 lowest_level = 0;
2375 int min_write_lock_level;
2376
2377 lowest_level = p->lowest_level;
2378 WARN_ON(lowest_level && ins_len > 0);
2379 WARN_ON(p->nodes[0] != NULL);
2380
2381 if (ins_len < 0) {
2382 lowest_unlock = 2;
2383
		/* when we are removing items, we might have to go up to
		 * level two as we update tree pointers.  Make sure we keep
		 * write locks for those levels as well
		 */
2388 write_lock_level = 2;
2389 } else if (ins_len > 0) {
2390 /*
2391 * for inserting items, make sure we have a write lock on
2392 * level 1 so we can update keys
2393 */
2394 write_lock_level = 1;
2395 }
2396
2397 if (!cow)
2398 write_lock_level = -1;
2399
2400 if (cow && (p->keep_locks || p->lowest_level))
2401 write_lock_level = BTRFS_MAX_LEVEL;
2402
2403 min_write_lock_level = write_lock_level;
2404
2405again:
2406 /*
2407 * we try very hard to do read locks on the root
2408 */
2409 root_lock = BTRFS_READ_LOCK;
2410 level = 0;
2411 if (p->search_commit_root) {
2412 /*
2413 * the commit roots are read only
2414 * so we always do read locks
2415 */
2416 b = root->commit_root;
2417 extent_buffer_get(b);
2418 level = btrfs_header_level(b);
2419 if (!p->skip_locking)
2420 btrfs_tree_read_lock(b);
2421 } else {
2422 if (p->skip_locking) {
2423 b = btrfs_root_node(root);
2424 level = btrfs_header_level(b);
2425 } else {
2426 /* we don't know the level of the root node
2427 * until we actually have it read locked
2428 */
2429 b = btrfs_read_lock_root_node(root);
2430 level = btrfs_header_level(b);
2431 if (level <= write_lock_level) {
2432 /* whoops, must trade for write lock */
2433 btrfs_tree_read_unlock(b);
2434 free_extent_buffer(b);
2435 b = btrfs_lock_root_node(root);
2436 root_lock = BTRFS_WRITE_LOCK;
2437
2438 /* the level might have changed, check again */
2439 level = btrfs_header_level(b);
2440 }
2441 }
2442 }
2443 p->nodes[level] = b;
2444 if (!p->skip_locking)
2445 p->locks[level] = root_lock;
2446
2447 while (b) {
2448 level = btrfs_header_level(b);
2449
2450 /*
2451 * setup the path here so we can release it under lock
2452 * contention with the cow code
2453 */
2454 if (cow) {
2455 /*
2456 * if we don't really need to cow this block
2457 * then we don't want to set the path blocking,
2458 * so we test it here
2459 */
2460 if (!should_cow_block(trans, root, b))
2461 goto cow_done;
2462
2463 btrfs_set_path_blocking(p);
2464
2465 /*
2466 * must have write locks on this node and the
2467 * parent
2468 */
2469 if (level + 1 > write_lock_level) {
2470 write_lock_level = level + 1;
2471 btrfs_release_path(p);
2472 goto again;
2473 }
2474
2475 err = btrfs_cow_block(trans, root, b,
2476 p->nodes[level + 1],
2477 p->slots[level + 1], &b);
2478 if (err) {
2479 ret = err;
2480 goto done;
2481 }
2482 }
2483cow_done:
2484 BUG_ON(!cow && ins_len);
2485
2486 p->nodes[level] = b;
2487 btrfs_clear_path_blocking(p, NULL, 0);
2488
2489 /*
2490 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
2492 * It is safe to drop the lock on our parent before we
2493 * go through the expensive btree search on b.
2494 *
2495 * If cow is true, then we might be changing slot zero,
2496 * which may require changing the parent. So, we can't
2497 * drop the lock until after we know which slot we're
2498 * operating on.
2499 */
2500 if (!cow)
2501 btrfs_unlock_up_safe(p, level + 1);
2502
2503 ret = bin_search(b, key, level, &slot);
2504
2505 if (level != 0) {
2506 int dec = 0;
2507 if (ret && slot > 0) {
2508 dec = 1;
2509 slot -= 1;
2510 }
2511 p->slots[level] = slot;
2512 err = setup_nodes_for_search(trans, root, p, b, level,
2513 ins_len, &write_lock_level);
2514 if (err == -EAGAIN)
2515 goto again;
2516 if (err) {
2517 ret = err;
2518 goto done;
2519 }
2520 b = p->nodes[level];
2521 slot = p->slots[level];
2522
			/*
			 * slot 0 is special: if we change the key we
			 * have to update the parent pointer, which
			 * means we must have a write lock on the
			 * parent
			 */
2529 if (slot == 0 && cow &&
2530 write_lock_level < level + 1) {
2531 write_lock_level = level + 1;
2532 btrfs_release_path(p);
2533 goto again;
2534 }
2535
2536 unlock_up(p, level, lowest_unlock,
2537 min_write_lock_level, &write_lock_level);
2538
2539 if (level == lowest_level) {
2540 if (dec)
2541 p->slots[level]++;
2542 goto done;
2543 }
2544
2545 err = read_block_for_search(trans, root, p,
2546 &b, level, slot, key, 0);
2547 if (err == -EAGAIN)
2548 goto again;
2549 if (err) {
2550 ret = err;
2551 goto done;
2552 }
2553
2554 if (!p->skip_locking) {
2555 level = btrfs_header_level(b);
2556 if (level <= write_lock_level) {
2557 err = btrfs_try_tree_write_lock(b);
2558 if (!err) {
2559 btrfs_set_path_blocking(p);
2560 btrfs_tree_lock(b);
2561 btrfs_clear_path_blocking(p, b,
2562 BTRFS_WRITE_LOCK);
2563 }
2564 p->locks[level] = BTRFS_WRITE_LOCK;
2565 } else {
2566 err = btrfs_try_tree_read_lock(b);
2567 if (!err) {
2568 btrfs_set_path_blocking(p);
2569 btrfs_tree_read_lock(b);
2570 btrfs_clear_path_blocking(p, b,
2571 BTRFS_READ_LOCK);
2572 }
2573 p->locks[level] = BTRFS_READ_LOCK;
2574 }
2575 p->nodes[level] = b;
2576 }
2577 } else {
2578 p->slots[level] = slot;
2579 if (ins_len > 0 &&
2580 btrfs_leaf_free_space(root, b) < ins_len) {
2581 if (write_lock_level < 1) {
2582 write_lock_level = 1;
2583 btrfs_release_path(p);
2584 goto again;
2585 }
2586
2587 btrfs_set_path_blocking(p);
2588 err = split_leaf(trans, root, key,
2589 p, ins_len, ret == 0);
2590 btrfs_clear_path_blocking(p, NULL, 0);
2591
2592 BUG_ON(err > 0);
2593 if (err) {
2594 ret = err;
2595 goto done;
2596 }
2597 }
2598 if (!p->search_for_split)
2599 unlock_up(p, level, lowest_unlock,
2600 min_write_lock_level, &write_lock_level);
2601 goto done;
2602 }
2603 }
2604 ret = 1;
2605done:
2606 /*
2607 * we don't really know what they plan on doing with the path
2608 * from here on, so for now just mark it as blocking
2609 */
2610 if (!p->leave_spinning)
2611 btrfs_set_path_blocking(p);
2612 if (ret < 0)
2613 btrfs_release_path(p);
2614 return ret;
2615}
2616
2617/*
2618 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2619 * current state of the tree together with the operations recorded in the tree
2620 * modification log to search for the key in a previous version of this tree, as
2621 * denoted by the time_seq parameter.
2622 *
2623 * Naturally, there is no support for insert, delete or cow operations.
2624 *
2625 * The resulting path and return value will be set up as if we called
2626 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2627 */
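/*
 * Sketch of the intended use (hedged; the main consumer is the backref
 * walking code): pin a sequence number with btrfs_get_tree_mod_seq(),
 * pass it in here as time_seq to see the tree as it was at that moment,
 * and release it with btrfs_put_tree_mod_seq() when done.
 */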
2628int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2629 struct btrfs_path *p, u64 time_seq)
2630{
2631 struct extent_buffer *b;
2632 int slot;
2633 int ret;
2634 int err;
2635 int level;
2636 int lowest_unlock = 1;
2637 u8 lowest_level = 0;
2638
2639 lowest_level = p->lowest_level;
2640 WARN_ON(p->nodes[0] != NULL);
2641
2642 if (p->search_commit_root) {
2643 BUG_ON(time_seq);
2644 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2645 }
2646
2647again:
2648 b = get_old_root(root, time_seq);
2649 level = btrfs_header_level(b);
2650 p->locks[level] = BTRFS_READ_LOCK;
2651
2652 while (b) {
2653 level = btrfs_header_level(b);
2654 p->nodes[level] = b;
2655 btrfs_clear_path_blocking(p, NULL, 0);
2656
2657 /*
2658 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
2660 * It is safe to drop the lock on our parent before we
2661 * go through the expensive btree search on b.
2662 */
2663 btrfs_unlock_up_safe(p, level + 1);
2664
2665 ret = bin_search(b, key, level, &slot);
2666
2667 if (level != 0) {
2668 int dec = 0;
2669 if (ret && slot > 0) {
2670 dec = 1;
2671 slot -= 1;
2672 }
2673 p->slots[level] = slot;
2674 unlock_up(p, level, lowest_unlock, 0, NULL);
2675
2676 if (level == lowest_level) {
2677 if (dec)
2678 p->slots[level]++;
2679 goto done;
2680 }
2681
2682 err = read_block_for_search(NULL, root, p, &b, level,
2683 slot, key, time_seq);
2684 if (err == -EAGAIN)
2685 goto again;
2686 if (err) {
2687 ret = err;
2688 goto done;
2689 }
2690
2691 level = btrfs_header_level(b);
2692 err = btrfs_try_tree_read_lock(b);
2693 if (!err) {
2694 btrfs_set_path_blocking(p);
2695 btrfs_tree_read_lock(b);
2696 btrfs_clear_path_blocking(p, b,
2697 BTRFS_READ_LOCK);
2698 }
2699 p->locks[level] = BTRFS_READ_LOCK;
2700 p->nodes[level] = b;
2701 b = tree_mod_log_rewind(root->fs_info, b, time_seq);
2702 if (b != p->nodes[level]) {
2703 btrfs_tree_unlock_rw(p->nodes[level],
2704 p->locks[level]);
2705 p->locks[level] = 0;
2706 p->nodes[level] = b;
2707 }
2708 } else {
2709 p->slots[level] = slot;
2710 unlock_up(p, level, lowest_unlock, 0, NULL);
2711 goto done;
2712 }
2713 }
2714 ret = 1;
2715done:
2716 if (!p->leave_spinning)
2717 btrfs_set_path_blocking(p);
2718 if (ret < 0)
2719 btrfs_release_path(p);
2720
2721 return ret;
2722}
2723
2724/*
2725 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
2731 */
2732static void fixup_low_keys(struct btrfs_trans_handle *trans,
2733 struct btrfs_root *root, struct btrfs_path *path,
2734 struct btrfs_disk_key *key, int level)
2735{
2736 int i;
2737 struct extent_buffer *t;
2738
2739 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2740 int tslot = path->slots[i];
2741 if (!path->nodes[i])
2742 break;
2743 t = path->nodes[i];
2744 tree_mod_log_set_node_key(root->fs_info, t, key, tslot, 1);
2745 btrfs_set_node_key(t, key, tslot);
2746 btrfs_mark_buffer_dirty(path->nodes[i]);
2747 if (tslot != 0)
2748 break;
2749 }
2750}
2751
2752/*
2753 * update item key.
2754 *
 * This function isn't completely safe. It's the caller's responsibility
 * to make sure the new key won't break the sort order.
2757 */
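/*
 * "Break the order" means the new key must still sort between its
 * neighbours.  E.g. (made-up keys) with (257, 108, 0) in the previous
 * slot and (257, 108, 8192) in the next one, changing this item's key
 * to (257, 108, 4096) is fine, while (258, 108, 0) would corrupt the
 * leaf ordering and trip the BUG_ON()s below.
 */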
2758void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
2759 struct btrfs_root *root, struct btrfs_path *path,
2760 struct btrfs_key *new_key)
2761{
2762 struct btrfs_disk_key disk_key;
2763 struct extent_buffer *eb;
2764 int slot;
2765
2766 eb = path->nodes[0];
2767 slot = path->slots[0];
2768 if (slot > 0) {
2769 btrfs_item_key(eb, &disk_key, slot - 1);
2770 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
2771 }
2772 if (slot < btrfs_header_nritems(eb) - 1) {
2773 btrfs_item_key(eb, &disk_key, slot + 1);
2774 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
2775 }
2776
2777 btrfs_cpu_key_to_disk(&disk_key, new_key);
2778 btrfs_set_item_key(eb, &disk_key, slot);
2779 btrfs_mark_buffer_dirty(eb);
2780 if (slot == 0)
2781 fixup_low_keys(trans, root, path, &disk_key, 1);
2782}
2783
2784/*
 * try to push data from one node into the node to its left in the
 * tree.
2787 *
2788 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2789 * error, and > 0 if there was no room in the left hand block.
2790 */
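/*
 * Example of the reserve logic below (made-up numbers): with
 * src_nritems == 10 and room for 20 more pointers in dst, a non-empty
 * push moves min(10 - 8, 20) == 2 pointers, always leaving 8 behind; an
 * empty push (empty == 1) may move all 10.
 */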
2791static int push_node_left(struct btrfs_trans_handle *trans,
2792 struct btrfs_root *root, struct extent_buffer *dst,
2793 struct extent_buffer *src, int empty)
2794{
2795 int push_items = 0;
2796 int src_nritems;
2797 int dst_nritems;
2798 int ret = 0;
2799
2800 src_nritems = btrfs_header_nritems(src);
2801 dst_nritems = btrfs_header_nritems(dst);
2802 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
2803 WARN_ON(btrfs_header_generation(src) != trans->transid);
2804 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2805
2806 if (!empty && src_nritems <= 8)
2807 return 1;
2808
2809 if (push_items <= 0)
2810 return 1;
2811
2812 if (empty) {
2813 push_items = min(src_nritems, push_items);
2814 if (push_items < src_nritems) {
2815 /* leave at least 8 pointers in the node if
2816 * we aren't going to empty it
2817 */
2818 if (src_nritems - push_items < 8) {
2819 if (push_items <= 8)
2820 return 1;
2821 push_items -= 8;
2822 }
2823 }
2824 } else
2825 push_items = min(src_nritems - 8, push_items);
2826
2827 tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
2828 push_items);
2829 copy_extent_buffer(dst, src,
2830 btrfs_node_key_ptr_offset(dst_nritems),
2831 btrfs_node_key_ptr_offset(0),
2832 push_items * sizeof(struct btrfs_key_ptr));
2833
2834 if (push_items < src_nritems) {
2835 tree_mod_log_eb_move(root->fs_info, src, 0, push_items,
2836 src_nritems - push_items);
2837 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
2838 btrfs_node_key_ptr_offset(push_items),
2839 (src_nritems - push_items) *
2840 sizeof(struct btrfs_key_ptr));
2841 }
2842 btrfs_set_header_nritems(src, src_nritems - push_items);
2843 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2844 btrfs_mark_buffer_dirty(src);
2845 btrfs_mark_buffer_dirty(dst);
2846
2847 return ret;
2848}
2849
2850/*
 * try to push data from one node into the node to its right in the
 * tree.
2853 *
2854 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
2855 * error, and > 0 if there was no room in the right hand block.
2856 *
2857 * this will only push up to 1/2 the contents of the left node over
2858 */
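/*
 * Example (made-up numbers): src_nritems == 10 gives max_push == 6, so
 * at most 6 pointers move right and the source keeps at least 4; source
 * nodes with fewer than 4 pointers are never touched.
 */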
2859static int balance_node_right(struct btrfs_trans_handle *trans,
2860 struct btrfs_root *root,
2861 struct extent_buffer *dst,
2862 struct extent_buffer *src)
2863{
2864 int push_items = 0;
2865 int max_push;
2866 int src_nritems;
2867 int dst_nritems;
2868 int ret = 0;
2869
2870 WARN_ON(btrfs_header_generation(src) != trans->transid);
2871 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2872
2873 src_nritems = btrfs_header_nritems(src);
2874 dst_nritems = btrfs_header_nritems(dst);
2875 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
2876 if (push_items <= 0)
2877 return 1;
2878
2879 if (src_nritems < 4)
2880 return 1;
2881
2882 max_push = src_nritems / 2 + 1;
2883 /* don't try to empty the node */
2884 if (max_push >= src_nritems)
2885 return 1;
2886
2887 if (max_push < push_items)
2888 push_items = max_push;
2889
2890 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
2891 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
2892 btrfs_node_key_ptr_offset(0),
2893 (dst_nritems) *
2894 sizeof(struct btrfs_key_ptr));
2895
2896 tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
2897 src_nritems - push_items, push_items);
2898 copy_extent_buffer(dst, src,
2899 btrfs_node_key_ptr_offset(0),
2900 btrfs_node_key_ptr_offset(src_nritems - push_items),
2901 push_items * sizeof(struct btrfs_key_ptr));
2902
2903 btrfs_set_header_nritems(src, src_nritems - push_items);
2904 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2905
2906 btrfs_mark_buffer_dirty(src);
2907 btrfs_mark_buffer_dirty(dst);
2908
2909 return ret;
2910}
2911
2912/*
2913 * helper function to insert a new root level in the tree.
2914 * A new node is allocated, and a single item is inserted to
2915 * point to the existing root
2916 *
2917 * returns zero on success or < 0 on failure.
2918 */
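/*
 * Illustration of the result:
 *
 *	Before:                    After:
 *
 *	  [ old root ]                 [ c ] (new root, one pointer)
 *	   /   |   \                      |
 *	 ...  ...  ...               [ old root ]
 *	                              /   |   \
 *	                            ...  ...  ...
 */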
2919static noinline int insert_new_root(struct btrfs_trans_handle *trans,
2920 struct btrfs_root *root,
2921 struct btrfs_path *path, int level)
2922{
2923 u64 lower_gen;
2924 struct extent_buffer *lower;
2925 struct extent_buffer *c;
2926 struct extent_buffer *old;
2927 struct btrfs_disk_key lower_key;
2928
2929 BUG_ON(path->nodes[level]);
2930 BUG_ON(path->nodes[level-1] != root->node);
2931
2932 lower = path->nodes[level-1];
2933 if (level == 1)
2934 btrfs_item_key(lower, &lower_key, 0);
2935 else
2936 btrfs_node_key(lower, &lower_key, 0);
2937
2938 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
2939 root->root_key.objectid, &lower_key,
2940 level, root->node->start, 0);
2941 if (IS_ERR(c))
2942 return PTR_ERR(c);
2943
2944 root_add_used(root, root->nodesize);
2945
2946 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
2947 btrfs_set_header_nritems(c, 1);
2948 btrfs_set_header_level(c, level);
2949 btrfs_set_header_bytenr(c, c->start);
2950 btrfs_set_header_generation(c, trans->transid);
2951 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
2952 btrfs_set_header_owner(c, root->root_key.objectid);
2953
2954 write_extent_buffer(c, root->fs_info->fsid,
2955 (unsigned long)btrfs_header_fsid(c),
2956 BTRFS_FSID_SIZE);
2957
2958 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
2959 (unsigned long)btrfs_header_chunk_tree_uuid(c),
2960 BTRFS_UUID_SIZE);
2961
2962 btrfs_set_node_key(c, &lower_key, 0);
2963 btrfs_set_node_blockptr(c, 0, lower->start);
2964 lower_gen = btrfs_header_generation(lower);
2965 WARN_ON(lower_gen != trans->transid);
2966
2967 btrfs_set_node_ptr_generation(c, 0, lower_gen);
2968
2969 btrfs_mark_buffer_dirty(c);
2970
2971 old = root->node;
2972 tree_mod_log_set_root_pointer(root, c);
2973 rcu_assign_pointer(root->node, c);
2974
2975 /* the super has an extra ref to root->node */
2976 free_extent_buffer(old);
2977
2978 add_root_to_dirty_list(root);
2979 extent_buffer_get(c);
2980 path->nodes[level] = c;
2981 path->locks[level] = BTRFS_WRITE_LOCK;
2982 path->slots[level] = 0;
2983 return 0;
2984}
2985
2986/*
2987 * worker function to insert a single pointer in a node.
2988 * the node should have enough room for the pointer already
2989 *
2990 * slot and level indicate where you want the key to go, and
 * bytenr is the block the key points to.
2992 */
2993static void insert_ptr(struct btrfs_trans_handle *trans,
2994 struct btrfs_root *root, struct btrfs_path *path,
2995 struct btrfs_disk_key *key, u64 bytenr,
2996 int slot, int level)
2997{
2998 struct extent_buffer *lower;
2999 int nritems;
3000 int ret;
3001
3002 BUG_ON(!path->nodes[level]);
3003 btrfs_assert_tree_locked(path->nodes[level]);
3004 lower = path->nodes[level];
3005 nritems = btrfs_header_nritems(lower);
3006 BUG_ON(slot > nritems);
3007 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
3008 if (slot != nritems) {
3009 if (level)
3010 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3011 slot, nritems - slot);
3012 memmove_extent_buffer(lower,
3013 btrfs_node_key_ptr_offset(slot + 1),
3014 btrfs_node_key_ptr_offset(slot),
3015 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3016 }
3017 if (level) {
3018 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
3019 MOD_LOG_KEY_ADD);
3020 BUG_ON(ret < 0);
3021 }
3022 btrfs_set_node_key(lower, key, slot);
3023 btrfs_set_node_blockptr(lower, slot, bytenr);
3024 WARN_ON(trans->transid == 0);
3025 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3026 btrfs_set_header_nritems(lower, nritems + 1);
3027 btrfs_mark_buffer_dirty(lower);
3028}
3029
3030/*
3031 * split the node at the specified level in path in two.
3032 * The path is corrected to point to the appropriate node after the split
3033 *
3034 * Before splitting this tries to make some room in the node by pushing
3035 * left and right, if either one works, it returns right away.
3036 *
3037 * returns 0 on success and < 0 on failure
3038 */
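/*
 * Worked example (made-up numbers): a node with 121 pointers splits at
 * mid == 61; the original keeps pointers [0, 60] and the new 'split'
 * buffer takes [61, 120].  A path slot of 80 becomes slot 19 in the new
 * node.
 */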
3039static noinline int split_node(struct btrfs_trans_handle *trans,
3040 struct btrfs_root *root,
3041 struct btrfs_path *path, int level)
3042{
3043 struct extent_buffer *c;
3044 struct extent_buffer *split;
3045 struct btrfs_disk_key disk_key;
3046 int mid;
3047 int ret;
3048 u32 c_nritems;
3049
3050 c = path->nodes[level];
3051 WARN_ON(btrfs_header_generation(c) != trans->transid);
3052 if (c == root->node) {
		/* trying to split the root, let's make a new one */
3054 ret = insert_new_root(trans, root, path, level + 1);
3055 if (ret)
3056 return ret;
3057 } else {
3058 ret = push_nodes_for_insert(trans, root, path, level);
3059 c = path->nodes[level];
3060 if (!ret && btrfs_header_nritems(c) <
3061 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3062 return 0;
3063 if (ret < 0)
3064 return ret;
3065 }
3066
3067 c_nritems = btrfs_header_nritems(c);
3068 mid = (c_nritems + 1) / 2;
3069 btrfs_node_key(c, &disk_key, mid);
3070
3071 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3072 root->root_key.objectid,
3073 &disk_key, level, c->start, 0);
3074 if (IS_ERR(split))
3075 return PTR_ERR(split);
3076
3077 root_add_used(root, root->nodesize);
3078
3079 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3080 btrfs_set_header_level(split, btrfs_header_level(c));
3081 btrfs_set_header_bytenr(split, split->start);
3082 btrfs_set_header_generation(split, trans->transid);
3083 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3084 btrfs_set_header_owner(split, root->root_key.objectid);
3085 write_extent_buffer(split, root->fs_info->fsid,
3086 (unsigned long)btrfs_header_fsid(split),
3087 BTRFS_FSID_SIZE);
3088 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3089 (unsigned long)btrfs_header_chunk_tree_uuid(split),
3090 BTRFS_UUID_SIZE);
3091
3092 tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
3093 copy_extent_buffer(split, c,
3094 btrfs_node_key_ptr_offset(0),
3095 btrfs_node_key_ptr_offset(mid),
3096 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3097 btrfs_set_header_nritems(split, c_nritems - mid);
3098 btrfs_set_header_nritems(c, mid);
3099 ret = 0;
3100
3101 btrfs_mark_buffer_dirty(c);
3102 btrfs_mark_buffer_dirty(split);
3103
3104 insert_ptr(trans, root, path, &disk_key, split->start,
3105 path->slots[level + 1] + 1, level + 1);
3106
3107 if (path->slots[level] >= mid) {
3108 path->slots[level] -= mid;
3109 btrfs_tree_unlock(c);
3110 free_extent_buffer(c);
3111 path->nodes[level] = split;
3112 path->slots[level + 1] += 1;
3113 } else {
3114 btrfs_tree_unlock(split);
3115 free_extent_buffer(split);
3116 }
3117 return ret;
3118}
3119
3120/*
3121 * how many bytes are required to store the items in a leaf. start
3122 * and nr indicate which items in the leaf to check. This totals up the
3123 * space used both by the item structs and the item data
3124 */
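/*
 * Worked example (made-up sizes): three 100 byte items checked from
 * slot 0 give data_len = item_end(0) - item_offset(2) = 300 bytes of
 * data, plus 3 * sizeof(struct btrfs_item) bytes of item headers.
 */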
3125static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3126{
3127 int data_len;
3128 int nritems = btrfs_header_nritems(l);
3129 int end = min(nritems, start + nr) - 1;
3130
3131 if (!nr)
3132 return 0;
3133 data_len = btrfs_item_end_nr(l, start);
3134 data_len = data_len - btrfs_item_offset_nr(l, end);
3135 data_len += sizeof(struct btrfs_item) * nr;
3136 WARN_ON(data_len < 0);
3137 return data_len;
3138}
3139
3140/*
3141 * The space between the end of the leaf items and
3142 * the start of the leaf data. IOW, how much room
3143 * the leaf has left for both items and data
3144 */
3145noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3146 struct extent_buffer *leaf)
3147{
3148 int nritems = btrfs_header_nritems(leaf);
3149 int ret;
3150 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3151 if (ret < 0) {
3152 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
3153 "used %d nritems %d\n",
3154 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3155 leaf_space_used(leaf, 0, nritems), nritems);
3156 }
3157 return ret;
3158}
3159
3160/*
 * min_slot controls the lowest index we're willing to push to the
3162 * right. We'll push up to and including min_slot, but no lower
3163 */
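/*
 * The "space + push_space * 2 > free_space" check in the loop below is
 * a heuristic: once the slot we are trying to insert into is about to
 * be pushed across, only keep going if the right leaf would still have
 * roughly twice the freed space available; otherwise the push merely
 * relocates the shortage.
 */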
3164static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3165 struct btrfs_root *root,
3166 struct btrfs_path *path,
3167 int data_size, int empty,
3168 struct extent_buffer *right,
3169 int free_space, u32 left_nritems,
3170 u32 min_slot)
3171{
3172 struct extent_buffer *left = path->nodes[0];
3173 struct extent_buffer *upper = path->nodes[1];
3174 struct btrfs_map_token token;
3175 struct btrfs_disk_key disk_key;
3176 int slot;
3177 u32 i;
3178 int push_space = 0;
3179 int push_items = 0;
3180 struct btrfs_item *item;
3181 u32 nr;
3182 u32 right_nritems;
3183 u32 data_end;
3184 u32 this_item_size;
3185
3186 btrfs_init_map_token(&token);
3187
3188 if (empty)
3189 nr = 0;
3190 else
3191 nr = max_t(u32, 1, min_slot);
3192
3193 if (path->slots[0] >= left_nritems)
3194 push_space += data_size;
3195
3196 slot = path->slots[1];
3197 i = left_nritems - 1;
3198 while (i >= nr) {
3199 item = btrfs_item_nr(left, i);
3200
3201 if (!empty && push_items > 0) {
3202 if (path->slots[0] > i)
3203 break;
3204 if (path->slots[0] == i) {
3205 int space = btrfs_leaf_free_space(root, left);
3206 if (space + push_space * 2 > free_space)
3207 break;
3208 }
3209 }
3210
3211 if (path->slots[0] == i)
3212 push_space += data_size;
3213
3214 this_item_size = btrfs_item_size(left, item);
3215 if (this_item_size + sizeof(*item) + push_space > free_space)
3216 break;
3217
3218 push_items++;
3219 push_space += this_item_size + sizeof(*item);
3220 if (i == 0)
3221 break;
3222 i--;
3223 }
3224
3225 if (push_items == 0)
3226 goto out_unlock;
3227
	WARN_ON(!empty && push_items == left_nritems);
3230
3231 /* push left to right */
3232 right_nritems = btrfs_header_nritems(right);
3233
3234 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3235 push_space -= leaf_data_end(root, left);
3236
3237 /* make room in the right data area */
3238 data_end = leaf_data_end(root, right);
3239 memmove_extent_buffer(right,
3240 btrfs_leaf_data(right) + data_end - push_space,
3241 btrfs_leaf_data(right) + data_end,
3242 BTRFS_LEAF_DATA_SIZE(root) - data_end);
3243
3244 /* copy from the left data area */
3245 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3246 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3247 btrfs_leaf_data(left) + leaf_data_end(root, left),
3248 push_space);
3249
3250 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3251 btrfs_item_nr_offset(0),
3252 right_nritems * sizeof(struct btrfs_item));
3253
3254 /* copy the items from left to right */
3255 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3256 btrfs_item_nr_offset(left_nritems - push_items),
3257 push_items * sizeof(struct btrfs_item));
3258
3259 /* update the item pointers */
3260 right_nritems += push_items;
3261 btrfs_set_header_nritems(right, right_nritems);
3262 push_space = BTRFS_LEAF_DATA_SIZE(root);
3263 for (i = 0; i < right_nritems; i++) {
3264 item = btrfs_item_nr(right, i);
3265 push_space -= btrfs_token_item_size(right, item, &token);
3266 btrfs_set_token_item_offset(right, item, push_space, &token);
3267 }
3268
3269 left_nritems -= push_items;
3270 btrfs_set_header_nritems(left, left_nritems);
3271
3272 if (left_nritems)
3273 btrfs_mark_buffer_dirty(left);
3274 else
3275 clean_tree_block(trans, root, left);
3276
3277 btrfs_mark_buffer_dirty(right);
3278
3279 btrfs_item_key(right, &disk_key, 0);
3280 btrfs_set_node_key(upper, &disk_key, slot + 1);
3281 btrfs_mark_buffer_dirty(upper);
3282
3283 /* then fixup the leaf pointer in the path */
3284 if (path->slots[0] >= left_nritems) {
3285 path->slots[0] -= left_nritems;
3286 if (btrfs_header_nritems(path->nodes[0]) == 0)
3287 clean_tree_block(trans, root, path->nodes[0]);
3288 btrfs_tree_unlock(path->nodes[0]);
3289 free_extent_buffer(path->nodes[0]);
3290 path->nodes[0] = right;
3291 path->slots[1] += 1;
3292 } else {
3293 btrfs_tree_unlock(right);
3294 free_extent_buffer(right);
3295 }
3296 return 0;
3297
3298out_unlock:
3299 btrfs_tree_unlock(right);
3300 free_extent_buffer(right);
3301 return 1;
3302}
3303
3304/*
3305 * push some data in the path leaf to the right, trying to free up at
3306 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3307 *
3308 * returns 1 if the push failed because the other node didn't have enough
3309 * room, 0 if everything worked out and < 0 if there were major errors.
3310 *
3311 * this will push starting from min_slot to the end of the leaf. It won't
3312 * push any slot lower than min_slot
3313 */
3314static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3315 *root, struct btrfs_path *path,
3316 int min_data_size, int data_size,
3317 int empty, u32 min_slot)
3318{
3319 struct extent_buffer *left = path->nodes[0];
3320 struct extent_buffer *right;
3321 struct extent_buffer *upper;
3322 int slot;
3323 int free_space;
3324 u32 left_nritems;
3325 int ret;
3326
3327 if (!path->nodes[1])
3328 return 1;
3329
3330 slot = path->slots[1];
3331 upper = path->nodes[1];
3332 if (slot >= btrfs_header_nritems(upper) - 1)
3333 return 1;
3334
3335 btrfs_assert_tree_locked(path->nodes[1]);
3336
3337 right = read_node_slot(root, upper, slot + 1);
3338 if (right == NULL)
3339 return 1;
3340
3341 btrfs_tree_lock(right);
3342 btrfs_set_lock_blocking(right);
3343
3344 free_space = btrfs_leaf_free_space(root, right);
3345 if (free_space < data_size)
3346 goto out_unlock;
3347
3348 /* cow and double check */
3349 ret = btrfs_cow_block(trans, root, right, upper,
3350 slot + 1, &right);
3351 if (ret)
3352 goto out_unlock;
3353
3354 free_space = btrfs_leaf_free_space(root, right);
3355 if (free_space < data_size)
3356 goto out_unlock;
3357
3358 left_nritems = btrfs_header_nritems(left);
3359 if (left_nritems == 0)
3360 goto out_unlock;
3361
3362 return __push_leaf_right(trans, root, path, min_data_size, empty,
3363 right, free_space, left_nritems, min_slot);
3364out_unlock:
3365 btrfs_tree_unlock(right);
3366 free_extent_buffer(right);
3367 return 1;
3368}
3369
3370/*
3371 * push some data in the path leaf to the left, trying to free up at
3372 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3373 *
3374 * max_slot can put a limit on how far into the leaf we'll push items. The
 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3376 * items
3377 */
3378static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3379 struct btrfs_root *root,
3380 struct btrfs_path *path, int data_size,
3381 int empty, struct extent_buffer *left,
3382 int free_space, u32 right_nritems,
3383 u32 max_slot)
3384{
3385 struct btrfs_disk_key disk_key;
3386 struct extent_buffer *right = path->nodes[0];
3387 int i;
3388 int push_space = 0;
3389 int push_items = 0;
3390 struct btrfs_item *item;
3391 u32 old_left_nritems;
3392 u32 nr;
3393 int ret = 0;
3394 u32 this_item_size;
3395 u32 old_left_item_size;
3396 struct btrfs_map_token token;
3397
3398 btrfs_init_map_token(&token);
3399
3400 if (empty)
3401 nr = min(right_nritems, max_slot);
3402 else
3403 nr = min(right_nritems - 1, max_slot);
3404
3405 for (i = 0; i < nr; i++) {
3406 item = btrfs_item_nr(right, i);
3407
3408 if (!empty && push_items > 0) {
3409 if (path->slots[0] < i)
3410 break;
3411 if (path->slots[0] == i) {
3412 int space = btrfs_leaf_free_space(root, right);
3413 if (space + push_space * 2 > free_space)
3414 break;
3415 }
3416 }
3417
3418 if (path->slots[0] == i)
3419 push_space += data_size;
3420
3421 this_item_size = btrfs_item_size(right, item);
3422 if (this_item_size + sizeof(*item) + push_space > free_space)
3423 break;
3424
3425 push_items++;
3426 push_space += this_item_size + sizeof(*item);
3427 }
3428
3429 if (push_items == 0) {
3430 ret = 1;
3431 goto out;
3432 }
	WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3435
3436 /* push data from right to left */
3437 copy_extent_buffer(left, right,
3438 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3439 btrfs_item_nr_offset(0),
3440 push_items * sizeof(struct btrfs_item));
3441
3442 push_space = BTRFS_LEAF_DATA_SIZE(root) -
3443 btrfs_item_offset_nr(right, push_items - 1);
3444
3445 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3446 leaf_data_end(root, left) - push_space,
3447 btrfs_leaf_data(right) +
3448 btrfs_item_offset_nr(right, push_items - 1),
3449 push_space);
3450 old_left_nritems = btrfs_header_nritems(left);
3451 BUG_ON(old_left_nritems <= 0);
3452
3453 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3454 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3455 u32 ioff;
3456
3457 item = btrfs_item_nr(left, i);
3458
3459 ioff = btrfs_token_item_offset(left, item, &token);
3460 btrfs_set_token_item_offset(left, item,
3461 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3462 &token);
3463 }
3464 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3465
3466 /* fixup right node */
3467 if (push_items > right_nritems) {
3468 printk(KERN_CRIT "push items %d nr %u\n", push_items,
3469 right_nritems);
3470 WARN_ON(1);
3471 }
3472
3473 if (push_items < right_nritems) {
3474 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3475 leaf_data_end(root, right);
3476 memmove_extent_buffer(right, btrfs_leaf_data(right) +
3477 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3478 btrfs_leaf_data(right) +
3479 leaf_data_end(root, right), push_space);
3480
3481 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3482 btrfs_item_nr_offset(push_items),
3483 (btrfs_header_nritems(right) - push_items) *
3484 sizeof(struct btrfs_item));
3485 }
3486 right_nritems -= push_items;
3487 btrfs_set_header_nritems(right, right_nritems);
3488 push_space = BTRFS_LEAF_DATA_SIZE(root);
3489 for (i = 0; i < right_nritems; i++) {
3490 item = btrfs_item_nr(right, i);
3491
3492 push_space = push_space - btrfs_token_item_size(right,
3493 item, &token);
3494 btrfs_set_token_item_offset(right, item, push_space, &token);
3495 }
3496
3497 btrfs_mark_buffer_dirty(left);
3498 if (right_nritems)
3499 btrfs_mark_buffer_dirty(right);
3500 else
3501 clean_tree_block(trans, root, right);
3502
3503 btrfs_item_key(right, &disk_key, 0);
3504 fixup_low_keys(trans, root, path, &disk_key, 1);
3505
3506 /* then fixup the leaf pointer in the path */
3507 if (path->slots[0] < push_items) {
3508 path->slots[0] += old_left_nritems;
3509 btrfs_tree_unlock(path->nodes[0]);
3510 free_extent_buffer(path->nodes[0]);
3511 path->nodes[0] = left;
3512 path->slots[1] -= 1;
3513 } else {
3514 btrfs_tree_unlock(left);
3515 free_extent_buffer(left);
3516 path->slots[0] -= push_items;
3517 }
3518 BUG_ON(path->slots[0] < 0);
3519 return ret;
3520out:
3521 btrfs_tree_unlock(left);
3522 free_extent_buffer(left);
3523 return ret;
3524}
3525
3526/*
3527 * push some data in the path leaf to the left, trying to free up at
3528 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3529 *
3530 * max_slot can put a limit on how far into the leaf we'll push items. The
3531 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3532 * items
3533 */
3534static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3535 *root, struct btrfs_path *path, int min_data_size,
3536 int data_size, int empty, u32 max_slot)
3537{
3538 struct extent_buffer *right = path->nodes[0];
3539 struct extent_buffer *left;
3540 int slot;
3541 int free_space;
3542 u32 right_nritems;
3543 int ret = 0;
3544
3545 slot = path->slots[1];
3546 if (slot == 0)
3547 return 1;
3548 if (!path->nodes[1])
3549 return 1;
3550
3551 right_nritems = btrfs_header_nritems(right);
3552 if (right_nritems == 0)
3553 return 1;
3554
3555 btrfs_assert_tree_locked(path->nodes[1]);
3556
3557 left = read_node_slot(root, path->nodes[1], slot - 1);
3558 if (left == NULL)
3559 return 1;
3560
3561 btrfs_tree_lock(left);
3562 btrfs_set_lock_blocking(left);
3563
3564 free_space = btrfs_leaf_free_space(root, left);
3565 if (free_space < data_size) {
3566 ret = 1;
3567 goto out;
3568 }
3569
3570 /* cow and double check */
3571 ret = btrfs_cow_block(trans, root, left,
3572 path->nodes[1], slot - 1, &left);
3573 if (ret) {
3574 /* we hit -ENOSPC, but it isn't fatal here */
3575 if (ret == -ENOSPC)
3576 ret = 1;
3577 goto out;
3578 }
3579
3580 free_space = btrfs_leaf_free_space(root, left);
3581 if (free_space < data_size) {
3582 ret = 1;
3583 goto out;
3584 }
3585
3586 return __push_leaf_left(trans, root, path, min_data_size,
3587 empty, left, free_space, right_nritems,
3588 max_slot);
3589out:
3590 btrfs_tree_unlock(left);
3591 free_extent_buffer(left);
3592 return ret;
3593}
3594
3595/*
 * helper for split_leaf(): copy the items from 'l' starting at 'mid'
 * into the new leaf 'right' and fix up the path to point at whichever
 * leaf now contains our slot.
3598 */
3599static noinline void copy_for_split(struct btrfs_trans_handle *trans,
3600 struct btrfs_root *root,
3601 struct btrfs_path *path,
3602 struct extent_buffer *l,
3603 struct extent_buffer *right,
3604 int slot, int mid, int nritems)
3605{
3606 int data_copy_size;
3607 int rt_data_off;
3608 int i;
3609 struct btrfs_disk_key disk_key;
3610 struct btrfs_map_token token;
3611
3612 btrfs_init_map_token(&token);
3613
3614 nritems = nritems - mid;
3615 btrfs_set_header_nritems(right, nritems);
3616 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
3617
3618 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
3619 btrfs_item_nr_offset(mid),
3620 nritems * sizeof(struct btrfs_item));
3621
3622 copy_extent_buffer(right, l,
3623 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
3624 data_copy_size, btrfs_leaf_data(l) +
3625 leaf_data_end(root, l), data_copy_size);
3626
3627 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
3628 btrfs_item_end_nr(l, mid);
3629
3630 for (i = 0; i < nritems; i++) {
3631 struct btrfs_item *item = btrfs_item_nr(right, i);
3632 u32 ioff;
3633
3634 ioff = btrfs_token_item_offset(right, item, &token);
3635 btrfs_set_token_item_offset(right, item,
3636 ioff + rt_data_off, &token);
3637 }
3638
3639 btrfs_set_header_nritems(l, mid);
3640 btrfs_item_key(right, &disk_key, 0);
3641 insert_ptr(trans, root, path, &disk_key, right->start,
3642 path->slots[1] + 1, 1);
3643
3644 btrfs_mark_buffer_dirty(right);
3645 btrfs_mark_buffer_dirty(l);
3646 BUG_ON(path->slots[0] != slot);
3647
3648 if (mid <= slot) {
3649 btrfs_tree_unlock(path->nodes[0]);
3650 free_extent_buffer(path->nodes[0]);
3651 path->nodes[0] = right;
3652 path->slots[0] -= mid;
3653 path->slots[1] += 1;
3654 } else {
3655 btrfs_tree_unlock(right);
3656 free_extent_buffer(right);
3657 }
3658
3659 BUG_ON(path->slots[0] < 0);
3660}
3661
3662/*
3663 * double splits happen when we need to insert a big item in the middle
3664 * of a leaf. A double split can leave us with 3 mostly empty leaves:
3665 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3666 * A B C
3667 *
3668 * We avoid this by trying to push the items on either side of our target
3669 * into the adjacent leaves. If all goes well we can avoid the double split
3670 * completely.
3671 */
3672static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3673 struct btrfs_root *root,
3674 struct btrfs_path *path,
3675 int data_size)
3676{
3677 int ret;
3678 int progress = 0;
3679 int slot;
3680 u32 nritems;
3681
3682 slot = path->slots[0];
3683
3684 /*
3685 * try to push all the items after our slot into the
3686 * right leaf
3687 */
3688 ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
3689 if (ret < 0)
3690 return ret;
3691
3692 if (ret == 0)
3693 progress++;
3694
3695 nritems = btrfs_header_nritems(path->nodes[0]);
3696 /*
3697 * our goal is to get our slot at the start or end of a leaf. If
3698 * we've done so we're done
3699 */
3700 if (path->slots[0] == 0 || path->slots[0] == nritems)
3701 return 0;
3702
3703 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
3704 return 0;
3705
	/* try to push all the items before our slot into the left leaf */
3707 slot = path->slots[0];
3708 ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
3709 if (ret < 0)
3710 return ret;
3711
3712 if (ret == 0)
3713 progress++;
3714
3715 if (progress)
3716 return 0;
3717 return 1;
3718}
3719
3720/*
3721 * split the path's leaf in two, making sure there is at least data_size
3722 * available for the resulting leaf level of the path.
3723 *
3724 * returns 0 if all went well and < 0 on failure.
3725 */
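/*
 * 'split' below encodes the strategy: 1 is a normal split at 'mid', 0
 * means the new item gets an empty leaf of its own (no items are
 * copied), and 2 is the double split described above
 * push_for_double_split().
 */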
3726static noinline int split_leaf(struct btrfs_trans_handle *trans,
3727 struct btrfs_root *root,
3728 struct btrfs_key *ins_key,
3729 struct btrfs_path *path, int data_size,
3730 int extend)
3731{
3732 struct btrfs_disk_key disk_key;
3733 struct extent_buffer *l;
3734 u32 nritems;
3735 int mid;
3736 int slot;
3737 struct extent_buffer *right;
3738 int ret = 0;
3739 int wret;
3740 int split;
3741 int num_doubles = 0;
3742 int tried_avoid_double = 0;
3743
3744 l = path->nodes[0];
3745 slot = path->slots[0];
3746 if (extend && data_size + btrfs_item_size_nr(l, slot) +
3747 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
3748 return -EOVERFLOW;
3749
3750 /* first try to make some room by pushing left and right */
3751 if (data_size) {
3752 wret = push_leaf_right(trans, root, path, data_size,
3753 data_size, 0, 0);
3754 if (wret < 0)
3755 return wret;
3756 if (wret) {
3757 wret = push_leaf_left(trans, root, path, data_size,
3758 data_size, 0, (u32)-1);
3759 if (wret < 0)
3760 return wret;
3761 }
3762 l = path->nodes[0];
3763
3764 /* did the pushes work? */
3765 if (btrfs_leaf_free_space(root, l) >= data_size)
3766 return 0;
3767 }
3768
3769 if (!path->nodes[1]) {
3770 ret = insert_new_root(trans, root, path, 1);
3771 if (ret)
3772 return ret;
3773 }
3774again:
3775 split = 1;
3776 l = path->nodes[0];
3777 slot = path->slots[0];
3778 nritems = btrfs_header_nritems(l);
3779 mid = (nritems + 1) / 2;
3780
3781 if (mid <= slot) {
3782 if (nritems == 1 ||
3783 leaf_space_used(l, mid, nritems - mid) + data_size >
3784 BTRFS_LEAF_DATA_SIZE(root)) {
3785 if (slot >= nritems) {
3786 split = 0;
3787 } else {
3788 mid = slot;
3789 if (mid != nritems &&
3790 leaf_space_used(l, mid, nritems - mid) +
3791 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
3792 if (data_size && !tried_avoid_double)
3793 goto push_for_double;
3794 split = 2;
3795 }
3796 }
3797 }
3798 } else {
3799 if (leaf_space_used(l, 0, mid) + data_size >
3800 BTRFS_LEAF_DATA_SIZE(root)) {
3801 if (!extend && data_size && slot == 0) {
3802 split = 0;
3803 } else if ((extend || !data_size) && slot == 0) {
3804 mid = 1;
3805 } else {
3806 mid = slot;
3807 if (mid != nritems &&
3808 leaf_space_used(l, mid, nritems - mid) +
3809 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
3810 if (data_size && !tried_avoid_double)
3811 goto push_for_double;
					split = 2;
3813 }
3814 }
3815 }
3816 }
3817
3818 if (split == 0)
3819 btrfs_cpu_key_to_disk(&disk_key, ins_key);
3820 else
3821 btrfs_item_key(l, &disk_key, mid);
3822
3823 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
3824 root->root_key.objectid,
3825 &disk_key, 0, l->start, 0);
3826 if (IS_ERR(right))
3827 return PTR_ERR(right);
3828
3829 root_add_used(root, root->leafsize);
3830
3831 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
3832 btrfs_set_header_bytenr(right, right->start);
3833 btrfs_set_header_generation(right, trans->transid);
3834 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
3835 btrfs_set_header_owner(right, root->root_key.objectid);
3836 btrfs_set_header_level(right, 0);
3837 write_extent_buffer(right, root->fs_info->fsid,
3838 (unsigned long)btrfs_header_fsid(right),
3839 BTRFS_FSID_SIZE);
3840
3841 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
3842 (unsigned long)btrfs_header_chunk_tree_uuid(right),
3843 BTRFS_UUID_SIZE);
3844
3845 if (split == 0) {
3846 if (mid <= slot) {
3847 btrfs_set_header_nritems(right, 0);
3848 insert_ptr(trans, root, path, &disk_key, right->start,
3849 path->slots[1] + 1, 1);
3850 btrfs_tree_unlock(path->nodes[0]);
3851 free_extent_buffer(path->nodes[0]);
3852 path->nodes[0] = right;
3853 path->slots[0] = 0;
3854 path->slots[1] += 1;
3855 } else {
3856 btrfs_set_header_nritems(right, 0);
3857 insert_ptr(trans, root, path, &disk_key, right->start,
3858 path->slots[1], 1);
3859 btrfs_tree_unlock(path->nodes[0]);
3860 free_extent_buffer(path->nodes[0]);
3861 path->nodes[0] = right;
3862 path->slots[0] = 0;
3863 if (path->slots[1] == 0)
3864 fixup_low_keys(trans, root, path,
3865 &disk_key, 1);
3866 }
3867 btrfs_mark_buffer_dirty(right);
3868 return ret;
3869 }
3870
3871 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
3872
3873 if (split == 2) {
3874 BUG_ON(num_doubles != 0);
3875 num_doubles++;
3876 goto again;
3877 }
3878
3879 return 0;
3880
3881push_for_double:
3882 push_for_double_split(trans, root, path, data_size);
3883 tried_avoid_double = 1;
3884 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
3885 return 0;
3886 goto again;
3887}
3888
3889static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
3890 struct btrfs_root *root,
3891 struct btrfs_path *path, int ins_len)
3892{
3893 struct btrfs_key key;
3894 struct extent_buffer *leaf;
3895 struct btrfs_file_extent_item *fi;
3896 u64 extent_len = 0;
3897 u32 item_size;
3898 int ret;
3899
3900 leaf = path->nodes[0];
3901 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3902
3903 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
3904 key.type != BTRFS_EXTENT_CSUM_KEY);
3905
3906 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
3907 return 0;
3908
3909 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3910 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3911 fi = btrfs_item_ptr(leaf, path->slots[0],
3912 struct btrfs_file_extent_item);
3913 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
3914 }
3915 btrfs_release_path(path);
3916
3917 path->keep_locks = 1;
3918 path->search_for_split = 1;
3919 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3920 path->search_for_split = 0;
3921 if (ret < 0)
3922 goto err;
3923
3924	ret = -EAGAIN;
3925	leaf = path->nodes[0];
3926	/* if our item isn't there or got smaller, return now */
3927	if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
3928		goto err;
3929
3930 /* the leaf has changed, it now has room. return now */
3931 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
3932 goto err;
3933
3934 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3935 fi = btrfs_item_ptr(leaf, path->slots[0],
3936 struct btrfs_file_extent_item);
3937 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
3938 goto err;
3939 }
3940
3941 btrfs_set_path_blocking(path);
3942 ret = split_leaf(trans, root, &key, path, ins_len, 1);
3943 if (ret)
3944 goto err;
3945
3946 path->keep_locks = 0;
3947 btrfs_unlock_up_safe(path, 1);
3948 return 0;
3949err:
3950 path->keep_locks = 0;
3951 return ret;
3952}
3953
3954static noinline int split_item(struct btrfs_trans_handle *trans,
3955 struct btrfs_root *root,
3956 struct btrfs_path *path,
3957 struct btrfs_key *new_key,
3958 unsigned long split_offset)
3959{
3960 struct extent_buffer *leaf;
3961 struct btrfs_item *item;
3962 struct btrfs_item *new_item;
3963 int slot;
3964 char *buf;
3965 u32 nritems;
3966 u32 item_size;
3967 u32 orig_offset;
3968 struct btrfs_disk_key disk_key;
3969
3970 leaf = path->nodes[0];
3971 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
3972
3973 btrfs_set_path_blocking(path);
3974
3975 item = btrfs_item_nr(leaf, path->slots[0]);
3976 orig_offset = btrfs_item_offset(leaf, item);
3977 item_size = btrfs_item_size(leaf, item);
3978
3979 buf = kmalloc(item_size, GFP_NOFS);
3980 if (!buf)
3981 return -ENOMEM;
3982
3983 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3984 path->slots[0]), item_size);
3985
3986 slot = path->slots[0] + 1;
3987 nritems = btrfs_header_nritems(leaf);
3988 if (slot != nritems) {
3989 /* shift the items */
3990 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
3991 btrfs_item_nr_offset(slot),
3992 (nritems - slot) * sizeof(struct btrfs_item));
3993 }
3994
3995 btrfs_cpu_key_to_disk(&disk_key, new_key);
3996 btrfs_set_item_key(leaf, &disk_key, slot);
3997
3998 new_item = btrfs_item_nr(leaf, slot);
3999
4000 btrfs_set_item_offset(leaf, new_item, orig_offset);
4001 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4002
4003 btrfs_set_item_offset(leaf, item,
4004 orig_offset + item_size - split_offset);
4005 btrfs_set_item_size(leaf, item, split_offset);
4006
4007 btrfs_set_header_nritems(leaf, nritems + 1);
4008
4009 /* write the data for the start of the original item */
4010 write_extent_buffer(leaf, buf,
4011 btrfs_item_ptr_offset(leaf, path->slots[0]),
4012 split_offset);
4013
4014 /* write the data for the new item */
4015 write_extent_buffer(leaf, buf + split_offset,
4016 btrfs_item_ptr_offset(leaf, slot),
4017 item_size - split_offset);
4018 btrfs_mark_buffer_dirty(leaf);
4019
4020 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
4021 kfree(buf);
4022 return 0;
4023}
4024
4025/*
4026 * This function splits a single item into two items,
4027 * giving 'new_key' to the new item and splitting the
4028 * old one at split_offset (from the start of the item).
4029 *
4030 * The path may be released by this operation. After
4031 * the split, the path is pointing to the old item. The
4032 * new item is going to be in the same node as the old one.
4033 *
4034 * Note: the item being split must be small enough to live alone on
4035 * a tree block with room for one extra struct btrfs_item
4036 *
4037 * This allows us to split the item in place, keeping a lock on the
4038 * leaf the entire time.
4039 */
4040int btrfs_split_item(struct btrfs_trans_handle *trans,
4041 struct btrfs_root *root,
4042 struct btrfs_path *path,
4043 struct btrfs_key *new_key,
4044 unsigned long split_offset)
4045{
4046 int ret;
4047 ret = setup_leaf_for_split(trans, root, path,
4048 sizeof(struct btrfs_item));
4049 if (ret)
4050 return ret;
4051
4052 ret = split_item(trans, root, path, new_key, split_offset);
4053 return ret;
4054}
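
/*
 * A minimal usage sketch (hypothetical caller, not from this file),
 * assuming 'trans', 'root' and a 'path' positioned on the item:
 *
 *	struct btrfs_key new_key = old_key;
 *	new_key.offset += split_offset;		(key for the second half)
 *	ret = btrfs_split_item(trans, root, path, &new_key, split_offset);
 *
 * -EAGAIN means the item changed while the path was briefly dropped in
 * setup_leaf_for_split(); the caller is expected to search and retry.
 */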
4055
4056/*
4057 * This function duplicates an item, giving 'new_key' to the new item.
4058 * It guarantees both items live in the same tree leaf and the new item
4059 * is contiguous with the original item.
4060 *
4061 * This allows us to split a file extent in place, keeping a lock on the
4062 * leaf the entire time.
4063 */
4064int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4065 struct btrfs_root *root,
4066 struct btrfs_path *path,
4067 struct btrfs_key *new_key)
4068{
4069 struct extent_buffer *leaf;
4070 int ret;
4071 u32 item_size;
4072
4073 leaf = path->nodes[0];
4074 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4075 ret = setup_leaf_for_split(trans, root, path,
4076 item_size + sizeof(struct btrfs_item));
4077 if (ret)
4078 return ret;
4079
4080 path->slots[0]++;
4081 setup_items_for_insert(trans, root, path, new_key, &item_size,
4082 item_size, item_size +
4083 sizeof(struct btrfs_item), 1);
4084 leaf = path->nodes[0];
4085 memcpy_extent_buffer(leaf,
4086 btrfs_item_ptr_offset(leaf, path->slots[0]),
4087 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4088 item_size);
4089 return 0;
4090}
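
/*
 * Minimal sketch of a caller (hypothetical, for illustration only):
 * duplicating a file extent item before trimming the two copies to cover
 * disjoint ranges.
 *
 *	new_key = orig_key;
 *	new_key.offset = split_pos;
 *	ret = btrfs_duplicate_item(trans, root, path, &new_key);
 *
 * On success path->slots[0] points at the new (second) copy and the
 * original item sits at path->slots[0] - 1 in the same leaf.
 */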
4091
4092/*
4093 * make the item pointed to by the path smaller. new_size indicates
4094 * how small to make it, and from_end tells us if we just chop bytes
4095 * off the end of the item or if we shift the item to chop bytes off
4096 * the front.
4097 * the front.
 */
4098void btrfs_truncate_item(struct btrfs_trans_handle *trans,
4099 struct btrfs_root *root,
4100 struct btrfs_path *path,
4101 u32 new_size, int from_end)
4102{
4103 int slot;
4104 struct extent_buffer *leaf;
4105 struct btrfs_item *item;
4106 u32 nritems;
4107 unsigned int data_end;
4108 unsigned int old_data_start;
4109 unsigned int old_size;
4110 unsigned int size_diff;
4111 int i;
4112 struct btrfs_map_token token;
4113
4114 btrfs_init_map_token(&token);
4115
4116 leaf = path->nodes[0];
4117 slot = path->slots[0];
4118
4119 old_size = btrfs_item_size_nr(leaf, slot);
4120 if (old_size == new_size)
4121 return;
4122
4123 nritems = btrfs_header_nritems(leaf);
4124 data_end = leaf_data_end(root, leaf);
4125
4126 old_data_start = btrfs_item_offset_nr(leaf, slot);
4127
4128 size_diff = old_size - new_size;
4129
4130 BUG_ON(slot < 0);
4131 BUG_ON(slot >= nritems);
4132
4133 /*
4134 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4135 */
4136 /* first correct the data pointers */
4137 for (i = slot; i < nritems; i++) {
4138 u32 ioff;
4139 item = btrfs_item_nr(leaf, i);
4140
4141 ioff = btrfs_token_item_offset(leaf, item, &token);
4142 btrfs_set_token_item_offset(leaf, item,
4143 ioff + size_diff, &token);
4144 }
4145
4146 /* shift the data */
4147 if (from_end) {
4148 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4149 data_end + size_diff, btrfs_leaf_data(leaf) +
4150 data_end, old_data_start + new_size - data_end);
4151 } else {
4152 struct btrfs_disk_key disk_key;
4153 u64 offset;
4154
4155 btrfs_item_key(leaf, &disk_key, slot);
4156
4157 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4158 unsigned long ptr;
4159 struct btrfs_file_extent_item *fi;
4160
4161 fi = btrfs_item_ptr(leaf, slot,
4162 struct btrfs_file_extent_item);
4163 fi = (struct btrfs_file_extent_item *)(
4164 (unsigned long)fi - size_diff);
4165
4166 if (btrfs_file_extent_type(leaf, fi) ==
4167 BTRFS_FILE_EXTENT_INLINE) {
4168 ptr = btrfs_item_ptr_offset(leaf, slot);
4169 memmove_extent_buffer(leaf, ptr,
4170 (unsigned long)fi,
4171 offsetof(struct btrfs_file_extent_item,
4172 disk_bytenr));
4173 }
4174 }
4175
4176 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4177 data_end + size_diff, btrfs_leaf_data(leaf) +
4178 data_end, old_data_start - data_end);
4179
4180 offset = btrfs_disk_key_offset(&disk_key);
4181 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4182 btrfs_set_item_key(leaf, &disk_key, slot);
4183 if (slot == 0)
4184 fixup_low_keys(trans, root, path, &disk_key, 1);
4185 }
4186
4187 item = btrfs_item_nr(leaf, slot);
4188 btrfs_set_item_size(leaf, item, new_size);
4189 btrfs_mark_buffer_dirty(leaf);
4190
4191 if (btrfs_leaf_free_space(root, leaf) < 0) {
4192 btrfs_print_leaf(root, leaf);
4193 BUG();
4194 }
4195}
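
/*
 * Worked example with illustrative numbers: truncating a 100 byte item to
 * 60 bytes makes size_diff = 40, so the data offset of every item at or
 * after this slot grows by 40 (e.g. 3000 becomes 3040) and the leaf gains
 * 40 bytes of free space in the data area.  With from_end clear the same
 * shift happens, but the key offset is also bumped by 40 because the front
 * of the item is what was chopped off.
 */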
4196
4197/*
4198 * make the item pointed to by the path bigger, data_size is the new size.
4199 */
4200void btrfs_extend_item(struct btrfs_trans_handle *trans,
4201 struct btrfs_root *root, struct btrfs_path *path,
4202 u32 data_size)
4203{
4204 int slot;
4205 struct extent_buffer *leaf;
4206 struct btrfs_item *item;
4207 u32 nritems;
4208 unsigned int data_end;
4209 unsigned int old_data;
4210 unsigned int old_size;
4211 int i;
4212 struct btrfs_map_token token;
4213
4214 btrfs_init_map_token(&token);
4215
4216 leaf = path->nodes[0];
4217
4218 nritems = btrfs_header_nritems(leaf);
4219 data_end = leaf_data_end(root, leaf);
4220
4221 if (btrfs_leaf_free_space(root, leaf) < data_size) {
4222 btrfs_print_leaf(root, leaf);
4223 BUG();
4224 }
4225 slot = path->slots[0];
4226 old_data = btrfs_item_end_nr(leaf, slot);
4227
4228 BUG_ON(slot < 0);
4229 if (slot >= nritems) {
4230 btrfs_print_leaf(root, leaf);
4231 printk(KERN_CRIT "slot %d too large, nritems %d\n",
4232 slot, nritems);
4233 BUG_ON(1);
4234 }
4235
4236 /*
4237 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4238 */
4239 /* first correct the data pointers */
4240 for (i = slot; i < nritems; i++) {
4241 u32 ioff;
4242 item = btrfs_item_nr(leaf, i);
4243
4244 ioff = btrfs_token_item_offset(leaf, item, &token);
4245 btrfs_set_token_item_offset(leaf, item,
4246 ioff - data_size, &token);
4247 }
4248
4249 /* shift the data */
4250 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4251 data_end - data_size, btrfs_leaf_data(leaf) +
4252 data_end, old_data - data_end);
4253
4254 data_end = old_data;
4255 old_size = btrfs_item_size_nr(leaf, slot);
4256 item = btrfs_item_nr(leaf, slot);
4257 btrfs_set_item_size(leaf, item, old_size + data_size);
4258 btrfs_mark_buffer_dirty(leaf);
4259
4260 if (btrfs_leaf_free_space(root, leaf) < 0) {
4261 btrfs_print_leaf(root, leaf);
4262 BUG();
4263 }
4264}
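
/*
 * This is the mirror image of btrfs_truncate_item(): growing an item by
 * data_size bytes slides everything at or after this slot toward the front
 * of the data area, so (illustrative numbers) extending by 16 bytes turns
 * a data offset of 3040 into 3024 and consumes 16 bytes of the leaf's free
 * space, which the caller must have verified is available.
 */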
4265
4266/*
4267 * Given a key and some data, insert items into the tree.
4268 * This does all the path init required, making room in the tree if needed.
4269 * Returns the number of keys that were inserted.
4270 */
4271int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
4272 struct btrfs_root *root,
4273 struct btrfs_path *path,
4274 struct btrfs_key *cpu_key, u32 *data_size,
4275 int nr)
4276{
4277 struct extent_buffer *leaf;
4278 struct btrfs_item *item;
4279 int ret = 0;
4280 int slot;
4281 int i;
4282 u32 nritems;
4283 u32 total_data = 0;
4284 u32 total_size = 0;
4285 unsigned int data_end;
4286 struct btrfs_disk_key disk_key;
4287 struct btrfs_key found_key;
4288 struct btrfs_map_token token;
4289
4290 btrfs_init_map_token(&token);
4291
4292 for (i = 0; i < nr; i++) {
4293 if (total_size + data_size[i] + sizeof(struct btrfs_item) >
4294 BTRFS_LEAF_DATA_SIZE(root)) {
4295			nr = i;
4296			break;
4297 }
4298 total_data += data_size[i];
4299 total_size += data_size[i] + sizeof(struct btrfs_item);
4300 }
4301 BUG_ON(nr == 0);
4302
4303 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4304 if (ret == 0)
4305 return -EEXIST;
4306 if (ret < 0)
4307 goto out;
4308
4309 leaf = path->nodes[0];
4310
4311 nritems = btrfs_header_nritems(leaf);
4312 data_end = leaf_data_end(root, leaf);
4313
4314 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4315		for (i = nr - 1; i >= 0; i--) {
4316 total_data -= data_size[i];
4317 total_size -= data_size[i] + sizeof(struct btrfs_item);
4318 if (total_size < btrfs_leaf_free_space(root, leaf))
4319 break;
4320 }
4321 nr = i;
4322 }
4323
4324 slot = path->slots[0];
4325 BUG_ON(slot < 0);
4326
4327 if (slot != nritems) {
4328 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4329
4330 item = btrfs_item_nr(leaf, slot);
4331 btrfs_item_key_to_cpu(leaf, &found_key, slot);
4332
4333 /* figure out how many keys we can insert in here */
4334 total_data = data_size[0];
4335 for (i = 1; i < nr; i++) {
4336 if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
4337 break;
4338 total_data += data_size[i];
4339 }
4340 nr = i;
4341
4342 if (old_data < data_end) {
4343 btrfs_print_leaf(root, leaf);
4344 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
4345 slot, old_data, data_end);
4346 BUG_ON(1);
4347 }
4348 /*
4349 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4350 */
4351 /* first correct the data pointers */
4352 for (i = slot; i < nritems; i++) {
4353 u32 ioff;
4354
4355 item = btrfs_item_nr(leaf, i);
4356 ioff = btrfs_token_item_offset(leaf, item, &token);
4357 btrfs_set_token_item_offset(leaf, item,
4358 ioff - total_data, &token);
4359 }
4360 /* shift the items */
4361 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4362 btrfs_item_nr_offset(slot),
4363 (nritems - slot) * sizeof(struct btrfs_item));
4364
4365 /* shift the data */
4366 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4367 data_end - total_data, btrfs_leaf_data(leaf) +
4368 data_end, old_data - data_end);
4369 data_end = old_data;
4370 } else {
4371 /*
4372 * this sucks but it has to be done, if we are inserting at
4373		 * this sucks but it has to be done: if we are inserting at
4374		 * the end of the leaf we only insert 1 of the items, since we
4375		 * have no way of knowing what's on the next leaf and we'd have
4376 */
4377 nr = 1;
4378 }
4379
4380 /* setup the item for the new data */
4381 for (i = 0; i < nr; i++) {
4382 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4383 btrfs_set_item_key(leaf, &disk_key, slot + i);
4384 item = btrfs_item_nr(leaf, slot + i);
4385 btrfs_set_token_item_offset(leaf, item,
4386 data_end - data_size[i], &token);
4387 data_end -= data_size[i];
4388 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4389 }
4390 btrfs_set_header_nritems(leaf, nritems + nr);
4391 btrfs_mark_buffer_dirty(leaf);
4392
4393 ret = 0;
4394 if (slot == 0) {
4395 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4396 fixup_low_keys(trans, root, path, &disk_key, 1);
4397 }
4398
4399 if (btrfs_leaf_free_space(root, leaf) < 0) {
4400 btrfs_print_leaf(root, leaf);
4401 BUG();
4402 }
4403out:
4404 if (!ret)
4405 ret = nr;
4406 return ret;
4407}
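
/*
 * Sketch of the calling convention (hypothetical caller): unlike
 * btrfs_insert_empty_items() this may insert fewer keys than requested, so
 * the return value has to be checked against nr.
 *
 *	ret = btrfs_insert_some_items(trans, root, path, keys, sizes, nr);
 *	if (ret < 0)
 *		(error)
 *	else
 *		(ret keys were inserted; advance keys/sizes and loop)
 */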
4408
4409/*
4410 * this is a helper for btrfs_insert_empty_items, the main goal here is
4411 * to save stack depth by doing the bulk of the work in a function
4412 * that doesn't call btrfs_search_slot
4413 */
4414void setup_items_for_insert(struct btrfs_trans_handle *trans,
4415 struct btrfs_root *root, struct btrfs_path *path,
4416 struct btrfs_key *cpu_key, u32 *data_size,
4417 u32 total_data, u32 total_size, int nr)
4418{
4419 struct btrfs_item *item;
4420 int i;
4421 u32 nritems;
4422 unsigned int data_end;
4423 struct btrfs_disk_key disk_key;
4424 struct extent_buffer *leaf;
4425 int slot;
4426 struct btrfs_map_token token;
4427
4428 btrfs_init_map_token(&token);
4429
4430 leaf = path->nodes[0];
4431 slot = path->slots[0];
4432
4433 nritems = btrfs_header_nritems(leaf);
4434 data_end = leaf_data_end(root, leaf);
4435
4436 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4437 btrfs_print_leaf(root, leaf);
4438 printk(KERN_CRIT "not enough freespace need %u have %d\n",
4439 total_size, btrfs_leaf_free_space(root, leaf));
4440 BUG();
4441 }
4442
4443 if (slot != nritems) {
4444 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4445
4446 if (old_data < data_end) {
4447 btrfs_print_leaf(root, leaf);
4448 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
4449 slot, old_data, data_end);
4450 BUG_ON(1);
4451 }
4452 /*
4453 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4454 */
4455 /* first correct the data pointers */
4456 for (i = slot; i < nritems; i++) {
4457 u32 ioff;
4458
4459 item = btrfs_item_nr(leaf, i);
4460 ioff = btrfs_token_item_offset(leaf, item, &token);
4461 btrfs_set_token_item_offset(leaf, item,
4462 ioff - total_data, &token);
4463 }
4464 /* shift the items */
4465 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4466 btrfs_item_nr_offset(slot),
4467 (nritems - slot) * sizeof(struct btrfs_item));
4468
4469 /* shift the data */
4470 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4471 data_end - total_data, btrfs_leaf_data(leaf) +
4472 data_end, old_data - data_end);
4473 data_end = old_data;
4474 }
4475
4476 /* setup the item for the new data */
4477 for (i = 0; i < nr; i++) {
4478 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4479 btrfs_set_item_key(leaf, &disk_key, slot + i);
4480 item = btrfs_item_nr(leaf, slot + i);
4481 btrfs_set_token_item_offset(leaf, item,
4482 data_end - data_size[i], &token);
4483 data_end -= data_size[i];
4484 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4485 }
4486
4487 btrfs_set_header_nritems(leaf, nritems + nr);
4488
4489 if (slot == 0) {
4490 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4491 fixup_low_keys(trans, root, path, &disk_key, 1);
4492 }
4493 btrfs_unlock_up_safe(path, 1);
4494 btrfs_mark_buffer_dirty(leaf);
4495
4496 if (btrfs_leaf_free_space(root, leaf) < 0) {
4497 btrfs_print_leaf(root, leaf);
4498 BUG();
4499 }
4500}
4501
4502/*
4503 * Given a key and some data, insert items into the tree.
4504 * This does all the path init required, making room in the tree if needed.
4505 */
4506int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4507 struct btrfs_root *root,
4508 struct btrfs_path *path,
4509 struct btrfs_key *cpu_key, u32 *data_size,
4510 int nr)
4511{
4512 int ret = 0;
4513 int slot;
4514 int i;
4515 u32 total_size = 0;
4516 u32 total_data = 0;
4517
4518 for (i = 0; i < nr; i++)
4519 total_data += data_size[i];
4520
4521 total_size = total_data + (nr * sizeof(struct btrfs_item));
4522 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4523 if (ret == 0)
4524 return -EEXIST;
4525 if (ret < 0)
4526 return ret;
4527
4528 slot = path->slots[0];
4529 BUG_ON(slot < 0);
4530
4531 setup_items_for_insert(trans, root, path, cpu_key, data_size,
4532 total_data, total_size, nr);
4533 return 0;
4534}
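
/*
 * Minimal usage sketch (hypothetical caller and sizes): reserving room for
 * two items in one pass, then filling in the data afterwards.
 *
 *	u32 sizes[2] = { first_item_size, second_item_size };
 *	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
 *	if (ret)
 *		(return; -EEXIST means the first key already exists)
 *	(write each item with write_extent_buffer(), starting from
 *	 btrfs_item_ptr_offset(path->nodes[0], path->slots[0]))
 */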
4535
4536/*
4537 * Given a key and some data, insert an item into the tree.
4538 * This does all the path init required, making room in the tree if needed.
4539 */
4540int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4541 *root, struct btrfs_key *cpu_key, void *data, u32
4542 data_size)
4543{
4544 int ret = 0;
4545 struct btrfs_path *path;
4546 struct extent_buffer *leaf;
4547 unsigned long ptr;
4548
4549 path = btrfs_alloc_path();
4550 if (!path)
4551 return -ENOMEM;
4552 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4553 if (!ret) {
4554 leaf = path->nodes[0];
4555 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4556 write_extent_buffer(leaf, data, ptr, data_size);
4557 btrfs_mark_buffer_dirty(leaf);
4558 }
4559 btrfs_free_path(path);
4560 return ret;
4561}
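
/*
 * Example shape of a call (hypothetical key and buffer): the path is
 * allocated and freed internally, so this is the one-shot variant of
 * btrfs_insert_empty_item() plus write_extent_buffer().
 *
 *	struct btrfs_key key;
 *	key.objectid = objectid;
 *	key.type = BTRFS_XATTR_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_insert_item(trans, root, &key, buf, buf_len);
 */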
4562
4563/*
4564 * delete the pointer from a given node.
4565 *
4566 * the tree should have been previously balanced so the deletion does not
4567 * empty a node.
4568 */
4569static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4570 struct btrfs_path *path, int level, int slot,
4571 int tree_mod_log)
4572{
4573 struct extent_buffer *parent = path->nodes[level];
4574 u32 nritems;
4575 int ret;
4576
4577 nritems = btrfs_header_nritems(parent);
4578 if (slot != nritems - 1) {
4579 if (tree_mod_log && level)
4580 tree_mod_log_eb_move(root->fs_info, parent, slot,
4581 slot + 1, nritems - slot - 1);
4582 memmove_extent_buffer(parent,
4583 btrfs_node_key_ptr_offset(slot),
4584 btrfs_node_key_ptr_offset(slot + 1),
4585 sizeof(struct btrfs_key_ptr) *
4586 (nritems - slot - 1));
4587 } else if (tree_mod_log && level) {
4588 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4589 MOD_LOG_KEY_REMOVE);
4590 BUG_ON(ret < 0);
4591 }
4592
4593 nritems--;
4594 btrfs_set_header_nritems(parent, nritems);
4595 if (nritems == 0 && parent == root->node) {
4596 BUG_ON(btrfs_header_level(root->node) != 1);
4597 /* just turn the root into a leaf and break */
4598 btrfs_set_header_level(root->node, 0);
4599 } else if (slot == 0) {
4600 struct btrfs_disk_key disk_key;
4601
4602 btrfs_node_key(parent, &disk_key, 0);
4603 fixup_low_keys(trans, root, path, &disk_key, level + 1);
4604 }
4605 btrfs_mark_buffer_dirty(parent);
4606}
4607
4608/*
4609 * a helper function to delete the leaf pointed to by path->slots[1] and
4610 * path->nodes[1].
4611 *
4612 * This deletes the pointer in path->nodes[1] and frees the leaf
4613 * block extent.
4614 *
4615 * The path must have already been setup for deleting the leaf, including
4616 * all the proper balancing. path->nodes[1] must be locked.
4617 */
4618static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4619 struct btrfs_root *root,
4620 struct btrfs_path *path,
4621 struct extent_buffer *leaf)
4622{
4623 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4624 del_ptr(trans, root, path, 1, path->slots[1], 1);
4625
4626 /*
4627 * btrfs_free_extent is expensive, we want to make sure we
4628 * aren't holding any locks when we call it
4629 */
4630 btrfs_unlock_up_safe(path, 0);
4631
4632 root_sub_used(root, leaf->len);
4633
4634 extent_buffer_get(leaf);
4635 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4636 free_extent_buffer_stale(leaf);
4637}
4638/*
4639 * delete the item at the leaf level in path. If that empties
4640 * the leaf, remove it from the tree
4641 */
4642int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4643 struct btrfs_path *path, int slot, int nr)
4644{
4645 struct extent_buffer *leaf;
4646 struct btrfs_item *item;
4647 int last_off;
4648 int dsize = 0;
4649 int ret = 0;
4650 int wret;
4651 int i;
4652 u32 nritems;
4653 struct btrfs_map_token token;
4654
4655 btrfs_init_map_token(&token);
4656
4657 leaf = path->nodes[0];
4658 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4659
4660 for (i = 0; i < nr; i++)
4661 dsize += btrfs_item_size_nr(leaf, slot + i);
4662
4663 nritems = btrfs_header_nritems(leaf);
4664
4665 if (slot + nr != nritems) {
4666 int data_end = leaf_data_end(root, leaf);
4667
4668 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4669 data_end + dsize,
4670 btrfs_leaf_data(leaf) + data_end,
4671 last_off - data_end);
4672
4673 for (i = slot + nr; i < nritems; i++) {
4674 u32 ioff;
4675
4676 item = btrfs_item_nr(leaf, i);
4677 ioff = btrfs_token_item_offset(leaf, item, &token);
4678 btrfs_set_token_item_offset(leaf, item,
4679 ioff + dsize, &token);
4680 }
4681
4682 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4683 btrfs_item_nr_offset(slot + nr),
4684 sizeof(struct btrfs_item) *
4685 (nritems - slot - nr));
4686 }
4687 btrfs_set_header_nritems(leaf, nritems - nr);
4688 nritems -= nr;
4689
4690 /* delete the leaf if we've emptied it */
4691 if (nritems == 0) {
4692 if (leaf == root->node) {
4693 btrfs_set_header_level(leaf, 0);
4694 } else {
4695 btrfs_set_path_blocking(path);
4696 clean_tree_block(trans, root, leaf);
4697 btrfs_del_leaf(trans, root, path, leaf);
4698 }
4699 } else {
4700 int used = leaf_space_used(leaf, 0, nritems);
4701 if (slot == 0) {
4702 struct btrfs_disk_key disk_key;
4703
4704 btrfs_item_key(leaf, &disk_key, 0);
4705 fixup_low_keys(trans, root, path, &disk_key, 1);
4706 }
4707
4708 /* delete the leaf if it is mostly empty */
4709 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
4710 /* push_leaf_left fixes the path.
4711 * make sure the path still points to our leaf
4712 * for possible call to del_ptr below
4713 */
4714 slot = path->slots[1];
4715 extent_buffer_get(leaf);
4716
4717 btrfs_set_path_blocking(path);
4718 wret = push_leaf_left(trans, root, path, 1, 1,
4719 1, (u32)-1);
4720 if (wret < 0 && wret != -ENOSPC)
4721 ret = wret;
4722
4723 if (path->nodes[0] == leaf &&
4724 btrfs_header_nritems(leaf)) {
4725 wret = push_leaf_right(trans, root, path, 1,
4726 1, 1, 0);
4727 if (wret < 0 && wret != -ENOSPC)
4728 ret = wret;
4729 }
4730
4731 if (btrfs_header_nritems(leaf) == 0) {
4732 path->slots[1] = slot;
4733 btrfs_del_leaf(trans, root, path, leaf);
4734 free_extent_buffer(leaf);
4735 ret = 0;
4736 } else {
4737 /* if we're still in the path, make sure
4738 * we're dirty. Otherwise, one of the
4739 * push_leaf functions must have already
4740 * dirtied this buffer
4741 */
4742 if (path->nodes[0] == leaf)
4743 btrfs_mark_buffer_dirty(leaf);
4744 free_extent_buffer(leaf);
4745 }
4746 } else {
4747 btrfs_mark_buffer_dirty(leaf);
4748 }
4749 }
4750 return ret;
4751}
4752
4753/*
4754 * search the tree again to find a leaf with lesser keys
4755 * returns 0 if it found something or 1 if there are no lesser leaves.
4756 * returns < 0 on io errors.
4757 *
4758 * This may release the path, and so you may lose any locks held at the
4759 * time you call it.
4760 */
4761int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
4762{
4763 struct btrfs_key key;
4764 struct btrfs_disk_key found_key;
4765 int ret;
4766
4767 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
4768
4769 if (key.offset > 0)
4770 key.offset--;
4771 else if (key.type > 0)
4772 key.type--;
4773 else if (key.objectid > 0)
4774 key.objectid--;
4775 else
4776 return 1;
4777
4778 btrfs_release_path(path);
4779 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4780 if (ret < 0)
4781 return ret;
4782 btrfs_item_key(path->nodes[0], &found_key, 0);
4783 ret = comp_keys(&found_key, &key);
4784 if (ret < 0)
4785 return 0;
4786 return 1;
4787}
4788
4789/*
4790 * A helper function to walk down the tree starting at min_key, and looking
4791 * for nodes or leaves that are either in cache or have a minimum
4792 * transaction id. This is used by the btree defrag code, and tree logging
4793 *
4794 * This does not cow, but it does stuff the starting key it finds back
4795 * into min_key, so you can call btrfs_search_slot with cow=1 on the
4796 * key and get a writable path.
4797 *
4798 * This does lock as it descends, and path->keep_locks should be set
4799 * to 1 by the caller.
4800 *
4801 * This honors path->lowest_level to prevent descent past a given level
4802 * of the tree.
4803 *
4804 * min_trans indicates the oldest transaction that you are interested
4805 * in walking through. Any nodes or leaves older than min_trans are
4806 * skipped over (without reading them).
4807 *
4808 * returns zero if something useful was found, < 0 on error and 1 if there
4809 * was nothing in the tree that matched the search criteria.
4810 */
4811int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
4812 struct btrfs_key *max_key,
4813 struct btrfs_path *path, int cache_only,
4814 u64 min_trans)
4815{
4816 struct extent_buffer *cur;
4817 struct btrfs_key found_key;
4818 int slot;
4819 int sret;
4820 u32 nritems;
4821 int level;
4822 int ret = 1;
4823
4824 WARN_ON(!path->keep_locks);
4825again:
4826 cur = btrfs_read_lock_root_node(root);
4827 level = btrfs_header_level(cur);
4828 WARN_ON(path->nodes[level]);
4829 path->nodes[level] = cur;
4830 path->locks[level] = BTRFS_READ_LOCK;
4831
4832 if (btrfs_header_generation(cur) < min_trans) {
4833 ret = 1;
4834 goto out;
4835 }
4836 while (1) {
4837 nritems = btrfs_header_nritems(cur);
4838 level = btrfs_header_level(cur);
4839 sret = bin_search(cur, min_key, level, &slot);
4840
4841 /* at the lowest level, we're done, setup the path and exit */
4842 if (level == path->lowest_level) {
4843 if (slot >= nritems)
4844 goto find_next_key;
4845 ret = 0;
4846 path->slots[level] = slot;
4847 btrfs_item_key_to_cpu(cur, &found_key, slot);
4848 goto out;
4849 }
4850 if (sret && slot > 0)
4851 slot--;
4852 /*
4853 * check this node pointer against the cache_only and
4854 * min_trans parameters. If it isn't in cache or is too
4855 * old, skip to the next one.
4856 */
4857 while (slot < nritems) {
4858 u64 blockptr;
4859 u64 gen;
4860 struct extent_buffer *tmp;
4861 struct btrfs_disk_key disk_key;
4862
4863 blockptr = btrfs_node_blockptr(cur, slot);
4864 gen = btrfs_node_ptr_generation(cur, slot);
4865 if (gen < min_trans) {
4866 slot++;
4867 continue;
4868 }
4869 if (!cache_only)
4870 break;
4871
4872 if (max_key) {
4873 btrfs_node_key(cur, &disk_key, slot);
4874 if (comp_keys(&disk_key, max_key) >= 0) {
4875 ret = 1;
4876 goto out;
4877 }
4878 }
4879
4880 tmp = btrfs_find_tree_block(root, blockptr,
4881 btrfs_level_size(root, level - 1));
4882
4883 if (tmp && btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
4884 free_extent_buffer(tmp);
4885 break;
4886 }
4887 if (tmp)
4888 free_extent_buffer(tmp);
4889 slot++;
4890 }
4891find_next_key:
4892 /*
4893 * we didn't find a candidate key in this node, walk forward
4894 * and find another one
4895 */
4896 if (slot >= nritems) {
4897 path->slots[level] = slot;
4898 btrfs_set_path_blocking(path);
4899 sret = btrfs_find_next_key(root, path, min_key, level,
4900 cache_only, min_trans);
4901 if (sret == 0) {
4902 btrfs_release_path(path);
4903 goto again;
4904 } else {
4905 goto out;
4906 }
4907 }
4908 /* save our key for returning back */
4909 btrfs_node_key_to_cpu(cur, &found_key, slot);
4910 path->slots[level] = slot;
4911 if (level == path->lowest_level) {
4912 ret = 0;
4913 unlock_up(path, level, 1, 0, NULL);
4914 goto out;
4915 }
4916 btrfs_set_path_blocking(path);
4917 cur = read_node_slot(root, cur, slot);
4918 BUG_ON(!cur); /* -ENOMEM */
4919
4920 btrfs_tree_read_lock(cur);
4921
4922 path->locks[level - 1] = BTRFS_READ_LOCK;
4923 path->nodes[level - 1] = cur;
4924 unlock_up(path, level, 1, 0, NULL);
4925 btrfs_clear_path_blocking(path, NULL, 0);
4926 }
4927out:
4928 if (ret == 0)
4929 memcpy(min_key, &found_key, sizeof(found_key));
4930 btrfs_set_path_blocking(path);
4931 return ret;
4932}
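
/*
 * Typical driving loop (sketch of a hypothetical caller such as the defrag
 * code): min_key is advanced past each hit so the walk makes progress.
 *
 *	path->keep_locks = 1;
 *	while (btrfs_search_forward(root, &min_key, &max_key, path,
 *				    cache_only, min_trans) == 0) {
 *		(process path->nodes[path->lowest_level])
 *		btrfs_release_path(path);
 *		(increment min_key, stopping on overflow)
 *	}
 */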
4933
4934/*
4935 * this is similar to btrfs_next_leaf, but does not try to preserve
4936 * and fixup the path. It looks for and returns the next key in the
4937 * tree based on the current path and the cache_only and min_trans
4938 * parameters.
4939 *
4940 * 0 is returned if another key is found, < 0 if there are any errors
4941 * and 1 is returned if there are no higher keys in the tree
4942 *
4943 * path->keep_locks should be set to 1 on the search made before
4944 * calling this function.
4945 */
4946int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
4947 struct btrfs_key *key, int level,
4948 int cache_only, u64 min_trans)
4949{
4950 int slot;
4951 struct extent_buffer *c;
4952
4953 WARN_ON(!path->keep_locks);
4954 while (level < BTRFS_MAX_LEVEL) {
4955 if (!path->nodes[level])
4956 return 1;
4957
4958 slot = path->slots[level] + 1;
4959 c = path->nodes[level];
4960next:
4961 if (slot >= btrfs_header_nritems(c)) {
4962 int ret;
4963 int orig_lowest;
4964 struct btrfs_key cur_key;
4965 if (level + 1 >= BTRFS_MAX_LEVEL ||
4966 !path->nodes[level + 1])
4967 return 1;
4968
4969 if (path->locks[level + 1]) {
4970 level++;
4971 continue;
4972 }
4973
4974 slot = btrfs_header_nritems(c) - 1;
4975 if (level == 0)
4976 btrfs_item_key_to_cpu(c, &cur_key, slot);
4977 else
4978 btrfs_node_key_to_cpu(c, &cur_key, slot);
4979
4980 orig_lowest = path->lowest_level;
4981 btrfs_release_path(path);
4982 path->lowest_level = level;
4983 ret = btrfs_search_slot(NULL, root, &cur_key, path,
4984 0, 0);
4985 path->lowest_level = orig_lowest;
4986 if (ret < 0)
4987 return ret;
4988
4989 c = path->nodes[level];
4990 slot = path->slots[level];
4991 if (ret == 0)
4992 slot++;
4993 goto next;
4994 }
4995
4996 if (level == 0)
4997 btrfs_item_key_to_cpu(c, key, slot);
4998 else {
4999 u64 blockptr = btrfs_node_blockptr(c, slot);
5000 u64 gen = btrfs_node_ptr_generation(c, slot);
5001
5002 if (cache_only) {
5003 struct extent_buffer *cur;
5004 cur = btrfs_find_tree_block(root, blockptr,
5005 btrfs_level_size(root, level - 1));
5006 if (!cur ||
5007 btrfs_buffer_uptodate(cur, gen, 1) <= 0) {
5008 slot++;
5009 if (cur)
5010 free_extent_buffer(cur);
5011 goto next;
5012 }
5013 free_extent_buffer(cur);
5014 }
5015 if (gen < min_trans) {
5016 slot++;
5017 goto next;
5018 }
5019 btrfs_node_key_to_cpu(c, key, slot);
5020 }
5021 return 0;
5022 }
5023 return 1;
5024}
5025
5026/*
5027 * search the tree again to find a leaf with greater keys
5028 * returns 0 if it found something or 1 if there are no greater leaves.
5029 * returns < 0 on io errors.
5030 */
5031int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5032{
5033 return btrfs_next_old_leaf(root, path, 0);
5034}
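
/*
 * The canonical iteration pattern built on top of this (sketch of a
 * hypothetical caller): visit every item from 'key' to the end of the tree.
 *
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	while (ret >= 0) {
 *		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
 *			ret = btrfs_next_leaf(root, path);
 *			if (ret)
 *				break;		(1 means no more leaves)
 *			continue;
 *		}
 *		(handle the item at path->slots[0])
 *		path->slots[0]++;
 *	}
 */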
5035
5036int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5037 u64 time_seq)
5038{
5039 int slot;
5040 int level;
5041 struct extent_buffer *c;
5042 struct extent_buffer *next;
5043 struct btrfs_key key;
5044 u32 nritems;
5045 int ret;
5046 int old_spinning = path->leave_spinning;
5047 int next_rw_lock = 0;
5048
5049 nritems = btrfs_header_nritems(path->nodes[0]);
5050 if (nritems == 0)
5051 return 1;
5052
5053 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5054again:
5055 level = 1;
5056 next = NULL;
5057 next_rw_lock = 0;
5058 btrfs_release_path(path);
5059
5060 path->keep_locks = 1;
5061 path->leave_spinning = 1;
5062
5063 if (time_seq)
5064 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5065 else
5066 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5067 path->keep_locks = 0;
5068
5069 if (ret < 0)
5070 return ret;
5071
5072 nritems = btrfs_header_nritems(path->nodes[0]);
5073 /*
5074 * by releasing the path above we dropped all our locks. A balance
5075 * could have added more items next to the key that used to be
5076 * at the very end of the block. So, check again here and
5077 * advance the path if there are now more items available.
5078 */
5079 if (nritems > 0 && path->slots[0] < nritems - 1) {
5080 if (ret == 0)
5081 path->slots[0]++;
5082 ret = 0;
5083 goto done;
5084 }
5085
5086 while (level < BTRFS_MAX_LEVEL) {
5087 if (!path->nodes[level]) {
5088 ret = 1;
5089 goto done;
5090 }
5091
5092 slot = path->slots[level] + 1;
5093 c = path->nodes[level];
5094 if (slot >= btrfs_header_nritems(c)) {
5095 level++;
5096 if (level == BTRFS_MAX_LEVEL) {
5097 ret = 1;
5098 goto done;
5099 }
5100 continue;
5101 }
5102
5103 if (next) {
5104 btrfs_tree_unlock_rw(next, next_rw_lock);
5105 free_extent_buffer(next);
5106 }
5107
5108 next = c;
5109 next_rw_lock = path->locks[level];
5110 ret = read_block_for_search(NULL, root, path, &next, level,
5111 slot, &key, 0);
5112 if (ret == -EAGAIN)
5113 goto again;
5114
5115 if (ret < 0) {
5116 btrfs_release_path(path);
5117 goto done;
5118 }
5119
5120 if (!path->skip_locking) {
5121 ret = btrfs_try_tree_read_lock(next);
5122 if (!ret && time_seq) {
5123 /*
5124 * If we don't get the lock, we may be racing
5125 * with push_leaf_left, holding that lock while
5126 * itself waiting for the leaf we've currently
5127 * locked. To solve this situation, we give up
5128 * on our lock and cycle.
5129 */
5130 btrfs_release_path(path);
5131 cond_resched();
5132 goto again;
5133 }
5134 if (!ret) {
5135 btrfs_set_path_blocking(path);
5136 btrfs_tree_read_lock(next);
5137 btrfs_clear_path_blocking(path, next,
5138 BTRFS_READ_LOCK);
5139 }
5140 next_rw_lock = BTRFS_READ_LOCK;
5141 }
5142 break;
5143 }
5144 path->slots[level] = slot;
5145 while (1) {
5146 level--;
5147 c = path->nodes[level];
5148 if (path->locks[level])
5149 btrfs_tree_unlock_rw(c, path->locks[level]);
5150
5151 free_extent_buffer(c);
5152 path->nodes[level] = next;
5153 path->slots[level] = 0;
5154 if (!path->skip_locking)
5155 path->locks[level] = next_rw_lock;
5156 if (!level)
5157 break;
5158
5159 ret = read_block_for_search(NULL, root, path, &next, level,
5160 0, &key, 0);
5161 if (ret == -EAGAIN)
5162 goto again;
5163
5164 if (ret < 0) {
5165 btrfs_release_path(path);
5166 goto done;
5167 }
5168
5169 if (!path->skip_locking) {
5170 ret = btrfs_try_tree_read_lock(next);
5171 if (!ret) {
5172 btrfs_set_path_blocking(path);
5173 btrfs_tree_read_lock(next);
5174 btrfs_clear_path_blocking(path, next,
5175 BTRFS_READ_LOCK);
5176 }
5177 next_rw_lock = BTRFS_READ_LOCK;
5178 }
5179 }
5180 ret = 0;
5181done:
5182 unlock_up(path, 0, 1, 0, NULL);
5183 path->leave_spinning = old_spinning;
5184 if (!old_spinning)
5185 btrfs_set_path_blocking(path);
5186
5187 return ret;
5188}
5189
5190/*
5191 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5192 * searching until it gets past min_objectid or finds an item of 'type'
5193 *
5194 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5195 */
5196int btrfs_previous_item(struct btrfs_root *root,
5197 struct btrfs_path *path, u64 min_objectid,
5198 int type)
5199{
5200 struct btrfs_key found_key;
5201 struct extent_buffer *leaf;
5202 u32 nritems;
5203 int ret;
5204
5205 while (1) {
5206 if (path->slots[0] == 0) {
5207 btrfs_set_path_blocking(path);
5208 ret = btrfs_prev_leaf(root, path);
5209 if (ret != 0)
5210 return ret;
5211 } else {
5212 path->slots[0]--;
5213 }
5214 leaf = path->nodes[0];
5215 nritems = btrfs_header_nritems(leaf);
5216 if (nritems == 0)
5217 return 1;
5218 if (path->slots[0] == nritems)
5219 path->slots[0]--;
5220
5221 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5222 if (found_key.objectid < min_objectid)
5223 break;
5224 if (found_key.type == type)
5225 return 0;
5226 if (found_key.objectid == min_objectid &&
5227 found_key.type < type)
5228 break;
5229 }
5230 return 1;
5231}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2007,2008 Oracle. All rights reserved.
4 */
5
6#include <linux/sched.h>
7#include <linux/slab.h>
8#include <linux/rbtree.h>
9#include <linux/mm.h>
10#include <linux/error-injection.h>
11#include "messages.h"
12#include "ctree.h"
13#include "disk-io.h"
14#include "transaction.h"
15#include "print-tree.h"
16#include "locking.h"
17#include "volumes.h"
18#include "qgroup.h"
19#include "tree-mod-log.h"
20#include "tree-checker.h"
21#include "fs.h"
22#include "accessors.h"
23#include "extent-tree.h"
24#include "relocation.h"
25#include "file-item.h"
26
27static struct kmem_cache *btrfs_path_cachep;
28
29static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
30 *root, struct btrfs_path *path, int level);
31static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
32 const struct btrfs_key *ins_key, struct btrfs_path *path,
33 int data_size, int extend);
34static int push_node_left(struct btrfs_trans_handle *trans,
35 struct extent_buffer *dst,
36 struct extent_buffer *src, int empty);
37static int balance_node_right(struct btrfs_trans_handle *trans,
38 struct extent_buffer *dst_buf,
39 struct extent_buffer *src_buf);
40static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
41 int level, int slot);
42
43static const struct btrfs_csums {
44 u16 size;
45 const char name[10];
46 const char driver[12];
47} btrfs_csums[] = {
48 [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
49 [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
50 [BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
51 [BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
52 .driver = "blake2b-256" },
53};
54
55/*
56 * The leaf data grows from end-to-front in the node. This returns the address
57 * of the start of the last item, which is the stop of the leaf data stack.
58 */
59static unsigned int leaf_data_end(const struct extent_buffer *leaf)
60{
61 u32 nr = btrfs_header_nritems(leaf);
62
63 if (nr == 0)
64 return BTRFS_LEAF_DATA_SIZE(leaf->fs_info);
65 return btrfs_item_offset(leaf, nr - 1);
66}
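
/*
 * The layout this relies on (illustration):
 *
 *	[header][item 0]...[item N-1] ... free space ... [data N-1]...[data 0]
 *	                                                 ^
 *	                                          leaf_data_end()
 *
 * Items grow from the front of the block, their data grows from the back,
 * and the gap in between is the leaf's free space.
 */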
67
68/*
69 * Move data in a @leaf (using memmove, safe for overlapping ranges).
70 *
71 * @leaf: leaf that we're doing a memmove on
72 * @dst_offset: item data offset we're moving to
73 * @src_offset: item data offset we're moving from
74 * @len: length of the data we're moving
75 *
76 * Wrapper around memmove_extent_buffer() that takes into account the header on
77 * the leaf. The btrfs_item offsets start directly after the header, so we
78 * have to adjust any offsets to account for the header in the leaf. This
79 * handles that math to simplify the callers.
80 */
81static inline void memmove_leaf_data(const struct extent_buffer *leaf,
82 unsigned long dst_offset,
83 unsigned long src_offset,
84 unsigned long len)
85{
86 memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, 0) + dst_offset,
87 btrfs_item_nr_offset(leaf, 0) + src_offset, len);
88}
89
90/*
91 * Copy item data from @src into @dst at the given @offset.
92 *
93 * @dst: destination leaf that we're copying into
94 * @src: source leaf that we're copying from
95 * @dst_offset: item data offset we're copying to
96 * @src_offset: item data offset we're copying from
97 * @len: length of the data we're copying
98 *
99 * Wrapper around copy_extent_buffer() that takes into account the header on
100 * the leaf. The btrfs_item offsets start directly after the header, so we
101 * have to adjust any offsets to account for the header in the leaf. This
102 * handles that math to simplify the callers.
103 */
104static inline void copy_leaf_data(const struct extent_buffer *dst,
105 const struct extent_buffer *src,
106 unsigned long dst_offset,
107 unsigned long src_offset, unsigned long len)
108{
109 copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, 0) + dst_offset,
110 btrfs_item_nr_offset(src, 0) + src_offset, len);
111}
112
113/*
114 * Move items in a @leaf (using memmove).
115 *
116 * @dst: destination leaf for the items
117 * @dst_item: the item nr we're copying into
118 * @src_item: the item nr we're copying from
119 * @nr_items: the number of items to copy
120 *
121 * Wrapper around memmove_extent_buffer() that does the math to get the
122 * appropriate offsets into the leaf from the item numbers.
123 */
124static inline void memmove_leaf_items(const struct extent_buffer *leaf,
125 int dst_item, int src_item, int nr_items)
126{
127 memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, dst_item),
128 btrfs_item_nr_offset(leaf, src_item),
129 nr_items * sizeof(struct btrfs_item));
130}
131
132/*
133 * Copy items from @src into @dst at the given @offset.
134 *
135 * @dst: destination leaf for the items
136 * @src: source leaf for the items
137 * @dst_item: the item nr we're copying into
138 * @src_item: the item nr we're copying from
139 * @nr_items: the number of items to copy
140 *
141 * Wrapper around copy_extent_buffer() that does the math to get the
142 * appropriate offsets into the leaf from the item numbers.
143 */
144static inline void copy_leaf_items(const struct extent_buffer *dst,
145 const struct extent_buffer *src,
146 int dst_item, int src_item, int nr_items)
147{
148 copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, dst_item),
149 btrfs_item_nr_offset(src, src_item),
150 nr_items * sizeof(struct btrfs_item));
151}
152
153int btrfs_super_csum_size(const struct btrfs_super_block *s)
154{
155 u16 t = btrfs_super_csum_type(s);
156 /*
157 * csum type is validated at mount time
158 */
159 return btrfs_csums[t].size;
160}
161
162const char *btrfs_super_csum_name(u16 csum_type)
163{
164 /* csum type is validated at mount time */
165 return btrfs_csums[csum_type].name;
166}
167
168/*
169 * Return driver name if defined, otherwise the name that's also a valid driver
170 * name
171 */
172const char *btrfs_super_csum_driver(u16 csum_type)
173{
174 /* csum type is validated at mount time */
175 return btrfs_csums[csum_type].driver[0] ?
176 btrfs_csums[csum_type].driver :
177 btrfs_csums[csum_type].name;
178}
179
180size_t __attribute_const__ btrfs_get_num_csums(void)
181{
182 return ARRAY_SIZE(btrfs_csums);
183}
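
/*
 * Sketch of how these helpers combine (hypothetical caller): resolving the
 * checksum driver for the type recorded in the super block.
 *
 *	u16 csum_type = btrfs_super_csum_type(disk_super);
 *	const char *driver = btrfs_super_csum_driver(csum_type);
 *	int csum_size = btrfs_super_csum_size(disk_super);
 *
 * For BTRFS_CSUM_TYPE_BLAKE2 this yields "blake2b-256" and 32 bytes; the
 * other types reuse their name as the driver name.
 */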
184
185struct btrfs_path *btrfs_alloc_path(void)
186{
187 might_sleep();
188
189 return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
190}
191
192/* this also releases the path */
193void btrfs_free_path(struct btrfs_path *p)
194{
195 if (!p)
196 return;
197 btrfs_release_path(p);
198 kmem_cache_free(btrfs_path_cachep, p);
199}
200
201/*
202 * path release drops references on the extent buffers in the path
203 * and it drops any locks held by this path
204 *
205 * It is safe to call this on paths that have no locks or extent buffers held.
206 */
207noinline void btrfs_release_path(struct btrfs_path *p)
208{
209 int i;
210
211 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
212 p->slots[i] = 0;
213 if (!p->nodes[i])
214 continue;
215 if (p->locks[i]) {
216 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
217 p->locks[i] = 0;
218 }
219 free_extent_buffer(p->nodes[i]);
220 p->nodes[i] = NULL;
221 }
222}
223
224/*
225 * We want the transaction abort to print a stack trace only for errors where
226 * the cause could be a bug, e.g. due to ENOSPC, and not for common errors
227 * that are caused by external factors.
228 */
229bool __cold abort_should_print_stack(int errno)
230{
231 switch (errno) {
232 case -EIO:
233 case -EROFS:
234 case -ENOMEM:
235 return false;
236 }
237 return true;
238}
239
240/*
241 * safely gets a reference on the root node of a tree. A lock
242 * is not taken, so a concurrent writer may put a different node
243 * at the root of the tree. See btrfs_lock_root_node for the
244 * looping required.
245 *
246 * The extent buffer returned by this has a reference taken, so
247 * it won't disappear. It may stop being the root of the tree
248 * at any time because there are no locks held.
249 */
250struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
251{
252 struct extent_buffer *eb;
253
254 while (1) {
255 rcu_read_lock();
256 eb = rcu_dereference(root->node);
257
258 /*
259		 * RCU really hurts here: we could free up the root node because
260		 * it was COWed, but we may not get the new root node yet, so do
261		 * the inc_not_zero dance; if it doesn't work, call
262		 * synchronize_rcu() and try again.
263 */
264 if (atomic_inc_not_zero(&eb->refs)) {
265 rcu_read_unlock();
266 break;
267 }
268 rcu_read_unlock();
269 synchronize_rcu();
270 }
271 return eb;
272}
273
274/*
275 * Cowonly roots (not-shareable trees, everything not subvolume or reloc
276 * roots) just get put onto a simple dirty list. The transaction walks this
277 * list to make sure they get properly updated on disk.
278 */
279static void add_root_to_dirty_list(struct btrfs_root *root)
280{
281 struct btrfs_fs_info *fs_info = root->fs_info;
282
283 if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
284 !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
285 return;
286
287 spin_lock(&fs_info->trans_lock);
288 if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
289 /* Want the extent tree to be the last on the list */
290 if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
291 list_move_tail(&root->dirty_list,
292 &fs_info->dirty_cowonly_roots);
293 else
294 list_move(&root->dirty_list,
295 &fs_info->dirty_cowonly_roots);
296 }
297 spin_unlock(&fs_info->trans_lock);
298}
299
300/*
301 * used by snapshot creation to make a copy of a root for a tree with
302 * a given objectid. The buffer with the new root node is returned in
303 * cow_ret, and this func returns zero on success or a negative error code.
304 */
305int btrfs_copy_root(struct btrfs_trans_handle *trans,
306 struct btrfs_root *root,
307 struct extent_buffer *buf,
308 struct extent_buffer **cow_ret, u64 new_root_objectid)
309{
310 struct btrfs_fs_info *fs_info = root->fs_info;
311 struct extent_buffer *cow;
312 int ret = 0;
313 int level;
314 struct btrfs_disk_key disk_key;
315
316 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
317 trans->transid != fs_info->running_transaction->transid);
318 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
319 trans->transid != root->last_trans);
320
321 level = btrfs_header_level(buf);
322 if (level == 0)
323 btrfs_item_key(buf, &disk_key, 0);
324 else
325 btrfs_node_key(buf, &disk_key, 0);
326
327 cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
328 &disk_key, level, buf->start, 0,
329 BTRFS_NESTING_NEW_ROOT);
330 if (IS_ERR(cow))
331 return PTR_ERR(cow);
332
333 copy_extent_buffer_full(cow, buf);
334 btrfs_set_header_bytenr(cow, cow->start);
335 btrfs_set_header_generation(cow, trans->transid);
336 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
337 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
338 BTRFS_HEADER_FLAG_RELOC);
339 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
340 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
341 else
342 btrfs_set_header_owner(cow, new_root_objectid);
343
344 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
345
346 WARN_ON(btrfs_header_generation(buf) > trans->transid);
347 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
348 ret = btrfs_inc_ref(trans, root, cow, 1);
349 else
350 ret = btrfs_inc_ref(trans, root, cow, 0);
351 if (ret) {
352 btrfs_tree_unlock(cow);
353 free_extent_buffer(cow);
354 btrfs_abort_transaction(trans, ret);
355 return ret;
356 }
357
358 btrfs_mark_buffer_dirty(cow);
359 *cow_ret = cow;
360 return 0;
361}
362
363/*
364 * check if the tree block can be shared by multiple trees
365 */
366int btrfs_block_can_be_shared(struct btrfs_root *root,
367 struct extent_buffer *buf)
368{
369 /*
370 * Tree blocks not in shareable trees and tree roots are never shared.
371 * If a block was allocated after the last snapshot and the block was
372 * not allocated by tree relocation, we know the block is not shared.
373 */
374 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
375 buf != root->node && buf != root->commit_root &&
376 (btrfs_header_generation(buf) <=
377 btrfs_root_last_snapshot(&root->root_item) ||
378 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
379 return 1;
380
381 return 0;
382}
383
384static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
385 struct btrfs_root *root,
386 struct extent_buffer *buf,
387 struct extent_buffer *cow,
388 int *last_ref)
389{
390 struct btrfs_fs_info *fs_info = root->fs_info;
391 u64 refs;
392 u64 owner;
393 u64 flags;
394 u64 new_flags = 0;
395 int ret;
396
397 /*
398 * Backrefs update rules:
399 *
400 * Always use full backrefs for extent pointers in tree block
401 * allocated by tree relocation.
402 *
403 * If a shared tree block is no longer referenced by its owner
404 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
405 * use full backrefs for extent pointers in tree block.
406 *
407	 * If a tree block is being relocated
408 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
409 * use full backrefs for extent pointers in tree block.
410 * The reason for this is some operations (such as drop tree)
411	 * are only allowed for blocks that use full backrefs.
412 */
413
414 if (btrfs_block_can_be_shared(root, buf)) {
415 ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
416 btrfs_header_level(buf), 1,
417 &refs, &flags);
418 if (ret)
419 return ret;
420 if (refs == 0) {
421 ret = -EROFS;
422 btrfs_handle_fs_error(fs_info, ret, NULL);
423 return ret;
424 }
425 } else {
426 refs = 1;
427 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
428 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
429 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
430 else
431 flags = 0;
432 }
433
434 owner = btrfs_header_owner(buf);
435 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
436 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
437
438 if (refs > 1) {
439 if ((owner == root->root_key.objectid ||
440 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
441 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
442 ret = btrfs_inc_ref(trans, root, buf, 1);
443 if (ret)
444 return ret;
445
446 if (root->root_key.objectid ==
447 BTRFS_TREE_RELOC_OBJECTID) {
448 ret = btrfs_dec_ref(trans, root, buf, 0);
449 if (ret)
450 return ret;
451 ret = btrfs_inc_ref(trans, root, cow, 1);
452 if (ret)
453 return ret;
454 }
455 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
456 } else {
457
458 if (root->root_key.objectid ==
459 BTRFS_TREE_RELOC_OBJECTID)
460 ret = btrfs_inc_ref(trans, root, cow, 1);
461 else
462 ret = btrfs_inc_ref(trans, root, cow, 0);
463 if (ret)
464 return ret;
465 }
466 if (new_flags != 0) {
467 int level = btrfs_header_level(buf);
468
469 ret = btrfs_set_disk_extent_flags(trans, buf,
470 new_flags, level);
471 if (ret)
472 return ret;
473 }
474 } else {
475 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
476 if (root->root_key.objectid ==
477 BTRFS_TREE_RELOC_OBJECTID)
478 ret = btrfs_inc_ref(trans, root, cow, 1);
479 else
480 ret = btrfs_inc_ref(trans, root, cow, 0);
481 if (ret)
482 return ret;
483 ret = btrfs_dec_ref(trans, root, buf, 1);
484 if (ret)
485 return ret;
486 }
487 btrfs_clean_tree_block(buf);
488 *last_ref = 1;
489 }
490 return 0;
491}
492
493/*
494 * does the dirty work in cow of a single block. The parent block (if
495 * supplied) is updated to point to the new cow copy. The new buffer is marked
496 * dirty and returned locked. If you modify the block it needs to be marked
497 * dirty again.
498 *
499 * search_start -- an allocation hint for the new block
500 *
501 * empty_size -- a hint that you plan on doing more cow. This is the size in
502 * bytes the allocator should try to find free next to the block it returns.
503 * This is just a hint and may be ignored by the allocator.
504 */
505static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
506 struct btrfs_root *root,
507 struct extent_buffer *buf,
508 struct extent_buffer *parent, int parent_slot,
509 struct extent_buffer **cow_ret,
510 u64 search_start, u64 empty_size,
511 enum btrfs_lock_nesting nest)
512{
513 struct btrfs_fs_info *fs_info = root->fs_info;
514 struct btrfs_disk_key disk_key;
515 struct extent_buffer *cow;
516 int level, ret;
517 int last_ref = 0;
518 int unlock_orig = 0;
519 u64 parent_start = 0;
520
521 if (*cow_ret == buf)
522 unlock_orig = 1;
523
524 btrfs_assert_tree_write_locked(buf);
525
526 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
527 trans->transid != fs_info->running_transaction->transid);
528 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
529 trans->transid != root->last_trans);
530
531 level = btrfs_header_level(buf);
532
533 if (level == 0)
534 btrfs_item_key(buf, &disk_key, 0);
535 else
536 btrfs_node_key(buf, &disk_key, 0);
537
538 if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
539 parent_start = parent->start;
540
541 cow = btrfs_alloc_tree_block(trans, root, parent_start,
542 root->root_key.objectid, &disk_key, level,
543 search_start, empty_size, nest);
544 if (IS_ERR(cow))
545 return PTR_ERR(cow);
546
547 /* cow is set to blocking by btrfs_init_new_buffer */
548
549 copy_extent_buffer_full(cow, buf);
550 btrfs_set_header_bytenr(cow, cow->start);
551 btrfs_set_header_generation(cow, trans->transid);
552 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
553 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
554 BTRFS_HEADER_FLAG_RELOC);
555 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
556 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
557 else
558 btrfs_set_header_owner(cow, root->root_key.objectid);
559
560 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
561
562 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
563 if (ret) {
564 btrfs_tree_unlock(cow);
565 free_extent_buffer(cow);
566 btrfs_abort_transaction(trans, ret);
567 return ret;
568 }
569
570 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
571 ret = btrfs_reloc_cow_block(trans, root, buf, cow);
572 if (ret) {
573 btrfs_tree_unlock(cow);
574 free_extent_buffer(cow);
575 btrfs_abort_transaction(trans, ret);
576 return ret;
577 }
578 }
579
580 if (buf == root->node) {
581 WARN_ON(parent && parent != buf);
582 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
583 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
584 parent_start = buf->start;
585
586 atomic_inc(&cow->refs);
587 ret = btrfs_tree_mod_log_insert_root(root->node, cow, true);
588 BUG_ON(ret < 0);
589 rcu_assign_pointer(root->node, cow);
590
591 btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
592 parent_start, last_ref);
593 free_extent_buffer(buf);
594 add_root_to_dirty_list(root);
595 } else {
596 WARN_ON(trans->transid != btrfs_header_generation(parent));
597 btrfs_tree_mod_log_insert_key(parent, parent_slot,
598 BTRFS_MOD_LOG_KEY_REPLACE);
599 btrfs_set_node_blockptr(parent, parent_slot,
600 cow->start);
601 btrfs_set_node_ptr_generation(parent, parent_slot,
602 trans->transid);
603 btrfs_mark_buffer_dirty(parent);
604 if (last_ref) {
605 ret = btrfs_tree_mod_log_free_eb(buf);
606 if (ret) {
607 btrfs_tree_unlock(cow);
608 free_extent_buffer(cow);
609 btrfs_abort_transaction(trans, ret);
610 return ret;
611 }
612 }
613 btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
614 parent_start, last_ref);
615 }
616 if (unlock_orig)
617 btrfs_tree_unlock(buf);
618 free_extent_buffer_stale(buf);
619 btrfs_mark_buffer_dirty(cow);
620 *cow_ret = cow;
621 return 0;
622}
623
624static inline int should_cow_block(struct btrfs_trans_handle *trans,
625 struct btrfs_root *root,
626 struct extent_buffer *buf)
627{
628 if (btrfs_is_testing(root->fs_info))
629 return 0;
630
631 /* Ensure we can see the FORCE_COW bit */
632 smp_mb__before_atomic();
633
634 /*
635 * We do not need to cow a block if
636 * 1) this block is not created or changed in this transaction;
637 * 2) this block does not belong to TREE_RELOC tree;
638 * 3) the root is not forced COW.
639 *
640 * What is forced COW:
641 * when we create snapshot during committing the transaction,
642 * after we've finished copying src root, we must COW the shared
643 * block to ensure the metadata consistency.
644 */
645 if (btrfs_header_generation(buf) == trans->transid &&
646 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
647 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
648 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
649 !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
650 return 0;
651 return 1;
652}
653
654/*
655 * cows a single block, see __btrfs_cow_block for the real work.
656 * This version of it has extra checks so that a block isn't COWed more than
657 * once per transaction, as long as it hasn't been written yet
658 */
659noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
660 struct btrfs_root *root, struct extent_buffer *buf,
661 struct extent_buffer *parent, int parent_slot,
662 struct extent_buffer **cow_ret,
663 enum btrfs_lock_nesting nest)
664{
665 struct btrfs_fs_info *fs_info = root->fs_info;
666 u64 search_start;
667 int ret;
668
669 if (test_bit(BTRFS_ROOT_DELETING, &root->state))
670 btrfs_err(fs_info,
671 "COW'ing blocks on a fs root that's being dropped");
672
673 if (trans->transaction != fs_info->running_transaction)
674 WARN(1, KERN_CRIT "trans %llu running %llu\n",
675 trans->transid,
676 fs_info->running_transaction->transid);
677
678 if (trans->transid != fs_info->generation)
679 WARN(1, KERN_CRIT "trans %llu running %llu\n",
680 trans->transid, fs_info->generation);
681
682 if (!should_cow_block(trans, root, buf)) {
683 *cow_ret = buf;
684 return 0;
685 }
686
687 search_start = buf->start & ~((u64)SZ_1G - 1);
688
689 /*
690 * Before CoWing this block for later modification, check if it's
691 * the subtree root and do the delayed subtree trace if needed.
692 *
693 * Also, we don't care about the error, as it's handled internally.
694 */
695 btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
696 ret = __btrfs_cow_block(trans, root, buf, parent,
697 parent_slot, cow_ret, search_start, 0, nest);
698
699 trace_btrfs_cow_block(root, buf, *cow_ret);
700
701 return ret;
702}
703ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);
704
705/*
706 * helper function for defrag to decide if two blocks pointed to by a
707 * node are actually close by
708 */
709static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
710{
711 if (blocknr < other && other - (blocknr + blocksize) < 32768)
712 return 1;
713 if (blocknr > other && blocknr - (other + blocksize) < 32768)
714 return 1;
715 return 0;
716}
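
/*
 * Editor's note, worked example (not from the original source): with a
 * 16KiB nodesize, blocknr = 0 and other = 40KiB give a gap of
 * 40960 - (0 + 16384) = 24576 bytes, which is under the 32768-byte
 * threshold, so close_blocks() reports the two blocks as close and
 * defrag leaves them where they are.
 */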
717
718#ifdef __LITTLE_ENDIAN
719
720/*
721 * Compare two keys. On little-endian machines the disk byte order matches
722 * the CPU order, so we can avoid the conversion.
723 */
724static int comp_keys(const struct btrfs_disk_key *disk_key,
725 const struct btrfs_key *k2)
726{
727 const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;
728
729 return btrfs_comp_cpu_keys(k1, k2);
730}
731
732#else
733
734/*
735 * compare two keys in a memcmp fashion
736 */
737static int comp_keys(const struct btrfs_disk_key *disk,
738 const struct btrfs_key *k2)
739{
740 struct btrfs_key k1;
741
742 btrfs_disk_key_to_cpu(&k1, disk);
743
744 return btrfs_comp_cpu_keys(&k1, k2);
745}
746#endif
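
/*
 * Editor's note: the little-endian cast above is safe because struct
 * btrfs_disk_key stores objectid, type and offset as on-disk
 * little-endian values in the same field order as struct btrfs_key, so
 * on a little-endian CPU the two layouts are bit-identical and no byte
 * swapping is needed.
 */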
747
748/*
749 * same as comp_keys only with two btrfs_key's
750 */
751int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
752{
753 if (k1->objectid > k2->objectid)
754 return 1;
755 if (k1->objectid < k2->objectid)
756 return -1;
757 if (k1->type > k2->type)
758 return 1;
759 if (k1->type < k2->type)
760 return -1;
761 if (k1->offset > k2->offset)
762 return 1;
763 if (k1->offset < k2->offset)
764 return -1;
765 return 0;
766}
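
/*
 * Editor's example (illustrative sketch, not part of the original file):
 * keys sort by objectid first, then type, then offset, so for the same
 * inode the INODE_ITEM key (type 1) always sorts before an EXTENT_DATA
 * key (type 108). The helper name example_key_order() is hypothetical.
 */
static void __maybe_unused example_key_order(void)
{
	struct btrfs_key a = { .objectid = 257, .type = BTRFS_INODE_ITEM_KEY,
			       .offset = 0 };
	struct btrfs_key b = { .objectid = 257, .type = BTRFS_EXTENT_DATA_KEY,
			       .offset = 0 };

	/* a sorts before b, so the comparison must be negative */
	WARN_ON(btrfs_comp_cpu_keys(&a, &b) >= 0);
}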
767
768/*
769 * this is used by the defrag code to go through all the
770 * leaves pointed to by a node and reallocate them so that
771 * disk order is close to key order
772 */
773int btrfs_realloc_node(struct btrfs_trans_handle *trans,
774 struct btrfs_root *root, struct extent_buffer *parent,
775 int start_slot, u64 *last_ret,
776 struct btrfs_key *progress)
777{
778 struct btrfs_fs_info *fs_info = root->fs_info;
779 struct extent_buffer *cur;
780 u64 blocknr;
781 u64 search_start = *last_ret;
782 u64 last_block = 0;
783 u64 other;
784 u32 parent_nritems;
785 int end_slot;
786 int i;
787 int err = 0;
788 u32 blocksize;
789 int progress_passed = 0;
790 struct btrfs_disk_key disk_key;
791
792 WARN_ON(trans->transaction != fs_info->running_transaction);
793 WARN_ON(trans->transid != fs_info->generation);
794
795 parent_nritems = btrfs_header_nritems(parent);
796 blocksize = fs_info->nodesize;
797 end_slot = parent_nritems - 1;
798
799 if (parent_nritems <= 1)
800 return 0;
801
802 for (i = start_slot; i <= end_slot; i++) {
803 int close = 1;
804
805 btrfs_node_key(parent, &disk_key, i);
806 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
807 continue;
808
809 progress_passed = 1;
810 blocknr = btrfs_node_blockptr(parent, i);
811 if (last_block == 0)
812 last_block = blocknr;
813
814 if (i > 0) {
815 other = btrfs_node_blockptr(parent, i - 1);
816 close = close_blocks(blocknr, other, blocksize);
817 }
818 if (!close && i < end_slot) {
819 other = btrfs_node_blockptr(parent, i + 1);
820 close = close_blocks(blocknr, other, blocksize);
821 }
822 if (close) {
823 last_block = blocknr;
824 continue;
825 }
826
827 cur = btrfs_read_node_slot(parent, i);
828 if (IS_ERR(cur))
829 return PTR_ERR(cur);
830 if (search_start == 0)
831 search_start = last_block;
832
833 btrfs_tree_lock(cur);
834 err = __btrfs_cow_block(trans, root, cur, parent, i,
835 &cur, search_start,
836 min(16 * blocksize,
837 (end_slot - i) * blocksize),
838 BTRFS_NESTING_COW);
839 if (err) {
840 btrfs_tree_unlock(cur);
841 free_extent_buffer(cur);
842 break;
843 }
844 search_start = cur->start;
845 last_block = cur->start;
846 *last_ret = search_start;
847 btrfs_tree_unlock(cur);
848 free_extent_buffer(cur);
849 }
850 return err;
851}
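
/*
 * Editor's note, worked example (not from the original source): with a
 * 16KiB nodesize and 10 slots still to process (end_slot - i == 10), the
 * empty_size hint passed to __btrfs_cow_block() above is
 * min(16 * 16KiB, 10 * 16KiB) = 160KiB, i.e. a request for free space
 * after the new block large enough for the leaves defrag may still move.
 */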
852
853/*
854 * Search for a key in the given extent_buffer.
855 *
856 * The lower boundary for the search is specified by the slot number @low. Use a
857 * value of 0 to search over the whole extent buffer.
858 *
859 * The slot in the extent buffer is returned via @slot. If the key exists in the
860 * extent buffer, then @slot will point to the slot where the key is, otherwise
861 * it points to the slot where you would insert the key.
862 *
863 * Slot may point to the total number of items (i.e. one position beyond the last
864 * key) if the key is bigger than the last key in the extent buffer.
865 */
866static noinline int generic_bin_search(struct extent_buffer *eb, int low,
867 const struct btrfs_key *key, int *slot)
868{
869 unsigned long p;
870 int item_size;
871 int high = btrfs_header_nritems(eb);
872 int ret;
873 const int key_size = sizeof(struct btrfs_disk_key);
874
875 if (low > high) {
876 btrfs_err(eb->fs_info,
877 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
878 __func__, low, high, eb->start,
879 btrfs_header_owner(eb), btrfs_header_level(eb));
880 return -EINVAL;
881 }
882
883 if (btrfs_header_level(eb) == 0) {
884 p = offsetof(struct btrfs_leaf, items);
885 item_size = sizeof(struct btrfs_item);
886 } else {
887 p = offsetof(struct btrfs_node, ptrs);
888 item_size = sizeof(struct btrfs_key_ptr);
889 }
890
891 while (low < high) {
892 unsigned long oip;
893 unsigned long offset;
894 struct btrfs_disk_key *tmp;
895 struct btrfs_disk_key unaligned;
896 int mid;
897
898 mid = (low + high) / 2;
899 offset = p + mid * item_size;
900 oip = offset_in_page(offset);
901
902 if (oip + key_size <= PAGE_SIZE) {
903 const unsigned long idx = get_eb_page_index(offset);
904 char *kaddr = page_address(eb->pages[idx]);
905
906 oip = get_eb_offset_in_page(eb, offset);
907 tmp = (struct btrfs_disk_key *)(kaddr + oip);
908 } else {
909 read_extent_buffer(eb, &unaligned, offset, key_size);
910 tmp = &unaligned;
911 }
912
913 ret = comp_keys(tmp, key);
914
915 if (ret < 0)
916 low = mid + 1;
917 else if (ret > 0)
918 high = mid;
919 else {
920 *slot = mid;
921 return 0;
922 }
923 }
924 *slot = low;
925 return 1;
926}
927
928/*
929 * Simple binary search on an extent buffer. Works for both leaves and nodes, and
930 * always searches over the whole range of keys (slot 0 to slot 'nritems - 1').
931 */
932int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
933 int *slot)
934{
935 return generic_bin_search(eb, 0, key, slot);
936}
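
/*
 * Editor's example (illustrative sketch, not from the original source):
 * how a caller typically interprets the btrfs_bin_search() result. A
 * return of 0 means the key was found at @slot, 1 means @slot is the
 * insertion position for it, and a negative value means the header
 * claimed an impossible item count. The helper name
 * example_bin_search() is hypothetical.
 */
static int __maybe_unused example_bin_search(struct extent_buffer *eb,
					     const struct btrfs_key *key)
{
	int slot;
	int ret;

	ret = btrfs_bin_search(eb, key, &slot);
	if (ret < 0)
		return ret;
	if (ret == 0)
		btrfs_info(eb->fs_info, "key found at slot %d", slot);
	else
		btrfs_info(eb->fs_info, "key not found, insert slot %d", slot);
	return ret;
}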
937
938static void root_add_used(struct btrfs_root *root, u32 size)
939{
940 spin_lock(&root->accounting_lock);
941 btrfs_set_root_used(&root->root_item,
942 btrfs_root_used(&root->root_item) + size);
943 spin_unlock(&root->accounting_lock);
944}
945
946static void root_sub_used(struct btrfs_root *root, u32 size)
947{
948 spin_lock(&root->accounting_lock);
949 btrfs_set_root_used(&root->root_item,
950 btrfs_root_used(&root->root_item) - size);
951 spin_unlock(&root->accounting_lock);
952}
953
954/* given a node and slot number, this reads the block it points to. The
955 * extent buffer is returned with a reference taken (but unlocked).
956 */
957struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
958 int slot)
959{
960 int level = btrfs_header_level(parent);
961 struct btrfs_tree_parent_check check = { 0 };
962 struct extent_buffer *eb;
963
964 if (slot < 0 || slot >= btrfs_header_nritems(parent))
965 return ERR_PTR(-ENOENT);
966
967 BUG_ON(level == 0);
968
969 check.level = level - 1;
970 check.transid = btrfs_node_ptr_generation(parent, slot);
971 check.owner_root = btrfs_header_owner(parent);
972 check.has_first_key = true;
973 btrfs_node_key_to_cpu(parent, &check.first_key, slot);
974
975 eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
976 &check);
977 if (IS_ERR(eb))
978 return eb;
979 if (!extent_buffer_uptodate(eb)) {
980 free_extent_buffer(eb);
981 return ERR_PTR(-EIO);
982 }
983
984 return eb;
985}
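
/*
 * Editor's example (illustrative sketch, not from the original source):
 * walking one level down from a node. The child comes back with a
 * reference but no lock, so a writer locks it explicitly and drops the
 * reference when done, mirroring the pattern used by balance_level()
 * below. The helper name example_visit_child() is hypothetical.
 */
static int __maybe_unused example_visit_child(struct extent_buffer *parent,
					      int slot)
{
	struct extent_buffer *child;

	child = btrfs_read_node_slot(parent, slot);
	if (IS_ERR(child))
		return PTR_ERR(child);

	btrfs_tree_lock(child);
	/* ... inspect or modify the child here ... */
	btrfs_tree_unlock(child);
	free_extent_buffer(child);
	return 0;
}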
986
987/*
988 * node level balancing, used to make sure nodes are in proper order for
989 * item deletion. We balance from the top down, so we have to make sure
990 * that a deletion won't leave a node completely empty later on.
991 */
992static noinline int balance_level(struct btrfs_trans_handle *trans,
993 struct btrfs_root *root,
994 struct btrfs_path *path, int level)
995{
996 struct btrfs_fs_info *fs_info = root->fs_info;
997 struct extent_buffer *right = NULL;
998 struct extent_buffer *mid;
999 struct extent_buffer *left = NULL;
1000 struct extent_buffer *parent = NULL;
1001 int ret = 0;
1002 int wret;
1003 int pslot;
1004 int orig_slot = path->slots[level];
1005 u64 orig_ptr;
1006
1007 ASSERT(level > 0);
1008
1009 mid = path->nodes[level];
1010
1011 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK);
1012 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1013
1014 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1015
1016 if (level < BTRFS_MAX_LEVEL - 1) {
1017 parent = path->nodes[level + 1];
1018 pslot = path->slots[level + 1];
1019 }
1020
1021 /*
1022 * deal with the case where there is only one pointer in the root
1023 * by promoting the node below to a root
1024 */
1025 if (!parent) {
1026 struct extent_buffer *child;
1027
1028 if (btrfs_header_nritems(mid) != 1)
1029 return 0;
1030
1031 /* promote the child to a root */
1032 child = btrfs_read_node_slot(mid, 0);
1033 if (IS_ERR(child)) {
1034 ret = PTR_ERR(child);
1035 btrfs_handle_fs_error(fs_info, ret, NULL);
1036 goto enospc;
1037 }
1038
1039 btrfs_tree_lock(child);
1040 ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
1041 BTRFS_NESTING_COW);
1042 if (ret) {
1043 btrfs_tree_unlock(child);
1044 free_extent_buffer(child);
1045 goto enospc;
1046 }
1047
1048 ret = btrfs_tree_mod_log_insert_root(root->node, child, true);
1049 BUG_ON(ret < 0);
1050 rcu_assign_pointer(root->node, child);
1051
1052 add_root_to_dirty_list(root);
1053 btrfs_tree_unlock(child);
1054
1055 path->locks[level] = 0;
1056 path->nodes[level] = NULL;
1057 btrfs_clean_tree_block(mid);
1058 btrfs_tree_unlock(mid);
1059 /* once for the path */
1060 free_extent_buffer(mid);
1061
1062 root_sub_used(root, mid->len);
1063 btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
1064 /* once for the root ptr */
1065 free_extent_buffer_stale(mid);
1066 return 0;
1067 }
1068 if (btrfs_header_nritems(mid) >
1069 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
1070 return 0;
1071
1072 left = btrfs_read_node_slot(parent, pslot - 1);
1073 if (IS_ERR(left))
1074 left = NULL;
1075
1076 if (left) {
1077 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
1078 wret = btrfs_cow_block(trans, root, left,
1079 parent, pslot - 1, &left,
1080 BTRFS_NESTING_LEFT_COW);
1081 if (wret) {
1082 ret = wret;
1083 goto enospc;
1084 }
1085 }
1086
1087 right = btrfs_read_node_slot(parent, pslot + 1);
1088 if (IS_ERR(right))
1089 right = NULL;
1090
1091 if (right) {
1092 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
1093 wret = btrfs_cow_block(trans, root, right,
1094 parent, pslot + 1, &right,
1095 BTRFS_NESTING_RIGHT_COW);
1096 if (wret) {
1097 ret = wret;
1098 goto enospc;
1099 }
1100 }
1101
1102 /* first, try to make some room in the middle buffer */
1103 if (left) {
1104 orig_slot += btrfs_header_nritems(left);
1105 wret = push_node_left(trans, left, mid, 1);
1106 if (wret < 0)
1107 ret = wret;
1108 }
1109
1110 /*
1111 * then try to empty the right most buffer into the middle
1112 */
1113 if (right) {
1114 wret = push_node_left(trans, mid, right, 1);
1115 if (wret < 0 && wret != -ENOSPC)
1116 ret = wret;
1117 if (btrfs_header_nritems(right) == 0) {
1118 btrfs_clean_tree_block(right);
1119 btrfs_tree_unlock(right);
1120 del_ptr(root, path, level + 1, pslot + 1);
1121 root_sub_used(root, right->len);
1122 btrfs_free_tree_block(trans, btrfs_root_id(root), right,
1123 0, 1);
1124 free_extent_buffer_stale(right);
1125 right = NULL;
1126 } else {
1127 struct btrfs_disk_key right_key;
1128 btrfs_node_key(right, &right_key, 0);
1129 ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
1130 BTRFS_MOD_LOG_KEY_REPLACE);
1131 BUG_ON(ret < 0);
1132 btrfs_set_node_key(parent, &right_key, pslot + 1);
1133 btrfs_mark_buffer_dirty(parent);
1134 }
1135 }
1136 if (btrfs_header_nritems(mid) == 1) {
1137 /*
1138 * we're not allowed to leave a node with one item in the
1139 * tree during a delete. A deletion from lower in the tree
1140 * could try to delete the only pointer in this node.
1141 * So, pull some keys from the left.
1142 * There has to be a left pointer at this point because
1143 * otherwise we would have pulled some pointers from the
1144 * right.
1145 */
1146 if (!left) {
1147 ret = -EROFS;
1148 btrfs_handle_fs_error(fs_info, ret, NULL);
1149 goto enospc;
1150 }
1151 wret = balance_node_right(trans, mid, left);
1152 if (wret < 0) {
1153 ret = wret;
1154 goto enospc;
1155 }
1156 if (wret == 1) {
1157 wret = push_node_left(trans, left, mid, 1);
1158 if (wret < 0)
1159 ret = wret;
1160 }
1161 BUG_ON(wret == 1);
1162 }
1163 if (btrfs_header_nritems(mid) == 0) {
1164 btrfs_clean_tree_block(mid);
1165 btrfs_tree_unlock(mid);
1166 del_ptr(root, path, level + 1, pslot);
1167 root_sub_used(root, mid->len);
1168 btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
1169 free_extent_buffer_stale(mid);
1170 mid = NULL;
1171 } else {
1172 /* update the parent key to reflect our changes */
1173 struct btrfs_disk_key mid_key;
1174 btrfs_node_key(mid, &mid_key, 0);
1175 ret = btrfs_tree_mod_log_insert_key(parent, pslot,
1176 BTRFS_MOD_LOG_KEY_REPLACE);
1177 BUG_ON(ret < 0);
1178 btrfs_set_node_key(parent, &mid_key, pslot);
1179 btrfs_mark_buffer_dirty(parent);
1180 }
1181
1182 /* update the path */
1183 if (left) {
1184 if (btrfs_header_nritems(left) > orig_slot) {
1185 atomic_inc(&left->refs);
1186 /* left was locked after cow */
1187 path->nodes[level] = left;
1188 path->slots[level + 1] -= 1;
1189 path->slots[level] = orig_slot;
1190 if (mid) {
1191 btrfs_tree_unlock(mid);
1192 free_extent_buffer(mid);
1193 }
1194 } else {
1195 orig_slot -= btrfs_header_nritems(left);
1196 path->slots[level] = orig_slot;
1197 }
1198 }
1199 /* double check we haven't messed things up */
1200 if (orig_ptr !=
1201 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1202 BUG();
1203enospc:
1204 if (right) {
1205 btrfs_tree_unlock(right);
1206 free_extent_buffer(right);
1207 }
1208 if (left) {
1209 if (path->nodes[level] != left)
1210 btrfs_tree_unlock(left);
1211 free_extent_buffer(left);
1212 }
1213 return ret;
1214}
1215
1216/* Node balancing for insertion. Here we only split or push nodes around
1217 * when they are completely full. This is also done top down, so we
1218 * have to be pessimistic.
1219 */
1220static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1221 struct btrfs_root *root,
1222 struct btrfs_path *path, int level)
1223{
1224 struct btrfs_fs_info *fs_info = root->fs_info;
1225 struct extent_buffer *right = NULL;
1226 struct extent_buffer *mid;
1227 struct extent_buffer *left = NULL;
1228 struct extent_buffer *parent = NULL;
1229 int ret = 0;
1230 int wret;
1231 int pslot;
1232 int orig_slot = path->slots[level];
1233
1234 if (level == 0)
1235 return 1;
1236
1237 mid = path->nodes[level];
1238 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1239
1240 if (level < BTRFS_MAX_LEVEL - 1) {
1241 parent = path->nodes[level + 1];
1242 pslot = path->slots[level + 1];
1243 }
1244
1245 if (!parent)
1246 return 1;
1247
1248 left = btrfs_read_node_slot(parent, pslot - 1);
1249 if (IS_ERR(left))
1250 left = NULL;
1251
1252 /* first, try to make some room in the middle buffer */
1253 if (left) {
1254 u32 left_nr;
1255
1256 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
1257
1258 left_nr = btrfs_header_nritems(left);
1259 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
1260 wret = 1;
1261 } else {
1262 ret = btrfs_cow_block(trans, root, left, parent,
1263 pslot - 1, &left,
1264 BTRFS_NESTING_LEFT_COW);
1265 if (ret)
1266 wret = 1;
1267 else {
1268 wret = push_node_left(trans, left, mid, 0);
1269 }
1270 }
1271 if (wret < 0)
1272 ret = wret;
1273 if (wret == 0) {
1274 struct btrfs_disk_key disk_key;
1275 orig_slot += left_nr;
1276 btrfs_node_key(mid, &disk_key, 0);
1277 ret = btrfs_tree_mod_log_insert_key(parent, pslot,
1278 BTRFS_MOD_LOG_KEY_REPLACE);
1279 BUG_ON(ret < 0);
1280 btrfs_set_node_key(parent, &disk_key, pslot);
1281 btrfs_mark_buffer_dirty(parent);
1282 if (btrfs_header_nritems(left) > orig_slot) {
1283 path->nodes[level] = left;
1284 path->slots[level + 1] -= 1;
1285 path->slots[level] = orig_slot;
1286 btrfs_tree_unlock(mid);
1287 free_extent_buffer(mid);
1288 } else {
1289 orig_slot -=
1290 btrfs_header_nritems(left);
1291 path->slots[level] = orig_slot;
1292 btrfs_tree_unlock(left);
1293 free_extent_buffer(left);
1294 }
1295 return 0;
1296 }
1297 btrfs_tree_unlock(left);
1298 free_extent_buffer(left);
1299 }
1300 right = btrfs_read_node_slot(parent, pslot + 1);
1301 if (IS_ERR(right))
1302 right = NULL;
1303
1304 /*
1305 * then try to empty the right most buffer into the middle
1306 */
1307 if (right) {
1308 u32 right_nr;
1309
1310 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
1311
1312 right_nr = btrfs_header_nritems(right);
1313 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
1314 wret = 1;
1315 } else {
1316 ret = btrfs_cow_block(trans, root, right,
1317 parent, pslot + 1,
1318 &right, BTRFS_NESTING_RIGHT_COW);
1319 if (ret)
1320 wret = 1;
1321 else {
1322 wret = balance_node_right(trans, right, mid);
1323 }
1324 }
1325 if (wret < 0)
1326 ret = wret;
1327 if (wret == 0) {
1328 struct btrfs_disk_key disk_key;
1329
1330 btrfs_node_key(right, &disk_key, 0);
1331 ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
1332 BTRFS_MOD_LOG_KEY_REPLACE);
1333 BUG_ON(ret < 0);
1334 btrfs_set_node_key(parent, &disk_key, pslot + 1);
1335 btrfs_mark_buffer_dirty(parent);
1336
1337 if (btrfs_header_nritems(mid) <= orig_slot) {
1338 path->nodes[level] = right;
1339 path->slots[level + 1] += 1;
1340 path->slots[level] = orig_slot -
1341 btrfs_header_nritems(mid);
1342 btrfs_tree_unlock(mid);
1343 free_extent_buffer(mid);
1344 } else {
1345 btrfs_tree_unlock(right);
1346 free_extent_buffer(right);
1347 }
1348 return 0;
1349 }
1350 btrfs_tree_unlock(right);
1351 free_extent_buffer(right);
1352 }
1353 return 1;
1354}
1355
1356/*
1357 * readahead one full node of leaves, finding things that are close
1358 * to the block in 'slot', and triggering ra on them.
1359 */
1360static void reada_for_search(struct btrfs_fs_info *fs_info,
1361 struct btrfs_path *path,
1362 int level, int slot, u64 objectid)
1363{
1364 struct extent_buffer *node;
1365 struct btrfs_disk_key disk_key;
1366 u32 nritems;
1367 u64 search;
1368 u64 target;
1369 u64 nread = 0;
1370 u64 nread_max;
1371 u32 nr;
1372 u32 blocksize;
1373 u32 nscan = 0;
1374
1375 if (level != 1 && path->reada != READA_FORWARD_ALWAYS)
1376 return;
1377
1378 if (!path->nodes[level])
1379 return;
1380
1381 node = path->nodes[level];
1382
1383 /*
1384 * Since the time between visiting leaves is much shorter than the time
1385 * between visiting nodes, limit read ahead of nodes to 1, to avoid too
1386 * much IO at once (possibly random).
1387 */
1388 if (path->reada == READA_FORWARD_ALWAYS) {
1389 if (level > 1)
1390 nread_max = node->fs_info->nodesize;
1391 else
1392 nread_max = SZ_128K;
1393 } else {
1394 nread_max = SZ_64K;
1395 }
1396
1397 search = btrfs_node_blockptr(node, slot);
1398 blocksize = fs_info->nodesize;
1399 if (path->reada != READA_FORWARD_ALWAYS) {
1400 struct extent_buffer *eb;
1401
1402 eb = find_extent_buffer(fs_info, search);
1403 if (eb) {
1404 free_extent_buffer(eb);
1405 return;
1406 }
1407 }
1408
1409 target = search;
1410
1411 nritems = btrfs_header_nritems(node);
1412 nr = slot;
1413
1414 while (1) {
1415 if (path->reada == READA_BACK) {
1416 if (nr == 0)
1417 break;
1418 nr--;
1419 } else if (path->reada == READA_FORWARD ||
1420 path->reada == READA_FORWARD_ALWAYS) {
1421 nr++;
1422 if (nr >= nritems)
1423 break;
1424 }
1425 if (path->reada == READA_BACK && objectid) {
1426 btrfs_node_key(node, &disk_key, nr);
1427 if (btrfs_disk_key_objectid(&disk_key) != objectid)
1428 break;
1429 }
1430 search = btrfs_node_blockptr(node, nr);
1431 if (path->reada == READA_FORWARD_ALWAYS ||
1432 (search <= target && target - search <= 65536) ||
1433 (search > target && search - target <= 65536)) {
1434 btrfs_readahead_node_child(node, nr);
1435 nread += blocksize;
1436 }
1437 nscan++;
1438 if (nread > nread_max || nscan > 32)
1439 break;
1440 }
1441}
1442
1443static noinline void reada_for_balance(struct btrfs_path *path, int level)
1444{
1445 struct extent_buffer *parent;
1446 int slot;
1447 int nritems;
1448
1449 parent = path->nodes[level + 1];
1450 if (!parent)
1451 return;
1452
1453 nritems = btrfs_header_nritems(parent);
1454 slot = path->slots[level + 1];
1455
1456 if (slot > 0)
1457 btrfs_readahead_node_child(parent, slot - 1);
1458 if (slot + 1 < nritems)
1459 btrfs_readahead_node_child(parent, slot + 1);
1460}
1461
1462
1463/*
1464 * when we walk down the tree, it is usually safe to unlock the higher layers
1465 * in the tree. The exceptions are when our path goes through slot 0, because
1466 * operations on the tree might require changing key pointers higher up in the
1467 * tree.
1468 *
1469 * callers might also have set path->keep_locks, which tells this code to keep
1470 * the lock if the path points to the last slot in the block. This is part of
1471 * walking through the tree, and selecting the next slot in the higher block.
1472 *
1473 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. So
1474 * if lowest_unlock is 1, level 0 won't be unlocked.
1475 */
1476static noinline void unlock_up(struct btrfs_path *path, int level,
1477 int lowest_unlock, int min_write_lock_level,
1478 int *write_lock_level)
1479{
1480 int i;
1481 int skip_level = level;
1482 bool check_skip = true;
1483
1484 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1485 if (!path->nodes[i])
1486 break;
1487 if (!path->locks[i])
1488 break;
1489
1490 if (check_skip) {
1491 if (path->slots[i] == 0) {
1492 skip_level = i + 1;
1493 continue;
1494 }
1495
1496 if (path->keep_locks) {
1497 u32 nritems;
1498
1499 nritems = btrfs_header_nritems(path->nodes[i]);
1500 if (nritems < 1 || path->slots[i] >= nritems - 1) {
1501 skip_level = i + 1;
1502 continue;
1503 }
1504 }
1505 }
1506
1507 if (i >= lowest_unlock && i > skip_level) {
1508 check_skip = false;
1509 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
1510 path->locks[i] = 0;
1511 if (write_lock_level &&
1512 i > min_write_lock_level &&
1513 i <= *write_lock_level) {
1514 *write_lock_level = i - 1;
1515 }
1516 }
1517 }
1518}
1519
1520/*
1521 * Helper function for btrfs_search_slot() and other functions that do a search
1522 * on a btree. The goal is to find a tree block in the cache (the radix tree at
1523 * fs_info->buffer_radix), but if we can't find it, or it's not up to date, read
1524 * its pages from disk.
1525 *
1526 * Returns -EAGAIN, with the path unlocked, if the caller needs to repeat the
1527 * whole btree search, starting again from the current root node.
1528 */
1529static int
1530read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
1531 struct extent_buffer **eb_ret, int level, int slot,
1532 const struct btrfs_key *key)
1533{
1534 struct btrfs_fs_info *fs_info = root->fs_info;
1535 struct btrfs_tree_parent_check check = { 0 };
1536 u64 blocknr;
1537 u64 gen;
1538 struct extent_buffer *tmp;
1539 int ret;
1540 int parent_level;
1541 bool unlock_up;
1542
1543 unlock_up = ((level + 1 < BTRFS_MAX_LEVEL) && p->locks[level + 1]);
1544 blocknr = btrfs_node_blockptr(*eb_ret, slot);
1545 gen = btrfs_node_ptr_generation(*eb_ret, slot);
1546 parent_level = btrfs_header_level(*eb_ret);
1547 btrfs_node_key_to_cpu(*eb_ret, &check.first_key, slot);
1548 check.has_first_key = true;
1549 check.level = parent_level - 1;
1550 check.transid = gen;
1551 check.owner_root = root->root_key.objectid;
1552
1553 /*
1554 * If we need to read an extent buffer from disk and we are holding locks
1555 * on upper level nodes, we unlock all the upper nodes before reading the
1556 * extent buffer, and then return -EAGAIN to the caller as it needs to
1557 * restart the search. We don't release the lock on the current level
1558 * because we need to walk this node to figure out which blocks to read.
1559 */
1560 tmp = find_extent_buffer(fs_info, blocknr);
1561 if (tmp) {
1562 if (p->reada == READA_FORWARD_ALWAYS)
1563 reada_for_search(fs_info, p, level, slot, key->objectid);
1564
1565 /* first we do an atomic uptodate check */
1566 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
1567 /*
1568 * Do extra check for first_key, eb can be stale due to
1569 * being cached, read by scrub, or having multiple
1570 * parents (shared tree blocks).
1571 */
1572 if (btrfs_verify_level_key(tmp,
1573 parent_level - 1, &check.first_key, gen)) {
1574 free_extent_buffer(tmp);
1575 return -EUCLEAN;
1576 }
1577 *eb_ret = tmp;
1578 return 0;
1579 }
1580
1581 if (p->nowait) {
1582 free_extent_buffer(tmp);
1583 return -EAGAIN;
1584 }
1585
1586 if (unlock_up)
1587 btrfs_unlock_up_safe(p, level + 1);
1588
1589 /* now we're allowed to do a blocking uptodate check */
1590 ret = btrfs_read_extent_buffer(tmp, &check);
1591 if (ret) {
1592 free_extent_buffer(tmp);
1593 btrfs_release_path(p);
1594 return -EIO;
1595 }
1596 if (btrfs_check_eb_owner(tmp, root->root_key.objectid)) {
1597 free_extent_buffer(tmp);
1598 btrfs_release_path(p);
1599 return -EUCLEAN;
1600 }
1601
1602 if (unlock_up)
1603 ret = -EAGAIN;
1604
1605 goto out;
1606 } else if (p->nowait) {
1607 return -EAGAIN;
1608 }
1609
1610 if (unlock_up) {
1611 btrfs_unlock_up_safe(p, level + 1);
1612 ret = -EAGAIN;
1613 } else {
1614 ret = 0;
1615 }
1616
1617 if (p->reada != READA_NONE)
1618 reada_for_search(fs_info, p, level, slot, key->objectid);
1619
1620 tmp = read_tree_block(fs_info, blocknr, &check);
1621 if (IS_ERR(tmp)) {
1622 btrfs_release_path(p);
1623 return PTR_ERR(tmp);
1624 }
1625 /*
1626 * If the read above didn't mark this buffer up to date,
1627 * it will never end up being up to date. Set ret to -EIO now
1628 * and give up so that our caller doesn't loop forever
1629 * on our EAGAINs.
1630 */
1631 if (!extent_buffer_uptodate(tmp))
1632 ret = -EIO;
1633
1634out:
1635 if (ret == 0) {
1636 *eb_ret = tmp;
1637 } else {
1638 free_extent_buffer(tmp);
1639 btrfs_release_path(p);
1640 }
1641
1642 return ret;
1643}
1644
1645/*
1646 * helper function for btrfs_search_slot. This does all of the checks
1647 * for node-level blocks and does any balancing required based on
1648 * the ins_len.
1649 *
1650 * If no extra work was required, zero is returned. If we had to
1651 * drop the path, -EAGAIN is returned and btrfs_search_slot must
1652 * start over
1653 */
1654static int
1655setup_nodes_for_search(struct btrfs_trans_handle *trans,
1656 struct btrfs_root *root, struct btrfs_path *p,
1657 struct extent_buffer *b, int level, int ins_len,
1658 int *write_lock_level)
1659{
1660 struct btrfs_fs_info *fs_info = root->fs_info;
1661 int ret = 0;
1662
1663 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
1664 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
1665
1666 if (*write_lock_level < level + 1) {
1667 *write_lock_level = level + 1;
1668 btrfs_release_path(p);
1669 return -EAGAIN;
1670 }
1671
1672 reada_for_balance(p, level);
1673 ret = split_node(trans, root, p, level);
1674
1675 b = p->nodes[level];
1676 } else if (ins_len < 0 && btrfs_header_nritems(b) <
1677 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
1678
1679 if (*write_lock_level < level + 1) {
1680 *write_lock_level = level + 1;
1681 btrfs_release_path(p);
1682 return -EAGAIN;
1683 }
1684
1685 reada_for_balance(p, level);
1686 ret = balance_level(trans, root, p, level);
1687 if (ret)
1688 return ret;
1689
1690 b = p->nodes[level];
1691 if (!b) {
1692 btrfs_release_path(p);
1693 return -EAGAIN;
1694 }
1695 BUG_ON(btrfs_header_nritems(b) == 1);
1696 }
1697 return ret;
1698}
1699
1700int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
1701 u64 iobjectid, u64 ioff, u8 key_type,
1702 struct btrfs_key *found_key)
1703{
1704 int ret;
1705 struct btrfs_key key;
1706 struct extent_buffer *eb;
1707
1708 ASSERT(path);
1709 ASSERT(found_key);
1710
1711 key.type = key_type;
1712 key.objectid = iobjectid;
1713 key.offset = ioff;
1714
1715 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
1716 if (ret < 0)
1717 return ret;
1718
1719 eb = path->nodes[0];
1720 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
1721 ret = btrfs_next_leaf(fs_root, path);
1722 if (ret)
1723 return ret;
1724 eb = path->nodes[0];
1725 }
1726
1727 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
1728 if (found_key->type != key.type ||
1729 found_key->objectid != key.objectid)
1730 return 1;
1731
1732 return 0;
1733}
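
/*
 * Editor's example (illustrative sketch, not from the original source):
 * a minimal btrfs_find_item() lookup for an inode item, where the exact
 * offset does not matter. The helper name example_find_inode_item() is
 * hypothetical.
 */
static int __maybe_unused example_find_inode_item(struct btrfs_root *root,
						  u64 ino)
{
	struct btrfs_path *path;
	struct btrfs_key found;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* 0 if found, 1 if not found, < 0 on error */
	ret = btrfs_find_item(root, path, ino, 0, BTRFS_INODE_ITEM_KEY,
			      &found);
	btrfs_free_path(path);
	return ret;
}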
1734
1735static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
1736 struct btrfs_path *p,
1737 int write_lock_level)
1738{
1739 struct extent_buffer *b;
1740 int root_lock = 0;
1741 int level = 0;
1742
1743 if (p->search_commit_root) {
1744 b = root->commit_root;
1745 atomic_inc(&b->refs);
1746 level = btrfs_header_level(b);
1747 /*
1748 * Ensure that all callers have set skip_locking when
1749 * p->search_commit_root = 1.
1750 */
1751 ASSERT(p->skip_locking == 1);
1752
1753 goto out;
1754 }
1755
1756 if (p->skip_locking) {
1757 b = btrfs_root_node(root);
1758 level = btrfs_header_level(b);
1759 goto out;
1760 }
1761
1762 /* We try very hard to do read locks on the root */
1763 root_lock = BTRFS_READ_LOCK;
1764
1765 /*
1766 * If the level is set to maximum, we can skip trying to get the read
1767 * lock.
1768 */
1769 if (write_lock_level < BTRFS_MAX_LEVEL) {
1770 /*
1771 * We don't know the level of the root node until we actually
1772 * have it read locked
1773 */
1774 if (p->nowait) {
1775 b = btrfs_try_read_lock_root_node(root);
1776 if (IS_ERR(b))
1777 return b;
1778 } else {
1779 b = btrfs_read_lock_root_node(root);
1780 }
1781 level = btrfs_header_level(b);
1782 if (level > write_lock_level)
1783 goto out;
1784
1785 /* Whoops, must trade for write lock */
1786 btrfs_tree_read_unlock(b);
1787 free_extent_buffer(b);
1788 }
1789
1790 b = btrfs_lock_root_node(root);
1791 root_lock = BTRFS_WRITE_LOCK;
1792
1793 /* The level might have changed, check again */
1794 level = btrfs_header_level(b);
1795
1796out:
1797 /*
1798 * The root may have failed to write out at some point, and thus is no
1799 * longer valid; return an error in this case.
1800 */
1801 if (!extent_buffer_uptodate(b)) {
1802 if (root_lock)
1803 btrfs_tree_unlock_rw(b, root_lock);
1804 free_extent_buffer(b);
1805 return ERR_PTR(-EIO);
1806 }
1807
1808 p->nodes[level] = b;
1809 if (!p->skip_locking)
1810 p->locks[level] = root_lock;
1811 /*
1812 * Callers are responsible for dropping b's references.
1813 */
1814 return b;
1815}
1816
1817/*
1818 * Replace the extent buffer at the lowest level of the path with a cloned
1819 * version. The purpose is to be able to use it safely, after releasing the
1820 * commit root semaphore, even if relocation is happening in parallel, the
1821 * transaction used for relocation is committed and the extent buffer is
1822 * reallocated in the next transaction.
1823 *
1824 * This is used in a context where the caller does not prevent transaction
1825 * commits from happening, either by holding a transaction handle or holding
1826 * some lock, while it's doing searches through a commit root.
1827 * At the moment it's only used for send operations.
1828 */
1829static int finish_need_commit_sem_search(struct btrfs_path *path)
1830{
1831 const int i = path->lowest_level;
1832 const int slot = path->slots[i];
1833 struct extent_buffer *lowest = path->nodes[i];
1834 struct extent_buffer *clone;
1835
1836 ASSERT(path->need_commit_sem);
1837
1838 if (!lowest)
1839 return 0;
1840
1841 lockdep_assert_held_read(&lowest->fs_info->commit_root_sem);
1842
1843 clone = btrfs_clone_extent_buffer(lowest);
1844 if (!clone)
1845 return -ENOMEM;
1846
1847 btrfs_release_path(path);
1848 path->nodes[i] = clone;
1849 path->slots[i] = slot;
1850
1851 return 0;
1852}
1853
1854static inline int search_for_key_slot(struct extent_buffer *eb,
1855 int search_low_slot,
1856 const struct btrfs_key *key,
1857 int prev_cmp,
1858 int *slot)
1859{
1860 /*
1861 * If a previous call to btrfs_bin_search() on a parent node returned an
1862 * exact match (prev_cmp == 0), we can safely assume the target key will
1863 * always be at slot 0 on lower levels, since each key pointer
1864 * (struct btrfs_key_ptr) refers to the lowest key accessible from the
1865 * subtree it points to. Thus we can skip searching lower levels.
1866 */
1867 if (prev_cmp == 0) {
1868 *slot = 0;
1869 return 0;
1870 }
1871
1872 return generic_bin_search(eb, search_low_slot, key, slot);
1873}
1874
1875static int search_leaf(struct btrfs_trans_handle *trans,
1876 struct btrfs_root *root,
1877 const struct btrfs_key *key,
1878 struct btrfs_path *path,
1879 int ins_len,
1880 int prev_cmp)
1881{
1882 struct extent_buffer *leaf = path->nodes[0];
1883 int leaf_free_space = -1;
1884 int search_low_slot = 0;
1885 int ret;
1886 bool do_bin_search = true;
1887
1888 /*
1889 * If we are doing an insertion, the leaf has enough free space and the
1890 * destination slot for the key is not slot 0, then we can unlock our
1891 * write lock on the parent, and any other upper nodes, before doing the
1892 * binary search on the leaf (with search_for_key_slot()), allowing other
1893 * tasks to lock the parent and any other upper nodes.
1894 */
1895 if (ins_len > 0) {
1896 /*
1897 * Cache the leaf free space, since we will need it later and it
1898 * will not change until then.
1899 */
1900 leaf_free_space = btrfs_leaf_free_space(leaf);
1901
1902 /*
1903 * !path->locks[1] means we have a single node tree, the leaf is
1904 * the root of the tree.
1905 */
1906 if (path->locks[1] && leaf_free_space >= ins_len) {
1907 struct btrfs_disk_key first_key;
1908
1909 ASSERT(btrfs_header_nritems(leaf) > 0);
1910 btrfs_item_key(leaf, &first_key, 0);
1911
1912 /*
1913 * Doing the extra comparison with the first key is cheap,
1914 * taking into account that the first key is very likely
1915 * already in a cache line because it immediately follows
1916 * the extent buffer's header and we have recently accessed
1917 * the header's level field.
1918 */
1919 ret = comp_keys(&first_key, key);
1920 if (ret < 0) {
1921 /*
1922 * The first key is smaller than the key we want
1923 * to insert, so we are safe to unlock all upper
1924 * nodes and we have to do the binary search.
1925 *
1926 * We do use btrfs_unlock_up_safe() and not
1927 * unlock_up() because the latter does not unlock
1928 * nodes with a slot of 0 - we can safely unlock
1929 * any node even if its slot is 0 since in this
1930 * case the key does not end up at slot 0 of the
1931 * leaf and there's no need to split the leaf.
1932 */
1933 btrfs_unlock_up_safe(path, 1);
1934 search_low_slot = 1;
1935 } else {
1936 /*
1937 * The first key is >= the key we want to
1938 * insert, so we can skip the binary search as
1939 * the target key will be at slot 0.
1940 *
1941 * We can not unlock upper nodes when the key is
1942 * less than the first key, because we will need
1943 * to update the key at slot 0 of the parent node
1944 * and possibly of other upper nodes too.
1945 * If the key matches the first key, then we can
1946 * unlock all the upper nodes, using
1947 * btrfs_unlock_up_safe() instead of unlock_up()
1948 * as stated above.
1949 */
1950 if (ret == 0)
1951 btrfs_unlock_up_safe(path, 1);
1952 /*
1953 * ret is already 0 or 1, matching the result of
1954 * a btrfs_bin_search() call, so there is no need
1955 * to adjust it.
1956 */
1957 do_bin_search = false;
1958 path->slots[0] = 0;
1959 }
1960 }
1961 }
1962
1963 if (do_bin_search) {
1964 ret = search_for_key_slot(leaf, search_low_slot, key,
1965 prev_cmp, &path->slots[0]);
1966 if (ret < 0)
1967 return ret;
1968 }
1969
1970 if (ins_len > 0) {
1971 /*
1972 * Item key already exists. In this case, if we are allowed to
1973 * insert the item (for example, in dir_item case, item key
1974 * collision is allowed), it will be merged with the original
1975 * item. Only the item size grows, no new btrfs item will be
1976 * added. If search_for_extension is not set, ins_len already
1977 * accounts for the size of struct btrfs_item; deduct it here so
1978 * the leaf space check will be correct.
1979 */
1980 if (ret == 0 && !path->search_for_extension) {
1981 ASSERT(ins_len >= sizeof(struct btrfs_item));
1982 ins_len -= sizeof(struct btrfs_item);
1983 }
1984
1985 ASSERT(leaf_free_space >= 0);
1986
1987 if (leaf_free_space < ins_len) {
1988 int err;
1989
1990 err = split_leaf(trans, root, key, path, ins_len,
1991 (ret == 0));
1992 ASSERT(err <= 0);
1993 if (WARN_ON(err > 0))
1994 err = -EUCLEAN;
1995 if (err)
1996 ret = err;
1997 }
1998 }
1999
2000 return ret;
2001}
2002
2003/*
2004 * btrfs_search_slot - look for a key in a tree and perform necessary
2005 * modifications to preserve tree invariants.
2006 *
2007 * @trans: Handle of transaction, used when modifying the tree
2008 * @p: Holds all btree nodes along the search path
2009 * @root: The root node of the tree
2010 * @key: The key we are looking for
2011 * @ins_len: Indicates purpose of search:
2012 * >0 for inserts it's size of item inserted (*)
2013 * <0 for deletions
2014 * 0 for plain searches, not modifying the tree
2015 *
2016 * (*) If size of item inserted doesn't include
2017 * sizeof(struct btrfs_item), then p->search_for_extension must
2018 * be set.
2019 * @cow: boolean should CoW operations be performed. Must always be 1
2020 * when modifying the tree.
2021 *
2022 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
2023 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
2024 *
2025 * If @key is found, 0 is returned and you can find the item in the leaf level
2026 * of the path (level 0)
2027 *
2028 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
2029 * points to the slot where it should be inserted
2030 *
2031 * If an error is encountered while searching the tree a negative error number
2032 * is returned
2033 */
2034int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2035 const struct btrfs_key *key, struct btrfs_path *p,
2036 int ins_len, int cow)
2037{
2038 struct btrfs_fs_info *fs_info = root->fs_info;
2039 struct extent_buffer *b;
2040 int slot;
2041 int ret;
2042 int err;
2043 int level;
2044 int lowest_unlock = 1;
2045 /* everything at write_lock_level or lower must be write locked */
2046 int write_lock_level = 0;
2047 u8 lowest_level = 0;
2048 int min_write_lock_level;
2049 int prev_cmp;
2050
2051 might_sleep();
2052
2053 lowest_level = p->lowest_level;
2054 WARN_ON(lowest_level && ins_len > 0);
2055 WARN_ON(p->nodes[0] != NULL);
2056 BUG_ON(!cow && ins_len);
2057
2058 /*
2059 * For now only allow nowait for read only operations. There's no
2060 * strict reason why we can't, we just only need it for reads so it's
2061 * only implemented for reads.
2062 */
2063 ASSERT(!p->nowait || !cow);
2064
2065 if (ins_len < 0) {
2066 lowest_unlock = 2;
2067
2068 /* when we are removing items, we might have to go up to level
2069 * two as we update tree pointers. Make sure we keep write
2070 * locks for those levels as well
2071 */
2072 write_lock_level = 2;
2073 } else if (ins_len > 0) {
2074 /*
2075 * for inserting items, make sure we have a write lock on
2076 * level 1 so we can update keys
2077 */
2078 write_lock_level = 1;
2079 }
2080
2081 if (!cow)
2082 write_lock_level = -1;
2083
2084 if (cow && (p->keep_locks || p->lowest_level))
2085 write_lock_level = BTRFS_MAX_LEVEL;
2086
2087 min_write_lock_level = write_lock_level;
2088
2089 if (p->need_commit_sem) {
2090 ASSERT(p->search_commit_root);
2091 if (p->nowait) {
2092 if (!down_read_trylock(&fs_info->commit_root_sem))
2093 return -EAGAIN;
2094 } else {
2095 down_read(&fs_info->commit_root_sem);
2096 }
2097 }
2098
2099again:
2100 prev_cmp = -1;
2101 b = btrfs_search_slot_get_root(root, p, write_lock_level);
2102 if (IS_ERR(b)) {
2103 ret = PTR_ERR(b);
2104 goto done;
2105 }
2106
2107 while (b) {
2108 int dec = 0;
2109
2110 level = btrfs_header_level(b);
2111
2112 if (cow) {
2113 bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
2114
2115 /*
2116 * if we don't really need to cow this block
2117 * then we don't want to set the path blocking,
2118 * so we test it here
2119 */
2120 if (!should_cow_block(trans, root, b))
2121 goto cow_done;
2122
2123 /*
2124 * must have write locks on this node and the
2125 * parent
2126 */
2127 if (level > write_lock_level ||
2128 (level + 1 > write_lock_level &&
2129 level + 1 < BTRFS_MAX_LEVEL &&
2130 p->nodes[level + 1])) {
2131 write_lock_level = level + 1;
2132 btrfs_release_path(p);
2133 goto again;
2134 }
2135
2136 if (last_level)
2137 err = btrfs_cow_block(trans, root, b, NULL, 0,
2138 &b,
2139 BTRFS_NESTING_COW);
2140 else
2141 err = btrfs_cow_block(trans, root, b,
2142 p->nodes[level + 1],
2143 p->slots[level + 1], &b,
2144 BTRFS_NESTING_COW);
2145 if (err) {
2146 ret = err;
2147 goto done;
2148 }
2149 }
2150cow_done:
2151 p->nodes[level] = b;
2152
2153 /*
2154 * we have a lock on b and as long as we aren't changing
2155 * the tree, there is no way for the items in b to change.
2156 * It is safe to drop the lock on our parent before we
2157 * go through the expensive btree search on b.
2158 *
2159 * If we're inserting or deleting (ins_len != 0), then we might
2160 * be changing slot zero, which may require changing the parent.
2161 * So, we can't drop the lock until after we know which slot
2162 * we're operating on.
2163 */
2164 if (!ins_len && !p->keep_locks) {
2165 int u = level + 1;
2166
2167 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2168 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2169 p->locks[u] = 0;
2170 }
2171 }
2172
2173 if (level == 0) {
2174 if (ins_len > 0)
2175 ASSERT(write_lock_level >= 1);
2176
2177 ret = search_leaf(trans, root, key, p, ins_len, prev_cmp);
2178 if (!p->search_for_split)
2179 unlock_up(p, level, lowest_unlock,
2180 min_write_lock_level, NULL);
2181 goto done;
2182 }
2183
2184 ret = search_for_key_slot(b, 0, key, prev_cmp, &slot);
2185 if (ret < 0)
2186 goto done;
2187 prev_cmp = ret;
2188
2189 if (ret && slot > 0) {
2190 dec = 1;
2191 slot--;
2192 }
2193 p->slots[level] = slot;
2194 err = setup_nodes_for_search(trans, root, p, b, level, ins_len,
2195 &write_lock_level);
2196 if (err == -EAGAIN)
2197 goto again;
2198 if (err) {
2199 ret = err;
2200 goto done;
2201 }
2202 b = p->nodes[level];
2203 slot = p->slots[level];
2204
2205 /*
2206 * Slot 0 is special, if we change the key we have to update
2207 * the parent pointer which means we must have a write lock on
2208 * the parent
2209 */
2210 if (slot == 0 && ins_len && write_lock_level < level + 1) {
2211 write_lock_level = level + 1;
2212 btrfs_release_path(p);
2213 goto again;
2214 }
2215
2216 unlock_up(p, level, lowest_unlock, min_write_lock_level,
2217 &write_lock_level);
2218
2219 if (level == lowest_level) {
2220 if (dec)
2221 p->slots[level]++;
2222 goto done;
2223 }
2224
2225 err = read_block_for_search(root, p, &b, level, slot, key);
2226 if (err == -EAGAIN)
2227 goto again;
2228 if (err) {
2229 ret = err;
2230 goto done;
2231 }
2232
2233 if (!p->skip_locking) {
2234 level = btrfs_header_level(b);
2235
2236 btrfs_maybe_reset_lockdep_class(root, b);
2237
2238 if (level <= write_lock_level) {
2239 btrfs_tree_lock(b);
2240 p->locks[level] = BTRFS_WRITE_LOCK;
2241 } else {
2242 if (p->nowait) {
2243 if (!btrfs_try_tree_read_lock(b)) {
2244 free_extent_buffer(b);
2245 ret = -EAGAIN;
2246 goto done;
2247 }
2248 } else {
2249 btrfs_tree_read_lock(b);
2250 }
2251 p->locks[level] = BTRFS_READ_LOCK;
2252 }
2253 p->nodes[level] = b;
2254 }
2255 }
2256 ret = 1;
2257done:
2258 if (ret < 0 && !p->skip_release_on_error)
2259 btrfs_release_path(p);
2260
2261 if (p->need_commit_sem) {
2262 int ret2;
2263
2264 ret2 = finish_need_commit_sem_search(p);
2265 up_read(&fs_info->commit_root_sem);
2266 if (ret2)
2267 ret = ret2;
2268 }
2269
2270 return ret;
2271}
2272ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO);
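
/*
 * Editor's example (illustrative sketch, not from the original source):
 * a plain read-only lookup with btrfs_search_slot(). With ins_len == 0
 * and cow == 0 the tree is not modified; on ret == 0 path->slots[0]
 * points at the item, on ret == 1 it points at the slot where the key
 * would be inserted. The helper name example_lookup() is hypothetical.
 */
static int __maybe_unused example_lookup(struct btrfs_root *root, u64 ino)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret == 0) {
		/* the item lives at path->nodes[0], path->slots[0] */
	}
	btrfs_free_path(path);
	return ret;
}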
2273
2274/*
2275 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2276 * current state of the tree together with the operations recorded in the tree
2277 * modification log to search for the key in a previous version of this tree, as
2278 * denoted by the time_seq parameter.
2279 *
2280 * Naturally, there is no support for insert, delete or cow operations.
2281 *
2282 * The resulting path and return value will be set up as if we called
2283 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2284 */
2285int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
2286 struct btrfs_path *p, u64 time_seq)
2287{
2288 struct btrfs_fs_info *fs_info = root->fs_info;
2289 struct extent_buffer *b;
2290 int slot;
2291 int ret;
2292 int err;
2293 int level;
2294 int lowest_unlock = 1;
2295 u8 lowest_level = 0;
2296
2297 lowest_level = p->lowest_level;
2298 WARN_ON(p->nodes[0] != NULL);
2299 ASSERT(!p->nowait);
2300
2301 if (p->search_commit_root) {
2302 BUG_ON(time_seq);
2303 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2304 }
2305
2306again:
2307 b = btrfs_get_old_root(root, time_seq);
2308 if (!b) {
2309 ret = -EIO;
2310 goto done;
2311 }
2312 level = btrfs_header_level(b);
2313 p->locks[level] = BTRFS_READ_LOCK;
2314
2315 while (b) {
2316 int dec = 0;
2317
2318 level = btrfs_header_level(b);
2319 p->nodes[level] = b;
2320
2321 /*
2322 * we have a lock on b and as long as we aren't changing
2323 * the tree, there is no way for the items in b to change.
2324 * It is safe to drop the lock on our parent before we
2325 * go through the expensive btree search on b.
2326 */
2327 btrfs_unlock_up_safe(p, level + 1);
2328
2329 ret = btrfs_bin_search(b, key, &slot);
2330 if (ret < 0)
2331 goto done;
2332
2333 if (level == 0) {
2334 p->slots[level] = slot;
2335 unlock_up(p, level, lowest_unlock, 0, NULL);
2336 goto done;
2337 }
2338
2339 if (ret && slot > 0) {
2340 dec = 1;
2341 slot--;
2342 }
2343 p->slots[level] = slot;
2344 unlock_up(p, level, lowest_unlock, 0, NULL);
2345
2346 if (level == lowest_level) {
2347 if (dec)
2348 p->slots[level]++;
2349 goto done;
2350 }
2351
2352 err = read_block_for_search(root, p, &b, level, slot, key);
2353 if (err == -EAGAIN)
2354 goto again;
2355 if (err) {
2356 ret = err;
2357 goto done;
2358 }
2359
2360 level = btrfs_header_level(b);
2361 btrfs_tree_read_lock(b);
2362 b = btrfs_tree_mod_log_rewind(fs_info, p, b, time_seq);
2363 if (!b) {
2364 ret = -ENOMEM;
2365 goto done;
2366 }
2367 p->locks[level] = BTRFS_READ_LOCK;
2368 p->nodes[level] = b;
2369 }
2370 ret = 1;
2371done:
2372 if (ret < 0)
2373 btrfs_release_path(p);
2374
2375 return ret;
2376}
2377
2378/*
2379 * helper to use instead of search slot if no exact match is needed but
2380 * instead the next or previous item should be returned.
2381 * When find_higher is true, the next higher item is returned, the next lower
2382 * otherwise.
2383 * When return_any and find_higher are both true, and no higher item is found,
2384 * return the next lower instead.
2385 * When return_any is true and find_higher is false, and no lower item is found,
2386 * return the next higher instead.
2387 * It returns 0 if any item is found, 1 if none is found (tree empty), and
2388 * < 0 on error
2389 */
2390int btrfs_search_slot_for_read(struct btrfs_root *root,
2391 const struct btrfs_key *key,
2392 struct btrfs_path *p, int find_higher,
2393 int return_any)
2394{
2395 int ret;
2396 struct extent_buffer *leaf;
2397
2398again:
2399 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
2400 if (ret <= 0)
2401 return ret;
2402 /*
2403 * a return value of 1 means the path is at the position where the
2404 * item should be inserted. Normally this is the next bigger item,
2405 * but in case the previous item is the last in a leaf, path points
2406 * to the first free slot in the previous leaf, i.e. at an invalid
2407 * item.
2408 */
2409 leaf = p->nodes[0];
2410
2411 if (find_higher) {
2412 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
2413 ret = btrfs_next_leaf(root, p);
2414 if (ret <= 0)
2415 return ret;
2416 if (!return_any)
2417 return 1;
2418 /*
2419 * no higher item found, return the next
2420 * lower instead
2421 */
2422 return_any = 0;
2423 find_higher = 0;
2424 btrfs_release_path(p);
2425 goto again;
2426 }
2427 } else {
2428 if (p->slots[0] == 0) {
2429 ret = btrfs_prev_leaf(root, p);
2430 if (ret < 0)
2431 return ret;
2432 if (!ret) {
2433 leaf = p->nodes[0];
2434 if (p->slots[0] == btrfs_header_nritems(leaf))
2435 p->slots[0]--;
2436 return 0;
2437 }
2438 if (!return_any)
2439 return 1;
2440 /*
2441 * no lower item found, return the next
2442 * higher instead
2443 */
2444 return_any = 0;
2445 find_higher = 1;
2446 btrfs_release_path(p);
2447 goto again;
2448 } else {
2449 --p->slots[0];
2450 }
2451 }
2452 return 0;
2453}
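
/*
 * Editor's example (illustrative sketch, not from the original source):
 * find the first item at or after @key, falling back to the closest
 * lower item if nothing higher exists anywhere in the tree. The helper
 * name example_find_nearest() is hypothetical.
 */
static int __maybe_unused example_find_nearest(struct btrfs_root *root,
					       const struct btrfs_key *key,
					       struct btrfs_path *path,
					       struct btrfs_key *found)
{
	int ret;

	ret = btrfs_search_slot_for_read(root, key, path, 1, 1);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found, path->slots[0]);
	return ret;	/* 1 means the tree is empty */
}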
2454
2455/*
2456 * Execute search and call btrfs_previous_item to traverse backwards if the item
2457 * was not found.
2458 *
2459 * Return 0 if found, 1 if not found and < 0 if error.
2460 */
2461int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key,
2462 struct btrfs_path *path)
2463{
2464 int ret;
2465
2466 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
2467 if (ret > 0)
2468 ret = btrfs_previous_item(root, path, key->objectid, key->type);
2469
2470 if (ret == 0)
2471 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]);
2472
2473 return ret;
2474}
2475
2476/*
2477 * Search for a valid slot for the given path.
2478 *
2479 * @root: The root node of the tree.
2480 * @key: Will contain a valid item if found.
2481 * @path: The starting point to validate the slot.
2482 *
2483 * Return: 0 if the item is valid
2484 * 1 if not found
2485 * <0 if error.
2486 */
2487int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
2488 struct btrfs_path *path)
2489{
2490 while (1) {
2491 int ret;
2492 const int slot = path->slots[0];
2493 const struct extent_buffer *leaf = path->nodes[0];
2494
2495 /* This is where we start walking the path. */
2496 if (slot >= btrfs_header_nritems(leaf)) {
2497 /*
2498 * If we've reached the last slot in this leaf we need
2499 * to go to the next leaf and reset the path.
2500 */
2501 ret = btrfs_next_leaf(root, path);
2502 if (ret)
2503 return ret;
2504 continue;
2505 }
2506 /* Store the found, valid item in @key. */
2507 btrfs_item_key_to_cpu(leaf, key, slot);
2508 break;
2509 }
2510 return 0;
2511}
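
/*
 * Editor's example (illustrative sketch, not from the original source):
 * a forward scan built on btrfs_get_next_valid_item(). The helper takes
 * care of crossing leaf boundaries; the caller only advances the slot.
 * The function name example_iterate() is hypothetical.
 */
static int __maybe_unused example_iterate(struct btrfs_root *root,
					  struct btrfs_path *path)
{
	struct btrfs_key key;
	int ret;

	while ((ret = btrfs_get_next_valid_item(root, &key, path)) == 0) {
		/* ... process the item at path->slots[0] ... */
		path->slots[0]++;
	}
	/* ret == 1 just means we walked off the end of the tree */
	return ret < 0 ? ret : 0;
}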
2512
2513/*
2514 * adjust the pointers going up the tree, starting at level,
2515 * making sure the right key of each node points to 'key'.
2516 * This is used after shifting pointers to the left, so it stops
2517 * fixing up pointers when a given leaf/node is not in slot 0 of the
2518 * higher levels.
2519 *
2520 */
2521static void fixup_low_keys(struct btrfs_path *path,
2522 struct btrfs_disk_key *key, int level)
2523{
2524 int i;
2525 struct extent_buffer *t;
2526 int ret;
2527
2528 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2529 int tslot = path->slots[i];
2530
2531 if (!path->nodes[i])
2532 break;
2533 t = path->nodes[i];
2534 ret = btrfs_tree_mod_log_insert_key(t, tslot,
2535 BTRFS_MOD_LOG_KEY_REPLACE);
2536 BUG_ON(ret < 0);
2537 btrfs_set_node_key(t, key, tslot);
2538 btrfs_mark_buffer_dirty(path->nodes[i]);
2539 if (tslot != 0)
2540 break;
2541 }
2542}
2543
2544/*
2545 * update item key.
2546 *
2547 * This function isn't completely safe. It's the caller's responsibility
2548 * to ensure that the new key won't break the ordering.
2549 */
2550void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
2551 struct btrfs_path *path,
2552 const struct btrfs_key *new_key)
2553{
2554 struct btrfs_disk_key disk_key;
2555 struct extent_buffer *eb;
2556 int slot;
2557
2558 eb = path->nodes[0];
2559 slot = path->slots[0];
2560 if (slot > 0) {
2561 btrfs_item_key(eb, &disk_key, slot - 1);
2562 if (unlikely(comp_keys(&disk_key, new_key) >= 0)) {
2563 btrfs_crit(fs_info,
2564 "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
2565 slot, btrfs_disk_key_objectid(&disk_key),
2566 btrfs_disk_key_type(&disk_key),
2567 btrfs_disk_key_offset(&disk_key),
2568 new_key->objectid, new_key->type,
2569 new_key->offset);
2570 btrfs_print_leaf(eb);
2571 BUG();
2572 }
2573 }
2574 if (slot < btrfs_header_nritems(eb) - 1) {
2575 btrfs_item_key(eb, &disk_key, slot + 1);
2576 if (unlikely(comp_keys(&disk_key, new_key) <= 0)) {
2577 btrfs_crit(fs_info,
2578 "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
2579 slot, btrfs_disk_key_objectid(&disk_key),
2580 btrfs_disk_key_type(&disk_key),
2581 btrfs_disk_key_offset(&disk_key),
2582 new_key->objectid, new_key->type,
2583 new_key->offset);
2584 btrfs_print_leaf(eb);
2585 BUG();
2586 }
2587 }
2588
2589 btrfs_cpu_key_to_disk(&disk_key, new_key);
2590 btrfs_set_item_key(eb, &disk_key, slot);
2591 btrfs_mark_buffer_dirty(eb);
2592 if (slot == 0)
2593 fixup_low_keys(path, &disk_key, 1);
2594}
2595
2596/*
2597 * Check key order of two sibling extent buffers.
2598 *
2599 * Return true if something is wrong.
2600 * Return false if everything is fine.
2601 *
2602 * Tree-checker only works inside one tree block, thus the following
2603 * corruption cannot be detected by tree-checker:
2604 *
2605 * Leaf @left | Leaf @right
2606 * --------------------------------------------------------------
2607 * | 1 | 2 | 3 | 4 | 5 | f6 | | 7 | 8 |
2608 *
2609 * Key f6 in leaf @left itself is valid, but not valid when the next
2610 * key in leaf @right is 7.
2611 * This can only be checked at tree block merge time.
2612 * And since tree checker has ensured all key order in each tree block
2613 * is correct, we only need to bother the last key of @left and the first
2614 * key of @right.
2615 */
2616static bool check_sibling_keys(struct extent_buffer *left,
2617 struct extent_buffer *right)
2618{
2619 struct btrfs_key left_last;
2620 struct btrfs_key right_first;
2621 int level = btrfs_header_level(left);
2622 int nr_left = btrfs_header_nritems(left);
2623 int nr_right = btrfs_header_nritems(right);
2624
2625 /* No key to check in one of the tree blocks */
2626 if (!nr_left || !nr_right)
2627 return false;
2628
2629 if (level) {
2630 btrfs_node_key_to_cpu(left, &left_last, nr_left - 1);
2631 btrfs_node_key_to_cpu(right, &right_first, 0);
2632 } else {
2633 btrfs_item_key_to_cpu(left, &left_last, nr_left - 1);
2634 btrfs_item_key_to_cpu(right, &right_first, 0);
2635 }
2636
2637 if (btrfs_comp_cpu_keys(&left_last, &right_first) >= 0) {
2638 btrfs_crit(left->fs_info,
2639"bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)",
2640 left_last.objectid, left_last.type,
2641 left_last.offset, right_first.objectid,
2642 right_first.type, right_first.offset);
2643 return true;
2644 }
2645 return false;
2646}
2647
2648/*
2649 * try to push data from one node into the next node left in the
2650 * tree.
2651 *
2652 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2653 * error, and > 0 if there was no room in the left hand block.
2654 */
2655static int push_node_left(struct btrfs_trans_handle *trans,
2656 struct extent_buffer *dst,
2657 struct extent_buffer *src, int empty)
2658{
2659 struct btrfs_fs_info *fs_info = trans->fs_info;
2660 int push_items = 0;
2661 int src_nritems;
2662 int dst_nritems;
2663 int ret = 0;
2664
2665 src_nritems = btrfs_header_nritems(src);
2666 dst_nritems = btrfs_header_nritems(dst);
2667 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
2668 WARN_ON(btrfs_header_generation(src) != trans->transid);
2669 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2670
2671 if (!empty && src_nritems <= 8)
2672 return 1;
2673
2674 if (push_items <= 0)
2675 return 1;
2676
2677 if (empty) {
2678 push_items = min(src_nritems, push_items);
2679 if (push_items < src_nritems) {
			/*
			 * Leave at least 8 pointers in the node if
			 * we aren't going to empty it.
			 */
2683 if (src_nritems - push_items < 8) {
2684 if (push_items <= 8)
2685 return 1;
2686 push_items -= 8;
2687 }
2688 }
2689 } else
2690 push_items = min(src_nritems - 8, push_items);
2691
2692 /* dst is the left eb, src is the middle eb */
2693 if (check_sibling_keys(dst, src)) {
2694 ret = -EUCLEAN;
2695 btrfs_abort_transaction(trans, ret);
2696 return ret;
2697 }
2698 ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items);
2699 if (ret) {
2700 btrfs_abort_transaction(trans, ret);
2701 return ret;
2702 }
2703 copy_extent_buffer(dst, src,
2704 btrfs_node_key_ptr_offset(dst, dst_nritems),
2705 btrfs_node_key_ptr_offset(src, 0),
2706 push_items * sizeof(struct btrfs_key_ptr));
2707
2708 if (push_items < src_nritems) {
2709 /*
2710 * Don't call btrfs_tree_mod_log_insert_move() here, key removal
2711 * was already fully logged by btrfs_tree_mod_log_eb_copy() above.
2712 */
2713 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(src, 0),
2714 btrfs_node_key_ptr_offset(src, push_items),
2715 (src_nritems - push_items) *
2716 sizeof(struct btrfs_key_ptr));
2717 }
2718 btrfs_set_header_nritems(src, src_nritems - push_items);
2719 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2720 btrfs_mark_buffer_dirty(src);
2721 btrfs_mark_buffer_dirty(dst);
2722
2723 return ret;
2724}
2725
2726/*
2727 * try to push data from one node into the next node right in the
2728 * tree.
2729 *
2730 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
2731 * error, and > 0 if there was no room in the right hand block.
2732 *
2733 * this will only push up to 1/2 the contents of the left node over
2734 */
2735static int balance_node_right(struct btrfs_trans_handle *trans,
2736 struct extent_buffer *dst,
2737 struct extent_buffer *src)
2738{
2739 struct btrfs_fs_info *fs_info = trans->fs_info;
2740 int push_items = 0;
2741 int max_push;
2742 int src_nritems;
2743 int dst_nritems;
2744 int ret = 0;
2745
2746 WARN_ON(btrfs_header_generation(src) != trans->transid);
2747 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2748
2749 src_nritems = btrfs_header_nritems(src);
2750 dst_nritems = btrfs_header_nritems(dst);
2751 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
2752 if (push_items <= 0)
2753 return 1;
2754
2755 if (src_nritems < 4)
2756 return 1;
2757
2758 max_push = src_nritems / 2 + 1;
2759 /* don't try to empty the node */
2760 if (max_push >= src_nritems)
2761 return 1;
2762
2763 if (max_push < push_items)
2764 push_items = max_push;
2765
2766 /* dst is the right eb, src is the middle eb */
2767 if (check_sibling_keys(src, dst)) {
2768 ret = -EUCLEAN;
2769 btrfs_abort_transaction(trans, ret);
2770 return ret;
2771 }
2772 ret = btrfs_tree_mod_log_insert_move(dst, push_items, 0, dst_nritems);
2773 BUG_ON(ret < 0);
2774 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(dst, push_items),
2775 btrfs_node_key_ptr_offset(dst, 0),
2776 (dst_nritems) *
2777 sizeof(struct btrfs_key_ptr));
2778
2779 ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items,
2780 push_items);
2781 if (ret) {
2782 btrfs_abort_transaction(trans, ret);
2783 return ret;
2784 }
2785 copy_extent_buffer(dst, src,
2786 btrfs_node_key_ptr_offset(dst, 0),
2787 btrfs_node_key_ptr_offset(src, src_nritems - push_items),
2788 push_items * sizeof(struct btrfs_key_ptr));
2789
2790 btrfs_set_header_nritems(src, src_nritems - push_items);
2791 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2792
2793 btrfs_mark_buffer_dirty(src);
2794 btrfs_mark_buffer_dirty(dst);
2795
2796 return ret;
2797}
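
/*
 * Worked example for the caps above (illustrative numbers): with
 * src_nritems == 10, max_push is 10 / 2 + 1 == 6, so at most 6 of the 10
 * pointers may move right; a source with fewer than 4 pointers is never
 * pushed at all, so the node cannot be emptied from this path.
 */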
2798
2799/*
2800 * helper function to insert a new root level in the tree.
2801 * A new node is allocated, and a single item is inserted to
2802 * point to the existing root
2803 *
2804 * returns zero on success or < 0 on failure.
2805 */
2806static noinline int insert_new_root(struct btrfs_trans_handle *trans,
2807 struct btrfs_root *root,
2808 struct btrfs_path *path, int level)
2809{
2810 struct btrfs_fs_info *fs_info = root->fs_info;
2811 u64 lower_gen;
2812 struct extent_buffer *lower;
2813 struct extent_buffer *c;
2814 struct extent_buffer *old;
2815 struct btrfs_disk_key lower_key;
2816 int ret;
2817
2818 BUG_ON(path->nodes[level]);
2819 BUG_ON(path->nodes[level-1] != root->node);
2820
2821 lower = path->nodes[level-1];
2822 if (level == 1)
2823 btrfs_item_key(lower, &lower_key, 0);
2824 else
2825 btrfs_node_key(lower, &lower_key, 0);
2826
2827 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
2828 &lower_key, level, root->node->start, 0,
2829 BTRFS_NESTING_NEW_ROOT);
2830 if (IS_ERR(c))
2831 return PTR_ERR(c);
2832
2833 root_add_used(root, fs_info->nodesize);
2834
2835 btrfs_set_header_nritems(c, 1);
2836 btrfs_set_node_key(c, &lower_key, 0);
2837 btrfs_set_node_blockptr(c, 0, lower->start);
2838 lower_gen = btrfs_header_generation(lower);
2839 WARN_ON(lower_gen != trans->transid);
2840
2841 btrfs_set_node_ptr_generation(c, 0, lower_gen);
2842
2843 btrfs_mark_buffer_dirty(c);
2844
2845 old = root->node;
2846 ret = btrfs_tree_mod_log_insert_root(root->node, c, false);
2847 BUG_ON(ret < 0);
2848 rcu_assign_pointer(root->node, c);
2849
2850 /* the super has an extra ref to root->node */
2851 free_extent_buffer(old);
2852
2853 add_root_to_dirty_list(root);
2854 atomic_inc(&c->refs);
2855 path->nodes[level] = c;
2856 path->locks[level] = BTRFS_WRITE_LOCK;
2857 path->slots[level] = 0;
2858 return 0;
2859}
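
/*
 * Informal picture of insert_new_root() (added for illustration): the old
 * root becomes the single child of a freshly allocated node one level up.
 *
 *	before:  root (level N)		after:  new root (level N+1)
 *	           / | \				|
 *	         children			old root (level N)
 *						   / | \
 *						 children
 */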
2860
2861/*
2862 * worker function to insert a single pointer in a node.
2863 * the node should have enough room for the pointer already
2864 *
2865 * slot and level indicate where you want the key to go, and
2866 * blocknr is the block the key points to.
2867 */
2868static void insert_ptr(struct btrfs_trans_handle *trans,
2869 struct btrfs_path *path,
2870 struct btrfs_disk_key *key, u64 bytenr,
2871 int slot, int level)
2872{
2873 struct extent_buffer *lower;
2874 int nritems;
2875 int ret;
2876
2877 BUG_ON(!path->nodes[level]);
2878 btrfs_assert_tree_write_locked(path->nodes[level]);
2879 lower = path->nodes[level];
2880 nritems = btrfs_header_nritems(lower);
2881 BUG_ON(slot > nritems);
2882 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info));
2883 if (slot != nritems) {
2884 if (level) {
2885 ret = btrfs_tree_mod_log_insert_move(lower, slot + 1,
2886 slot, nritems - slot);
2887 BUG_ON(ret < 0);
2888 }
2889 memmove_extent_buffer(lower,
2890 btrfs_node_key_ptr_offset(lower, slot + 1),
2891 btrfs_node_key_ptr_offset(lower, slot),
2892 (nritems - slot) * sizeof(struct btrfs_key_ptr));
2893 }
2894 if (level) {
2895 ret = btrfs_tree_mod_log_insert_key(lower, slot,
2896 BTRFS_MOD_LOG_KEY_ADD);
2897 BUG_ON(ret < 0);
2898 }
2899 btrfs_set_node_key(lower, key, slot);
2900 btrfs_set_node_blockptr(lower, slot, bytenr);
2901 WARN_ON(trans->transid == 0);
2902 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
2903 btrfs_set_header_nritems(lower, nritems + 1);
2904 btrfs_mark_buffer_dirty(lower);
2905}
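
/*
 * Example: split_node() below uses this helper to link a newly allocated
 * sibling into the parent, immediately to the right of the block that was
 * split:
 *
 *	insert_ptr(trans, path, &disk_key, split->start,
 *		   path->slots[level + 1] + 1, level + 1);
 */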
2906
/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split.
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right; if either one works, it returns right away.
 *
 * returns 0 on success and < 0 on failure
 */
2916static noinline int split_node(struct btrfs_trans_handle *trans,
2917 struct btrfs_root *root,
2918 struct btrfs_path *path, int level)
2919{
2920 struct btrfs_fs_info *fs_info = root->fs_info;
2921 struct extent_buffer *c;
2922 struct extent_buffer *split;
2923 struct btrfs_disk_key disk_key;
2924 int mid;
2925 int ret;
2926 u32 c_nritems;
2927
2928 c = path->nodes[level];
2929 WARN_ON(btrfs_header_generation(c) != trans->transid);
2930 if (c == root->node) {
		/*
		 * trying to split the root, let's make a new one
		 *
		 * tree mod log: We don't log removal of the old root in
		 * insert_new_root, because that root buffer will be kept as a
		 * normal node. We are going to log removal of half of the
		 * elements below with btrfs_tree_mod_log_eb_copy(). We're
		 * holding a tree lock on the buffer, which is why we cannot
		 * race with other tree_mod_log users.
		 */
2941 ret = insert_new_root(trans, root, path, level + 1);
2942 if (ret)
2943 return ret;
2944 } else {
2945 ret = push_nodes_for_insert(trans, root, path, level);
2946 c = path->nodes[level];
2947 if (!ret && btrfs_header_nritems(c) <
2948 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
2949 return 0;
2950 if (ret < 0)
2951 return ret;
2952 }
2953
2954 c_nritems = btrfs_header_nritems(c);
2955 mid = (c_nritems + 1) / 2;
2956 btrfs_node_key(c, &disk_key, mid);
2957
2958 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
2959 &disk_key, level, c->start, 0,
2960 BTRFS_NESTING_SPLIT);
2961 if (IS_ERR(split))
2962 return PTR_ERR(split);
2963
2964 root_add_used(root, fs_info->nodesize);
2965 ASSERT(btrfs_header_level(c) == level);
2966
2967 ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid);
2968 if (ret) {
2969 btrfs_abort_transaction(trans, ret);
2970 return ret;
2971 }
2972 copy_extent_buffer(split, c,
2973 btrfs_node_key_ptr_offset(split, 0),
2974 btrfs_node_key_ptr_offset(c, mid),
2975 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
2976 btrfs_set_header_nritems(split, c_nritems - mid);
2977 btrfs_set_header_nritems(c, mid);
2978
2979 btrfs_mark_buffer_dirty(c);
2980 btrfs_mark_buffer_dirty(split);
2981
2982 insert_ptr(trans, path, &disk_key, split->start,
2983 path->slots[level + 1] + 1, level + 1);
2984
2985 if (path->slots[level] >= mid) {
2986 path->slots[level] -= mid;
2987 btrfs_tree_unlock(c);
2988 free_extent_buffer(c);
2989 path->nodes[level] = split;
2990 path->slots[level + 1] += 1;
2991 } else {
2992 btrfs_tree_unlock(split);
2993 free_extent_buffer(split);
2994 }
2995 return 0;
2996}
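
/*
 * Informal picture of split_node() (illustration only): a node c holding
 * c_nritems pointers keeps the first half and the new right sibling gets
 * the rest:
 *
 *	c: [0 .. mid-1]    split: [mid .. c_nritems-1],  mid = (c_nritems + 1) / 2
 */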
2997
2998/*
2999 * how many bytes are required to store the items in a leaf. start
3000 * and nr indicate which items in the leaf to check. This totals up the
3001 * space used both by the item structs and the item data
3002 */
3003static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3004{
3005 int data_len;
3006 int nritems = btrfs_header_nritems(l);
3007 int end = min(nritems, start + nr) - 1;
3008
3009 if (!nr)
3010 return 0;
3011 data_len = btrfs_item_offset(l, start) + btrfs_item_size(l, start);
3012 data_len = data_len - btrfs_item_offset(l, end);
3013 data_len += sizeof(struct btrfs_item) * nr;
3014 WARN_ON(data_len < 0);
3015 return data_len;
3016}
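
/*
 * Worked example (illustrative numbers): for three items whose data takes
 * 100, 200 and 50 bytes, leaf_space_used(l, 0, 3) returns
 * 100 + 200 + 50 + 3 * sizeof(struct btrfs_item), since every item costs
 * its data plus one item header in the leaf.
 */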
3017
3018/*
3019 * The space between the end of the leaf items and
3020 * the start of the leaf data. IOW, how much room
3021 * the leaf has left for both items and data
3022 */
3023noinline int btrfs_leaf_free_space(struct extent_buffer *leaf)
3024{
3025 struct btrfs_fs_info *fs_info = leaf->fs_info;
3026 int nritems = btrfs_header_nritems(leaf);
3027 int ret;
3028
3029 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
3030 if (ret < 0) {
3031 btrfs_crit(fs_info,
3032 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3033 ret,
3034 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
3035 leaf_space_used(leaf, 0, nritems), nritems);
3036 }
3037 return ret;
3038}
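
/*
 * Sketch of a typical check (hypothetical helper, for illustration): does a
 * single new item of @data_size bytes fit into @leaf without splitting it?
 * An insertion consumes the data bytes plus one struct btrfs_item header.
 */
static inline bool example_item_fits(struct extent_buffer *leaf, u32 data_size)
{
	return btrfs_leaf_free_space(leaf) >=
	       (int)(data_size + sizeof(struct btrfs_item));
}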
3039
3040/*
3041 * min slot controls the lowest index we're willing to push to the
3042 * right. We'll push up to and including min_slot, but no lower
3043 */
3044static noinline int __push_leaf_right(struct btrfs_path *path,
3045 int data_size, int empty,
3046 struct extent_buffer *right,
3047 int free_space, u32 left_nritems,
3048 u32 min_slot)
3049{
3050 struct btrfs_fs_info *fs_info = right->fs_info;
3051 struct extent_buffer *left = path->nodes[0];
3052 struct extent_buffer *upper = path->nodes[1];
3053 struct btrfs_map_token token;
3054 struct btrfs_disk_key disk_key;
3055 int slot;
3056 u32 i;
3057 int push_space = 0;
3058 int push_items = 0;
3059 u32 nr;
3060 u32 right_nritems;
3061 u32 data_end;
3062 u32 this_item_size;
3063
3064 if (empty)
3065 nr = 0;
3066 else
3067 nr = max_t(u32, 1, min_slot);
3068
3069 if (path->slots[0] >= left_nritems)
3070 push_space += data_size;
3071
3072 slot = path->slots[1];
3073 i = left_nritems - 1;
3074 while (i >= nr) {
3075 if (!empty && push_items > 0) {
3076 if (path->slots[0] > i)
3077 break;
3078 if (path->slots[0] == i) {
3079 int space = btrfs_leaf_free_space(left);
3080
3081 if (space + push_space * 2 > free_space)
3082 break;
3083 }
3084 }
3085
3086 if (path->slots[0] == i)
3087 push_space += data_size;
3088
3089 this_item_size = btrfs_item_size(left, i);
3090 if (this_item_size + sizeof(struct btrfs_item) +
3091 push_space > free_space)
3092 break;
3093
3094 push_items++;
3095 push_space += this_item_size + sizeof(struct btrfs_item);
3096 if (i == 0)
3097 break;
3098 i--;
3099 }
3100
3101 if (push_items == 0)
3102 goto out_unlock;
3103
3104 WARN_ON(!empty && push_items == left_nritems);
3105
3106 /* push left to right */
3107 right_nritems = btrfs_header_nritems(right);
3108
3109 push_space = btrfs_item_data_end(left, left_nritems - push_items);
3110 push_space -= leaf_data_end(left);
3111
3112 /* make room in the right data area */
3113 data_end = leaf_data_end(right);
3114 memmove_leaf_data(right, data_end - push_space, data_end,
3115 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
3116
3117 /* copy from the left data area */
3118 copy_leaf_data(right, left, BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3119 leaf_data_end(left), push_space);
3120
3121 memmove_leaf_items(right, push_items, 0, right_nritems);
3122
3123 /* copy the items from left to right */
3124 copy_leaf_items(right, left, 0, left_nritems - push_items, push_items);
3125
3126 /* update the item pointers */
3127 btrfs_init_map_token(&token, right);
3128 right_nritems += push_items;
3129 btrfs_set_header_nritems(right, right_nritems);
3130 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3131 for (i = 0; i < right_nritems; i++) {
3132 push_space -= btrfs_token_item_size(&token, i);
3133 btrfs_set_token_item_offset(&token, i, push_space);
3134 }
3135
3136 left_nritems -= push_items;
3137 btrfs_set_header_nritems(left, left_nritems);
3138
3139 if (left_nritems)
3140 btrfs_mark_buffer_dirty(left);
3141 else
3142 btrfs_clean_tree_block(left);
3143
3144 btrfs_mark_buffer_dirty(right);
3145
3146 btrfs_item_key(right, &disk_key, 0);
3147 btrfs_set_node_key(upper, &disk_key, slot + 1);
3148 btrfs_mark_buffer_dirty(upper);
3149
3150 /* then fixup the leaf pointer in the path */
3151 if (path->slots[0] >= left_nritems) {
3152 path->slots[0] -= left_nritems;
3153 if (btrfs_header_nritems(path->nodes[0]) == 0)
3154 btrfs_clean_tree_block(path->nodes[0]);
3155 btrfs_tree_unlock(path->nodes[0]);
3156 free_extent_buffer(path->nodes[0]);
3157 path->nodes[0] = right;
3158 path->slots[1] += 1;
3159 } else {
3160 btrfs_tree_unlock(right);
3161 free_extent_buffer(right);
3162 }
3163 return 0;
3164
3165out_unlock:
3166 btrfs_tree_unlock(right);
3167 free_extent_buffer(right);
3168 return 1;
3169}
3170
3171/*
3172 * push some data in the path leaf to the right, trying to free up at
3173 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3174 *
3175 * returns 1 if the push failed because the other node didn't have enough
3176 * room, 0 if everything worked out and < 0 if there were major errors.
3177 *
3178 * this will push starting from min_slot to the end of the leaf. It won't
3179 * push any slot lower than min_slot
3180 */
3181static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3182 *root, struct btrfs_path *path,
3183 int min_data_size, int data_size,
3184 int empty, u32 min_slot)
3185{
3186 struct extent_buffer *left = path->nodes[0];
3187 struct extent_buffer *right;
3188 struct extent_buffer *upper;
3189 int slot;
3190 int free_space;
3191 u32 left_nritems;
3192 int ret;
3193
3194 if (!path->nodes[1])
3195 return 1;
3196
3197 slot = path->slots[1];
3198 upper = path->nodes[1];
3199 if (slot >= btrfs_header_nritems(upper) - 1)
3200 return 1;
3201
3202 btrfs_assert_tree_write_locked(path->nodes[1]);
3203
3204 right = btrfs_read_node_slot(upper, slot + 1);
	/*
	 * If slot + 1 is not valid or we failed to read the right node,
	 * it's no big deal, just return.
	 */
3209 if (IS_ERR(right))
3210 return 1;
3211
3212 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
3213
3214 free_space = btrfs_leaf_free_space(right);
3215 if (free_space < data_size)
3216 goto out_unlock;
3217
3218 ret = btrfs_cow_block(trans, root, right, upper,
3219 slot + 1, &right, BTRFS_NESTING_RIGHT_COW);
3220 if (ret)
3221 goto out_unlock;
3222
3223 left_nritems = btrfs_header_nritems(left);
3224 if (left_nritems == 0)
3225 goto out_unlock;
3226
3227 if (check_sibling_keys(left, right)) {
3228 ret = -EUCLEAN;
3229 btrfs_tree_unlock(right);
3230 free_extent_buffer(right);
3231 return ret;
3232 }
3233 if (path->slots[0] == left_nritems && !empty) {
		/*
		 * Key greater than all keys in the leaf, right neighbor has
		 * enough room for it and we're not emptying our leaf to delete
		 * it, therefore use right neighbor to insert the new item and
		 * no need to touch/dirty our left leaf.
		 */
3238 btrfs_tree_unlock(left);
3239 free_extent_buffer(left);
3240 path->nodes[0] = right;
3241 path->slots[0] = 0;
3242 path->slots[1]++;
3243 return 0;
3244 }
3245
3246 return __push_leaf_right(path, min_data_size, empty,
3247 right, free_space, left_nritems, min_slot);
3248out_unlock:
3249 btrfs_tree_unlock(right);
3250 free_extent_buffer(right);
3251 return 1;
3252}
3253
3254/*
3255 * push some data in the path leaf to the left, trying to free up at
3256 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3257 *
3258 * max_slot can put a limit on how far into the leaf we'll push items. The
3259 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
3260 * items
3261 */
3262static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
3263 int empty, struct extent_buffer *left,
3264 int free_space, u32 right_nritems,
3265 u32 max_slot)
3266{
3267 struct btrfs_fs_info *fs_info = left->fs_info;
3268 struct btrfs_disk_key disk_key;
3269 struct extent_buffer *right = path->nodes[0];
3270 int i;
3271 int push_space = 0;
3272 int push_items = 0;
3273 u32 old_left_nritems;
3274 u32 nr;
3275 int ret = 0;
3276 u32 this_item_size;
3277 u32 old_left_item_size;
3278 struct btrfs_map_token token;
3279
3280 if (empty)
3281 nr = min(right_nritems, max_slot);
3282 else
3283 nr = min(right_nritems - 1, max_slot);
3284
3285 for (i = 0; i < nr; i++) {
3286 if (!empty && push_items > 0) {
3287 if (path->slots[0] < i)
3288 break;
3289 if (path->slots[0] == i) {
3290 int space = btrfs_leaf_free_space(right);
3291
3292 if (space + push_space * 2 > free_space)
3293 break;
3294 }
3295 }
3296
3297 if (path->slots[0] == i)
3298 push_space += data_size;
3299
3300 this_item_size = btrfs_item_size(right, i);
3301 if (this_item_size + sizeof(struct btrfs_item) + push_space >
3302 free_space)
3303 break;
3304
3305 push_items++;
3306 push_space += this_item_size + sizeof(struct btrfs_item);
3307 }
3308
3309 if (push_items == 0) {
3310 ret = 1;
3311 goto out;
3312 }
3313 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3314
3315 /* push data from right to left */
3316 copy_leaf_items(left, right, btrfs_header_nritems(left), 0, push_items);
3317
3318 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
3319 btrfs_item_offset(right, push_items - 1);
3320
3321 copy_leaf_data(left, right, leaf_data_end(left) - push_space,
3322 btrfs_item_offset(right, push_items - 1), push_space);
3323 old_left_nritems = btrfs_header_nritems(left);
3324 BUG_ON(old_left_nritems <= 0);
3325
3326 btrfs_init_map_token(&token, left);
3327 old_left_item_size = btrfs_item_offset(left, old_left_nritems - 1);
3328 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3329 u32 ioff;
3330
3331 ioff = btrfs_token_item_offset(&token, i);
3332 btrfs_set_token_item_offset(&token, i,
3333 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size));
3334 }
3335 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3336
3337 /* fixup right node */
3338 if (push_items > right_nritems)
3339 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3340 right_nritems);
3341
3342 if (push_items < right_nritems) {
3343 push_space = btrfs_item_offset(right, push_items - 1) -
3344 leaf_data_end(right);
3345 memmove_leaf_data(right,
3346 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3347 leaf_data_end(right), push_space);
3348
3349 memmove_leaf_items(right, 0, push_items,
3350 btrfs_header_nritems(right) - push_items);
3351 }
3352
3353 btrfs_init_map_token(&token, right);
3354 right_nritems -= push_items;
3355 btrfs_set_header_nritems(right, right_nritems);
3356 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3357 for (i = 0; i < right_nritems; i++) {
3358 push_space = push_space - btrfs_token_item_size(&token, i);
3359 btrfs_set_token_item_offset(&token, i, push_space);
3360 }
3361
3362 btrfs_mark_buffer_dirty(left);
3363 if (right_nritems)
3364 btrfs_mark_buffer_dirty(right);
3365 else
3366 btrfs_clean_tree_block(right);
3367
3368 btrfs_item_key(right, &disk_key, 0);
3369 fixup_low_keys(path, &disk_key, 1);
3370
3371 /* then fixup the leaf pointer in the path */
3372 if (path->slots[0] < push_items) {
3373 path->slots[0] += old_left_nritems;
3374 btrfs_tree_unlock(path->nodes[0]);
3375 free_extent_buffer(path->nodes[0]);
3376 path->nodes[0] = left;
3377 path->slots[1] -= 1;
3378 } else {
3379 btrfs_tree_unlock(left);
3380 free_extent_buffer(left);
3381 path->slots[0] -= push_items;
3382 }
3383 BUG_ON(path->slots[0] < 0);
3384 return ret;
3385out:
3386 btrfs_tree_unlock(left);
3387 free_extent_buffer(left);
3388 return ret;
3389}
3390
3391/*
3392 * push some data in the path leaf to the left, trying to free up at
3393 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3394 *
3395 * max_slot can put a limit on how far into the leaf we'll push items. The
3396 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3397 * items
3398 */
3399static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3400 *root, struct btrfs_path *path, int min_data_size,
3401 int data_size, int empty, u32 max_slot)
3402{
3403 struct extent_buffer *right = path->nodes[0];
3404 struct extent_buffer *left;
3405 int slot;
3406 int free_space;
3407 u32 right_nritems;
3408 int ret = 0;
3409
3410 slot = path->slots[1];
3411 if (slot == 0)
3412 return 1;
3413 if (!path->nodes[1])
3414 return 1;
3415
3416 right_nritems = btrfs_header_nritems(right);
3417 if (right_nritems == 0)
3418 return 1;
3419
3420 btrfs_assert_tree_write_locked(path->nodes[1]);
3421
3422 left = btrfs_read_node_slot(path->nodes[1], slot - 1);
	/*
	 * If slot - 1 is not valid or we failed to read the left node,
	 * it's no big deal, just return.
	 */
3427 if (IS_ERR(left))
3428 return 1;
3429
3430 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
3431
3432 free_space = btrfs_leaf_free_space(left);
3433 if (free_space < data_size) {
3434 ret = 1;
3435 goto out;
3436 }
3437
3438 ret = btrfs_cow_block(trans, root, left,
3439 path->nodes[1], slot - 1, &left,
3440 BTRFS_NESTING_LEFT_COW);
3441 if (ret) {
3442 /* we hit -ENOSPC, but it isn't fatal here */
3443 if (ret == -ENOSPC)
3444 ret = 1;
3445 goto out;
3446 }
3447
3448 if (check_sibling_keys(left, right)) {
3449 ret = -EUCLEAN;
3450 goto out;
3451 }
3452 return __push_leaf_left(path, min_data_size,
3453 empty, left, free_space, right_nritems,
3454 max_slot);
3455out:
3456 btrfs_tree_unlock(left);
3457 free_extent_buffer(left);
3458 return ret;
3459}
3460
/*
 * Copy the items from leaf 'l' starting at slot 'mid' into the new leaf
 * 'right' as the second half of a leaf split, link 'right' into the parent
 * and fix the path to point at the correct leaf afterwards.
 */
3465static noinline void copy_for_split(struct btrfs_trans_handle *trans,
3466 struct btrfs_path *path,
3467 struct extent_buffer *l,
3468 struct extent_buffer *right,
3469 int slot, int mid, int nritems)
3470{
3471 struct btrfs_fs_info *fs_info = trans->fs_info;
3472 int data_copy_size;
3473 int rt_data_off;
3474 int i;
3475 struct btrfs_disk_key disk_key;
3476 struct btrfs_map_token token;
3477
3478 nritems = nritems - mid;
3479 btrfs_set_header_nritems(right, nritems);
3480 data_copy_size = btrfs_item_data_end(l, mid) - leaf_data_end(l);
3481
3482 copy_leaf_items(right, l, 0, mid, nritems);
3483
3484 copy_leaf_data(right, l, BTRFS_LEAF_DATA_SIZE(fs_info) - data_copy_size,
3485 leaf_data_end(l), data_copy_size);
3486
3487 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_data_end(l, mid);
3488
3489 btrfs_init_map_token(&token, right);
3490 for (i = 0; i < nritems; i++) {
3491 u32 ioff;
3492
3493 ioff = btrfs_token_item_offset(&token, i);
3494 btrfs_set_token_item_offset(&token, i, ioff + rt_data_off);
3495 }
3496
3497 btrfs_set_header_nritems(l, mid);
3498 btrfs_item_key(right, &disk_key, 0);
3499 insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1);
3500
3501 btrfs_mark_buffer_dirty(right);
3502 btrfs_mark_buffer_dirty(l);
3503 BUG_ON(path->slots[0] != slot);
3504
3505 if (mid <= slot) {
3506 btrfs_tree_unlock(path->nodes[0]);
3507 free_extent_buffer(path->nodes[0]);
3508 path->nodes[0] = right;
3509 path->slots[0] -= mid;
3510 path->slots[1] += 1;
3511 } else {
3512 btrfs_tree_unlock(right);
3513 free_extent_buffer(right);
3514 }
3515
3516 BUG_ON(path->slots[0] < 0);
3517}
3518
3519/*
3520 * double splits happen when we need to insert a big item in the middle
3521 * of a leaf. A double split can leave us with 3 mostly empty leaves:
3522 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3523 * A B C
3524 *
3525 * We avoid this by trying to push the items on either side of our target
3526 * into the adjacent leaves. If all goes well we can avoid the double split
3527 * completely.
3528 */
3529static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3530 struct btrfs_root *root,
3531 struct btrfs_path *path,
3532 int data_size)
3533{
3534 int ret;
3535 int progress = 0;
3536 int slot;
3537 u32 nritems;
3538 int space_needed = data_size;
3539
3540 slot = path->slots[0];
3541 if (slot < btrfs_header_nritems(path->nodes[0]))
3542 space_needed -= btrfs_leaf_free_space(path->nodes[0]);
3543
3544 /*
3545 * try to push all the items after our slot into the
3546 * right leaf
3547 */
3548 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
3549 if (ret < 0)
3550 return ret;
3551
3552 if (ret == 0)
3553 progress++;
3554
3555 nritems = btrfs_header_nritems(path->nodes[0]);
3556 /*
3557 * our goal is to get our slot at the start or end of a leaf. If
3558 * we've done so we're done
3559 */
3560 if (path->slots[0] == 0 || path->slots[0] == nritems)
3561 return 0;
3562
3563 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
3564 return 0;
3565
3566 /* try to push all the items before our slot into the next leaf */
3567 slot = path->slots[0];
3568 space_needed = data_size;
3569 if (slot > 0)
3570 space_needed -= btrfs_leaf_free_space(path->nodes[0]);
3571 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
3572 if (ret < 0)
3573 return ret;
3574
3575 if (ret == 0)
3576 progress++;
3577
3578 if (progress)
3579 return 0;
3580 return 1;
3581}
3582
3583/*
3584 * split the path's leaf in two, making sure there is at least data_size
3585 * available for the resulting leaf level of the path.
3586 *
3587 * returns 0 if all went well and < 0 on failure.
3588 */
3589static noinline int split_leaf(struct btrfs_trans_handle *trans,
3590 struct btrfs_root *root,
3591 const struct btrfs_key *ins_key,
3592 struct btrfs_path *path, int data_size,
3593 int extend)
3594{
3595 struct btrfs_disk_key disk_key;
3596 struct extent_buffer *l;
3597 u32 nritems;
3598 int mid;
3599 int slot;
3600 struct extent_buffer *right;
3601 struct btrfs_fs_info *fs_info = root->fs_info;
3602 int ret = 0;
3603 int wret;
3604 int split;
3605 int num_doubles = 0;
3606 int tried_avoid_double = 0;
3607
3608 l = path->nodes[0];
3609 slot = path->slots[0];
3610 if (extend && data_size + btrfs_item_size(l, slot) +
3611 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
3612 return -EOVERFLOW;
3613
3614 /* first try to make some room by pushing left and right */
3615 if (data_size && path->nodes[1]) {
3616 int space_needed = data_size;
3617
3618 if (slot < btrfs_header_nritems(l))
3619 space_needed -= btrfs_leaf_free_space(l);
3620
3621 wret = push_leaf_right(trans, root, path, space_needed,
3622 space_needed, 0, 0);
3623 if (wret < 0)
3624 return wret;
3625 if (wret) {
3626 space_needed = data_size;
3627 if (slot > 0)
3628 space_needed -= btrfs_leaf_free_space(l);
3629 wret = push_leaf_left(trans, root, path, space_needed,
3630 space_needed, 0, (u32)-1);
3631 if (wret < 0)
3632 return wret;
3633 }
3634 l = path->nodes[0];
3635
3636 /* did the pushes work? */
3637 if (btrfs_leaf_free_space(l) >= data_size)
3638 return 0;
3639 }
3640
3641 if (!path->nodes[1]) {
3642 ret = insert_new_root(trans, root, path, 1);
3643 if (ret)
3644 return ret;
3645 }
3646again:
3647 split = 1;
3648 l = path->nodes[0];
3649 slot = path->slots[0];
3650 nritems = btrfs_header_nritems(l);
3651 mid = (nritems + 1) / 2;
3652
3653 if (mid <= slot) {
3654 if (nritems == 1 ||
3655 leaf_space_used(l, mid, nritems - mid) + data_size >
3656 BTRFS_LEAF_DATA_SIZE(fs_info)) {
3657 if (slot >= nritems) {
3658 split = 0;
3659 } else {
3660 mid = slot;
3661 if (mid != nritems &&
3662 leaf_space_used(l, mid, nritems - mid) +
3663 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3664 if (data_size && !tried_avoid_double)
3665 goto push_for_double;
3666 split = 2;
3667 }
3668 }
3669 }
3670 } else {
3671 if (leaf_space_used(l, 0, mid) + data_size >
3672 BTRFS_LEAF_DATA_SIZE(fs_info)) {
3673 if (!extend && data_size && slot == 0) {
3674 split = 0;
3675 } else if ((extend || !data_size) && slot == 0) {
3676 mid = 1;
3677 } else {
3678 mid = slot;
3679 if (mid != nritems &&
3680 leaf_space_used(l, mid, nritems - mid) +
3681 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3682 if (data_size && !tried_avoid_double)
3683 goto push_for_double;
3684 split = 2;
3685 }
3686 }
3687 }
3688 }
3689
3690 if (split == 0)
3691 btrfs_cpu_key_to_disk(&disk_key, ins_key);
3692 else
3693 btrfs_item_key(l, &disk_key, mid);
3694
	/*
	 * We have to use BTRFS_NESTING_NEW_ROOT here if we've done a double
	 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES
	 * subclasses, which is 8 at the time of this patch, and we've maxed it
	 * out. In the future we could add a
	 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just
	 * use BTRFS_NESTING_NEW_ROOT.
	 */
3703 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3704 &disk_key, 0, l->start, 0,
3705 num_doubles ? BTRFS_NESTING_NEW_ROOT :
3706 BTRFS_NESTING_SPLIT);
3707 if (IS_ERR(right))
3708 return PTR_ERR(right);
3709
3710 root_add_used(root, fs_info->nodesize);
3711
3712 if (split == 0) {
3713 if (mid <= slot) {
3714 btrfs_set_header_nritems(right, 0);
3715 insert_ptr(trans, path, &disk_key,
3716 right->start, path->slots[1] + 1, 1);
3717 btrfs_tree_unlock(path->nodes[0]);
3718 free_extent_buffer(path->nodes[0]);
3719 path->nodes[0] = right;
3720 path->slots[0] = 0;
3721 path->slots[1] += 1;
3722 } else {
3723 btrfs_set_header_nritems(right, 0);
3724 insert_ptr(trans, path, &disk_key,
3725 right->start, path->slots[1], 1);
3726 btrfs_tree_unlock(path->nodes[0]);
3727 free_extent_buffer(path->nodes[0]);
3728 path->nodes[0] = right;
3729 path->slots[0] = 0;
3730 if (path->slots[1] == 0)
3731 fixup_low_keys(path, &disk_key, 1);
3732 }
		/*
		 * We created a new empty leaf 'right' for the required
		 * ins_len; the caller will write the new item's data into it
		 * and do btrfs_mark_buffer_dirty() on it afterwards.
		 */
3738 return ret;
3739 }
3740
3741 copy_for_split(trans, path, l, right, slot, mid, nritems);
3742
3743 if (split == 2) {
3744 BUG_ON(num_doubles != 0);
3745 num_doubles++;
3746 goto again;
3747 }
3748
3749 return 0;
3750
3751push_for_double:
3752 push_for_double_split(trans, root, path, data_size);
3753 tried_avoid_double = 1;
3754 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
3755 return 0;
3756 goto again;
3757}
3758
3759static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
3760 struct btrfs_root *root,
3761 struct btrfs_path *path, int ins_len)
3762{
3763 struct btrfs_key key;
3764 struct extent_buffer *leaf;
3765 struct btrfs_file_extent_item *fi;
3766 u64 extent_len = 0;
3767 u32 item_size;
3768 int ret;
3769
3770 leaf = path->nodes[0];
3771 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3772
3773 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
3774 key.type != BTRFS_EXTENT_CSUM_KEY);
3775
3776 if (btrfs_leaf_free_space(leaf) >= ins_len)
3777 return 0;
3778
3779 item_size = btrfs_item_size(leaf, path->slots[0]);
3780 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3781 fi = btrfs_item_ptr(leaf, path->slots[0],
3782 struct btrfs_file_extent_item);
3783 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
3784 }
3785 btrfs_release_path(path);
3786
3787 path->keep_locks = 1;
3788 path->search_for_split = 1;
3789 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3790 path->search_for_split = 0;
3791 if (ret > 0)
3792 ret = -EAGAIN;
3793 if (ret < 0)
3794 goto err;
3795
3796 ret = -EAGAIN;
3797 leaf = path->nodes[0];
3798 /* if our item isn't there, return now */
3799 if (item_size != btrfs_item_size(leaf, path->slots[0]))
3800 goto err;
3801
3802 /* the leaf has changed, it now has room. return now */
3803 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len)
3804 goto err;
3805
3806 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3807 fi = btrfs_item_ptr(leaf, path->slots[0],
3808 struct btrfs_file_extent_item);
3809 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
3810 goto err;
3811 }
3812
3813 ret = split_leaf(trans, root, &key, path, ins_len, 1);
3814 if (ret)
3815 goto err;
3816
3817 path->keep_locks = 0;
3818 btrfs_unlock_up_safe(path, 1);
3819 return 0;
3820err:
3821 path->keep_locks = 0;
3822 return ret;
3823}
3824
3825static noinline int split_item(struct btrfs_path *path,
3826 const struct btrfs_key *new_key,
3827 unsigned long split_offset)
3828{
3829 struct extent_buffer *leaf;
3830 int orig_slot, slot;
3831 char *buf;
3832 u32 nritems;
3833 u32 item_size;
3834 u32 orig_offset;
3835 struct btrfs_disk_key disk_key;
3836
3837 leaf = path->nodes[0];
3838 BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item));
3839
3840 orig_slot = path->slots[0];
3841 orig_offset = btrfs_item_offset(leaf, path->slots[0]);
3842 item_size = btrfs_item_size(leaf, path->slots[0]);
3843
3844 buf = kmalloc(item_size, GFP_NOFS);
3845 if (!buf)
3846 return -ENOMEM;
3847
3848 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3849 path->slots[0]), item_size);
3850
3851 slot = path->slots[0] + 1;
3852 nritems = btrfs_header_nritems(leaf);
3853 if (slot != nritems) {
3854 /* shift the items */
3855 memmove_leaf_items(leaf, slot + 1, slot, nritems - slot);
3856 }
3857
3858 btrfs_cpu_key_to_disk(&disk_key, new_key);
3859 btrfs_set_item_key(leaf, &disk_key, slot);
3860
3861 btrfs_set_item_offset(leaf, slot, orig_offset);
3862 btrfs_set_item_size(leaf, slot, item_size - split_offset);
3863
3864 btrfs_set_item_offset(leaf, orig_slot,
3865 orig_offset + item_size - split_offset);
3866 btrfs_set_item_size(leaf, orig_slot, split_offset);
3867
3868 btrfs_set_header_nritems(leaf, nritems + 1);
3869
3870 /* write the data for the start of the original item */
3871 write_extent_buffer(leaf, buf,
3872 btrfs_item_ptr_offset(leaf, path->slots[0]),
3873 split_offset);
3874
3875 /* write the data for the new item */
3876 write_extent_buffer(leaf, buf + split_offset,
3877 btrfs_item_ptr_offset(leaf, slot),
3878 item_size - split_offset);
3879 btrfs_mark_buffer_dirty(leaf);
3880
3881 BUG_ON(btrfs_leaf_free_space(leaf) < 0);
3882 kfree(buf);
3883 return 0;
3884}
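
/*
 * Informal picture of split_item() (illustration only): an item of item_size
 * bytes split at split_offset becomes two adjacent items; the original keeps
 * the first split_offset bytes and the new item, carrying new_key in the
 * next slot, receives the remaining item_size - split_offset bytes.
 */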
3885
/*
 * This function splits a single item into two items,
 * giving 'new_key' to the new item and splitting the
 * old one at split_offset (from the start of the item).
 *
 * The path may be released by this operation. After
 * the split, the path is pointing to the old item. The
 * new item is going to be in the same node as the old one.
 *
 * Note, the item being split must be small enough to live alone on
 * a tree block with room for one extra struct btrfs_item.
 *
 * This allows us to split the item in place, keeping a lock on the
 * leaf the entire time.
 */
3901int btrfs_split_item(struct btrfs_trans_handle *trans,
3902 struct btrfs_root *root,
3903 struct btrfs_path *path,
3904 const struct btrfs_key *new_key,
3905 unsigned long split_offset)
3906{
3907 int ret;
3908 ret = setup_leaf_for_split(trans, root, path,
3909 sizeof(struct btrfs_item));
3910 if (ret)
3911 return ret;
3912
3913 ret = split_item(path, new_key, split_offset);
3914 return ret;
3915}
3916
3917/*
3918 * make the item pointed to by the path smaller. new_size indicates
3919 * how small to make it, and from_end tells us if we just chop bytes
3920 * off the end of the item or if we shift the item to chop bytes off
3921 * the front.
3922 */
3923void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
3924{
3925 int slot;
3926 struct extent_buffer *leaf;
3927 u32 nritems;
3928 unsigned int data_end;
3929 unsigned int old_data_start;
3930 unsigned int old_size;
3931 unsigned int size_diff;
3932 int i;
3933 struct btrfs_map_token token;
3934
3935 leaf = path->nodes[0];
3936 slot = path->slots[0];
3937
3938 old_size = btrfs_item_size(leaf, slot);
3939 if (old_size == new_size)
3940 return;
3941
3942 nritems = btrfs_header_nritems(leaf);
3943 data_end = leaf_data_end(leaf);
3944
3945 old_data_start = btrfs_item_offset(leaf, slot);
3946
3947 size_diff = old_size - new_size;
3948
3949 BUG_ON(slot < 0);
3950 BUG_ON(slot >= nritems);
3951
3952 /*
3953 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3954 */
3955 /* first correct the data pointers */
3956 btrfs_init_map_token(&token, leaf);
3957 for (i = slot; i < nritems; i++) {
3958 u32 ioff;
3959
3960 ioff = btrfs_token_item_offset(&token, i);
3961 btrfs_set_token_item_offset(&token, i, ioff + size_diff);
3962 }
3963
3964 /* shift the data */
3965 if (from_end) {
3966 memmove_leaf_data(leaf, data_end + size_diff, data_end,
3967 old_data_start + new_size - data_end);
3968 } else {
3969 struct btrfs_disk_key disk_key;
3970 u64 offset;
3971
3972 btrfs_item_key(leaf, &disk_key, slot);
3973
3974 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
3975 unsigned long ptr;
3976 struct btrfs_file_extent_item *fi;
3977
3978 fi = btrfs_item_ptr(leaf, slot,
3979 struct btrfs_file_extent_item);
3980 fi = (struct btrfs_file_extent_item *)(
3981 (unsigned long)fi - size_diff);
3982
3983 if (btrfs_file_extent_type(leaf, fi) ==
3984 BTRFS_FILE_EXTENT_INLINE) {
3985 ptr = btrfs_item_ptr_offset(leaf, slot);
3986 memmove_extent_buffer(leaf, ptr,
3987 (unsigned long)fi,
3988 BTRFS_FILE_EXTENT_INLINE_DATA_START);
3989 }
3990 }
3991
3992 memmove_leaf_data(leaf, data_end + size_diff, data_end,
3993 old_data_start - data_end);
3994
3995 offset = btrfs_disk_key_offset(&disk_key);
3996 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
3997 btrfs_set_item_key(leaf, &disk_key, slot);
3998 if (slot == 0)
3999 fixup_low_keys(path, &disk_key, 1);
4000 }
4001
4002 btrfs_set_item_size(leaf, slot, new_size);
4003 btrfs_mark_buffer_dirty(leaf);
4004
4005 if (btrfs_leaf_free_space(leaf) < 0) {
4006 btrfs_print_leaf(leaf);
4007 BUG();
4008 }
4009}
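
/*
 * Example (illustrative sizes): shrinking a 100 byte item to 60 bytes with
 * btrfs_truncate_item(path, 60, 1) discards the last 40 data bytes; with
 * from_end == 0 the first 40 bytes are dropped instead and the key offset is
 * moved forward by 40 so it still points at the start of the remaining data.
 */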
4010
4011/*
4012 * make the item pointed to by the path bigger, data_size is the added size.
4013 */
4014void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
4015{
4016 int slot;
4017 struct extent_buffer *leaf;
4018 u32 nritems;
4019 unsigned int data_end;
4020 unsigned int old_data;
4021 unsigned int old_size;
4022 int i;
4023 struct btrfs_map_token token;
4024
4025 leaf = path->nodes[0];
4026
4027 nritems = btrfs_header_nritems(leaf);
4028 data_end = leaf_data_end(leaf);
4029
4030 if (btrfs_leaf_free_space(leaf) < data_size) {
4031 btrfs_print_leaf(leaf);
4032 BUG();
4033 }
4034 slot = path->slots[0];
4035 old_data = btrfs_item_data_end(leaf, slot);
4036
4037 BUG_ON(slot < 0);
4038 if (slot >= nritems) {
4039 btrfs_print_leaf(leaf);
4040 btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d",
4041 slot, nritems);
4042 BUG();
4043 }
4044
4045 /*
4046 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4047 */
4048 /* first correct the data pointers */
4049 btrfs_init_map_token(&token, leaf);
4050 for (i = slot; i < nritems; i++) {
4051 u32 ioff;
4052
4053 ioff = btrfs_token_item_offset(&token, i);
4054 btrfs_set_token_item_offset(&token, i, ioff - data_size);
4055 }
4056
4057 /* shift the data */
4058 memmove_leaf_data(leaf, data_end - data_size, data_end,
4059 old_data - data_end);
4060
4061 data_end = old_data;
4062 old_size = btrfs_item_size(leaf, slot);
4063 btrfs_set_item_size(leaf, slot, old_size + data_size);
4064 btrfs_mark_buffer_dirty(leaf);
4065
4066 if (btrfs_leaf_free_space(leaf) < 0) {
4067 btrfs_print_leaf(leaf);
4068 BUG();
4069 }
4070}
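
/*
 * Note (added for clarity): the caller must have reserved the extra room
 * beforehand, e.g. by passing an ins_len to btrfs_search_slot(); as the
 * checks above show, this function BUG()s if the leaf lacks data_size bytes
 * of free space.
 */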
4071
/*
 * Make space in the leaf before inserting one or more items.
 *
 * @root:	root we are inserting items to
 * @path:	points to the leaf/slot where we are going to insert new items
 * @batch:	information about the batch of items to insert
 *
 * Main purpose is to save stack depth by doing the bulk of the work in a
 * function that doesn't call btrfs_search_slot
 */
4082static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4083 const struct btrfs_item_batch *batch)
4084{
4085 struct btrfs_fs_info *fs_info = root->fs_info;
4086 int i;
4087 u32 nritems;
4088 unsigned int data_end;
4089 struct btrfs_disk_key disk_key;
4090 struct extent_buffer *leaf;
4091 int slot;
4092 struct btrfs_map_token token;
4093 u32 total_size;
4094
4095 /*
4096 * Before anything else, update keys in the parent and other ancestors
4097 * if needed, then release the write locks on them, so that other tasks
4098 * can use them while we modify the leaf.
4099 */
4100 if (path->slots[0] == 0) {
4101 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]);
4102 fixup_low_keys(path, &disk_key, 1);
4103 }
4104 btrfs_unlock_up_safe(path, 1);
4105
4106 leaf = path->nodes[0];
4107 slot = path->slots[0];
4108
4109 nritems = btrfs_header_nritems(leaf);
4110 data_end = leaf_data_end(leaf);
4111 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));
4112
4113 if (btrfs_leaf_free_space(leaf) < total_size) {
4114 btrfs_print_leaf(leaf);
4115 btrfs_crit(fs_info, "not enough freespace need %u have %d",
4116 total_size, btrfs_leaf_free_space(leaf));
4117 BUG();
4118 }
4119
4120 btrfs_init_map_token(&token, leaf);
4121 if (slot != nritems) {
4122 unsigned int old_data = btrfs_item_data_end(leaf, slot);
4123
4124 if (old_data < data_end) {
4125 btrfs_print_leaf(leaf);
4126 btrfs_crit(fs_info,
4127 "item at slot %d with data offset %u beyond data end of leaf %u",
4128 slot, old_data, data_end);
4129 BUG();
4130 }
4131 /*
4132 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4133 */
4134 /* first correct the data pointers */
4135 for (i = slot; i < nritems; i++) {
4136 u32 ioff;
4137
4138 ioff = btrfs_token_item_offset(&token, i);
4139 btrfs_set_token_item_offset(&token, i,
4140 ioff - batch->total_data_size);
4141 }
4142 /* shift the items */
4143 memmove_leaf_items(leaf, slot + batch->nr, slot, nritems - slot);
4144
4145 /* shift the data */
4146 memmove_leaf_data(leaf, data_end - batch->total_data_size,
4147 data_end, old_data - data_end);
4148 data_end = old_data;
4149 }
4150
4151 /* setup the item for the new data */
4152 for (i = 0; i < batch->nr; i++) {
4153 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[i]);
4154 btrfs_set_item_key(leaf, &disk_key, slot + i);
4155 data_end -= batch->data_sizes[i];
4156 btrfs_set_token_item_offset(&token, slot + i, data_end);
4157 btrfs_set_token_item_size(&token, slot + i, batch->data_sizes[i]);
4158 }
4159
4160 btrfs_set_header_nritems(leaf, nritems + batch->nr);
4161 btrfs_mark_buffer_dirty(leaf);
4162
4163 if (btrfs_leaf_free_space(leaf) < 0) {
4164 btrfs_print_leaf(leaf);
4165 BUG();
4166 }
4167}
4168
4169/*
4170 * Insert a new item into a leaf.
4171 *
4172 * @root: The root of the btree.
4173 * @path: A path pointing to the target leaf and slot.
4174 * @key: The key of the new item.
4175 * @data_size: The size of the data associated with the new key.
4176 */
4177void btrfs_setup_item_for_insert(struct btrfs_root *root,
4178 struct btrfs_path *path,
4179 const struct btrfs_key *key,
4180 u32 data_size)
4181{
4182 struct btrfs_item_batch batch;
4183
4184 batch.keys = key;
4185 batch.data_sizes = &data_size;
4186 batch.total_data_size = data_size;
4187 batch.nr = 1;
4188
4189 setup_items_for_insert(root, path, &batch);
4190}
4191
4192/*
4193 * Given a key and some data, insert items into the tree.
4194 * This does all the path init required, making room in the tree if needed.
4195 */
4196int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4197 struct btrfs_root *root,
4198 struct btrfs_path *path,
4199 const struct btrfs_item_batch *batch)
4200{
4201 int ret = 0;
4202 int slot;
4203 u32 total_size;
4204
4205 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));
4206 ret = btrfs_search_slot(trans, root, &batch->keys[0], path, total_size, 1);
4207 if (ret == 0)
4208 return -EEXIST;
4209 if (ret < 0)
4210 return ret;
4211
4212 slot = path->slots[0];
4213 BUG_ON(slot < 0);
4214
4215 setup_items_for_insert(root, path, batch);
4216 return 0;
4217}
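
/*
 * Usage sketch (hypothetical caller, illustrative only): insert two empty
 * items in one batch and let the caller fill in their data afterwards
 * through the returned path. The keys are assumed to be sorted and absent
 * from the tree.
 */
static inline int example_insert_two_items(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   const struct btrfs_key keys[2],
					   const u32 sizes[2])
{
	struct btrfs_item_batch batch = {
		.keys = keys,
		.data_sizes = sizes,
		.total_data_size = sizes[0] + sizes[1],
		.nr = 2,
	};

	return btrfs_insert_empty_items(trans, root, path, &batch);
}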
4218
4219/*
4220 * Given a key and some data, insert an item into the tree.
4221 * This does all the path init required, making room in the tree if needed.
4222 */
4223int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4224 const struct btrfs_key *cpu_key, void *data,
4225 u32 data_size)
4226{
4227 int ret = 0;
4228 struct btrfs_path *path;
4229 struct extent_buffer *leaf;
4230 unsigned long ptr;
4231
4232 path = btrfs_alloc_path();
4233 if (!path)
4234 return -ENOMEM;
4235 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4236 if (!ret) {
4237 leaf = path->nodes[0];
4238 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4239 write_extent_buffer(leaf, data, ptr, data_size);
4240 btrfs_mark_buffer_dirty(leaf);
4241 }
4242 btrfs_free_path(path);
4243 return ret;
4244}
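
/*
 * Note (added for clarity): unlike btrfs_insert_empty_items() above, this
 * convenience wrapper allocates and frees its own path and copies the
 * caller's data into the new item before returning.
 */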
4245
4246/*
4247 * This function duplicates an item, giving 'new_key' to the new item.
4248 * It guarantees both items live in the same tree leaf and the new item is
4249 * contiguous with the original item.
4250 *
4251 * This allows us to split a file extent in place, keeping a lock on the leaf
4252 * the entire time.
4253 */
4254int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4255 struct btrfs_root *root,
4256 struct btrfs_path *path,
4257 const struct btrfs_key *new_key)
4258{
4259 struct extent_buffer *leaf;
4260 int ret;
4261 u32 item_size;
4262
4263 leaf = path->nodes[0];
4264 item_size = btrfs_item_size(leaf, path->slots[0]);
4265 ret = setup_leaf_for_split(trans, root, path,
4266 item_size + sizeof(struct btrfs_item));
4267 if (ret)
4268 return ret;
4269
4270 path->slots[0]++;
4271 btrfs_setup_item_for_insert(root, path, new_key, item_size);
4272 leaf = path->nodes[0];
4273 memcpy_extent_buffer(leaf,
4274 btrfs_item_ptr_offset(leaf, path->slots[0]),
4275 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4276 item_size);
4277 return 0;
4278}
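
/*
 * Note (added for clarity): on success the path points at the new item,
 * since path->slots[0] was advanced before btrfs_setup_item_for_insert();
 * the original item sits in the slot just before it.
 */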
4279
4280/*
4281 * delete the pointer from a given node.
4282 *
4283 * the tree should have been previously balanced so the deletion does not
4284 * empty a node.
4285 */
4286static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4287 int level, int slot)
4288{
4289 struct extent_buffer *parent = path->nodes[level];
4290 u32 nritems;
4291 int ret;
4292
4293 nritems = btrfs_header_nritems(parent);
4294 if (slot != nritems - 1) {
4295 if (level) {
4296 ret = btrfs_tree_mod_log_insert_move(parent, slot,
4297 slot + 1, nritems - slot - 1);
4298 BUG_ON(ret < 0);
4299 }
4300 memmove_extent_buffer(parent,
4301 btrfs_node_key_ptr_offset(parent, slot),
4302 btrfs_node_key_ptr_offset(parent, slot + 1),
4303 sizeof(struct btrfs_key_ptr) *
4304 (nritems - slot - 1));
4305 } else if (level) {
4306 ret = btrfs_tree_mod_log_insert_key(parent, slot,
4307 BTRFS_MOD_LOG_KEY_REMOVE);
4308 BUG_ON(ret < 0);
4309 }
4310
4311 nritems--;
4312 btrfs_set_header_nritems(parent, nritems);
4313 if (nritems == 0 && parent == root->node) {
4314 BUG_ON(btrfs_header_level(root->node) != 1);
4315 /* just turn the root into a leaf and break */
4316 btrfs_set_header_level(root->node, 0);
4317 } else if (slot == 0) {
4318 struct btrfs_disk_key disk_key;
4319
4320 btrfs_node_key(parent, &disk_key, 0);
4321 fixup_low_keys(path, &disk_key, level + 1);
4322 }
4323 btrfs_mark_buffer_dirty(parent);
4324}
4325
/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing. path->nodes[1] must be locked.
 */
4336static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4337 struct btrfs_root *root,
4338 struct btrfs_path *path,
4339 struct extent_buffer *leaf)
4340{
4341 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4342 del_ptr(root, path, 1, path->slots[1]);
4343
4344 /*
4345 * btrfs_free_extent is expensive, we want to make sure we
4346 * aren't holding any locks when we call it
4347 */
4348 btrfs_unlock_up_safe(path, 0);
4349
4350 root_sub_used(root, leaf->len);
4351
4352 atomic_inc(&leaf->refs);
4353 btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
4354 free_extent_buffer_stale(leaf);
}

/*
 * delete the item at the leaf level in path. If that empties
 * the leaf, remove it from the tree.
 */
4360int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4361 struct btrfs_path *path, int slot, int nr)
4362{
4363 struct btrfs_fs_info *fs_info = root->fs_info;
4364 struct extent_buffer *leaf;
4365 int ret = 0;
4366 int wret;
4367 u32 nritems;
4368
4369 leaf = path->nodes[0];
4370 nritems = btrfs_header_nritems(leaf);
4371
4372 if (slot + nr != nritems) {
4373 const u32 last_off = btrfs_item_offset(leaf, slot + nr - 1);
4374 const int data_end = leaf_data_end(leaf);
4375 struct btrfs_map_token token;
4376 u32 dsize = 0;
4377 int i;
4378
4379 for (i = 0; i < nr; i++)
4380 dsize += btrfs_item_size(leaf, slot + i);
4381
4382 memmove_leaf_data(leaf, data_end + dsize, data_end,
4383 last_off - data_end);
4384
4385 btrfs_init_map_token(&token, leaf);
4386 for (i = slot + nr; i < nritems; i++) {
4387 u32 ioff;
4388
4389 ioff = btrfs_token_item_offset(&token, i);
4390 btrfs_set_token_item_offset(&token, i, ioff + dsize);
4391 }
4392
4393 memmove_leaf_items(leaf, slot, slot + nr, nritems - slot - nr);
4394 }
4395 btrfs_set_header_nritems(leaf, nritems - nr);
4396 nritems -= nr;
4397
4398 /* delete the leaf if we've emptied it */
4399 if (nritems == 0) {
4400 if (leaf == root->node) {
4401 btrfs_set_header_level(leaf, 0);
4402 } else {
4403 btrfs_clean_tree_block(leaf);
4404 btrfs_del_leaf(trans, root, path, leaf);
4405 }
4406 } else {
4407 int used = leaf_space_used(leaf, 0, nritems);
4408 if (slot == 0) {
4409 struct btrfs_disk_key disk_key;
4410
4411 btrfs_item_key(leaf, &disk_key, 0);
4412 fixup_low_keys(path, &disk_key, 1);
4413 }
4414
4415 /*
4416 * Try to delete the leaf if it is mostly empty. We do this by
4417 * trying to move all its items into its left and right neighbours.
4418 * If we can't move all the items, then we don't delete it - it's
4419 * not ideal, but future insertions might fill the leaf with more
4420 * items, or items from other leaves might be moved later into our
4421 * leaf due to deletions on those leaves.
4422 */
4423 if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
4424 u32 min_push_space;
4425
			/*
			 * push_leaf_left fixes the path.
			 * Make sure the path still points to our leaf
			 * for a possible call to del_ptr below.
			 */
4430 slot = path->slots[1];
4431 atomic_inc(&leaf->refs);
4432 /*
4433 * We want to be able to at least push one item to the
4434 * left neighbour leaf, and that's the first item.
4435 */
4436 min_push_space = sizeof(struct btrfs_item) +
4437 btrfs_item_size(leaf, 0);
4438 wret = push_leaf_left(trans, root, path, 0,
4439 min_push_space, 1, (u32)-1);
4440 if (wret < 0 && wret != -ENOSPC)
4441 ret = wret;
4442
4443 if (path->nodes[0] == leaf &&
4444 btrfs_header_nritems(leaf)) {
4445 /*
4446 * If we were not able to push all items from our
4447 * leaf to its left neighbour, then attempt to
4448 * either push all the remaining items to the
4449 * right neighbour or none. There's no advantage
4450 * in pushing only some items, instead of all, as
4451 * it's pointless to end up with a leaf having
4452 * too few items while the neighbours can be full
4453 * or nearly full.
4454 */
4455 nritems = btrfs_header_nritems(leaf);
4456 min_push_space = leaf_space_used(leaf, 0, nritems);
4457 wret = push_leaf_right(trans, root, path, 0,
4458 min_push_space, 1, 0);
4459 if (wret < 0 && wret != -ENOSPC)
4460 ret = wret;
4461 }
4462
4463 if (btrfs_header_nritems(leaf) == 0) {
4464 path->slots[1] = slot;
4465 btrfs_del_leaf(trans, root, path, leaf);
4466 free_extent_buffer(leaf);
4467 ret = 0;
4468 } else {
4469 /* if we're still in the path, make sure
4470 * we're dirty. Otherwise, one of the
4471 * push_leaf functions must have already
4472 * dirtied this buffer
4473 */
4474 if (path->nodes[0] == leaf)
4475 btrfs_mark_buffer_dirty(leaf);
4476 free_extent_buffer(leaf);
4477 }
4478 } else {
4479 btrfs_mark_buffer_dirty(leaf);
4480 }
4481 }
4482 return ret;
4483}
4484
4485/*
4486 * search the tree again to find a leaf with lesser keys
4487 * returns 0 if it found something or 1 if there are no lesser leaves.
4488 * returns < 0 on io errors.
4489 *
4490 * This may release the path, and so you may lose any locks held at the
4491 * time you call it.
4492 */
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_disk_key found_key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);

	if (key.offset > 0) {
		key.offset--;
	} else if (key.type > 0) {
		key.type--;
		key.offset = (u64)-1;
	} else if (key.objectid > 0) {
		key.objectid--;
		key.type = (u8)-1;
		key.offset = (u64)-1;
	} else {
		return 1;
	}

	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	btrfs_item_key(path->nodes[0], &found_key, 0);
	ret = comp_keys(&found_key, &key);
	/*
	 * We might have had an item with the previous key in the tree right
	 * before we released our path. And after we released our path, that
	 * item might have been pushed to the first slot (0) of the leaf we
	 * were holding due to a tree balance. Alternatively, an item with the
	 * previous key can exist as the only element of a leaf (big fat item).
	 * Therefore account for these 2 cases, so that our callers (like
	 * btrfs_previous_item) don't miss an existing item with a key matching
	 * the previous key we computed above.
	 */
	if (ret <= 0)
		return 0;
	return 1;
}

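/*
 * A worked example of the previous-key computation above, treating keys
 * as (objectid, type, offset) triples ordered lexicographically (the
 * concrete values are illustrative only):
 *
 *	(257, 108, 4096) -> (257, 108, 4095)		decrement offset
 *	(257, 108, 0)    -> (257, 107, (u64)-1)		borrow from type
 *	(257, 0, 0)      -> (256, (u8)-1, (u64)-1)	borrow from objectid
 *	(0, 0, 0)        -> smallest possible key, return 1
 */
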
/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that have a minimum transaction id.
 * This is used by the btree defrag code, and tree logging.
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through. Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * Returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_path *path,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;
	int keep_locks = path->keep_locks;

	ASSERT(!path->nowait);
	path->keep_locks = 1;
again:
	cur = btrfs_read_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = BTRFS_READ_LOCK;

	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = btrfs_bin_search(cur, min_key, &slot);
		if (sret < 0) {
			ret = sret;
			goto out;
		}

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the min_trans parameters.
		 * If it is too old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 gen;

			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			break;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			sret = btrfs_find_next_key(root, path, min_key, level,
						   min_trans);
			if (sret == 0) {
				btrfs_release_path(path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			goto out;
		}
		cur = btrfs_read_node_slot(cur, slot);
		if (IS_ERR(cur)) {
			ret = PTR_ERR(cur);
			goto out;
		}

		btrfs_tree_read_lock(cur);

		path->locks[level - 1] = BTRFS_READ_LOCK;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1, 0, NULL);
	}
out:
	path->keep_locks = keep_locks;
	if (ret == 0) {
		btrfs_unlock_up_safe(path, path->lowest_level + 1);
		memcpy(min_key, &found_key, sizeof(found_key));
	}
	return ret;
}

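/*
 * Illustrative sketch, not part of this file: callers such as the tree
 * logging and defrag code drive btrfs_search_forward() in a loop,
 * restarting from the key it stuffs back into min_key and advancing the
 * key so each iteration makes progress. process_leaf() and min_trans are
 * assumed placeholders; real callers advance the key component-wise.
 *
 *	struct btrfs_key min_key = { 0 };
 *
 *	while (1) {
 *		ret = btrfs_search_forward(root, &min_key, path, min_trans);
 *		if (ret != 0)
 *			break;
 *		process_leaf(path->nodes[0], path->slots[0]);
 *		btrfs_release_path(path);
 *		if (min_key.offset == (u64)-1)
 *			break;
 *		min_key.offset++;
 *	}
 *
 * ret ends up as 1 when the tree is exhausted and < 0 on error.
 */
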
/*
 * This is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path. It looks for and returns the next key in the
 * tree based on the current path and the min_trans parameters.
 *
 * Returns 0 if another key was found, < 0 if there are any errors,
 * and 1 if there are no higher keys in the tree.
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int level, u64 min_trans)
{
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks && !path->skip_locking);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			int ret;
			int orig_lowest;
			struct btrfs_key cur_key;
			if (level + 1 >= BTRFS_MAX_LEVEL ||
			    !path->nodes[level + 1])
				return 1;

			if (path->locks[level + 1] || path->skip_locking) {
				level++;
				continue;
			}

			slot = btrfs_header_nritems(c) - 1;
			if (level == 0)
				btrfs_item_key_to_cpu(c, &cur_key, slot);
			else
				btrfs_node_key_to_cpu(c, &cur_key, slot);

			orig_lowest = path->lowest_level;
			btrfs_release_path(path);
			path->lowest_level = level;
			ret = btrfs_search_slot(NULL, root, &cur_key, path,
						0, 0);
			path->lowest_level = orig_lowest;
			if (ret < 0)
				return ret;

			c = path->nodes[level];
			slot = path->slots[level];
			if (ret == 0)
				slot++;
			goto next;
		}

		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}

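/*
 * Illustrative sketch, not part of this file: btrfs_find_next_key()
 * depends on the locks kept by a preceding search, so a caller would
 * typically pair it with a keep_locks search like this ('key' is an
 * assumed placeholder; min_trans == 0 disables the generation filter):
 *
 *	path->keep_locks = 1;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret >= 0)
 *		ret = btrfs_find_next_key(root, path, &key, 0, 0);
 *	path->keep_locks = 0;
 *	btrfs_release_path(path);
 */
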
int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	bool need_commit_sem = false;
	u32 nritems;
	int ret;
	int i;

	/*
	 * The nowait semantics are used only for write paths, where we don't
	 * use the tree mod log and sequence numbers.
	 */
	if (time_seq)
		ASSERT(!path->nowait);

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	btrfs_release_path(path);

	path->keep_locks = 1;

	if (time_seq) {
		ret = btrfs_search_old_slot(root, &key, path, time_seq);
	} else {
		if (path->need_commit_sem) {
			path->need_commit_sem = 0;
			need_commit_sem = true;
			if (path->nowait) {
				if (!down_read_trylock(&fs_info->commit_root_sem)) {
					ret = -EAGAIN;
					goto done;
				}
			} else {
				down_read(&fs_info->commit_root_sem);
			}
		}
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	}
	path->keep_locks = 0;

	if (ret < 0)
		goto done;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks. A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block. So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}
	/*
	 * So the above check misses one case:
	 * - after releasing the path above, someone has removed the item that
	 *   used to be at the very end of the block, and balance between leaves
	 *   gets another one with a bigger key.offset to replace it.
	 *
	 * This one should be returned as well, or we can get leaf corruption
	 * later (esp. in __btrfs_drop_extents()).
	 *
	 * A bit more explanation about this check:
	 * with ret > 0, the key isn't found, the path points to the slot
	 * where it should be inserted, so the path->slots[0] item must be the
	 * bigger one.
	 */
	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		/*
		 * Our current level is where we're going to start from, and to
		 * make sure lockdep doesn't complain we need to drop our locks
		 * and nodes from 0 to our current level.
		 */
		for (i = 0; i < level; i++) {
			if (path->locks[i]) {
				btrfs_tree_read_unlock(path->nodes[i]);
				path->locks[i] = 0;
			}
			free_extent_buffer(path->nodes[i]);
			path->nodes[i] = NULL;
		}

		next = c;
		ret = read_block_for_search(root, path, &next, level,
					    slot, &key);
		if (ret == -EAGAIN && !path->nowait)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret && path->nowait) {
				ret = -EAGAIN;
				goto done;
			}
			if (!ret && time_seq) {
				/*
				 * If we don't get the lock, we may be racing
				 * with push_leaf_left, holding that lock while
				 * itself waiting for the leaf we've currently
				 * locked. To solve this situation, we give up
				 * on our lock and cycle.
				 */
				free_extent_buffer(next);
				btrfs_release_path(path);
				cond_resched();
				goto again;
			}
			if (!ret)
				btrfs_tree_read_lock(next);
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = BTRFS_READ_LOCK;
		if (!level)
			break;

		ret = read_block_for_search(root, path, &next, level,
					    0, &key);
		if (ret == -EAGAIN && !path->nowait)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			if (path->nowait) {
				if (!btrfs_try_tree_read_lock(next)) {
					ret = -EAGAIN;
					goto done;
				}
			} else {
				btrfs_tree_read_lock(next);
			}
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1, 0, NULL);
	if (need_commit_sem) {
		int ret2;

		path->need_commit_sem = 1;
		ret2 = finish_need_commit_sem_search(path);
		up_read(&fs_info->commit_root_sem);
		if (ret2)
			ret = ret2;
	}

	return ret;
}

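/*
 * Illustrative sketch, not part of this file: btrfs_next_leaf() and
 * btrfs_next_item() (the time_seq == 0 wrappers of the two functions
 * above, declared in ctree.h) carry the standard forward-iteration
 * idiom. first_key, found_key and process_item() are assumed
 * placeholders:
 *
 *	ret = btrfs_search_slot(NULL, root, &first_key, path, 0, 0);
 *	while (ret >= 0) {
 *		struct extent_buffer *leaf = path->nodes[0];
 *
 *		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 *			ret = btrfs_next_leaf(root, path);
 *			if (ret != 0)
 *				break;
 *			continue;
 *		}
 *		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
 *		process_item(leaf, path->slots[0], &found_key);
 *		path->slots[0]++;
 *	}
 */
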
int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq)
{
	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
		return btrfs_next_old_leaf(root, path, time_seq);
	return 0;
}

/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == type)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)
			break;
	}
	return 1;
}

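/*
 * Illustrative sketch, not part of this file: a common pattern is to
 * search for the largest possible key of a given (objectid, type) pair
 * and step back to the last existing item of that type. objectid and
 * type are assumed placeholders; note that callers still verify
 * found_key.objectid afterwards, since btrfs_previous_item() only
 * guarantees it did not walk past min_objectid.
 *
 *	key.objectid = objectid;
 *	key.type = type;
 *	key.offset = (u64)-1;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret > 0)
 *		ret = btrfs_previous_item(root, path, objectid, type);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
 *				      path->slots[0]);
 */
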
/*
 * search in the extent tree to find a previous Metadata/Data extent item with
 * min objectid.
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_extent_item(struct btrfs_root *root,
			       struct btrfs_path *path, u64 min_objectid)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
		    found_key.type == BTRFS_METADATA_ITEM_KEY)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
			break;
	}
	return 1;
}

int __init btrfs_ctree_init(void)
{
	btrfs_path_cachep = kmem_cache_create("btrfs_path",
			sizeof(struct btrfs_path), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!btrfs_path_cachep)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_ctree_exit(void)
{
	kmem_cache_destroy(btrfs_path_cachep);
}