1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2007,2008 Oracle. All rights reserved.
4 */
5
6#include <linux/sched.h>
7#include <linux/slab.h>
8#include <linux/rbtree.h>
9#include <linux/mm.h>
10#include <linux/error-injection.h>
11#include "messages.h"
12#include "ctree.h"
13#include "disk-io.h"
14#include "transaction.h"
15#include "print-tree.h"
16#include "locking.h"
17#include "volumes.h"
18#include "qgroup.h"
19#include "tree-mod-log.h"
20#include "tree-checker.h"
21#include "fs.h"
22#include "accessors.h"
23#include "extent-tree.h"
24#include "relocation.h"
25#include "file-item.h"
26
27static struct kmem_cache *btrfs_path_cachep;
28
29static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
30 *root, struct btrfs_path *path, int level);
31static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
32 const struct btrfs_key *ins_key, struct btrfs_path *path,
33 int data_size, int extend);
34static int push_node_left(struct btrfs_trans_handle *trans,
35 struct extent_buffer *dst,
36 struct extent_buffer *src, int empty);
37static int balance_node_right(struct btrfs_trans_handle *trans,
38 struct extent_buffer *dst_buf,
39 struct extent_buffer *src_buf);
40static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
41 int level, int slot);
42
43static const struct btrfs_csums {
44 u16 size;
45 const char name[10];
46 const char driver[12];
47} btrfs_csums[] = {
48 [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
49 [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
50 [BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
51 [BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
52 .driver = "blake2b-256" },
53};
54
/*
 * The leaf data grows from end-to-front in the node. This returns the address
 * of the start of the last item, which is the stop of the leaf data stack.
 */
59static unsigned int leaf_data_end(const struct extent_buffer *leaf)
60{
61 u32 nr = btrfs_header_nritems(leaf);
62
63 if (nr == 0)
64 return BTRFS_LEAF_DATA_SIZE(leaf->fs_info);
65 return btrfs_item_offset(leaf, nr - 1);
66}
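
/*
 * Roughly, a leaf is laid out with the item headers growing from the front
 * of the block while the item data grows backwards from the end, so
 * leaf_data_end() is the offset of the lowest placed item's data:
 *
 *	[ header | item 0 | item 1 | ...free space... | data 1 | data 0 ]
 *	                                               ^
 *	                                               leaf_data_end()
 *
 * With nritems == 0 there is no data stack yet, so the end is simply
 * BTRFS_LEAF_DATA_SIZE(), i.e. the whole data area is free.
 */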
67
/*
 * Move data in a @leaf (using memmove, safe for overlapping ranges).
 *
 * @leaf:	leaf that we're doing a memmove on
 * @dst_offset:	item data offset we're moving to
 * @src_offset:	item data offset we're moving from
 * @len:	length of the data we're moving
 *
 * Wrapper around memmove_extent_buffer() that takes into account the header on
 * the leaf. The btrfs_item offsets start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf. This
 * handles that math to simplify the callers.
 */
81static inline void memmove_leaf_data(const struct extent_buffer *leaf,
82 unsigned long dst_offset,
83 unsigned long src_offset,
84 unsigned long len)
85{
86 memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, 0) + dst_offset,
87 btrfs_item_nr_offset(leaf, 0) + src_offset, len);
88}
89
/*
 * Copy item data from @src into @dst at the given @offset.
 *
 * @dst:	destination leaf that we're copying into
 * @src:	source leaf that we're copying from
 * @dst_offset:	item data offset we're copying to
 * @src_offset:	item data offset we're copying from
 * @len:	length of the data we're copying
 *
 * Wrapper around copy_extent_buffer() that takes into account the header on
 * the leaf. The btrfs_item offsets start directly after the header, so we
 * have to adjust any offsets to account for the header in the leaf. This
 * handles that math to simplify the callers.
 */
104static inline void copy_leaf_data(const struct extent_buffer *dst,
105 const struct extent_buffer *src,
106 unsigned long dst_offset,
107 unsigned long src_offset, unsigned long len)
108{
109 copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, 0) + dst_offset,
110 btrfs_item_nr_offset(src, 0) + src_offset, len);
111}
112
113/*
114 * Move items in a @leaf (using memmove).
115 *
116 * @dst: destination leaf for the items
117 * @dst_item: the item nr we're copying into
118 * @src_item: the item nr we're copying from
119 * @nr_items: the number of items to copy
120 *
121 * Wrapper around memmove_extent_buffer() that does the math to get the
122 * appropriate offsets into the leaf from the item numbers.
123 */
124static inline void memmove_leaf_items(const struct extent_buffer *leaf,
125 int dst_item, int src_item, int nr_items)
126{
127 memmove_extent_buffer(leaf, btrfs_item_nr_offset(leaf, dst_item),
128 btrfs_item_nr_offset(leaf, src_item),
129 nr_items * sizeof(struct btrfs_item));
130}
131
132/*
133 * Copy items from @src into @dst at the given @offset.
134 *
135 * @dst: destination leaf for the items
136 * @src: source leaf for the items
137 * @dst_item: the item nr we're copying into
138 * @src_item: the item nr we're copying from
139 * @nr_items: the number of items to copy
140 *
141 * Wrapper around copy_extent_buffer() that does the math to get the
142 * appropriate offsets into the leaf from the item numbers.
143 */
144static inline void copy_leaf_items(const struct extent_buffer *dst,
145 const struct extent_buffer *src,
146 int dst_item, int src_item, int nr_items)
147{
148 copy_extent_buffer(dst, src, btrfs_item_nr_offset(dst, dst_item),
149 btrfs_item_nr_offset(src, src_item),
150 nr_items * sizeof(struct btrfs_item));
151}
152
153int btrfs_super_csum_size(const struct btrfs_super_block *s)
154{
155 u16 t = btrfs_super_csum_type(s);
156 /*
157 * csum type is validated at mount time
158 */
159 return btrfs_csums[t].size;
160}
161
162const char *btrfs_super_csum_name(u16 csum_type)
163{
164 /* csum type is validated at mount time */
165 return btrfs_csums[csum_type].name;
166}
167
168/*
169 * Return driver name if defined, otherwise the name that's also a valid driver
170 * name
171 */
172const char *btrfs_super_csum_driver(u16 csum_type)
173{
174 /* csum type is validated at mount time */
175 return btrfs_csums[csum_type].driver[0] ?
176 btrfs_csums[csum_type].driver :
177 btrfs_csums[csum_type].name;
178}
179
180size_t __attribute_const__ btrfs_get_num_csums(void)
181{
182 return ARRAY_SIZE(btrfs_csums);
183}
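
/*
 * How the table above drives the helpers, with xxhash and blake2 as
 * examples (the loop is only a sketch of how a caller might enumerate the
 * supported checksums):
 *
 *	btrfs_super_csum_name(BTRFS_CSUM_TYPE_XXHASH);    // "xxhash64"
 *	btrfs_super_csum_driver(BTRFS_CSUM_TYPE_XXHASH);  // "xxhash64", no
 *							   // explicit driver
 *	btrfs_super_csum_driver(BTRFS_CSUM_TYPE_BLAKE2);  // "blake2b-256"
 *
 *	for (int i = 0; i < btrfs_get_num_csums(); i++)
 *		pr_info("csum %s size %u\n", btrfs_csums[i].name,
 *			btrfs_csums[i].size);
 */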
184
185struct btrfs_path *btrfs_alloc_path(void)
186{
187 might_sleep();
188
189 return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
190}
191
192/* this also releases the path */
193void btrfs_free_path(struct btrfs_path *p)
194{
195 if (!p)
196 return;
197 btrfs_release_path(p);
198 kmem_cache_free(btrfs_path_cachep, p);
199}
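
/*
 * Typical lifecycle of a path, sketched from the helpers above with error
 * handling abbreviated and @root plus @key assumed to be prepared by the
 * caller: allocate it, use it for a search, then free it, which also
 * releases any locks and extent buffer references still held.
 *
 *	struct btrfs_path *path;
 *	struct btrfs_key found;
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &found, path->slots[0]);
 *	btrfs_free_path(path);
 *	return ret < 0 ? ret : 0;
 */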
200
/*
 * Release a path: drop the references on the extent buffers in the path
 * and drop any locks held by this path.
 *
 * It is safe to call this on paths that hold no locks or extent buffers.
 */
207noinline void btrfs_release_path(struct btrfs_path *p)
208{
209 int i;
210
211 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
212 p->slots[i] = 0;
213 if (!p->nodes[i])
214 continue;
215 if (p->locks[i]) {
216 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
217 p->locks[i] = 0;
218 }
219 free_extent_buffer(p->nodes[i]);
220 p->nodes[i] = NULL;
221 }
222}
223
224/*
225 * We want the transaction abort to print stack trace only for errors where the
226 * cause could be a bug, eg. due to ENOSPC, and not for common errors that are
227 * caused by external factors.
228 */
229bool __cold abort_should_print_stack(int errno)
230{
231 switch (errno) {
232 case -EIO:
233 case -EROFS:
234 case -ENOMEM:
235 return false;
236 }
237 return true;
238}
239
240/*
241 * safely gets a reference on the root node of a tree. A lock
242 * is not taken, so a concurrent writer may put a different node
243 * at the root of the tree. See btrfs_lock_root_node for the
244 * looping required.
245 *
246 * The extent buffer returned by this has a reference taken, so
247 * it won't disappear. It may stop being the root of the tree
248 * at any time because there are no locks held.
249 */
250struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
251{
252 struct extent_buffer *eb;
253
254 while (1) {
255 rcu_read_lock();
256 eb = rcu_dereference(root->node);
257
258 /*
259 * RCU really hurts here, we could free up the root node because
260 * it was COWed but we may not get the new root node yet so do
261 * the inc_not_zero dance and if it doesn't work then
262 * synchronize_rcu and try again.
263 */
264 if (atomic_inc_not_zero(&eb->refs)) {
265 rcu_read_unlock();
266 break;
267 }
268 rcu_read_unlock();
269 synchronize_rcu();
270 }
271 return eb;
272}
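
/*
 * The reference returned by btrfs_root_node() must be dropped by the caller.
 * A minimal usage sketch (since no lock is taken, treat the buffer as a
 * snapshot of the root):
 *
 *	struct extent_buffer *eb = btrfs_root_node(root);
 *	int level = btrfs_header_level(eb);
 *
 *	...
 *	free_extent_buffer(eb);
 */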
273
/*
 * Cow-only roots (not-shareable trees, everything that is not a subvolume or
 * reloc root) just get put onto a simple dirty list. The transaction walks
 * this list to make sure they get properly updated on disk.
 */
279static void add_root_to_dirty_list(struct btrfs_root *root)
280{
281 struct btrfs_fs_info *fs_info = root->fs_info;
282
283 if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
284 !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
285 return;
286
287 spin_lock(&fs_info->trans_lock);
288 if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
289 /* Want the extent tree to be the last on the list */
290 if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
291 list_move_tail(&root->dirty_list,
292 &fs_info->dirty_cowonly_roots);
293 else
294 list_move(&root->dirty_list,
295 &fs_info->dirty_cowonly_roots);
296 }
297 spin_unlock(&fs_info->trans_lock);
298}
299
/*
 * Used by snapshot creation to make a copy of a root for a tree with
 * a given objectid. The buffer with the new root node is returned in
 * cow_ret, and this function returns zero on success or a negative error
 * code.
 */
305int btrfs_copy_root(struct btrfs_trans_handle *trans,
306 struct btrfs_root *root,
307 struct extent_buffer *buf,
308 struct extent_buffer **cow_ret, u64 new_root_objectid)
309{
310 struct btrfs_fs_info *fs_info = root->fs_info;
311 struct extent_buffer *cow;
312 int ret = 0;
313 int level;
314 struct btrfs_disk_key disk_key;
315
316 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
317 trans->transid != fs_info->running_transaction->transid);
318 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
319 trans->transid != root->last_trans);
320
321 level = btrfs_header_level(buf);
322 if (level == 0)
323 btrfs_item_key(buf, &disk_key, 0);
324 else
325 btrfs_node_key(buf, &disk_key, 0);
326
327 cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
328 &disk_key, level, buf->start, 0,
329 BTRFS_NESTING_NEW_ROOT);
330 if (IS_ERR(cow))
331 return PTR_ERR(cow);
332
333 copy_extent_buffer_full(cow, buf);
334 btrfs_set_header_bytenr(cow, cow->start);
335 btrfs_set_header_generation(cow, trans->transid);
336 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
337 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
338 BTRFS_HEADER_FLAG_RELOC);
339 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
340 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
341 else
342 btrfs_set_header_owner(cow, new_root_objectid);
343
344 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
345
346 WARN_ON(btrfs_header_generation(buf) > trans->transid);
347 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
348 ret = btrfs_inc_ref(trans, root, cow, 1);
349 else
350 ret = btrfs_inc_ref(trans, root, cow, 0);
351 if (ret) {
352 btrfs_tree_unlock(cow);
353 free_extent_buffer(cow);
354 btrfs_abort_transaction(trans, ret);
355 return ret;
356 }
357
358 btrfs_mark_buffer_dirty(cow);
359 *cow_ret = cow;
360 return 0;
361}
362
363/*
364 * check if the tree block can be shared by multiple trees
365 */
366int btrfs_block_can_be_shared(struct btrfs_root *root,
367 struct extent_buffer *buf)
368{
369 /*
370 * Tree blocks not in shareable trees and tree roots are never shared.
371 * If a block was allocated after the last snapshot and the block was
372 * not allocated by tree relocation, we know the block is not shared.
373 */
374 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
375 buf != root->node && buf != root->commit_root &&
376 (btrfs_header_generation(buf) <=
377 btrfs_root_last_snapshot(&root->root_item) ||
378 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
379 return 1;
380
381 return 0;
382}
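
/*
 * A worked example for the rule above, with made-up generation numbers: if
 * the last snapshot of a shareable root was taken in transaction 100 and
 * @buf carries header generation 90, the block predates that snapshot and
 * may still be referenced by it, so this returns 1. A block with generation
 * 120 and no RELOC flag was created after the last snapshot and therefore
 * cannot be shared, so this returns 0.
 */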
383
384static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
385 struct btrfs_root *root,
386 struct extent_buffer *buf,
387 struct extent_buffer *cow,
388 int *last_ref)
389{
390 struct btrfs_fs_info *fs_info = root->fs_info;
391 u64 refs;
392 u64 owner;
393 u64 flags;
394 u64 new_flags = 0;
395 int ret;
396
	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree blocks
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in the tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in the tree block.
	 * The reason for this is that some operations (such as dropping a
	 * tree) are only allowed for blocks that use full backrefs.
	 */
413
414 if (btrfs_block_can_be_shared(root, buf)) {
415 ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
416 btrfs_header_level(buf), 1,
417 &refs, &flags);
418 if (ret)
419 return ret;
420 if (refs == 0) {
421 ret = -EROFS;
422 btrfs_handle_fs_error(fs_info, ret, NULL);
423 return ret;
424 }
425 } else {
426 refs = 1;
427 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
428 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
429 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
430 else
431 flags = 0;
432 }
433
434 owner = btrfs_header_owner(buf);
435 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
436 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
437
438 if (refs > 1) {
439 if ((owner == root->root_key.objectid ||
440 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
441 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
442 ret = btrfs_inc_ref(trans, root, buf, 1);
443 if (ret)
444 return ret;
445
446 if (root->root_key.objectid ==
447 BTRFS_TREE_RELOC_OBJECTID) {
448 ret = btrfs_dec_ref(trans, root, buf, 0);
449 if (ret)
450 return ret;
451 ret = btrfs_inc_ref(trans, root, cow, 1);
452 if (ret)
453 return ret;
454 }
455 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
456 } else {
457
458 if (root->root_key.objectid ==
459 BTRFS_TREE_RELOC_OBJECTID)
460 ret = btrfs_inc_ref(trans, root, cow, 1);
461 else
462 ret = btrfs_inc_ref(trans, root, cow, 0);
463 if (ret)
464 return ret;
465 }
466 if (new_flags != 0) {
467 int level = btrfs_header_level(buf);
468
469 ret = btrfs_set_disk_extent_flags(trans, buf,
470 new_flags, level);
471 if (ret)
472 return ret;
473 }
474 } else {
475 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
476 if (root->root_key.objectid ==
477 BTRFS_TREE_RELOC_OBJECTID)
478 ret = btrfs_inc_ref(trans, root, cow, 1);
479 else
480 ret = btrfs_inc_ref(trans, root, cow, 0);
481 if (ret)
482 return ret;
483 ret = btrfs_dec_ref(trans, root, buf, 1);
484 if (ret)
485 return ret;
486 }
487 btrfs_clean_tree_block(buf);
488 *last_ref = 1;
489 }
490 return 0;
491}
492
493/*
494 * does the dirty work in cow of a single block. The parent block (if
495 * supplied) is updated to point to the new cow copy. The new buffer is marked
496 * dirty and returned locked. If you modify the block it needs to be marked
497 * dirty again.
498 *
499 * search_start -- an allocation hint for the new block
500 *
501 * empty_size -- a hint that you plan on doing more cow. This is the size in
502 * bytes the allocator should try to find free next to the block it returns.
503 * This is just a hint and may be ignored by the allocator.
504 */
505static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
506 struct btrfs_root *root,
507 struct extent_buffer *buf,
508 struct extent_buffer *parent, int parent_slot,
509 struct extent_buffer **cow_ret,
510 u64 search_start, u64 empty_size,
511 enum btrfs_lock_nesting nest)
512{
513 struct btrfs_fs_info *fs_info = root->fs_info;
514 struct btrfs_disk_key disk_key;
515 struct extent_buffer *cow;
516 int level, ret;
517 int last_ref = 0;
518 int unlock_orig = 0;
519 u64 parent_start = 0;
520
521 if (*cow_ret == buf)
522 unlock_orig = 1;
523
524 btrfs_assert_tree_write_locked(buf);
525
526 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
527 trans->transid != fs_info->running_transaction->transid);
528 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
529 trans->transid != root->last_trans);
530
531 level = btrfs_header_level(buf);
532
533 if (level == 0)
534 btrfs_item_key(buf, &disk_key, 0);
535 else
536 btrfs_node_key(buf, &disk_key, 0);
537
538 if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
539 parent_start = parent->start;
540
541 cow = btrfs_alloc_tree_block(trans, root, parent_start,
542 root->root_key.objectid, &disk_key, level,
543 search_start, empty_size, nest);
544 if (IS_ERR(cow))
545 return PTR_ERR(cow);
546
547 /* cow is set to blocking by btrfs_init_new_buffer */
548
549 copy_extent_buffer_full(cow, buf);
550 btrfs_set_header_bytenr(cow, cow->start);
551 btrfs_set_header_generation(cow, trans->transid);
552 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
553 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
554 BTRFS_HEADER_FLAG_RELOC);
555 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
556 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
557 else
558 btrfs_set_header_owner(cow, root->root_key.objectid);
559
560 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
561
562 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
563 if (ret) {
564 btrfs_tree_unlock(cow);
565 free_extent_buffer(cow);
566 btrfs_abort_transaction(trans, ret);
567 return ret;
568 }
569
570 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
571 ret = btrfs_reloc_cow_block(trans, root, buf, cow);
572 if (ret) {
573 btrfs_tree_unlock(cow);
574 free_extent_buffer(cow);
575 btrfs_abort_transaction(trans, ret);
576 return ret;
577 }
578 }
579
580 if (buf == root->node) {
581 WARN_ON(parent && parent != buf);
582 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
583 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
584 parent_start = buf->start;
585
586 atomic_inc(&cow->refs);
587 ret = btrfs_tree_mod_log_insert_root(root->node, cow, true);
588 BUG_ON(ret < 0);
589 rcu_assign_pointer(root->node, cow);
590
591 btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
592 parent_start, last_ref);
593 free_extent_buffer(buf);
594 add_root_to_dirty_list(root);
595 } else {
596 WARN_ON(trans->transid != btrfs_header_generation(parent));
597 btrfs_tree_mod_log_insert_key(parent, parent_slot,
598 BTRFS_MOD_LOG_KEY_REPLACE);
599 btrfs_set_node_blockptr(parent, parent_slot,
600 cow->start);
601 btrfs_set_node_ptr_generation(parent, parent_slot,
602 trans->transid);
603 btrfs_mark_buffer_dirty(parent);
604 if (last_ref) {
605 ret = btrfs_tree_mod_log_free_eb(buf);
606 if (ret) {
607 btrfs_tree_unlock(cow);
608 free_extent_buffer(cow);
609 btrfs_abort_transaction(trans, ret);
610 return ret;
611 }
612 }
613 btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
614 parent_start, last_ref);
615 }
616 if (unlock_orig)
617 btrfs_tree_unlock(buf);
618 free_extent_buffer_stale(buf);
619 btrfs_mark_buffer_dirty(cow);
620 *cow_ret = cow;
621 return 0;
622}
623
624static inline int should_cow_block(struct btrfs_trans_handle *trans,
625 struct btrfs_root *root,
626 struct extent_buffer *buf)
627{
628 if (btrfs_is_testing(root->fs_info))
629 return 0;
630
631 /* Ensure we can see the FORCE_COW bit */
632 smp_mb__before_atomic();
633
634 /*
635 * We do not need to cow a block if
636 * 1) this block is not created or changed in this transaction;
637 * 2) this block does not belong to TREE_RELOC tree;
638 * 3) the root is not forced COW.
639 *
640 * What is forced COW:
641 * when we create snapshot during committing the transaction,
642 * after we've finished copying src root, we must COW the shared
643 * block to ensure the metadata consistency.
644 */
645 if (btrfs_header_generation(buf) == trans->transid &&
646 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
647 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
648 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
649 !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
650 return 0;
651 return 1;
652}
653
654/*
655 * cows a single block, see __btrfs_cow_block for the real work.
656 * This version of it has extra checks so that a block isn't COWed more than
657 * once per transaction, as long as it hasn't been written yet
658 */
659noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
660 struct btrfs_root *root, struct extent_buffer *buf,
661 struct extent_buffer *parent, int parent_slot,
662 struct extent_buffer **cow_ret,
663 enum btrfs_lock_nesting nest)
664{
665 struct btrfs_fs_info *fs_info = root->fs_info;
666 u64 search_start;
667 int ret;
668
669 if (test_bit(BTRFS_ROOT_DELETING, &root->state))
670 btrfs_err(fs_info,
671 "COW'ing blocks on a fs root that's being dropped");
672
673 if (trans->transaction != fs_info->running_transaction)
674 WARN(1, KERN_CRIT "trans %llu running %llu\n",
675 trans->transid,
676 fs_info->running_transaction->transid);
677
678 if (trans->transid != fs_info->generation)
679 WARN(1, KERN_CRIT "trans %llu running %llu\n",
680 trans->transid, fs_info->generation);
681
682 if (!should_cow_block(trans, root, buf)) {
683 *cow_ret = buf;
684 return 0;
685 }
686
687 search_start = buf->start & ~((u64)SZ_1G - 1);
688
	/*
	 * Before CoWing this block for later modification, check if it's
	 * the subtree root and do the delayed subtree trace if needed.
	 *
	 * Also, we don't care about the error, as it's handled internally.
	 */
695 btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
696 ret = __btrfs_cow_block(trans, root, buf, parent,
697 parent_slot, cow_ret, search_start, 0, nest);
698
699 trace_btrfs_cow_block(root, buf, *cow_ret);
700
701 return ret;
702}
703ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);
704
705/*
706 * helper function for defrag to decide if two blocks pointed to by a
707 * node are actually close by
708 */
709static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
710{
711 if (blocknr < other && other - (blocknr + blocksize) < 32768)
712 return 1;
713 if (blocknr > other && blocknr - (other + blocksize) < 32768)
714 return 1;
715 return 0;
716}
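
/*
 * A quick worked example with made-up numbers and a 16K blocksize:
 * blocknr = 0 and other = 40960 gives other - (blocknr + blocksize) = 24576,
 * which is under the 32768 byte threshold, so the two blocks count as close
 * and defrag leaves them where they are. With other = 65536 the gap is
 * 49152 and close_blocks() returns 0.
 */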
717
718#ifdef __LITTLE_ENDIAN
719
720/*
721 * Compare two keys, on little-endian the disk order is same as CPU order and
722 * we can avoid the conversion.
723 */
724static int comp_keys(const struct btrfs_disk_key *disk_key,
725 const struct btrfs_key *k2)
726{
727 const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;
728
729 return btrfs_comp_cpu_keys(k1, k2);
730}
731
732#else
733
734/*
735 * compare two keys in a memcmp fashion
736 */
737static int comp_keys(const struct btrfs_disk_key *disk,
738 const struct btrfs_key *k2)
739{
740 struct btrfs_key k1;
741
742 btrfs_disk_key_to_cpu(&k1, disk);
743
744 return btrfs_comp_cpu_keys(&k1, k2);
745}
746#endif
747
748/*
749 * same as comp_keys only with two btrfs_key's
750 */
751int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
752{
753 if (k1->objectid > k2->objectid)
754 return 1;
755 if (k1->objectid < k2->objectid)
756 return -1;
757 if (k1->type > k2->type)
758 return 1;
759 if (k1->type < k2->type)
760 return -1;
761 if (k1->offset > k2->offset)
762 return 1;
763 if (k1->offset < k2->offset)
764 return -1;
765 return 0;
766}
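
/*
 * Keys therefore sort by (objectid, type, offset), in that order. With
 * illustrative values:
 *
 *	(256, BTRFS_INODE_ITEM_KEY, 0)  <  (256, BTRFS_INODE_REF_KEY, 256)
 *	(256, BTRFS_INODE_REF_KEY, 256) <  (257, BTRFS_INODE_ITEM_KEY, 0)
 *
 * because the type only matters when the objectids match, and the offset
 * only matters when both objectid and type match.
 */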
767
768/*
769 * this is used by the defrag code to go through all the
770 * leaves pointed to by a node and reallocate them so that
771 * disk order is close to key order
772 */
773int btrfs_realloc_node(struct btrfs_trans_handle *trans,
774 struct btrfs_root *root, struct extent_buffer *parent,
775 int start_slot, u64 *last_ret,
776 struct btrfs_key *progress)
777{
778 struct btrfs_fs_info *fs_info = root->fs_info;
779 struct extent_buffer *cur;
780 u64 blocknr;
781 u64 search_start = *last_ret;
782 u64 last_block = 0;
783 u64 other;
784 u32 parent_nritems;
785 int end_slot;
786 int i;
787 int err = 0;
788 u32 blocksize;
789 int progress_passed = 0;
790 struct btrfs_disk_key disk_key;
791
792 WARN_ON(trans->transaction != fs_info->running_transaction);
793 WARN_ON(trans->transid != fs_info->generation);
794
795 parent_nritems = btrfs_header_nritems(parent);
796 blocksize = fs_info->nodesize;
797 end_slot = parent_nritems - 1;
798
799 if (parent_nritems <= 1)
800 return 0;
801
802 for (i = start_slot; i <= end_slot; i++) {
803 int close = 1;
804
805 btrfs_node_key(parent, &disk_key, i);
806 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
807 continue;
808
809 progress_passed = 1;
810 blocknr = btrfs_node_blockptr(parent, i);
811 if (last_block == 0)
812 last_block = blocknr;
813
814 if (i > 0) {
815 other = btrfs_node_blockptr(parent, i - 1);
816 close = close_blocks(blocknr, other, blocksize);
817 }
818 if (!close && i < end_slot) {
819 other = btrfs_node_blockptr(parent, i + 1);
820 close = close_blocks(blocknr, other, blocksize);
821 }
822 if (close) {
823 last_block = blocknr;
824 continue;
825 }
826
827 cur = btrfs_read_node_slot(parent, i);
828 if (IS_ERR(cur))
829 return PTR_ERR(cur);
830 if (search_start == 0)
831 search_start = last_block;
832
833 btrfs_tree_lock(cur);
834 err = __btrfs_cow_block(trans, root, cur, parent, i,
835 &cur, search_start,
836 min(16 * blocksize,
837 (end_slot - i) * blocksize),
838 BTRFS_NESTING_COW);
839 if (err) {
840 btrfs_tree_unlock(cur);
841 free_extent_buffer(cur);
842 break;
843 }
844 search_start = cur->start;
845 last_block = cur->start;
846 *last_ret = search_start;
847 btrfs_tree_unlock(cur);
848 free_extent_buffer(cur);
849 }
850 return err;
851}
852
853/*
854 * Search for a key in the given extent_buffer.
855 *
856 * The lower boundary for the search is specified by the slot number @low. Use a
857 * value of 0 to search over the whole extent buffer.
858 *
859 * The slot in the extent buffer is returned via @slot. If the key exists in the
860 * extent buffer, then @slot will point to the slot where the key is, otherwise
861 * it points to the slot where you would insert the key.
862 *
863 * Slot may point to the total number of items (i.e. one position beyond the last
864 * key) if the key is bigger than the last key in the extent buffer.
865 */
866static noinline int generic_bin_search(struct extent_buffer *eb, int low,
867 const struct btrfs_key *key, int *slot)
868{
869 unsigned long p;
870 int item_size;
871 int high = btrfs_header_nritems(eb);
872 int ret;
873 const int key_size = sizeof(struct btrfs_disk_key);
874
875 if (low > high) {
876 btrfs_err(eb->fs_info,
877 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
878 __func__, low, high, eb->start,
879 btrfs_header_owner(eb), btrfs_header_level(eb));
880 return -EINVAL;
881 }
882
883 if (btrfs_header_level(eb) == 0) {
884 p = offsetof(struct btrfs_leaf, items);
885 item_size = sizeof(struct btrfs_item);
886 } else {
887 p = offsetof(struct btrfs_node, ptrs);
888 item_size = sizeof(struct btrfs_key_ptr);
889 }
890
891 while (low < high) {
892 unsigned long oip;
893 unsigned long offset;
894 struct btrfs_disk_key *tmp;
895 struct btrfs_disk_key unaligned;
896 int mid;
897
898 mid = (low + high) / 2;
899 offset = p + mid * item_size;
900 oip = offset_in_page(offset);
901
902 if (oip + key_size <= PAGE_SIZE) {
903 const unsigned long idx = get_eb_page_index(offset);
904 char *kaddr = page_address(eb->pages[idx]);
905
906 oip = get_eb_offset_in_page(eb, offset);
907 tmp = (struct btrfs_disk_key *)(kaddr + oip);
908 } else {
909 read_extent_buffer(eb, &unaligned, offset, key_size);
910 tmp = &unaligned;
911 }
912
913 ret = comp_keys(tmp, key);
914
915 if (ret < 0)
916 low = mid + 1;
917 else if (ret > 0)
918 high = mid;
919 else {
920 *slot = mid;
921 return 0;
922 }
923 }
924 *slot = low;
925 return 1;
926}
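
/*
 * Return value sketch, assuming a block whose three keys differ only in
 * offset, with offsets 10, 20 and 30 at slots 0..2: searching for offset 20
 * returns 0 with *slot = 1, while searching for offset 25 returns 1 with
 * *slot = 2, the position where that key would have to be inserted to keep
 * the keys sorted.
 */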
927
928/*
929 * Simple binary search on an extent buffer. Works for both leaves and nodes, and
930 * always searches over the whole range of keys (slot 0 to slot 'nritems - 1').
931 */
932int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
933 int *slot)
934{
935 return generic_bin_search(eb, 0, key, slot);
936}
937
938static void root_add_used(struct btrfs_root *root, u32 size)
939{
940 spin_lock(&root->accounting_lock);
941 btrfs_set_root_used(&root->root_item,
942 btrfs_root_used(&root->root_item) + size);
943 spin_unlock(&root->accounting_lock);
944}
945
946static void root_sub_used(struct btrfs_root *root, u32 size)
947{
948 spin_lock(&root->accounting_lock);
949 btrfs_set_root_used(&root->root_item,
950 btrfs_root_used(&root->root_item) - size);
951 spin_unlock(&root->accounting_lock);
952}
953
/*
 * Given a node and slot number, this reads the block it points to. The
 * extent buffer is returned with a reference taken (but unlocked).
 */
957struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
958 int slot)
959{
960 int level = btrfs_header_level(parent);
961 struct btrfs_tree_parent_check check = { 0 };
962 struct extent_buffer *eb;
963
964 if (slot < 0 || slot >= btrfs_header_nritems(parent))
965 return ERR_PTR(-ENOENT);
966
967 BUG_ON(level == 0);
968
969 check.level = level - 1;
970 check.transid = btrfs_node_ptr_generation(parent, slot);
971 check.owner_root = btrfs_header_owner(parent);
972 check.has_first_key = true;
973 btrfs_node_key_to_cpu(parent, &check.first_key, slot);
974
975 eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
976 &check);
977 if (IS_ERR(eb))
978 return eb;
979 if (!extent_buffer_uptodate(eb)) {
980 free_extent_buffer(eb);
981 return ERR_PTR(-EIO);
982 }
983
984 return eb;
985}
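
/*
 * Callers are expected to check for errors and drop the reference when they
 * are done with the buffer, along the lines of this sketch (locking left
 * out):
 *
 *	struct extent_buffer *child;
 *
 *	child = btrfs_read_node_slot(parent, slot);
 *	if (IS_ERR(child))
 *		return PTR_ERR(child);
 *	...
 *	free_extent_buffer(child);
 */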
986
/*
 * Node level balancing, used to make sure nodes are in proper order for
 * item deletion. We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
992static noinline int balance_level(struct btrfs_trans_handle *trans,
993 struct btrfs_root *root,
994 struct btrfs_path *path, int level)
995{
996 struct btrfs_fs_info *fs_info = root->fs_info;
997 struct extent_buffer *right = NULL;
998 struct extent_buffer *mid;
999 struct extent_buffer *left = NULL;
1000 struct extent_buffer *parent = NULL;
1001 int ret = 0;
1002 int wret;
1003 int pslot;
1004 int orig_slot = path->slots[level];
1005 u64 orig_ptr;
1006
1007 ASSERT(level > 0);
1008
1009 mid = path->nodes[level];
1010
1011 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK);
1012 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1013
1014 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1015
1016 if (level < BTRFS_MAX_LEVEL - 1) {
1017 parent = path->nodes[level + 1];
1018 pslot = path->slots[level + 1];
1019 }
1020
1021 /*
1022 * deal with the case where there is only one pointer in the root
1023 * by promoting the node below to a root
1024 */
1025 if (!parent) {
1026 struct extent_buffer *child;
1027
1028 if (btrfs_header_nritems(mid) != 1)
1029 return 0;
1030
1031 /* promote the child to a root */
1032 child = btrfs_read_node_slot(mid, 0);
1033 if (IS_ERR(child)) {
1034 ret = PTR_ERR(child);
1035 btrfs_handle_fs_error(fs_info, ret, NULL);
1036 goto enospc;
1037 }
1038
1039 btrfs_tree_lock(child);
1040 ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
1041 BTRFS_NESTING_COW);
1042 if (ret) {
1043 btrfs_tree_unlock(child);
1044 free_extent_buffer(child);
1045 goto enospc;
1046 }
1047
1048 ret = btrfs_tree_mod_log_insert_root(root->node, child, true);
1049 BUG_ON(ret < 0);
1050 rcu_assign_pointer(root->node, child);
1051
1052 add_root_to_dirty_list(root);
1053 btrfs_tree_unlock(child);
1054
1055 path->locks[level] = 0;
1056 path->nodes[level] = NULL;
1057 btrfs_clean_tree_block(mid);
1058 btrfs_tree_unlock(mid);
1059 /* once for the path */
1060 free_extent_buffer(mid);
1061
1062 root_sub_used(root, mid->len);
1063 btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
1064 /* once for the root ptr */
1065 free_extent_buffer_stale(mid);
1066 return 0;
1067 }
1068 if (btrfs_header_nritems(mid) >
1069 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
1070 return 0;
1071
1072 left = btrfs_read_node_slot(parent, pslot - 1);
1073 if (IS_ERR(left))
1074 left = NULL;
1075
1076 if (left) {
1077 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
1078 wret = btrfs_cow_block(trans, root, left,
1079 parent, pslot - 1, &left,
1080 BTRFS_NESTING_LEFT_COW);
1081 if (wret) {
1082 ret = wret;
1083 goto enospc;
1084 }
1085 }
1086
1087 right = btrfs_read_node_slot(parent, pslot + 1);
1088 if (IS_ERR(right))
1089 right = NULL;
1090
1091 if (right) {
1092 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
1093 wret = btrfs_cow_block(trans, root, right,
1094 parent, pslot + 1, &right,
1095 BTRFS_NESTING_RIGHT_COW);
1096 if (wret) {
1097 ret = wret;
1098 goto enospc;
1099 }
1100 }
1101
1102 /* first, try to make some room in the middle buffer */
1103 if (left) {
1104 orig_slot += btrfs_header_nritems(left);
1105 wret = push_node_left(trans, left, mid, 1);
1106 if (wret < 0)
1107 ret = wret;
1108 }
1109
1110 /*
1111 * then try to empty the right most buffer into the middle
1112 */
1113 if (right) {
1114 wret = push_node_left(trans, mid, right, 1);
1115 if (wret < 0 && wret != -ENOSPC)
1116 ret = wret;
1117 if (btrfs_header_nritems(right) == 0) {
1118 btrfs_clean_tree_block(right);
1119 btrfs_tree_unlock(right);
1120 del_ptr(root, path, level + 1, pslot + 1);
1121 root_sub_used(root, right->len);
1122 btrfs_free_tree_block(trans, btrfs_root_id(root), right,
1123 0, 1);
1124 free_extent_buffer_stale(right);
1125 right = NULL;
1126 } else {
1127 struct btrfs_disk_key right_key;
1128 btrfs_node_key(right, &right_key, 0);
1129 ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
1130 BTRFS_MOD_LOG_KEY_REPLACE);
1131 BUG_ON(ret < 0);
1132 btrfs_set_node_key(parent, &right_key, pslot + 1);
1133 btrfs_mark_buffer_dirty(parent);
1134 }
1135 }
1136 if (btrfs_header_nritems(mid) == 1) {
1137 /*
1138 * we're not allowed to leave a node with one item in the
1139 * tree during a delete. A deletion from lower in the tree
1140 * could try to delete the only pointer in this node.
1141 * So, pull some keys from the left.
1142 * There has to be a left pointer at this point because
1143 * otherwise we would have pulled some pointers from the
1144 * right
1145 */
1146 if (!left) {
1147 ret = -EROFS;
1148 btrfs_handle_fs_error(fs_info, ret, NULL);
1149 goto enospc;
1150 }
1151 wret = balance_node_right(trans, mid, left);
1152 if (wret < 0) {
1153 ret = wret;
1154 goto enospc;
1155 }
1156 if (wret == 1) {
1157 wret = push_node_left(trans, left, mid, 1);
1158 if (wret < 0)
1159 ret = wret;
1160 }
1161 BUG_ON(wret == 1);
1162 }
1163 if (btrfs_header_nritems(mid) == 0) {
1164 btrfs_clean_tree_block(mid);
1165 btrfs_tree_unlock(mid);
1166 del_ptr(root, path, level + 1, pslot);
1167 root_sub_used(root, mid->len);
1168 btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
1169 free_extent_buffer_stale(mid);
1170 mid = NULL;
1171 } else {
1172 /* update the parent key to reflect our changes */
1173 struct btrfs_disk_key mid_key;
1174 btrfs_node_key(mid, &mid_key, 0);
1175 ret = btrfs_tree_mod_log_insert_key(parent, pslot,
1176 BTRFS_MOD_LOG_KEY_REPLACE);
1177 BUG_ON(ret < 0);
1178 btrfs_set_node_key(parent, &mid_key, pslot);
1179 btrfs_mark_buffer_dirty(parent);
1180 }
1181
1182 /* update the path */
1183 if (left) {
1184 if (btrfs_header_nritems(left) > orig_slot) {
1185 atomic_inc(&left->refs);
1186 /* left was locked after cow */
1187 path->nodes[level] = left;
1188 path->slots[level + 1] -= 1;
1189 path->slots[level] = orig_slot;
1190 if (mid) {
1191 btrfs_tree_unlock(mid);
1192 free_extent_buffer(mid);
1193 }
1194 } else {
1195 orig_slot -= btrfs_header_nritems(left);
1196 path->slots[level] = orig_slot;
1197 }
1198 }
1199 /* double check we haven't messed things up */
1200 if (orig_ptr !=
1201 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1202 BUG();
1203enospc:
1204 if (right) {
1205 btrfs_tree_unlock(right);
1206 free_extent_buffer(right);
1207 }
1208 if (left) {
1209 if (path->nodes[level] != left)
1210 btrfs_tree_unlock(left);
1211 free_extent_buffer(left);
1212 }
1213 return ret;
1214}
1215
1216/* Node balancing for insertion. Here we only split or push nodes around
1217 * when they are completely full. This is also done top down, so we
1218 * have to be pessimistic.
1219 */
1220static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1221 struct btrfs_root *root,
1222 struct btrfs_path *path, int level)
1223{
1224 struct btrfs_fs_info *fs_info = root->fs_info;
1225 struct extent_buffer *right = NULL;
1226 struct extent_buffer *mid;
1227 struct extent_buffer *left = NULL;
1228 struct extent_buffer *parent = NULL;
1229 int ret = 0;
1230 int wret;
1231 int pslot;
1232 int orig_slot = path->slots[level];
1233
1234 if (level == 0)
1235 return 1;
1236
1237 mid = path->nodes[level];
1238 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1239
1240 if (level < BTRFS_MAX_LEVEL - 1) {
1241 parent = path->nodes[level + 1];
1242 pslot = path->slots[level + 1];
1243 }
1244
1245 if (!parent)
1246 return 1;
1247
1248 left = btrfs_read_node_slot(parent, pslot - 1);
1249 if (IS_ERR(left))
1250 left = NULL;
1251
1252 /* first, try to make some room in the middle buffer */
1253 if (left) {
1254 u32 left_nr;
1255
1256 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
1257
1258 left_nr = btrfs_header_nritems(left);
1259 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
1260 wret = 1;
1261 } else {
1262 ret = btrfs_cow_block(trans, root, left, parent,
1263 pslot - 1, &left,
1264 BTRFS_NESTING_LEFT_COW);
1265 if (ret)
1266 wret = 1;
1267 else {
1268 wret = push_node_left(trans, left, mid, 0);
1269 }
1270 }
1271 if (wret < 0)
1272 ret = wret;
1273 if (wret == 0) {
1274 struct btrfs_disk_key disk_key;
1275 orig_slot += left_nr;
1276 btrfs_node_key(mid, &disk_key, 0);
1277 ret = btrfs_tree_mod_log_insert_key(parent, pslot,
1278 BTRFS_MOD_LOG_KEY_REPLACE);
1279 BUG_ON(ret < 0);
1280 btrfs_set_node_key(parent, &disk_key, pslot);
1281 btrfs_mark_buffer_dirty(parent);
1282 if (btrfs_header_nritems(left) > orig_slot) {
1283 path->nodes[level] = left;
1284 path->slots[level + 1] -= 1;
1285 path->slots[level] = orig_slot;
1286 btrfs_tree_unlock(mid);
1287 free_extent_buffer(mid);
1288 } else {
1289 orig_slot -=
1290 btrfs_header_nritems(left);
1291 path->slots[level] = orig_slot;
1292 btrfs_tree_unlock(left);
1293 free_extent_buffer(left);
1294 }
1295 return 0;
1296 }
1297 btrfs_tree_unlock(left);
1298 free_extent_buffer(left);
1299 }
1300 right = btrfs_read_node_slot(parent, pslot + 1);
1301 if (IS_ERR(right))
1302 right = NULL;
1303
1304 /*
1305 * then try to empty the right most buffer into the middle
1306 */
1307 if (right) {
1308 u32 right_nr;
1309
1310 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
1311
1312 right_nr = btrfs_header_nritems(right);
1313 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
1314 wret = 1;
1315 } else {
1316 ret = btrfs_cow_block(trans, root, right,
1317 parent, pslot + 1,
1318 &right, BTRFS_NESTING_RIGHT_COW);
1319 if (ret)
1320 wret = 1;
1321 else {
1322 wret = balance_node_right(trans, right, mid);
1323 }
1324 }
1325 if (wret < 0)
1326 ret = wret;
1327 if (wret == 0) {
1328 struct btrfs_disk_key disk_key;
1329
1330 btrfs_node_key(right, &disk_key, 0);
1331 ret = btrfs_tree_mod_log_insert_key(parent, pslot + 1,
1332 BTRFS_MOD_LOG_KEY_REPLACE);
1333 BUG_ON(ret < 0);
1334 btrfs_set_node_key(parent, &disk_key, pslot + 1);
1335 btrfs_mark_buffer_dirty(parent);
1336
1337 if (btrfs_header_nritems(mid) <= orig_slot) {
1338 path->nodes[level] = right;
1339 path->slots[level + 1] += 1;
1340 path->slots[level] = orig_slot -
1341 btrfs_header_nritems(mid);
1342 btrfs_tree_unlock(mid);
1343 free_extent_buffer(mid);
1344 } else {
1345 btrfs_tree_unlock(right);
1346 free_extent_buffer(right);
1347 }
1348 return 0;
1349 }
1350 btrfs_tree_unlock(right);
1351 free_extent_buffer(right);
1352 }
1353 return 1;
1354}
1355
/*
 * Readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering readahead on them.
 */
1360static void reada_for_search(struct btrfs_fs_info *fs_info,
1361 struct btrfs_path *path,
1362 int level, int slot, u64 objectid)
1363{
1364 struct extent_buffer *node;
1365 struct btrfs_disk_key disk_key;
1366 u32 nritems;
1367 u64 search;
1368 u64 target;
1369 u64 nread = 0;
1370 u64 nread_max;
1371 u32 nr;
1372 u32 blocksize;
1373 u32 nscan = 0;
1374
1375 if (level != 1 && path->reada != READA_FORWARD_ALWAYS)
1376 return;
1377
1378 if (!path->nodes[level])
1379 return;
1380
1381 node = path->nodes[level];
1382
1383 /*
1384 * Since the time between visiting leaves is much shorter than the time
1385 * between visiting nodes, limit read ahead of nodes to 1, to avoid too
1386 * much IO at once (possibly random).
1387 */
1388 if (path->reada == READA_FORWARD_ALWAYS) {
1389 if (level > 1)
1390 nread_max = node->fs_info->nodesize;
1391 else
1392 nread_max = SZ_128K;
1393 } else {
1394 nread_max = SZ_64K;
1395 }
1396
1397 search = btrfs_node_blockptr(node, slot);
1398 blocksize = fs_info->nodesize;
1399 if (path->reada != READA_FORWARD_ALWAYS) {
1400 struct extent_buffer *eb;
1401
1402 eb = find_extent_buffer(fs_info, search);
1403 if (eb) {
1404 free_extent_buffer(eb);
1405 return;
1406 }
1407 }
1408
1409 target = search;
1410
1411 nritems = btrfs_header_nritems(node);
1412 nr = slot;
1413
1414 while (1) {
1415 if (path->reada == READA_BACK) {
1416 if (nr == 0)
1417 break;
1418 nr--;
1419 } else if (path->reada == READA_FORWARD ||
1420 path->reada == READA_FORWARD_ALWAYS) {
1421 nr++;
1422 if (nr >= nritems)
1423 break;
1424 }
1425 if (path->reada == READA_BACK && objectid) {
1426 btrfs_node_key(node, &disk_key, nr);
1427 if (btrfs_disk_key_objectid(&disk_key) != objectid)
1428 break;
1429 }
1430 search = btrfs_node_blockptr(node, nr);
1431 if (path->reada == READA_FORWARD_ALWAYS ||
1432 (search <= target && target - search <= 65536) ||
1433 (search > target && search - target <= 65536)) {
1434 btrfs_readahead_node_child(node, nr);
1435 nread += blocksize;
1436 }
1437 nscan++;
1438 if (nread > nread_max || nscan > 32)
1439 break;
1440 }
1441}
1442
1443static noinline void reada_for_balance(struct btrfs_path *path, int level)
1444{
1445 struct extent_buffer *parent;
1446 int slot;
1447 int nritems;
1448
1449 parent = path->nodes[level + 1];
1450 if (!parent)
1451 return;
1452
1453 nritems = btrfs_header_nritems(parent);
1454 slot = path->slots[level + 1];
1455
1456 if (slot > 0)
1457 btrfs_readahead_node_child(parent, slot - 1);
1458 if (slot + 1 < nritems)
1459 btrfs_readahead_node_child(parent, slot + 1);
1460}
1461
1462
/*
 * When we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree. The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * Callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block. This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock, so
 * if lowest_unlock is 1, level 0 won't be unlocked.
 */
1476static noinline void unlock_up(struct btrfs_path *path, int level,
1477 int lowest_unlock, int min_write_lock_level,
1478 int *write_lock_level)
1479{
1480 int i;
1481 int skip_level = level;
1482 bool check_skip = true;
1483
1484 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1485 if (!path->nodes[i])
1486 break;
1487 if (!path->locks[i])
1488 break;
1489
1490 if (check_skip) {
1491 if (path->slots[i] == 0) {
1492 skip_level = i + 1;
1493 continue;
1494 }
1495
1496 if (path->keep_locks) {
1497 u32 nritems;
1498
1499 nritems = btrfs_header_nritems(path->nodes[i]);
1500 if (nritems < 1 || path->slots[i] >= nritems - 1) {
1501 skip_level = i + 1;
1502 continue;
1503 }
1504 }
1505 }
1506
1507 if (i >= lowest_unlock && i > skip_level) {
1508 check_skip = false;
1509 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
1510 path->locks[i] = 0;
1511 if (write_lock_level &&
1512 i > min_write_lock_level &&
1513 i <= *write_lock_level) {
1514 *write_lock_level = i - 1;
1515 }
1516 }
1517 }
1518}
1519
1520/*
1521 * Helper function for btrfs_search_slot() and other functions that do a search
1522 * on a btree. The goal is to find a tree block in the cache (the radix tree at
1523 * fs_info->buffer_radix), but if we can't find it, or it's not up to date, read
1524 * its pages from disk.
1525 *
1526 * Returns -EAGAIN, with the path unlocked, if the caller needs to repeat the
1527 * whole btree search, starting again from the current root node.
1528 */
1529static int
1530read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
1531 struct extent_buffer **eb_ret, int level, int slot,
1532 const struct btrfs_key *key)
1533{
1534 struct btrfs_fs_info *fs_info = root->fs_info;
1535 struct btrfs_tree_parent_check check = { 0 };
1536 u64 blocknr;
1537 u64 gen;
1538 struct extent_buffer *tmp;
1539 int ret;
1540 int parent_level;
1541 bool unlock_up;
1542
1543 unlock_up = ((level + 1 < BTRFS_MAX_LEVEL) && p->locks[level + 1]);
1544 blocknr = btrfs_node_blockptr(*eb_ret, slot);
1545 gen = btrfs_node_ptr_generation(*eb_ret, slot);
1546 parent_level = btrfs_header_level(*eb_ret);
1547 btrfs_node_key_to_cpu(*eb_ret, &check.first_key, slot);
1548 check.has_first_key = true;
1549 check.level = parent_level - 1;
1550 check.transid = gen;
1551 check.owner_root = root->root_key.objectid;
1552
1553 /*
1554 * If we need to read an extent buffer from disk and we are holding locks
1555 * on upper level nodes, we unlock all the upper nodes before reading the
1556 * extent buffer, and then return -EAGAIN to the caller as it needs to
1557 * restart the search. We don't release the lock on the current level
1558 * because we need to walk this node to figure out which blocks to read.
1559 */
1560 tmp = find_extent_buffer(fs_info, blocknr);
1561 if (tmp) {
1562 if (p->reada == READA_FORWARD_ALWAYS)
1563 reada_for_search(fs_info, p, level, slot, key->objectid);
1564
1565 /* first we do an atomic uptodate check */
1566 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
1567 /*
1568 * Do extra check for first_key, eb can be stale due to
1569 * being cached, read from scrub, or have multiple
1570 * parents (shared tree blocks).
1571 */
1572 if (btrfs_verify_level_key(tmp,
1573 parent_level - 1, &check.first_key, gen)) {
1574 free_extent_buffer(tmp);
1575 return -EUCLEAN;
1576 }
1577 *eb_ret = tmp;
1578 return 0;
1579 }
1580
1581 if (p->nowait) {
1582 free_extent_buffer(tmp);
1583 return -EAGAIN;
1584 }
1585
1586 if (unlock_up)
1587 btrfs_unlock_up_safe(p, level + 1);
1588
1589 /* now we're allowed to do a blocking uptodate check */
1590 ret = btrfs_read_extent_buffer(tmp, &check);
1591 if (ret) {
1592 free_extent_buffer(tmp);
1593 btrfs_release_path(p);
1594 return -EIO;
1595 }
1596 if (btrfs_check_eb_owner(tmp, root->root_key.objectid)) {
1597 free_extent_buffer(tmp);
1598 btrfs_release_path(p);
1599 return -EUCLEAN;
1600 }
1601
1602 if (unlock_up)
1603 ret = -EAGAIN;
1604
1605 goto out;
1606 } else if (p->nowait) {
1607 return -EAGAIN;
1608 }
1609
1610 if (unlock_up) {
1611 btrfs_unlock_up_safe(p, level + 1);
1612 ret = -EAGAIN;
1613 } else {
1614 ret = 0;
1615 }
1616
1617 if (p->reada != READA_NONE)
1618 reada_for_search(fs_info, p, level, slot, key->objectid);
1619
1620 tmp = read_tree_block(fs_info, blocknr, &check);
1621 if (IS_ERR(tmp)) {
1622 btrfs_release_path(p);
1623 return PTR_ERR(tmp);
1624 }
1625 /*
1626 * If the read above didn't mark this buffer up to date,
1627 * it will never end up being up to date. Set ret to EIO now
1628 * and give up so that our caller doesn't loop forever
1629 * on our EAGAINs.
1630 */
1631 if (!extent_buffer_uptodate(tmp))
1632 ret = -EIO;
1633
1634out:
1635 if (ret == 0) {
1636 *eb_ret = tmp;
1637 } else {
1638 free_extent_buffer(tmp);
1639 btrfs_release_path(p);
1640 }
1641
1642 return ret;
1643}
1644
1645/*
1646 * helper function for btrfs_search_slot. This does all of the checks
1647 * for node-level blocks and does any balancing required based on
1648 * the ins_len.
1649 *
1650 * If no extra work was required, zero is returned. If we had to
1651 * drop the path, -EAGAIN is returned and btrfs_search_slot must
1652 * start over
1653 */
1654static int
1655setup_nodes_for_search(struct btrfs_trans_handle *trans,
1656 struct btrfs_root *root, struct btrfs_path *p,
1657 struct extent_buffer *b, int level, int ins_len,
1658 int *write_lock_level)
1659{
1660 struct btrfs_fs_info *fs_info = root->fs_info;
1661 int ret = 0;
1662
1663 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
1664 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
1665
1666 if (*write_lock_level < level + 1) {
1667 *write_lock_level = level + 1;
1668 btrfs_release_path(p);
1669 return -EAGAIN;
1670 }
1671
1672 reada_for_balance(p, level);
1673 ret = split_node(trans, root, p, level);
1674
1675 b = p->nodes[level];
1676 } else if (ins_len < 0 && btrfs_header_nritems(b) <
1677 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
1678
1679 if (*write_lock_level < level + 1) {
1680 *write_lock_level = level + 1;
1681 btrfs_release_path(p);
1682 return -EAGAIN;
1683 }
1684
1685 reada_for_balance(p, level);
1686 ret = balance_level(trans, root, p, level);
1687 if (ret)
1688 return ret;
1689
1690 b = p->nodes[level];
1691 if (!b) {
1692 btrfs_release_path(p);
1693 return -EAGAIN;
1694 }
1695 BUG_ON(btrfs_header_nritems(b) == 1);
1696 }
1697 return ret;
1698}
1699
1700int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
1701 u64 iobjectid, u64 ioff, u8 key_type,
1702 struct btrfs_key *found_key)
1703{
1704 int ret;
1705 struct btrfs_key key;
1706 struct extent_buffer *eb;
1707
1708 ASSERT(path);
1709 ASSERT(found_key);
1710
1711 key.type = key_type;
1712 key.objectid = iobjectid;
1713 key.offset = ioff;
1714
1715 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
1716 if (ret < 0)
1717 return ret;
1718
1719 eb = path->nodes[0];
1720 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
1721 ret = btrfs_next_leaf(fs_root, path);
1722 if (ret)
1723 return ret;
1724 eb = path->nodes[0];
1725 }
1726
1727 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
1728 if (found_key->type != key.type ||
1729 found_key->objectid != key.objectid)
1730 return 1;
1731
1732 return 0;
1733}
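
/*
 * One way this might be used (illustrative only, with @ino and @found_key
 * provided by the caller): looking up an inode item by inode number, where a
 * return value of 1 means no matching objectid/type was found.
 *
 *	ret = btrfs_find_item(fs_root, path, ino, 0,
 *			      BTRFS_INODE_ITEM_KEY, &found_key);
 *	if (ret < 0)
 *		return ret;
 */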
1734
1735static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
1736 struct btrfs_path *p,
1737 int write_lock_level)
1738{
1739 struct extent_buffer *b;
1740 int root_lock = 0;
1741 int level = 0;
1742
1743 if (p->search_commit_root) {
1744 b = root->commit_root;
1745 atomic_inc(&b->refs);
1746 level = btrfs_header_level(b);
1747 /*
1748 * Ensure that all callers have set skip_locking when
1749 * p->search_commit_root = 1.
1750 */
1751 ASSERT(p->skip_locking == 1);
1752
1753 goto out;
1754 }
1755
1756 if (p->skip_locking) {
1757 b = btrfs_root_node(root);
1758 level = btrfs_header_level(b);
1759 goto out;
1760 }
1761
1762 /* We try very hard to do read locks on the root */
1763 root_lock = BTRFS_READ_LOCK;
1764
1765 /*
1766 * If the level is set to maximum, we can skip trying to get the read
1767 * lock.
1768 */
1769 if (write_lock_level < BTRFS_MAX_LEVEL) {
1770 /*
1771 * We don't know the level of the root node until we actually
1772 * have it read locked
1773 */
1774 if (p->nowait) {
1775 b = btrfs_try_read_lock_root_node(root);
1776 if (IS_ERR(b))
1777 return b;
1778 } else {
1779 b = btrfs_read_lock_root_node(root);
1780 }
1781 level = btrfs_header_level(b);
1782 if (level > write_lock_level)
1783 goto out;
1784
1785 /* Whoops, must trade for write lock */
1786 btrfs_tree_read_unlock(b);
1787 free_extent_buffer(b);
1788 }
1789
1790 b = btrfs_lock_root_node(root);
1791 root_lock = BTRFS_WRITE_LOCK;
1792
1793 /* The level might have changed, check again */
1794 level = btrfs_header_level(b);
1795
1796out:
1797 /*
1798 * The root may have failed to write out at some point, and thus is no
1799 * longer valid, return an error in this case.
1800 */
1801 if (!extent_buffer_uptodate(b)) {
1802 if (root_lock)
1803 btrfs_tree_unlock_rw(b, root_lock);
1804 free_extent_buffer(b);
1805 return ERR_PTR(-EIO);
1806 }
1807
1808 p->nodes[level] = b;
1809 if (!p->skip_locking)
1810 p->locks[level] = root_lock;
1811 /*
1812 * Callers are responsible for dropping b's references.
1813 */
1814 return b;
1815}
1816
1817/*
1818 * Replace the extent buffer at the lowest level of the path with a cloned
1819 * version. The purpose is to be able to use it safely, after releasing the
1820 * commit root semaphore, even if relocation is happening in parallel, the
1821 * transaction used for relocation is committed and the extent buffer is
1822 * reallocated in the next transaction.
1823 *
1824 * This is used in a context where the caller does not prevent transaction
1825 * commits from happening, either by holding a transaction handle or holding
1826 * some lock, while it's doing searches through a commit root.
1827 * At the moment it's only used for send operations.
1828 */
1829static int finish_need_commit_sem_search(struct btrfs_path *path)
1830{
1831 const int i = path->lowest_level;
1832 const int slot = path->slots[i];
1833 struct extent_buffer *lowest = path->nodes[i];
1834 struct extent_buffer *clone;
1835
1836 ASSERT(path->need_commit_sem);
1837
1838 if (!lowest)
1839 return 0;
1840
1841 lockdep_assert_held_read(&lowest->fs_info->commit_root_sem);
1842
1843 clone = btrfs_clone_extent_buffer(lowest);
1844 if (!clone)
1845 return -ENOMEM;
1846
1847 btrfs_release_path(path);
1848 path->nodes[i] = clone;
1849 path->slots[i] = slot;
1850
1851 return 0;
1852}
1853
1854static inline int search_for_key_slot(struct extent_buffer *eb,
1855 int search_low_slot,
1856 const struct btrfs_key *key,
1857 int prev_cmp,
1858 int *slot)
1859{
1860 /*
1861 * If a previous call to btrfs_bin_search() on a parent node returned an
1862 * exact match (prev_cmp == 0), we can safely assume the target key will
1863 * always be at slot 0 on lower levels, since each key pointer
1864 * (struct btrfs_key_ptr) refers to the lowest key accessible from the
1865 * subtree it points to. Thus we can skip searching lower levels.
1866 */
1867 if (prev_cmp == 0) {
1868 *slot = 0;
1869 return 0;
1870 }
1871
1872 return generic_bin_search(eb, search_low_slot, key, slot);
1873}
1874
1875static int search_leaf(struct btrfs_trans_handle *trans,
1876 struct btrfs_root *root,
1877 const struct btrfs_key *key,
1878 struct btrfs_path *path,
1879 int ins_len,
1880 int prev_cmp)
1881{
1882 struct extent_buffer *leaf = path->nodes[0];
1883 int leaf_free_space = -1;
1884 int search_low_slot = 0;
1885 int ret;
1886 bool do_bin_search = true;
1887
1888 /*
1889 * If we are doing an insertion, the leaf has enough free space and the
1890 * destination slot for the key is not slot 0, then we can unlock our
1891 * write lock on the parent, and any other upper nodes, before doing the
1892 * binary search on the leaf (with search_for_key_slot()), allowing other
1893 * tasks to lock the parent and any other upper nodes.
1894 */
1895 if (ins_len > 0) {
1896 /*
1897 * Cache the leaf free space, since we will need it later and it
1898 * will not change until then.
1899 */
1900 leaf_free_space = btrfs_leaf_free_space(leaf);
1901
1902 /*
1903 * !path->locks[1] means we have a single node tree, the leaf is
1904 * the root of the tree.
1905 */
1906 if (path->locks[1] && leaf_free_space >= ins_len) {
1907 struct btrfs_disk_key first_key;
1908
1909 ASSERT(btrfs_header_nritems(leaf) > 0);
1910 btrfs_item_key(leaf, &first_key, 0);
1911
1912 /*
1913 * Doing the extra comparison with the first key is cheap,
1914 * taking into account that the first key is very likely
1915 * already in a cache line because it immediately follows
1916 * the extent buffer's header and we have recently accessed
1917 * the header's level field.
1918 */
1919 ret = comp_keys(&first_key, key);
1920 if (ret < 0) {
				/*
				 * The first key is smaller than the key we want
				 * to insert, so we are safe to unlock all upper
				 * nodes and we have to do the binary search.
				 *
				 * We do use btrfs_unlock_up_safe() and not
				 * unlock_up() because the latter does not unlock
				 * nodes with a slot of 0 - we can safely unlock
				 * any node even if its slot is 0 since in this
				 * case the key does not end up at slot 0 of the
				 * leaf and there's no need to split the leaf.
				 */
1934 search_low_slot = 1;
1935 } else {
1936 /*
1937 * The first key is >= the key we want to
1938 * insert, so we can skip the binary search as
1939 * the target key will be at slot 0.
1940 *
1941 * We can not unlock upper nodes when the key is
1942 * less than the first key, because we will need
1943 * to update the key at slot 0 of the parent node
1944 * and possibly of other upper nodes too.
1945 * If the key matches the first key, then we can
1946 * unlock all the upper nodes, using
1947 * btrfs_unlock_up_safe() instead of unlock_up()
1948 * as stated above.
1949 */
1950 if (ret == 0)
1951 btrfs_unlock_up_safe(path, 1);
1952 /*
1953 * ret is already 0 or 1, matching the result of
1954 * a btrfs_bin_search() call, so there is no need
1955 * to adjust it.
1956 */
1957 do_bin_search = false;
1958 path->slots[0] = 0;
1959 }
1960 }
1961 }
1962
1963 if (do_bin_search) {
1964 ret = search_for_key_slot(leaf, search_low_slot, key,
1965 prev_cmp, &path->slots[0]);
1966 if (ret < 0)
1967 return ret;
1968 }
1969
1970 if (ins_len > 0) {
1971 /*
1972 * Item key already exists. In this case, if we are allowed to
1973 * insert the item (for example, in dir_item case, item key
1974 * collision is allowed), it will be merged with the original
1975 * item. Only the item size grows, no new btrfs item will be
1976 * added. If search_for_extension is not set, ins_len already
1977 * accounts for the size of struct btrfs_item; deduct it here so
1978 * the leaf space check will be correct.
1979 */
1980 if (ret == 0 && !path->search_for_extension) {
1981 ASSERT(ins_len >= sizeof(struct btrfs_item));
1982 ins_len -= sizeof(struct btrfs_item);
1983 }
1984
1985 ASSERT(leaf_free_space >= 0);
1986
1987 if (leaf_free_space < ins_len) {
1988 int err;
1989
1990 err = split_leaf(trans, root, key, path, ins_len,
1991 (ret == 0));
1992 ASSERT(err <= 0);
1993 if (WARN_ON(err > 0))
1994 err = -EUCLEAN;
1995 if (err)
1996 ret = err;
1997 }
1998 }
1999
2000 return ret;
2001}
2002
2003/*
2004 * btrfs_search_slot - look for a key in a tree and perform necessary
2005 * modifications to preserve tree invariants.
2006 *
2007 * @trans: Handle of transaction, used when modifying the tree
2008 * @p: Holds all btree nodes along the search path
2009 * @root: The root node of the tree
2010 * @key: The key we are looking for
2011 * @ins_len: Indicates purpose of search:
2012 * >0 for inserts, it's the size of the item inserted (*)
2013 * <0 for deletions
2014 * 0 for plain searches, not modifying the tree
2015 *
2016 * (*) If size of item inserted doesn't include
2017 * sizeof(struct btrfs_item), then p->search_for_extension must
2018 * be set.
2019 * @cow: boolean indicating whether CoW operations should be performed.
2020 * Must always be 1 when modifying the tree.
2021 *
2022 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
2023 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
2024 *
2025 * If @key is found, 0 is returned and you can find the item in the leaf level
2026 * of the path (level 0)
2027 *
2028 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
2029 * points to the slot where it should be inserted
2030 *
2031 * If an error is encountered while searching the tree a negative error number
2032 * is returned
2033 */
2034int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2035 const struct btrfs_key *key, struct btrfs_path *p,
2036 int ins_len, int cow)
2037{
2038 struct btrfs_fs_info *fs_info = root->fs_info;
2039 struct extent_buffer *b;
2040 int slot;
2041 int ret;
2042 int err;
2043 int level;
2044 int lowest_unlock = 1;
2045 /* everything at write_lock_level or lower must be write locked */
2046 int write_lock_level = 0;
2047 u8 lowest_level = 0;
2048 int min_write_lock_level;
2049 int prev_cmp;
2050
2051 might_sleep();
2052
2053 lowest_level = p->lowest_level;
2054 WARN_ON(lowest_level && ins_len > 0);
2055 WARN_ON(p->nodes[0] != NULL);
2056 BUG_ON(!cow && ins_len);
2057
2058 /*
2059 * For now only allow nowait for read-only operations. There's no
2060 * strict reason why we couldn't support it for writes, we just only
2061 * need it for reads, so that's all that is implemented.
2062 */
2063 ASSERT(!p->nowait || !cow);
2064
2065 if (ins_len < 0) {
2066 lowest_unlock = 2;
2067
2068 /* when we are removing items, we might have to go up to level
2069 * two as we update tree pointers. Make sure we keep write locks
2070 * on those levels as well
2071 */
2072 write_lock_level = 2;
2073 } else if (ins_len > 0) {
2074 /*
2075 * for inserting items, make sure we have a write lock on
2076 * level 1 so we can update keys
2077 */
2078 write_lock_level = 1;
2079 }
2080
2081 if (!cow)
2082 write_lock_level = -1;
2083
2084 if (cow && (p->keep_locks || p->lowest_level))
2085 write_lock_level = BTRFS_MAX_LEVEL;
2086
2087 min_write_lock_level = write_lock_level;
2088
2089 if (p->need_commit_sem) {
2090 ASSERT(p->search_commit_root);
2091 if (p->nowait) {
2092 if (!down_read_trylock(&fs_info->commit_root_sem))
2093 return -EAGAIN;
2094 } else {
2095 down_read(&fs_info->commit_root_sem);
2096 }
2097 }
2098
2099again:
2100 prev_cmp = -1;
2101 b = btrfs_search_slot_get_root(root, p, write_lock_level);
2102 if (IS_ERR(b)) {
2103 ret = PTR_ERR(b);
2104 goto done;
2105 }
2106
2107 while (b) {
2108 int dec = 0;
2109
2110 level = btrfs_header_level(b);
2111
2112 if (cow) {
2113 bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
2114
2115 /*
2116 * if we don't really need to cow this block then we can skip
2117 * the cow and the write lock requirements that come with it,
2118 * so we test it here first
2119 */
2120 if (!should_cow_block(trans, root, b))
2121 goto cow_done;
2122
2123 /*
2124 * must have write locks on this node and the
2125 * parent
2126 */
2127 if (level > write_lock_level ||
2128 (level + 1 > write_lock_level &&
2129 level + 1 < BTRFS_MAX_LEVEL &&
2130 p->nodes[level + 1])) {
2131 write_lock_level = level + 1;
2132 btrfs_release_path(p);
2133 goto again;
2134 }
2135
2136 if (last_level)
2137 err = btrfs_cow_block(trans, root, b, NULL, 0,
2138 &b,
2139 BTRFS_NESTING_COW);
2140 else
2141 err = btrfs_cow_block(trans, root, b,
2142 p->nodes[level + 1],
2143 p->slots[level + 1], &b,
2144 BTRFS_NESTING_COW);
2145 if (err) {
2146 ret = err;
2147 goto done;
2148 }
2149 }
2150cow_done:
2151 p->nodes[level] = b;
2152
2153 /*
2154 * we have a lock on b and as long as we aren't changing
2155 * the tree, there is no way for the items in b to change.
2156 * It is safe to drop the lock on our parent before we
2157 * go through the expensive btree search on b.
2158 *
2159 * If we're inserting or deleting (ins_len != 0), then we might
2160 * be changing slot zero, which may require changing the parent.
2161 * So, we can't drop the lock until after we know which slot
2162 * we're operating on.
2163 */
2164 if (!ins_len && !p->keep_locks) {
2165 int u = level + 1;
2166
2167 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2168 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2169 p->locks[u] = 0;
2170 }
2171 }
2172
2173 if (level == 0) {
2174 if (ins_len > 0)
2175 ASSERT(write_lock_level >= 1);
2176
2177 ret = search_leaf(trans, root, key, p, ins_len, prev_cmp);
2178 if (!p->search_for_split)
2179 unlock_up(p, level, lowest_unlock,
2180 min_write_lock_level, NULL);
2181 goto done;
2182 }
2183
2184 ret = search_for_key_slot(b, 0, key, prev_cmp, &slot);
2185 if (ret < 0)
2186 goto done;
2187 prev_cmp = ret;
2188
2189 if (ret && slot > 0) {
2190 dec = 1;
2191 slot--;
2192 }
2193 p->slots[level] = slot;
2194 err = setup_nodes_for_search(trans, root, p, b, level, ins_len,
2195 &write_lock_level);
2196 if (err == -EAGAIN)
2197 goto again;
2198 if (err) {
2199 ret = err;
2200 goto done;
2201 }
2202 b = p->nodes[level];
2203 slot = p->slots[level];
2204
2205 /*
2206 * Slot 0 is special, if we change the key we have to update
2207 * the parent pointer which means we must have a write lock on
2208 * the parent
2209 */
2210 if (slot == 0 && ins_len && write_lock_level < level + 1) {
2211 write_lock_level = level + 1;
2212 btrfs_release_path(p);
2213 goto again;
2214 }
2215
2216 unlock_up(p, level, lowest_unlock, min_write_lock_level,
2217 &write_lock_level);
2218
2219 if (level == lowest_level) {
2220 if (dec)
2221 p->slots[level]++;
2222 goto done;
2223 }
2224
2225 err = read_block_for_search(root, p, &b, level, slot, key);
2226 if (err == -EAGAIN)
2227 goto again;
2228 if (err) {
2229 ret = err;
2230 goto done;
2231 }
2232
2233 if (!p->skip_locking) {
2234 level = btrfs_header_level(b);
2235
2236 btrfs_maybe_reset_lockdep_class(root, b);
2237
2238 if (level <= write_lock_level) {
2239 btrfs_tree_lock(b);
2240 p->locks[level] = BTRFS_WRITE_LOCK;
2241 } else {
2242 if (p->nowait) {
2243 if (!btrfs_try_tree_read_lock(b)) {
2244 free_extent_buffer(b);
2245 ret = -EAGAIN;
2246 goto done;
2247 }
2248 } else {
2249 btrfs_tree_read_lock(b);
2250 }
2251 p->locks[level] = BTRFS_READ_LOCK;
2252 }
2253 p->nodes[level] = b;
2254 }
2255 }
2256 ret = 1;
2257done:
2258 if (ret < 0 && !p->skip_release_on_error)
2259 btrfs_release_path(p);
2260
2261 if (p->need_commit_sem) {
2262 int ret2;
2263
2264 ret2 = finish_need_commit_sem_search(p);
2265 up_read(&fs_info->commit_root_sem);
2266 if (ret2)
2267 ret = ret2;
2268 }
2269
2270 return ret;
2271}
2272ALLOW_ERROR_INJECTION(btrfs_search_slot, ERRNO);
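
/*
 * Illustrative usage sketch, not part of the code above; @root and @ino are
 * assumptions made purely for the example. A plain read-only lookup:
 *
 *      struct btrfs_path *path = btrfs_alloc_path();
 *      struct btrfs_key key = { .objectid = ino,
 *                               .type = BTRFS_INODE_ITEM_KEY,
 *                               .offset = 0 };
 *      int ret;
 *
 *      if (!path)
 *              return -ENOMEM;
 *      ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *      if (ret == 0)
 *              btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 *      btrfs_free_path(path);
 *
 * A NULL transaction handle with ins_len == 0 and cow == 0 is a pure search:
 * ret == 0 means the item sits at path->nodes[0] / path->slots[0], ret == 1
 * means it was not found and the slot is where it would be inserted, and
 * ret < 0 is an error.
 */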
2273
2274/*
2275 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2276 * current state of the tree together with the operations recorded in the tree
2277 * modification log to search for the key in a previous version of this tree, as
2278 * denoted by the time_seq parameter.
2279 *
2280 * Naturally, there is no support for insert, delete or cow operations.
2281 *
2282 * The resulting path and return value will be set up as if we called
2283 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2284 */
2285int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
2286 struct btrfs_path *p, u64 time_seq)
2287{
2288 struct btrfs_fs_info *fs_info = root->fs_info;
2289 struct extent_buffer *b;
2290 int slot;
2291 int ret;
2292 int err;
2293 int level;
2294 int lowest_unlock = 1;
2295 u8 lowest_level = 0;
2296
2297 lowest_level = p->lowest_level;
2298 WARN_ON(p->nodes[0] != NULL);
2299 ASSERT(!p->nowait);
2300
2301 if (p->search_commit_root) {
2302 BUG_ON(time_seq);
2303 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2304 }
2305
2306again:
2307 b = btrfs_get_old_root(root, time_seq);
2308 if (!b) {
2309 ret = -EIO;
2310 goto done;
2311 }
2312 level = btrfs_header_level(b);
2313 p->locks[level] = BTRFS_READ_LOCK;
2314
2315 while (b) {
2316 int dec = 0;
2317
2318 level = btrfs_header_level(b);
2319 p->nodes[level] = b;
2320
2321 /*
2322 * we have a lock on b and as long as we aren't changing
2323 * the tree, there is no way for the items in b to change.
2324 * It is safe to drop the lock on our parent before we
2325 * go through the expensive btree search on b.
2326 */
2327 btrfs_unlock_up_safe(p, level + 1);
2328
2329 ret = btrfs_bin_search(b, key, &slot);
2330 if (ret < 0)
2331 goto done;
2332
2333 if (level == 0) {
2334 p->slots[level] = slot;
2335 unlock_up(p, level, lowest_unlock, 0, NULL);
2336 goto done;
2337 }
2338
2339 if (ret && slot > 0) {
2340 dec = 1;
2341 slot--;
2342 }
2343 p->slots[level] = slot;
2344 unlock_up(p, level, lowest_unlock, 0, NULL);
2345
2346 if (level == lowest_level) {
2347 if (dec)
2348 p->slots[level]++;
2349 goto done;
2350 }
2351
2352 err = read_block_for_search(root, p, &b, level, slot, key);
2353 if (err == -EAGAIN)
2354 goto again;
2355 if (err) {
2356 ret = err;
2357 goto done;
2358 }
2359
2360 level = btrfs_header_level(b);
2361 btrfs_tree_read_lock(b);
2362 b = btrfs_tree_mod_log_rewind(fs_info, p, b, time_seq);
2363 if (!b) {
2364 ret = -ENOMEM;
2365 goto done;
2366 }
2367 p->locks[level] = BTRFS_READ_LOCK;
2368 p->nodes[level] = b;
2369 }
2370 ret = 1;
2371done:
2372 if (ret < 0)
2373 btrfs_release_path(p);
2374
2375 return ret;
2376}
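
/*
 * Illustrative sketch, not part of the function above; how @time_seq is
 * obtained is an assumption for the example (in-tree users take it from the
 * tree mod log, e.g. via btrfs_get_tree_mod_seq(), and keep it held across
 * the search):
 *
 *      ret = btrfs_search_old_slot(root, &key, path, time_seq);
 *
 * The return convention matches btrfs_search_slot() with ins_len == 0 and
 * cow == 0: 0 if found, 1 if not found, < 0 on error.
 */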
2377
2378/*
2379 * helper to use instead of btrfs_search_slot() if no exact match is needed but
2380 * instead the next or previous item should be returned.
2381 * When find_higher is true, the next higher item is returned, the next lower
2382 * otherwise.
2383 * When return_any and find_higher are both true, and no higher item is found,
2384 * return the next lower instead.
2385 * When return_any is true and find_higher is false, and no lower item is found,
2386 * return the next higher instead.
2387 * It returns 0 if any item is found, 1 if none is found (tree empty), and
2388 * < 0 on error
2389 */
2390int btrfs_search_slot_for_read(struct btrfs_root *root,
2391 const struct btrfs_key *key,
2392 struct btrfs_path *p, int find_higher,
2393 int return_any)
2394{
2395 int ret;
2396 struct extent_buffer *leaf;
2397
2398again:
2399 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
2400 if (ret <= 0)
2401 return ret;
2402 /*
2403 * a return value of 1 means the path is at the position where the
2404 * item should be inserted. Normally this is the next bigger item,
2405 * but in case the previous item is the last in a leaf, path points
2406 * to the first free slot in the previous leaf, i.e. at an invalid
2407 * item.
2408 */
2409 leaf = p->nodes[0];
2410
2411 if (find_higher) {
2412 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
2413 ret = btrfs_next_leaf(root, p);
2414 if (ret <= 0)
2415 return ret;
2416 if (!return_any)
2417 return 1;
2418 /*
2419 * no higher item found, return the next
2420 * lower instead
2421 */
2422 return_any = 0;
2423 find_higher = 0;
2424 btrfs_release_path(p);
2425 goto again;
2426 }
2427 } else {
2428 if (p->slots[0] == 0) {
2429 ret = btrfs_prev_leaf(root, p);
2430 if (ret < 0)
2431 return ret;
2432 if (!ret) {
2433 leaf = p->nodes[0];
2434 if (p->slots[0] == btrfs_header_nritems(leaf))
2435 p->slots[0]--;
2436 return 0;
2437 }
2438 if (!return_any)
2439 return 1;
2440 /*
2441 * no lower item found, return the next
2442 * higher instead
2443 */
2444 return_any = 0;
2445 find_higher = 1;
2446 btrfs_release_path(p);
2447 goto again;
2448 } else {
2449 --p->slots[0];
2450 }
2451 }
2452 return 0;
2453}
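
/*
 * Illustrative sketch (the key values and @found_key are assumptions for the
 * example): find the first item with a key >= @key, falling back to the
 * closest lower item when nothing higher exists:
 *
 *      ret = btrfs_search_slot_for_read(root, &key, path, 1, 1);
 *      if (ret == 0)
 *              btrfs_item_key_to_cpu(path->nodes[0], &found_key,
 *                                    path->slots[0]);
 *
 * Here ret == 1 means the tree is empty and ret < 0 is an error.
 */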
2454
2455/*
2456 * Execute search and call btrfs_previous_item to traverse backwards if the item
2457 * was not found.
2458 *
2459 * Return 0 if found, 1 if not found and < 0 if error.
2460 */
2461int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key,
2462 struct btrfs_path *path)
2463{
2464 int ret;
2465
2466 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
2467 if (ret > 0)
2468 ret = btrfs_previous_item(root, path, key->objectid, key->type);
2469
2470 if (ret == 0)
2471 btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]);
2472
2473 return ret;
2474}
2475
2476/*
2477 * Search for a valid slot for the given path.
2478 *
2479 * @root: The root node of the tree.
2480 * @key: Will contain a valid item if found.
2481 * @path: The starting point to validate the slot.
2482 *
2483 * Return: 0 if the item is valid
2484 * 1 if not found
2485 * <0 if error.
2486 */
2487int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
2488 struct btrfs_path *path)
2489{
2490 while (1) {
2491 int ret;
2492 const int slot = path->slots[0];
2493 const struct extent_buffer *leaf = path->nodes[0];
2494
2495 /* This is where we start walking the path. */
2496 if (slot >= btrfs_header_nritems(leaf)) {
2497 /*
2498 * If we've reached the last slot in this leaf we need
2499 * to go to the next leaf and reset the path.
2500 */
2501 ret = btrfs_next_leaf(root, path);
2502 if (ret)
2503 return ret;
2504 continue;
2505 }
2506 /* Store the found, valid item in @key. */
2507 btrfs_item_key_to_cpu(leaf, key, slot);
2508 break;
2509 }
2510 return 0;
2511}
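
/*
 * Illustrative sketch of the iteration pattern this helper enables after an
 * initial btrfs_search_slot(); the stop condition on @ino is an assumption
 * for the example:
 *
 *      while (1) {
 *              ret = btrfs_get_next_valid_item(root, &key, path);
 *              if (ret)
 *                      break;
 *              if (key.objectid != ino)
 *                      break;
 *              ... process path->nodes[0] / path->slots[0] ...
 *              path->slots[0]++;
 *      }
 *
 * ret == 1 means we ran out of items, ret < 0 is an error.
 */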
2512
2513/*
2514 * adjust the pointers going up the tree, starting at level,
2515 * making sure the right key of each node points to 'key'.
2516 * This is used after shifting pointers to the left, so it stops
2517 * fixing up pointers when a given leaf/node is not in slot 0 of the
2518 * higher levels
2519 *
2520 */
2521static void fixup_low_keys(struct btrfs_path *path,
2522 struct btrfs_disk_key *key, int level)
2523{
2524 int i;
2525 struct extent_buffer *t;
2526 int ret;
2527
2528 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2529 int tslot = path->slots[i];
2530
2531 if (!path->nodes[i])
2532 break;
2533 t = path->nodes[i];
2534 ret = btrfs_tree_mod_log_insert_key(t, tslot,
2535 BTRFS_MOD_LOG_KEY_REPLACE);
2536 BUG_ON(ret < 0);
2537 btrfs_set_node_key(t, key, tslot);
2538 btrfs_mark_buffer_dirty(path->nodes[i]);
2539 if (tslot != 0)
2540 break;
2541 }
2542}
2543
2544/*
2545 * update item key.
2546 *
2547 * This function isn't completely safe. It's the caller's responsibility
2548 * to ensure that the new key won't break the sort order
2549 */
2550void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
2551 struct btrfs_path *path,
2552 const struct btrfs_key *new_key)
2553{
2554 struct btrfs_disk_key disk_key;
2555 struct extent_buffer *eb;
2556 int slot;
2557
2558 eb = path->nodes[0];
2559 slot = path->slots[0];
2560 if (slot > 0) {
2561 btrfs_item_key(eb, &disk_key, slot - 1);
2562 if (unlikely(comp_keys(&disk_key, new_key) >= 0)) {
2563 btrfs_crit(fs_info,
2564 "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
2565 slot, btrfs_disk_key_objectid(&disk_key),
2566 btrfs_disk_key_type(&disk_key),
2567 btrfs_disk_key_offset(&disk_key),
2568 new_key->objectid, new_key->type,
2569 new_key->offset);
2570 btrfs_print_leaf(eb);
2571 BUG();
2572 }
2573 }
2574 if (slot < btrfs_header_nritems(eb) - 1) {
2575 btrfs_item_key(eb, &disk_key, slot + 1);
2576 if (unlikely(comp_keys(&disk_key, new_key) <= 0)) {
2577 btrfs_crit(fs_info,
2578 "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
2579 slot, btrfs_disk_key_objectid(&disk_key),
2580 btrfs_disk_key_type(&disk_key),
2581 btrfs_disk_key_offset(&disk_key),
2582 new_key->objectid, new_key->type,
2583 new_key->offset);
2584 btrfs_print_leaf(eb);
2585 BUG();
2586 }
2587 }
2588
2589 btrfs_cpu_key_to_disk(&disk_key, new_key);
2590 btrfs_set_item_key(eb, &disk_key, slot);
2591 btrfs_mark_buffer_dirty(eb);
2592 if (slot == 0)
2593 fixup_low_keys(path, &disk_key, 1);
2594}
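
/*
 * Illustrative sketch (names are assumptions for the example): a caller that
 * trims the front of a file extent item would bump the key offset like this,
 * adjusting the item data separately:
 *
 *      struct btrfs_key new_key = key;
 *
 *      new_key.offset += bytes_trimmed;
 *      btrfs_set_item_key_safe(fs_info, path, &new_key);
 */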
2595
2596/*
2597 * Check key order of two sibling extent buffers.
2598 *
2599 * Return true if something is wrong.
2600 * Return false if everything is fine.
2601 *
2602 * Tree-checker only works inside one tree block, thus the following
2603 * corruption cannot be detected by tree-checker:
2604 *
2605 * Leaf @left | Leaf @right
2606 * --------------------------------------------------------------
2607 * | 1 | 2 | 3 | 4 | 5 | f6 | | 7 | 8 |
2608 *
2609 * Key f6 in leaf @left itself is valid, but not valid when the next
2610 * key in leaf @right is 7.
2611 * This can only be checked at tree block merge time.
2612 * And since tree checker has ensured all key order in each tree block
2613 * is correct, we only need to check the last key of @left and the first
2614 * key of @right.
2615 */
2616static bool check_sibling_keys(struct extent_buffer *left,
2617 struct extent_buffer *right)
2618{
2619 struct btrfs_key left_last;
2620 struct btrfs_key right_first;
2621 int level = btrfs_header_level(left);
2622 int nr_left = btrfs_header_nritems(left);
2623 int nr_right = btrfs_header_nritems(right);
2624
2625 /* No key to check in one of the tree blocks */
2626 if (!nr_left || !nr_right)
2627 return false;
2628
2629 if (level) {
2630 btrfs_node_key_to_cpu(left, &left_last, nr_left - 1);
2631 btrfs_node_key_to_cpu(right, &right_first, 0);
2632 } else {
2633 btrfs_item_key_to_cpu(left, &left_last, nr_left - 1);
2634 btrfs_item_key_to_cpu(right, &right_first, 0);
2635 }
2636
2637 if (btrfs_comp_cpu_keys(&left_last, &right_first) >= 0) {
2638 btrfs_crit(left->fs_info,
2639"bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)",
2640 left_last.objectid, left_last.type,
2641 left_last.offset, right_first.objectid,
2642 right_first.type, right_first.offset);
2643 return true;
2644 }
2645 return false;
2646}
2647
2648/*
2649 * try to push data from one node into the next node left in the
2650 * tree.
2651 *
2652 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2653 * error, and > 0 if there was no room in the left hand block.
2654 */
2655static int push_node_left(struct btrfs_trans_handle *trans,
2656 struct extent_buffer *dst,
2657 struct extent_buffer *src, int empty)
2658{
2659 struct btrfs_fs_info *fs_info = trans->fs_info;
2660 int push_items = 0;
2661 int src_nritems;
2662 int dst_nritems;
2663 int ret = 0;
2664
2665 src_nritems = btrfs_header_nritems(src);
2666 dst_nritems = btrfs_header_nritems(dst);
2667 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
2668 WARN_ON(btrfs_header_generation(src) != trans->transid);
2669 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2670
2671 if (!empty && src_nritems <= 8)
2672 return 1;
2673
2674 if (push_items <= 0)
2675 return 1;
2676
2677 if (empty) {
2678 push_items = min(src_nritems, push_items);
2679 if (push_items < src_nritems) {
2680 /* leave at least 8 pointers in the node if
2681 * we aren't going to empty it
2682 */
2683 if (src_nritems - push_items < 8) {
2684 if (push_items <= 8)
2685 return 1;
2686 push_items -= 8;
2687 }
2688 }
2689 } else
2690 push_items = min(src_nritems - 8, push_items);
2691
2692 /* dst is the left eb, src is the middle eb */
2693 if (check_sibling_keys(dst, src)) {
2694 ret = -EUCLEAN;
2695 btrfs_abort_transaction(trans, ret);
2696 return ret;
2697 }
2698 ret = btrfs_tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items);
2699 if (ret) {
2700 btrfs_abort_transaction(trans, ret);
2701 return ret;
2702 }
2703 copy_extent_buffer(dst, src,
2704 btrfs_node_key_ptr_offset(dst, dst_nritems),
2705 btrfs_node_key_ptr_offset(src, 0),
2706 push_items * sizeof(struct btrfs_key_ptr));
2707
2708 if (push_items < src_nritems) {
2709 /*
2710 * Don't call btrfs_tree_mod_log_insert_move() here, key removal
2711 * was already fully logged by btrfs_tree_mod_log_eb_copy() above.
2712 */
2713 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(src, 0),
2714 btrfs_node_key_ptr_offset(src, push_items),
2715 (src_nritems - push_items) *
2716 sizeof(struct btrfs_key_ptr));
2717 }
2718 btrfs_set_header_nritems(src, src_nritems - push_items);
2719 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2720 btrfs_mark_buffer_dirty(src);
2721 btrfs_mark_buffer_dirty(dst);
2722
2723 return ret;
2724}
2725
2726/*
2727 * try to push data from one node into the next node right in the
2728 * tree.
2729 *
2730 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
2731 * error, and > 0 if there was no room in the right hand block.
2732 *
2733 * this will only push up to 1/2 the contents of the left node over
2734 */
2735static int balance_node_right(struct btrfs_trans_handle *trans,
2736 struct extent_buffer *dst,
2737 struct extent_buffer *src)
2738{
2739 struct btrfs_fs_info *fs_info = trans->fs_info;
2740 int push_items = 0;
2741 int max_push;
2742 int src_nritems;
2743 int dst_nritems;
2744 int ret = 0;
2745
2746 WARN_ON(btrfs_header_generation(src) != trans->transid);
2747 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2748
2749 src_nritems = btrfs_header_nritems(src);
2750 dst_nritems = btrfs_header_nritems(dst);
2751 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
2752 if (push_items <= 0)
2753 return 1;
2754
2755 if (src_nritems < 4)
2756 return 1;
2757
2758 max_push = src_nritems / 2 + 1;
2759 /* don't try to empty the node */
2760 if (max_push >= src_nritems)
2761 return 1;
2762
2763 if (max_push < push_items)
2764 push_items = max_push;
2765
2766 /* dst is the right eb, src is the middle eb */
2767 if (check_sibling_keys(src, dst)) {
2768 ret = -EUCLEAN;
2769 btrfs_abort_transaction(trans, ret);
2770 return ret;
2771 }
2772 ret = btrfs_tree_mod_log_insert_move(dst, push_items, 0, dst_nritems);
2773 BUG_ON(ret < 0);
2774 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(dst, push_items),
2775 btrfs_node_key_ptr_offset(dst, 0),
2776 (dst_nritems) *
2777 sizeof(struct btrfs_key_ptr));
2778
2779 ret = btrfs_tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items,
2780 push_items);
2781 if (ret) {
2782 btrfs_abort_transaction(trans, ret);
2783 return ret;
2784 }
2785 copy_extent_buffer(dst, src,
2786 btrfs_node_key_ptr_offset(dst, 0),
2787 btrfs_node_key_ptr_offset(src, src_nritems - push_items),
2788 push_items * sizeof(struct btrfs_key_ptr));
2789
2790 btrfs_set_header_nritems(src, src_nritems - push_items);
2791 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2792
2793 btrfs_mark_buffer_dirty(src);
2794 btrfs_mark_buffer_dirty(dst);
2795
2796 return ret;
2797}
2798
2799/*
2800 * helper function to insert a new root level in the tree.
2801 * A new node is allocated, and a single key pointer is inserted to
2802 * point to the existing root
2803 *
2804 * returns zero on success or < 0 on failure.
2805 */
2806static noinline int insert_new_root(struct btrfs_trans_handle *trans,
2807 struct btrfs_root *root,
2808 struct btrfs_path *path, int level)
2809{
2810 struct btrfs_fs_info *fs_info = root->fs_info;
2811 u64 lower_gen;
2812 struct extent_buffer *lower;
2813 struct extent_buffer *c;
2814 struct extent_buffer *old;
2815 struct btrfs_disk_key lower_key;
2816 int ret;
2817
2818 BUG_ON(path->nodes[level]);
2819 BUG_ON(path->nodes[level-1] != root->node);
2820
2821 lower = path->nodes[level-1];
2822 if (level == 1)
2823 btrfs_item_key(lower, &lower_key, 0);
2824 else
2825 btrfs_node_key(lower, &lower_key, 0);
2826
2827 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
2828 &lower_key, level, root->node->start, 0,
2829 BTRFS_NESTING_NEW_ROOT);
2830 if (IS_ERR(c))
2831 return PTR_ERR(c);
2832
2833 root_add_used(root, fs_info->nodesize);
2834
2835 btrfs_set_header_nritems(c, 1);
2836 btrfs_set_node_key(c, &lower_key, 0);
2837 btrfs_set_node_blockptr(c, 0, lower->start);
2838 lower_gen = btrfs_header_generation(lower);
2839 WARN_ON(lower_gen != trans->transid);
2840
2841 btrfs_set_node_ptr_generation(c, 0, lower_gen);
2842
2843 btrfs_mark_buffer_dirty(c);
2844
2845 old = root->node;
2846 ret = btrfs_tree_mod_log_insert_root(root->node, c, false);
2847 BUG_ON(ret < 0);
2848 rcu_assign_pointer(root->node, c);
2849
2850 /* the super has an extra ref to root->node */
2851 free_extent_buffer(old);
2852
2853 add_root_to_dirty_list(root);
2854 atomic_inc(&c->refs);
2855 path->nodes[level] = c;
2856 path->locks[level] = BTRFS_WRITE_LOCK;
2857 path->slots[level] = 0;
2858 return 0;
2859}
2860
2861/*
2862 * worker function to insert a single pointer in a node.
2863 * the node should have enough room for the pointer already
2864 *
2865 * slot and level indicate where you want the key to go, and
2866 * blocknr is the block the key points to.
2867 */
2868static void insert_ptr(struct btrfs_trans_handle *trans,
2869 struct btrfs_path *path,
2870 struct btrfs_disk_key *key, u64 bytenr,
2871 int slot, int level)
2872{
2873 struct extent_buffer *lower;
2874 int nritems;
2875 int ret;
2876
2877 BUG_ON(!path->nodes[level]);
2878 btrfs_assert_tree_write_locked(path->nodes[level]);
2879 lower = path->nodes[level];
2880 nritems = btrfs_header_nritems(lower);
2881 BUG_ON(slot > nritems);
2882 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info));
2883 if (slot != nritems) {
2884 if (level) {
2885 ret = btrfs_tree_mod_log_insert_move(lower, slot + 1,
2886 slot, nritems - slot);
2887 BUG_ON(ret < 0);
2888 }
2889 memmove_extent_buffer(lower,
2890 btrfs_node_key_ptr_offset(lower, slot + 1),
2891 btrfs_node_key_ptr_offset(lower, slot),
2892 (nritems - slot) * sizeof(struct btrfs_key_ptr));
2893 }
2894 if (level) {
2895 ret = btrfs_tree_mod_log_insert_key(lower, slot,
2896 BTRFS_MOD_LOG_KEY_ADD);
2897 BUG_ON(ret < 0);
2898 }
2899 btrfs_set_node_key(lower, key, slot);
2900 btrfs_set_node_blockptr(lower, slot, bytenr);
2901 WARN_ON(trans->transid == 0);
2902 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
2903 btrfs_set_header_nritems(lower, nritems + 1);
2904 btrfs_mark_buffer_dirty(lower);
2905}
2906
2907/*
2908 * split the node at the specified level in path in two.
2909 * The path is corrected to point to the appropriate node after the split
2910 *
2911 * Before splitting this tries to make some room in the node by pushing
2912 * left and right, if either one works, it returns right away.
2913 *
2914 * returns 0 on success and < 0 on failure
2915 */
2916static noinline int split_node(struct btrfs_trans_handle *trans,
2917 struct btrfs_root *root,
2918 struct btrfs_path *path, int level)
2919{
2920 struct btrfs_fs_info *fs_info = root->fs_info;
2921 struct extent_buffer *c;
2922 struct extent_buffer *split;
2923 struct btrfs_disk_key disk_key;
2924 int mid;
2925 int ret;
2926 u32 c_nritems;
2927
2928 c = path->nodes[level];
2929 WARN_ON(btrfs_header_generation(c) != trans->transid);
2930 if (c == root->node) {
2931 /*
2932 * trying to split the root, let's make a new one
2933 *
2934 * tree mod log: We don't log the removal of the old root in
2935 * insert_new_root, because that root buffer will be kept as a
2936 * normal node. We are going to log removal of half of the
2937 * elements below with btrfs_tree_mod_log_eb_copy(). We're
2938 * holding a tree lock on the buffer, which is why we cannot
2939 * race with other tree_mod_log users.
2940 */
2941 ret = insert_new_root(trans, root, path, level + 1);
2942 if (ret)
2943 return ret;
2944 } else {
2945 ret = push_nodes_for_insert(trans, root, path, level);
2946 c = path->nodes[level];
2947 if (!ret && btrfs_header_nritems(c) <
2948 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
2949 return 0;
2950 if (ret < 0)
2951 return ret;
2952 }
2953
2954 c_nritems = btrfs_header_nritems(c);
2955 mid = (c_nritems + 1) / 2;
2956 btrfs_node_key(c, &disk_key, mid);
2957
2958 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
2959 &disk_key, level, c->start, 0,
2960 BTRFS_NESTING_SPLIT);
2961 if (IS_ERR(split))
2962 return PTR_ERR(split);
2963
2964 root_add_used(root, fs_info->nodesize);
2965 ASSERT(btrfs_header_level(c) == level);
2966
2967 ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid);
2968 if (ret) {
2969 btrfs_abort_transaction(trans, ret);
2970 return ret;
2971 }
2972 copy_extent_buffer(split, c,
2973 btrfs_node_key_ptr_offset(split, 0),
2974 btrfs_node_key_ptr_offset(c, mid),
2975 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
2976 btrfs_set_header_nritems(split, c_nritems - mid);
2977 btrfs_set_header_nritems(c, mid);
2978
2979 btrfs_mark_buffer_dirty(c);
2980 btrfs_mark_buffer_dirty(split);
2981
2982 insert_ptr(trans, path, &disk_key, split->start,
2983 path->slots[level + 1] + 1, level + 1);
2984
2985 if (path->slots[level] >= mid) {
2986 path->slots[level] -= mid;
2987 btrfs_tree_unlock(c);
2988 free_extent_buffer(c);
2989 path->nodes[level] = split;
2990 path->slots[level + 1] += 1;
2991 } else {
2992 btrfs_tree_unlock(split);
2993 free_extent_buffer(split);
2994 }
2995 return 0;
2996}
2997
2998/*
2999 * how many bytes are required to store the items in a leaf. start
3000 * and nr indicate which items in the leaf to check. This totals up the
3001 * space used both by the item structs and the item data
3002 */
3003static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3004{
3005 int data_len;
3006 int nritems = btrfs_header_nritems(l);
3007 int end = min(nritems, start + nr) - 1;
3008
3009 if (!nr)
3010 return 0;
3011 data_len = btrfs_item_offset(l, start) + btrfs_item_size(l, start);
3012 data_len = data_len - btrfs_item_offset(l, end);
3013 data_len += sizeof(struct btrfs_item) * nr;
3014 WARN_ON(data_len < 0);
3015 return data_len;
3016}
3017
3018/*
3019 * The space between the end of the leaf items and
3020 * the start of the leaf data. IOW, how much room
3021 * the leaf has left for both items and data
3022 */
3023noinline int btrfs_leaf_free_space(struct extent_buffer *leaf)
3024{
3025 struct btrfs_fs_info *fs_info = leaf->fs_info;
3026 int nritems = btrfs_header_nritems(leaf);
3027 int ret;
3028
3029 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
3030 if (ret < 0) {
3031 btrfs_crit(fs_info,
3032 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3033 ret,
3034 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
3035 leaf_space_used(leaf, 0, nritems), nritems);
3036 }
3037 return ret;
3038}
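
/*
 * Worked example with illustrative numbers, assuming a 16K nodesize:
 * BTRFS_LEAF_DATA_SIZE() is the nodesize minus sizeof(struct btrfs_header)
 * (101 bytes), i.e. 16384 - 101 = 16283 bytes. A leaf holding 100 items
 * whose data totals 5000 bytes uses 100 * sizeof(struct btrfs_item) + 5000 =
 * 100 * 25 + 5000 = 7500 bytes, so btrfs_leaf_free_space() returns
 * 16283 - 7500 = 8783.
 */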
3039
3040/*
3041 * min slot controls the lowest index we're willing to push to the
3042 * right. We'll push up to and including min_slot, but no lower
3043 */
3044static noinline int __push_leaf_right(struct btrfs_path *path,
3045 int data_size, int empty,
3046 struct extent_buffer *right,
3047 int free_space, u32 left_nritems,
3048 u32 min_slot)
3049{
3050 struct btrfs_fs_info *fs_info = right->fs_info;
3051 struct extent_buffer *left = path->nodes[0];
3052 struct extent_buffer *upper = path->nodes[1];
3053 struct btrfs_map_token token;
3054 struct btrfs_disk_key disk_key;
3055 int slot;
3056 u32 i;
3057 int push_space = 0;
3058 int push_items = 0;
3059 u32 nr;
3060 u32 right_nritems;
3061 u32 data_end;
3062 u32 this_item_size;
3063
3064 if (empty)
3065 nr = 0;
3066 else
3067 nr = max_t(u32, 1, min_slot);
3068
3069 if (path->slots[0] >= left_nritems)
3070 push_space += data_size;
3071
3072 slot = path->slots[1];
3073 i = left_nritems - 1;
3074 while (i >= nr) {
3075 if (!empty && push_items > 0) {
3076 if (path->slots[0] > i)
3077 break;
3078 if (path->slots[0] == i) {
3079 int space = btrfs_leaf_free_space(left);
3080
3081 if (space + push_space * 2 > free_space)
3082 break;
3083 }
3084 }
3085
3086 if (path->slots[0] == i)
3087 push_space += data_size;
3088
3089 this_item_size = btrfs_item_size(left, i);
3090 if (this_item_size + sizeof(struct btrfs_item) +
3091 push_space > free_space)
3092 break;
3093
3094 push_items++;
3095 push_space += this_item_size + sizeof(struct btrfs_item);
3096 if (i == 0)
3097 break;
3098 i--;
3099 }
3100
3101 if (push_items == 0)
3102 goto out_unlock;
3103
3104 WARN_ON(!empty && push_items == left_nritems);
3105
3106 /* push left to right */
3107 right_nritems = btrfs_header_nritems(right);
3108
3109 push_space = btrfs_item_data_end(left, left_nritems - push_items);
3110 push_space -= leaf_data_end(left);
3111
3112 /* make room in the right data area */
3113 data_end = leaf_data_end(right);
3114 memmove_leaf_data(right, data_end - push_space, data_end,
3115 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
3116
3117 /* copy from the left data area */
3118 copy_leaf_data(right, left, BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3119 leaf_data_end(left), push_space);
3120
3121 memmove_leaf_items(right, push_items, 0, right_nritems);
3122
3123 /* copy the items from left to right */
3124 copy_leaf_items(right, left, 0, left_nritems - push_items, push_items);
3125
3126 /* update the item pointers */
3127 btrfs_init_map_token(&token, right);
3128 right_nritems += push_items;
3129 btrfs_set_header_nritems(right, right_nritems);
3130 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3131 for (i = 0; i < right_nritems; i++) {
3132 push_space -= btrfs_token_item_size(&token, i);
3133 btrfs_set_token_item_offset(&token, i, push_space);
3134 }
3135
3136 left_nritems -= push_items;
3137 btrfs_set_header_nritems(left, left_nritems);
3138
3139 if (left_nritems)
3140 btrfs_mark_buffer_dirty(left);
3141 else
3142 btrfs_clean_tree_block(left);
3143
3144 btrfs_mark_buffer_dirty(right);
3145
3146 btrfs_item_key(right, &disk_key, 0);
3147 btrfs_set_node_key(upper, &disk_key, slot + 1);
3148 btrfs_mark_buffer_dirty(upper);
3149
3150 /* then fixup the leaf pointer in the path */
3151 if (path->slots[0] >= left_nritems) {
3152 path->slots[0] -= left_nritems;
3153 if (btrfs_header_nritems(path->nodes[0]) == 0)
3154 btrfs_clean_tree_block(path->nodes[0]);
3155 btrfs_tree_unlock(path->nodes[0]);
3156 free_extent_buffer(path->nodes[0]);
3157 path->nodes[0] = right;
3158 path->slots[1] += 1;
3159 } else {
3160 btrfs_tree_unlock(right);
3161 free_extent_buffer(right);
3162 }
3163 return 0;
3164
3165out_unlock:
3166 btrfs_tree_unlock(right);
3167 free_extent_buffer(right);
3168 return 1;
3169}
3170
3171/*
3172 * push some data in the path leaf to the right, trying to free up at
3173 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3174 *
3175 * returns 1 if the push failed because the other node didn't have enough
3176 * room, 0 if everything worked out and < 0 if there were major errors.
3177 *
3178 * this will push starting from min_slot to the end of the leaf. It won't
3179 * push any slot lower than min_slot
3180 */
3181static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3182 *root, struct btrfs_path *path,
3183 int min_data_size, int data_size,
3184 int empty, u32 min_slot)
3185{
3186 struct extent_buffer *left = path->nodes[0];
3187 struct extent_buffer *right;
3188 struct extent_buffer *upper;
3189 int slot;
3190 int free_space;
3191 u32 left_nritems;
3192 int ret;
3193
3194 if (!path->nodes[1])
3195 return 1;
3196
3197 slot = path->slots[1];
3198 upper = path->nodes[1];
3199 if (slot >= btrfs_header_nritems(upper) - 1)
3200 return 1;
3201
3202 btrfs_assert_tree_write_locked(path->nodes[1]);
3203
3204 right = btrfs_read_node_slot(upper, slot + 1);
3205 /*
3206 * slot + 1 is not valid or we fail to read the right node,
3207 * no big deal, just return.
3208 */
3209 if (IS_ERR(right))
3210 return 1;
3211
3212 __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
3213
3214 free_space = btrfs_leaf_free_space(right);
3215 if (free_space < data_size)
3216 goto out_unlock;
3217
3218 ret = btrfs_cow_block(trans, root, right, upper,
3219 slot + 1, &right, BTRFS_NESTING_RIGHT_COW);
3220 if (ret)
3221 goto out_unlock;
3222
3223 left_nritems = btrfs_header_nritems(left);
3224 if (left_nritems == 0)
3225 goto out_unlock;
3226
3227 if (check_sibling_keys(left, right)) {
3228 ret = -EUCLEAN;
3229 btrfs_tree_unlock(right);
3230 free_extent_buffer(right);
3231 return ret;
3232 }
3233 if (path->slots[0] == left_nritems && !empty) {
3234 /* Key greater than all keys in the leaf, right neighbor has
3235 * enough room for it and we're not emptying our leaf to delete
3236 * it, therefore use right neighbor to insert the new item and
3237 * no need to touch/dirty our left leaf. */
3238 btrfs_tree_unlock(left);
3239 free_extent_buffer(left);
3240 path->nodes[0] = right;
3241 path->slots[0] = 0;
3242 path->slots[1]++;
3243 return 0;
3244 }
3245
3246 return __push_leaf_right(path, min_data_size, empty,
3247 right, free_space, left_nritems, min_slot);
3248out_unlock:
3249 btrfs_tree_unlock(right);
3250 free_extent_buffer(right);
3251 return 1;
3252}
3253
3254/*
3255 * push some data in the path leaf to the left, trying to free up at
3256 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3257 *
3258 * max_slot can put a limit on how far into the leaf we'll push items. The
3259 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
3260 * items
3261 */
3262static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
3263 int empty, struct extent_buffer *left,
3264 int free_space, u32 right_nritems,
3265 u32 max_slot)
3266{
3267 struct btrfs_fs_info *fs_info = left->fs_info;
3268 struct btrfs_disk_key disk_key;
3269 struct extent_buffer *right = path->nodes[0];
3270 int i;
3271 int push_space = 0;
3272 int push_items = 0;
3273 u32 old_left_nritems;
3274 u32 nr;
3275 int ret = 0;
3276 u32 this_item_size;
3277 u32 old_left_item_size;
3278 struct btrfs_map_token token;
3279
3280 if (empty)
3281 nr = min(right_nritems, max_slot);
3282 else
3283 nr = min(right_nritems - 1, max_slot);
3284
3285 for (i = 0; i < nr; i++) {
3286 if (!empty && push_items > 0) {
3287 if (path->slots[0] < i)
3288 break;
3289 if (path->slots[0] == i) {
3290 int space = btrfs_leaf_free_space(right);
3291
3292 if (space + push_space * 2 > free_space)
3293 break;
3294 }
3295 }
3296
3297 if (path->slots[0] == i)
3298 push_space += data_size;
3299
3300 this_item_size = btrfs_item_size(right, i);
3301 if (this_item_size + sizeof(struct btrfs_item) + push_space >
3302 free_space)
3303 break;
3304
3305 push_items++;
3306 push_space += this_item_size + sizeof(struct btrfs_item);
3307 }
3308
3309 if (push_items == 0) {
3310 ret = 1;
3311 goto out;
3312 }
3313 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3314
3315 /* push data from right to left */
3316 copy_leaf_items(left, right, btrfs_header_nritems(left), 0, push_items);
3317
3318 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
3319 btrfs_item_offset(right, push_items - 1);
3320
3321 copy_leaf_data(left, right, leaf_data_end(left) - push_space,
3322 btrfs_item_offset(right, push_items - 1), push_space);
3323 old_left_nritems = btrfs_header_nritems(left);
3324 BUG_ON(old_left_nritems <= 0);
3325
3326 btrfs_init_map_token(&token, left);
3327 old_left_item_size = btrfs_item_offset(left, old_left_nritems - 1);
3328 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3329 u32 ioff;
3330
3331 ioff = btrfs_token_item_offset(&token, i);
3332 btrfs_set_token_item_offset(&token, i,
3333 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size));
3334 }
3335 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3336
3337 /* fixup right node */
3338 if (push_items > right_nritems)
3339 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3340 right_nritems);
3341
3342 if (push_items < right_nritems) {
3343 push_space = btrfs_item_offset(right, push_items - 1) -
3344 leaf_data_end(right);
3345 memmove_leaf_data(right,
3346 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3347 leaf_data_end(right), push_space);
3348
3349 memmove_leaf_items(right, 0, push_items,
3350 btrfs_header_nritems(right) - push_items);
3351 }
3352
3353 btrfs_init_map_token(&token, right);
3354 right_nritems -= push_items;
3355 btrfs_set_header_nritems(right, right_nritems);
3356 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3357 for (i = 0; i < right_nritems; i++) {
3358 push_space = push_space - btrfs_token_item_size(&token, i);
3359 btrfs_set_token_item_offset(&token, i, push_space);
3360 }
3361
3362 btrfs_mark_buffer_dirty(left);
3363 if (right_nritems)
3364 btrfs_mark_buffer_dirty(right);
3365 else
3366 btrfs_clean_tree_block(right);
3367
3368 btrfs_item_key(right, &disk_key, 0);
3369 fixup_low_keys(path, &disk_key, 1);
3370
3371 /* then fixup the leaf pointer in the path */
3372 if (path->slots[0] < push_items) {
3373 path->slots[0] += old_left_nritems;
3374 btrfs_tree_unlock(path->nodes[0]);
3375 free_extent_buffer(path->nodes[0]);
3376 path->nodes[0] = left;
3377 path->slots[1] -= 1;
3378 } else {
3379 btrfs_tree_unlock(left);
3380 free_extent_buffer(left);
3381 path->slots[0] -= push_items;
3382 }
3383 BUG_ON(path->slots[0] < 0);
3384 return ret;
3385out:
3386 btrfs_tree_unlock(left);
3387 free_extent_buffer(left);
3388 return ret;
3389}
3390
3391/*
3392 * push some data in the path leaf to the left, trying to free up at
3393 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3394 *
3395 * max_slot can put a limit on how far into the leaf we'll push items. The
3396 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3397 * items
3398 */
3399static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3400 *root, struct btrfs_path *path, int min_data_size,
3401 int data_size, int empty, u32 max_slot)
3402{
3403 struct extent_buffer *right = path->nodes[0];
3404 struct extent_buffer *left;
3405 int slot;
3406 int free_space;
3407 u32 right_nritems;
3408 int ret = 0;
3409
3410 slot = path->slots[1];
3411 if (slot == 0)
3412 return 1;
3413 if (!path->nodes[1])
3414 return 1;
3415
3416 right_nritems = btrfs_header_nritems(right);
3417 if (right_nritems == 0)
3418 return 1;
3419
3420 btrfs_assert_tree_write_locked(path->nodes[1]);
3421
3422 left = btrfs_read_node_slot(path->nodes[1], slot - 1);
3423 /*
3424 * slot - 1 is not valid or we fail to read the left node,
3425 * no big deal, just return.
3426 */
3427 if (IS_ERR(left))
3428 return 1;
3429
3430 __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
3431
3432 free_space = btrfs_leaf_free_space(left);
3433 if (free_space < data_size) {
3434 ret = 1;
3435 goto out;
3436 }
3437
3438 ret = btrfs_cow_block(trans, root, left,
3439 path->nodes[1], slot - 1, &left,
3440 BTRFS_NESTING_LEFT_COW);
3441 if (ret) {
3442 /* we hit -ENOSPC, but it isn't fatal here */
3443 if (ret == -ENOSPC)
3444 ret = 1;
3445 goto out;
3446 }
3447
3448 if (check_sibling_keys(left, right)) {
3449 ret = -EUCLEAN;
3450 goto out;
3451 }
3452 return __push_leaf_left(path, min_data_size,
3453 empty, left, free_space, right_nritems,
3454 max_slot);
3455out:
3456 btrfs_tree_unlock(left);
3457 free_extent_buffer(left);
3458 return ret;
3459}
3460
3461/*
3462 * split the path's leaf in two, making sure there is at least data_size
3463 * available for the resulting leaf level of the path.
3464 */
3465static noinline void copy_for_split(struct btrfs_trans_handle *trans,
3466 struct btrfs_path *path,
3467 struct extent_buffer *l,
3468 struct extent_buffer *right,
3469 int slot, int mid, int nritems)
3470{
3471 struct btrfs_fs_info *fs_info = trans->fs_info;
3472 int data_copy_size;
3473 int rt_data_off;
3474 int i;
3475 struct btrfs_disk_key disk_key;
3476 struct btrfs_map_token token;
3477
3478 nritems = nritems - mid;
3479 btrfs_set_header_nritems(right, nritems);
3480 data_copy_size = btrfs_item_data_end(l, mid) - leaf_data_end(l);
3481
3482 copy_leaf_items(right, l, 0, mid, nritems);
3483
3484 copy_leaf_data(right, l, BTRFS_LEAF_DATA_SIZE(fs_info) - data_copy_size,
3485 leaf_data_end(l), data_copy_size);
3486
3487 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_data_end(l, mid);
3488
3489 btrfs_init_map_token(&token, right);
3490 for (i = 0; i < nritems; i++) {
3491 u32 ioff;
3492
3493 ioff = btrfs_token_item_offset(&token, i);
3494 btrfs_set_token_item_offset(&token, i, ioff + rt_data_off);
3495 }
3496
3497 btrfs_set_header_nritems(l, mid);
3498 btrfs_item_key(right, &disk_key, 0);
3499 insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1);
3500
3501 btrfs_mark_buffer_dirty(right);
3502 btrfs_mark_buffer_dirty(l);
3503 BUG_ON(path->slots[0] != slot);
3504
3505 if (mid <= slot) {
3506 btrfs_tree_unlock(path->nodes[0]);
3507 free_extent_buffer(path->nodes[0]);
3508 path->nodes[0] = right;
3509 path->slots[0] -= mid;
3510 path->slots[1] += 1;
3511 } else {
3512 btrfs_tree_unlock(right);
3513 free_extent_buffer(right);
3514 }
3515
3516 BUG_ON(path->slots[0] < 0);
3517}
3518
3519/*
3520 * double splits happen when we need to insert a big item in the middle
3521 * of a leaf. A double split can leave us with 3 mostly empty leaves:
3522 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3523 * A B C
3524 *
3525 * We avoid this by trying to push the items on either side of our target
3526 * into the adjacent leaves. If all goes well we can avoid the double split
3527 * completely.
3528 */
3529static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3530 struct btrfs_root *root,
3531 struct btrfs_path *path,
3532 int data_size)
3533{
3534 int ret;
3535 int progress = 0;
3536 int slot;
3537 u32 nritems;
3538 int space_needed = data_size;
3539
3540 slot = path->slots[0];
3541 if (slot < btrfs_header_nritems(path->nodes[0]))
3542 space_needed -= btrfs_leaf_free_space(path->nodes[0]);
3543
3544 /*
3545 * try to push all the items after our slot into the
3546 * right leaf
3547 */
3548 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
3549 if (ret < 0)
3550 return ret;
3551
3552 if (ret == 0)
3553 progress++;
3554
3555 nritems = btrfs_header_nritems(path->nodes[0]);
3556 /*
3557 * our goal is to get our slot at the start or end of a leaf. If
3558 * we've done so we're done
3559 */
3560 if (path->slots[0] == 0 || path->slots[0] == nritems)
3561 return 0;
3562
3563 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
3564 return 0;
3565
3566 /* try to push all the items before our slot into the left leaf */
3567 slot = path->slots[0];
3568 space_needed = data_size;
3569 if (slot > 0)
3570 space_needed -= btrfs_leaf_free_space(path->nodes[0]);
3571 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
3572 if (ret < 0)
3573 return ret;
3574
3575 if (ret == 0)
3576 progress++;
3577
3578 if (progress)
3579 return 0;
3580 return 1;
3581}
3582
3583/*
3584 * split the path's leaf in two, making sure there is at least data_size
3585 * available for the resulting leaf level of the path.
3586 *
3587 * returns 0 if all went well and < 0 on failure.
3588 */
3589static noinline int split_leaf(struct btrfs_trans_handle *trans,
3590 struct btrfs_root *root,
3591 const struct btrfs_key *ins_key,
3592 struct btrfs_path *path, int data_size,
3593 int extend)
3594{
3595 struct btrfs_disk_key disk_key;
3596 struct extent_buffer *l;
3597 u32 nritems;
3598 int mid;
3599 int slot;
3600 struct extent_buffer *right;
3601 struct btrfs_fs_info *fs_info = root->fs_info;
3602 int ret = 0;
3603 int wret;
3604 int split;
3605 int num_doubles = 0;
3606 int tried_avoid_double = 0;
3607
3608 l = path->nodes[0];
3609 slot = path->slots[0];
3610 if (extend && data_size + btrfs_item_size(l, slot) +
3611 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
3612 return -EOVERFLOW;
3613
3614 /* first try to make some room by pushing left and right */
3615 if (data_size && path->nodes[1]) {
3616 int space_needed = data_size;
3617
3618 if (slot < btrfs_header_nritems(l))
3619 space_needed -= btrfs_leaf_free_space(l);
3620
3621 wret = push_leaf_right(trans, root, path, space_needed,
3622 space_needed, 0, 0);
3623 if (wret < 0)
3624 return wret;
3625 if (wret) {
3626 space_needed = data_size;
3627 if (slot > 0)
3628 space_needed -= btrfs_leaf_free_space(l);
3629 wret = push_leaf_left(trans, root, path, space_needed,
3630 space_needed, 0, (u32)-1);
3631 if (wret < 0)
3632 return wret;
3633 }
3634 l = path->nodes[0];
3635
3636 /* did the pushes work? */
3637 if (btrfs_leaf_free_space(l) >= data_size)
3638 return 0;
3639 }
3640
3641 if (!path->nodes[1]) {
3642 ret = insert_new_root(trans, root, path, 1);
3643 if (ret)
3644 return ret;
3645 }
3646again:
3647 split = 1;
3648 l = path->nodes[0];
3649 slot = path->slots[0];
3650 nritems = btrfs_header_nritems(l);
3651 mid = (nritems + 1) / 2;
3652
3653 if (mid <= slot) {
3654 if (nritems == 1 ||
3655 leaf_space_used(l, mid, nritems - mid) + data_size >
3656 BTRFS_LEAF_DATA_SIZE(fs_info)) {
3657 if (slot >= nritems) {
3658 split = 0;
3659 } else {
3660 mid = slot;
3661 if (mid != nritems &&
3662 leaf_space_used(l, mid, nritems - mid) +
3663 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3664 if (data_size && !tried_avoid_double)
3665 goto push_for_double;
3666 split = 2;
3667 }
3668 }
3669 }
3670 } else {
3671 if (leaf_space_used(l, 0, mid) + data_size >
3672 BTRFS_LEAF_DATA_SIZE(fs_info)) {
3673 if (!extend && data_size && slot == 0) {
3674 split = 0;
3675 } else if ((extend || !data_size) && slot == 0) {
3676 mid = 1;
3677 } else {
3678 mid = slot;
3679 if (mid != nritems &&
3680 leaf_space_used(l, mid, nritems - mid) +
3681 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
3682 if (data_size && !tried_avoid_double)
3683 goto push_for_double;
3684 split = 2;
3685 }
3686 }
3687 }
3688 }
3689
3690 if (split == 0)
3691 btrfs_cpu_key_to_disk(&disk_key, ins_key);
3692 else
3693 btrfs_item_key(l, &disk_key, mid);
3694
3695 /*
3696 * We have to use BTRFS_NESTING_NEW_ROOT here if we've done a double
3697 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES
3698 * subclasses, which is 8 at the time of this patch, and we've maxed it
3699 * out. In the future we could add a
3700 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just
3701 * use BTRFS_NESTING_NEW_ROOT.
3702 */
3703 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3704 &disk_key, 0, l->start, 0,
3705 num_doubles ? BTRFS_NESTING_NEW_ROOT :
3706 BTRFS_NESTING_SPLIT);
3707 if (IS_ERR(right))
3708 return PTR_ERR(right);
3709
3710 root_add_used(root, fs_info->nodesize);
3711
3712 if (split == 0) {
3713 if (mid <= slot) {
3714 btrfs_set_header_nritems(right, 0);
3715 insert_ptr(trans, path, &disk_key,
3716 right->start, path->slots[1] + 1, 1);
3717 btrfs_tree_unlock(path->nodes[0]);
3718 free_extent_buffer(path->nodes[0]);
3719 path->nodes[0] = right;
3720 path->slots[0] = 0;
3721 path->slots[1] += 1;
3722 } else {
3723 btrfs_set_header_nritems(right, 0);
3724 insert_ptr(trans, path, &disk_key,
3725 right->start, path->slots[1], 1);
3726 btrfs_tree_unlock(path->nodes[0]);
3727 free_extent_buffer(path->nodes[0]);
3728 path->nodes[0] = right;
3729 path->slots[0] = 0;
3730 if (path->slots[1] == 0)
3731 fixup_low_keys(path, &disk_key, 1);
3732 }
3733 /*
3734 * We create a new empty leaf 'right' for the required ins_len; the
3735 * caller copies the new item's content into it and
3736 * btrfs_mark_buffer_dirty() is called on it after that copy.
3737 */
3738 return ret;
3739 }
3740
3741 copy_for_split(trans, path, l, right, slot, mid, nritems);
3742
3743 if (split == 2) {
3744 BUG_ON(num_doubles != 0);
3745 num_doubles++;
3746 goto again;
3747 }
3748
3749 return 0;
3750
3751push_for_double:
3752 push_for_double_split(trans, root, path, data_size);
3753 tried_avoid_double = 1;
3754 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
3755 return 0;
3756 goto again;
3757}
3758
3759static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
3760 struct btrfs_root *root,
3761 struct btrfs_path *path, int ins_len)
3762{
3763 struct btrfs_key key;
3764 struct extent_buffer *leaf;
3765 struct btrfs_file_extent_item *fi;
3766 u64 extent_len = 0;
3767 u32 item_size;
3768 int ret;
3769
3770 leaf = path->nodes[0];
3771 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3772
3773 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
3774 key.type != BTRFS_EXTENT_CSUM_KEY);
3775
3776 if (btrfs_leaf_free_space(leaf) >= ins_len)
3777 return 0;
3778
3779 item_size = btrfs_item_size(leaf, path->slots[0]);
3780 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3781 fi = btrfs_item_ptr(leaf, path->slots[0],
3782 struct btrfs_file_extent_item);
3783 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
3784 }
3785 btrfs_release_path(path);
3786
3787 path->keep_locks = 1;
3788 path->search_for_split = 1;
3789 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3790 path->search_for_split = 0;
3791 if (ret > 0)
3792 ret = -EAGAIN;
3793 if (ret < 0)
3794 goto err;
3795
3796 ret = -EAGAIN;
3797 leaf = path->nodes[0];
3798 /* if our item isn't there, return now */
3799 if (item_size != btrfs_item_size(leaf, path->slots[0]))
3800 goto err;
3801
3802 /* the leaf has changed, it now has room. return now */
3803 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len)
3804 goto err;
3805
3806 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3807 fi = btrfs_item_ptr(leaf, path->slots[0],
3808 struct btrfs_file_extent_item);
3809 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
3810 goto err;
3811 }
3812
3813 ret = split_leaf(trans, root, &key, path, ins_len, 1);
3814 if (ret)
3815 goto err;
3816
3817 path->keep_locks = 0;
3818 btrfs_unlock_up_safe(path, 1);
3819 return 0;
3820err:
3821 path->keep_locks = 0;
3822 return ret;
3823}
3824
3825static noinline int split_item(struct btrfs_path *path,
3826 const struct btrfs_key *new_key,
3827 unsigned long split_offset)
3828{
3829 struct extent_buffer *leaf;
3830 int orig_slot, slot;
3831 char *buf;
3832 u32 nritems;
3833 u32 item_size;
3834 u32 orig_offset;
3835 struct btrfs_disk_key disk_key;
3836
3837 leaf = path->nodes[0];
3838 BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item));
3839
3840 orig_slot = path->slots[0];
3841 orig_offset = btrfs_item_offset(leaf, path->slots[0]);
3842 item_size = btrfs_item_size(leaf, path->slots[0]);
3843
3844 buf = kmalloc(item_size, GFP_NOFS);
3845 if (!buf)
3846 return -ENOMEM;
3847
3848 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3849 path->slots[0]), item_size);
3850
3851 slot = path->slots[0] + 1;
3852 nritems = btrfs_header_nritems(leaf);
3853 if (slot != nritems) {
3854 /* shift the items */
3855 memmove_leaf_items(leaf, slot + 1, slot, nritems - slot);
3856 }
3857
3858 btrfs_cpu_key_to_disk(&disk_key, new_key);
3859 btrfs_set_item_key(leaf, &disk_key, slot);
3860
3861 btrfs_set_item_offset(leaf, slot, orig_offset);
3862 btrfs_set_item_size(leaf, slot, item_size - split_offset);
3863
3864 btrfs_set_item_offset(leaf, orig_slot,
3865 orig_offset + item_size - split_offset);
3866 btrfs_set_item_size(leaf, orig_slot, split_offset);
3867
3868 btrfs_set_header_nritems(leaf, nritems + 1);
3869
3870 /* write the data for the start of the original item */
3871 write_extent_buffer(leaf, buf,
3872 btrfs_item_ptr_offset(leaf, path->slots[0]),
3873 split_offset);
3874
3875 /* write the data for the new item */
3876 write_extent_buffer(leaf, buf + split_offset,
3877 btrfs_item_ptr_offset(leaf, slot),
3878 item_size - split_offset);
3879 btrfs_mark_buffer_dirty(leaf);
3880
3881 BUG_ON(btrfs_leaf_free_space(leaf) < 0);
3882 kfree(buf);
3883 return 0;
3884}
3885
3886/*
3887 * This function splits a single item into two items,
3888 * giving 'new_key' to the new item and splitting the
3889 * old one at split_offset (from the start of the item).
3890 *
3891 * The path may be released by this operation. After
3892 * the split, the path is pointing to the old item. The
3893 * new item is going to be in the same node as the old one.
3894 *
3895 * Note: the item being split must be small enough to live alone in
3896 * a tree block, with room for one extra struct btrfs_item.
3897 *
3898 * This allows us to split the item in place, keeping a lock on the
3899 * leaf the entire time.
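 *
 * For example, splitting a 100 byte item at split_offset 40 leaves the
 * original item holding the first 40 bytes and creates a new item, carrying
 * new_key, in the next slot with the remaining 60 bytes; both items stay in
 * the same leaf (illustrative sizes only).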
3900 */
3901int btrfs_split_item(struct btrfs_trans_handle *trans,
3902 struct btrfs_root *root,
3903 struct btrfs_path *path,
3904 const struct btrfs_key *new_key,
3905 unsigned long split_offset)
3906{
3907 int ret;
3908 ret = setup_leaf_for_split(trans, root, path,
3909 sizeof(struct btrfs_item));
3910 if (ret)
3911 return ret;
3912
3913 ret = split_item(path, new_key, split_offset);
3914 return ret;
3915}
3916
3917/*
3918 * make the item pointed to by the path smaller. new_size indicates
3919 * how small to make it, and from_end tells us if we just chop bytes
3920 * off the end of the item or if we shift the item to chop bytes off
3921 * the front.
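 *
 * For example, truncating a 100 byte item to new_size 60 with from_end set
 * drops the last 40 bytes; with from_end clear it drops the first 40 bytes
 * and increases the item's key offset by 40 (illustrative sizes only).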
3922 */
3923void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
3924{
3925 int slot;
3926 struct extent_buffer *leaf;
3927 u32 nritems;
3928 unsigned int data_end;
3929 unsigned int old_data_start;
3930 unsigned int old_size;
3931 unsigned int size_diff;
3932 int i;
3933 struct btrfs_map_token token;
3934
3935 leaf = path->nodes[0];
3936 slot = path->slots[0];
3937
3938 old_size = btrfs_item_size(leaf, slot);
3939 if (old_size == new_size)
3940 return;
3941
3942 nritems = btrfs_header_nritems(leaf);
3943 data_end = leaf_data_end(leaf);
3944
3945 old_data_start = btrfs_item_offset(leaf, slot);
3946
3947 size_diff = old_size - new_size;
3948
3949 BUG_ON(slot < 0);
3950 BUG_ON(slot >= nritems);
3951
3952 /*
3953 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3954 */
3955 /* first correct the data pointers */
3956 btrfs_init_map_token(&token, leaf);
3957 for (i = slot; i < nritems; i++) {
3958 u32 ioff;
3959
3960 ioff = btrfs_token_item_offset(&token, i);
3961 btrfs_set_token_item_offset(&token, i, ioff + size_diff);
3962 }
3963
3964 /* shift the data */
3965 if (from_end) {
3966 memmove_leaf_data(leaf, data_end + size_diff, data_end,
3967 old_data_start + new_size - data_end);
3968 } else {
3969 struct btrfs_disk_key disk_key;
3970 u64 offset;
3971
3972 btrfs_item_key(leaf, &disk_key, slot);
3973
3974 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
3975 unsigned long ptr;
3976 struct btrfs_file_extent_item *fi;
3977
3978 fi = btrfs_item_ptr(leaf, slot,
3979 struct btrfs_file_extent_item);
3980 fi = (struct btrfs_file_extent_item *)(
3981 (unsigned long)fi - size_diff);
3982
3983 if (btrfs_file_extent_type(leaf, fi) ==
3984 BTRFS_FILE_EXTENT_INLINE) {
3985 ptr = btrfs_item_ptr_offset(leaf, slot);
3986 memmove_extent_buffer(leaf, ptr,
3987 (unsigned long)fi,
3988 BTRFS_FILE_EXTENT_INLINE_DATA_START);
3989 }
3990 }
3991
3992 memmove_leaf_data(leaf, data_end + size_diff, data_end,
3993 old_data_start - data_end);
3994
3995 offset = btrfs_disk_key_offset(&disk_key);
3996 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
3997 btrfs_set_item_key(leaf, &disk_key, slot);
3998 if (slot == 0)
3999 fixup_low_keys(path, &disk_key, 1);
4000 }
4001
4002 btrfs_set_item_size(leaf, slot, new_size);
4003 btrfs_mark_buffer_dirty(leaf);
4004
4005 if (btrfs_leaf_free_space(leaf) < 0) {
4006 btrfs_print_leaf(leaf);
4007 BUG();
4008 }
4009}
4010
4011/*
4012 * make the item pointed to by the path bigger, data_size is the added size.
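 *
 * For example, extending a 60 byte item by data_size 20 moves the data of
 * this and all following items 20 bytes closer to the item headers and grows
 * the item to 80 bytes; the caller is expected to fill in the 20 new bytes at
 * the end of the item afterwards (illustrative sizes only).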
4013 */
4014void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
4015{
4016 int slot;
4017 struct extent_buffer *leaf;
4018 u32 nritems;
4019 unsigned int data_end;
4020 unsigned int old_data;
4021 unsigned int old_size;
4022 int i;
4023 struct btrfs_map_token token;
4024
4025 leaf = path->nodes[0];
4026
4027 nritems = btrfs_header_nritems(leaf);
4028 data_end = leaf_data_end(leaf);
4029
4030 if (btrfs_leaf_free_space(leaf) < data_size) {
4031 btrfs_print_leaf(leaf);
4032 BUG();
4033 }
4034 slot = path->slots[0];
4035 old_data = btrfs_item_data_end(leaf, slot);
4036
4037 BUG_ON(slot < 0);
4038 if (slot >= nritems) {
4039 btrfs_print_leaf(leaf);
4040 btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d",
4041 slot, nritems);
4042 BUG();
4043 }
4044
4045 /*
4046 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4047 */
4048 /* first correct the data pointers */
4049 btrfs_init_map_token(&token, leaf);
4050 for (i = slot; i < nritems; i++) {
4051 u32 ioff;
4052
4053 ioff = btrfs_token_item_offset(&token, i);
4054 btrfs_set_token_item_offset(&token, i, ioff - data_size);
4055 }
4056
4057 /* shift the data */
4058 memmove_leaf_data(leaf, data_end - data_size, data_end,
4059 old_data - data_end);
4060
4061 data_end = old_data;
4062 old_size = btrfs_item_size(leaf, slot);
4063 btrfs_set_item_size(leaf, slot, old_size + data_size);
4064 btrfs_mark_buffer_dirty(leaf);
4065
4066 if (btrfs_leaf_free_space(leaf) < 0) {
4067 btrfs_print_leaf(leaf);
4068 BUG();
4069 }
4070}
4071
4072/*
4073 * Make space in the leaf before inserting one or more items.
4074 *
4075 * @root: root we are inserting items to
4076 * @path: points to the leaf/slot where we are going to insert new items
4077 * @batch: information about the batch of items to insert
4078 *
4079 * Main purpose is to save stack depth by doing the bulk of the work in a
4080 * function that doesn't call btrfs_search_slot
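 *
 * The caller must already have made sure the leaf has room for
 * batch->total_data_size plus batch->nr item headers, e.g. by passing a large
 * enough ins_len to btrfs_search_slot(); this function BUGs if the space is
 * not there.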
4081 */
4082static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4083 const struct btrfs_item_batch *batch)
4084{
4085 struct btrfs_fs_info *fs_info = root->fs_info;
4086 int i;
4087 u32 nritems;
4088 unsigned int data_end;
4089 struct btrfs_disk_key disk_key;
4090 struct extent_buffer *leaf;
4091 int slot;
4092 struct btrfs_map_token token;
4093 u32 total_size;
4094
4095 /*
4096 * Before anything else, update keys in the parent and other ancestors
4097 * if needed, then release the write locks on them, so that other tasks
4098 * can use them while we modify the leaf.
4099 */
4100 if (path->slots[0] == 0) {
4101 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]);
4102 fixup_low_keys(path, &disk_key, 1);
4103 }
4104 btrfs_unlock_up_safe(path, 1);
4105
4106 leaf = path->nodes[0];
4107 slot = path->slots[0];
4108
4109 nritems = btrfs_header_nritems(leaf);
4110 data_end = leaf_data_end(leaf);
4111 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));
4112
4113 if (btrfs_leaf_free_space(leaf) < total_size) {
4114 btrfs_print_leaf(leaf);
4115 btrfs_crit(fs_info, "not enough freespace need %u have %d",
4116 total_size, btrfs_leaf_free_space(leaf));
4117 BUG();
4118 }
4119
4120 btrfs_init_map_token(&token, leaf);
4121 if (slot != nritems) {
4122 unsigned int old_data = btrfs_item_data_end(leaf, slot);
4123
4124 if (old_data < data_end) {
4125 btrfs_print_leaf(leaf);
4126 btrfs_crit(fs_info,
4127 "item at slot %d with data offset %u beyond data end of leaf %u",
4128 slot, old_data, data_end);
4129 BUG();
4130 }
4131 /*
4132 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4133 */
4134 /* first correct the data pointers */
4135 for (i = slot; i < nritems; i++) {
4136 u32 ioff;
4137
4138 ioff = btrfs_token_item_offset(&token, i);
4139 btrfs_set_token_item_offset(&token, i,
4140 ioff - batch->total_data_size);
4141 }
4142 /* shift the items */
4143 memmove_leaf_items(leaf, slot + batch->nr, slot, nritems - slot);
4144
4145 /* shift the data */
4146 memmove_leaf_data(leaf, data_end - batch->total_data_size,
4147 data_end, old_data - data_end);
4148 data_end = old_data;
4149 }
4150
4151 /* setup the item for the new data */
4152 for (i = 0; i < batch->nr; i++) {
4153 btrfs_cpu_key_to_disk(&disk_key, &batch->keys[i]);
4154 btrfs_set_item_key(leaf, &disk_key, slot + i);
4155 data_end -= batch->data_sizes[i];
4156 btrfs_set_token_item_offset(&token, slot + i, data_end);
4157 btrfs_set_token_item_size(&token, slot + i, batch->data_sizes[i]);
4158 }
4159
4160 btrfs_set_header_nritems(leaf, nritems + batch->nr);
4161 btrfs_mark_buffer_dirty(leaf);
4162
4163 if (btrfs_leaf_free_space(leaf) < 0) {
4164 btrfs_print_leaf(leaf);
4165 BUG();
4166 }
4167}
4168
4169/*
4170 * Insert a new item into a leaf.
4171 *
4172 * @root: The root of the btree.
4173 * @path: A path pointing to the target leaf and slot.
4174 * @key: The key of the new item.
4175 * @data_size: The size of the data associated with the new key.
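 *
 * This assumes the caller has already positioned @path at the insertion slot
 * of a leaf with enough free space, as btrfs_duplicate_item() below does.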
4176 */
4177void btrfs_setup_item_for_insert(struct btrfs_root *root,
4178 struct btrfs_path *path,
4179 const struct btrfs_key *key,
4180 u32 data_size)
4181{
4182 struct btrfs_item_batch batch;
4183
4184 batch.keys = key;
4185 batch.data_sizes = &data_size;
4186 batch.total_data_size = data_size;
4187 batch.nr = 1;
4188
4189 setup_items_for_insert(root, path, &batch);
4190}
4191
4192/*
4193 * Given a key and some data, insert items into the tree.
4194 * This does all the path init required, making room in the tree if needed.
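 *
 * A minimal usage sketch for inserting two empty items at once (illustrative
 * only; "keys" and "sizes" are placeholder arrays of 2 struct btrfs_key and
 * 2 u32 entries, with the keys in ascending order and not yet in the tree):
 *
 *    struct btrfs_item_batch batch;
 *
 *    batch.keys = keys;
 *    batch.data_sizes = sizes;
 *    batch.total_data_size = sizes[0] + sizes[1];
 *    batch.nr = 2;
 *    ret = btrfs_insert_empty_items(trans, root, path, &batch);
 *
 * On success the path points at the first new item; the caller then writes
 * the item data via btrfs_item_ptr() and marks the leaf dirty.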
4195 */
4196int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4197 struct btrfs_root *root,
4198 struct btrfs_path *path,
4199 const struct btrfs_item_batch *batch)
4200{
4201 int ret = 0;
4202 int slot;
4203 u32 total_size;
4204
4205 total_size = batch->total_data_size + (batch->nr * sizeof(struct btrfs_item));
4206 ret = btrfs_search_slot(trans, root, &batch->keys[0], path, total_size, 1);
4207 if (ret == 0)
4208 return -EEXIST;
4209 if (ret < 0)
4210 return ret;
4211
4212 slot = path->slots[0];
4213 BUG_ON(slot < 0);
4214
4215 setup_items_for_insert(root, path, batch);
4216 return 0;
4217}
4218
4219/*
4220 * Given a key and some data, insert an item into the tree.
4221 * This does all the path init required, making room in the tree if needed.
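 *
 * A minimal usage sketch (illustrative only; "key", "buf" and "len" stand in
 * for the caller's key, data buffer and data length):
 *
 *    ret = btrfs_insert_item(trans, root, &key, buf, len);
 *    if (ret == -EEXIST)
 *            ... an item with this key already exists ...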
4222 */
4223int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4224 const struct btrfs_key *cpu_key, void *data,
4225 u32 data_size)
4226{
4227 int ret = 0;
4228 struct btrfs_path *path;
4229 struct extent_buffer *leaf;
4230 unsigned long ptr;
4231
4232 path = btrfs_alloc_path();
4233 if (!path)
4234 return -ENOMEM;
4235 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4236 if (!ret) {
4237 leaf = path->nodes[0];
4238 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4239 write_extent_buffer(leaf, data, ptr, data_size);
4240 btrfs_mark_buffer_dirty(leaf);
4241 }
4242 btrfs_free_path(path);
4243 return ret;
4244}
4245
4246/*
4247 * This function duplicates an item, giving 'new_key' to the new item.
4248 * It guarantees both items live in the same tree leaf and the new item is
4249 * contiguous with the original item.
4250 *
4251 * This allows us to split a file extent in place, keeping a lock on the leaf
4252 * the entire time.
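 *
 * On success the path is left pointing at the new item (path->slots[0] is
 * advanced by one) with the original item in the previous slot. Like
 * btrfs_split_item() this may return -EAGAIN if the item changed while the
 * path had to be released and re-searched.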
4253 */
4254int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4255 struct btrfs_root *root,
4256 struct btrfs_path *path,
4257 const struct btrfs_key *new_key)
4258{
4259 struct extent_buffer *leaf;
4260 int ret;
4261 u32 item_size;
4262
4263 leaf = path->nodes[0];
4264 item_size = btrfs_item_size(leaf, path->slots[0]);
4265 ret = setup_leaf_for_split(trans, root, path,
4266 item_size + sizeof(struct btrfs_item));
4267 if (ret)
4268 return ret;
4269
4270 path->slots[0]++;
4271 btrfs_setup_item_for_insert(root, path, new_key, item_size);
4272 leaf = path->nodes[0];
4273 memcpy_extent_buffer(leaf,
4274 btrfs_item_ptr_offset(leaf, path->slots[0]),
4275 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4276 item_size);
4277 return 0;
4278}
4279
4280/*
4281 * delete the pointer from a given node.
4282 *
4283 * the tree should have been previously balanced so the deletion does not
4284 * empty a node.
4285 */
4286static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4287 int level, int slot)
4288{
4289 struct extent_buffer *parent = path->nodes[level];
4290 u32 nritems;
4291 int ret;
4292
4293 nritems = btrfs_header_nritems(parent);
4294 if (slot != nritems - 1) {
4295 if (level) {
4296 ret = btrfs_tree_mod_log_insert_move(parent, slot,
4297 slot + 1, nritems - slot - 1);
4298 BUG_ON(ret < 0);
4299 }
4300 memmove_extent_buffer(parent,
4301 btrfs_node_key_ptr_offset(parent, slot),
4302 btrfs_node_key_ptr_offset(parent, slot + 1),
4303 sizeof(struct btrfs_key_ptr) *
4304 (nritems - slot - 1));
4305 } else if (level) {
4306 ret = btrfs_tree_mod_log_insert_key(parent, slot,
4307 BTRFS_MOD_LOG_KEY_REMOVE);
4308 BUG_ON(ret < 0);
4309 }
4310
4311 nritems--;
4312 btrfs_set_header_nritems(parent, nritems);
4313 if (nritems == 0 && parent == root->node) {
4314 BUG_ON(btrfs_header_level(root->node) != 1);
4315 /* just turn the root into a leaf and break */
4316 btrfs_set_header_level(root->node, 0);
4317 } else if (slot == 0) {
4318 struct btrfs_disk_key disk_key;
4319
4320 btrfs_node_key(parent, &disk_key, 0);
4321 fixup_low_keys(path, &disk_key, level + 1);
4322 }
4323 btrfs_mark_buffer_dirty(parent);
4324}
4325
4326/*
4327 * a helper function to delete the leaf pointed to by path->slots[1] and
4328 * path->nodes[1].
4329 *
4330 * This deletes the pointer in path->nodes[1] and frees the leaf
4331 * block extent.
4332 *
4333 * The path must have already been setup for deleting the leaf, including
4334 * all the proper balancing. path->nodes[1] must be locked.
4335 */
4336static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4337 struct btrfs_root *root,
4338 struct btrfs_path *path,
4339 struct extent_buffer *leaf)
4340{
4341 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4342 del_ptr(root, path, 1, path->slots[1]);
4343
4344 /*
4345 * btrfs_free_extent is expensive, we want to make sure we
4346 * aren't holding any locks when we call it
4347 */
4348 btrfs_unlock_up_safe(path, 0);
4349
4350 root_sub_used(root, leaf->len);
4351
4352 atomic_inc(&leaf->refs);
4353 btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
4354 free_extent_buffer_stale(leaf);
4355}
4356/*
4357 * delete the item at the leaf level in path. If that empties
4358 * the leaf, remove it from the tree
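 *
 * A typical caller looks like this (illustrative only):
 *
 *    ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 *    if (ret == 0)
 *            ret = btrfs_del_items(trans, root, path, path->slots[0], 1);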
4359 */
4360int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4361 struct btrfs_path *path, int slot, int nr)
4362{
4363 struct btrfs_fs_info *fs_info = root->fs_info;
4364 struct extent_buffer *leaf;
4365 int ret = 0;
4366 int wret;
4367 u32 nritems;
4368
4369 leaf = path->nodes[0];
4370 nritems = btrfs_header_nritems(leaf);
4371
4372 if (slot + nr != nritems) {
4373 const u32 last_off = btrfs_item_offset(leaf, slot + nr - 1);
4374 const int data_end = leaf_data_end(leaf);
4375 struct btrfs_map_token token;
4376 u32 dsize = 0;
4377 int i;
4378
4379 for (i = 0; i < nr; i++)
4380 dsize += btrfs_item_size(leaf, slot + i);
4381
4382 memmove_leaf_data(leaf, data_end + dsize, data_end,
4383 last_off - data_end);
4384
4385 btrfs_init_map_token(&token, leaf);
4386 for (i = slot + nr; i < nritems; i++) {
4387 u32 ioff;
4388
4389 ioff = btrfs_token_item_offset(&token, i);
4390 btrfs_set_token_item_offset(&token, i, ioff + dsize);
4391 }
4392
4393 memmove_leaf_items(leaf, slot, slot + nr, nritems - slot - nr);
4394 }
4395 btrfs_set_header_nritems(leaf, nritems - nr);
4396 nritems -= nr;
4397
4398 /* delete the leaf if we've emptied it */
4399 if (nritems == 0) {
4400 if (leaf == root->node) {
4401 btrfs_set_header_level(leaf, 0);
4402 } else {
4403 btrfs_clean_tree_block(leaf);
4404 btrfs_del_leaf(trans, root, path, leaf);
4405 }
4406 } else {
4407 int used = leaf_space_used(leaf, 0, nritems);
4408 if (slot == 0) {
4409 struct btrfs_disk_key disk_key;
4410
4411 btrfs_item_key(leaf, &disk_key, 0);
4412 fixup_low_keys(path, &disk_key, 1);
4413 }
4414
4415 /*
4416 * Try to delete the leaf if it is mostly empty. We do this by
4417 * trying to move all its items into its left and right neighbours.
4418 * If we can't move all the items, then we don't delete it - it's
4419 * not ideal, but future insertions might fill the leaf with more
4420 * items, or items from other leaves might be moved later into our
4421 * leaf due to deletions on those leaves.
4422 */
4423 if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
4424 u32 min_push_space;
4425
4426 /* push_leaf_left fixes the path.
4427 * make sure the path still points to our leaf
4428 * for possible call to del_ptr below
4429 */
4430 slot = path->slots[1];
4431 atomic_inc(&leaf->refs);
4432 /*
4433 * We want to be able to at least push one item to the
4434 * left neighbour leaf, and that's the first item.
4435 */
4436 min_push_space = sizeof(struct btrfs_item) +
4437 btrfs_item_size(leaf, 0);
4438 wret = push_leaf_left(trans, root, path, 0,
4439 min_push_space, 1, (u32)-1);
4440 if (wret < 0 && wret != -ENOSPC)
4441 ret = wret;
4442
4443 if (path->nodes[0] == leaf &&
4444 btrfs_header_nritems(leaf)) {
4445 /*
4446 * If we were not able to push all items from our
4447 * leaf to its left neighbour, then attempt to
4448 * either push all the remaining items to the
4449 * right neighbour or none. There's no advantage
4450 * in pushing only some items, instead of all, as
4451 * it's pointless to end up with a leaf having
4452 * too few items while the neighbours can be full
4453 * or nearly full.
4454 */
4455 nritems = btrfs_header_nritems(leaf);
4456 min_push_space = leaf_space_used(leaf, 0, nritems);
4457 wret = push_leaf_right(trans, root, path, 0,
4458 min_push_space, 1, 0);
4459 if (wret < 0 && wret != -ENOSPC)
4460 ret = wret;
4461 }
4462
4463 if (btrfs_header_nritems(leaf) == 0) {
4464 path->slots[1] = slot;
4465 btrfs_del_leaf(trans, root, path, leaf);
4466 free_extent_buffer(leaf);
4467 ret = 0;
4468 } else {
4469 /* if we're still in the path, make sure
4470 * we're dirty. Otherwise, one of the
4471 * push_leaf functions must have already
4472 * dirtied this buffer
4473 */
4474 if (path->nodes[0] == leaf)
4475 btrfs_mark_buffer_dirty(leaf);
4476 free_extent_buffer(leaf);
4477 }
4478 } else {
4479 btrfs_mark_buffer_dirty(leaf);
4480 }
4481 }
4482 return ret;
4483}
4484
4485/*
4486 * search the tree again to find a leaf with lesser keys
4487 * returns 0 if it found something or 1 if there are no lesser leaves.
4488 * returns < 0 on io errors.
4489 *
4490 * This may release the path, and so you may lose any locks held at the
4491 * time you call it.
4492 */
4493int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
4494{
4495 struct btrfs_key key;
4496 struct btrfs_disk_key found_key;
4497 int ret;
4498
4499 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
4500
4501 if (key.offset > 0) {
4502 key.offset--;
4503 } else if (key.type > 0) {
4504 key.type--;
4505 key.offset = (u64)-1;
4506 } else if (key.objectid > 0) {
4507 key.objectid--;
4508 key.type = (u8)-1;
4509 key.offset = (u64)-1;
4510 } else {
4511 return 1;
4512 }
4513
4514 btrfs_release_path(path);
4515 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4516 if (ret < 0)
4517 return ret;
4518 btrfs_item_key(path->nodes[0], &found_key, 0);
4519 ret = comp_keys(&found_key, &key);
4520 /*
4521 * We might have had an item with the previous key in the tree right
4522 * before we released our path. And after we released our path, that
4523 * item might have been pushed to the first slot (0) of the leaf we
4524 * were holding due to a tree balance. Alternatively, an item with the
4525 * previous key can exist as the only element of a leaf (big fat item).
4526 * Therefore account for these 2 cases, so that our callers (like
4527 * btrfs_previous_item) don't miss an existing item with a key matching
4528 * the previous key we computed above.
4529 */
4530 if (ret <= 0)
4531 return 0;
4532 return 1;
4533}
4534
4535/*
4536 * A helper function to walk down the tree starting at min_key, and looking
4537 * for nodes or leaves that have a minimum transaction id.
4538 * This is used by the btree defrag code and tree logging.
4539 *
4540 * This does not cow, but it does stuff the starting key it finds back
4541 * into min_key, so you can call btrfs_search_slot with cow=1 on the
4542 * key and get a writable path.
4543 *
4544 * This honors path->lowest_level to prevent descent past a given level
4545 * of the tree.
4546 *
4547 * min_trans indicates the oldest transaction that you are interested
4548 * in walking through. Any nodes or leaves older than min_trans are
4549 * skipped over (without reading them).
4550 *
4551 * returns zero if something useful was found, < 0 on error and 1 if there
4552 * was nothing in the tree that matched the search criteria.
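 *
 * A typical caller loops like this (illustrative only):
 *
 *    while (btrfs_search_forward(root, &min_key, path, min_trans) == 0) {
 *            ... process path->nodes[0] / path->slots[0] ...
 *            btrfs_release_path(path);
 *            ... advance min_key past the key just processed ...
 *    }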
4553 */
4554int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
4555 struct btrfs_path *path,
4556 u64 min_trans)
4557{
4558 struct extent_buffer *cur;
4559 struct btrfs_key found_key;
4560 int slot;
4561 int sret;
4562 u32 nritems;
4563 int level;
4564 int ret = 1;
4565 int keep_locks = path->keep_locks;
4566
4567 ASSERT(!path->nowait);
4568 path->keep_locks = 1;
4569again:
4570 cur = btrfs_read_lock_root_node(root);
4571 level = btrfs_header_level(cur);
4572 WARN_ON(path->nodes[level]);
4573 path->nodes[level] = cur;
4574 path->locks[level] = BTRFS_READ_LOCK;
4575
4576 if (btrfs_header_generation(cur) < min_trans) {
4577 ret = 1;
4578 goto out;
4579 }
4580 while (1) {
4581 nritems = btrfs_header_nritems(cur);
4582 level = btrfs_header_level(cur);
4583 sret = btrfs_bin_search(cur, min_key, &slot);
4584 if (sret < 0) {
4585 ret = sret;
4586 goto out;
4587 }
4588
4589 /* at the lowest level, we're done, setup the path and exit */
4590 if (level == path->lowest_level) {
4591 if (slot >= nritems)
4592 goto find_next_key;
4593 ret = 0;
4594 path->slots[level] = slot;
4595 btrfs_item_key_to_cpu(cur, &found_key, slot);
4596 goto out;
4597 }
4598 if (sret && slot > 0)
4599 slot--;
4600 /*
4601 * check this node pointer against the min_trans parameters.
4602 * If it is too old, skip to the next one.
4603 */
4604 while (slot < nritems) {
4605 u64 gen;
4606
4607 gen = btrfs_node_ptr_generation(cur, slot);
4608 if (gen < min_trans) {
4609 slot++;
4610 continue;
4611 }
4612 break;
4613 }
4614find_next_key:
4615 /*
4616 * we didn't find a candidate key in this node, walk forward
4617 * and find another one
4618 */
4619 if (slot >= nritems) {
4620 path->slots[level] = slot;
4621 sret = btrfs_find_next_key(root, path, min_key, level,
4622 min_trans);
4623 if (sret == 0) {
4624 btrfs_release_path(path);
4625 goto again;
4626 } else {
4627 goto out;
4628 }
4629 }
4630 /* save our key for returning back */
4631 btrfs_node_key_to_cpu(cur, &found_key, slot);
4632 path->slots[level] = slot;
4633 if (level == path->lowest_level) {
4634 ret = 0;
4635 goto out;
4636 }
4637 cur = btrfs_read_node_slot(cur, slot);
4638 if (IS_ERR(cur)) {
4639 ret = PTR_ERR(cur);
4640 goto out;
4641 }
4642
4643 btrfs_tree_read_lock(cur);
4644
4645 path->locks[level - 1] = BTRFS_READ_LOCK;
4646 path->nodes[level - 1] = cur;
4647 unlock_up(path, level, 1, 0, NULL);
4648 }
4649out:
4650 path->keep_locks = keep_locks;
4651 if (ret == 0) {
4652 btrfs_unlock_up_safe(path, path->lowest_level + 1);
4653 memcpy(min_key, &found_key, sizeof(found_key));
4654 }
4655 return ret;
4656}
4657
4658/*
4659 * this is similar to btrfs_next_leaf, but does not try to preserve
4660 * and fixup the path. It looks for and returns the next key in the
4661 * tree based on the current path and the min_trans parameters.
4662 *
4663 * 0 is returned if another key is found, < 0 if there are any errors
4664 * and 1 is returned if there are no higher keys in the tree
4665 *
4666 * path->keep_locks should be set to 1 on the search made before
4667 * calling this function.
4668 */
4669int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
4670 struct btrfs_key *key, int level, u64 min_trans)
4671{
4672 int slot;
4673 struct extent_buffer *c;
4674
4675 WARN_ON(!path->keep_locks && !path->skip_locking);
4676 while (level < BTRFS_MAX_LEVEL) {
4677 if (!path->nodes[level])
4678 return 1;
4679
4680 slot = path->slots[level] + 1;
4681 c = path->nodes[level];
4682next:
4683 if (slot >= btrfs_header_nritems(c)) {
4684 int ret;
4685 int orig_lowest;
4686 struct btrfs_key cur_key;
4687 if (level + 1 >= BTRFS_MAX_LEVEL ||
4688 !path->nodes[level + 1])
4689 return 1;
4690
4691 if (path->locks[level + 1] || path->skip_locking) {
4692 level++;
4693 continue;
4694 }
4695
4696 slot = btrfs_header_nritems(c) - 1;
4697 if (level == 0)
4698 btrfs_item_key_to_cpu(c, &cur_key, slot);
4699 else
4700 btrfs_node_key_to_cpu(c, &cur_key, slot);
4701
4702 orig_lowest = path->lowest_level;
4703 btrfs_release_path(path);
4704 path->lowest_level = level;
4705 ret = btrfs_search_slot(NULL, root, &cur_key, path,
4706 0, 0);
4707 path->lowest_level = orig_lowest;
4708 if (ret < 0)
4709 return ret;
4710
4711 c = path->nodes[level];
4712 slot = path->slots[level];
4713 if (ret == 0)
4714 slot++;
4715 goto next;
4716 }
4717
4718 if (level == 0)
4719 btrfs_item_key_to_cpu(c, key, slot);
4720 else {
4721 u64 gen = btrfs_node_ptr_generation(c, slot);
4722
4723 if (gen < min_trans) {
4724 slot++;
4725 goto next;
4726 }
4727 btrfs_node_key_to_cpu(c, key, slot);
4728 }
4729 return 0;
4730 }
4731 return 1;
4732}
4733
4734int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
4735 u64 time_seq)
4736{
4737 int slot;
4738 int level;
4739 struct extent_buffer *c;
4740 struct extent_buffer *next;
4741 struct btrfs_fs_info *fs_info = root->fs_info;
4742 struct btrfs_key key;
4743 bool need_commit_sem = false;
4744 u32 nritems;
4745 int ret;
4746 int i;
4747
4748 /*
4749 * The nowait semantics are used only for write paths, where we don't
4750 * use the tree mod log and sequence numbers.
4751 */
4752 if (time_seq)
4753 ASSERT(!path->nowait);
4754
4755 nritems = btrfs_header_nritems(path->nodes[0]);
4756 if (nritems == 0)
4757 return 1;
4758
4759 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
4760again:
4761 level = 1;
4762 next = NULL;
4763 btrfs_release_path(path);
4764
4765 path->keep_locks = 1;
4766
4767 if (time_seq) {
4768 ret = btrfs_search_old_slot(root, &key, path, time_seq);
4769 } else {
4770 if (path->need_commit_sem) {
4771 path->need_commit_sem = 0;
4772 need_commit_sem = true;
4773 if (path->nowait) {
4774 if (!down_read_trylock(&fs_info->commit_root_sem)) {
4775 ret = -EAGAIN;
4776 goto done;
4777 }
4778 } else {
4779 down_read(&fs_info->commit_root_sem);
4780 }
4781 }
4782 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4783 }
4784 path->keep_locks = 0;
4785
4786 if (ret < 0)
4787 goto done;
4788
4789 nritems = btrfs_header_nritems(path->nodes[0]);
4790 /*
4791 * by releasing the path above we dropped all our locks. A balance
4792 * could have added more items next to the key that used to be
4793 * at the very end of the block. So, check again here and
4794 * advance the path if there are now more items available.
4795 */
4796 if (nritems > 0 && path->slots[0] < nritems - 1) {
4797 if (ret == 0)
4798 path->slots[0]++;
4799 ret = 0;
4800 goto done;
4801 }
4802 /*
4803 * So the above check misses one case:
4804 * - after releasing the path above, someone has removed the item that
4805 * used to be at the very end of the block, and balance between leaves
4806 * gets another one with a bigger key.offset to replace it.
4807 *
4808 * This one should be returned as well, or we can get leaf corruption
4809 * later (esp. in __btrfs_drop_extents()).
4810 *
4811 * And a bit more explanation about this check,
4812 * with ret > 0, the key isn't found, the path points to the slot
4813 * where it should be inserted, so the path->slots[0] item must be the
4814 * bigger one.
4815 */
4816 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
4817 ret = 0;
4818 goto done;
4819 }
4820
4821 while (level < BTRFS_MAX_LEVEL) {
4822 if (!path->nodes[level]) {
4823 ret = 1;
4824 goto done;
4825 }
4826
4827 slot = path->slots[level] + 1;
4828 c = path->nodes[level];
4829 if (slot >= btrfs_header_nritems(c)) {
4830 level++;
4831 if (level == BTRFS_MAX_LEVEL) {
4832 ret = 1;
4833 goto done;
4834 }
4835 continue;
4836 }
4837
4838
4839 /*
4840 * Our current level is where we're going to start from, and to
4841 * make sure lockdep doesn't complain we need to drop our locks
4842 * and nodes from 0 to our current level.
4843 */
4844 for (i = 0; i < level; i++) {
4845 if (path->locks[level]) {
4846 btrfs_tree_read_unlock(path->nodes[i]);
4847 path->locks[i] = 0;
4848 }
4849 free_extent_buffer(path->nodes[i]);
4850 path->nodes[i] = NULL;
4851 }
4852
4853 next = c;
4854 ret = read_block_for_search(root, path, &next, level,
4855 slot, &key);
4856 if (ret == -EAGAIN && !path->nowait)
4857 goto again;
4858
4859 if (ret < 0) {
4860 btrfs_release_path(path);
4861 goto done;
4862 }
4863
4864 if (!path->skip_locking) {
4865 ret = btrfs_try_tree_read_lock(next);
4866 if (!ret && path->nowait) {
4867 ret = -EAGAIN;
4868 goto done;
4869 }
4870 if (!ret && time_seq) {
4871 /*
4872 * If we don't get the lock, we may be racing
4873 * with push_leaf_left, holding that lock while
4874 * itself waiting for the leaf we've currently
4875 * locked. To solve this situation, we give up
4876 * on our lock and cycle.
4877 */
4878 free_extent_buffer(next);
4879 btrfs_release_path(path);
4880 cond_resched();
4881 goto again;
4882 }
4883 if (!ret)
4884 btrfs_tree_read_lock(next);
4885 }
4886 break;
4887 }
4888 path->slots[level] = slot;
4889 while (1) {
4890 level--;
4891 path->nodes[level] = next;
4892 path->slots[level] = 0;
4893 if (!path->skip_locking)
4894 path->locks[level] = BTRFS_READ_LOCK;
4895 if (!level)
4896 break;
4897
4898 ret = read_block_for_search(root, path, &next, level,
4899 0, &key);
4900 if (ret == -EAGAIN && !path->nowait)
4901 goto again;
4902
4903 if (ret < 0) {
4904 btrfs_release_path(path);
4905 goto done;
4906 }
4907
4908 if (!path->skip_locking) {
4909 if (path->nowait) {
4910 if (!btrfs_try_tree_read_lock(next)) {
4911 ret = -EAGAIN;
4912 goto done;
4913 }
4914 } else {
4915 btrfs_tree_read_lock(next);
4916 }
4917 }
4918 }
4919 ret = 0;
4920done:
4921 unlock_up(path, 0, 1, 0, NULL);
4922 if (need_commit_sem) {
4923 int ret2;
4924
4925 path->need_commit_sem = 1;
4926 ret2 = finish_need_commit_sem_search(path);
4927 up_read(&fs_info->commit_root_sem);
4928 if (ret2)
4929 ret = ret2;
4930 }
4931
4932 return ret;
4933}
4934
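/*
 * Step to the next item in the current leaf, moving to the first slot of the
 * next leaf (as seen at @time_seq, when non-zero) if we were at the last
 * slot. Returns 0 on success, 1 if there are no more items, or a negative
 * error.
 */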
4935int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq)
4936{
4937 path->slots[0]++;
4938 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
4939 return btrfs_next_old_leaf(root, path, time_seq);
4940 return 0;
4941}
4942
4943/*
4944 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
4945 * searching until it gets past min_objectid or finds an item of 'type'
4946 *
4947 * returns 0 if something is found, 1 if nothing was found and < 0 on error
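 *
 * For example, after positioning the path just past an inode's items, calling
 * this with min_objectid set to the inode number and type set to
 * BTRFS_EXTENT_DATA_KEY walks backwards to the inode's last file extent item,
 * if one exists (illustrative only).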
4948 */
4949int btrfs_previous_item(struct btrfs_root *root,
4950 struct btrfs_path *path, u64 min_objectid,
4951 int type)
4952{
4953 struct btrfs_key found_key;
4954 struct extent_buffer *leaf;
4955 u32 nritems;
4956 int ret;
4957
4958 while (1) {
4959 if (path->slots[0] == 0) {
4960 ret = btrfs_prev_leaf(root, path);
4961 if (ret != 0)
4962 return ret;
4963 } else {
4964 path->slots[0]--;
4965 }
4966 leaf = path->nodes[0];
4967 nritems = btrfs_header_nritems(leaf);
4968 if (nritems == 0)
4969 return 1;
4970 if (path->slots[0] == nritems)
4971 path->slots[0]--;
4972
4973 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4974 if (found_key.objectid < min_objectid)
4975 break;
4976 if (found_key.type == type)
4977 return 0;
4978 if (found_key.objectid == min_objectid &&
4979 found_key.type < type)
4980 break;
4981 }
4982 return 1;
4983}
4984
4985/*
4986 * search in extent tree to find a previous Metadata/Data extent item with
4987 * min objecitd.
4988 *
4989 * returns 0 if something is found, 1 if nothing was found and < 0 on error
4990 */
4991int btrfs_previous_extent_item(struct btrfs_root *root,
4992 struct btrfs_path *path, u64 min_objectid)
4993{
4994 struct btrfs_key found_key;
4995 struct extent_buffer *leaf;
4996 u32 nritems;
4997 int ret;
4998
4999 while (1) {
5000 if (path->slots[0] == 0) {
5001 ret = btrfs_prev_leaf(root, path);
5002 if (ret != 0)
5003 return ret;
5004 } else {
5005 path->slots[0]--;
5006 }
5007 leaf = path->nodes[0];
5008 nritems = btrfs_header_nritems(leaf);
5009 if (nritems == 0)
5010 return 1;
5011 if (path->slots[0] == nritems)
5012 path->slots[0]--;
5013
5014 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5015 if (found_key.objectid < min_objectid)
5016 break;
5017 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5018 found_key.type == BTRFS_METADATA_ITEM_KEY)
5019 return 0;
5020 if (found_key.objectid == min_objectid &&
5021 found_key.type < BTRFS_EXTENT_ITEM_KEY)
5022 break;
5023 }
5024 return 1;
5025}
5026
5027int __init btrfs_ctree_init(void)
5028{
5029 btrfs_path_cachep = kmem_cache_create("btrfs_path",
5030 sizeof(struct btrfs_path), 0,
5031 SLAB_MEM_SPREAD, NULL);
5032 if (!btrfs_path_cachep)
5033 return -ENOMEM;
5034 return 0;
5035}
5036
5037void __cold btrfs_ctree_exit(void)
5038{
5039 kmem_cache_destroy(btrfs_path_cachep);
5040}
1/*
2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/sched.h>
20#include <linux/slab.h>
21#include <linux/rbtree.h>
22#include <linux/vmalloc.h>
23#include "ctree.h"
24#include "disk-io.h"
25#include "transaction.h"
26#include "print-tree.h"
27#include "locking.h"
28
29static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
30 *root, struct btrfs_path *path, int level);
31static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
32 *root, struct btrfs_key *ins_key,
33 struct btrfs_path *path, int data_size, int extend);
34static int push_node_left(struct btrfs_trans_handle *trans,
35 struct btrfs_root *root, struct extent_buffer *dst,
36 struct extent_buffer *src, int empty);
37static int balance_node_right(struct btrfs_trans_handle *trans,
38 struct btrfs_root *root,
39 struct extent_buffer *dst_buf,
40 struct extent_buffer *src_buf);
41static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
42 int level, int slot);
43static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
44 struct extent_buffer *eb);
45
46struct btrfs_path *btrfs_alloc_path(void)
47{
48 struct btrfs_path *path;
49 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
50 return path;
51}
52
53/*
54 * set all locked nodes in the path to blocking locks. This should
55 * be done before scheduling
56 */
57noinline void btrfs_set_path_blocking(struct btrfs_path *p)
58{
59 int i;
60 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
61 if (!p->nodes[i] || !p->locks[i])
62 continue;
63 btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
64 if (p->locks[i] == BTRFS_READ_LOCK)
65 p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
66 else if (p->locks[i] == BTRFS_WRITE_LOCK)
67 p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
68 }
69}
70
71/*
72 * reset all the locked nodes in the patch to spinning locks.
73 *
74 * held is used to keep lockdep happy, when lockdep is enabled
75 * we set held to a blocking lock before we go around and
76 * retake all the spinlocks in the path. You can safely use NULL
77 * for held
78 */
79noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
80 struct extent_buffer *held, int held_rw)
81{
82 int i;
83
84 if (held) {
85 btrfs_set_lock_blocking_rw(held, held_rw);
86 if (held_rw == BTRFS_WRITE_LOCK)
87 held_rw = BTRFS_WRITE_LOCK_BLOCKING;
88 else if (held_rw == BTRFS_READ_LOCK)
89 held_rw = BTRFS_READ_LOCK_BLOCKING;
90 }
91 btrfs_set_path_blocking(p);
92
93 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
94 if (p->nodes[i] && p->locks[i]) {
95 btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
96 if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
97 p->locks[i] = BTRFS_WRITE_LOCK;
98 else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
99 p->locks[i] = BTRFS_READ_LOCK;
100 }
101 }
102
103 if (held)
104 btrfs_clear_lock_blocking_rw(held, held_rw);
105}
106
107/* this also releases the path */
108void btrfs_free_path(struct btrfs_path *p)
109{
110 if (!p)
111 return;
112 btrfs_release_path(p);
113 kmem_cache_free(btrfs_path_cachep, p);
114}
115
116/*
117 * path release drops references on the extent buffers in the path
118 * and it drops any locks held by this path
119 *
120 * It is safe to call this on paths that no locks or extent buffers held.
121 */
122noinline void btrfs_release_path(struct btrfs_path *p)
123{
124 int i;
125
126 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
127 p->slots[i] = 0;
128 if (!p->nodes[i])
129 continue;
130 if (p->locks[i]) {
131 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
132 p->locks[i] = 0;
133 }
134 free_extent_buffer(p->nodes[i]);
135 p->nodes[i] = NULL;
136 }
137}
138
139/*
140 * safely gets a reference on the root node of a tree. A lock
141 * is not taken, so a concurrent writer may put a different node
142 * at the root of the tree. See btrfs_lock_root_node for the
143 * looping required.
144 *
145 * The extent buffer returned by this has a reference taken, so
146 * it won't disappear. It may stop being the root of the tree
147 * at any time because there are no locks held.
148 */
149struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
150{
151 struct extent_buffer *eb;
152
153 while (1) {
154 rcu_read_lock();
155 eb = rcu_dereference(root->node);
156
157 /*
158 * RCU really hurts here, we could free up the root node because
159 * it was cow'ed but we may not get the new root node yet so do
160 * the inc_not_zero dance and if it doesn't work then
161 * synchronize_rcu and try again.
162 */
163 if (atomic_inc_not_zero(&eb->refs)) {
164 rcu_read_unlock();
165 break;
166 }
167 rcu_read_unlock();
168 synchronize_rcu();
169 }
170 return eb;
171}
172
173/* loop around taking references on and locking the root node of the
174 * tree until you end up with a lock on the root. A locked buffer
175 * is returned, with a reference held.
176 */
177struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
178{
179 struct extent_buffer *eb;
180
181 while (1) {
182 eb = btrfs_root_node(root);
183 btrfs_tree_lock(eb);
184 if (eb == root->node)
185 break;
186 btrfs_tree_unlock(eb);
187 free_extent_buffer(eb);
188 }
189 return eb;
190}
191
192/* loop around taking references on and locking the root node of the
193 * tree until you end up with a lock on the root. A locked buffer
194 * is returned, with a reference held.
195 */
196static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
197{
198 struct extent_buffer *eb;
199
200 while (1) {
201 eb = btrfs_root_node(root);
202 btrfs_tree_read_lock(eb);
203 if (eb == root->node)
204 break;
205 btrfs_tree_read_unlock(eb);
206 free_extent_buffer(eb);
207 }
208 return eb;
209}
210
211/* cowonly root (everything not a reference counted cow subvolume), just get
212 * put onto a simple dirty list. transaction.c walks this to make sure they
213 * get properly updated on disk.
214 */
215static void add_root_to_dirty_list(struct btrfs_root *root)
216{
217 if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
218 !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
219 return;
220
221 spin_lock(&root->fs_info->trans_lock);
222 if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
223 /* Want the extent tree to be the last on the list */
224 if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
225 list_move_tail(&root->dirty_list,
226 &root->fs_info->dirty_cowonly_roots);
227 else
228 list_move(&root->dirty_list,
229 &root->fs_info->dirty_cowonly_roots);
230 }
231 spin_unlock(&root->fs_info->trans_lock);
232}
233
234/*
235 * used by snapshot creation to make a copy of a root for a tree with
236 * a given objectid. The buffer with the new root node is returned in
237 * cow_ret, and this func returns zero on success or a negative error code.
238 */
239int btrfs_copy_root(struct btrfs_trans_handle *trans,
240 struct btrfs_root *root,
241 struct extent_buffer *buf,
242 struct extent_buffer **cow_ret, u64 new_root_objectid)
243{
244 struct extent_buffer *cow;
245 int ret = 0;
246 int level;
247 struct btrfs_disk_key disk_key;
248
249 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
250 trans->transid != root->fs_info->running_transaction->transid);
251 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
252 trans->transid != root->last_trans);
253
254 level = btrfs_header_level(buf);
255 if (level == 0)
256 btrfs_item_key(buf, &disk_key, 0);
257 else
258 btrfs_node_key(buf, &disk_key, 0);
259
260 cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
261 &disk_key, level, buf->start, 0);
262 if (IS_ERR(cow))
263 return PTR_ERR(cow);
264
265 copy_extent_buffer(cow, buf, 0, 0, cow->len);
266 btrfs_set_header_bytenr(cow, cow->start);
267 btrfs_set_header_generation(cow, trans->transid);
268 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
269 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
270 BTRFS_HEADER_FLAG_RELOC);
271 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
272 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
273 else
274 btrfs_set_header_owner(cow, new_root_objectid);
275
276 write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
277 BTRFS_FSID_SIZE);
278
279 WARN_ON(btrfs_header_generation(buf) > trans->transid);
280 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
281 ret = btrfs_inc_ref(trans, root, cow, 1);
282 else
283 ret = btrfs_inc_ref(trans, root, cow, 0);
284
285 if (ret)
286 return ret;
287
288 btrfs_mark_buffer_dirty(cow);
289 *cow_ret = cow;
290 return 0;
291}
292
293enum mod_log_op {
294 MOD_LOG_KEY_REPLACE,
295 MOD_LOG_KEY_ADD,
296 MOD_LOG_KEY_REMOVE,
297 MOD_LOG_KEY_REMOVE_WHILE_FREEING,
298 MOD_LOG_KEY_REMOVE_WHILE_MOVING,
299 MOD_LOG_MOVE_KEYS,
300 MOD_LOG_ROOT_REPLACE,
301};
302
303struct tree_mod_move {
304 int dst_slot;
305 int nr_items;
306};
307
308struct tree_mod_root {
309 u64 logical;
310 u8 level;
311};
312
313struct tree_mod_elem {
314 struct rb_node node;
315 u64 logical;
316 u64 seq;
317 enum mod_log_op op;
318
319 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
320 int slot;
321
322 /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
323 u64 generation;
324
325 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
326 struct btrfs_disk_key key;
327 u64 blockptr;
328
329 /* this is used for op == MOD_LOG_MOVE_KEYS */
330 struct tree_mod_move move;
331
332 /* this is used for op == MOD_LOG_ROOT_REPLACE */
333 struct tree_mod_root old_root;
334};
335
336static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
337{
338 read_lock(&fs_info->tree_mod_log_lock);
339}
340
341static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
342{
343 read_unlock(&fs_info->tree_mod_log_lock);
344}
345
346static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
347{
348 write_lock(&fs_info->tree_mod_log_lock);
349}
350
351static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
352{
353 write_unlock(&fs_info->tree_mod_log_lock);
354}
355
356/*
357 * Pull a new tree mod seq number for our operation.
358 */
359static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
360{
361 return atomic64_inc_return(&fs_info->tree_mod_seq);
362}
363
364/*
365 * This adds a new blocker to the tree mod log's blocker list if the @elem
366 * passed does not already have a sequence number set. So when a caller expects
367 * to record tree modifications, it should ensure to set elem->seq to zero
368 * before calling btrfs_get_tree_mod_seq.
369 * Returns a fresh, unused tree log modification sequence number, even if no new
370 * blocker was added.
371 */
372u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
373 struct seq_list *elem)
374{
375 tree_mod_log_write_lock(fs_info);
376 spin_lock(&fs_info->tree_mod_seq_lock);
377 if (!elem->seq) {
378 elem->seq = btrfs_inc_tree_mod_seq(fs_info);
379 list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
380 }
381 spin_unlock(&fs_info->tree_mod_seq_lock);
382 tree_mod_log_write_unlock(fs_info);
383
384 return elem->seq;
385}
386
387void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
388 struct seq_list *elem)
389{
390 struct rb_root *tm_root;
391 struct rb_node *node;
392 struct rb_node *next;
393 struct seq_list *cur_elem;
394 struct tree_mod_elem *tm;
395 u64 min_seq = (u64)-1;
396 u64 seq_putting = elem->seq;
397
398 if (!seq_putting)
399 return;
400
401 spin_lock(&fs_info->tree_mod_seq_lock);
402 list_del(&elem->list);
403 elem->seq = 0;
404
405 list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
406 if (cur_elem->seq < min_seq) {
407 if (seq_putting > cur_elem->seq) {
408 /*
409 * blocker with lower sequence number exists, we
410 * cannot remove anything from the log
411 */
412 spin_unlock(&fs_info->tree_mod_seq_lock);
413 return;
414 }
415 min_seq = cur_elem->seq;
416 }
417 }
418 spin_unlock(&fs_info->tree_mod_seq_lock);
419
420 /*
421 * anything that's lower than the lowest existing (read: blocked)
422 * sequence number can be removed from the tree.
423 */
424 tree_mod_log_write_lock(fs_info);
425 tm_root = &fs_info->tree_mod_log;
426 for (node = rb_first(tm_root); node; node = next) {
427 next = rb_next(node);
428 tm = container_of(node, struct tree_mod_elem, node);
429 if (tm->seq > min_seq)
430 continue;
431 rb_erase(node, tm_root);
432 kfree(tm);
433 }
434 tree_mod_log_write_unlock(fs_info);
435}
436
437/*
438 * key order of the log:
439 * node/leaf start address -> sequence
440 *
441 * The 'start address' is the logical address of the *new* root node
442 * for root replace operations, or the logical address of the affected
443 * block for all other operations.
444 *
445 * Note: must be called with write lock (tree_mod_log_write_lock).
446 */
447static noinline int
448__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
449{
450 struct rb_root *tm_root;
451 struct rb_node **new;
452 struct rb_node *parent = NULL;
453 struct tree_mod_elem *cur;
454
455 BUG_ON(!tm);
456
457 tm->seq = btrfs_inc_tree_mod_seq(fs_info);
458
459 tm_root = &fs_info->tree_mod_log;
460 new = &tm_root->rb_node;
461 while (*new) {
462 cur = container_of(*new, struct tree_mod_elem, node);
463 parent = *new;
464 if (cur->logical < tm->logical)
465 new = &((*new)->rb_left);
466 else if (cur->logical > tm->logical)
467 new = &((*new)->rb_right);
468 else if (cur->seq < tm->seq)
469 new = &((*new)->rb_left);
470 else if (cur->seq > tm->seq)
471 new = &((*new)->rb_right);
472 else
473 return -EEXIST;
474 }
475
476 rb_link_node(&tm->node, parent, new);
477 rb_insert_color(&tm->node, tm_root);
478 return 0;
479}
480
481/*
482 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
483 * returns zero with the tree_mod_log_lock acquired. The caller must hold
484 * this until all tree mod log insertions are recorded in the rb tree and then
485 * call tree_mod_log_write_unlock() to release.
486 */
487static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
488 struct extent_buffer *eb) {
489 smp_mb();
490 if (list_empty(&(fs_info)->tree_mod_seq_list))
491 return 1;
492 if (eb && btrfs_header_level(eb) == 0)
493 return 1;
494
495 tree_mod_log_write_lock(fs_info);
496 if (list_empty(&(fs_info)->tree_mod_seq_list)) {
497 tree_mod_log_write_unlock(fs_info);
498 return 1;
499 }
500
501 return 0;
502}
503
504/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
505static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
506 struct extent_buffer *eb)
507{
508 smp_mb();
509 if (list_empty(&(fs_info)->tree_mod_seq_list))
510 return 0;
511 if (eb && btrfs_header_level(eb) == 0)
512 return 0;
513
514 return 1;
515}
516
517static struct tree_mod_elem *
518alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
519 enum mod_log_op op, gfp_t flags)
520{
521 struct tree_mod_elem *tm;
522
523 tm = kzalloc(sizeof(*tm), flags);
524 if (!tm)
525 return NULL;
526
527 tm->logical = eb->start;
528 if (op != MOD_LOG_KEY_ADD) {
529 btrfs_node_key(eb, &tm->key, slot);
530 tm->blockptr = btrfs_node_blockptr(eb, slot);
531 }
532 tm->op = op;
533 tm->slot = slot;
534 tm->generation = btrfs_node_ptr_generation(eb, slot);
535 RB_CLEAR_NODE(&tm->node);
536
537 return tm;
538}
539
540static noinline int
541tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
542 struct extent_buffer *eb, int slot,
543 enum mod_log_op op, gfp_t flags)
544{
545 struct tree_mod_elem *tm;
546 int ret;
547
548 if (!tree_mod_need_log(fs_info, eb))
549 return 0;
550
551 tm = alloc_tree_mod_elem(eb, slot, op, flags);
552 if (!tm)
553 return -ENOMEM;
554
555 if (tree_mod_dont_log(fs_info, eb)) {
556 kfree(tm);
557 return 0;
558 }
559
560 ret = __tree_mod_log_insert(fs_info, tm);
561 tree_mod_log_write_unlock(fs_info);
562 if (ret)
563 kfree(tm);
564
565 return ret;
566}
567
568static noinline int
569tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
570 struct extent_buffer *eb, int dst_slot, int src_slot,
571 int nr_items, gfp_t flags)
572{
573 struct tree_mod_elem *tm = NULL;
574 struct tree_mod_elem **tm_list = NULL;
575 int ret = 0;
576 int i;
577 int locked = 0;
578
579 if (!tree_mod_need_log(fs_info, eb))
580 return 0;
581
582 tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), flags);
583 if (!tm_list)
584 return -ENOMEM;
585
586 tm = kzalloc(sizeof(*tm), flags);
587 if (!tm) {
588 ret = -ENOMEM;
589 goto free_tms;
590 }
591
592 tm->logical = eb->start;
593 tm->slot = src_slot;
594 tm->move.dst_slot = dst_slot;
595 tm->move.nr_items = nr_items;
596 tm->op = MOD_LOG_MOVE_KEYS;
597
598 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
599 tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
600 MOD_LOG_KEY_REMOVE_WHILE_MOVING, flags);
601 if (!tm_list[i]) {
602 ret = -ENOMEM;
603 goto free_tms;
604 }
605 }
606
607 if (tree_mod_dont_log(fs_info, eb))
608 goto free_tms;
609 locked = 1;
610
611 /*
612 * When we override something during the move, we log these removals.
613 * This can only happen when we move towards the beginning of the
614 * buffer, i.e. dst_slot < src_slot.
615 */
616 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
617 ret = __tree_mod_log_insert(fs_info, tm_list[i]);
618 if (ret)
619 goto free_tms;
620 }
621
622 ret = __tree_mod_log_insert(fs_info, tm);
623 if (ret)
624 goto free_tms;
625 tree_mod_log_write_unlock(fs_info);
626 kfree(tm_list);
627
628 return 0;
629free_tms:
630 for (i = 0; i < nr_items; i++) {
631 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
632 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
633 kfree(tm_list[i]);
634 }
635 if (locked)
636 tree_mod_log_write_unlock(fs_info);
637 kfree(tm_list);
638 kfree(tm);
639
640 return ret;
641}
642
643static inline int
644__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
645 struct tree_mod_elem **tm_list,
646 int nritems)
647{
648 int i, j;
649 int ret;
650
651 for (i = nritems - 1; i >= 0; i--) {
652 ret = __tree_mod_log_insert(fs_info, tm_list[i]);
653 if (ret) {
654 for (j = nritems - 1; j > i; j--)
655 rb_erase(&tm_list[j]->node,
656 &fs_info->tree_mod_log);
657 return ret;
658 }
659 }
660
661 return 0;
662}
663
664static noinline int
665tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
666 struct extent_buffer *old_root,
667 struct extent_buffer *new_root, gfp_t flags,
668 int log_removal)
669{
670 struct tree_mod_elem *tm = NULL;
671 struct tree_mod_elem **tm_list = NULL;
672 int nritems = 0;
673 int ret = 0;
674 int i;
675
676 if (!tree_mod_need_log(fs_info, NULL))
677 return 0;
678
679 if (log_removal && btrfs_header_level(old_root) > 0) {
680 nritems = btrfs_header_nritems(old_root);
681 tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
682 flags);
683 if (!tm_list) {
684 ret = -ENOMEM;
685 goto free_tms;
686 }
687 for (i = 0; i < nritems; i++) {
688 tm_list[i] = alloc_tree_mod_elem(old_root, i,
689 MOD_LOG_KEY_REMOVE_WHILE_FREEING, flags);
690 if (!tm_list[i]) {
691 ret = -ENOMEM;
692 goto free_tms;
693 }
694 }
695 }
696
697 tm = kzalloc(sizeof(*tm), flags);
698 if (!tm) {
699 ret = -ENOMEM;
700 goto free_tms;
701 }
702
703 tm->logical = new_root->start;
704 tm->old_root.logical = old_root->start;
705 tm->old_root.level = btrfs_header_level(old_root);
706 tm->generation = btrfs_header_generation(old_root);
707 tm->op = MOD_LOG_ROOT_REPLACE;
708
709 if (tree_mod_dont_log(fs_info, NULL))
710 goto free_tms;
711
712 if (tm_list)
713 ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
714 if (!ret)
715 ret = __tree_mod_log_insert(fs_info, tm);
716
717 tree_mod_log_write_unlock(fs_info);
718 if (ret)
719 goto free_tms;
720 kfree(tm_list);
721
722 return ret;
723
724free_tms:
725 if (tm_list) {
726 for (i = 0; i < nritems; i++)
727 kfree(tm_list[i]);
728 kfree(tm_list);
729 }
730 kfree(tm);
731
732 return ret;
733}
734
735static struct tree_mod_elem *
736__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
737 int smallest)
738{
739 struct rb_root *tm_root;
740 struct rb_node *node;
741 struct tree_mod_elem *cur = NULL;
742 struct tree_mod_elem *found = NULL;
743
744 tree_mod_log_read_lock(fs_info);
745 tm_root = &fs_info->tree_mod_log;
746 node = tm_root->rb_node;
747 while (node) {
748 cur = container_of(node, struct tree_mod_elem, node);
749 if (cur->logical < start) {
750 node = node->rb_left;
751 } else if (cur->logical > start) {
752 node = node->rb_right;
753 } else if (cur->seq < min_seq) {
754 node = node->rb_left;
755 } else if (!smallest) {
756 /* we want the node with the highest seq */
757 if (found)
758 BUG_ON(found->seq > cur->seq);
759 found = cur;
760 node = node->rb_left;
761 } else if (cur->seq > min_seq) {
762 /* we want the node with the smallest seq */
763 if (found)
764 BUG_ON(found->seq < cur->seq);
765 found = cur;
766 node = node->rb_right;
767 } else {
768 found = cur;
769 break;
770 }
771 }
772 tree_mod_log_read_unlock(fs_info);
773
774 return found;
775}
776
777/*
778 * this returns the element from the log with the smallest time sequence
779 * value that's in the log (the oldest log item). any element with a time
780 * sequence lower than min_seq will be ignored.
781 */
782static struct tree_mod_elem *
783tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
784 u64 min_seq)
785{
786 return __tree_mod_log_search(fs_info, start, min_seq, 1);
787}
788
789/*
790 * this returns the element from the log with the largest time sequence
791 * value that's in the log (the most recent log item). any element with
792 * a time sequence lower than min_seq will be ignored.
793 */
794static struct tree_mod_elem *
795tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
796{
797 return __tree_mod_log_search(fs_info, start, min_seq, 0);
798}
799
800static noinline int
801tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
802 struct extent_buffer *src, unsigned long dst_offset,
803 unsigned long src_offset, int nr_items)
804{
805 int ret = 0;
806 struct tree_mod_elem **tm_list = NULL;
807 struct tree_mod_elem **tm_list_add, **tm_list_rem;
808 int i;
809 int locked = 0;
810
811 if (!tree_mod_need_log(fs_info, NULL))
812 return 0;
813
814 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
815 return 0;
816
817 tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
818 GFP_NOFS);
819 if (!tm_list)
820 return -ENOMEM;
821
822 tm_list_add = tm_list;
823 tm_list_rem = tm_list + nr_items;
824 for (i = 0; i < nr_items; i++) {
825 tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
826 MOD_LOG_KEY_REMOVE, GFP_NOFS);
827 if (!tm_list_rem[i]) {
828 ret = -ENOMEM;
829 goto free_tms;
830 }
831
832 tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
833 MOD_LOG_KEY_ADD, GFP_NOFS);
834 if (!tm_list_add[i]) {
835 ret = -ENOMEM;
836 goto free_tms;
837 }
838 }
839
840 if (tree_mod_dont_log(fs_info, NULL))
841 goto free_tms;
842 locked = 1;
843
844 for (i = 0; i < nr_items; i++) {
845 ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
846 if (ret)
847 goto free_tms;
848 ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
849 if (ret)
850 goto free_tms;
851 }
852
853 tree_mod_log_write_unlock(fs_info);
854 kfree(tm_list);
855
856 return 0;
857
858free_tms:
859 for (i = 0; i < nr_items * 2; i++) {
860 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
861 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
862 kfree(tm_list[i]);
863 }
864 if (locked)
865 tree_mod_log_write_unlock(fs_info);
866 kfree(tm_list);
867
868 return ret;
869}
870
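/*
 * Log a move of @nr_items key pointers within @dst from @src_offset to
 * @dst_offset. A logging failure here is fatal (BUG_ON), as the mod log
 * would otherwise become inconsistent.
 */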
871static inline void
872tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
873 int dst_offset, int src_offset, int nr_items)
874{
875 int ret;
876 ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
877 nr_items, GFP_NOFS);
878 BUG_ON(ret < 0);
879}
880
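/*
 * Log the replacement of the key in @slot of node @eb. @atomic selects
 * GFP_ATOMIC for callers that must not sleep.
 */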
881static noinline void
882tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
883 struct extent_buffer *eb, int slot, int atomic)
884{
885 int ret;
886
887 ret = tree_mod_log_insert_key(fs_info, eb, slot,
888 MOD_LOG_KEY_REPLACE,
889 atomic ? GFP_ATOMIC : GFP_NOFS);
890 BUG_ON(ret < 0);
891}
892
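/*
 * Log that node @eb is about to be freed by recording every slot as
 * MOD_LOG_KEY_REMOVE_WHILE_FREEING, so older versions of the node can still
 * be reconstructed. Leaves are not tracked by the mod log.
 */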
893static noinline int
894tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
895{
896 struct tree_mod_elem **tm_list = NULL;
897 int nritems = 0;
898 int i;
899 int ret = 0;
900
901 if (btrfs_header_level(eb) == 0)
902 return 0;
903
904 if (!tree_mod_need_log(fs_info, NULL))
905 return 0;
906
907 nritems = btrfs_header_nritems(eb);
908 tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
909 if (!tm_list)
910 return -ENOMEM;
911
912 for (i = 0; i < nritems; i++) {
913 tm_list[i] = alloc_tree_mod_elem(eb, i,
914 MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
915 if (!tm_list[i]) {
916 ret = -ENOMEM;
917 goto free_tms;
918 }
919 }
920
921 if (tree_mod_dont_log(fs_info, eb))
922 goto free_tms;
923
924 ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
925 tree_mod_log_write_unlock(fs_info);
926 if (ret)
927 goto free_tms;
928 kfree(tm_list);
929
930 return 0;
931
932free_tms:
933 for (i = 0; i < nritems; i++)
934 kfree(tm_list[i]);
935 kfree(tm_list);
936
937 return ret;
938}
939
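/*
 * Log that @root's node is being replaced by @new_root_node. @log_removal
 * tells the mod log whether the pointers of the old root also need to be
 * recorded as removed.
 */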
940static noinline void
941tree_mod_log_set_root_pointer(struct btrfs_root *root,
942 struct extent_buffer *new_root_node,
943 int log_removal)
944{
945 int ret;
946 ret = tree_mod_log_insert_root(root->fs_info, root->node,
947 new_root_node, GFP_NOFS, log_removal);
948 BUG_ON(ret < 0);
949}
950
951/*
952 * check if the tree block can be shared by multiple trees
953 */
954int btrfs_block_can_be_shared(struct btrfs_root *root,
955 struct extent_buffer *buf)
956{
957 /*
958	 * Tree blocks not in reference counted trees and tree roots
959 * are never shared. If a block was allocated after the last
960 * snapshot and the block was not allocated by tree relocation,
961 * we know the block is not shared.
962 */
963 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
964 buf != root->node && buf != root->commit_root &&
965 (btrfs_header_generation(buf) <=
966 btrfs_root_last_snapshot(&root->root_item) ||
967 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
968 return 1;
969#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
970 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
971 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
972 return 1;
973#endif
974 return 0;
975}
976
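/*
 * Update extent backrefs when @buf is COWed into @cow: take and drop
 * references on the involved tree blocks according to the rules documented
 * below, and set *last_ref when the original buffer drops its last
 * reference.
 */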
977static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
978 struct btrfs_root *root,
979 struct extent_buffer *buf,
980 struct extent_buffer *cow,
981 int *last_ref)
982{
983 u64 refs;
984 u64 owner;
985 u64 flags;
986 u64 new_flags = 0;
987 int ret;
988
989 /*
990 * Backrefs update rules:
991 *
992 * Always use full backrefs for extent pointers in tree block
993 * allocated by tree relocation.
994 *
995 * If a shared tree block is no longer referenced by its owner
996 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
997 * use full backrefs for extent pointers in tree block.
998 *
999	 * If a tree block is being relocated
1000	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
1001	 * use full backrefs for extent pointers in tree block.
1002	 * The reason for this is that some operations (such as drop tree)
1003	 * are only allowed for blocks that use full backrefs.
1004 */
1005
1006 if (btrfs_block_can_be_shared(root, buf)) {
1007 ret = btrfs_lookup_extent_info(trans, root, buf->start,
1008 btrfs_header_level(buf), 1,
1009 &refs, &flags);
1010 if (ret)
1011 return ret;
1012 if (refs == 0) {
1013 ret = -EROFS;
1014 btrfs_std_error(root->fs_info, ret, NULL);
1015 return ret;
1016 }
1017 } else {
1018 refs = 1;
1019 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1020 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1021 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
1022 else
1023 flags = 0;
1024 }
1025
1026 owner = btrfs_header_owner(buf);
1027 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
1028 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
1029
1030 if (refs > 1) {
1031 if ((owner == root->root_key.objectid ||
1032 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
1033 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
1034 ret = btrfs_inc_ref(trans, root, buf, 1);
1035 BUG_ON(ret); /* -ENOMEM */
1036
1037 if (root->root_key.objectid ==
1038 BTRFS_TREE_RELOC_OBJECTID) {
1039 ret = btrfs_dec_ref(trans, root, buf, 0);
1040 BUG_ON(ret); /* -ENOMEM */
1041 ret = btrfs_inc_ref(trans, root, cow, 1);
1042 BUG_ON(ret); /* -ENOMEM */
1043 }
1044 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
1045 } else {
1046
1047 if (root->root_key.objectid ==
1048 BTRFS_TREE_RELOC_OBJECTID)
1049 ret = btrfs_inc_ref(trans, root, cow, 1);
1050 else
1051 ret = btrfs_inc_ref(trans, root, cow, 0);
1052 BUG_ON(ret); /* -ENOMEM */
1053 }
1054 if (new_flags != 0) {
1055 int level = btrfs_header_level(buf);
1056
1057 ret = btrfs_set_disk_extent_flags(trans, root,
1058 buf->start,
1059 buf->len,
1060 new_flags, level, 0);
1061 if (ret)
1062 return ret;
1063 }
1064 } else {
1065 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
1066 if (root->root_key.objectid ==
1067 BTRFS_TREE_RELOC_OBJECTID)
1068 ret = btrfs_inc_ref(trans, root, cow, 1);
1069 else
1070 ret = btrfs_inc_ref(trans, root, cow, 0);
1071 BUG_ON(ret); /* -ENOMEM */
1072 ret = btrfs_dec_ref(trans, root, buf, 1);
1073 BUG_ON(ret); /* -ENOMEM */
1074 }
1075 clean_tree_block(trans, root->fs_info, buf);
1076 *last_ref = 1;
1077 }
1078 return 0;
1079}
1080
1081/*
1082 * does the dirty work in cow of a single block. The parent block (if
1083 * supplied) is updated to point to the new cow copy. The new buffer is marked
1084 * dirty and returned locked. If you modify the block it needs to be marked
1085 * dirty again.
1086 *
1087 * search_start -- an allocation hint for the new block
1088 *
1089 * empty_size -- a hint that you plan on doing more cow. This is the size in
1090 * bytes the allocator should try to find free next to the block it returns.
1091 * This is just a hint and may be ignored by the allocator.
1092 */
1093static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1094 struct btrfs_root *root,
1095 struct extent_buffer *buf,
1096 struct extent_buffer *parent, int parent_slot,
1097 struct extent_buffer **cow_ret,
1098 u64 search_start, u64 empty_size)
1099{
1100 struct btrfs_disk_key disk_key;
1101 struct extent_buffer *cow;
1102 int level, ret;
1103 int last_ref = 0;
1104 int unlock_orig = 0;
1105 u64 parent_start;
1106
1107 if (*cow_ret == buf)
1108 unlock_orig = 1;
1109
1110 btrfs_assert_tree_locked(buf);
1111
1112 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1113 trans->transid != root->fs_info->running_transaction->transid);
1114 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1115 trans->transid != root->last_trans);
1116
1117 level = btrfs_header_level(buf);
1118
1119 if (level == 0)
1120 btrfs_item_key(buf, &disk_key, 0);
1121 else
1122 btrfs_node_key(buf, &disk_key, 0);
1123
1124 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
1125 if (parent)
1126 parent_start = parent->start;
1127 else
1128 parent_start = 0;
1129 } else
1130 parent_start = 0;
1131
1132 cow = btrfs_alloc_tree_block(trans, root, parent_start,
1133 root->root_key.objectid, &disk_key, level,
1134 search_start, empty_size);
1135 if (IS_ERR(cow))
1136 return PTR_ERR(cow);
1137
1138 /* cow is set to blocking by btrfs_init_new_buffer */
1139
1140 copy_extent_buffer(cow, buf, 0, 0, cow->len);
1141 btrfs_set_header_bytenr(cow, cow->start);
1142 btrfs_set_header_generation(cow, trans->transid);
1143 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
1144 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
1145 BTRFS_HEADER_FLAG_RELOC);
1146 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1147 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
1148 else
1149 btrfs_set_header_owner(cow, root->root_key.objectid);
1150
1151 write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
1152 BTRFS_FSID_SIZE);
1153
1154 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1155 if (ret) {
1156 btrfs_abort_transaction(trans, root, ret);
1157 return ret;
1158 }
1159
1160 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
1161 ret = btrfs_reloc_cow_block(trans, root, buf, cow);
1162 if (ret) {
1163 btrfs_abort_transaction(trans, root, ret);
1164 return ret;
1165 }
1166 }
1167
1168 if (buf == root->node) {
1169 WARN_ON(parent && parent != buf);
1170 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1171 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1172 parent_start = buf->start;
1173 else
1174 parent_start = 0;
1175
1176 extent_buffer_get(cow);
1177 tree_mod_log_set_root_pointer(root, cow, 1);
1178 rcu_assign_pointer(root->node, cow);
1179
1180 btrfs_free_tree_block(trans, root, buf, parent_start,
1181 last_ref);
1182 free_extent_buffer(buf);
1183 add_root_to_dirty_list(root);
1184 } else {
1185 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1186 parent_start = parent->start;
1187 else
1188 parent_start = 0;
1189
1190 WARN_ON(trans->transid != btrfs_header_generation(parent));
1191 tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
1192 MOD_LOG_KEY_REPLACE, GFP_NOFS);
1193 btrfs_set_node_blockptr(parent, parent_slot,
1194 cow->start);
1195 btrfs_set_node_ptr_generation(parent, parent_slot,
1196 trans->transid);
1197 btrfs_mark_buffer_dirty(parent);
1198 if (last_ref) {
1199 ret = tree_mod_log_free_eb(root->fs_info, buf);
1200 if (ret) {
1201 btrfs_abort_transaction(trans, root, ret);
1202 return ret;
1203 }
1204 }
1205 btrfs_free_tree_block(trans, root, buf, parent_start,
1206 last_ref);
1207 }
1208 if (unlock_orig)
1209 btrfs_tree_unlock(buf);
1210 free_extent_buffer_stale(buf);
1211 btrfs_mark_buffer_dirty(cow);
1212 *cow_ret = cow;
1213 return 0;
1214}
1215
1216/*
1217 * returns the logical address of the oldest predecessor of the given root.
1218 * entries older than time_seq are ignored.
1219 */
1220static struct tree_mod_elem *
1221__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
1222 struct extent_buffer *eb_root, u64 time_seq)
1223{
1224 struct tree_mod_elem *tm;
1225 struct tree_mod_elem *found = NULL;
1226 u64 root_logical = eb_root->start;
1227 int looped = 0;
1228
1229 if (!time_seq)
1230 return NULL;
1231
1232 /*
1233 * the very last operation that's logged for a root is the
1234 * replacement operation (if it is replaced at all). this has
1235 * the logical address of the *new* root, making it the very
1236 * first operation that's logged for this root.
1237 */
1238 while (1) {
1239 tm = tree_mod_log_search_oldest(fs_info, root_logical,
1240 time_seq);
1241 if (!looped && !tm)
1242 return NULL;
1243 /*
1244		 * if there are no tree operations for the oldest root, we simply
1245 * return it. this should only happen if that (old) root is at
1246 * level 0.
1247 */
1248 if (!tm)
1249 break;
1250
1251 /*
1252 * if there's an operation that's not a root replacement, we
1253 * found the oldest version of our root. normally, we'll find a
1254 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1255 */
1256 if (tm->op != MOD_LOG_ROOT_REPLACE)
1257 break;
1258
1259 found = tm;
1260 root_logical = tm->old_root.logical;
1261 looped = 1;
1262 }
1263
1264 /* if there's no old root to return, return what we found instead */
1265 if (!found)
1266 found = tm;
1267
1268 return found;
1269}
1270
1271/*
1272 * tm is a pointer to the first operation to rewind within eb. then, all
1273 * previous operations will be rewound (until we reach something older than
1274 * time_seq).
1275 */
1276static void
1277__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1278 u64 time_seq, struct tree_mod_elem *first_tm)
1279{
1280 u32 n;
1281 struct rb_node *next;
1282 struct tree_mod_elem *tm = first_tm;
1283 unsigned long o_dst;
1284 unsigned long o_src;
1285 unsigned long p_size = sizeof(struct btrfs_key_ptr);
1286
1287 n = btrfs_header_nritems(eb);
1288 tree_mod_log_read_lock(fs_info);
1289 while (tm && tm->seq >= time_seq) {
1290 /*
1291 * all the operations are recorded with the operator used for
1292 * the modification. as we're going backwards, we do the
1293 * opposite of each operation here.
1294 */
1295 switch (tm->op) {
1296 case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1297 BUG_ON(tm->slot < n);
1298 /* Fallthrough */
1299 case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1300 case MOD_LOG_KEY_REMOVE:
1301 btrfs_set_node_key(eb, &tm->key, tm->slot);
1302 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1303 btrfs_set_node_ptr_generation(eb, tm->slot,
1304 tm->generation);
1305 n++;
1306 break;
1307 case MOD_LOG_KEY_REPLACE:
1308 BUG_ON(tm->slot >= n);
1309 btrfs_set_node_key(eb, &tm->key, tm->slot);
1310 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1311 btrfs_set_node_ptr_generation(eb, tm->slot,
1312 tm->generation);
1313 break;
1314 case MOD_LOG_KEY_ADD:
1315 /* if a move operation is needed it's in the log */
1316 n--;
1317 break;
1318 case MOD_LOG_MOVE_KEYS:
1319 o_dst = btrfs_node_key_ptr_offset(tm->slot);
1320 o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1321 memmove_extent_buffer(eb, o_dst, o_src,
1322 tm->move.nr_items * p_size);
1323 break;
1324 case MOD_LOG_ROOT_REPLACE:
1325 /*
1326 * this operation is special. for roots, this must be
1327 * handled explicitly before rewinding.
1328 * for non-roots, this operation may exist if the node
1329 * was a root: root A -> child B; then A gets empty and
1330 * B is promoted to the new root. in the mod log, we'll
1331			 * have a root-replace operation for B, a tree block
1332			 * that is no longer a root. we simply ignore that operation.
1333 */
1334 break;
1335 }
1336 next = rb_next(&tm->node);
1337 if (!next)
1338 break;
1339 tm = container_of(next, struct tree_mod_elem, node);
1340 if (tm->logical != first_tm->logical)
1341 break;
1342 }
1343 tree_mod_log_read_unlock(fs_info);
1344 btrfs_set_header_nritems(eb, n);
1345}
1346
1347/*
1348 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1349 * is returned. If rewind operations happen, a fresh buffer is returned. The
1350 * returned buffer is always read-locked. If the returned buffer is not the
1351 * input buffer, the lock on the input buffer is released and the input buffer
1352 * is freed (its refcount is decremented).
1353 */
1354static struct extent_buffer *
1355tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1356 struct extent_buffer *eb, u64 time_seq)
1357{
1358 struct extent_buffer *eb_rewin;
1359 struct tree_mod_elem *tm;
1360
1361 if (!time_seq)
1362 return eb;
1363
1364 if (btrfs_header_level(eb) == 0)
1365 return eb;
1366
1367 tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1368 if (!tm)
1369 return eb;
1370
1371 btrfs_set_path_blocking(path);
1372 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1373
1374 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1375 BUG_ON(tm->slot != 0);
1376 eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
1377 if (!eb_rewin) {
1378 btrfs_tree_read_unlock_blocking(eb);
1379 free_extent_buffer(eb);
1380 return NULL;
1381 }
1382 btrfs_set_header_bytenr(eb_rewin, eb->start);
1383 btrfs_set_header_backref_rev(eb_rewin,
1384 btrfs_header_backref_rev(eb));
1385 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1386 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1387 } else {
1388 eb_rewin = btrfs_clone_extent_buffer(eb);
1389 if (!eb_rewin) {
1390 btrfs_tree_read_unlock_blocking(eb);
1391 free_extent_buffer(eb);
1392 return NULL;
1393 }
1394 }
1395
1396 btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
1397 btrfs_tree_read_unlock_blocking(eb);
1398 free_extent_buffer(eb);
1399
1400 extent_buffer_get(eb_rewin);
1401 btrfs_tree_read_lock(eb_rewin);
1402 __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
1403 WARN_ON(btrfs_header_nritems(eb_rewin) >
1404 BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
1405
1406 return eb_rewin;
1407}
1408
1409/*
1410 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1411 * value. If there are no changes, the current root->root_node is returned. If
1412 * anything changed in between, there's a fresh buffer allocated on which the
1413 * rewind operations are done. In any case, the returned buffer is read locked.
1414 * Returns NULL on error (with no locks held).
1415 */
1416static inline struct extent_buffer *
1417get_old_root(struct btrfs_root *root, u64 time_seq)
1418{
1419 struct tree_mod_elem *tm;
1420 struct extent_buffer *eb = NULL;
1421 struct extent_buffer *eb_root;
1422 struct extent_buffer *old;
1423 struct tree_mod_root *old_root = NULL;
1424 u64 old_generation = 0;
1425 u64 logical;
1426
1427 eb_root = btrfs_read_lock_root_node(root);
1428 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1429 if (!tm)
1430 return eb_root;
1431
1432 if (tm->op == MOD_LOG_ROOT_REPLACE) {
1433 old_root = &tm->old_root;
1434 old_generation = tm->generation;
1435 logical = old_root->logical;
1436 } else {
1437 logical = eb_root->start;
1438 }
1439
1440 tm = tree_mod_log_search(root->fs_info, logical, time_seq);
1441 if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1442 btrfs_tree_read_unlock(eb_root);
1443 free_extent_buffer(eb_root);
1444 old = read_tree_block(root, logical, 0);
1445 if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
1446 if (!IS_ERR(old))
1447 free_extent_buffer(old);
1448 btrfs_warn(root->fs_info,
1449 "failed to read tree block %llu from get_old_root", logical);
1450 } else {
1451 eb = btrfs_clone_extent_buffer(old);
1452 free_extent_buffer(old);
1453 }
1454 } else if (old_root) {
1455 btrfs_tree_read_unlock(eb_root);
1456 free_extent_buffer(eb_root);
1457 eb = alloc_dummy_extent_buffer(root->fs_info, logical);
1458 } else {
1459 btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
1460 eb = btrfs_clone_extent_buffer(eb_root);
1461 btrfs_tree_read_unlock_blocking(eb_root);
1462 free_extent_buffer(eb_root);
1463 }
1464
1465 if (!eb)
1466 return NULL;
1467 extent_buffer_get(eb);
1468 btrfs_tree_read_lock(eb);
1469 if (old_root) {
1470 btrfs_set_header_bytenr(eb, eb->start);
1471 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1472 btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
1473 btrfs_set_header_level(eb, old_root->level);
1474 btrfs_set_header_generation(eb, old_generation);
1475 }
1476 if (tm)
1477 __tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
1478 else
1479 WARN_ON(btrfs_header_level(eb) != 0);
1480 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
1481
1482 return eb;
1483}
1484
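/*
 * Return the level @root had at the time denoted by @time_seq, taking a
 * logged root replacement into account if there is one.
 */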
1485int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1486{
1487 struct tree_mod_elem *tm;
1488 int level;
1489 struct extent_buffer *eb_root = btrfs_root_node(root);
1490
1491 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1492 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1493 level = tm->old_root.level;
1494 } else {
1495 level = btrfs_header_level(eb_root);
1496 }
1497 free_extent_buffer(eb_root);
1498
1499 return level;
1500}
1501
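/*
 * Decide whether @buf must be COWed before it can be modified in @trans.
 * Dummy (test) roots are never COWed; see the rules in the body for the
 * rest.
 */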
1502static inline int should_cow_block(struct btrfs_trans_handle *trans,
1503 struct btrfs_root *root,
1504 struct extent_buffer *buf)
1505{
1506 if (btrfs_test_is_dummy_root(root))
1507 return 0;
1508
1509 /* ensure we can see the force_cow */
1510 smp_rmb();
1511
1512 /*
1513 * We do not need to cow a block if
1514 * 1) this block is not created or changed in this transaction;
1515 * 2) this block does not belong to TREE_RELOC tree;
1516 * 3) the root is not forced COW.
1517 *
1518 * What is forced COW:
1519	 * when we create a snapshot while committing the transaction,
1520	 * after we've finished copying the src root, we must COW the shared
1521	 * block to ensure metadata consistency.
1522 */
1523 if (btrfs_header_generation(buf) == trans->transid &&
1524 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1525 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1526 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1527 !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
1528 return 0;
1529 return 1;
1530}
1531
1532/*
1533 * cows a single block, see __btrfs_cow_block for the real work.
1534 * This version of it has extra checks so that a block isn't cow'd more than
1535 * once per transaction, as long as it hasn't been written yet.
1536 */
1537noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1538 struct btrfs_root *root, struct extent_buffer *buf,
1539 struct extent_buffer *parent, int parent_slot,
1540 struct extent_buffer **cow_ret)
1541{
1542 u64 search_start;
1543 int ret;
1544
1545 if (trans->transaction != root->fs_info->running_transaction)
1546 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1547 trans->transid,
1548 root->fs_info->running_transaction->transid);
1549
1550 if (trans->transid != root->fs_info->generation)
1551 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1552 trans->transid, root->fs_info->generation);
1553
1554 if (!should_cow_block(trans, root, buf)) {
1555 *cow_ret = buf;
1556 return 0;
1557 }
1558
1559 search_start = buf->start & ~((u64)SZ_1G - 1);
1560
1561 if (parent)
1562 btrfs_set_lock_blocking(parent);
1563 btrfs_set_lock_blocking(buf);
1564
1565 ret = __btrfs_cow_block(trans, root, buf, parent,
1566 parent_slot, cow_ret, search_start, 0);
1567
1568 trace_btrfs_cow_block(root, buf, *cow_ret);
1569
1570 return ret;
1571}
1572
1573/*
1574 * helper function for defrag to decide if two blocks pointed to by a
1575 * node are actually close by
1576 */
1577static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1578{
1579 if (blocknr < other && other - (blocknr + blocksize) < 32768)
1580 return 1;
1581 if (blocknr > other && blocknr - (other + blocksize) < 32768)
1582 return 1;
1583 return 0;
1584}
1585
1586/*
1587 * compare two keys in a memcmp fashion
1588 */
1589static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
1590{
1591 struct btrfs_key k1;
1592
1593 btrfs_disk_key_to_cpu(&k1, disk);
1594
1595 return btrfs_comp_cpu_keys(&k1, k2);
1596}
1597
1598/*
1599 * same as comp_keys only with two btrfs_key's
1600 */
1601int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
1602{
1603 if (k1->objectid > k2->objectid)
1604 return 1;
1605 if (k1->objectid < k2->objectid)
1606 return -1;
1607 if (k1->type > k2->type)
1608 return 1;
1609 if (k1->type < k2->type)
1610 return -1;
1611 if (k1->offset > k2->offset)
1612 return 1;
1613 if (k1->offset < k2->offset)
1614 return -1;
1615 return 0;
1616}
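
/*
 * Keys therefore sort by (objectid, type, offset). For example (values are
 * illustrative only): (256, BTRFS_INODE_ITEM_KEY, 0) sorts before
 * (256, BTRFS_DIR_ITEM_KEY, hash) because the inode item type is smaller,
 * and any key with objectid 255 sorts before both.
 */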
1617
1618/*
1619 * this is used by the defrag code to go through all the
1620 * leaves pointed to by a node and reallocate them so that
1621 * disk order is close to key order
1622 */
1623int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1624 struct btrfs_root *root, struct extent_buffer *parent,
1625 int start_slot, u64 *last_ret,
1626 struct btrfs_key *progress)
1627{
1628 struct extent_buffer *cur;
1629 u64 blocknr;
1630 u64 gen;
1631 u64 search_start = *last_ret;
1632 u64 last_block = 0;
1633 u64 other;
1634 u32 parent_nritems;
1635 int end_slot;
1636 int i;
1637 int err = 0;
1638 int parent_level;
1639 int uptodate;
1640 u32 blocksize;
1641 int progress_passed = 0;
1642 struct btrfs_disk_key disk_key;
1643
1644 parent_level = btrfs_header_level(parent);
1645
1646 WARN_ON(trans->transaction != root->fs_info->running_transaction);
1647 WARN_ON(trans->transid != root->fs_info->generation);
1648
1649 parent_nritems = btrfs_header_nritems(parent);
1650 blocksize = root->nodesize;
1651 end_slot = parent_nritems - 1;
1652
1653 if (parent_nritems <= 1)
1654 return 0;
1655
1656 btrfs_set_lock_blocking(parent);
1657
1658 for (i = start_slot; i <= end_slot; i++) {
1659 int close = 1;
1660
1661 btrfs_node_key(parent, &disk_key, i);
1662 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1663 continue;
1664
1665 progress_passed = 1;
1666 blocknr = btrfs_node_blockptr(parent, i);
1667 gen = btrfs_node_ptr_generation(parent, i);
1668 if (last_block == 0)
1669 last_block = blocknr;
1670
1671 if (i > 0) {
1672 other = btrfs_node_blockptr(parent, i - 1);
1673 close = close_blocks(blocknr, other, blocksize);
1674 }
1675 if (!close && i < end_slot) {
1676 other = btrfs_node_blockptr(parent, i + 1);
1677 close = close_blocks(blocknr, other, blocksize);
1678 }
1679 if (close) {
1680 last_block = blocknr;
1681 continue;
1682 }
1683
1684 cur = btrfs_find_tree_block(root->fs_info, blocknr);
1685 if (cur)
1686 uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1687 else
1688 uptodate = 0;
1689 if (!cur || !uptodate) {
1690 if (!cur) {
1691 cur = read_tree_block(root, blocknr, gen);
1692 if (IS_ERR(cur)) {
1693 return PTR_ERR(cur);
1694 } else if (!extent_buffer_uptodate(cur)) {
1695 free_extent_buffer(cur);
1696 return -EIO;
1697 }
1698 } else if (!uptodate) {
1699 err = btrfs_read_buffer(cur, gen);
1700 if (err) {
1701 free_extent_buffer(cur);
1702 return err;
1703 }
1704 }
1705 }
1706 if (search_start == 0)
1707 search_start = last_block;
1708
1709 btrfs_tree_lock(cur);
1710 btrfs_set_lock_blocking(cur);
1711 err = __btrfs_cow_block(trans, root, cur, parent, i,
1712 &cur, search_start,
1713 min(16 * blocksize,
1714 (end_slot - i) * blocksize));
1715 if (err) {
1716 btrfs_tree_unlock(cur);
1717 free_extent_buffer(cur);
1718 break;
1719 }
1720 search_start = cur->start;
1721 last_block = cur->start;
1722 *last_ret = search_start;
1723 btrfs_tree_unlock(cur);
1724 free_extent_buffer(cur);
1725 }
1726 return err;
1727}
1728
1729/*
1730 * The leaf data grows from end-to-front in the node.
1731 * this returns the address of the start of the last item,
1732 * which is the stop of the leaf data stack
1733 */
1734static inline unsigned int leaf_data_end(struct btrfs_root *root,
1735 struct extent_buffer *leaf)
1736{
1737 u32 nr = btrfs_header_nritems(leaf);
1738 if (nr == 0)
1739 return BTRFS_LEAF_DATA_SIZE(root);
1740 return btrfs_item_offset_nr(leaf, nr - 1);
1741}
1742
1743
1744/*
1745 * search for key in the extent_buffer. The items start at offset p,
1746 * and they are item_size apart. There are 'max' items in p.
1747 *
1748 * the slot in the array is returned via slot, and it points to
1749 * the place where you would insert key if it is not found in
1750 * the array.
1751 *
1752 * slot may point to max if the key is bigger than all of the keys
1753 */
1754static noinline int generic_bin_search(struct extent_buffer *eb,
1755 unsigned long p,
1756 int item_size, struct btrfs_key *key,
1757 int max, int *slot)
1758{
1759 int low = 0;
1760 int high = max;
1761 int mid;
1762 int ret;
1763 struct btrfs_disk_key *tmp = NULL;
1764 struct btrfs_disk_key unaligned;
1765 unsigned long offset;
1766 char *kaddr = NULL;
1767 unsigned long map_start = 0;
1768 unsigned long map_len = 0;
1769 int err;
1770
1771 while (low < high) {
1772 mid = (low + high) / 2;
1773 offset = p + mid * item_size;
1774
1775 if (!kaddr || offset < map_start ||
1776 (offset + sizeof(struct btrfs_disk_key)) >
1777 map_start + map_len) {
1778
1779 err = map_private_extent_buffer(eb, offset,
1780 sizeof(struct btrfs_disk_key),
1781 &kaddr, &map_start, &map_len);
1782
1783 if (!err) {
1784 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1785 map_start);
1786 } else {
1787 read_extent_buffer(eb, &unaligned,
1788 offset, sizeof(unaligned));
1789 tmp = &unaligned;
1790 }
1791
1792 } else {
1793 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1794 map_start);
1795 }
1796 ret = comp_keys(tmp, key);
1797
1798 if (ret < 0)
1799 low = mid + 1;
1800 else if (ret > 0)
1801 high = mid;
1802 else {
1803 *slot = mid;
1804 return 0;
1805 }
1806 }
1807 *slot = low;
1808 return 1;
1809}
1810
1811/*
1812 * simple bin_search frontend that does the right thing for
1813 * leaves vs nodes
1814 */
1815static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1816 int level, int *slot)
1817{
1818 if (level == 0)
1819 return generic_bin_search(eb,
1820 offsetof(struct btrfs_leaf, items),
1821 sizeof(struct btrfs_item),
1822 key, btrfs_header_nritems(eb),
1823 slot);
1824 else
1825 return generic_bin_search(eb,
1826 offsetof(struct btrfs_node, ptrs),
1827 sizeof(struct btrfs_key_ptr),
1828 key, btrfs_header_nritems(eb),
1829 slot);
1830}
1831
1832int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1833 int level, int *slot)
1834{
1835 return bin_search(eb, key, level, slot);
1836}
1837
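/*
 * root_add_used()/root_sub_used() adjust the bytes-used accounting of the
 * root item as tree blocks are allocated and freed, serialized by
 * root->accounting_lock.
 */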
1838static void root_add_used(struct btrfs_root *root, u32 size)
1839{
1840 spin_lock(&root->accounting_lock);
1841 btrfs_set_root_used(&root->root_item,
1842 btrfs_root_used(&root->root_item) + size);
1843 spin_unlock(&root->accounting_lock);
1844}
1845
1846static void root_sub_used(struct btrfs_root *root, u32 size)
1847{
1848 spin_lock(&root->accounting_lock);
1849 btrfs_set_root_used(&root->root_item,
1850 btrfs_root_used(&root->root_item) - size);
1851 spin_unlock(&root->accounting_lock);
1852}
1853
1854/* given a node and slot number, this reads the block it points to. The
1855 * extent buffer is returned with a reference taken (but unlocked).
1856 * NULL is returned on error.
1857 */
1858static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
1859 struct extent_buffer *parent, int slot)
1860{
1861 int level = btrfs_header_level(parent);
1862 struct extent_buffer *eb;
1863
1864 if (slot < 0)
1865 return NULL;
1866 if (slot >= btrfs_header_nritems(parent))
1867 return NULL;
1868
1869 BUG_ON(level == 0);
1870
1871 eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
1872 btrfs_node_ptr_generation(parent, slot));
1873 if (IS_ERR(eb) || !extent_buffer_uptodate(eb)) {
1874 if (!IS_ERR(eb))
1875 free_extent_buffer(eb);
1876 eb = NULL;
1877 }
1878
1879 return eb;
1880}
1881
1882/*
1883 * node level balancing, used to make sure nodes are in proper order for
1884 * item deletion. We balance from the top down, so we have to make sure
1885 * that a deletion won't leave a node completely empty later on.
1886 */
1887static noinline int balance_level(struct btrfs_trans_handle *trans,
1888 struct btrfs_root *root,
1889 struct btrfs_path *path, int level)
1890{
1891 struct extent_buffer *right = NULL;
1892 struct extent_buffer *mid;
1893 struct extent_buffer *left = NULL;
1894 struct extent_buffer *parent = NULL;
1895 int ret = 0;
1896 int wret;
1897 int pslot;
1898 int orig_slot = path->slots[level];
1899 u64 orig_ptr;
1900
1901 if (level == 0)
1902 return 0;
1903
1904 mid = path->nodes[level];
1905
1906 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1907 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1908 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1909
1910 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1911
1912 if (level < BTRFS_MAX_LEVEL - 1) {
1913 parent = path->nodes[level + 1];
1914 pslot = path->slots[level + 1];
1915 }
1916
1917 /*
1918 * deal with the case where there is only one pointer in the root
1919 * by promoting the node below to a root
1920 */
1921 if (!parent) {
1922 struct extent_buffer *child;
1923
1924 if (btrfs_header_nritems(mid) != 1)
1925 return 0;
1926
1927 /* promote the child to a root */
1928 child = read_node_slot(root, mid, 0);
1929 if (!child) {
1930 ret = -EROFS;
1931 btrfs_std_error(root->fs_info, ret, NULL);
1932 goto enospc;
1933 }
1934
1935 btrfs_tree_lock(child);
1936 btrfs_set_lock_blocking(child);
1937 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1938 if (ret) {
1939 btrfs_tree_unlock(child);
1940 free_extent_buffer(child);
1941 goto enospc;
1942 }
1943
1944 tree_mod_log_set_root_pointer(root, child, 1);
1945 rcu_assign_pointer(root->node, child);
1946
1947 add_root_to_dirty_list(root);
1948 btrfs_tree_unlock(child);
1949
1950 path->locks[level] = 0;
1951 path->nodes[level] = NULL;
1952 clean_tree_block(trans, root->fs_info, mid);
1953 btrfs_tree_unlock(mid);
1954 /* once for the path */
1955 free_extent_buffer(mid);
1956
1957 root_sub_used(root, mid->len);
1958 btrfs_free_tree_block(trans, root, mid, 0, 1);
1959 /* once for the root ptr */
1960 free_extent_buffer_stale(mid);
1961 return 0;
1962 }
1963 if (btrfs_header_nritems(mid) >
1964 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1965 return 0;
1966
1967 left = read_node_slot(root, parent, pslot - 1);
1968 if (left) {
1969 btrfs_tree_lock(left);
1970 btrfs_set_lock_blocking(left);
1971 wret = btrfs_cow_block(trans, root, left,
1972 parent, pslot - 1, &left);
1973 if (wret) {
1974 ret = wret;
1975 goto enospc;
1976 }
1977 }
1978 right = read_node_slot(root, parent, pslot + 1);
1979 if (right) {
1980 btrfs_tree_lock(right);
1981 btrfs_set_lock_blocking(right);
1982 wret = btrfs_cow_block(trans, root, right,
1983 parent, pslot + 1, &right);
1984 if (wret) {
1985 ret = wret;
1986 goto enospc;
1987 }
1988 }
1989
1990 /* first, try to make some room in the middle buffer */
1991 if (left) {
1992 orig_slot += btrfs_header_nritems(left);
1993 wret = push_node_left(trans, root, left, mid, 1);
1994 if (wret < 0)
1995 ret = wret;
1996 }
1997
1998 /*
1999 * then try to empty the right most buffer into the middle
2000 */
2001 if (right) {
2002 wret = push_node_left(trans, root, mid, right, 1);
2003 if (wret < 0 && wret != -ENOSPC)
2004 ret = wret;
2005 if (btrfs_header_nritems(right) == 0) {
2006 clean_tree_block(trans, root->fs_info, right);
2007 btrfs_tree_unlock(right);
2008 del_ptr(root, path, level + 1, pslot + 1);
2009 root_sub_used(root, right->len);
2010 btrfs_free_tree_block(trans, root, right, 0, 1);
2011 free_extent_buffer_stale(right);
2012 right = NULL;
2013 } else {
2014 struct btrfs_disk_key right_key;
2015 btrfs_node_key(right, &right_key, 0);
2016 tree_mod_log_set_node_key(root->fs_info, parent,
2017 pslot + 1, 0);
2018 btrfs_set_node_key(parent, &right_key, pslot + 1);
2019 btrfs_mark_buffer_dirty(parent);
2020 }
2021 }
2022 if (btrfs_header_nritems(mid) == 1) {
2023 /*
2024 * we're not allowed to leave a node with one item in the
2025 * tree during a delete. A deletion from lower in the tree
2026 * could try to delete the only pointer in this node.
2027 * So, pull some keys from the left.
2028 * There has to be a left pointer at this point because
2029 * otherwise we would have pulled some pointers from the
2030 * right
2031 */
2032 if (!left) {
2033 ret = -EROFS;
2034 btrfs_std_error(root->fs_info, ret, NULL);
2035 goto enospc;
2036 }
2037 wret = balance_node_right(trans, root, mid, left);
2038 if (wret < 0) {
2039 ret = wret;
2040 goto enospc;
2041 }
2042 if (wret == 1) {
2043 wret = push_node_left(trans, root, left, mid, 1);
2044 if (wret < 0)
2045 ret = wret;
2046 }
2047 BUG_ON(wret == 1);
2048 }
2049 if (btrfs_header_nritems(mid) == 0) {
2050 clean_tree_block(trans, root->fs_info, mid);
2051 btrfs_tree_unlock(mid);
2052 del_ptr(root, path, level + 1, pslot);
2053 root_sub_used(root, mid->len);
2054 btrfs_free_tree_block(trans, root, mid, 0, 1);
2055 free_extent_buffer_stale(mid);
2056 mid = NULL;
2057 } else {
2058 /* update the parent key to reflect our changes */
2059 struct btrfs_disk_key mid_key;
2060 btrfs_node_key(mid, &mid_key, 0);
2061 tree_mod_log_set_node_key(root->fs_info, parent,
2062 pslot, 0);
2063 btrfs_set_node_key(parent, &mid_key, pslot);
2064 btrfs_mark_buffer_dirty(parent);
2065 }
2066
2067 /* update the path */
2068 if (left) {
2069 if (btrfs_header_nritems(left) > orig_slot) {
2070 extent_buffer_get(left);
2071 /* left was locked after cow */
2072 path->nodes[level] = left;
2073 path->slots[level + 1] -= 1;
2074 path->slots[level] = orig_slot;
2075 if (mid) {
2076 btrfs_tree_unlock(mid);
2077 free_extent_buffer(mid);
2078 }
2079 } else {
2080 orig_slot -= btrfs_header_nritems(left);
2081 path->slots[level] = orig_slot;
2082 }
2083 }
2084 /* double check we haven't messed things up */
2085 if (orig_ptr !=
2086 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
2087 BUG();
2088enospc:
2089 if (right) {
2090 btrfs_tree_unlock(right);
2091 free_extent_buffer(right);
2092 }
2093 if (left) {
2094 if (path->nodes[level] != left)
2095 btrfs_tree_unlock(left);
2096 free_extent_buffer(left);
2097 }
2098 return ret;
2099}
2100
2101/* Node balancing for insertion. Here we only split or push nodes around
2102 * when they are completely full. This is also done top down, so we
2103 * have to be pessimistic.
2104 */
2105static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2106 struct btrfs_root *root,
2107 struct btrfs_path *path, int level)
2108{
2109 struct extent_buffer *right = NULL;
2110 struct extent_buffer *mid;
2111 struct extent_buffer *left = NULL;
2112 struct extent_buffer *parent = NULL;
2113 int ret = 0;
2114 int wret;
2115 int pslot;
2116 int orig_slot = path->slots[level];
2117
2118 if (level == 0)
2119 return 1;
2120
2121 mid = path->nodes[level];
2122 WARN_ON(btrfs_header_generation(mid) != trans->transid);
2123
2124 if (level < BTRFS_MAX_LEVEL - 1) {
2125 parent = path->nodes[level + 1];
2126 pslot = path->slots[level + 1];
2127 }
2128
2129 if (!parent)
2130 return 1;
2131
2132 left = read_node_slot(root, parent, pslot - 1);
2133
2134 /* first, try to make some room in the middle buffer */
2135 if (left) {
2136 u32 left_nr;
2137
2138 btrfs_tree_lock(left);
2139 btrfs_set_lock_blocking(left);
2140
2141 left_nr = btrfs_header_nritems(left);
2142 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2143 wret = 1;
2144 } else {
2145 ret = btrfs_cow_block(trans, root, left, parent,
2146 pslot - 1, &left);
2147 if (ret)
2148 wret = 1;
2149 else {
2150 wret = push_node_left(trans, root,
2151 left, mid, 0);
2152 }
2153 }
2154 if (wret < 0)
2155 ret = wret;
2156 if (wret == 0) {
2157 struct btrfs_disk_key disk_key;
2158 orig_slot += left_nr;
2159 btrfs_node_key(mid, &disk_key, 0);
2160 tree_mod_log_set_node_key(root->fs_info, parent,
2161 pslot, 0);
2162 btrfs_set_node_key(parent, &disk_key, pslot);
2163 btrfs_mark_buffer_dirty(parent);
2164 if (btrfs_header_nritems(left) > orig_slot) {
2165 path->nodes[level] = left;
2166 path->slots[level + 1] -= 1;
2167 path->slots[level] = orig_slot;
2168 btrfs_tree_unlock(mid);
2169 free_extent_buffer(mid);
2170 } else {
2171 orig_slot -=
2172 btrfs_header_nritems(left);
2173 path->slots[level] = orig_slot;
2174 btrfs_tree_unlock(left);
2175 free_extent_buffer(left);
2176 }
2177 return 0;
2178 }
2179 btrfs_tree_unlock(left);
2180 free_extent_buffer(left);
2181 }
2182 right = read_node_slot(root, parent, pslot + 1);
2183
2184 /*
2185 * then try to empty the right most buffer into the middle
2186 */
2187 if (right) {
2188 u32 right_nr;
2189
2190 btrfs_tree_lock(right);
2191 btrfs_set_lock_blocking(right);
2192
2193 right_nr = btrfs_header_nritems(right);
2194 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2195 wret = 1;
2196 } else {
2197 ret = btrfs_cow_block(trans, root, right,
2198 parent, pslot + 1,
2199 &right);
2200 if (ret)
2201 wret = 1;
2202 else {
2203 wret = balance_node_right(trans, root,
2204 right, mid);
2205 }
2206 }
2207 if (wret < 0)
2208 ret = wret;
2209 if (wret == 0) {
2210 struct btrfs_disk_key disk_key;
2211
2212 btrfs_node_key(right, &disk_key, 0);
2213 tree_mod_log_set_node_key(root->fs_info, parent,
2214 pslot + 1, 0);
2215 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2216 btrfs_mark_buffer_dirty(parent);
2217
2218 if (btrfs_header_nritems(mid) <= orig_slot) {
2219 path->nodes[level] = right;
2220 path->slots[level + 1] += 1;
2221 path->slots[level] = orig_slot -
2222 btrfs_header_nritems(mid);
2223 btrfs_tree_unlock(mid);
2224 free_extent_buffer(mid);
2225 } else {
2226 btrfs_tree_unlock(right);
2227 free_extent_buffer(right);
2228 }
2229 return 0;
2230 }
2231 btrfs_tree_unlock(right);
2232 free_extent_buffer(right);
2233 }
2234 return 1;
2235}
2236
2237/*
2238 * readahead one full node of leaves, finding things that are close
2239 * to the block in 'slot', and triggering ra on them.
2240 */
2241static void reada_for_search(struct btrfs_root *root,
2242 struct btrfs_path *path,
2243 int level, int slot, u64 objectid)
2244{
2245 struct extent_buffer *node;
2246 struct btrfs_disk_key disk_key;
2247 u32 nritems;
2248 u64 search;
2249 u64 target;
2250 u64 nread = 0;
2251 u64 gen;
2252 struct extent_buffer *eb;
2253 u32 nr;
2254 u32 blocksize;
2255 u32 nscan = 0;
2256
2257 if (level != 1)
2258 return;
2259
2260 if (!path->nodes[level])
2261 return;
2262
2263 node = path->nodes[level];
2264
2265 search = btrfs_node_blockptr(node, slot);
2266 blocksize = root->nodesize;
2267 eb = btrfs_find_tree_block(root->fs_info, search);
2268 if (eb) {
2269 free_extent_buffer(eb);
2270 return;
2271 }
2272
2273 target = search;
2274
2275 nritems = btrfs_header_nritems(node);
2276 nr = slot;
2277
2278 while (1) {
2279 if (path->reada == READA_BACK) {
2280 if (nr == 0)
2281 break;
2282 nr--;
2283 } else if (path->reada == READA_FORWARD) {
2284 nr++;
2285 if (nr >= nritems)
2286 break;
2287 }
2288 if (path->reada == READA_BACK && objectid) {
2289 btrfs_node_key(node, &disk_key, nr);
2290 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2291 break;
2292 }
2293 search = btrfs_node_blockptr(node, nr);
2294 if ((search <= target && target - search <= 65536) ||
2295 (search > target && search - target <= 65536)) {
2296 gen = btrfs_node_ptr_generation(node, nr);
2297 readahead_tree_block(root, search);
2298 nread += blocksize;
2299 }
2300 nscan++;
2301 if ((nread > 65536 || nscan > 32))
2302 break;
2303 }
2304}
2305
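/*
 * Readahead the left and right siblings of path->nodes[level] (slots - 1 and
 * + 1 in the parent), so that a following balance or split does not stall on
 * reads.
 */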
2306static noinline void reada_for_balance(struct btrfs_root *root,
2307 struct btrfs_path *path, int level)
2308{
2309 int slot;
2310 int nritems;
2311 struct extent_buffer *parent;
2312 struct extent_buffer *eb;
2313 u64 gen;
2314 u64 block1 = 0;
2315 u64 block2 = 0;
2316
2317 parent = path->nodes[level + 1];
2318 if (!parent)
2319 return;
2320
2321 nritems = btrfs_header_nritems(parent);
2322 slot = path->slots[level + 1];
2323
2324 if (slot > 0) {
2325 block1 = btrfs_node_blockptr(parent, slot - 1);
2326 gen = btrfs_node_ptr_generation(parent, slot - 1);
2327 eb = btrfs_find_tree_block(root->fs_info, block1);
2328 /*
2329		 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2330		 * don't want to return -EAGAIN here. That will loop
2331		 * forever.
2332 */
2333 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2334 block1 = 0;
2335 free_extent_buffer(eb);
2336 }
2337 if (slot + 1 < nritems) {
2338 block2 = btrfs_node_blockptr(parent, slot + 1);
2339 gen = btrfs_node_ptr_generation(parent, slot + 1);
2340 eb = btrfs_find_tree_block(root->fs_info, block2);
2341 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2342 block2 = 0;
2343 free_extent_buffer(eb);
2344 }
2345
2346 if (block1)
2347 readahead_tree_block(root, block1);
2348 if (block2)
2349 readahead_tree_block(root, block2);
2350}
2351
2352
2353/*
2354 * when we walk down the tree, it is usually safe to unlock the higher layers
2355 * in the tree. The exceptions are when our path goes through slot 0, because
2356 * operations on the tree might require changing key pointers higher up in the
2357 * tree.
2358 *
2359 * callers might also have set path->keep_locks, which tells this code to keep
2360 * the lock if the path points to the last slot in the block. This is part of
2361 * walking through the tree, and selecting the next slot in the higher block.
2362 *
2363 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2364 * if lowest_unlock is 1, level 0 won't be unlocked
2365 */
2366static noinline void unlock_up(struct btrfs_path *path, int level,
2367 int lowest_unlock, int min_write_lock_level,
2368 int *write_lock_level)
2369{
2370 int i;
2371 int skip_level = level;
2372 int no_skips = 0;
2373 struct extent_buffer *t;
2374
2375 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2376 if (!path->nodes[i])
2377 break;
2378 if (!path->locks[i])
2379 break;
2380 if (!no_skips && path->slots[i] == 0) {
2381 skip_level = i + 1;
2382 continue;
2383 }
2384 if (!no_skips && path->keep_locks) {
2385 u32 nritems;
2386 t = path->nodes[i];
2387 nritems = btrfs_header_nritems(t);
2388 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2389 skip_level = i + 1;
2390 continue;
2391 }
2392 }
2393 if (skip_level < i && i >= lowest_unlock)
2394 no_skips = 1;
2395
2396 t = path->nodes[i];
2397 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2398 btrfs_tree_unlock_rw(t, path->locks[i]);
2399 path->locks[i] = 0;
2400 if (write_lock_level &&
2401 i > min_write_lock_level &&
2402 i <= *write_lock_level) {
2403 *write_lock_level = i - 1;
2404 }
2405 }
2406 }
2407}
2408
2409/*
2410 * This releases any locks held in the path starting at level and
2411 * going all the way up to the root.
2412 *
2413 * btrfs_search_slot will keep the lock held on higher nodes in a few
2414 * corner cases, such as COW of the block at slot zero in the node. This
2415 * ignores those rules, and it should only be called when there are no
2416 * more updates to be done higher up in the tree.
2417 */
2418noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2419{
2420 int i;
2421
2422 if (path->keep_locks)
2423 return;
2424
2425 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2426 if (!path->nodes[i])
2427 continue;
2428 if (!path->locks[i])
2429 continue;
2430 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2431 path->locks[i] = 0;
2432 }
2433}
2434
2435/*
2436 * helper function for btrfs_search_slot. The goal is to find a block
2437 * in cache without setting the path to blocking. If we find the block
2438 * we return zero and the path is unchanged.
2439 *
2440 * If we can't find the block, we set the path blocking and do some
2441 * reada. -EAGAIN is returned and the search must be repeated.
2442 */
2443static int
2444read_block_for_search(struct btrfs_trans_handle *trans,
2445 struct btrfs_root *root, struct btrfs_path *p,
2446 struct extent_buffer **eb_ret, int level, int slot,
2447 struct btrfs_key *key, u64 time_seq)
2448{
2449 u64 blocknr;
2450 u64 gen;
2451 struct extent_buffer *b = *eb_ret;
2452 struct extent_buffer *tmp;
2453 int ret;
2454
2455 blocknr = btrfs_node_blockptr(b, slot);
2456 gen = btrfs_node_ptr_generation(b, slot);
2457
2458 tmp = btrfs_find_tree_block(root->fs_info, blocknr);
2459 if (tmp) {
2460 /* first we do an atomic uptodate check */
2461 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2462 *eb_ret = tmp;
2463 return 0;
2464 }
2465
2466 /* the pages were up to date, but we failed
2467 * the generation number check. Do a full
2468 * read for the generation number that is correct.
2469 * We must do this without dropping locks so
2470 * we can trust our generation number
2471 */
2472 btrfs_set_path_blocking(p);
2473
2474 /* now we're allowed to do a blocking uptodate check */
2475 ret = btrfs_read_buffer(tmp, gen);
2476 if (!ret) {
2477 *eb_ret = tmp;
2478 return 0;
2479 }
2480 free_extent_buffer(tmp);
2481 btrfs_release_path(p);
2482 return -EIO;
2483 }
2484
2485 /*
2486 * reduce lock contention at high levels
2487 * of the btree by dropping locks before
2488 * we read. Don't release the lock on the current
2489 * level because we need to walk this node to figure
2490 * out which blocks to read.
2491 */
2492 btrfs_unlock_up_safe(p, level + 1);
2493 btrfs_set_path_blocking(p);
2494
2495 free_extent_buffer(tmp);
2496 if (p->reada != READA_NONE)
2497 reada_for_search(root, p, level, slot, key->objectid);
2498
2499 btrfs_release_path(p);
2500
2501 ret = -EAGAIN;
2502 tmp = read_tree_block(root, blocknr, 0);
2503 if (!IS_ERR(tmp)) {
2504 /*
2505 * If the read above didn't mark this buffer up to date,
2506 * it will never end up being up to date. Set ret to EIO now
2507 * and give up so that our caller doesn't loop forever
2508 * on our EAGAINs.
2509 */
2510 if (!btrfs_buffer_uptodate(tmp, 0, 0))
2511 ret = -EIO;
2512 free_extent_buffer(tmp);
2513 }
2514 return ret;
2515}
2516
2517/*
2518 * helper function for btrfs_search_slot. This does all of the checks
2519 * for node-level blocks and does any balancing required based on
2520 * the ins_len.
2521 *
2522 * If no extra work was required, zero is returned. If we had to
2523 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2524 * start over
2525 */
2526static int
2527setup_nodes_for_search(struct btrfs_trans_handle *trans,
2528 struct btrfs_root *root, struct btrfs_path *p,
2529 struct extent_buffer *b, int level, int ins_len,
2530 int *write_lock_level)
2531{
2532 int ret;
2533 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2534 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2535 int sret;
2536
2537 if (*write_lock_level < level + 1) {
2538 *write_lock_level = level + 1;
2539 btrfs_release_path(p);
2540 goto again;
2541 }
2542
2543 btrfs_set_path_blocking(p);
2544 reada_for_balance(root, p, level);
2545 sret = split_node(trans, root, p, level);
2546 btrfs_clear_path_blocking(p, NULL, 0);
2547
2548 BUG_ON(sret > 0);
2549 if (sret) {
2550 ret = sret;
2551 goto done;
2552 }
2553 b = p->nodes[level];
2554 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2555 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
2556 int sret;
2557
2558 if (*write_lock_level < level + 1) {
2559 *write_lock_level = level + 1;
2560 btrfs_release_path(p);
2561 goto again;
2562 }
2563
2564 btrfs_set_path_blocking(p);
2565 reada_for_balance(root, p, level);
2566 sret = balance_level(trans, root, p, level);
2567 btrfs_clear_path_blocking(p, NULL, 0);
2568
2569 if (sret) {
2570 ret = sret;
2571 goto done;
2572 }
2573 b = p->nodes[level];
2574 if (!b) {
2575 btrfs_release_path(p);
2576 goto again;
2577 }
2578 BUG_ON(btrfs_header_nritems(b) == 1);
2579 }
2580 return 0;
2581
2582again:
2583 ret = -EAGAIN;
2584done:
2585 return ret;
2586}
2587
2588static void key_search_validate(struct extent_buffer *b,
2589 struct btrfs_key *key,
2590 int level)
2591{
2592#ifdef CONFIG_BTRFS_ASSERT
2593 struct btrfs_disk_key disk_key;
2594
2595 btrfs_cpu_key_to_disk(&disk_key, key);
2596
2597 if (level == 0)
2598 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2599 offsetof(struct btrfs_leaf, items[0].key),
2600 sizeof(disk_key)));
2601 else
2602 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2603 offsetof(struct btrfs_node, ptrs[0].key),
2604 sizeof(disk_key)));
2605#endif
2606}
2607
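/*
 * If a previous level already compared equal (*prev_cmp == 0), the key we
 * are searching for must be the first key of every node further down the
 * path, so skip the binary search and use slot 0 (checked under
 * CONFIG_BTRFS_ASSERT by key_search_validate()).
 */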
2608static int key_search(struct extent_buffer *b, struct btrfs_key *key,
2609 int level, int *prev_cmp, int *slot)
2610{
2611 if (*prev_cmp != 0) {
2612 *prev_cmp = bin_search(b, key, level, slot);
2613 return *prev_cmp;
2614 }
2615
2616 key_search_validate(b, key, level);
2617 *slot = 0;
2618
2619 return 0;
2620}
2621
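/*
 * Look up the item (@iobjectid, @key_type, @ioff) in @fs_root, moving on to
 * the next leaf if the initial search lands past the last slot. Returns 0
 * and fills @found_key when an item with matching objectid and type is
 * found, 1 when it is not, and a negative errno on error.
 */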
2622int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2623 u64 iobjectid, u64 ioff, u8 key_type,
2624 struct btrfs_key *found_key)
2625{
2626 int ret;
2627 struct btrfs_key key;
2628 struct extent_buffer *eb;
2629
2630 ASSERT(path);
2631 ASSERT(found_key);
2632
2633 key.type = key_type;
2634 key.objectid = iobjectid;
2635 key.offset = ioff;
2636
2637 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2638 if (ret < 0)
2639 return ret;
2640
2641 eb = path->nodes[0];
2642 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2643 ret = btrfs_next_leaf(fs_root, path);
2644 if (ret)
2645 return ret;
2646 eb = path->nodes[0];
2647 }
2648
2649 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2650 if (found_key->type != key.type ||
2651 found_key->objectid != key.objectid)
2652 return 1;
2653
2654 return 0;
2655}
2656
2657/*
2658 * look for key in the tree. path is filled in with nodes along the way
2659 * if key is found, we return zero and you can find the item in the leaf
2660 * level of the path (level 0)
2661 *
2662 * If the key isn't found, the path points to the slot where it should
2663 * be inserted, and 1 is returned. If there are other errors during the
2664 * search a negative error number is returned.
2665 *
2666 * if ins_len > 0, nodes and leaves will be split as we walk down the
2667 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
2668 * possible)
2669 */
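/*
 * Typical read-only usage (an illustrative sketch, not a call site from this
 * file; "ino" and "root" stand for a valid inode number and the fs tree
 * root):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key key = { .objectid = ino,
 *				 .type = BTRFS_INODE_ITEM_KEY,
 *				 .offset = 0 };
 *	int ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *
 *	if (ret == 0) {
 *		// item is at path->nodes[0], path->slots[0]
 *	}
 *	btrfs_free_path(path);
 *
 * With trans == NULL, ins_len == 0 and cow == 0 this is the read-only form.
 */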
2670int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2671 *root, struct btrfs_key *key, struct btrfs_path *p, int
2672 ins_len, int cow)
2673{
2674 struct extent_buffer *b;
2675 int slot;
2676 int ret;
2677 int err;
2678 int level;
2679 int lowest_unlock = 1;
2680 int root_lock;
2681 /* everything at write_lock_level or lower must be write locked */
2682 int write_lock_level = 0;
2683 u8 lowest_level = 0;
2684 int min_write_lock_level;
2685 int prev_cmp;
2686
2687 lowest_level = p->lowest_level;
2688 WARN_ON(lowest_level && ins_len > 0);
2689 WARN_ON(p->nodes[0] != NULL);
2690 BUG_ON(!cow && ins_len);
2691
2692 if (ins_len < 0) {
2693 lowest_unlock = 2;
2694
2695		/* when we are removing items, we might have to go up to level
2696		 * two as we update tree pointers. Make sure we keep write
2697		 * locks for those levels as well.
2698 */
2699 write_lock_level = 2;
2700 } else if (ins_len > 0) {
2701 /*
2702 * for inserting items, make sure we have a write lock on
2703 * level 1 so we can update keys
2704 */
2705 write_lock_level = 1;
2706 }
2707
2708 if (!cow)
2709 write_lock_level = -1;
2710
2711 if (cow && (p->keep_locks || p->lowest_level))
2712 write_lock_level = BTRFS_MAX_LEVEL;
2713
2714 min_write_lock_level = write_lock_level;
2715
2716again:
2717 prev_cmp = -1;
2718 /*
2719 * we try very hard to do read locks on the root
2720 */
2721 root_lock = BTRFS_READ_LOCK;
2722 level = 0;
2723 if (p->search_commit_root) {
2724 /*
2725 * the commit roots are read only
2726 * so we always do read locks
2727 */
2728 if (p->need_commit_sem)
2729 down_read(&root->fs_info->commit_root_sem);
2730 b = root->commit_root;
2731 extent_buffer_get(b);
2732 level = btrfs_header_level(b);
2733 if (p->need_commit_sem)
2734 up_read(&root->fs_info->commit_root_sem);
2735 if (!p->skip_locking)
2736 btrfs_tree_read_lock(b);
2737 } else {
2738 if (p->skip_locking) {
2739 b = btrfs_root_node(root);
2740 level = btrfs_header_level(b);
2741 } else {
2742 /* we don't know the level of the root node
2743 * until we actually have it read locked
2744 */
2745 b = btrfs_read_lock_root_node(root);
2746 level = btrfs_header_level(b);
2747 if (level <= write_lock_level) {
2748 /* whoops, must trade for write lock */
2749 btrfs_tree_read_unlock(b);
2750 free_extent_buffer(b);
2751 b = btrfs_lock_root_node(root);
2752 root_lock = BTRFS_WRITE_LOCK;
2753
2754 /* the level might have changed, check again */
2755 level = btrfs_header_level(b);
2756 }
2757 }
2758 }
2759 p->nodes[level] = b;
2760 if (!p->skip_locking)
2761 p->locks[level] = root_lock;
2762
2763 while (b) {
2764 level = btrfs_header_level(b);
2765
2766 /*
2767 * setup the path here so we can release it under lock
2768 * contention with the cow code
2769 */
2770 if (cow) {
2771 /*
2772 * if we don't really need to cow this block
2773 * then we don't want to set the path blocking,
2774 * so we test it here
2775 */
2776 if (!should_cow_block(trans, root, b))
2777 goto cow_done;
2778
2779 /*
2780 * must have write locks on this node and the
2781 * parent
2782 */
2783 if (level > write_lock_level ||
2784 (level + 1 > write_lock_level &&
2785 level + 1 < BTRFS_MAX_LEVEL &&
2786 p->nodes[level + 1])) {
2787 write_lock_level = level + 1;
2788 btrfs_release_path(p);
2789 goto again;
2790 }
2791
2792 btrfs_set_path_blocking(p);
2793 err = btrfs_cow_block(trans, root, b,
2794 p->nodes[level + 1],
2795 p->slots[level + 1], &b);
2796 if (err) {
2797 ret = err;
2798 goto done;
2799 }
2800 }
2801cow_done:
2802 p->nodes[level] = b;
2803 btrfs_clear_path_blocking(p, NULL, 0);
2804
2805 /*
2806 * we have a lock on b and as long as we aren't changing
2807		 * the tree, there is no way for the items in b to change.
2808 * It is safe to drop the lock on our parent before we
2809 * go through the expensive btree search on b.
2810 *
2811 * If we're inserting or deleting (ins_len != 0), then we might
2812 * be changing slot zero, which may require changing the parent.
2813 * So, we can't drop the lock until after we know which slot
2814 * we're operating on.
2815 */
2816 if (!ins_len && !p->keep_locks) {
2817 int u = level + 1;
2818
2819 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2820 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2821 p->locks[u] = 0;
2822 }
2823 }
2824
2825 ret = key_search(b, key, level, &prev_cmp, &slot);
2826
2827 if (level != 0) {
2828 int dec = 0;
2829 if (ret && slot > 0) {
2830 dec = 1;
2831 slot -= 1;
2832 }
2833 p->slots[level] = slot;
2834 err = setup_nodes_for_search(trans, root, p, b, level,
2835 ins_len, &write_lock_level);
2836 if (err == -EAGAIN)
2837 goto again;
2838 if (err) {
2839 ret = err;
2840 goto done;
2841 }
2842 b = p->nodes[level];
2843 slot = p->slots[level];
2844
2845 /*
2846 * slot 0 is special, if we change the key
2847 * we have to update the parent pointer
2848 * which means we must have a write lock
2849 * on the parent
2850 */
2851 if (slot == 0 && ins_len &&
2852 write_lock_level < level + 1) {
2853 write_lock_level = level + 1;
2854 btrfs_release_path(p);
2855 goto again;
2856 }
2857
2858 unlock_up(p, level, lowest_unlock,
2859 min_write_lock_level, &write_lock_level);
2860
2861 if (level == lowest_level) {
2862 if (dec)
2863 p->slots[level]++;
2864 goto done;
2865 }
2866
2867 err = read_block_for_search(trans, root, p,
2868 &b, level, slot, key, 0);
2869 if (err == -EAGAIN)
2870 goto again;
2871 if (err) {
2872 ret = err;
2873 goto done;
2874 }
2875
2876 if (!p->skip_locking) {
2877 level = btrfs_header_level(b);
2878 if (level <= write_lock_level) {
2879 err = btrfs_try_tree_write_lock(b);
2880 if (!err) {
2881 btrfs_set_path_blocking(p);
2882 btrfs_tree_lock(b);
2883 btrfs_clear_path_blocking(p, b,
2884 BTRFS_WRITE_LOCK);
2885 }
2886 p->locks[level] = BTRFS_WRITE_LOCK;
2887 } else {
2888 err = btrfs_tree_read_lock_atomic(b);
2889 if (!err) {
2890 btrfs_set_path_blocking(p);
2891 btrfs_tree_read_lock(b);
2892 btrfs_clear_path_blocking(p, b,
2893 BTRFS_READ_LOCK);
2894 }
2895 p->locks[level] = BTRFS_READ_LOCK;
2896 }
2897 p->nodes[level] = b;
2898 }
2899 } else {
2900 p->slots[level] = slot;
2901 if (ins_len > 0 &&
2902 btrfs_leaf_free_space(root, b) < ins_len) {
2903 if (write_lock_level < 1) {
2904 write_lock_level = 1;
2905 btrfs_release_path(p);
2906 goto again;
2907 }
2908
2909 btrfs_set_path_blocking(p);
2910 err = split_leaf(trans, root, key,
2911 p, ins_len, ret == 0);
2912 btrfs_clear_path_blocking(p, NULL, 0);
2913
2914 BUG_ON(err > 0);
2915 if (err) {
2916 ret = err;
2917 goto done;
2918 }
2919 }
2920 if (!p->search_for_split)
2921 unlock_up(p, level, lowest_unlock,
2922 min_write_lock_level, &write_lock_level);
2923 goto done;
2924 }
2925 }
2926 ret = 1;
2927done:
2928 /*
2929 * we don't really know what they plan on doing with the path
2930 * from here on, so for now just mark it as blocking
2931 */
2932 if (!p->leave_spinning)
2933 btrfs_set_path_blocking(p);
2934 if (ret < 0 && !p->skip_release_on_error)
2935 btrfs_release_path(p);
2936 return ret;
2937}
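
/*
 * Example usage (a minimal illustrative sketch, not taken from a real
 * caller; the key type and error handling are placeholders): a read-only
 * lookup passes a NULL transaction handle with ins_len and cow both 0.
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key key = { .objectid = objectid,
 *				 .type = BTRFS_INODE_ITEM_KEY,
 *				 .offset = 0 };
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret > 0)
 *		ret = -ENOENT;
 *	if (!ret)
 *		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 *	btrfs_free_path(path);
 *
 * ret > 0 means there was no exact match and the path points at the slot
 * where the key would be inserted.  Passing a transaction handle with
 * cow = 1 and ins_len > 0 instead COWs the nodes on the way down and
 * reserves room in the leaf for an insertion of that size; a negative
 * ins_len prepares the path for a deletion.
 */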
2938
2939/*
2940 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2941 * current state of the tree together with the operations recorded in the tree
2942 * modification log to search for the key in a previous version of this tree, as
2943 * denoted by the time_seq parameter.
2944 *
2945 * Naturally, there is no support for insert, delete or cow operations.
2946 *
2947 * The resulting path and return value will be set up as if we called
2948 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2949 */
2950int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2951 struct btrfs_path *p, u64 time_seq)
2952{
2953 struct extent_buffer *b;
2954 int slot;
2955 int ret;
2956 int err;
2957 int level;
2958 int lowest_unlock = 1;
2959 u8 lowest_level = 0;
2960 int prev_cmp = -1;
2961
2962 lowest_level = p->lowest_level;
2963 WARN_ON(p->nodes[0] != NULL);
2964
2965 if (p->search_commit_root) {
2966 BUG_ON(time_seq);
2967 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2968 }
2969
2970again:
2971 b = get_old_root(root, time_seq);
2972 level = btrfs_header_level(b);
2973 p->locks[level] = BTRFS_READ_LOCK;
2974
2975 while (b) {
2976 level = btrfs_header_level(b);
2977 p->nodes[level] = b;
2978 btrfs_clear_path_blocking(p, NULL, 0);
2979
2980 /*
2981 * we have a lock on b and as long as we aren't changing
 * the tree, there is no way for the items in b to change.
2983 * It is safe to drop the lock on our parent before we
2984 * go through the expensive btree search on b.
2985 */
2986 btrfs_unlock_up_safe(p, level + 1);
2987
2988 /*
2989 * Since we can unwind eb's we want to do a real search every
2990 * time.
2991 */
2992 prev_cmp = -1;
2993 ret = key_search(b, key, level, &prev_cmp, &slot);
2994
2995 if (level != 0) {
2996 int dec = 0;
2997 if (ret && slot > 0) {
2998 dec = 1;
2999 slot -= 1;
3000 }
3001 p->slots[level] = slot;
3002 unlock_up(p, level, lowest_unlock, 0, NULL);
3003
3004 if (level == lowest_level) {
3005 if (dec)
3006 p->slots[level]++;
3007 goto done;
3008 }
3009
3010 err = read_block_for_search(NULL, root, p, &b, level,
3011 slot, key, time_seq);
3012 if (err == -EAGAIN)
3013 goto again;
3014 if (err) {
3015 ret = err;
3016 goto done;
3017 }
3018
3019 level = btrfs_header_level(b);
3020 err = btrfs_tree_read_lock_atomic(b);
3021 if (!err) {
3022 btrfs_set_path_blocking(p);
3023 btrfs_tree_read_lock(b);
3024 btrfs_clear_path_blocking(p, b,
3025 BTRFS_READ_LOCK);
3026 }
3027 b = tree_mod_log_rewind(root->fs_info, p, b, time_seq);
3028 if (!b) {
3029 ret = -ENOMEM;
3030 goto done;
3031 }
3032 p->locks[level] = BTRFS_READ_LOCK;
3033 p->nodes[level] = b;
3034 } else {
3035 p->slots[level] = slot;
3036 unlock_up(p, level, lowest_unlock, 0, NULL);
3037 goto done;
3038 }
3039 }
3040 ret = 1;
3041done:
3042 if (!p->leave_spinning)
3043 btrfs_set_path_blocking(p);
3044 if (ret < 0)
3045 btrfs_release_path(p);
3046
3047 return ret;
3048}
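
/*
 * Example usage (illustrative sketch): a tree mod log based lookup, as
 * used for backref walking.  time_seq is assumed to have been obtained
 * beforehand while the corresponding tree mod log sequence was held.
 *
 *	ret = btrfs_search_old_slot(root, &key, path, time_seq);
 *	if (ret < 0)
 *		goto out;
 *
 * As with btrfs_search_slot(), ret == 0 means an exact match and ret == 1
 * means the path points at the slot where the key would be inserted.
 */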
3049
3050/*
 * helper to use instead of btrfs_search_slot() if no exact match is needed but
3052 * instead the next or previous item should be returned.
3053 * When find_higher is true, the next higher item is returned, the next lower
3054 * otherwise.
3055 * When return_any and find_higher are both true, and no higher item is found,
3056 * return the next lower instead.
3057 * When return_any is true and find_higher is false, and no lower item is found,
3058 * return the next higher instead.
3059 * It returns 0 if any item is found, 1 if none is found (tree empty), and
3060 * < 0 on error
3061 */
3062int btrfs_search_slot_for_read(struct btrfs_root *root,
3063 struct btrfs_key *key, struct btrfs_path *p,
3064 int find_higher, int return_any)
3065{
3066 int ret;
3067 struct extent_buffer *leaf;
3068
3069again:
3070 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3071 if (ret <= 0)
3072 return ret;
3073 /*
3074 * a return value of 1 means the path is at the position where the
3075 * item should be inserted. Normally this is the next bigger item,
3076 * but in case the previous item is the last in a leaf, path points
3077 * to the first free slot in the previous leaf, i.e. at an invalid
3078 * item.
3079 */
3080 leaf = p->nodes[0];
3081
3082 if (find_higher) {
3083 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3084 ret = btrfs_next_leaf(root, p);
3085 if (ret <= 0)
3086 return ret;
3087 if (!return_any)
3088 return 1;
3089 /*
3090 * no higher item found, return the next
3091 * lower instead
3092 */
3093 return_any = 0;
3094 find_higher = 0;
3095 btrfs_release_path(p);
3096 goto again;
3097 }
3098 } else {
3099 if (p->slots[0] == 0) {
3100 ret = btrfs_prev_leaf(root, p);
3101 if (ret < 0)
3102 return ret;
3103 if (!ret) {
3104 leaf = p->nodes[0];
3105 if (p->slots[0] == btrfs_header_nritems(leaf))
3106 p->slots[0]--;
3107 return 0;
3108 }
3109 if (!return_any)
3110 return 1;
3111 /*
3112 * no lower item found, return the next
3113 * higher instead
3114 */
3115 return_any = 0;
3116 find_higher = 1;
3117 btrfs_release_path(p);
3118 goto again;
3119 } else {
3120 --p->slots[0];
3121 }
3122 }
3123 return 0;
3124}
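
/*
 * Example usage (illustrative sketch): find the first item at or after
 * @key, falling back to the closest lower item when nothing higher
 * exists in the tree:
 *
 *	ret = btrfs_search_slot_for_read(root, &key, path, 1, 1);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
 *				      path->slots[0]);
 *
 * ret == 1 here means the tree is empty, ret < 0 means an error.
 */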
3125
3126/*
 * adjust the pointers going up the tree, starting at level,
 * making sure the right key of each node points to 'key'.
3129 * This is used after shifting pointers to the left, so it stops
3130 * fixing up pointers when a given leaf/node is not in slot 0 of the
3131 * higher levels
3132 *
3133 */
3134static void fixup_low_keys(struct btrfs_fs_info *fs_info,
3135 struct btrfs_path *path,
3136 struct btrfs_disk_key *key, int level)
3137{
3138 int i;
3139 struct extent_buffer *t;
3140
3141 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3142 int tslot = path->slots[i];
3143 if (!path->nodes[i])
3144 break;
3145 t = path->nodes[i];
3146 tree_mod_log_set_node_key(fs_info, t, tslot, 1);
3147 btrfs_set_node_key(t, key, tslot);
3148 btrfs_mark_buffer_dirty(path->nodes[i]);
3149 if (tslot != 0)
3150 break;
3151 }
3152}
3153
3154/*
3155 * update item key.
3156 *
 * This function isn't completely safe. It's the caller's responsibility
 * to make sure the new key won't break the key ordering in the leaf
3159 */
3160void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3161 struct btrfs_path *path,
3162 struct btrfs_key *new_key)
3163{
3164 struct btrfs_disk_key disk_key;
3165 struct extent_buffer *eb;
3166 int slot;
3167
3168 eb = path->nodes[0];
3169 slot = path->slots[0];
3170 if (slot > 0) {
3171 btrfs_item_key(eb, &disk_key, slot - 1);
3172 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
3173 }
3174 if (slot < btrfs_header_nritems(eb) - 1) {
3175 btrfs_item_key(eb, &disk_key, slot + 1);
3176 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
3177 }
3178
3179 btrfs_cpu_key_to_disk(&disk_key, new_key);
3180 btrfs_set_item_key(eb, &disk_key, slot);
3181 btrfs_mark_buffer_dirty(eb);
3182 if (slot == 0)
3183 fixup_low_keys(fs_info, path, &disk_key, 1);
3184}
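
/*
 * Example usage (illustrative sketch): move the key of the item the path
 * points to, assuming the caller has already verified that the new key
 * still sorts between the neighbouring items (otherwise the BUG_ONs
 * above fire):
 *
 *	struct btrfs_key new_key = found_key;
 *
 *	new_key.offset = new_start;
 *	btrfs_set_item_key_safe(root->fs_info, path, &new_key);
 */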
3185
3186/*
3187 * try to push data from one node into the next node left in the
3188 * tree.
3189 *
3190 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3191 * error, and > 0 if there was no room in the left hand block.
3192 */
3193static int push_node_left(struct btrfs_trans_handle *trans,
3194 struct btrfs_root *root, struct extent_buffer *dst,
3195 struct extent_buffer *src, int empty)
3196{
3197 int push_items = 0;
3198 int src_nritems;
3199 int dst_nritems;
3200 int ret = 0;
3201
3202 src_nritems = btrfs_header_nritems(src);
3203 dst_nritems = btrfs_header_nritems(dst);
3204 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3205 WARN_ON(btrfs_header_generation(src) != trans->transid);
3206 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3207
3208 if (!empty && src_nritems <= 8)
3209 return 1;
3210
3211 if (push_items <= 0)
3212 return 1;
3213
3214 if (empty) {
3215 push_items = min(src_nritems, push_items);
3216 if (push_items < src_nritems) {
3217 /* leave at least 8 pointers in the node if
3218 * we aren't going to empty it
3219 */
3220 if (src_nritems - push_items < 8) {
3221 if (push_items <= 8)
3222 return 1;
3223 push_items -= 8;
3224 }
3225 }
3226 } else
3227 push_items = min(src_nritems - 8, push_items);
3228
3229 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
3230 push_items);
3231 if (ret) {
3232 btrfs_abort_transaction(trans, root, ret);
3233 return ret;
3234 }
3235 copy_extent_buffer(dst, src,
3236 btrfs_node_key_ptr_offset(dst_nritems),
3237 btrfs_node_key_ptr_offset(0),
3238 push_items * sizeof(struct btrfs_key_ptr));
3239
3240 if (push_items < src_nritems) {
3241 /*
3242 * don't call tree_mod_log_eb_move here, key removal was already
3243 * fully logged by tree_mod_log_eb_copy above.
3244 */
3245 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3246 btrfs_node_key_ptr_offset(push_items),
3247 (src_nritems - push_items) *
3248 sizeof(struct btrfs_key_ptr));
3249 }
3250 btrfs_set_header_nritems(src, src_nritems - push_items);
3251 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3252 btrfs_mark_buffer_dirty(src);
3253 btrfs_mark_buffer_dirty(dst);
3254
3255 return ret;
3256}
3257
3258/*
3259 * try to push data from one node into the next node right in the
3260 * tree.
3261 *
3262 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3263 * error, and > 0 if there was no room in the right hand block.
3264 *
3265 * this will only push up to 1/2 the contents of the left node over
3266 */
3267static int balance_node_right(struct btrfs_trans_handle *trans,
3268 struct btrfs_root *root,
3269 struct extent_buffer *dst,
3270 struct extent_buffer *src)
3271{
3272 int push_items = 0;
3273 int max_push;
3274 int src_nritems;
3275 int dst_nritems;
3276 int ret = 0;
3277
3278 WARN_ON(btrfs_header_generation(src) != trans->transid);
3279 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3280
3281 src_nritems = btrfs_header_nritems(src);
3282 dst_nritems = btrfs_header_nritems(dst);
3283 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3284 if (push_items <= 0)
3285 return 1;
3286
3287 if (src_nritems < 4)
3288 return 1;
3289
3290 max_push = src_nritems / 2 + 1;
3291 /* don't try to empty the node */
3292 if (max_push >= src_nritems)
3293 return 1;
3294
3295 if (max_push < push_items)
3296 push_items = max_push;
3297
3298 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
3299 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3300 btrfs_node_key_ptr_offset(0),
3301 (dst_nritems) *
3302 sizeof(struct btrfs_key_ptr));
3303
3304 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
3305 src_nritems - push_items, push_items);
3306 if (ret) {
3307 btrfs_abort_transaction(trans, root, ret);
3308 return ret;
3309 }
3310 copy_extent_buffer(dst, src,
3311 btrfs_node_key_ptr_offset(0),
3312 btrfs_node_key_ptr_offset(src_nritems - push_items),
3313 push_items * sizeof(struct btrfs_key_ptr));
3314
3315 btrfs_set_header_nritems(src, src_nritems - push_items);
3316 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3317
3318 btrfs_mark_buffer_dirty(src);
3319 btrfs_mark_buffer_dirty(dst);
3320
3321 return ret;
3322}
3323
3324/*
3325 * helper function to insert a new root level in the tree.
3326 * A new node is allocated, and a single item is inserted to
3327 * point to the existing root
3328 *
3329 * returns zero on success or < 0 on failure.
3330 */
3331static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3332 struct btrfs_root *root,
3333 struct btrfs_path *path, int level)
3334{
3335 u64 lower_gen;
3336 struct extent_buffer *lower;
3337 struct extent_buffer *c;
3338 struct extent_buffer *old;
3339 struct btrfs_disk_key lower_key;
3340
3341 BUG_ON(path->nodes[level]);
3342 BUG_ON(path->nodes[level-1] != root->node);
3343
3344 lower = path->nodes[level-1];
3345 if (level == 1)
3346 btrfs_item_key(lower, &lower_key, 0);
3347 else
3348 btrfs_node_key(lower, &lower_key, 0);
3349
3350 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3351 &lower_key, level, root->node->start, 0);
3352 if (IS_ERR(c))
3353 return PTR_ERR(c);
3354
3355 root_add_used(root, root->nodesize);
3356
3357 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
3358 btrfs_set_header_nritems(c, 1);
3359 btrfs_set_header_level(c, level);
3360 btrfs_set_header_bytenr(c, c->start);
3361 btrfs_set_header_generation(c, trans->transid);
3362 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3363 btrfs_set_header_owner(c, root->root_key.objectid);
3364
3365 write_extent_buffer(c, root->fs_info->fsid, btrfs_header_fsid(),
3366 BTRFS_FSID_SIZE);
3367
3368 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
3369 btrfs_header_chunk_tree_uuid(c), BTRFS_UUID_SIZE);
3370
3371 btrfs_set_node_key(c, &lower_key, 0);
3372 btrfs_set_node_blockptr(c, 0, lower->start);
3373 lower_gen = btrfs_header_generation(lower);
3374 WARN_ON(lower_gen != trans->transid);
3375
3376 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3377
3378 btrfs_mark_buffer_dirty(c);
3379
3380 old = root->node;
3381 tree_mod_log_set_root_pointer(root, c, 0);
3382 rcu_assign_pointer(root->node, c);
3383
3384 /* the super has an extra ref to root->node */
3385 free_extent_buffer(old);
3386
3387 add_root_to_dirty_list(root);
3388 extent_buffer_get(c);
3389 path->nodes[level] = c;
3390 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
3391 path->slots[level] = 0;
3392 return 0;
3393}
3394
3395/*
3396 * worker function to insert a single pointer in a node.
3397 * the node should have enough room for the pointer already
3398 *
3399 * slot and level indicate where you want the key to go, and
3400 * blocknr is the block the key points to.
3401 */
3402static void insert_ptr(struct btrfs_trans_handle *trans,
3403 struct btrfs_root *root, struct btrfs_path *path,
3404 struct btrfs_disk_key *key, u64 bytenr,
3405 int slot, int level)
3406{
3407 struct extent_buffer *lower;
3408 int nritems;
3409 int ret;
3410
3411 BUG_ON(!path->nodes[level]);
3412 btrfs_assert_tree_locked(path->nodes[level]);
3413 lower = path->nodes[level];
3414 nritems = btrfs_header_nritems(lower);
3415 BUG_ON(slot > nritems);
3416 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
3417 if (slot != nritems) {
3418 if (level)
3419 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3420 slot, nritems - slot);
3421 memmove_extent_buffer(lower,
3422 btrfs_node_key_ptr_offset(slot + 1),
3423 btrfs_node_key_ptr_offset(slot),
3424 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3425 }
3426 if (level) {
3427 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
3428 MOD_LOG_KEY_ADD, GFP_NOFS);
3429 BUG_ON(ret < 0);
3430 }
3431 btrfs_set_node_key(lower, key, slot);
3432 btrfs_set_node_blockptr(lower, slot, bytenr);
3433 WARN_ON(trans->transid == 0);
3434 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3435 btrfs_set_header_nritems(lower, nritems + 1);
3436 btrfs_mark_buffer_dirty(lower);
3437}
3438
3439/*
3440 * split the node at the specified level in path in two.
3441 * The path is corrected to point to the appropriate node after the split
3442 *
3443 * Before splitting this tries to make some room in the node by pushing
3444 * left and right, if either one works, it returns right away.
3445 *
3446 * returns 0 on success and < 0 on failure
3447 */
3448static noinline int split_node(struct btrfs_trans_handle *trans,
3449 struct btrfs_root *root,
3450 struct btrfs_path *path, int level)
3451{
3452 struct extent_buffer *c;
3453 struct extent_buffer *split;
3454 struct btrfs_disk_key disk_key;
3455 int mid;
3456 int ret;
3457 u32 c_nritems;
3458
3459 c = path->nodes[level];
3460 WARN_ON(btrfs_header_generation(c) != trans->transid);
3461 if (c == root->node) {
3462 /*
 * trying to split the root, let's make a new one
 *
 * tree mod log: We don't log the removal of the old root in
3466 * insert_new_root, because that root buffer will be kept as a
3467 * normal node. We are going to log removal of half of the
3468 * elements below with tree_mod_log_eb_copy. We're holding a
3469 * tree lock on the buffer, which is why we cannot race with
3470 * other tree_mod_log users.
3471 */
3472 ret = insert_new_root(trans, root, path, level + 1);
3473 if (ret)
3474 return ret;
3475 } else {
3476 ret = push_nodes_for_insert(trans, root, path, level);
3477 c = path->nodes[level];
3478 if (!ret && btrfs_header_nritems(c) <
3479 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3480 return 0;
3481 if (ret < 0)
3482 return ret;
3483 }
3484
3485 c_nritems = btrfs_header_nritems(c);
3486 mid = (c_nritems + 1) / 2;
3487 btrfs_node_key(c, &disk_key, mid);
3488
3489 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3490 &disk_key, level, c->start, 0);
3491 if (IS_ERR(split))
3492 return PTR_ERR(split);
3493
3494 root_add_used(root, root->nodesize);
3495
3496 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3497 btrfs_set_header_level(split, btrfs_header_level(c));
3498 btrfs_set_header_bytenr(split, split->start);
3499 btrfs_set_header_generation(split, trans->transid);
3500 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3501 btrfs_set_header_owner(split, root->root_key.objectid);
3502 write_extent_buffer(split, root->fs_info->fsid,
3503 btrfs_header_fsid(), BTRFS_FSID_SIZE);
3504 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3505 btrfs_header_chunk_tree_uuid(split),
3506 BTRFS_UUID_SIZE);
3507
3508 ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
3509 mid, c_nritems - mid);
3510 if (ret) {
3511 btrfs_abort_transaction(trans, root, ret);
3512 return ret;
3513 }
3514 copy_extent_buffer(split, c,
3515 btrfs_node_key_ptr_offset(0),
3516 btrfs_node_key_ptr_offset(mid),
3517 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3518 btrfs_set_header_nritems(split, c_nritems - mid);
3519 btrfs_set_header_nritems(c, mid);
3520 ret = 0;
3521
3522 btrfs_mark_buffer_dirty(c);
3523 btrfs_mark_buffer_dirty(split);
3524
3525 insert_ptr(trans, root, path, &disk_key, split->start,
3526 path->slots[level + 1] + 1, level + 1);
3527
3528 if (path->slots[level] >= mid) {
3529 path->slots[level] -= mid;
3530 btrfs_tree_unlock(c);
3531 free_extent_buffer(c);
3532 path->nodes[level] = split;
3533 path->slots[level + 1] += 1;
3534 } else {
3535 btrfs_tree_unlock(split);
3536 free_extent_buffer(split);
3537 }
3538 return ret;
3539}
3540
3541/*
3542 * how many bytes are required to store the items in a leaf. start
3543 * and nr indicate which items in the leaf to check. This totals up the
3544 * space used both by the item structs and the item data
3545 */
3546static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3547{
3548 struct btrfs_item *start_item;
3549 struct btrfs_item *end_item;
3550 struct btrfs_map_token token;
3551 int data_len;
3552 int nritems = btrfs_header_nritems(l);
3553 int end = min(nritems, start + nr) - 1;
3554
3555 if (!nr)
3556 return 0;
3557 btrfs_init_map_token(&token);
3558 start_item = btrfs_item_nr(start);
3559 end_item = btrfs_item_nr(end);
3560 data_len = btrfs_token_item_offset(l, start_item, &token) +
3561 btrfs_token_item_size(l, start_item, &token);
3562 data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3563 data_len += sizeof(struct btrfs_item) * nr;
3564 WARN_ON(data_len < 0);
3565 return data_len;
3566}
3567
3568/*
3569 * The space between the end of the leaf items and
3570 * the start of the leaf data. IOW, how much room
3571 * the leaf has left for both items and data
3572 */
3573noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3574 struct extent_buffer *leaf)
3575{
3576 int nritems = btrfs_header_nritems(leaf);
3577 int ret;
3578 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3579 if (ret < 0) {
3580 btrfs_crit(root->fs_info,
3581 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3582 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3583 leaf_space_used(leaf, 0, nritems), nritems);
3584 }
3585 return ret;
3586}
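
/*
 * Example (illustrative sketch): a new item consumes its data size plus
 * sizeof(struct btrfs_item) for the item header, so a caller that already
 * holds a leaf can check for room before inserting:
 *
 *	if (btrfs_leaf_free_space(root, leaf) <
 *	    data_size + sizeof(struct btrfs_item))
 *		return -ENOSPC;
 *
 * A real caller would usually let btrfs_search_slot() reserve the room
 * and split the leaf instead of failing.
 */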
3587
3588/*
3589 * min slot controls the lowest index we're willing to push to the
3590 * right. We'll push up to and including min_slot, but no lower
3591 */
3592static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3593 struct btrfs_root *root,
3594 struct btrfs_path *path,
3595 int data_size, int empty,
3596 struct extent_buffer *right,
3597 int free_space, u32 left_nritems,
3598 u32 min_slot)
3599{
3600 struct extent_buffer *left = path->nodes[0];
3601 struct extent_buffer *upper = path->nodes[1];
3602 struct btrfs_map_token token;
3603 struct btrfs_disk_key disk_key;
3604 int slot;
3605 u32 i;
3606 int push_space = 0;
3607 int push_items = 0;
3608 struct btrfs_item *item;
3609 u32 nr;
3610 u32 right_nritems;
3611 u32 data_end;
3612 u32 this_item_size;
3613
3614 btrfs_init_map_token(&token);
3615
3616 if (empty)
3617 nr = 0;
3618 else
3619 nr = max_t(u32, 1, min_slot);
3620
3621 if (path->slots[0] >= left_nritems)
3622 push_space += data_size;
3623
3624 slot = path->slots[1];
3625 i = left_nritems - 1;
3626 while (i >= nr) {
3627 item = btrfs_item_nr(i);
3628
3629 if (!empty && push_items > 0) {
3630 if (path->slots[0] > i)
3631 break;
3632 if (path->slots[0] == i) {
3633 int space = btrfs_leaf_free_space(root, left);
3634 if (space + push_space * 2 > free_space)
3635 break;
3636 }
3637 }
3638
3639 if (path->slots[0] == i)
3640 push_space += data_size;
3641
3642 this_item_size = btrfs_item_size(left, item);
3643 if (this_item_size + sizeof(*item) + push_space > free_space)
3644 break;
3645
3646 push_items++;
3647 push_space += this_item_size + sizeof(*item);
3648 if (i == 0)
3649 break;
3650 i--;
3651 }
3652
3653 if (push_items == 0)
3654 goto out_unlock;
3655
3656 WARN_ON(!empty && push_items == left_nritems);
3657
3658 /* push left to right */
3659 right_nritems = btrfs_header_nritems(right);
3660
3661 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3662 push_space -= leaf_data_end(root, left);
3663
3664 /* make room in the right data area */
3665 data_end = leaf_data_end(root, right);
3666 memmove_extent_buffer(right,
3667 btrfs_leaf_data(right) + data_end - push_space,
3668 btrfs_leaf_data(right) + data_end,
3669 BTRFS_LEAF_DATA_SIZE(root) - data_end);
3670
3671 /* copy from the left data area */
3672 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3673 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3674 btrfs_leaf_data(left) + leaf_data_end(root, left),
3675 push_space);
3676
3677 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3678 btrfs_item_nr_offset(0),
3679 right_nritems * sizeof(struct btrfs_item));
3680
3681 /* copy the items from left to right */
3682 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3683 btrfs_item_nr_offset(left_nritems - push_items),
3684 push_items * sizeof(struct btrfs_item));
3685
3686 /* update the item pointers */
3687 right_nritems += push_items;
3688 btrfs_set_header_nritems(right, right_nritems);
3689 push_space = BTRFS_LEAF_DATA_SIZE(root);
3690 for (i = 0; i < right_nritems; i++) {
3691 item = btrfs_item_nr(i);
3692 push_space -= btrfs_token_item_size(right, item, &token);
3693 btrfs_set_token_item_offset(right, item, push_space, &token);
3694 }
3695
3696 left_nritems -= push_items;
3697 btrfs_set_header_nritems(left, left_nritems);
3698
3699 if (left_nritems)
3700 btrfs_mark_buffer_dirty(left);
3701 else
3702 clean_tree_block(trans, root->fs_info, left);
3703
3704 btrfs_mark_buffer_dirty(right);
3705
3706 btrfs_item_key(right, &disk_key, 0);
3707 btrfs_set_node_key(upper, &disk_key, slot + 1);
3708 btrfs_mark_buffer_dirty(upper);
3709
3710 /* then fixup the leaf pointer in the path */
3711 if (path->slots[0] >= left_nritems) {
3712 path->slots[0] -= left_nritems;
3713 if (btrfs_header_nritems(path->nodes[0]) == 0)
3714 clean_tree_block(trans, root->fs_info, path->nodes[0]);
3715 btrfs_tree_unlock(path->nodes[0]);
3716 free_extent_buffer(path->nodes[0]);
3717 path->nodes[0] = right;
3718 path->slots[1] += 1;
3719 } else {
3720 btrfs_tree_unlock(right);
3721 free_extent_buffer(right);
3722 }
3723 return 0;
3724
3725out_unlock:
3726 btrfs_tree_unlock(right);
3727 free_extent_buffer(right);
3728 return 1;
3729}
3730
3731/*
3732 * push some data in the path leaf to the right, trying to free up at
3733 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3734 *
3735 * returns 1 if the push failed because the other node didn't have enough
3736 * room, 0 if everything worked out and < 0 if there were major errors.
3737 *
3738 * this will push starting from min_slot to the end of the leaf. It won't
3739 * push any slot lower than min_slot
3740 */
3741static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3742 *root, struct btrfs_path *path,
3743 int min_data_size, int data_size,
3744 int empty, u32 min_slot)
3745{
3746 struct extent_buffer *left = path->nodes[0];
3747 struct extent_buffer *right;
3748 struct extent_buffer *upper;
3749 int slot;
3750 int free_space;
3751 u32 left_nritems;
3752 int ret;
3753
3754 if (!path->nodes[1])
3755 return 1;
3756
3757 slot = path->slots[1];
3758 upper = path->nodes[1];
3759 if (slot >= btrfs_header_nritems(upper) - 1)
3760 return 1;
3761
3762 btrfs_assert_tree_locked(path->nodes[1]);
3763
3764 right = read_node_slot(root, upper, slot + 1);
3765 if (right == NULL)
3766 return 1;
3767
3768 btrfs_tree_lock(right);
3769 btrfs_set_lock_blocking(right);
3770
3771 free_space = btrfs_leaf_free_space(root, right);
3772 if (free_space < data_size)
3773 goto out_unlock;
3774
3775 /* cow and double check */
3776 ret = btrfs_cow_block(trans, root, right, upper,
3777 slot + 1, &right);
3778 if (ret)
3779 goto out_unlock;
3780
3781 free_space = btrfs_leaf_free_space(root, right);
3782 if (free_space < data_size)
3783 goto out_unlock;
3784
3785 left_nritems = btrfs_header_nritems(left);
3786 if (left_nritems == 0)
3787 goto out_unlock;
3788
3789 if (path->slots[0] == left_nritems && !empty) {
/* Key greater than all keys in the leaf, right neighbor has
 * enough room for it and we're not emptying our leaf to delete
 * it, therefore use the right neighbor to insert the new item and
 * no need to touch/dirty our left leaf. */
3794 btrfs_tree_unlock(left);
3795 free_extent_buffer(left);
3796 path->nodes[0] = right;
3797 path->slots[0] = 0;
3798 path->slots[1]++;
3799 return 0;
3800 }
3801
3802 return __push_leaf_right(trans, root, path, min_data_size, empty,
3803 right, free_space, left_nritems, min_slot);
3804out_unlock:
3805 btrfs_tree_unlock(right);
3806 free_extent_buffer(right);
3807 return 1;
3808}
3809
3810/*
3811 * push some data in the path leaf to the left, trying to free up at
3812 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3813 *
3814 * max_slot can put a limit on how far into the leaf we'll push items. The
 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3816 * items
3817 */
3818static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3819 struct btrfs_root *root,
3820 struct btrfs_path *path, int data_size,
3821 int empty, struct extent_buffer *left,
3822 int free_space, u32 right_nritems,
3823 u32 max_slot)
3824{
3825 struct btrfs_disk_key disk_key;
3826 struct extent_buffer *right = path->nodes[0];
3827 int i;
3828 int push_space = 0;
3829 int push_items = 0;
3830 struct btrfs_item *item;
3831 u32 old_left_nritems;
3832 u32 nr;
3833 int ret = 0;
3834 u32 this_item_size;
3835 u32 old_left_item_size;
3836 struct btrfs_map_token token;
3837
3838 btrfs_init_map_token(&token);
3839
3840 if (empty)
3841 nr = min(right_nritems, max_slot);
3842 else
3843 nr = min(right_nritems - 1, max_slot);
3844
3845 for (i = 0; i < nr; i++) {
3846 item = btrfs_item_nr(i);
3847
3848 if (!empty && push_items > 0) {
3849 if (path->slots[0] < i)
3850 break;
3851 if (path->slots[0] == i) {
3852 int space = btrfs_leaf_free_space(root, right);
3853 if (space + push_space * 2 > free_space)
3854 break;
3855 }
3856 }
3857
3858 if (path->slots[0] == i)
3859 push_space += data_size;
3860
3861 this_item_size = btrfs_item_size(right, item);
3862 if (this_item_size + sizeof(*item) + push_space > free_space)
3863 break;
3864
3865 push_items++;
3866 push_space += this_item_size + sizeof(*item);
3867 }
3868
3869 if (push_items == 0) {
3870 ret = 1;
3871 goto out;
3872 }
3873 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3874
3875 /* push data from right to left */
3876 copy_extent_buffer(left, right,
3877 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3878 btrfs_item_nr_offset(0),
3879 push_items * sizeof(struct btrfs_item));
3880
3881 push_space = BTRFS_LEAF_DATA_SIZE(root) -
3882 btrfs_item_offset_nr(right, push_items - 1);
3883
3884 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3885 leaf_data_end(root, left) - push_space,
3886 btrfs_leaf_data(right) +
3887 btrfs_item_offset_nr(right, push_items - 1),
3888 push_space);
3889 old_left_nritems = btrfs_header_nritems(left);
3890 BUG_ON(old_left_nritems <= 0);
3891
3892 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3893 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3894 u32 ioff;
3895
3896 item = btrfs_item_nr(i);
3897
3898 ioff = btrfs_token_item_offset(left, item, &token);
3899 btrfs_set_token_item_offset(left, item,
3900 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3901 &token);
3902 }
3903 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3904
3905 /* fixup right node */
3906 if (push_items > right_nritems)
3907 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3908 right_nritems);
3909
3910 if (push_items < right_nritems) {
3911 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3912 leaf_data_end(root, right);
3913 memmove_extent_buffer(right, btrfs_leaf_data(right) +
3914 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3915 btrfs_leaf_data(right) +
3916 leaf_data_end(root, right), push_space);
3917
3918 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3919 btrfs_item_nr_offset(push_items),
3920 (btrfs_header_nritems(right) - push_items) *
3921 sizeof(struct btrfs_item));
3922 }
3923 right_nritems -= push_items;
3924 btrfs_set_header_nritems(right, right_nritems);
3925 push_space = BTRFS_LEAF_DATA_SIZE(root);
3926 for (i = 0; i < right_nritems; i++) {
3927 item = btrfs_item_nr(i);
3928
3929 push_space = push_space - btrfs_token_item_size(right,
3930 item, &token);
3931 btrfs_set_token_item_offset(right, item, push_space, &token);
3932 }
3933
3934 btrfs_mark_buffer_dirty(left);
3935 if (right_nritems)
3936 btrfs_mark_buffer_dirty(right);
3937 else
3938 clean_tree_block(trans, root->fs_info, right);
3939
3940 btrfs_item_key(right, &disk_key, 0);
3941 fixup_low_keys(root->fs_info, path, &disk_key, 1);
3942
3943 /* then fixup the leaf pointer in the path */
3944 if (path->slots[0] < push_items) {
3945 path->slots[0] += old_left_nritems;
3946 btrfs_tree_unlock(path->nodes[0]);
3947 free_extent_buffer(path->nodes[0]);
3948 path->nodes[0] = left;
3949 path->slots[1] -= 1;
3950 } else {
3951 btrfs_tree_unlock(left);
3952 free_extent_buffer(left);
3953 path->slots[0] -= push_items;
3954 }
3955 BUG_ON(path->slots[0] < 0);
3956 return ret;
3957out:
3958 btrfs_tree_unlock(left);
3959 free_extent_buffer(left);
3960 return ret;
3961}
3962
3963/*
3964 * push some data in the path leaf to the left, trying to free up at
3965 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3966 *
3967 * max_slot can put a limit on how far into the leaf we'll push items. The
3968 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3969 * items
3970 */
3971static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3972 *root, struct btrfs_path *path, int min_data_size,
3973 int data_size, int empty, u32 max_slot)
3974{
3975 struct extent_buffer *right = path->nodes[0];
3976 struct extent_buffer *left;
3977 int slot;
3978 int free_space;
3979 u32 right_nritems;
3980 int ret = 0;
3981
3982 slot = path->slots[1];
3983 if (slot == 0)
3984 return 1;
3985 if (!path->nodes[1])
3986 return 1;
3987
3988 right_nritems = btrfs_header_nritems(right);
3989 if (right_nritems == 0)
3990 return 1;
3991
3992 btrfs_assert_tree_locked(path->nodes[1]);
3993
3994 left = read_node_slot(root, path->nodes[1], slot - 1);
3995 if (left == NULL)
3996 return 1;
3997
3998 btrfs_tree_lock(left);
3999 btrfs_set_lock_blocking(left);
4000
4001 free_space = btrfs_leaf_free_space(root, left);
4002 if (free_space < data_size) {
4003 ret = 1;
4004 goto out;
4005 }
4006
4007 /* cow and double check */
4008 ret = btrfs_cow_block(trans, root, left,
4009 path->nodes[1], slot - 1, &left);
4010 if (ret) {
4011 /* we hit -ENOSPC, but it isn't fatal here */
4012 if (ret == -ENOSPC)
4013 ret = 1;
4014 goto out;
4015 }
4016
4017 free_space = btrfs_leaf_free_space(root, left);
4018 if (free_space < data_size) {
4019 ret = 1;
4020 goto out;
4021 }
4022
4023 return __push_leaf_left(trans, root, path, min_data_size,
4024 empty, left, free_space, right_nritems,
4025 max_slot);
4026out:
4027 btrfs_tree_unlock(left);
4028 free_extent_buffer(left);
4029 return ret;
4030}
4031
4032/*
4033 * split the path's leaf in two, making sure there is at least data_size
4034 * available for the resulting leaf level of the path.
4035 */
4036static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4037 struct btrfs_root *root,
4038 struct btrfs_path *path,
4039 struct extent_buffer *l,
4040 struct extent_buffer *right,
4041 int slot, int mid, int nritems)
4042{
4043 int data_copy_size;
4044 int rt_data_off;
4045 int i;
4046 struct btrfs_disk_key disk_key;
4047 struct btrfs_map_token token;
4048
4049 btrfs_init_map_token(&token);
4050
4051 nritems = nritems - mid;
4052 btrfs_set_header_nritems(right, nritems);
4053 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
4054
4055 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4056 btrfs_item_nr_offset(mid),
4057 nritems * sizeof(struct btrfs_item));
4058
4059 copy_extent_buffer(right, l,
4060 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
4061 data_copy_size, btrfs_leaf_data(l) +
4062 leaf_data_end(root, l), data_copy_size);
4063
4064 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
4065 btrfs_item_end_nr(l, mid);
4066
4067 for (i = 0; i < nritems; i++) {
4068 struct btrfs_item *item = btrfs_item_nr(i);
4069 u32 ioff;
4070
4071 ioff = btrfs_token_item_offset(right, item, &token);
4072 btrfs_set_token_item_offset(right, item,
4073 ioff + rt_data_off, &token);
4074 }
4075
4076 btrfs_set_header_nritems(l, mid);
4077 btrfs_item_key(right, &disk_key, 0);
4078 insert_ptr(trans, root, path, &disk_key, right->start,
4079 path->slots[1] + 1, 1);
4080
4081 btrfs_mark_buffer_dirty(right);
4082 btrfs_mark_buffer_dirty(l);
4083 BUG_ON(path->slots[0] != slot);
4084
4085 if (mid <= slot) {
4086 btrfs_tree_unlock(path->nodes[0]);
4087 free_extent_buffer(path->nodes[0]);
4088 path->nodes[0] = right;
4089 path->slots[0] -= mid;
4090 path->slots[1] += 1;
4091 } else {
4092 btrfs_tree_unlock(right);
4093 free_extent_buffer(right);
4094 }
4095
4096 BUG_ON(path->slots[0] < 0);
4097}
4098
4099/*
4100 * double splits happen when we need to insert a big item in the middle
4101 * of a leaf. A double split can leave us with 3 mostly empty leaves:
4102 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4103 * A B C
4104 *
4105 * We avoid this by trying to push the items on either side of our target
4106 * into the adjacent leaves. If all goes well we can avoid the double split
4107 * completely.
4108 */
4109static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4110 struct btrfs_root *root,
4111 struct btrfs_path *path,
4112 int data_size)
4113{
4114 int ret;
4115 int progress = 0;
4116 int slot;
4117 u32 nritems;
4118 int space_needed = data_size;
4119
4120 slot = path->slots[0];
4121 if (slot < btrfs_header_nritems(path->nodes[0]))
4122 space_needed -= btrfs_leaf_free_space(root, path->nodes[0]);
4123
4124 /*
4125 * try to push all the items after our slot into the
4126 * right leaf
4127 */
4128 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4129 if (ret < 0)
4130 return ret;
4131
4132 if (ret == 0)
4133 progress++;
4134
4135 nritems = btrfs_header_nritems(path->nodes[0]);
4136 /*
4137 * our goal is to get our slot at the start or end of a leaf. If
4138 * we've done so we're done
4139 */
4140 if (path->slots[0] == 0 || path->slots[0] == nritems)
4141 return 0;
4142
4143 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4144 return 0;
4145
4146 /* try to push all the items before our slot into the next leaf */
4147 slot = path->slots[0];
4148 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4149 if (ret < 0)
4150 return ret;
4151
4152 if (ret == 0)
4153 progress++;
4154
4155 if (progress)
4156 return 0;
4157 return 1;
4158}
4159
4160/*
4161 * split the path's leaf in two, making sure there is at least data_size
4162 * available for the resulting leaf level of the path.
4163 *
4164 * returns 0 if all went well and < 0 on failure.
4165 */
4166static noinline int split_leaf(struct btrfs_trans_handle *trans,
4167 struct btrfs_root *root,
4168 struct btrfs_key *ins_key,
4169 struct btrfs_path *path, int data_size,
4170 int extend)
4171{
4172 struct btrfs_disk_key disk_key;
4173 struct extent_buffer *l;
4174 u32 nritems;
4175 int mid;
4176 int slot;
4177 struct extent_buffer *right;
4178 struct btrfs_fs_info *fs_info = root->fs_info;
4179 int ret = 0;
4180 int wret;
4181 int split;
4182 int num_doubles = 0;
4183 int tried_avoid_double = 0;
4184
4185 l = path->nodes[0];
4186 slot = path->slots[0];
4187 if (extend && data_size + btrfs_item_size_nr(l, slot) +
4188 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
4189 return -EOVERFLOW;
4190
4191 /* first try to make some room by pushing left and right */
4192 if (data_size && path->nodes[1]) {
4193 int space_needed = data_size;
4194
4195 if (slot < btrfs_header_nritems(l))
4196 space_needed -= btrfs_leaf_free_space(root, l);
4197
4198 wret = push_leaf_right(trans, root, path, space_needed,
4199 space_needed, 0, 0);
4200 if (wret < 0)
4201 return wret;
4202 if (wret) {
4203 wret = push_leaf_left(trans, root, path, space_needed,
4204 space_needed, 0, (u32)-1);
4205 if (wret < 0)
4206 return wret;
4207 }
4208 l = path->nodes[0];
4209
4210 /* did the pushes work? */
4211 if (btrfs_leaf_free_space(root, l) >= data_size)
4212 return 0;
4213 }
4214
4215 if (!path->nodes[1]) {
4216 ret = insert_new_root(trans, root, path, 1);
4217 if (ret)
4218 return ret;
4219 }
4220again:
4221 split = 1;
4222 l = path->nodes[0];
4223 slot = path->slots[0];
4224 nritems = btrfs_header_nritems(l);
4225 mid = (nritems + 1) / 2;
4226
4227 if (mid <= slot) {
4228 if (nritems == 1 ||
4229 leaf_space_used(l, mid, nritems - mid) + data_size >
4230 BTRFS_LEAF_DATA_SIZE(root)) {
4231 if (slot >= nritems) {
4232 split = 0;
4233 } else {
4234 mid = slot;
4235 if (mid != nritems &&
4236 leaf_space_used(l, mid, nritems - mid) +
4237 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4238 if (data_size && !tried_avoid_double)
4239 goto push_for_double;
4240 split = 2;
4241 }
4242 }
4243 }
4244 } else {
4245 if (leaf_space_used(l, 0, mid) + data_size >
4246 BTRFS_LEAF_DATA_SIZE(root)) {
4247 if (!extend && data_size && slot == 0) {
4248 split = 0;
4249 } else if ((extend || !data_size) && slot == 0) {
4250 mid = 1;
4251 } else {
4252 mid = slot;
4253 if (mid != nritems &&
4254 leaf_space_used(l, mid, nritems - mid) +
4255 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4256 if (data_size && !tried_avoid_double)
4257 goto push_for_double;
4258 split = 2;
4259 }
4260 }
4261 }
4262 }
4263
4264 if (split == 0)
4265 btrfs_cpu_key_to_disk(&disk_key, ins_key);
4266 else
4267 btrfs_item_key(l, &disk_key, mid);
4268
4269 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
4270 &disk_key, 0, l->start, 0);
4271 if (IS_ERR(right))
4272 return PTR_ERR(right);
4273
4274 root_add_used(root, root->nodesize);
4275
4276 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
4277 btrfs_set_header_bytenr(right, right->start);
4278 btrfs_set_header_generation(right, trans->transid);
4279 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4280 btrfs_set_header_owner(right, root->root_key.objectid);
4281 btrfs_set_header_level(right, 0);
4282 write_extent_buffer(right, fs_info->fsid,
4283 btrfs_header_fsid(), BTRFS_FSID_SIZE);
4284
4285 write_extent_buffer(right, fs_info->chunk_tree_uuid,
4286 btrfs_header_chunk_tree_uuid(right),
4287 BTRFS_UUID_SIZE);
4288
4289 if (split == 0) {
4290 if (mid <= slot) {
4291 btrfs_set_header_nritems(right, 0);
4292 insert_ptr(trans, root, path, &disk_key, right->start,
4293 path->slots[1] + 1, 1);
4294 btrfs_tree_unlock(path->nodes[0]);
4295 free_extent_buffer(path->nodes[0]);
4296 path->nodes[0] = right;
4297 path->slots[0] = 0;
4298 path->slots[1] += 1;
4299 } else {
4300 btrfs_set_header_nritems(right, 0);
4301 insert_ptr(trans, root, path, &disk_key, right->start,
4302 path->slots[1], 1);
4303 btrfs_tree_unlock(path->nodes[0]);
4304 free_extent_buffer(path->nodes[0]);
4305 path->nodes[0] = right;
4306 path->slots[0] = 0;
4307 if (path->slots[1] == 0)
4308 fixup_low_keys(fs_info, path, &disk_key, 1);
4309 }
4310 btrfs_mark_buffer_dirty(right);
4311 return ret;
4312 }
4313
4314 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
4315
4316 if (split == 2) {
4317 BUG_ON(num_doubles != 0);
4318 num_doubles++;
4319 goto again;
4320 }
4321
4322 return 0;
4323
4324push_for_double:
4325 push_for_double_split(trans, root, path, data_size);
4326 tried_avoid_double = 1;
4327 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4328 return 0;
4329 goto again;
4330}
4331
4332static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4333 struct btrfs_root *root,
4334 struct btrfs_path *path, int ins_len)
4335{
4336 struct btrfs_key key;
4337 struct extent_buffer *leaf;
4338 struct btrfs_file_extent_item *fi;
4339 u64 extent_len = 0;
4340 u32 item_size;
4341 int ret;
4342
4343 leaf = path->nodes[0];
4344 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4345
4346 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4347 key.type != BTRFS_EXTENT_CSUM_KEY);
4348
4349 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
4350 return 0;
4351
4352 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4353 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4354 fi = btrfs_item_ptr(leaf, path->slots[0],
4355 struct btrfs_file_extent_item);
4356 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4357 }
4358 btrfs_release_path(path);
4359
4360 path->keep_locks = 1;
4361 path->search_for_split = 1;
4362 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4363 path->search_for_split = 0;
4364 if (ret > 0)
4365 ret = -EAGAIN;
4366 if (ret < 0)
4367 goto err;
4368
4369 ret = -EAGAIN;
4370 leaf = path->nodes[0];
4371 /* if our item isn't there, return now */
4372 if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4373 goto err;
4374
4375 /* the leaf has changed, it now has room. return now */
4376 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
4377 goto err;
4378
4379 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4380 fi = btrfs_item_ptr(leaf, path->slots[0],
4381 struct btrfs_file_extent_item);
4382 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4383 goto err;
4384 }
4385
4386 btrfs_set_path_blocking(path);
4387 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4388 if (ret)
4389 goto err;
4390
4391 path->keep_locks = 0;
4392 btrfs_unlock_up_safe(path, 1);
4393 return 0;
4394err:
4395 path->keep_locks = 0;
4396 return ret;
4397}
4398
4399static noinline int split_item(struct btrfs_trans_handle *trans,
4400 struct btrfs_root *root,
4401 struct btrfs_path *path,
4402 struct btrfs_key *new_key,
4403 unsigned long split_offset)
4404{
4405 struct extent_buffer *leaf;
4406 struct btrfs_item *item;
4407 struct btrfs_item *new_item;
4408 int slot;
4409 char *buf;
4410 u32 nritems;
4411 u32 item_size;
4412 u32 orig_offset;
4413 struct btrfs_disk_key disk_key;
4414
4415 leaf = path->nodes[0];
4416 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
4417
4418 btrfs_set_path_blocking(path);
4419
4420 item = btrfs_item_nr(path->slots[0]);
4421 orig_offset = btrfs_item_offset(leaf, item);
4422 item_size = btrfs_item_size(leaf, item);
4423
4424 buf = kmalloc(item_size, GFP_NOFS);
4425 if (!buf)
4426 return -ENOMEM;
4427
4428 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4429 path->slots[0]), item_size);
4430
4431 slot = path->slots[0] + 1;
4432 nritems = btrfs_header_nritems(leaf);
4433 if (slot != nritems) {
4434 /* shift the items */
4435 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4436 btrfs_item_nr_offset(slot),
4437 (nritems - slot) * sizeof(struct btrfs_item));
4438 }
4439
4440 btrfs_cpu_key_to_disk(&disk_key, new_key);
4441 btrfs_set_item_key(leaf, &disk_key, slot);
4442
4443 new_item = btrfs_item_nr(slot);
4444
4445 btrfs_set_item_offset(leaf, new_item, orig_offset);
4446 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4447
4448 btrfs_set_item_offset(leaf, item,
4449 orig_offset + item_size - split_offset);
4450 btrfs_set_item_size(leaf, item, split_offset);
4451
4452 btrfs_set_header_nritems(leaf, nritems + 1);
4453
4454 /* write the data for the start of the original item */
4455 write_extent_buffer(leaf, buf,
4456 btrfs_item_ptr_offset(leaf, path->slots[0]),
4457 split_offset);
4458
4459 /* write the data for the new item */
4460 write_extent_buffer(leaf, buf + split_offset,
4461 btrfs_item_ptr_offset(leaf, slot),
4462 item_size - split_offset);
4463 btrfs_mark_buffer_dirty(leaf);
4464
4465 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
4466 kfree(buf);
4467 return 0;
4468}
4469
4470/*
4471 * This function splits a single item into two items,
4472 * giving 'new_key' to the new item and splitting the
4473 * old one at split_offset (from the start of the item).
4474 *
4475 * The path may be released by this operation. After
4476 * the split, the path is pointing to the old item. The
4477 * new item is going to be in the same node as the old one.
4478 *
 * Note, the item being split must be small enough to live alone on
4480 * a tree block with room for one extra struct btrfs_item
4481 *
4482 * This allows us to split the item in place, keeping a lock on the
4483 * leaf the entire time.
4484 */
4485int btrfs_split_item(struct btrfs_trans_handle *trans,
4486 struct btrfs_root *root,
4487 struct btrfs_path *path,
4488 struct btrfs_key *new_key,
4489 unsigned long split_offset)
4490{
4491 int ret;
4492 ret = setup_leaf_for_split(trans, root, path,
4493 sizeof(struct btrfs_item));
4494 if (ret)
4495 return ret;
4496
4497 ret = split_item(trans, root, path, new_key, split_offset);
4498 return ret;
4499}
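
/*
 * Example usage (illustrative sketch): split the item at path->slots[0]
 * so the bytes from @split_offset onwards move into a new item.  The key
 * adjustment depends on the item type, so the offset math below is only
 * an example:
 *
 *	struct btrfs_key new_key = orig_key;
 *
 *	new_key.offset = orig_key.offset + split_offset;
 *	ret = btrfs_split_item(trans, root, path, &new_key, split_offset);
 *
 * A return of -EAGAIN means the leaf changed while the path was released
 * and the caller should redo its search.
 */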
4500
4501/*
 * This function duplicates an item, giving 'new_key' to the new item.
4503 * It guarantees both items live in the same tree leaf and the new item
4504 * is contiguous with the original item.
4505 *
 * This allows us to split a file extent in place, keeping a lock on the
4507 * leaf the entire time.
4508 */
4509int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4510 struct btrfs_root *root,
4511 struct btrfs_path *path,
4512 struct btrfs_key *new_key)
4513{
4514 struct extent_buffer *leaf;
4515 int ret;
4516 u32 item_size;
4517
4518 leaf = path->nodes[0];
4519 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4520 ret = setup_leaf_for_split(trans, root, path,
4521 item_size + sizeof(struct btrfs_item));
4522 if (ret)
4523 return ret;
4524
4525 path->slots[0]++;
4526 setup_items_for_insert(root, path, new_key, &item_size,
4527 item_size, item_size +
4528 sizeof(struct btrfs_item), 1);
4529 leaf = path->nodes[0];
4530 memcpy_extent_buffer(leaf,
4531 btrfs_item_ptr_offset(leaf, path->slots[0]),
4532 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4533 item_size);
4534 return 0;
4535}
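
/*
 * Example usage (illustrative sketch, modelled on splitting a file
 * extent in place; "ino" and "split_pos" are placeholders):
 *
 *	new_key.objectid = ino;
 *	new_key.type = BTRFS_EXTENT_DATA_KEY;
 *	new_key.offset = split_pos;
 *	ret = btrfs_duplicate_item(trans, root, path, &new_key);
 *
 * After a successful return the path points at the new (duplicated) item
 * and the caller can trim both copies.  -EAGAIN means the leaf changed
 * underneath us and the search must be redone.
 */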
4536
4537/*
4538 * make the item pointed to by the path smaller. new_size indicates
4539 * how small to make it, and from_end tells us if we just chop bytes
4540 * off the end of the item or if we shift the item to chop bytes off
4541 * the front.
4542 */
4543void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
4544 u32 new_size, int from_end)
4545{
4546 int slot;
4547 struct extent_buffer *leaf;
4548 struct btrfs_item *item;
4549 u32 nritems;
4550 unsigned int data_end;
4551 unsigned int old_data_start;
4552 unsigned int old_size;
4553 unsigned int size_diff;
4554 int i;
4555 struct btrfs_map_token token;
4556
4557 btrfs_init_map_token(&token);
4558
4559 leaf = path->nodes[0];
4560 slot = path->slots[0];
4561
4562 old_size = btrfs_item_size_nr(leaf, slot);
4563 if (old_size == new_size)
4564 return;
4565
4566 nritems = btrfs_header_nritems(leaf);
4567 data_end = leaf_data_end(root, leaf);
4568
4569 old_data_start = btrfs_item_offset_nr(leaf, slot);
4570
4571 size_diff = old_size - new_size;
4572
4573 BUG_ON(slot < 0);
4574 BUG_ON(slot >= nritems);
4575
4576 /*
4577 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4578 */
4579 /* first correct the data pointers */
4580 for (i = slot; i < nritems; i++) {
4581 u32 ioff;
4582 item = btrfs_item_nr(i);
4583
4584 ioff = btrfs_token_item_offset(leaf, item, &token);
4585 btrfs_set_token_item_offset(leaf, item,
4586 ioff + size_diff, &token);
4587 }
4588
4589 /* shift the data */
4590 if (from_end) {
4591 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4592 data_end + size_diff, btrfs_leaf_data(leaf) +
4593 data_end, old_data_start + new_size - data_end);
4594 } else {
4595 struct btrfs_disk_key disk_key;
4596 u64 offset;
4597
4598 btrfs_item_key(leaf, &disk_key, slot);
4599
4600 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4601 unsigned long ptr;
4602 struct btrfs_file_extent_item *fi;
4603
4604 fi = btrfs_item_ptr(leaf, slot,
4605 struct btrfs_file_extent_item);
4606 fi = (struct btrfs_file_extent_item *)(
4607 (unsigned long)fi - size_diff);
4608
4609 if (btrfs_file_extent_type(leaf, fi) ==
4610 BTRFS_FILE_EXTENT_INLINE) {
4611 ptr = btrfs_item_ptr_offset(leaf, slot);
4612 memmove_extent_buffer(leaf, ptr,
4613 (unsigned long)fi,
4614 BTRFS_FILE_EXTENT_INLINE_DATA_START);
4615 }
4616 }
4617
4618 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4619 data_end + size_diff, btrfs_leaf_data(leaf) +
4620 data_end, old_data_start - data_end);
4621
4622 offset = btrfs_disk_key_offset(&disk_key);
4623 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4624 btrfs_set_item_key(leaf, &disk_key, slot);
4625 if (slot == 0)
4626 fixup_low_keys(root->fs_info, path, &disk_key, 1);
4627 }
4628
4629 item = btrfs_item_nr(slot);
4630 btrfs_set_item_size(leaf, item, new_size);
4631 btrfs_mark_buffer_dirty(leaf);
4632
4633 if (btrfs_leaf_free_space(root, leaf) < 0) {
4634 btrfs_print_leaf(root, leaf);
4635 BUG();
4636 }
4637}
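
/*
 * Example usage (illustrative sketch): chop "diff" bytes off the end of
 * the item the path points to, e.g. when truncating a csum item:
 *
 *	u32 new_size = btrfs_item_size_nr(leaf, path->slots[0]) - diff;
 *
 *	btrfs_truncate_item(root, path, new_size, 1);
 *
 * Passing from_end == 0 instead removes the bytes from the front and
 * bumps the item key's offset accordingly (see the disk_key handling
 * above).
 */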
4638
4639/*
4640 * make the item pointed to by the path bigger, data_size is the added size.
4641 */
4642void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
4643 u32 data_size)
4644{
4645 int slot;
4646 struct extent_buffer *leaf;
4647 struct btrfs_item *item;
4648 u32 nritems;
4649 unsigned int data_end;
4650 unsigned int old_data;
4651 unsigned int old_size;
4652 int i;
4653 struct btrfs_map_token token;
4654
4655 btrfs_init_map_token(&token);
4656
4657 leaf = path->nodes[0];
4658
4659 nritems = btrfs_header_nritems(leaf);
4660 data_end = leaf_data_end(root, leaf);
4661
4662 if (btrfs_leaf_free_space(root, leaf) < data_size) {
4663 btrfs_print_leaf(root, leaf);
4664 BUG();
4665 }
4666 slot = path->slots[0];
4667 old_data = btrfs_item_end_nr(leaf, slot);
4668
4669 BUG_ON(slot < 0);
4670 if (slot >= nritems) {
4671 btrfs_print_leaf(root, leaf);
4672 btrfs_crit(root->fs_info, "slot %d too large, nritems %d",
4673 slot, nritems);
4674 BUG_ON(1);
4675 }
4676
4677 /*
4678 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4679 */
4680 /* first correct the data pointers */
4681 for (i = slot; i < nritems; i++) {
4682 u32 ioff;
4683 item = btrfs_item_nr(i);
4684
4685 ioff = btrfs_token_item_offset(leaf, item, &token);
4686 btrfs_set_token_item_offset(leaf, item,
4687 ioff - data_size, &token);
4688 }
4689
4690 /* shift the data */
4691 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4692 data_end - data_size, btrfs_leaf_data(leaf) +
4693 data_end, old_data - data_end);
4694
4695 data_end = old_data;
4696 old_size = btrfs_item_size_nr(leaf, slot);
4697 item = btrfs_item_nr(slot);
4698 btrfs_set_item_size(leaf, item, old_size + data_size);
4699 btrfs_mark_buffer_dirty(leaf);
4700
4701 if (btrfs_leaf_free_space(root, leaf) < 0) {
4702 btrfs_print_leaf(root, leaf);
4703 BUG();
4704 }
4705}
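
/*
 * Example usage (illustrative sketch): grow the item the path points to
 * by "add" bytes and locate the newly created space at its end:
 *
 *	btrfs_extend_item(root, path, add);
 *	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]) +
 *	      btrfs_item_size_nr(leaf, path->slots[0]) - add;
 *
 * The caller must have made sure the leaf has at least "add" bytes of
 * free space, typically by searching with ins_len set accordingly.
 */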
4706
4707/*
4708 * this is a helper for btrfs_insert_empty_items, the main goal here is
4709 * to save stack depth by doing the bulk of the work in a function
4710 * that doesn't call btrfs_search_slot
4711 */
4712void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4713 struct btrfs_key *cpu_key, u32 *data_size,
4714 u32 total_data, u32 total_size, int nr)
4715{
4716 struct btrfs_item *item;
4717 int i;
4718 u32 nritems;
4719 unsigned int data_end;
4720 struct btrfs_disk_key disk_key;
4721 struct extent_buffer *leaf;
4722 int slot;
4723 struct btrfs_map_token token;
4724
4725 if (path->slots[0] == 0) {
4726 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4727 fixup_low_keys(root->fs_info, path, &disk_key, 1);
4728 }
4729 btrfs_unlock_up_safe(path, 1);
4730
4731 btrfs_init_map_token(&token);
4732
4733 leaf = path->nodes[0];
4734 slot = path->slots[0];
4735
4736 nritems = btrfs_header_nritems(leaf);
4737 data_end = leaf_data_end(root, leaf);
4738
4739 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4740 btrfs_print_leaf(root, leaf);
4741 btrfs_crit(root->fs_info, "not enough freespace need %u have %d",
4742 total_size, btrfs_leaf_free_space(root, leaf));
4743 BUG();
4744 }
4745
4746 if (slot != nritems) {
4747 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4748
4749 if (old_data < data_end) {
4750 btrfs_print_leaf(root, leaf);
4751 btrfs_crit(root->fs_info, "slot %d old_data %d data_end %d",
4752 slot, old_data, data_end);
4753 BUG_ON(1);
4754 }
4755 /*
4756 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4757 */
4758 /* first correct the data pointers */
4759 for (i = slot; i < nritems; i++) {
4760 u32 ioff;
4761
4762			item = btrfs_item_nr(i);
4763 ioff = btrfs_token_item_offset(leaf, item, &token);
4764 btrfs_set_token_item_offset(leaf, item,
4765 ioff - total_data, &token);
4766 }
4767 /* shift the items */
4768 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4769 btrfs_item_nr_offset(slot),
4770 (nritems - slot) * sizeof(struct btrfs_item));
4771
4772 /* shift the data */
4773 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4774 data_end - total_data, btrfs_leaf_data(leaf) +
4775 data_end, old_data - data_end);
4776 data_end = old_data;
4777 }
4778
4779 /* setup the item for the new data */
4780 for (i = 0; i < nr; i++) {
4781 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4782 btrfs_set_item_key(leaf, &disk_key, slot + i);
4783 item = btrfs_item_nr(slot + i);
4784 btrfs_set_token_item_offset(leaf, item,
4785 data_end - data_size[i], &token);
4786 data_end -= data_size[i];
4787 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4788 }
4789
4790 btrfs_set_header_nritems(leaf, nritems + nr);
4791 btrfs_mark_buffer_dirty(leaf);
4792
4793 if (btrfs_leaf_free_space(root, leaf) < 0) {
4794 btrfs_print_leaf(root, leaf);
4795 BUG();
4796 }
4797}
4798
4799/*
4800 * Given a key and some data, insert items into the tree.
4801 * This does all the path init required, making room in the tree if needed.
4802 */
4803int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4804 struct btrfs_root *root,
4805 struct btrfs_path *path,
4806 struct btrfs_key *cpu_key, u32 *data_size,
4807 int nr)
4808{
4809 int ret = 0;
4810 int slot;
4811 int i;
4812 u32 total_size = 0;
4813 u32 total_data = 0;
4814
4815 for (i = 0; i < nr; i++)
4816 total_data += data_size[i];
4817
4818 total_size = total_data + (nr * sizeof(struct btrfs_item));
4819 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4820 if (ret == 0)
4821 return -EEXIST;
4822 if (ret < 0)
4823 return ret;
4824
4825 slot = path->slots[0];
4826 BUG_ON(slot < 0);
4827
4828 setup_items_for_insert(root, path, cpu_key, data_size,
4829 total_data, total_size, nr);
4830 return 0;
4831}
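
/*
 * Usage sketch (illustrative only, not from the original source): inserting
 * two adjacent empty items with one leaf operation and filling them
 * afterwards.  The key values and sizes are placeholders; the keys must be
 * set up in ascending order.
 *
 *	struct btrfs_key keys[2];
 *	u32 sizes[2] = { 16, 32 };
 *
 *	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
 *	if (ret)
 *		return ret;
 *
 * On success path->slots[0] points at the first of the new items; the item
 * data can be written via btrfs_item_ptr_offset() and write_extent_buffer(),
 * followed by btrfs_mark_buffer_dirty() on the leaf.
 */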
4832
4833/*
4834 * Given a key and some data, insert an item into the tree.
4835 * This does all the path init required, making room in the tree if needed.
4836 */
4837int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4838 *root, struct btrfs_key *cpu_key, void *data, u32
4839 data_size)
4840{
4841 int ret = 0;
4842 struct btrfs_path *path;
4843 struct extent_buffer *leaf;
4844 unsigned long ptr;
4845
4846 path = btrfs_alloc_path();
4847 if (!path)
4848 return -ENOMEM;
4849 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4850 if (!ret) {
4851 leaf = path->nodes[0];
4852 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4853 write_extent_buffer(leaf, data, ptr, data_size);
4854 btrfs_mark_buffer_dirty(leaf);
4855 }
4856 btrfs_free_path(path);
4857 return ret;
4858}
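
/*
 * Usage sketch (illustrative only, not from the original source): inserting a
 * small, fully formed item in a single call.  'objectid', 'item_type', 'blob'
 * and 'blob_len' are placeholder names.
 *
 *	struct btrfs_key key;
 *
 *	key.objectid = objectid;
 *	key.type = item_type;
 *	key.offset = 0;
 *	ret = btrfs_insert_item(trans, root, &key, blob, blob_len);
 *
 * A return value of -EEXIST means an item with exactly this key is already
 * present; callers that want to update in place typically search for the key
 * and rewrite the item data instead.
 */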
4859
4860/*
4861 * delete the pointer from a given node.
4862 *
4863 * the tree should have been previously balanced so the deletion does not
4864 * empty a node.
4865 */
4866static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4867 int level, int slot)
4868{
4869 struct extent_buffer *parent = path->nodes[level];
4870 u32 nritems;
4871 int ret;
4872
4873 nritems = btrfs_header_nritems(parent);
4874 if (slot != nritems - 1) {
4875 if (level)
4876 tree_mod_log_eb_move(root->fs_info, parent, slot,
4877 slot + 1, nritems - slot - 1);
4878 memmove_extent_buffer(parent,
4879 btrfs_node_key_ptr_offset(slot),
4880 btrfs_node_key_ptr_offset(slot + 1),
4881 sizeof(struct btrfs_key_ptr) *
4882 (nritems - slot - 1));
4883 } else if (level) {
4884 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4885 MOD_LOG_KEY_REMOVE, GFP_NOFS);
4886 BUG_ON(ret < 0);
4887 }
4888
4889 nritems--;
4890 btrfs_set_header_nritems(parent, nritems);
4891 if (nritems == 0 && parent == root->node) {
4892 BUG_ON(btrfs_header_level(root->node) != 1);
4893 /* just turn the root into a leaf and break */
4894 btrfs_set_header_level(root->node, 0);
4895 } else if (slot == 0) {
4896 struct btrfs_disk_key disk_key;
4897
4898 btrfs_node_key(parent, &disk_key, 0);
4899 fixup_low_keys(root->fs_info, path, &disk_key, level + 1);
4900 }
4901 btrfs_mark_buffer_dirty(parent);
4902}
4903
4904/*
4905 * a helper function to delete the leaf pointed to by path->slots[1] and
4906 * path->nodes[1].
4907 *
4908 * This deletes the pointer in path->nodes[1] and frees the leaf
4909 * block extent. zero is returned if it all worked out, < 0 otherwise.
4910 *
4911 * The path must have already been setup for deleting the leaf, including
4912 * all the proper balancing. path->nodes[1] must be locked.
4913 */
4914static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4915 struct btrfs_root *root,
4916 struct btrfs_path *path,
4917 struct extent_buffer *leaf)
4918{
4919 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4920 del_ptr(root, path, 1, path->slots[1]);
4921
4922 /*
4923	 * btrfs_free_extent is expensive; we want to make sure we
4924	 * aren't holding any locks when we call it.
4925 */
4926 btrfs_unlock_up_safe(path, 0);
4927
4928 root_sub_used(root, leaf->len);
4929
4930 extent_buffer_get(leaf);
4931 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4932 free_extent_buffer_stale(leaf);
4933}
4934/*
4935 * delete the item at the leaf level in path. If that empties
4936 * the leaf, remove it from the tree
4937 */
4938int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4939 struct btrfs_path *path, int slot, int nr)
4940{
4941 struct extent_buffer *leaf;
4942 struct btrfs_item *item;
4943 u32 last_off;
4944 u32 dsize = 0;
4945 int ret = 0;
4946 int wret;
4947 int i;
4948 u32 nritems;
4949 struct btrfs_map_token token;
4950
4951 btrfs_init_map_token(&token);
4952
4953 leaf = path->nodes[0];
4954 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4955
4956 for (i = 0; i < nr; i++)
4957 dsize += btrfs_item_size_nr(leaf, slot + i);
4958
4959 nritems = btrfs_header_nritems(leaf);
4960
4961 if (slot + nr != nritems) {
4962 int data_end = leaf_data_end(root, leaf);
4963
4964 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4965 data_end + dsize,
4966 btrfs_leaf_data(leaf) + data_end,
4967 last_off - data_end);
4968
4969 for (i = slot + nr; i < nritems; i++) {
4970 u32 ioff;
4971
4972 item = btrfs_item_nr(i);
4973 ioff = btrfs_token_item_offset(leaf, item, &token);
4974 btrfs_set_token_item_offset(leaf, item,
4975 ioff + dsize, &token);
4976 }
4977
4978 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4979 btrfs_item_nr_offset(slot + nr),
4980 sizeof(struct btrfs_item) *
4981 (nritems - slot - nr));
4982 }
4983 btrfs_set_header_nritems(leaf, nritems - nr);
4984 nritems -= nr;
4985
4986 /* delete the leaf if we've emptied it */
4987 if (nritems == 0) {
4988 if (leaf == root->node) {
4989 btrfs_set_header_level(leaf, 0);
4990 } else {
4991 btrfs_set_path_blocking(path);
4992 clean_tree_block(trans, root->fs_info, leaf);
4993 btrfs_del_leaf(trans, root, path, leaf);
4994 }
4995 } else {
4996 int used = leaf_space_used(leaf, 0, nritems);
4997 if (slot == 0) {
4998 struct btrfs_disk_key disk_key;
4999
5000 btrfs_item_key(leaf, &disk_key, 0);
5001 fixup_low_keys(root->fs_info, path, &disk_key, 1);
5002 }
5003
5004 /* delete the leaf if it is mostly empty */
5005 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
5006 /* push_leaf_left fixes the path.
5007 * make sure the path still points to our leaf
5008 * for possible call to del_ptr below
5009 */
5010 slot = path->slots[1];
5011 extent_buffer_get(leaf);
5012
5013 btrfs_set_path_blocking(path);
5014 wret = push_leaf_left(trans, root, path, 1, 1,
5015 1, (u32)-1);
5016 if (wret < 0 && wret != -ENOSPC)
5017 ret = wret;
5018
5019 if (path->nodes[0] == leaf &&
5020 btrfs_header_nritems(leaf)) {
5021 wret = push_leaf_right(trans, root, path, 1,
5022 1, 1, 0);
5023 if (wret < 0 && wret != -ENOSPC)
5024 ret = wret;
5025 }
5026
5027 if (btrfs_header_nritems(leaf) == 0) {
5028 path->slots[1] = slot;
5029 btrfs_del_leaf(trans, root, path, leaf);
5030 free_extent_buffer(leaf);
5031 ret = 0;
5032 } else {
5033 /* if we're still in the path, make sure
5034 * we're dirty. Otherwise, one of the
5035 * push_leaf functions must have already
5036 * dirtied this buffer
5037 */
5038 if (path->nodes[0] == leaf)
5039 btrfs_mark_buffer_dirty(leaf);
5040 free_extent_buffer(leaf);
5041 }
5042 } else {
5043 btrfs_mark_buffer_dirty(leaf);
5044 }
5045 }
5046 return ret;
5047}
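
/*
 * Usage sketch (illustrative only, not from the original source): deleting a
 * single item found by key.  Passing -1 as ins_len tells btrfs_search_slot()
 * the caller intends to remove something, so the path is COWed and prepared
 * for the deletion.
 *
 *	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 *	if (ret < 0)
 *		return ret;
 *	if (ret > 0) {
 *		btrfs_release_path(path);
 *		return -ENOENT;
 *	}
 *	ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
 *	btrfs_release_path(path);
 */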
5048
5049/*
5050 * search the tree again to find a leaf with lesser keys
5051 * returns 0 if it found something or 1 if there are no lesser leaves.
5052 * returns < 0 on io errors.
5053 *
5054 * This may release the path, and so you may lose any locks held at the
5055 * time you call it.
5056 */
5057int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5058{
5059 struct btrfs_key key;
5060 struct btrfs_disk_key found_key;
5061 int ret;
5062
5063 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5064
5065 if (key.offset > 0) {
5066 key.offset--;
5067 } else if (key.type > 0) {
5068 key.type--;
5069 key.offset = (u64)-1;
5070 } else if (key.objectid > 0) {
5071 key.objectid--;
5072 key.type = (u8)-1;
5073 key.offset = (u64)-1;
5074 } else {
5075 return 1;
5076 }
5077
5078 btrfs_release_path(path);
5079 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5080 if (ret < 0)
5081 return ret;
5082 btrfs_item_key(path->nodes[0], &found_key, 0);
5083 ret = comp_keys(&found_key, &key);
5084 /*
5085 * We might have had an item with the previous key in the tree right
5086 * before we released our path. And after we released our path, that
5087 * item might have been pushed to the first slot (0) of the leaf we
5088 * were holding due to a tree balance. Alternatively, an item with the
5089 * previous key can exist as the only element of a leaf (big fat item).
5090 * Therefore account for these 2 cases, so that our callers (like
5091 * btrfs_previous_item) don't miss an existing item with a key matching
5092 * the previous key we computed above.
5093 */
5094 if (ret <= 0)
5095 return 0;
5096 return 1;
5097}
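
/*
 * Usage sketch (illustrative only, not from the original source): stepping one
 * item backwards from the current path position, crossing a leaf boundary when
 * required.  This mirrors the loop body of btrfs_previous_item() further down.
 *
 *	if (path->slots[0] == 0) {
 *		ret = btrfs_prev_leaf(root, path);
 *		if (ret)
 *			return ret;
 *	} else {
 *		path->slots[0]--;
 *	}
 *	if (path->slots[0] == btrfs_header_nritems(path->nodes[0]))
 *		path->slots[0]--;
 *
 * A return of 1 from btrfs_prev_leaf() means there is no lesser leaf; note
 * that the call may release the path, so locks held before it can be lost.
 */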
5098
5099/*
5100 * A helper function to walk down the tree starting at min_key, and looking
5101 * for nodes or leaves that have a minimum transaction id.
5102 * This is used by the btree defrag code, and tree logging
5103 *
5104 * This does not cow, but it does stuff the starting key it finds back
5105 * into min_key, so you can call btrfs_search_slot with cow=1 on the
5106 * key and get a writable path.
5107 *
5108 * This does lock as it descends. path->keep_locks is saved, set to 1
5109 * internally and restored before returning, so callers need not set it.
5110 *
5111 * This honors path->lowest_level to prevent descent past a given level
5112 * of the tree.
5113 *
5114 * min_trans indicates the oldest transaction that you are interested
5115 * in walking through. Any nodes or leaves older than min_trans are
5116 * skipped over (without reading them).
5117 *
5118 * returns zero if something useful was found, < 0 on error and 1 if there
5119 * was nothing in the tree that matched the search criteria.
5120 */
5121int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5122 struct btrfs_path *path,
5123 u64 min_trans)
5124{
5125 struct extent_buffer *cur;
5126 struct btrfs_key found_key;
5127 int slot;
5128 int sret;
5129 u32 nritems;
5130 int level;
5131 int ret = 1;
5132 int keep_locks = path->keep_locks;
5133
5134 path->keep_locks = 1;
5135again:
5136 cur = btrfs_read_lock_root_node(root);
5137 level = btrfs_header_level(cur);
5138 WARN_ON(path->nodes[level]);
5139 path->nodes[level] = cur;
5140 path->locks[level] = BTRFS_READ_LOCK;
5141
5142 if (btrfs_header_generation(cur) < min_trans) {
5143 ret = 1;
5144 goto out;
5145 }
5146 while (1) {
5147 nritems = btrfs_header_nritems(cur);
5148 level = btrfs_header_level(cur);
5149 sret = bin_search(cur, min_key, level, &slot);
5150
5151 /* at the lowest level, we're done, setup the path and exit */
5152 if (level == path->lowest_level) {
5153 if (slot >= nritems)
5154 goto find_next_key;
5155 ret = 0;
5156 path->slots[level] = slot;
5157 btrfs_item_key_to_cpu(cur, &found_key, slot);
5158 goto out;
5159 }
5160 if (sret && slot > 0)
5161 slot--;
5162 /*
5163		 * check this node pointer against the min_trans parameter.
5164		 * If it is too old, skip to the next one.
5165 */
5166 while (slot < nritems) {
5167 u64 gen;
5168
5169 gen = btrfs_node_ptr_generation(cur, slot);
5170 if (gen < min_trans) {
5171 slot++;
5172 continue;
5173 }
5174 break;
5175 }
5176find_next_key:
5177 /*
5178 * we didn't find a candidate key in this node, walk forward
5179 * and find another one
5180 */
5181 if (slot >= nritems) {
5182 path->slots[level] = slot;
5183 btrfs_set_path_blocking(path);
5184 sret = btrfs_find_next_key(root, path, min_key, level,
5185 min_trans);
5186 if (sret == 0) {
5187 btrfs_release_path(path);
5188 goto again;
5189 } else {
5190 goto out;
5191 }
5192 }
5193 /* save our key for returning back */
5194 btrfs_node_key_to_cpu(cur, &found_key, slot);
5195 path->slots[level] = slot;
5196 if (level == path->lowest_level) {
5197 ret = 0;
5198 goto out;
5199 }
5200 btrfs_set_path_blocking(path);
5201 cur = read_node_slot(root, cur, slot);
5202 BUG_ON(!cur); /* -ENOMEM */
5203
5204 btrfs_tree_read_lock(cur);
5205
5206 path->locks[level - 1] = BTRFS_READ_LOCK;
5207 path->nodes[level - 1] = cur;
5208 unlock_up(path, level, 1, 0, NULL);
5209 btrfs_clear_path_blocking(path, NULL, 0);
5210 }
5211out:
5212 path->keep_locks = keep_locks;
5213 if (ret == 0) {
5214 btrfs_unlock_up_safe(path, path->lowest_level + 1);
5215 btrfs_set_path_blocking(path);
5216 memcpy(min_key, &found_key, sizeof(found_key));
5217 }
5218 return ret;
5219}
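
/*
 * Usage sketch (illustrative only, not from the original source): visiting
 * every block whose generation is at least 'min_trans', the way the defrag
 * and tree-log code iterate.  'handle_item' is a placeholder for whatever the
 * caller does with each hit, and advancing min_key is simplified here; real
 * callers also handle type/objectid rollover.
 *
 *	struct btrfs_key min_key = { 0 };
 *
 *	while (1) {
 *		ret = btrfs_search_forward(root, &min_key, path, min_trans);
 *		if (ret)
 *			break;
 *		handle_item(path->nodes[0], path->slots[0]);
 *		btrfs_release_path(path);
 *		if (min_key.offset == (u64)-1)
 *			break;
 *		min_key.offset++;
 *	}
 *
 * A return of 1 means nothing newer than min_trans is left; < 0 is an error.
 */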
5220
5221static void tree_move_down(struct btrfs_root *root,
5222 struct btrfs_path *path,
5223 int *level, int root_level)
5224{
5225 BUG_ON(*level == 0);
5226 path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
5227 path->slots[*level]);
5228 path->slots[*level - 1] = 0;
5229 (*level)--;
5230}
5231
5232static int tree_move_next_or_upnext(struct btrfs_root *root,
5233 struct btrfs_path *path,
5234 int *level, int root_level)
5235{
5236 int ret = 0;
5237 int nritems;
5238 nritems = btrfs_header_nritems(path->nodes[*level]);
5239
5240 path->slots[*level]++;
5241
5242 while (path->slots[*level] >= nritems) {
5243 if (*level == root_level)
5244 return -1;
5245
5246 /* move upnext */
5247 path->slots[*level] = 0;
5248 free_extent_buffer(path->nodes[*level]);
5249 path->nodes[*level] = NULL;
5250 (*level)++;
5251 path->slots[*level]++;
5252
5253 nritems = btrfs_header_nritems(path->nodes[*level]);
5254 ret = 1;
5255 }
5256 return ret;
5257}
5258
5259/*
5260 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
5261 * or down.
5262 */
5263static int tree_advance(struct btrfs_root *root,
5264 struct btrfs_path *path,
5265 int *level, int root_level,
5266 int allow_down,
5267 struct btrfs_key *key)
5268{
5269 int ret;
5270
5271 if (*level == 0 || !allow_down) {
5272 ret = tree_move_next_or_upnext(root, path, level, root_level);
5273 } else {
5274 tree_move_down(root, path, level, root_level);
5275 ret = 0;
5276 }
5277 if (ret >= 0) {
5278 if (*level == 0)
5279 btrfs_item_key_to_cpu(path->nodes[*level], key,
5280 path->slots[*level]);
5281 else
5282 btrfs_node_key_to_cpu(path->nodes[*level], key,
5283 path->slots[*level]);
5284 }
5285 return ret;
5286}
5287
5288static int tree_compare_item(struct btrfs_root *left_root,
5289 struct btrfs_path *left_path,
5290 struct btrfs_path *right_path,
5291 char *tmp_buf)
5292{
5293 int cmp;
5294 int len1, len2;
5295 unsigned long off1, off2;
5296
5297 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5298 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5299 if (len1 != len2)
5300 return 1;
5301
5302 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5303 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5304 right_path->slots[0]);
5305
5306 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5307
5308 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5309 if (cmp)
5310 return 1;
5311 return 0;
5312}
5313
5314#define ADVANCE 1
5315#define ADVANCE_ONLY_NEXT -1
5316
5317/*
5318 * This function compares two trees and calls the provided callback for
5319 * every changed/new/deleted item it finds.
5320 * If shared tree blocks are encountered, whole subtrees are skipped, making
5321 * the compare pretty fast on snapshotted subvolumes.
5322 *
5323 * This currently works on commit roots only. As commit roots are read only,
5324 * we don't do any locking. The commit roots are protected with transactions.
5325 * Transactions are ended and rejoined when a commit is tried in between.
5326 *
5327 * This function checks for modifications done to the trees while comparing.
5328 * If it detects a change, it aborts immediately.
5329 */
5330int btrfs_compare_trees(struct btrfs_root *left_root,
5331 struct btrfs_root *right_root,
5332 btrfs_changed_cb_t changed_cb, void *ctx)
5333{
5334 int ret;
5335 int cmp;
5336 struct btrfs_path *left_path = NULL;
5337 struct btrfs_path *right_path = NULL;
5338 struct btrfs_key left_key;
5339 struct btrfs_key right_key;
5340 char *tmp_buf = NULL;
5341 int left_root_level;
5342 int right_root_level;
5343 int left_level;
5344 int right_level;
5345 int left_end_reached;
5346 int right_end_reached;
5347 int advance_left;
5348 int advance_right;
5349 u64 left_blockptr;
5350 u64 right_blockptr;
5351 u64 left_gen;
5352 u64 right_gen;
5353
5354 left_path = btrfs_alloc_path();
5355 if (!left_path) {
5356 ret = -ENOMEM;
5357 goto out;
5358 }
5359 right_path = btrfs_alloc_path();
5360 if (!right_path) {
5361 ret = -ENOMEM;
5362 goto out;
5363 }
5364
5365 tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL | __GFP_NOWARN);
5366 if (!tmp_buf) {
5367 tmp_buf = vmalloc(left_root->nodesize);
5368 if (!tmp_buf) {
5369 ret = -ENOMEM;
5370 goto out;
5371 }
5372 }
5373
5374 left_path->search_commit_root = 1;
5375 left_path->skip_locking = 1;
5376 right_path->search_commit_root = 1;
5377 right_path->skip_locking = 1;
5378
5379 /*
5380 * Strategy: Go to the first items of both trees. Then do
5381 *
5382 * If both trees are at level 0
5383 * Compare keys of current items
5384 * If left < right treat left item as new, advance left tree
5385 * and repeat
5386 * If left > right treat right item as deleted, advance right tree
5387 * and repeat
5388 * If left == right do deep compare of items, treat as changed if
5389 * needed, advance both trees and repeat
5390 * If both trees are at the same level but not at level 0
5391	 *   Compare keys of current nodes/leaves
5392 * If left < right advance left tree and repeat
5393 * If left > right advance right tree and repeat
5394	 *     If left == right compare blockptrs of the next nodes/leaves
5395 * If they match advance both trees but stay at the same level
5396 * and repeat
5397 * If they don't match advance both trees while allowing to go
5398 * deeper and repeat
5399 * If tree levels are different
5400 * Advance the tree that needs it and repeat
5401 *
5402 * Advancing a tree means:
5403 * If we are at level 0, try to go to the next slot. If that's not
5404	 * possible, go one level up and repeat. Stop when we find a level
5405	 * where we can go to the next slot. We may at this point be on a
5406 * node or a leaf.
5407 *
5408 * If we are not at level 0 and not on shared tree blocks, go one
5409 * level deeper.
5410 *
5411 * If we are not at level 0 and on shared tree blocks, go one slot to
5412 * the right if possible or go up and right.
5413 */
5414
5415 down_read(&left_root->fs_info->commit_root_sem);
5416 left_level = btrfs_header_level(left_root->commit_root);
5417 left_root_level = left_level;
5418 left_path->nodes[left_level] = left_root->commit_root;
5419 extent_buffer_get(left_path->nodes[left_level]);
5420
5421 right_level = btrfs_header_level(right_root->commit_root);
5422 right_root_level = right_level;
5423 right_path->nodes[right_level] = right_root->commit_root;
5424 extent_buffer_get(right_path->nodes[right_level]);
5425 up_read(&left_root->fs_info->commit_root_sem);
5426
5427 if (left_level == 0)
5428 btrfs_item_key_to_cpu(left_path->nodes[left_level],
5429 &left_key, left_path->slots[left_level]);
5430 else
5431 btrfs_node_key_to_cpu(left_path->nodes[left_level],
5432 &left_key, left_path->slots[left_level]);
5433 if (right_level == 0)
5434 btrfs_item_key_to_cpu(right_path->nodes[right_level],
5435 &right_key, right_path->slots[right_level]);
5436 else
5437 btrfs_node_key_to_cpu(right_path->nodes[right_level],
5438 &right_key, right_path->slots[right_level]);
5439
5440 left_end_reached = right_end_reached = 0;
5441 advance_left = advance_right = 0;
5442
5443 while (1) {
5444 if (advance_left && !left_end_reached) {
5445 ret = tree_advance(left_root, left_path, &left_level,
5446 left_root_level,
5447 advance_left != ADVANCE_ONLY_NEXT,
5448 &left_key);
5449 if (ret < 0)
5450 left_end_reached = ADVANCE;
5451 advance_left = 0;
5452 }
5453 if (advance_right && !right_end_reached) {
5454 ret = tree_advance(right_root, right_path, &right_level,
5455 right_root_level,
5456 advance_right != ADVANCE_ONLY_NEXT,
5457 &right_key);
5458 if (ret < 0)
5459 right_end_reached = ADVANCE;
5460 advance_right = 0;
5461 }
5462
5463 if (left_end_reached && right_end_reached) {
5464 ret = 0;
5465 goto out;
5466 } else if (left_end_reached) {
5467 if (right_level == 0) {
5468 ret = changed_cb(left_root, right_root,
5469 left_path, right_path,
5470 &right_key,
5471 BTRFS_COMPARE_TREE_DELETED,
5472 ctx);
5473 if (ret < 0)
5474 goto out;
5475 }
5476 advance_right = ADVANCE;
5477 continue;
5478 } else if (right_end_reached) {
5479 if (left_level == 0) {
5480 ret = changed_cb(left_root, right_root,
5481 left_path, right_path,
5482 &left_key,
5483 BTRFS_COMPARE_TREE_NEW,
5484 ctx);
5485 if (ret < 0)
5486 goto out;
5487 }
5488 advance_left = ADVANCE;
5489 continue;
5490 }
5491
5492 if (left_level == 0 && right_level == 0) {
5493 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5494 if (cmp < 0) {
5495 ret = changed_cb(left_root, right_root,
5496 left_path, right_path,
5497 &left_key,
5498 BTRFS_COMPARE_TREE_NEW,
5499 ctx);
5500 if (ret < 0)
5501 goto out;
5502 advance_left = ADVANCE;
5503 } else if (cmp > 0) {
5504 ret = changed_cb(left_root, right_root,
5505 left_path, right_path,
5506 &right_key,
5507 BTRFS_COMPARE_TREE_DELETED,
5508 ctx);
5509 if (ret < 0)
5510 goto out;
5511 advance_right = ADVANCE;
5512 } else {
5513 enum btrfs_compare_tree_result result;
5514
5515 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5516 ret = tree_compare_item(left_root, left_path,
5517 right_path, tmp_buf);
5518 if (ret)
5519 result = BTRFS_COMPARE_TREE_CHANGED;
5520 else
5521 result = BTRFS_COMPARE_TREE_SAME;
5522 ret = changed_cb(left_root, right_root,
5523 left_path, right_path,
5524 &left_key, result, ctx);
5525 if (ret < 0)
5526 goto out;
5527 advance_left = ADVANCE;
5528 advance_right = ADVANCE;
5529 }
5530 } else if (left_level == right_level) {
5531 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5532 if (cmp < 0) {
5533 advance_left = ADVANCE;
5534 } else if (cmp > 0) {
5535 advance_right = ADVANCE;
5536 } else {
5537 left_blockptr = btrfs_node_blockptr(
5538 left_path->nodes[left_level],
5539 left_path->slots[left_level]);
5540 right_blockptr = btrfs_node_blockptr(
5541 right_path->nodes[right_level],
5542 right_path->slots[right_level]);
5543 left_gen = btrfs_node_ptr_generation(
5544 left_path->nodes[left_level],
5545 left_path->slots[left_level]);
5546 right_gen = btrfs_node_ptr_generation(
5547 right_path->nodes[right_level],
5548 right_path->slots[right_level]);
5549 if (left_blockptr == right_blockptr &&
5550 left_gen == right_gen) {
5551 /*
5552 * As we're on a shared block, don't
5553 * allow to go deeper.
5554 */
5555 advance_left = ADVANCE_ONLY_NEXT;
5556 advance_right = ADVANCE_ONLY_NEXT;
5557 } else {
5558 advance_left = ADVANCE;
5559 advance_right = ADVANCE;
5560 }
5561 }
5562 } else if (left_level < right_level) {
5563 advance_right = ADVANCE;
5564 } else {
5565 advance_left = ADVANCE;
5566 }
5567 }
5568
5569out:
5570 btrfs_free_path(left_path);
5571 btrfs_free_path(right_path);
5572 kvfree(tmp_buf);
5573 return ret;
5574}
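
/*
 * Usage sketch (illustrative only, not from the original source): the send
 * code is the typical caller.  'report_change', 'send_root', 'parent_root'
 * and 'my_ctx' are placeholder names; the callback's argument order mirrors
 * the changed_cb() invocations above, 'result' is one of
 * BTRFS_COMPARE_TREE_NEW, _DELETED, _CHANGED or _SAME, and returning a
 * negative value aborts the comparison.
 *
 *	static int report_change(struct btrfs_root *left_root,
 *				 struct btrfs_root *right_root,
 *				 struct btrfs_path *left_path,
 *				 struct btrfs_path *right_path,
 *				 struct btrfs_key *key,
 *				 enum btrfs_compare_tree_result result,
 *				 void *ctx)
 *	{
 *		return 0;
 *	}
 *
 *	ret = btrfs_compare_trees(send_root, parent_root, report_change, my_ctx);
 */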
5575
5576/*
5577 * this is similar to btrfs_next_leaf, but does not try to preserve
5578 * and fixup the path. It looks for and returns the next key in the
5579 * tree based on the current path and the min_trans parameters.
5580 *
5581 * 0 is returned if another key is found, < 0 if there are any errors
5582 * and 1 is returned if there are no higher keys in the tree
5583 *
5584 * path->keep_locks should be set to 1 on the search made before
5585 * calling this function.
5586 */
5587int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5588 struct btrfs_key *key, int level, u64 min_trans)
5589{
5590 int slot;
5591 struct extent_buffer *c;
5592
5593 WARN_ON(!path->keep_locks);
5594 while (level < BTRFS_MAX_LEVEL) {
5595 if (!path->nodes[level])
5596 return 1;
5597
5598 slot = path->slots[level] + 1;
5599 c = path->nodes[level];
5600next:
5601 if (slot >= btrfs_header_nritems(c)) {
5602 int ret;
5603 int orig_lowest;
5604 struct btrfs_key cur_key;
5605 if (level + 1 >= BTRFS_MAX_LEVEL ||
5606 !path->nodes[level + 1])
5607 return 1;
5608
5609 if (path->locks[level + 1]) {
5610 level++;
5611 continue;
5612 }
5613
5614 slot = btrfs_header_nritems(c) - 1;
5615 if (level == 0)
5616 btrfs_item_key_to_cpu(c, &cur_key, slot);
5617 else
5618 btrfs_node_key_to_cpu(c, &cur_key, slot);
5619
5620 orig_lowest = path->lowest_level;
5621 btrfs_release_path(path);
5622 path->lowest_level = level;
5623 ret = btrfs_search_slot(NULL, root, &cur_key, path,
5624 0, 0);
5625 path->lowest_level = orig_lowest;
5626 if (ret < 0)
5627 return ret;
5628
5629 c = path->nodes[level];
5630 slot = path->slots[level];
5631 if (ret == 0)
5632 slot++;
5633 goto next;
5634 }
5635
5636 if (level == 0)
5637 btrfs_item_key_to_cpu(c, key, slot);
5638 else {
5639 u64 gen = btrfs_node_ptr_generation(c, slot);
5640
5641 if (gen < min_trans) {
5642 slot++;
5643 goto next;
5644 }
5645 btrfs_node_key_to_cpu(c, key, slot);
5646 }
5647 return 0;
5648 }
5649 return 1;
5650}
5651
5652/*
5653 * search the tree again to find a leaf with greater keys
5654 * returns 0 if it found something or 1 if there are no greater leaves.
5655 * returns < 0 on io errors.
5656 */
5657int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5658{
5659 return btrfs_next_old_leaf(root, path, 0);
5660}
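
/*
 * Usage sketch (illustrative only, not from the original source): the standard
 * forward-iteration idiom over items starting at 'key'.  'handle_item' is a
 * placeholder for whatever the caller does with each item.
 *
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		goto out;
 *	while (1) {
 *		struct extent_buffer *leaf = path->nodes[0];
 *
 *		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 *			ret = btrfs_next_leaf(root, path);
 *			if (ret < 0)
 *				goto out;
 *			if (ret > 0)
 *				break;
 *			continue;
 *		}
 *		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
 *		handle_item(leaf, path->slots[0], &found_key);
 *		path->slots[0]++;
 *	}
 */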
5661
5662int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5663 u64 time_seq)
5664{
5665 int slot;
5666 int level;
5667 struct extent_buffer *c;
5668 struct extent_buffer *next;
5669 struct btrfs_key key;
5670 u32 nritems;
5671 int ret;
5672 int old_spinning = path->leave_spinning;
5673 int next_rw_lock = 0;
5674
5675 nritems = btrfs_header_nritems(path->nodes[0]);
5676 if (nritems == 0)
5677 return 1;
5678
5679 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5680again:
5681 level = 1;
5682 next = NULL;
5683 next_rw_lock = 0;
5684 btrfs_release_path(path);
5685
5686 path->keep_locks = 1;
5687 path->leave_spinning = 1;
5688
5689 if (time_seq)
5690 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5691 else
5692 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5693 path->keep_locks = 0;
5694
5695 if (ret < 0)
5696 return ret;
5697
5698 nritems = btrfs_header_nritems(path->nodes[0]);
5699 /*
5700 * by releasing the path above we dropped all our locks. A balance
5701 * could have added more items next to the key that used to be
5702 * at the very end of the block. So, check again here and
5703 * advance the path if there are now more items available.
5704 */
5705 if (nritems > 0 && path->slots[0] < nritems - 1) {
5706 if (ret == 0)
5707 path->slots[0]++;
5708 ret = 0;
5709 goto done;
5710 }
5711 /*
5712 * So the above check misses one case:
5713 * - after releasing the path above, someone has removed the item that
5714	 * used to be at the very end of the block, and balance between leaves
5715 * gets another one with bigger key.offset to replace it.
5716 *
5717 * This one should be returned as well, or we can get leaf corruption
5718	 * later (especially in __btrfs_drop_extents()).
5719 *
5720 * And a bit more explanation about this check,
5721 * with ret > 0, the key isn't found, the path points to the slot
5722 * where it should be inserted, so the path->slots[0] item must be the
5723 * bigger one.
5724 */
5725 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5726 ret = 0;
5727 goto done;
5728 }
5729
5730 while (level < BTRFS_MAX_LEVEL) {
5731 if (!path->nodes[level]) {
5732 ret = 1;
5733 goto done;
5734 }
5735
5736 slot = path->slots[level] + 1;
5737 c = path->nodes[level];
5738 if (slot >= btrfs_header_nritems(c)) {
5739 level++;
5740 if (level == BTRFS_MAX_LEVEL) {
5741 ret = 1;
5742 goto done;
5743 }
5744 continue;
5745 }
5746
5747 if (next) {
5748 btrfs_tree_unlock_rw(next, next_rw_lock);
5749 free_extent_buffer(next);
5750 }
5751
5752 next = c;
5753 next_rw_lock = path->locks[level];
5754 ret = read_block_for_search(NULL, root, path, &next, level,
5755 slot, &key, 0);
5756 if (ret == -EAGAIN)
5757 goto again;
5758
5759 if (ret < 0) {
5760 btrfs_release_path(path);
5761 goto done;
5762 }
5763
5764 if (!path->skip_locking) {
5765 ret = btrfs_try_tree_read_lock(next);
5766 if (!ret && time_seq) {
5767 /*
5768 * If we don't get the lock, we may be racing
5769 * with push_leaf_left, holding that lock while
5770 * itself waiting for the leaf we've currently
5771 * locked. To solve this situation, we give up
5772 * on our lock and cycle.
5773 */
5774 free_extent_buffer(next);
5775 btrfs_release_path(path);
5776 cond_resched();
5777 goto again;
5778 }
5779 if (!ret) {
5780 btrfs_set_path_blocking(path);
5781 btrfs_tree_read_lock(next);
5782 btrfs_clear_path_blocking(path, next,
5783 BTRFS_READ_LOCK);
5784 }
5785 next_rw_lock = BTRFS_READ_LOCK;
5786 }
5787 break;
5788 }
5789 path->slots[level] = slot;
5790 while (1) {
5791 level--;
5792 c = path->nodes[level];
5793 if (path->locks[level])
5794 btrfs_tree_unlock_rw(c, path->locks[level]);
5795
5796 free_extent_buffer(c);
5797 path->nodes[level] = next;
5798 path->slots[level] = 0;
5799 if (!path->skip_locking)
5800 path->locks[level] = next_rw_lock;
5801 if (!level)
5802 break;
5803
5804 ret = read_block_for_search(NULL, root, path, &next, level,
5805 0, &key, 0);
5806 if (ret == -EAGAIN)
5807 goto again;
5808
5809 if (ret < 0) {
5810 btrfs_release_path(path);
5811 goto done;
5812 }
5813
5814 if (!path->skip_locking) {
5815 ret = btrfs_try_tree_read_lock(next);
5816 if (!ret) {
5817 btrfs_set_path_blocking(path);
5818 btrfs_tree_read_lock(next);
5819 btrfs_clear_path_blocking(path, next,
5820 BTRFS_READ_LOCK);
5821 }
5822 next_rw_lock = BTRFS_READ_LOCK;
5823 }
5824 }
5825 ret = 0;
5826done:
5827 unlock_up(path, 0, 1, 0, NULL);
5828 path->leave_spinning = old_spinning;
5829 if (!old_spinning)
5830 btrfs_set_path_blocking(path);
5831
5832 return ret;
5833}
5834
5835/*
5836 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5837 * searching until it gets past min_objectid or finds an item of 'type'
5838 *
5839 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5840 */
5841int btrfs_previous_item(struct btrfs_root *root,
5842 struct btrfs_path *path, u64 min_objectid,
5843 int type)
5844{
5845 struct btrfs_key found_key;
5846 struct extent_buffer *leaf;
5847 u32 nritems;
5848 int ret;
5849
5850 while (1) {
5851 if (path->slots[0] == 0) {
5852 btrfs_set_path_blocking(path);
5853 ret = btrfs_prev_leaf(root, path);
5854 if (ret != 0)
5855 return ret;
5856 } else {
5857 path->slots[0]--;
5858 }
5859 leaf = path->nodes[0];
5860 nritems = btrfs_header_nritems(leaf);
5861 if (nritems == 0)
5862 return 1;
5863 if (path->slots[0] == nritems)
5864 path->slots[0]--;
5865
5866 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5867 if (found_key.objectid < min_objectid)
5868 break;
5869 if (found_key.type == type)
5870 return 0;
5871 if (found_key.objectid == min_objectid &&
5872 found_key.type < type)
5873 break;
5874 }
5875 return 1;
5876}
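
/*
 * Usage sketch (illustrative only, not from the original source): finding the
 * last existing item of a given type for an inode by searching just past it
 * and stepping back.  'ino' is a placeholder, and this assumes offset (u64)-1
 * is never stored for the chosen item type.
 *
 *	key.objectid = ino;
 *	key.type = BTRFS_EXTENT_DATA_KEY;
 *	key.offset = (u64)-1;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		return ret;
 *	ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
 *
 * On a 0 return the path points at the last BTRFS_EXTENT_DATA_KEY item of the
 * inode; 1 means no such item exists.
 */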
5877
5878/*
5879 * search in the extent tree to find a previous Metadata/Data extent item with
5880 * min objectid.
5881 *
5882 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5883 */
5884int btrfs_previous_extent_item(struct btrfs_root *root,
5885 struct btrfs_path *path, u64 min_objectid)
5886{
5887 struct btrfs_key found_key;
5888 struct extent_buffer *leaf;
5889 u32 nritems;
5890 int ret;
5891
5892 while (1) {
5893 if (path->slots[0] == 0) {
5894 btrfs_set_path_blocking(path);
5895 ret = btrfs_prev_leaf(root, path);
5896 if (ret != 0)
5897 return ret;
5898 } else {
5899 path->slots[0]--;
5900 }
5901 leaf = path->nodes[0];
5902 nritems = btrfs_header_nritems(leaf);
5903 if (nritems == 0)
5904 return 1;
5905 if (path->slots[0] == nritems)
5906 path->slots[0]--;
5907
5908 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5909 if (found_key.objectid < min_objectid)
5910 break;
5911 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5912 found_key.type == BTRFS_METADATA_ITEM_KEY)
5913 return 0;
5914 if (found_key.objectid == min_objectid &&
5915 found_key.type < BTRFS_EXTENT_ITEM_KEY)
5916 break;
5917 }
5918 return 1;
5919}
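
/*
 * Usage sketch (illustrative only, not from the original source): after an
 * extent tree search has positioned the path, step back to the closest
 * preceding EXTENT_ITEM or METADATA_ITEM.  'extent_root' is a placeholder
 * name; the last argument is a lower bound on objectid below which the
 * backwards walk gives up, so 0 means "search all the way to the start".
 *
 *	ret = btrfs_previous_extent_item(extent_root, path, 0);
 *	if (ret)
 *		return ret;
 *	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
 *
 * found_key.objectid is then the start of the preceding extent, and
 * found_key.type tells whether it is a data (EXTENT_ITEM_KEY) or metadata
 * (METADATA_ITEM_KEY) extent.
 */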