/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "compat.h"
#include "tree-log.h"

/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1
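/*
 * Note (an assumption based on how these values are used elsewhere in
 * btrfs, not part of the original comment): LOG_INODE_EXISTS is what a
 * parent directory is typically logged with when it only needs to be
 * present at replay time, while LOG_INODE_ALL is used for the inode the
 * fsync was actually issued against.
 */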

/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2). After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant. Without the extra logging,
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir. After a crash the rm -rf must
 * be replayed. This must be able to recurse down the entire
 * directory tree. The inode link count fixup code takes care of the
 * ugly details.
 */

/*
 * stages for the tree walking. The first
 * stage (0) is to only pin down the blocks we find,
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
#define LOG_WALK_PIN_ONLY 0
#define LOG_WALK_REPLAY_INODES 1
#define LOG_WALK_REPLAY_ALL 2
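/*
 * Sketch of how the stages fit together (inferred from the replay code
 * below rather than stated in the original comments): log recovery walks
 * each log tree once per stage, and replay_one_buffer() consults
 * wc->stage to decide which key types get handled during that pass.
 */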

static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct inode *inode,
			   int inode_only);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);

/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction. Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree. Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree,
 * and once to do all the other items.
 */

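/*
 * Illustrative call path (a sketch of how the helpers in this file fit
 * together, not copied from the original comments). An fsync that can
 * use the log ends up doing roughly:
 *
 *   start_log_trans(trans, root);
 *   btrfs_log_inode(trans, root, inode, LOG_INODE_ALL);
 *   btrfs_end_log_trans(root);
 *   btrfs_sync_log(trans, root);      (0 means the log is safely on disk)
 *
 * and falls back to a full btrfs_commit_transaction() when the log
 * cannot be used (see btrfs_sync_log() below).
 */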
129/*
130 * start a sub transaction and setup the log tree
131 * this increments the log tree writer count to make the people
132 * syncing the tree wait for us to finish
133 */
134static int start_log_trans(struct btrfs_trans_handle *trans,
135 struct btrfs_root *root)
136{
137 int ret;
138 int err = 0;
139
140 mutex_lock(&root->log_mutex);
141 if (root->log_root) {
142 if (!root->log_start_pid) {
143 root->log_start_pid = current->pid;
144 root->log_multiple_pids = false;
145 } else if (root->log_start_pid != current->pid) {
146 root->log_multiple_pids = true;
147 }
148
149 root->log_batch++;
150 atomic_inc(&root->log_writers);
151 mutex_unlock(&root->log_mutex);
152 return 0;
153 }
154 root->log_multiple_pids = false;
155 root->log_start_pid = current->pid;
156 mutex_lock(&root->fs_info->tree_log_mutex);
157 if (!root->fs_info->log_root_tree) {
158 ret = btrfs_init_log_root_tree(trans, root->fs_info);
159 if (ret)
160 err = ret;
161 }
162 if (err == 0 && !root->log_root) {
163 ret = btrfs_add_log_tree(trans, root);
164 if (ret)
165 err = ret;
166 }
167 mutex_unlock(&root->fs_info->tree_log_mutex);
168 root->log_batch++;
169 atomic_inc(&root->log_writers);
170 mutex_unlock(&root->log_mutex);
171 return err;
172}
173
/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there were no transactions
 * in progress
 */
179static int join_running_log_trans(struct btrfs_root *root)
180{
181 int ret = -ENOENT;
182
183 smp_mb();
184 if (!root->log_root)
185 return -ENOENT;
186
187 mutex_lock(&root->log_mutex);
188 if (root->log_root) {
189 ret = 0;
190 atomic_inc(&root->log_writers);
191 }
192 mutex_unlock(&root->log_mutex);
193 return ret;
194}
195
196/*
197 * This either makes the current running log transaction wait
198 * until you call btrfs_end_log_trans() or it makes any future
199 * log transactions wait until you call btrfs_end_log_trans()
200 */
201int btrfs_pin_log_trans(struct btrfs_root *root)
202{
203 int ret = -ENOENT;
204
205 mutex_lock(&root->log_mutex);
206 atomic_inc(&root->log_writers);
207 mutex_unlock(&root->log_mutex);
208 return ret;
209}
210
211/*
212 * indicate we're done making changes to the log tree
213 * and wake up anyone waiting to do a sync
214 */
215int btrfs_end_log_trans(struct btrfs_root *root)
216{
217 if (atomic_dec_and_test(&root->log_writers)) {
218 smp_mb();
219 if (waitqueue_active(&root->log_writer_wait))
220 wake_up(&root->log_writer_wait);
221 }
222 return 0;
223}
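/*
 * Usage note (an assumption about the callers, which live outside this
 * file): btrfs_pin_log_trans() and btrfs_end_log_trans() are meant to be
 * used as a pair, for example around an unlink or rename so that a log
 * sync cannot run while the directory items are being updated:
 *
 *   btrfs_pin_log_trans(root);
 *   ... update directory and inode items ...
 *   btrfs_end_log_trans(root);
 */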
224
225
226/*
227 * the walk control struct is used to pass state down the chain when
228 * processing the log tree. The stage field tells us which part
229 * of the log tree processing we are currently doing. The others
230 * are state fields used for that specific part
231 */
232struct walk_control {
233 /* should we free the extent on disk when done? This is used
234 * at transaction commit time while freeing a log tree
235 */
236 int free;
237
238 /* should we write out the extent buffer? This is used
239 * while flushing the log tree to disk during a sync
240 */
241 int write;
242
243 /* should we wait for the extent buffer io to finish? Also used
244 * while flushing the log tree to disk for a sync
245 */
246 int wait;
247
248 /* pin only walk, we record which extents on disk belong to the
249 * log trees
250 */
251 int pin;
252
253 /* what stage of the replay code we're currently in */
254 int stage;
255
256 /* the root we are currently replaying */
257 struct btrfs_root *replay_dest;
258
259 /* the trans handle for the current replay */
260 struct btrfs_trans_handle *trans;
261
262 /* the function that gets used to process blocks we find in the
263 * tree. Note the extent_buffer might not be up to date when it is
264 * passed in, and it must be checked or read if you need the data
265 * inside it
266 */
267 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
268 struct walk_control *wc, u64 gen);
269};
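/*
 * Example (a sketch based on the walkers later in this file, not on any
 * single caller shown here): freeing a log tree at commit time uses
 * something like
 *
 *   struct walk_control wc = {
 *           .free = 1,
 *           .process_func = process_one_buffer
 *   };
 *   walk_log_tree(trans, log, &wc);
 *
 * while log replay sets .process_func = replay_one_buffer and steps
 * wc.stage through the LOG_WALK_* values defined above.
 */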
270
271/*
272 * process_func used to pin down extents, write them or wait on them
273 */
274static int process_one_buffer(struct btrfs_root *log,
275 struct extent_buffer *eb,
276 struct walk_control *wc, u64 gen)
277{
278 if (wc->pin)
279 btrfs_pin_extent(log->fs_info->extent_root,
280 eb->start, eb->len, 0);
281
282 if (btrfs_buffer_uptodate(eb, gen)) {
283 if (wc->write)
284 btrfs_write_tree_block(eb);
285 if (wc->wait)
286 btrfs_wait_tree_block_writeback(eb);
287 }
288 return 0;
289}
290
291/*
292 * Item overwrite used by replay and tree logging. eb, slot and key all refer
293 * to the src data we are copying out.
294 *
295 * root is the tree we are copying into, and path is a scratch
296 * path for use in this function (it should be released on entry and
297 * will be released on exit).
298 *
299 * If the key is already in the destination tree the existing item is
300 * overwritten. If the existing item isn't big enough, it is extended.
301 * If it is too large, it is truncated.
302 *
303 * If the key isn't in the destination yet, a new item is inserted.
304 */
305static noinline int overwrite_item(struct btrfs_trans_handle *trans,
306 struct btrfs_root *root,
307 struct btrfs_path *path,
308 struct extent_buffer *eb, int slot,
309 struct btrfs_key *key)
310{
311 int ret;
312 u32 item_size;
313 u64 saved_i_size = 0;
314 int save_old_i_size = 0;
315 unsigned long src_ptr;
316 unsigned long dst_ptr;
317 int overwrite_root = 0;
318
319 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
320 overwrite_root = 1;
321
322 item_size = btrfs_item_size_nr(eb, slot);
323 src_ptr = btrfs_item_ptr_offset(eb, slot);
324
325 /* look for the key in the destination tree */
326 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
327 if (ret == 0) {
328 char *src_copy;
329 char *dst_copy;
330 u32 dst_size = btrfs_item_size_nr(path->nodes[0],
331 path->slots[0]);
332 if (dst_size != item_size)
333 goto insert;
334
335 if (item_size == 0) {
336 btrfs_release_path(path);
337 return 0;
338 }
339 dst_copy = kmalloc(item_size, GFP_NOFS);
340 src_copy = kmalloc(item_size, GFP_NOFS);
341 if (!dst_copy || !src_copy) {
342 btrfs_release_path(path);
343 kfree(dst_copy);
344 kfree(src_copy);
345 return -ENOMEM;
346 }
347
348 read_extent_buffer(eb, src_copy, src_ptr, item_size);
349
350 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
351 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
352 item_size);
353 ret = memcmp(dst_copy, src_copy, item_size);
354
355 kfree(dst_copy);
356 kfree(src_copy);
357 /*
358 * they have the same contents, just return, this saves
359 * us from cowing blocks in the destination tree and doing
360 * extra writes that may not have been done by a previous
361 * sync
362 */
363 if (ret == 0) {
364 btrfs_release_path(path);
365 return 0;
366 }
367
368 }
369insert:
370 btrfs_release_path(path);
371 /* try to insert the key into the destination tree */
372 ret = btrfs_insert_empty_item(trans, root, path,
373 key, item_size);
374
375 /* make sure any existing item is the correct size */
376 if (ret == -EEXIST) {
377 u32 found_size;
378 found_size = btrfs_item_size_nr(path->nodes[0],
379 path->slots[0]);
380 if (found_size > item_size) {
381 btrfs_truncate_item(trans, root, path, item_size, 1);
382 } else if (found_size < item_size) {
383 ret = btrfs_extend_item(trans, root, path,
384 item_size - found_size);
385 }
386 } else if (ret) {
387 return ret;
388 }
389 dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
390 path->slots[0]);
391
392 /* don't overwrite an existing inode if the generation number
393 * was logged as zero. This is done when the tree logging code
394 * is just logging an inode to make sure it exists after recovery.
395 *
396 * Also, don't overwrite i_size on directories during replay.
397 * log replay inserts and removes directory items based on the
398 * state of the tree found in the subvolume, and i_size is modified
399 * as it goes
400 */
401 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
402 struct btrfs_inode_item *src_item;
403 struct btrfs_inode_item *dst_item;
404
405 src_item = (struct btrfs_inode_item *)src_ptr;
406 dst_item = (struct btrfs_inode_item *)dst_ptr;
407
408 if (btrfs_inode_generation(eb, src_item) == 0)
409 goto no_copy;
410
411 if (overwrite_root &&
412 S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
413 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
414 save_old_i_size = 1;
415 saved_i_size = btrfs_inode_size(path->nodes[0],
416 dst_item);
417 }
418 }
419
420 copy_extent_buffer(path->nodes[0], eb, dst_ptr,
421 src_ptr, item_size);
422
423 if (save_old_i_size) {
424 struct btrfs_inode_item *dst_item;
425 dst_item = (struct btrfs_inode_item *)dst_ptr;
426 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
427 }
428
429 /* make sure the generation is filled in */
430 if (key->type == BTRFS_INODE_ITEM_KEY) {
431 struct btrfs_inode_item *dst_item;
432 dst_item = (struct btrfs_inode_item *)dst_ptr;
433 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
434 btrfs_set_inode_generation(path->nodes[0], dst_item,
435 trans->transid);
436 }
437 }
438no_copy:
439 btrfs_mark_buffer_dirty(path->nodes[0]);
440 btrfs_release_path(path);
441 return 0;
442}
443
444/*
445 * simple helper to read an inode off the disk from a given root
446 * This can only be called for subvolume roots and not for the log
447 */
448static noinline struct inode *read_one_inode(struct btrfs_root *root,
449 u64 objectid)
450{
451 struct btrfs_key key;
452 struct inode *inode;
453
454 key.objectid = objectid;
455 key.type = BTRFS_INODE_ITEM_KEY;
456 key.offset = 0;
457 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
458 if (IS_ERR(inode)) {
459 inode = NULL;
460 } else if (is_bad_inode(inode)) {
461 iput(inode);
462 inode = NULL;
463 }
464 return inode;
465}
466
467/* replays a single extent in 'eb' at 'slot' with 'key' into the
468 * subvolume 'root'. path is released on entry and should be released
469 * on exit.
470 *
471 * extents in the log tree have not been allocated out of the extent
472 * tree yet. So, this completes the allocation, taking a reference
473 * as required if the extent already exists or creating a new extent
474 * if it isn't in the extent allocation tree yet.
475 *
476 * The extent is inserted into the file, dropping any existing extents
477 * from the file that overlap the new one.
478 */
479static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
480 struct btrfs_root *root,
481 struct btrfs_path *path,
482 struct extent_buffer *eb, int slot,
483 struct btrfs_key *key)
484{
485 int found_type;
486 u64 mask = root->sectorsize - 1;
487 u64 extent_end;
488 u64 alloc_hint;
489 u64 start = key->offset;
490 u64 saved_nbytes;
491 struct btrfs_file_extent_item *item;
492 struct inode *inode = NULL;
493 unsigned long size;
494 int ret = 0;
495
496 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
497 found_type = btrfs_file_extent_type(eb, item);
498
499 if (found_type == BTRFS_FILE_EXTENT_REG ||
500 found_type == BTRFS_FILE_EXTENT_PREALLOC)
501 extent_end = start + btrfs_file_extent_num_bytes(eb, item);
502 else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
503 size = btrfs_file_extent_inline_len(eb, item);
504 extent_end = (start + size + mask) & ~mask;
505 } else {
506 ret = 0;
507 goto out;
508 }
509
510 inode = read_one_inode(root, key->objectid);
511 if (!inode) {
512 ret = -EIO;
513 goto out;
514 }
515
516 /*
517 * first check to see if we already have this extent in the
518 * file. This must be done before the btrfs_drop_extents run
519 * so we don't try to drop this extent.
520 */
521 ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
522 start, 0);
523
524 if (ret == 0 &&
525 (found_type == BTRFS_FILE_EXTENT_REG ||
526 found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
527 struct btrfs_file_extent_item cmp1;
528 struct btrfs_file_extent_item cmp2;
529 struct btrfs_file_extent_item *existing;
530 struct extent_buffer *leaf;
531
532 leaf = path->nodes[0];
533 existing = btrfs_item_ptr(leaf, path->slots[0],
534 struct btrfs_file_extent_item);
535
536 read_extent_buffer(eb, &cmp1, (unsigned long)item,
537 sizeof(cmp1));
538 read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
539 sizeof(cmp2));
540
541 /*
542 * we already have a pointer to this exact extent,
543 * we don't have to do anything
544 */
545 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
546 btrfs_release_path(path);
547 goto out;
548 }
549 }
550 btrfs_release_path(path);
551
552 saved_nbytes = inode_get_bytes(inode);
553 /* drop any overlapping extents */
554 ret = btrfs_drop_extents(trans, inode, start, extent_end,
555 &alloc_hint, 1);
556 BUG_ON(ret);
557
558 if (found_type == BTRFS_FILE_EXTENT_REG ||
559 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
560 u64 offset;
561 unsigned long dest_offset;
562 struct btrfs_key ins;
563
564 ret = btrfs_insert_empty_item(trans, root, path, key,
565 sizeof(*item));
566 BUG_ON(ret);
567 dest_offset = btrfs_item_ptr_offset(path->nodes[0],
568 path->slots[0]);
569 copy_extent_buffer(path->nodes[0], eb, dest_offset,
570 (unsigned long)item, sizeof(*item));
571
572 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
573 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
574 ins.type = BTRFS_EXTENT_ITEM_KEY;
575 offset = key->offset - btrfs_file_extent_offset(eb, item);
576
577 if (ins.objectid > 0) {
578 u64 csum_start;
579 u64 csum_end;
580 LIST_HEAD(ordered_sums);
581 /*
582 * is this extent already allocated in the extent
583 * allocation tree? If so, just add a reference
584 */
585 ret = btrfs_lookup_extent(root, ins.objectid,
586 ins.offset);
587 if (ret == 0) {
588 ret = btrfs_inc_extent_ref(trans, root,
589 ins.objectid, ins.offset,
590 0, root->root_key.objectid,
591 key->objectid, offset);
592 BUG_ON(ret);
593 } else {
594 /*
595 * insert the extent pointer in the extent
596 * allocation tree
597 */
598 ret = btrfs_alloc_logged_file_extent(trans,
599 root, root->root_key.objectid,
600 key->objectid, offset, &ins);
601 BUG_ON(ret);
602 }
603 btrfs_release_path(path);
604
605 if (btrfs_file_extent_compression(eb, item)) {
606 csum_start = ins.objectid;
607 csum_end = csum_start + ins.offset;
608 } else {
609 csum_start = ins.objectid +
610 btrfs_file_extent_offset(eb, item);
611 csum_end = csum_start +
612 btrfs_file_extent_num_bytes(eb, item);
613 }
614
615 ret = btrfs_lookup_csums_range(root->log_root,
616 csum_start, csum_end - 1,
617 &ordered_sums, 0);
618 BUG_ON(ret);
619 while (!list_empty(&ordered_sums)) {
620 struct btrfs_ordered_sum *sums;
621 sums = list_entry(ordered_sums.next,
622 struct btrfs_ordered_sum,
623 list);
624 ret = btrfs_csum_file_blocks(trans,
625 root->fs_info->csum_root,
626 sums);
627 BUG_ON(ret);
628 list_del(&sums->list);
629 kfree(sums);
630 }
631 } else {
632 btrfs_release_path(path);
633 }
634 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
635 /* inline extents are easy, we just overwrite them */
636 ret = overwrite_item(trans, root, path, eb, slot, key);
637 BUG_ON(ret);
638 }
639
640 inode_set_bytes(inode, saved_nbytes);
641 btrfs_update_inode(trans, root, inode);
642out:
643 if (inode)
644 iput(inode);
645 return ret;
646}
647
648/*
649 * when cleaning up conflicts between the directory names in the
650 * subvolume, directory names in the log and directory names in the
651 * inode back references, we may have to unlink inodes from directories.
652 *
653 * This is a helper function to do the unlink of a specific directory
654 * item
655 */
656static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
657 struct btrfs_root *root,
658 struct btrfs_path *path,
659 struct inode *dir,
660 struct btrfs_dir_item *di)
661{
662 struct inode *inode;
663 char *name;
664 int name_len;
665 struct extent_buffer *leaf;
666 struct btrfs_key location;
667 int ret;
668
669 leaf = path->nodes[0];
670
671 btrfs_dir_item_key_to_cpu(leaf, di, &location);
672 name_len = btrfs_dir_name_len(leaf, di);
673 name = kmalloc(name_len, GFP_NOFS);
674 if (!name)
675 return -ENOMEM;
676
677 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
678 btrfs_release_path(path);
679
680 inode = read_one_inode(root, location.objectid);
681 if (!inode) {
682 kfree(name);
683 return -EIO;
684 }
685
686 ret = link_to_fixup_dir(trans, root, path, location.objectid);
687 BUG_ON(ret);
688
689 ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
690 BUG_ON(ret);
691 kfree(name);
692
693 iput(inode);
694 return ret;
695}
696
697/*
698 * helper function to see if a given name and sequence number found
699 * in an inode back reference are already in a directory and correctly
700 * point to this inode
701 */
702static noinline int inode_in_dir(struct btrfs_root *root,
703 struct btrfs_path *path,
704 u64 dirid, u64 objectid, u64 index,
705 const char *name, int name_len)
706{
707 struct btrfs_dir_item *di;
708 struct btrfs_key location;
709 int match = 0;
710
711 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
712 index, name, name_len, 0);
713 if (di && !IS_ERR(di)) {
714 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
715 if (location.objectid != objectid)
716 goto out;
717 } else
718 goto out;
719 btrfs_release_path(path);
720
721 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
722 if (di && !IS_ERR(di)) {
723 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
724 if (location.objectid != objectid)
725 goto out;
726 } else
727 goto out;
728 match = 1;
729out:
730 btrfs_release_path(path);
731 return match;
732}
733
734/*
735 * helper function to check a log tree for a named back reference in
736 * an inode. This is used to decide if a back reference that is
737 * found in the subvolume conflicts with what we find in the log.
738 *
739 * inode backreferences may have multiple refs in a single item,
740 * during replay we process one reference at a time, and we don't
741 * want to delete valid links to a file from the subvolume if that
742 * link is also in the log.
743 */
744static noinline int backref_in_log(struct btrfs_root *log,
745 struct btrfs_key *key,
746 char *name, int namelen)
747{
748 struct btrfs_path *path;
749 struct btrfs_inode_ref *ref;
750 unsigned long ptr;
751 unsigned long ptr_end;
752 unsigned long name_ptr;
753 int found_name_len;
754 int item_size;
755 int ret;
756 int match = 0;
757
758 path = btrfs_alloc_path();
759 if (!path)
760 return -ENOMEM;
761
762 ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
763 if (ret != 0)
764 goto out;
765
766 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
767 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
768 ptr_end = ptr + item_size;
769 while (ptr < ptr_end) {
770 ref = (struct btrfs_inode_ref *)ptr;
771 found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
772 if (found_name_len == namelen) {
773 name_ptr = (unsigned long)(ref + 1);
774 ret = memcmp_extent_buffer(path->nodes[0], name,
775 name_ptr, namelen);
776 if (ret == 0) {
777 match = 1;
778 goto out;
779 }
780 }
781 ptr = (unsigned long)(ref + 1) + found_name_len;
782 }
783out:
784 btrfs_free_path(path);
785 return match;
786}
787
788
789/*
790 * replay one inode back reference item found in the log tree.
791 * eb, slot and key refer to the buffer and key found in the log tree.
792 * root is the destination we are replaying into, and path is for temp
793 * use by this function. (it should be released on return).
794 */
795static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
796 struct btrfs_root *root,
797 struct btrfs_root *log,
798 struct btrfs_path *path,
799 struct extent_buffer *eb, int slot,
800 struct btrfs_key *key)
801{
802 struct btrfs_inode_ref *ref;
803 struct btrfs_dir_item *di;
804 struct inode *dir;
805 struct inode *inode;
806 unsigned long ref_ptr;
807 unsigned long ref_end;
808 char *name;
809 int namelen;
810 int ret;
811 int search_done = 0;
812
813 /*
814 * it is possible that we didn't log all the parent directories
815 * for a given inode. If we don't find the dir, just don't
816 * copy the back ref in. The link count fixup code will take
817 * care of the rest
818 */
819 dir = read_one_inode(root, key->offset);
820 if (!dir)
821 return -ENOENT;
822
823 inode = read_one_inode(root, key->objectid);
824 if (!inode) {
825 iput(dir);
826 return -EIO;
827 }
828
829 ref_ptr = btrfs_item_ptr_offset(eb, slot);
830 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
831
832again:
833 ref = (struct btrfs_inode_ref *)ref_ptr;
834
835 namelen = btrfs_inode_ref_name_len(eb, ref);
836 name = kmalloc(namelen, GFP_NOFS);
837 BUG_ON(!name);
838
839 read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen);
840
841 /* if we already have a perfect match, we're done */
842 if (inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
843 btrfs_inode_ref_index(eb, ref),
844 name, namelen)) {
845 goto out;
846 }
847
848 /*
849 * look for a conflicting back reference in the metadata.
850 * if we find one we have to unlink that name of the file
851 * before we add our new link. Later on, we overwrite any
852 * existing back reference, and we don't want to create
853 * dangling pointers in the directory.
854 */
855
856 if (search_done)
857 goto insert;
858
859 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
860 if (ret == 0) {
861 char *victim_name;
862 int victim_name_len;
863 struct btrfs_inode_ref *victim_ref;
864 unsigned long ptr;
865 unsigned long ptr_end;
866 struct extent_buffer *leaf = path->nodes[0];
867
868 /* are we trying to overwrite a back ref for the root directory
869 * if so, just jump out, we're done
870 */
871 if (key->objectid == key->offset)
872 goto out_nowrite;
873
874 /* check all the names in this back reference to see
875 * if they are in the log. if so, we allow them to stay
876 * otherwise they must be unlinked as a conflict
877 */
878 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
879 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
880 while (ptr < ptr_end) {
881 victim_ref = (struct btrfs_inode_ref *)ptr;
882 victim_name_len = btrfs_inode_ref_name_len(leaf,
883 victim_ref);
884 victim_name = kmalloc(victim_name_len, GFP_NOFS);
885 BUG_ON(!victim_name);
886
887 read_extent_buffer(leaf, victim_name,
888 (unsigned long)(victim_ref + 1),
889 victim_name_len);
890
891 if (!backref_in_log(log, key, victim_name,
892 victim_name_len)) {
893 btrfs_inc_nlink(inode);
894 btrfs_release_path(path);
895
896 ret = btrfs_unlink_inode(trans, root, dir,
897 inode, victim_name,
898 victim_name_len);
899 }
900 kfree(victim_name);
901 ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
902 }
903 BUG_ON(ret);
904
		/*
		 * NOTE: we have searched the root tree and checked the
		 * corresponding ref, it does not need to be checked again.
		 */
909 search_done = 1;
910 }
911 btrfs_release_path(path);
912
913 /* look for a conflicting sequence number */
914 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
915 btrfs_inode_ref_index(eb, ref),
916 name, namelen, 0);
917 if (di && !IS_ERR(di)) {
918 ret = drop_one_dir_item(trans, root, path, dir, di);
919 BUG_ON(ret);
920 }
921 btrfs_release_path(path);
922
	/* look for a conflicting name */
924 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
925 name, namelen, 0);
926 if (di && !IS_ERR(di)) {
927 ret = drop_one_dir_item(trans, root, path, dir, di);
928 BUG_ON(ret);
929 }
930 btrfs_release_path(path);
931
932insert:
933 /* insert our name */
934 ret = btrfs_add_link(trans, dir, inode, name, namelen, 0,
935 btrfs_inode_ref_index(eb, ref));
936 BUG_ON(ret);
937
938 btrfs_update_inode(trans, root, inode);
939
940out:
941 ref_ptr = (unsigned long)(ref + 1) + namelen;
942 kfree(name);
943 if (ref_ptr < ref_end)
944 goto again;
945
946 /* finally write the back reference in the inode */
947 ret = overwrite_item(trans, root, path, eb, slot, key);
948 BUG_ON(ret);
949
950out_nowrite:
951 btrfs_release_path(path);
952 iput(dir);
953 iput(inode);
954 return 0;
955}
956
957static int insert_orphan_item(struct btrfs_trans_handle *trans,
958 struct btrfs_root *root, u64 offset)
959{
960 int ret;
961 ret = btrfs_find_orphan_item(root, offset);
962 if (ret > 0)
963 ret = btrfs_insert_orphan_item(trans, root, offset);
964 return ret;
965}
966
967
968/*
969 * There are a few corners where the link count of the file can't
970 * be properly maintained during replay. So, instead of adding
971 * lots of complexity to the log code, we just scan the backrefs
972 * for any file that has been through replay.
973 *
974 * The scan will update the link count on the inode to reflect the
975 * number of back refs found. If it goes down to zero, the iput
976 * will free the inode.
977 */
978static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
979 struct btrfs_root *root,
980 struct inode *inode)
981{
982 struct btrfs_path *path;
983 int ret;
984 struct btrfs_key key;
985 u64 nlink = 0;
986 unsigned long ptr;
987 unsigned long ptr_end;
988 int name_len;
989 u64 ino = btrfs_ino(inode);
990
991 key.objectid = ino;
992 key.type = BTRFS_INODE_REF_KEY;
993 key.offset = (u64)-1;
994
995 path = btrfs_alloc_path();
996 if (!path)
997 return -ENOMEM;
998
999 while (1) {
1000 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1001 if (ret < 0)
1002 break;
1003 if (ret > 0) {
1004 if (path->slots[0] == 0)
1005 break;
1006 path->slots[0]--;
1007 }
1008 btrfs_item_key_to_cpu(path->nodes[0], &key,
1009 path->slots[0]);
1010 if (key.objectid != ino ||
1011 key.type != BTRFS_INODE_REF_KEY)
1012 break;
1013 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
1014 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
1015 path->slots[0]);
1016 while (ptr < ptr_end) {
1017 struct btrfs_inode_ref *ref;
1018
1019 ref = (struct btrfs_inode_ref *)ptr;
1020 name_len = btrfs_inode_ref_name_len(path->nodes[0],
1021 ref);
1022 ptr = (unsigned long)(ref + 1) + name_len;
1023 nlink++;
1024 }
1025
1026 if (key.offset == 0)
1027 break;
1028 key.offset--;
1029 btrfs_release_path(path);
1030 }
1031 btrfs_release_path(path);
1032 if (nlink != inode->i_nlink) {
1033 inode->i_nlink = nlink;
1034 btrfs_update_inode(trans, root, inode);
1035 }
1036 BTRFS_I(inode)->index_cnt = (u64)-1;
1037
1038 if (inode->i_nlink == 0) {
1039 if (S_ISDIR(inode->i_mode)) {
1040 ret = replay_dir_deletes(trans, root, NULL, path,
1041 ino, 1);
1042 BUG_ON(ret);
1043 }
1044 ret = insert_orphan_item(trans, root, ino);
1045 BUG_ON(ret);
1046 }
1047 btrfs_free_path(path);
1048
1049 return 0;
1050}
1051
1052static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1053 struct btrfs_root *root,
1054 struct btrfs_path *path)
1055{
1056 int ret;
1057 struct btrfs_key key;
1058 struct inode *inode;
1059
1060 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1061 key.type = BTRFS_ORPHAN_ITEM_KEY;
1062 key.offset = (u64)-1;
1063 while (1) {
1064 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1065 if (ret < 0)
1066 break;
1067
1068 if (ret == 1) {
1069 if (path->slots[0] == 0)
1070 break;
1071 path->slots[0]--;
1072 }
1073
1074 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1075 if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
1076 key.type != BTRFS_ORPHAN_ITEM_KEY)
1077 break;
1078
1079 ret = btrfs_del_item(trans, root, path);
1080 if (ret)
1081 goto out;
1082
1083 btrfs_release_path(path);
1084 inode = read_one_inode(root, key.offset);
1085 if (!inode)
1086 return -EIO;
1087
1088 ret = fixup_inode_link_count(trans, root, inode);
1089 BUG_ON(ret);
1090
1091 iput(inode);
1092
		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 * offset
		 */
1098 key.offset = (u64)-1;
1099 }
1100 ret = 0;
1101out:
1102 btrfs_release_path(path);
1103 return ret;
1104}
1105
1106
1107/*
1108 * record a given inode in the fixup dir so we can check its link
1109 * count when replay is done. The link count is incremented here
1110 * so the inode won't go away until we check it
1111 */
1112static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1113 struct btrfs_root *root,
1114 struct btrfs_path *path,
1115 u64 objectid)
1116{
1117 struct btrfs_key key;
1118 int ret = 0;
1119 struct inode *inode;
1120
1121 inode = read_one_inode(root, objectid);
1122 if (!inode)
1123 return -EIO;
1124
1125 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1126 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
1127 key.offset = objectid;
1128
1129 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1130
1131 btrfs_release_path(path);
1132 if (ret == 0) {
1133 btrfs_inc_nlink(inode);
1134 btrfs_update_inode(trans, root, inode);
1135 } else if (ret == -EEXIST) {
1136 ret = 0;
1137 } else {
1138 BUG();
1139 }
1140 iput(inode);
1141
1142 return ret;
1143}
1144
1145/*
1146 * when replaying the log for a directory, we only insert names
1147 * for inodes that actually exist. This means an fsync on a directory
1148 * does not implicitly fsync all the new files in it
1149 */
1150static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1151 struct btrfs_root *root,
1152 struct btrfs_path *path,
1153 u64 dirid, u64 index,
1154 char *name, int name_len, u8 type,
1155 struct btrfs_key *location)
1156{
1157 struct inode *inode;
1158 struct inode *dir;
1159 int ret;
1160
1161 inode = read_one_inode(root, location->objectid);
1162 if (!inode)
1163 return -ENOENT;
1164
1165 dir = read_one_inode(root, dirid);
1166 if (!dir) {
1167 iput(inode);
1168 return -EIO;
1169 }
1170 ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);
1171
1172 /* FIXME, put inode into FIXUP list */
1173
1174 iput(inode);
1175 iput(dir);
1176 return ret;
1177}
1178
1179/*
1180 * take a single entry in a log directory item and replay it into
1181 * the subvolume.
1182 *
1183 * if a conflicting item exists in the subdirectory already,
1184 * the inode it points to is unlinked and put into the link count
1185 * fix up tree.
1186 *
1187 * If a name from the log points to a file or directory that does
1188 * not exist in the FS, it is skipped. fsyncs on directories
1189 * do not force down inodes inside that directory, just changes to the
1190 * names or unlinks in a directory.
1191 */
1192static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1193 struct btrfs_root *root,
1194 struct btrfs_path *path,
1195 struct extent_buffer *eb,
1196 struct btrfs_dir_item *di,
1197 struct btrfs_key *key)
1198{
1199 char *name;
1200 int name_len;
1201 struct btrfs_dir_item *dst_di;
1202 struct btrfs_key found_key;
1203 struct btrfs_key log_key;
1204 struct inode *dir;
1205 u8 log_type;
1206 int exists;
1207 int ret;
1208
1209 dir = read_one_inode(root, key->objectid);
1210 if (!dir)
1211 return -EIO;
1212
1213 name_len = btrfs_dir_name_len(eb, di);
1214 name = kmalloc(name_len, GFP_NOFS);
	if (!name) {
		iput(dir);
		return -ENOMEM;
	}
1217
1218 log_type = btrfs_dir_type(eb, di);
1219 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1220 name_len);
1221
1222 btrfs_dir_item_key_to_cpu(eb, di, &log_key);
1223 exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
1224 if (exists == 0)
1225 exists = 1;
1226 else
1227 exists = 0;
1228 btrfs_release_path(path);
1229
1230 if (key->type == BTRFS_DIR_ITEM_KEY) {
1231 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
1232 name, name_len, 1);
1233 } else if (key->type == BTRFS_DIR_INDEX_KEY) {
1234 dst_di = btrfs_lookup_dir_index_item(trans, root, path,
1235 key->objectid,
1236 key->offset, name,
1237 name_len, 1);
1238 } else {
1239 BUG();
1240 }
1241 if (IS_ERR_OR_NULL(dst_di)) {
1242 /* we need a sequence number to insert, so we only
1243 * do inserts for the BTRFS_DIR_INDEX_KEY types
1244 */
1245 if (key->type != BTRFS_DIR_INDEX_KEY)
1246 goto out;
1247 goto insert;
1248 }
1249
1250 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
1251 /* the existing item matches the logged item */
1252 if (found_key.objectid == log_key.objectid &&
1253 found_key.type == log_key.type &&
1254 found_key.offset == log_key.offset &&
1255 btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
1256 goto out;
1257 }
1258
1259 /*
1260 * don't drop the conflicting directory entry if the inode
1261 * for the new entry doesn't exist
1262 */
1263 if (!exists)
1264 goto out;
1265
1266 ret = drop_one_dir_item(trans, root, path, dir, dst_di);
1267 BUG_ON(ret);
1268
1269 if (key->type == BTRFS_DIR_INDEX_KEY)
1270 goto insert;
1271out:
1272 btrfs_release_path(path);
1273 kfree(name);
1274 iput(dir);
1275 return 0;
1276
1277insert:
1278 btrfs_release_path(path);
1279 ret = insert_one_name(trans, root, path, key->objectid, key->offset,
1280 name, name_len, log_type, &log_key);
1281
1282 BUG_ON(ret && ret != -ENOENT);
1283 goto out;
1284}
1285
1286/*
1287 * find all the names in a directory item and reconcile them into
1288 * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than
1289 * one name in a directory item, but the same code gets used for
1290 * both directory index types
1291 */
1292static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
1293 struct btrfs_root *root,
1294 struct btrfs_path *path,
1295 struct extent_buffer *eb, int slot,
1296 struct btrfs_key *key)
1297{
1298 int ret;
1299 u32 item_size = btrfs_item_size_nr(eb, slot);
1300 struct btrfs_dir_item *di;
1301 int name_len;
1302 unsigned long ptr;
1303 unsigned long ptr_end;
1304
1305 ptr = btrfs_item_ptr_offset(eb, slot);
1306 ptr_end = ptr + item_size;
1307 while (ptr < ptr_end) {
1308 di = (struct btrfs_dir_item *)ptr;
1309 if (verify_dir_item(root, eb, di))
1310 return -EIO;
1311 name_len = btrfs_dir_name_len(eb, di);
1312 ret = replay_one_name(trans, root, path, eb, di, key);
1313 BUG_ON(ret);
1314 ptr = (unsigned long)(di + 1);
1315 ptr += name_len;
1316 }
1317 return 0;
1318}
1319
1320/*
1321 * directory replay has two parts. There are the standard directory
1322 * items in the log copied from the subvolume, and range items
1323 * created in the log while the subvolume was logged.
1324 *
1325 * The range items tell us which parts of the key space the log
1326 * is authoritative for. During replay, if a key in the subvolume
1327 * directory is in a logged range item, but not actually in the log
1328 * that means it was deleted from the directory before the fsync
1329 * and should be removed.
1330 */
1331static noinline int find_dir_range(struct btrfs_root *root,
1332 struct btrfs_path *path,
1333 u64 dirid, int key_type,
1334 u64 *start_ret, u64 *end_ret)
1335{
1336 struct btrfs_key key;
1337 u64 found_end;
1338 struct btrfs_dir_log_item *item;
1339 int ret;
1340 int nritems;
1341
1342 if (*start_ret == (u64)-1)
1343 return 1;
1344
1345 key.objectid = dirid;
1346 key.type = key_type;
1347 key.offset = *start_ret;
1348
1349 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1350 if (ret < 0)
1351 goto out;
1352 if (ret > 0) {
1353 if (path->slots[0] == 0)
1354 goto out;
1355 path->slots[0]--;
1356 }
1357 if (ret != 0)
1358 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1359
1360 if (key.type != key_type || key.objectid != dirid) {
1361 ret = 1;
1362 goto next;
1363 }
1364 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1365 struct btrfs_dir_log_item);
1366 found_end = btrfs_dir_log_end(path->nodes[0], item);
1367
1368 if (*start_ret >= key.offset && *start_ret <= found_end) {
1369 ret = 0;
1370 *start_ret = key.offset;
1371 *end_ret = found_end;
1372 goto out;
1373 }
1374 ret = 1;
1375next:
1376 /* check the next slot in the tree to see if it is a valid item */
1377 nritems = btrfs_header_nritems(path->nodes[0]);
1378 if (path->slots[0] >= nritems) {
1379 ret = btrfs_next_leaf(root, path);
1380 if (ret)
1381 goto out;
1382 } else {
1383 path->slots[0]++;
1384 }
1385
1386 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1387
1388 if (key.type != key_type || key.objectid != dirid) {
1389 ret = 1;
1390 goto out;
1391 }
1392 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1393 struct btrfs_dir_log_item);
1394 found_end = btrfs_dir_log_end(path->nodes[0], item);
1395 *start_ret = key.offset;
1396 *end_ret = found_end;
1397 ret = 0;
1398out:
1399 btrfs_release_path(path);
1400 return ret;
1401}
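/*
 * Worked example (illustrative only, not from the original comments):
 * suppose the log holds a dir_log item for dirid 256 with key.offset 0
 * and a dir_log_end of 100. A call with *start_ret == 0 returns 0 and
 * sets *start_ret = 0, *end_ret = 100, meaning the log is authoritative
 * for directory offsets 0..100. replay_dir_deletes() below then removes
 * any subvolume directory item in that range that has no matching entry
 * in the log.
 */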
1402
1403/*
1404 * this looks for a given directory item in the log. If the directory
1405 * item is not in the log, the item is removed and the inode it points
1406 * to is unlinked
1407 */
1408static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
1409 struct btrfs_root *root,
1410 struct btrfs_root *log,
1411 struct btrfs_path *path,
1412 struct btrfs_path *log_path,
1413 struct inode *dir,
1414 struct btrfs_key *dir_key)
1415{
1416 int ret;
1417 struct extent_buffer *eb;
1418 int slot;
1419 u32 item_size;
1420 struct btrfs_dir_item *di;
1421 struct btrfs_dir_item *log_di;
1422 int name_len;
1423 unsigned long ptr;
1424 unsigned long ptr_end;
1425 char *name;
1426 struct inode *inode;
1427 struct btrfs_key location;
1428
1429again:
1430 eb = path->nodes[0];
1431 slot = path->slots[0];
1432 item_size = btrfs_item_size_nr(eb, slot);
1433 ptr = btrfs_item_ptr_offset(eb, slot);
1434 ptr_end = ptr + item_size;
1435 while (ptr < ptr_end) {
1436 di = (struct btrfs_dir_item *)ptr;
1437 if (verify_dir_item(root, eb, di)) {
1438 ret = -EIO;
1439 goto out;
1440 }
1441
1442 name_len = btrfs_dir_name_len(eb, di);
1443 name = kmalloc(name_len, GFP_NOFS);
1444 if (!name) {
1445 ret = -ENOMEM;
1446 goto out;
1447 }
1448 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1449 name_len);
1450 log_di = NULL;
1451 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
1452 log_di = btrfs_lookup_dir_item(trans, log, log_path,
1453 dir_key->objectid,
1454 name, name_len, 0);
1455 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
1456 log_di = btrfs_lookup_dir_index_item(trans, log,
1457 log_path,
1458 dir_key->objectid,
1459 dir_key->offset,
1460 name, name_len, 0);
1461 }
1462 if (IS_ERR_OR_NULL(log_di)) {
1463 btrfs_dir_item_key_to_cpu(eb, di, &location);
1464 btrfs_release_path(path);
1465 btrfs_release_path(log_path);
1466 inode = read_one_inode(root, location.objectid);
1467 if (!inode) {
1468 kfree(name);
1469 return -EIO;
1470 }
1471
1472 ret = link_to_fixup_dir(trans, root,
1473 path, location.objectid);
1474 BUG_ON(ret);
1475 btrfs_inc_nlink(inode);
1476 ret = btrfs_unlink_inode(trans, root, dir, inode,
1477 name, name_len);
1478 BUG_ON(ret);
1479 kfree(name);
1480 iput(inode);
1481
1482 /* there might still be more names under this key
1483 * check and repeat if required
1484 */
1485 ret = btrfs_search_slot(NULL, root, dir_key, path,
1486 0, 0);
1487 if (ret == 0)
1488 goto again;
1489 ret = 0;
1490 goto out;
1491 }
1492 btrfs_release_path(log_path);
1493 kfree(name);
1494
1495 ptr = (unsigned long)(di + 1);
1496 ptr += name_len;
1497 }
1498 ret = 0;
1499out:
1500 btrfs_release_path(path);
1501 btrfs_release_path(log_path);
1502 return ret;
1503}
1504
/*
 * deletion replay happens before we copy any new directory items
 * out of the log or out of backreferences from inodes. It
 * scans the log to find ranges of keys that the log is authoritative for,
 * and then scans the directory to find items in those ranges that are
 * not present in the log.
 *
 * Anything we don't find in the log is unlinked and removed from the
 * directory.
 */
1515static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
1516 struct btrfs_root *root,
1517 struct btrfs_root *log,
1518 struct btrfs_path *path,
1519 u64 dirid, int del_all)
1520{
1521 u64 range_start;
1522 u64 range_end;
1523 int key_type = BTRFS_DIR_LOG_ITEM_KEY;
1524 int ret = 0;
1525 struct btrfs_key dir_key;
1526 struct btrfs_key found_key;
1527 struct btrfs_path *log_path;
1528 struct inode *dir;
1529
1530 dir_key.objectid = dirid;
1531 dir_key.type = BTRFS_DIR_ITEM_KEY;
1532 log_path = btrfs_alloc_path();
1533 if (!log_path)
1534 return -ENOMEM;
1535
1536 dir = read_one_inode(root, dirid);
1537 /* it isn't an error if the inode isn't there, that can happen
1538 * because we replay the deletes before we copy in the inode item
1539 * from the log
1540 */
1541 if (!dir) {
1542 btrfs_free_path(log_path);
1543 return 0;
1544 }
1545again:
1546 range_start = 0;
1547 range_end = 0;
1548 while (1) {
1549 if (del_all)
1550 range_end = (u64)-1;
1551 else {
1552 ret = find_dir_range(log, path, dirid, key_type,
1553 &range_start, &range_end);
1554 if (ret != 0)
1555 break;
1556 }
1557
1558 dir_key.offset = range_start;
1559 while (1) {
1560 int nritems;
1561 ret = btrfs_search_slot(NULL, root, &dir_key, path,
1562 0, 0);
1563 if (ret < 0)
1564 goto out;
1565
1566 nritems = btrfs_header_nritems(path->nodes[0]);
1567 if (path->slots[0] >= nritems) {
1568 ret = btrfs_next_leaf(root, path);
1569 if (ret)
1570 break;
1571 }
1572 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1573 path->slots[0]);
1574 if (found_key.objectid != dirid ||
1575 found_key.type != dir_key.type)
1576 goto next_type;
1577
1578 if (found_key.offset > range_end)
1579 break;
1580
1581 ret = check_item_in_log(trans, root, log, path,
1582 log_path, dir,
1583 &found_key);
1584 BUG_ON(ret);
1585 if (found_key.offset == (u64)-1)
1586 break;
1587 dir_key.offset = found_key.offset + 1;
1588 }
1589 btrfs_release_path(path);
1590 if (range_end == (u64)-1)
1591 break;
1592 range_start = range_end + 1;
1593 }
1594
1595next_type:
1596 ret = 0;
1597 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
1598 key_type = BTRFS_DIR_LOG_INDEX_KEY;
1599 dir_key.type = BTRFS_DIR_INDEX_KEY;
1600 btrfs_release_path(path);
1601 goto again;
1602 }
1603out:
1604 btrfs_release_path(path);
1605 btrfs_free_path(log_path);
1606 iput(dir);
1607 return ret;
1608}
1609
1610/*
1611 * the process_func used to replay items from the log tree. This
1612 * gets called in two different stages. The first stage just looks
1613 * for inodes and makes sure they are all copied into the subvolume.
1614 *
1615 * The second stage copies all the other item types from the log into
1616 * the subvolume. The two stage approach is slower, but gets rid of
1617 * lots of complexity around inodes referencing other inodes that exist
1618 * only in the log (references come from either directory items or inode
1619 * back refs).
1620 */
1621static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
1622 struct walk_control *wc, u64 gen)
1623{
1624 int nritems;
1625 struct btrfs_path *path;
1626 struct btrfs_root *root = wc->replay_dest;
1627 struct btrfs_key key;
1628 int level;
1629 int i;
1630 int ret;
1631
1632 btrfs_read_buffer(eb, gen);
1633
1634 level = btrfs_header_level(eb);
1635
1636 if (level != 0)
1637 return 0;
1638
1639 path = btrfs_alloc_path();
1640 if (!path)
1641 return -ENOMEM;
1642
1643 nritems = btrfs_header_nritems(eb);
1644 for (i = 0; i < nritems; i++) {
1645 btrfs_item_key_to_cpu(eb, &key, i);
1646
1647 /* inode keys are done during the first stage */
1648 if (key.type == BTRFS_INODE_ITEM_KEY &&
1649 wc->stage == LOG_WALK_REPLAY_INODES) {
1650 struct btrfs_inode_item *inode_item;
1651 u32 mode;
1652
1653 inode_item = btrfs_item_ptr(eb, i,
1654 struct btrfs_inode_item);
1655 mode = btrfs_inode_mode(eb, inode_item);
1656 if (S_ISDIR(mode)) {
1657 ret = replay_dir_deletes(wc->trans,
1658 root, log, path, key.objectid, 0);
1659 BUG_ON(ret);
1660 }
1661 ret = overwrite_item(wc->trans, root, path,
1662 eb, i, &key);
1663 BUG_ON(ret);
1664
			/* for regular files, make sure the corresponding
			 * orphan item exists. extents past the new EOF
			 * will be truncated later by orphan cleanup.
			 */
1669 if (S_ISREG(mode)) {
1670 ret = insert_orphan_item(wc->trans, root,
1671 key.objectid);
1672 BUG_ON(ret);
1673 }
1674
1675 ret = link_to_fixup_dir(wc->trans, root,
1676 path, key.objectid);
1677 BUG_ON(ret);
1678 }
1679 if (wc->stage < LOG_WALK_REPLAY_ALL)
1680 continue;
1681
1682 /* these keys are simply copied */
1683 if (key.type == BTRFS_XATTR_ITEM_KEY) {
1684 ret = overwrite_item(wc->trans, root, path,
1685 eb, i, &key);
1686 BUG_ON(ret);
1687 } else if (key.type == BTRFS_INODE_REF_KEY) {
1688 ret = add_inode_ref(wc->trans, root, log, path,
1689 eb, i, &key);
1690 BUG_ON(ret && ret != -ENOENT);
1691 } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
1692 ret = replay_one_extent(wc->trans, root, path,
1693 eb, i, &key);
1694 BUG_ON(ret);
1695 } else if (key.type == BTRFS_DIR_ITEM_KEY ||
1696 key.type == BTRFS_DIR_INDEX_KEY) {
1697 ret = replay_one_dir_item(wc->trans, root, path,
1698 eb, i, &key);
1699 BUG_ON(ret);
1700 }
1701 }
1702 btrfs_free_path(path);
1703 return 0;
1704}
1705
1706static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
1707 struct btrfs_root *root,
1708 struct btrfs_path *path, int *level,
1709 struct walk_control *wc)
1710{
1711 u64 root_owner;
1712 u64 bytenr;
1713 u64 ptr_gen;
1714 struct extent_buffer *next;
1715 struct extent_buffer *cur;
1716 struct extent_buffer *parent;
1717 u32 blocksize;
1718 int ret = 0;
1719
1720 WARN_ON(*level < 0);
1721 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1722
1723 while (*level > 0) {
1724 WARN_ON(*level < 0);
1725 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1726 cur = path->nodes[*level];
1727
1728 if (btrfs_header_level(cur) != *level)
1729 WARN_ON(1);
1730
1731 if (path->slots[*level] >=
1732 btrfs_header_nritems(cur))
1733 break;
1734
1735 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
1736 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
1737 blocksize = btrfs_level_size(root, *level - 1);
1738
1739 parent = path->nodes[*level];
1740 root_owner = btrfs_header_owner(parent);
1741
1742 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
1743 if (!next)
1744 return -ENOMEM;
1745
1746 if (*level == 1) {
1747 ret = wc->process_func(root, next, wc, ptr_gen);
1748 if (ret)
1749 return ret;
1750
1751 path->slots[*level]++;
1752 if (wc->free) {
1753 btrfs_read_buffer(next, ptr_gen);
1754
1755 btrfs_tree_lock(next);
1756 btrfs_set_lock_blocking(next);
1757 clean_tree_block(trans, root, next);
1758 btrfs_wait_tree_block_writeback(next);
1759 btrfs_tree_unlock(next);
1760
1761 WARN_ON(root_owner !=
1762 BTRFS_TREE_LOG_OBJECTID);
1763 ret = btrfs_free_reserved_extent(root,
1764 bytenr, blocksize);
1765 BUG_ON(ret);
1766 }
1767 free_extent_buffer(next);
1768 continue;
1769 }
1770 btrfs_read_buffer(next, ptr_gen);
1771
1772 WARN_ON(*level <= 0);
1773 if (path->nodes[*level-1])
1774 free_extent_buffer(path->nodes[*level-1]);
1775 path->nodes[*level-1] = next;
1776 *level = btrfs_header_level(next);
1777 path->slots[*level] = 0;
1778 cond_resched();
1779 }
1780 WARN_ON(*level < 0);
1781 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1782
1783 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
1784
1785 cond_resched();
1786 return 0;
1787}
1788
1789static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
1790 struct btrfs_root *root,
1791 struct btrfs_path *path, int *level,
1792 struct walk_control *wc)
1793{
1794 u64 root_owner;
1795 int i;
1796 int slot;
1797 int ret;
1798
1799 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
1800 slot = path->slots[i];
1801 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
1802 path->slots[i]++;
1803 *level = i;
1804 WARN_ON(*level == 0);
1805 return 0;
1806 } else {
1807 struct extent_buffer *parent;
1808 if (path->nodes[*level] == root->node)
1809 parent = path->nodes[*level];
1810 else
1811 parent = path->nodes[*level + 1];
1812
1813 root_owner = btrfs_header_owner(parent);
1814 ret = wc->process_func(root, path->nodes[*level], wc,
1815 btrfs_header_generation(path->nodes[*level]));
1816 if (ret)
1817 return ret;
1818
1819 if (wc->free) {
1820 struct extent_buffer *next;
1821
1822 next = path->nodes[*level];
1823
1824 btrfs_tree_lock(next);
1825 btrfs_set_lock_blocking(next);
1826 clean_tree_block(trans, root, next);
1827 btrfs_wait_tree_block_writeback(next);
1828 btrfs_tree_unlock(next);
1829
1830 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
1831 ret = btrfs_free_reserved_extent(root,
1832 path->nodes[*level]->start,
1833 path->nodes[*level]->len);
1834 BUG_ON(ret);
1835 }
1836 free_extent_buffer(path->nodes[*level]);
1837 path->nodes[*level] = NULL;
1838 *level = i + 1;
1839 }
1840 }
1841 return 1;
1842}
1843
/*
 * walk the log tree, processing each block with wc->process_func.
 * When wc->free is set, the blocks are also cleaned up and their
 * reserved extents are freed once they have been processed.
 */
1849static int walk_log_tree(struct btrfs_trans_handle *trans,
1850 struct btrfs_root *log, struct walk_control *wc)
1851{
1852 int ret = 0;
1853 int wret;
1854 int level;
1855 struct btrfs_path *path;
1856 int i;
1857 int orig_level;
1858
1859 path = btrfs_alloc_path();
1860 if (!path)
1861 return -ENOMEM;
1862
1863 level = btrfs_header_level(log->node);
1864 orig_level = level;
1865 path->nodes[level] = log->node;
1866 extent_buffer_get(log->node);
1867 path->slots[level] = 0;
1868
1869 while (1) {
1870 wret = walk_down_log_tree(trans, log, path, &level, wc);
1871 if (wret > 0)
1872 break;
1873 if (wret < 0)
1874 ret = wret;
1875
1876 wret = walk_up_log_tree(trans, log, path, &level, wc);
1877 if (wret > 0)
1878 break;
1879 if (wret < 0)
1880 ret = wret;
1881 }
1882
1883 /* was the root node processed? if not, catch it here */
1884 if (path->nodes[orig_level]) {
1885 wc->process_func(log, path->nodes[orig_level], wc,
1886 btrfs_header_generation(path->nodes[orig_level]));
1887 if (wc->free) {
1888 struct extent_buffer *next;
1889
1890 next = path->nodes[orig_level];
1891
1892 btrfs_tree_lock(next);
1893 btrfs_set_lock_blocking(next);
1894 clean_tree_block(trans, log, next);
1895 btrfs_wait_tree_block_writeback(next);
1896 btrfs_tree_unlock(next);
1897
1898 WARN_ON(log->root_key.objectid !=
1899 BTRFS_TREE_LOG_OBJECTID);
1900 ret = btrfs_free_reserved_extent(log, next->start,
1901 next->len);
1902 BUG_ON(ret);
1903 }
1904 }
1905
1906 for (i = 0; i <= orig_level; i++) {
1907 if (path->nodes[i]) {
1908 free_extent_buffer(path->nodes[i]);
1909 path->nodes[i] = NULL;
1910 }
1911 }
1912 btrfs_free_path(path);
1913 return ret;
1914}
1915
/*
 * helper function to update the item for a given subvolume's log root
 * in the tree of log roots
 */
1920static int update_log_root(struct btrfs_trans_handle *trans,
1921 struct btrfs_root *log)
1922{
1923 int ret;
1924
1925 if (log->log_transid == 1) {
1926 /* insert root item on the first sync */
1927 ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
1928 &log->root_key, &log->root_item);
1929 } else {
1930 ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
1931 &log->root_key, &log->root_item);
1932 }
1933 return ret;
1934}
1935
1936static int wait_log_commit(struct btrfs_trans_handle *trans,
1937 struct btrfs_root *root, unsigned long transid)
1938{
1939 DEFINE_WAIT(wait);
1940 int index = transid % 2;
1941
1942 /*
1943 * we only allow two pending log transactions at a time,
1944 * so we know that if ours is more than 2 older than the
1945 * current transaction, we're done
1946 */
1947 do {
1948 prepare_to_wait(&root->log_commit_wait[index],
1949 &wait, TASK_UNINTERRUPTIBLE);
1950 mutex_unlock(&root->log_mutex);
1951
1952 if (root->fs_info->last_trans_log_full_commit !=
1953 trans->transid && root->log_transid < transid + 2 &&
1954 atomic_read(&root->log_commit[index]))
1955 schedule();
1956
1957 finish_wait(&root->log_commit_wait[index], &wait);
1958 mutex_lock(&root->log_mutex);
1959 } while (root->log_transid < transid + 2 &&
1960 atomic_read(&root->log_commit[index]));
1961 return 0;
1962}
1963
1964static int wait_for_writer(struct btrfs_trans_handle *trans,
1965 struct btrfs_root *root)
1966{
1967 DEFINE_WAIT(wait);
1968 while (atomic_read(&root->log_writers)) {
1969 prepare_to_wait(&root->log_writer_wait,
1970 &wait, TASK_UNINTERRUPTIBLE);
1971 mutex_unlock(&root->log_mutex);
1972 if (root->fs_info->last_trans_log_full_commit !=
1973 trans->transid && atomic_read(&root->log_writers))
1974 schedule();
1975 mutex_lock(&root->log_mutex);
1976 finish_wait(&root->log_writer_wait, &wait);
1977 }
1978 return 0;
1979}
1980
1981/*
1982 * btrfs_sync_log sends a given tree log down to the disk and
1983 * updates the super blocks to record it. When this call is done,
1984 * you know that any inodes previously logged are safely on disk only
1985 * if it returns 0.
1986 *
1987 * Any other return value means you need to call btrfs_commit_transaction.
1988 * Some of the edge cases for fsyncing directories that have had unlinks
1989 * or renames done in the past mean that sometimes the only safe
1990 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
1991 * that has happened.
1992 */
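/*
 * A rough sketch of how a caller is expected to react to the return value
 * (illustrative only; the real fsync path lives in file.c):
 *
 *	ret = btrfs_sync_log(trans, root);
 *	if (ret == 0)
 *		ret = btrfs_end_transaction(trans, root);
 *	else
 *		ret = btrfs_commit_transaction(trans, root);
 */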
1993int btrfs_sync_log(struct btrfs_trans_handle *trans,
1994 struct btrfs_root *root)
1995{
1996 int index1;
1997 int index2;
1998 int mark;
1999 int ret;
2000 struct btrfs_root *log = root->log_root;
2001 struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
2002 unsigned long log_transid = 0;
2003
2004 mutex_lock(&root->log_mutex);
2005 index1 = root->log_transid % 2;
2006 if (atomic_read(&root->log_commit[index1])) {
2007 wait_log_commit(trans, root, root->log_transid);
2008 mutex_unlock(&root->log_mutex);
2009 return 0;
2010 }
2011 atomic_set(&root->log_commit[index1], 1);
2012
2013 /* wait for previous tree log sync to complete */
2014 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
2015 wait_log_commit(trans, root, root->log_transid - 1);
2016
2017 while (1) {
2018 unsigned long batch = root->log_batch;
2019 if (root->log_multiple_pids) {
2020 mutex_unlock(&root->log_mutex);
2021 schedule_timeout_uninterruptible(1);
2022 mutex_lock(&root->log_mutex);
2023 }
2024 wait_for_writer(trans, root);
2025 if (batch == root->log_batch)
2026 break;
2027 }
2028
2029 /* bail out if we need to do a full commit */
2030 if (root->fs_info->last_trans_log_full_commit == trans->transid) {
2031 ret = -EAGAIN;
2032 mutex_unlock(&root->log_mutex);
2033 goto out;
2034 }
2035
2036 log_transid = root->log_transid;
2037 if (log_transid % 2 == 0)
2038 mark = EXTENT_DIRTY;
2039 else
2040 mark = EXTENT_NEW;
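	/*
	 * Descriptive note (an assumption based on the parity check above):
	 * even log transids mark their dirty pages EXTENT_DIRTY and odd ones
	 * EXTENT_NEW, so the two log commits that may be in flight at once
	 * never write or wait on each other's marked extents.
	 */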
2041
2042 /* we start IO on all the marked extents here, but we don't actually
2043 * wait for them until later.
2044 */
2045 ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
2046 BUG_ON(ret);
2047
2048 btrfs_set_root_node(&log->root_item, log->node);
2049
2050 root->log_batch = 0;
2051 root->log_transid++;
2052 log->log_transid = root->log_transid;
2053 root->log_start_pid = 0;
2054 smp_mb();
2055 /*
2056 * IO has been started, blocks of the log tree have WRITTEN flag set
2057 * in their headers. new modifications of the log will be written to
2058 * new positions. so it's safe to allow log writers to go in.
2059 */
2060 mutex_unlock(&root->log_mutex);
2061
2062 mutex_lock(&log_root_tree->log_mutex);
2063 log_root_tree->log_batch++;
2064 atomic_inc(&log_root_tree->log_writers);
2065 mutex_unlock(&log_root_tree->log_mutex);
2066
2067 ret = update_log_root(trans, log);
2068
2069 mutex_lock(&log_root_tree->log_mutex);
2070 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
2071 smp_mb();
2072 if (waitqueue_active(&log_root_tree->log_writer_wait))
2073 wake_up(&log_root_tree->log_writer_wait);
2074 }
2075
2076 if (ret) {
2077 BUG_ON(ret != -ENOSPC);
2078 root->fs_info->last_trans_log_full_commit = trans->transid;
2079 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2080 mutex_unlock(&log_root_tree->log_mutex);
2081 ret = -EAGAIN;
2082 goto out;
2083 }
2084
2085 index2 = log_root_tree->log_transid % 2;
2086 if (atomic_read(&log_root_tree->log_commit[index2])) {
2087 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2088 wait_log_commit(trans, log_root_tree,
2089 log_root_tree->log_transid);
2090 mutex_unlock(&log_root_tree->log_mutex);
2091 ret = 0;
2092 goto out;
2093 }
2094 atomic_set(&log_root_tree->log_commit[index2], 1);
2095
2096 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
2097 wait_log_commit(trans, log_root_tree,
2098 log_root_tree->log_transid - 1);
2099 }
2100
2101 wait_for_writer(trans, log_root_tree);
2102
2103 /*
2104 * now that we've moved on to the tree of log tree roots,
2105 * check the full commit flag again
2106 */
2107 if (root->fs_info->last_trans_log_full_commit == trans->transid) {
2108 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2109 mutex_unlock(&log_root_tree->log_mutex);
2110 ret = -EAGAIN;
2111 goto out_wake_log_root;
2112 }
2113
2114 ret = btrfs_write_and_wait_marked_extents(log_root_tree,
2115 &log_root_tree->dirty_log_pages,
2116 EXTENT_DIRTY | EXTENT_NEW);
2117 BUG_ON(ret);
2118 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2119
2120 btrfs_set_super_log_root(&root->fs_info->super_for_commit,
2121 log_root_tree->node->start);
2122 btrfs_set_super_log_root_level(&root->fs_info->super_for_commit,
2123 btrfs_header_level(log_root_tree->node));
2124
2125 log_root_tree->log_batch = 0;
2126 log_root_tree->log_transid++;
2127 smp_mb();
2128
2129 mutex_unlock(&log_root_tree->log_mutex);
2130
2131 /*
2132 * nobody else is going to jump in and write the ctree
2133 * super here because the log_commit atomic below is protecting
2134 * us. We must be called with a transaction handle pinning
2135 * the running transaction open, so a full commit can't hop
2136 * in and cause problems either.
2137 */
2138 btrfs_scrub_pause_super(root);
2139 write_ctree_super(trans, root->fs_info->tree_root, 1);
2140 btrfs_scrub_continue_super(root);
2141 ret = 0;
2142
2143 mutex_lock(&root->log_mutex);
2144 if (root->last_log_commit < log_transid)
2145 root->last_log_commit = log_transid;
2146 mutex_unlock(&root->log_mutex);
2147
2148out_wake_log_root:
2149 atomic_set(&log_root_tree->log_commit[index2], 0);
2150 smp_mb();
2151 if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
2152 wake_up(&log_root_tree->log_commit_wait[index2]);
2153out:
2154 atomic_set(&root->log_commit[index1], 0);
2155 smp_mb();
2156 if (waitqueue_active(&root->log_commit_wait[index1]))
2157 wake_up(&root->log_commit_wait[index1]);
2158 return ret;
2159}
2160
2161static void free_log_tree(struct btrfs_trans_handle *trans,
2162 struct btrfs_root *log)
2163{
2164 int ret;
2165 u64 start;
2166 u64 end;
2167 struct walk_control wc = {
2168 .free = 1,
2169 .process_func = process_one_buffer
2170 };
2171
2172 ret = walk_log_tree(trans, log, &wc);
2173 BUG_ON(ret);
2174
2175 while (1) {
2176 ret = find_first_extent_bit(&log->dirty_log_pages,
2177 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW);
2178 if (ret)
2179 break;
2180
2181 clear_extent_bits(&log->dirty_log_pages, start, end,
2182 EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
2183 }
2184
2185 free_extent_buffer(log->node);
2186 kfree(log);
2187}
2188
2189/*
2190 * free all the extents used by the tree log. This should be called
2191 * at commit time of the full transaction
2192 */
2193int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
2194{
2195 if (root->log_root) {
2196 free_log_tree(trans, root->log_root);
2197 root->log_root = NULL;
2198 }
2199 return 0;
2200}
2201
2202int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
2203 struct btrfs_fs_info *fs_info)
2204{
2205 if (fs_info->log_root_tree) {
2206 free_log_tree(trans, fs_info->log_root_tree);
2207 fs_info->log_root_tree = NULL;
2208 }
2209 return 0;
2210}
2211
2212/*
2213 * If both a file and directory are logged, and unlinks or renames are
2214 * mixed in, we have a few interesting corners:
2215 *
2216 * create file X in dir Y
2217 * link file X to X.link in dir Y
2218 * fsync file X
2219 * unlink file X but leave X.link
2220 * fsync dir Y
2221 *
2222 * After a crash we would expect only X.link to exist. But file X
2223 * didn't get fsync'd again so the log has back refs for X and X.link.
2224 *
2225 * We solve this by removing directory entries and inode backrefs from the
2226 * log when a file that was logged in the current transaction is
2227 * unlinked. Any later fsync will include the updated log entries, and
2228 * we'll be able to reconstruct the proper directory items from backrefs.
2229 *
2230 * This optimization allows us to avoid relogging the entire inode
2231 * or the entire directory.
2232 */
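/*
 * Rough sketch of the log items touched below (only a reading of the
 * lookups in the function, not an extra contract): the
 * (dir_ino, BTRFS_DIR_ITEM_KEY, hash(name)) entry, the matching
 * (dir_ino, BTRFS_DIR_INDEX_KEY, index) entry, and the logged i_size of
 * the directory, which is reduced by the bytes of the removed names.
 */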
2233int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2234 struct btrfs_root *root,
2235 const char *name, int name_len,
2236 struct inode *dir, u64 index)
2237{
2238 struct btrfs_root *log;
2239 struct btrfs_dir_item *di;
2240 struct btrfs_path *path;
2241 int ret;
2242 int err = 0;
2243 int bytes_del = 0;
2244 u64 dir_ino = btrfs_ino(dir);
2245
2246 if (BTRFS_I(dir)->logged_trans < trans->transid)
2247 return 0;
2248
2249 ret = join_running_log_trans(root);
2250 if (ret)
2251 return 0;
2252
2253 mutex_lock(&BTRFS_I(dir)->log_mutex);
2254
2255 log = root->log_root;
2256 path = btrfs_alloc_path();
2257 if (!path) {
2258 err = -ENOMEM;
2259 goto out_unlock;
2260 }
2261
2262 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
2263 name, name_len, -1);
2264 if (IS_ERR(di)) {
2265 err = PTR_ERR(di);
2266 goto fail;
2267 }
2268 if (di) {
2269 ret = btrfs_delete_one_dir_name(trans, log, path, di);
2270 bytes_del += name_len;
2271 BUG_ON(ret);
2272 }
2273 btrfs_release_path(path);
2274 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
2275 index, name, name_len, -1);
2276 if (IS_ERR(di)) {
2277 err = PTR_ERR(di);
2278 goto fail;
2279 }
2280 if (di) {
2281 ret = btrfs_delete_one_dir_name(trans, log, path, di);
2282 bytes_del += name_len;
2283 BUG_ON(ret);
2284 }
2285
2286 /* update the directory size in the log to reflect the names
2287 * we have removed
2288 */
2289 if (bytes_del) {
2290 struct btrfs_key key;
2291
2292 key.objectid = dir_ino;
2293 key.offset = 0;
2294 key.type = BTRFS_INODE_ITEM_KEY;
2295 btrfs_release_path(path);
2296
2297 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
2298 if (ret < 0) {
2299 err = ret;
2300 goto fail;
2301 }
2302 if (ret == 0) {
2303 struct btrfs_inode_item *item;
2304 u64 i_size;
2305
2306 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2307 struct btrfs_inode_item);
2308 i_size = btrfs_inode_size(path->nodes[0], item);
2309 if (i_size > bytes_del)
2310 i_size -= bytes_del;
2311 else
2312 i_size = 0;
2313 btrfs_set_inode_size(path->nodes[0], item, i_size);
2314 btrfs_mark_buffer_dirty(path->nodes[0]);
2315 } else
2316 ret = 0;
2317 btrfs_release_path(path);
2318 }
2319fail:
2320 btrfs_free_path(path);
2321out_unlock:
2322 mutex_unlock(&BTRFS_I(dir)->log_mutex);
2323 if (ret == -ENOSPC) {
2324 root->fs_info->last_trans_log_full_commit = trans->transid;
2325 ret = 0;
2326 }
2327 btrfs_end_log_trans(root);
2328
2329 return err;
2330}
2331
2332/* see comments for btrfs_del_dir_entries_in_log */
2333int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
2334 struct btrfs_root *root,
2335 const char *name, int name_len,
2336 struct inode *inode, u64 dirid)
2337{
2338 struct btrfs_root *log;
2339 u64 index;
2340 int ret;
2341
2342 if (BTRFS_I(inode)->logged_trans < trans->transid)
2343 return 0;
2344
2345 ret = join_running_log_trans(root);
2346 if (ret)
2347 return 0;
2348 log = root->log_root;
2349 mutex_lock(&BTRFS_I(inode)->log_mutex);
2350
2351 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
2352 dirid, &index);
2353 mutex_unlock(&BTRFS_I(inode)->log_mutex);
2354 if (ret == -ENOSPC) {
2355 root->fs_info->last_trans_log_full_commit = trans->transid;
2356 ret = 0;
2357 }
2358 btrfs_end_log_trans(root);
2359
2360 return ret;
2361}
2362
2363/*
2364 * creates a range item in the log for 'dirid'. first_offset and
2365 * last_offset tell us which parts of the key space the log should
2366 * be considered authoritative for.
2367 */
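/*
 * Sketch of the resulting item for the BTRFS_DIR_ITEM_KEY case (the
 * BTRFS_DIR_LOG_INDEX_KEY case is analogous); this only restates the
 * code below:
 *
 *	key.objectid = dirid
 *	key.type     = BTRFS_DIR_LOG_ITEM_KEY
 *	key.offset   = first_offset
 *	item         = struct btrfs_dir_log_item { .end = last_offset }
 *
 * During replay, directory entries whose offsets fall inside
 * [first_offset, last_offset] but are missing from the log are treated
 * as having been deleted.
 */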
2368static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
2369 struct btrfs_root *log,
2370 struct btrfs_path *path,
2371 int key_type, u64 dirid,
2372 u64 first_offset, u64 last_offset)
2373{
2374 int ret;
2375 struct btrfs_key key;
2376 struct btrfs_dir_log_item *item;
2377
2378 key.objectid = dirid;
2379 key.offset = first_offset;
2380 if (key_type == BTRFS_DIR_ITEM_KEY)
2381 key.type = BTRFS_DIR_LOG_ITEM_KEY;
2382 else
2383 key.type = BTRFS_DIR_LOG_INDEX_KEY;
2384 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
2385 if (ret)
2386 return ret;
2387
2388 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2389 struct btrfs_dir_log_item);
2390 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
2391 btrfs_mark_buffer_dirty(path->nodes[0]);
2392 btrfs_release_path(path);
2393 return 0;
2394}
2395
2396/*
2397 * log all the items included in the current transaction for a given
2398 * directory. This also creates the range items in the log tree required
2399 * to replay anything deleted before the fsync
2400 */
2401static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2402 struct btrfs_root *root, struct inode *inode,
2403 struct btrfs_path *path,
2404 struct btrfs_path *dst_path, int key_type,
2405 u64 min_offset, u64 *last_offset_ret)
2406{
2407 struct btrfs_key min_key;
2408 struct btrfs_key max_key;
2409 struct btrfs_root *log = root->log_root;
2410 struct extent_buffer *src;
2411 int err = 0;
2412 int ret;
2413 int i;
2414 int nritems;
2415 u64 first_offset = min_offset;
2416 u64 last_offset = (u64)-1;
2417 u64 ino = btrfs_ino(inode);
2418
2419 log = root->log_root;
2420 max_key.objectid = ino;
2421 max_key.offset = (u64)-1;
2422 max_key.type = key_type;
2423
2424 min_key.objectid = ino;
2425 min_key.type = key_type;
2426 min_key.offset = min_offset;
2427
2428 path->keep_locks = 1;
2429
2430 ret = btrfs_search_forward(root, &min_key, &max_key,
2431 path, 0, trans->transid);
2432
2433 /*
2434 * we didn't find anything from this transaction, see if there
2435 * is anything at all
2436 */
2437 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
2438 min_key.objectid = ino;
2439 min_key.type = key_type;
2440 min_key.offset = (u64)-1;
2441 btrfs_release_path(path);
2442 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
2443 if (ret < 0) {
2444 btrfs_release_path(path);
2445 return ret;
2446 }
2447 ret = btrfs_previous_item(root, path, ino, key_type);
2448
2449 /* if ret == 0 there are items for this type,
2450 * create a range to tell us the last key of this type.
2451 * otherwise, there are no items in this directory after
2452 * *min_offset, and we create a range to indicate that.
2453 */
2454 if (ret == 0) {
2455 struct btrfs_key tmp;
2456 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
2457 path->slots[0]);
2458 if (key_type == tmp.type)
2459 first_offset = max(min_offset, tmp.offset) + 1;
2460 }
2461 goto done;
2462 }
2463
2464 /* go backward to find any previous key */
2465 ret = btrfs_previous_item(root, path, ino, key_type);
2466 if (ret == 0) {
2467 struct btrfs_key tmp;
2468 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
2469 if (key_type == tmp.type) {
2470 first_offset = tmp.offset;
2471 ret = overwrite_item(trans, log, dst_path,
2472 path->nodes[0], path->slots[0],
2473 &tmp);
2474 if (ret) {
2475 err = ret;
2476 goto done;
2477 }
2478 }
2479 }
2480 btrfs_release_path(path);
2481
2482 /* find the first key from this transaction again */
2483 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
2484 if (ret != 0) {
2485 WARN_ON(1);
2486 goto done;
2487 }
2488
2489 /*
2490 * we have a block from this transaction, log every item in it
2491 * from our directory
2492 */
2493 while (1) {
2494 struct btrfs_key tmp;
2495 src = path->nodes[0];
2496 nritems = btrfs_header_nritems(src);
2497 for (i = path->slots[0]; i < nritems; i++) {
2498 btrfs_item_key_to_cpu(src, &min_key, i);
2499
2500 if (min_key.objectid != ino || min_key.type != key_type)
2501 goto done;
2502 ret = overwrite_item(trans, log, dst_path, src, i,
2503 &min_key);
2504 if (ret) {
2505 err = ret;
2506 goto done;
2507 }
2508 }
2509 path->slots[0] = nritems;
2510
2511 /*
2512 * look ahead to the next item and see if it is also
2513 * from this directory and from this transaction
2514 */
2515 ret = btrfs_next_leaf(root, path);
2516 if (ret == 1) {
2517 last_offset = (u64)-1;
2518 goto done;
2519 }
2520 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
2521 if (tmp.objectid != ino || tmp.type != key_type) {
2522 last_offset = (u64)-1;
2523 goto done;
2524 }
2525 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
2526 ret = overwrite_item(trans, log, dst_path,
2527 path->nodes[0], path->slots[0],
2528 &tmp);
2529 if (ret)
2530 err = ret;
2531 else
2532 last_offset = tmp.offset;
2533 goto done;
2534 }
2535 }
2536done:
2537 btrfs_release_path(path);
2538 btrfs_release_path(dst_path);
2539
2540 if (err == 0) {
2541 *last_offset_ret = last_offset;
2542 /*
2543 * insert the log range keys to indicate where the log
2544 * is valid
2545 */
2546 ret = insert_dir_log_key(trans, log, path, key_type,
2547 ino, first_offset, last_offset);
2548 if (ret)
2549 err = ret;
2550 }
2551 return err;
2552}
2553
2554/*
2555 * logging directories is very similar to logging inodes. We find all the items
2556 * from the current transaction and write them to the log.
2557 *
2558 * The recovery code scans the directory in the subvolume, and if it finds a
2559 * key in the range logged that is not present in the log tree, then it means
2560 * that dir entry was unlinked during the transaction.
2561 *
2562 * In order for that scan to work, we must include one key smaller than
2563 * the smallest logged by this transaction and one key larger than the largest
2564 * key logged by this transaction.
2565 */
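/*
 * Illustrative example with hypothetical offsets: if the log ends up
 * authoritative for index offsets [5, 20] and the subvolume directory
 * still holds an entry at offset 12 that is absent from the log, replay
 * deletes it, while an entry at offset 30 lies outside the logged range
 * and is left untouched.
 */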
2566static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
2567 struct btrfs_root *root, struct inode *inode,
2568 struct btrfs_path *path,
2569 struct btrfs_path *dst_path)
2570{
2571 u64 min_key;
2572 u64 max_key;
2573 int ret;
2574 int key_type = BTRFS_DIR_ITEM_KEY;
2575
2576again:
2577 min_key = 0;
2578 max_key = 0;
2579 while (1) {
2580 ret = log_dir_items(trans, root, inode, path,
2581 dst_path, key_type, min_key,
2582 &max_key);
2583 if (ret)
2584 return ret;
2585 if (max_key == (u64)-1)
2586 break;
2587 min_key = max_key + 1;
2588 }
2589
2590 if (key_type == BTRFS_DIR_ITEM_KEY) {
2591 key_type = BTRFS_DIR_INDEX_KEY;
2592 goto again;
2593 }
2594 return 0;
2595}
2596
2597/*
2598 * a helper function to drop items from the log before we relog an
2599 * inode. max_key_type indicates the highest item type to remove.
2600 * This cannot be run for file data extents because it does not
2601 * free the extents they point to.
2602 */
2603static int drop_objectid_items(struct btrfs_trans_handle *trans,
2604 struct btrfs_root *log,
2605 struct btrfs_path *path,
2606 u64 objectid, int max_key_type)
2607{
2608 int ret;
2609 struct btrfs_key key;
2610 struct btrfs_key found_key;
2611
2612 key.objectid = objectid;
2613 key.type = max_key_type;
2614 key.offset = (u64)-1;
2615
2616 while (1) {
2617 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
2618 BUG_ON(ret == 0);
2619 if (ret < 0)
2620 break;
2621
2622 if (path->slots[0] == 0)
2623 break;
2624
2625 path->slots[0]--;
2626 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2627 path->slots[0]);
2628
2629 if (found_key.objectid != objectid)
2630 break;
2631
2632 ret = btrfs_del_item(trans, log, path);
2633 if (ret)
2634 break;
2635 btrfs_release_path(path);
2636 }
2637 btrfs_release_path(path);
2638 return ret;
2639}
2640
2641static noinline int copy_items(struct btrfs_trans_handle *trans,
2642 struct btrfs_root *log,
2643 struct btrfs_path *dst_path,
2644 struct extent_buffer *src,
2645 int start_slot, int nr, int inode_only)
2646{
2647 unsigned long src_offset;
2648 unsigned long dst_offset;
2649 struct btrfs_file_extent_item *extent;
2650 struct btrfs_inode_item *inode_item;
2651 int ret;
2652 struct btrfs_key *ins_keys;
2653 u32 *ins_sizes;
2654 char *ins_data;
2655 int i;
2656 struct list_head ordered_sums;
2657
2658 INIT_LIST_HEAD(&ordered_sums);
2659
2660 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
2661 nr * sizeof(u32), GFP_NOFS);
2662 if (!ins_data)
2663 return -ENOMEM;
2664
2665 ins_sizes = (u32 *)ins_data;
2666 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
2667
2668 for (i = 0; i < nr; i++) {
2669 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
2670 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
2671 }
2672 ret = btrfs_insert_empty_items(trans, log, dst_path,
2673 ins_keys, ins_sizes, nr);
2674 if (ret) {
2675 kfree(ins_data);
2676 return ret;
2677 }
2678
2679 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
2680 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
2681 dst_path->slots[0]);
2682
2683 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
2684
2685 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
2686 src_offset, ins_sizes[i]);
2687
2688 if (inode_only == LOG_INODE_EXISTS &&
2689 ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
2690 inode_item = btrfs_item_ptr(dst_path->nodes[0],
2691 dst_path->slots[0],
2692 struct btrfs_inode_item);
2693 btrfs_set_inode_size(dst_path->nodes[0], inode_item, 0);
2694
2695			/* set the generation to zero so the recovery code
2696			 * can tell the difference between a logging
2697 * just to say 'this inode exists' and a logging
2698 * to say 'update this inode with these values'
2699 */
2700 btrfs_set_inode_generation(dst_path->nodes[0],
2701 inode_item, 0);
2702 }
2703 /* take a reference on file data extents so that truncates
2704 * or deletes of this inode don't have to relog the inode
2705 * again
2706 */
2707 if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY) {
2708 int found_type;
2709 extent = btrfs_item_ptr(src, start_slot + i,
2710 struct btrfs_file_extent_item);
2711
2712 if (btrfs_file_extent_generation(src, extent) < trans->transid)
2713 continue;
2714
2715 found_type = btrfs_file_extent_type(src, extent);
2716 if (found_type == BTRFS_FILE_EXTENT_REG ||
2717 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
2718 u64 ds, dl, cs, cl;
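				/*
				 * ds/dl: disk bytenr and disk length of the
				 * extent; cs/cl: offset and length of the
				 * range we need checksums for, which covers
				 * the whole on-disk extent when it is
				 * compressed.
				 */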
2719 ds = btrfs_file_extent_disk_bytenr(src,
2720 extent);
2721 /* ds == 0 is a hole */
2722 if (ds == 0)
2723 continue;
2724
2725 dl = btrfs_file_extent_disk_num_bytes(src,
2726 extent);
2727 cs = btrfs_file_extent_offset(src, extent);
2728 cl = btrfs_file_extent_num_bytes(src,
2729 extent);
2730 if (btrfs_file_extent_compression(src,
2731 extent)) {
2732 cs = 0;
2733 cl = dl;
2734 }
2735
2736 ret = btrfs_lookup_csums_range(
2737 log->fs_info->csum_root,
2738 ds + cs, ds + cs + cl - 1,
2739 &ordered_sums, 0);
2740 BUG_ON(ret);
2741 }
2742 }
2743 }
2744
2745 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
2746 btrfs_release_path(dst_path);
2747 kfree(ins_data);
2748
2749 /*
2750 * we have to do this after the loop above to avoid changing the
2751 * log tree while trying to change the log tree.
2752 */
2753 ret = 0;
2754 while (!list_empty(&ordered_sums)) {
2755 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
2756 struct btrfs_ordered_sum,
2757 list);
2758 if (!ret)
2759 ret = btrfs_csum_file_blocks(trans, log, sums);
2760 list_del(&sums->list);
2761 kfree(sums);
2762 }
2763 return ret;
2764}
2765
2766/* log a single inode in the tree log.
2767 * At least one parent directory for this inode must exist in the tree
2768 * or be logged already.
2769 *
2770 * Any items from this inode changed by the current transaction are copied
2771 * to the log tree. An extra reference is taken on any extents in this
2772 * file, allowing us to avoid a whole pile of corner cases around logging
2773 * blocks that have been removed from the tree.
2774 *
2775 * See LOG_INODE_ALL and related defines for a description of what inode_only
2776 * does.
2777 *
2778 * This handles both files and directories.
2779 */
2780static int btrfs_log_inode(struct btrfs_trans_handle *trans,
2781 struct btrfs_root *root, struct inode *inode,
2782 int inode_only)
2783{
2784 struct btrfs_path *path;
2785 struct btrfs_path *dst_path;
2786 struct btrfs_key min_key;
2787 struct btrfs_key max_key;
2788 struct btrfs_root *log = root->log_root;
2789 struct extent_buffer *src = NULL;
2790 int err = 0;
2791 int ret;
2792 int nritems;
2793 int ins_start_slot = 0;
2794 int ins_nr;
2795 u64 ino = btrfs_ino(inode);
2796
2797 log = root->log_root;
2798
2799 path = btrfs_alloc_path();
2800 if (!path)
2801 return -ENOMEM;
2802 dst_path = btrfs_alloc_path();
2803 if (!dst_path) {
2804 btrfs_free_path(path);
2805 return -ENOMEM;
2806 }
2807
2808 min_key.objectid = ino;
2809 min_key.type = BTRFS_INODE_ITEM_KEY;
2810 min_key.offset = 0;
2811
2812 max_key.objectid = ino;
2813
2814 /* today the code can only do partial logging of directories */
2815 if (!S_ISDIR(inode->i_mode))
2816 inode_only = LOG_INODE_ALL;
2817
2818 if (inode_only == LOG_INODE_EXISTS || S_ISDIR(inode->i_mode))
2819 max_key.type = BTRFS_XATTR_ITEM_KEY;
2820 else
2821 max_key.type = (u8)-1;
2822 max_key.offset = (u64)-1;
2823
2824 ret = btrfs_commit_inode_delayed_items(trans, inode);
2825 if (ret) {
2826 btrfs_free_path(path);
2827 btrfs_free_path(dst_path);
2828 return ret;
2829 }
2830
2831 mutex_lock(&BTRFS_I(inode)->log_mutex);
2832
2833 /*
2834 * a brute force approach to making sure we get the most uptodate
2835 * copies of everything.
2836 */
2837 if (S_ISDIR(inode->i_mode)) {
2838 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
2839
2840 if (inode_only == LOG_INODE_EXISTS)
2841 max_key_type = BTRFS_XATTR_ITEM_KEY;
2842 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
2843 } else {
2844 ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0);
2845 }
2846 if (ret) {
2847 err = ret;
2848 goto out_unlock;
2849 }
2850 path->keep_locks = 1;
2851
2852 while (1) {
2853 ins_nr = 0;
2854 ret = btrfs_search_forward(root, &min_key, &max_key,
2855 path, 0, trans->transid);
2856 if (ret != 0)
2857 break;
2858again:
2859 /* note, ins_nr might be > 0 here, cleanup outside the loop */
2860 if (min_key.objectid != ino)
2861 break;
2862 if (min_key.type > max_key.type)
2863 break;
2864
2865 src = path->nodes[0];
2866 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
2867 ins_nr++;
2868 goto next_slot;
2869 } else if (!ins_nr) {
2870 ins_start_slot = path->slots[0];
2871 ins_nr = 1;
2872 goto next_slot;
2873 }
2874
2875 ret = copy_items(trans, log, dst_path, src, ins_start_slot,
2876 ins_nr, inode_only);
2877 if (ret) {
2878 err = ret;
2879 goto out_unlock;
2880 }
2881 ins_nr = 1;
2882 ins_start_slot = path->slots[0];
2883next_slot:
2884
2885 nritems = btrfs_header_nritems(path->nodes[0]);
2886 path->slots[0]++;
2887 if (path->slots[0] < nritems) {
2888 btrfs_item_key_to_cpu(path->nodes[0], &min_key,
2889 path->slots[0]);
2890 goto again;
2891 }
2892 if (ins_nr) {
2893 ret = copy_items(trans, log, dst_path, src,
2894 ins_start_slot,
2895 ins_nr, inode_only);
2896 if (ret) {
2897 err = ret;
2898 goto out_unlock;
2899 }
2900 ins_nr = 0;
2901 }
2902 btrfs_release_path(path);
2903
2904 if (min_key.offset < (u64)-1)
2905 min_key.offset++;
2906 else if (min_key.type < (u8)-1)
2907 min_key.type++;
2908 else if (min_key.objectid < (u64)-1)
2909 min_key.objectid++;
2910 else
2911 break;
2912 }
2913 if (ins_nr) {
2914 ret = copy_items(trans, log, dst_path, src,
2915 ins_start_slot,
2916 ins_nr, inode_only);
2917 if (ret) {
2918 err = ret;
2919 goto out_unlock;
2920 }
2921 ins_nr = 0;
2922 }
2923 WARN_ON(ins_nr);
2924 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
2925 btrfs_release_path(path);
2926 btrfs_release_path(dst_path);
2927 ret = log_directory_changes(trans, root, inode, path, dst_path);
2928 if (ret) {
2929 err = ret;
2930 goto out_unlock;
2931 }
2932 }
2933 BTRFS_I(inode)->logged_trans = trans->transid;
2934out_unlock:
2935 mutex_unlock(&BTRFS_I(inode)->log_mutex);
2936
2937 btrfs_free_path(path);
2938 btrfs_free_path(dst_path);
2939 return err;
2940}
2941
2942/*
2943 * follow the dentry parent pointers up the chain and see if any
2944 * of the directories in it require a full commit before they can
2945 * be logged. Returns zero if nothing special needs to be done or 1 if
2946 * a full commit is required.
2947 */
2948static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
2949 struct inode *inode,
2950 struct dentry *parent,
2951 struct super_block *sb,
2952 u64 last_committed)
2953{
2954 int ret = 0;
2955 struct btrfs_root *root;
2956 struct dentry *old_parent = NULL;
2957
2958 /*
2959 * for regular files, if their inode is already on disk, we don't
2960 * have to worry about the parents at all. This is because
2961 * we can use the last_unlink_trans field to record renames
2962 * and other fun in this file.
2963 */
2964 if (S_ISREG(inode->i_mode) &&
2965 BTRFS_I(inode)->generation <= last_committed &&
2966 BTRFS_I(inode)->last_unlink_trans <= last_committed)
2967 goto out;
2968
2969 if (!S_ISDIR(inode->i_mode)) {
2970 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
2971 goto out;
2972 inode = parent->d_inode;
2973 }
2974
2975 while (1) {
2976 BTRFS_I(inode)->logged_trans = trans->transid;
2977 smp_mb();
2978
2979 if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
2980 root = BTRFS_I(inode)->root;
2981
2982 /*
2983 * make sure any commits to the log are forced
2984 * to be full commits
2985 */
2986 root->fs_info->last_trans_log_full_commit =
2987 trans->transid;
2988 ret = 1;
2989 break;
2990 }
2991
2992 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
2993 break;
2994
2995 if (IS_ROOT(parent))
2996 break;
2997
2998 parent = dget_parent(parent);
2999 dput(old_parent);
3000 old_parent = parent;
3001 inode = parent->d_inode;
3002
3003 }
3004 dput(old_parent);
3005out:
3006 return ret;
3007}
3008
3009static int inode_in_log(struct btrfs_trans_handle *trans,
3010 struct inode *inode)
3011{
3012 struct btrfs_root *root = BTRFS_I(inode)->root;
3013 int ret = 0;
3014
3015 mutex_lock(&root->log_mutex);
3016 if (BTRFS_I(inode)->logged_trans == trans->transid &&
3017 BTRFS_I(inode)->last_sub_trans <= root->last_log_commit)
3018 ret = 1;
3019 mutex_unlock(&root->log_mutex);
3020 return ret;
3021}
3022
3023
3024/*
3025 * helper function around btrfs_log_inode to make sure newly created
3026 * parent directories also end up in the log. Minimal, inode-and-backref-
3027 * only logging is done for any parent directories that are older than
3028 * the last committed transaction
3029 */
3030int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
3031 struct btrfs_root *root, struct inode *inode,
3032 struct dentry *parent, int exists_only)
3033{
3034 int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
3035 struct super_block *sb;
3036 struct dentry *old_parent = NULL;
3037 int ret = 0;
3038 u64 last_committed = root->fs_info->last_trans_committed;
3039
3040 sb = inode->i_sb;
3041
3042 if (btrfs_test_opt(root, NOTREELOG)) {
3043 ret = 1;
3044 goto end_no_trans;
3045 }
3046
3047 if (root->fs_info->last_trans_log_full_commit >
3048 root->fs_info->last_trans_committed) {
3049 ret = 1;
3050 goto end_no_trans;
3051 }
3052
3053 if (root != BTRFS_I(inode)->root ||
3054 btrfs_root_refs(&root->root_item) == 0) {
3055 ret = 1;
3056 goto end_no_trans;
3057 }
3058
3059 ret = check_parent_dirs_for_sync(trans, inode, parent,
3060 sb, last_committed);
3061 if (ret)
3062 goto end_no_trans;
3063
3064 if (inode_in_log(trans, inode)) {
3065 ret = BTRFS_NO_LOG_SYNC;
3066 goto end_no_trans;
3067 }
3068
3069 ret = start_log_trans(trans, root);
3070 if (ret)
3071 goto end_trans;
3072
3073 ret = btrfs_log_inode(trans, root, inode, inode_only);
3074 if (ret)
3075 goto end_trans;
3076
3077 /*
3078 * for regular files, if their inode is already on disk, we don't
3079 * have to worry about the parents at all. This is because
3080 * we can use the last_unlink_trans field to record renames
3081 * and other fun in this file.
3082 */
3083 if (S_ISREG(inode->i_mode) &&
3084 BTRFS_I(inode)->generation <= last_committed &&
3085 BTRFS_I(inode)->last_unlink_trans <= last_committed) {
3086 ret = 0;
3087 goto end_trans;
3088 }
3089
3090 inode_only = LOG_INODE_EXISTS;
3091 while (1) {
3092 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
3093 break;
3094
3095 inode = parent->d_inode;
3096 if (root != BTRFS_I(inode)->root)
3097 break;
3098
3099 if (BTRFS_I(inode)->generation >
3100 root->fs_info->last_trans_committed) {
3101 ret = btrfs_log_inode(trans, root, inode, inode_only);
3102 if (ret)
3103 goto end_trans;
3104 }
3105 if (IS_ROOT(parent))
3106 break;
3107
3108 parent = dget_parent(parent);
3109 dput(old_parent);
3110 old_parent = parent;
3111 }
3112 ret = 0;
3113end_trans:
3114 dput(old_parent);
3115 if (ret < 0) {
3116 BUG_ON(ret != -ENOSPC);
3117 root->fs_info->last_trans_log_full_commit = trans->transid;
3118 ret = 1;
3119 }
3120 btrfs_end_log_trans(root);
3121end_no_trans:
3122 return ret;
3123}
3124
3125/*
3126 * it is not safe to log dentry if the chunk root has added new
3127 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
3128 * If this returns 1, you must commit the transaction to safely get your
3129 * data on disk.
3130 */
3131int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
3132 struct btrfs_root *root, struct dentry *dentry)
3133{
3134 struct dentry *parent = dget_parent(dentry);
3135 int ret;
3136
3137 ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent, 0);
3138 dput(parent);
3139
3140 return ret;
3141}
3142
3143/*
3144 * should be called during mount to replay any log trees
3145 * from the FS
3146 */
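/*
 * Rough outline of the passes below (matching the LOG_WALK_* stages):
 * the first walk pins every extent the log trees reference, the second
 * creates the inodes found in each log, the third replays the remaining
 * items (dir entries, refs and extents), and the final transaction
 * commit unpins the blocks.
 */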
3147int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
3148{
3149 int ret;
3150 struct btrfs_path *path;
3151 struct btrfs_trans_handle *trans;
3152 struct btrfs_key key;
3153 struct btrfs_key found_key;
3154 struct btrfs_key tmp_key;
3155 struct btrfs_root *log;
3156 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
3157 struct walk_control wc = {
3158 .process_func = process_one_buffer,
3159 .stage = 0,
3160 };
3161
3162 path = btrfs_alloc_path();
3163 if (!path)
3164 return -ENOMEM;
3165
3166 fs_info->log_root_recovering = 1;
3167
3168 trans = btrfs_start_transaction(fs_info->tree_root, 0);
3169 BUG_ON(IS_ERR(trans));
3170
3171 wc.trans = trans;
3172 wc.pin = 1;
3173
3174 ret = walk_log_tree(trans, log_root_tree, &wc);
3175 BUG_ON(ret);
3176
3177again:
3178 key.objectid = BTRFS_TREE_LOG_OBJECTID;
3179 key.offset = (u64)-1;
3180 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
3181
3182 while (1) {
3183 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
3184 if (ret < 0)
3185 break;
3186 if (ret > 0) {
3187 if (path->slots[0] == 0)
3188 break;
3189 path->slots[0]--;
3190 }
3191 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3192 path->slots[0]);
3193 btrfs_release_path(path);
3194 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
3195 break;
3196
3197 log = btrfs_read_fs_root_no_radix(log_root_tree,
3198 &found_key);
3199 BUG_ON(IS_ERR(log));
3200
3201 tmp_key.objectid = found_key.offset;
3202 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
3203 tmp_key.offset = (u64)-1;
3204
3205 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
3206 BUG_ON(IS_ERR_OR_NULL(wc.replay_dest));
3207
3208 wc.replay_dest->log_root = log;
3209 btrfs_record_root_in_trans(trans, wc.replay_dest);
3210 ret = walk_log_tree(trans, log, &wc);
3211 BUG_ON(ret);
3212
3213 if (wc.stage == LOG_WALK_REPLAY_ALL) {
3214 ret = fixup_inode_link_counts(trans, wc.replay_dest,
3215 path);
3216 BUG_ON(ret);
3217 }
3218
3219 key.offset = found_key.offset - 1;
3220 wc.replay_dest->log_root = NULL;
3221 free_extent_buffer(log->node);
3222 free_extent_buffer(log->commit_root);
3223 kfree(log);
3224
3225 if (found_key.offset == 0)
3226 break;
3227 }
3228 btrfs_release_path(path);
3229
3230 /* step one is to pin it all, step two is to replay just inodes */
3231 if (wc.pin) {
3232 wc.pin = 0;
3233 wc.process_func = replay_one_buffer;
3234 wc.stage = LOG_WALK_REPLAY_INODES;
3235 goto again;
3236 }
3237 /* step three is to replay everything */
3238 if (wc.stage < LOG_WALK_REPLAY_ALL) {
3239 wc.stage++;
3240 goto again;
3241 }
3242
3243 btrfs_free_path(path);
3244
3245 free_extent_buffer(log_root_tree->node);
3246 log_root_tree->log_root = NULL;
3247 fs_info->log_root_recovering = 0;
3248
3249 /* step 4: commit the transaction, which also unpins the blocks */
3250 btrfs_commit_transaction(trans, fs_info->tree_root);
3251
3252 kfree(log_root_tree);
3253 return 0;
3254}
3255
3256/*
3257 * there are some corner cases where we want to force a full
3258 * commit instead of allowing a directory to be logged.
3259 *
3260 * They revolve around files that were unlinked from the directory, and
3261 * this function updates the parent directory so that a full commit is
3262 * properly done if it is fsync'd later after the unlinks are done.
3263 */
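/*
 * Illustrative sequence for the rename case (hypothetical paths,
 * assuming neither dir1 nor foo was logged earlier in this
 * transaction):
 *
 *	rename(dir1/foo, dir2/foo)   <- dir1->last_unlink_trans = transid
 *	fsync(dir1)                  <- the recorded transid forces a full
 *	                                transaction commit instead of a
 *	                                log-only sync
 */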
3264void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
3265 struct inode *dir, struct inode *inode,
3266 int for_rename)
3267{
3268 /*
3269 * when we're logging a file, if it hasn't been renamed
3270 * or unlinked, and its inode is fully committed on disk,
3271 * we don't have to worry about walking up the directory chain
3272 * to log its parents.
3273 *
3274 * So, we use the last_unlink_trans field to put this transid
3275 * into the file. When the file is logged we check it and
3276 * don't log the parents if the file is fully on disk.
3277 */
3278 if (S_ISREG(inode->i_mode))
3279 BTRFS_I(inode)->last_unlink_trans = trans->transid;
3280
3281 /*
3282 * if this directory was already logged, any new
3283 * names for this file/dir will get recorded
3284 */
3285 smp_mb();
3286 if (BTRFS_I(dir)->logged_trans == trans->transid)
3287 return;
3288
3289 /*
3290 * if the inode we're about to unlink was logged,
3291 * the log will be properly updated for any new names
3292 */
3293 if (BTRFS_I(inode)->logged_trans == trans->transid)
3294 return;
3295
3296 /*
3297 * when renaming files across directories, if the directory
3298 * we're unlinking from gets fsync'd later on, there's
3299 * no way to find the destination directory later and fsync it
3300 * properly. So, we have to be conservative and force commits
3301 * so the new name gets discovered.
3302 */
3303 if (for_rename)
3304 goto record;
3305
3306 /* we can safely do the unlink without any special recording */
3307 return;
3308
3309record:
3310 BTRFS_I(dir)->last_unlink_trans = trans->transid;
3311}
3312
3313/*
3314 * Call this after adding a new name for a file and it will properly
3315 * update the log to reflect the new name.
3316 *
3317 * It will return zero if all goes well, and it will return 1 if a
3318 * full transaction commit is required.
3319 */
3320int btrfs_log_new_name(struct btrfs_trans_handle *trans,
3321 struct inode *inode, struct inode *old_dir,
3322 struct dentry *parent)
3323{
3324	struct btrfs_root *root = BTRFS_I(inode)->root;
3325
3326 /*
3327 * this will force the logging code to walk the dentry chain
3328 * up for the file
3329 */
3330 if (S_ISREG(inode->i_mode))
3331 BTRFS_I(inode)->last_unlink_trans = trans->transid;
3332
3333 /*
3334 * if this inode hasn't been logged and the directory we're renaming it
3335 * from hasn't been logged, we don't need to log it
3336 */
3337 if (BTRFS_I(inode)->logged_trans <=
3338 root->fs_info->last_trans_committed &&
3339 (!old_dir || BTRFS_I(old_dir)->logged_trans <=
3340 root->fs_info->last_trans_committed))
3341 return 0;
3342
3343 return btrfs_log_inode_parent(trans, root, inode, parent, 1);
3344}
3345
1/*
2 * Copyright (C) 2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/sched.h>
20#include <linux/slab.h>
21#include <linux/blkdev.h>
22#include <linux/list_sort.h>
23#include "tree-log.h"
24#include "disk-io.h"
25#include "locking.h"
26#include "print-tree.h"
27#include "backref.h"
28#include "hash.h"
29#include "compression.h"
30#include "qgroup.h"
31
32/* magic values for the inode_only field in btrfs_log_inode:
33 *
34 * LOG_INODE_ALL means to log everything
35 * LOG_INODE_EXISTS means to log just enough to recreate the inode
36 * during log replay
37 */
38#define LOG_INODE_ALL 0
39#define LOG_INODE_EXISTS 1
40#define LOG_OTHER_INODE 2
41
42/*
43 * directory trouble cases
44 *
45 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
46 * log, we must force a full commit before doing an fsync of the directory
47 * where the unlink was done.
48 * ---> record transid of last unlink/rename per directory
49 *
50 * mkdir foo/some_dir
51 * normal commit
52 * rename foo/some_dir foo2/some_dir
53 * mkdir foo/some_dir
54 * fsync foo/some_dir/some_file
55 *
56 * The fsync above will unlink the original some_dir without recording
57 * it in its new location (foo2). After a crash, some_dir will be gone
58 * unless the fsync of some_file forces a full commit
59 *
60 * 2) we must log any new names for any file or dir that is in the fsync
61 * log. ---> check inode while renaming/linking.
62 *
63 * 2a) we must log any new names for any file or dir during rename
64 * when the directory they are being removed from was logged.
65 * ---> check inode and old parent dir during rename
66 *
67 * 2a is actually the more important variant. With the extra logging
68 * a crash might unlink the old name without recreating the new one
69 *
70 * 3) after a crash, we must go through any directories with a link count
71 * of zero and redo the rm -rf
72 *
73 * mkdir f1/foo
74 * normal commit
75 * rm -rf f1/foo
76 * fsync(f1)
77 *
78 * The directory f1 was fully removed from the FS, but fsync was never
79 * called on f1, only its parent dir. After a crash the rm -rf must
80 * be replayed. This must be able to recurse down the entire
81 * directory tree. The inode link count fixup code takes care of the
82 * ugly details.
83 */
84
85/*
86 * stages for the tree walking. The first
87 * stage (0) is to only pin down the blocks we find
88 * the second stage (1) is to make sure that all the inodes
89 * we find in the log are created in the subvolume.
90 *
91 * The last stage is to deal with directories and links and extents
92 * and all the other fun semantics
93 */
94#define LOG_WALK_PIN_ONLY 0
95#define LOG_WALK_REPLAY_INODES 1
96#define LOG_WALK_REPLAY_DIR_INDEX 2
97#define LOG_WALK_REPLAY_ALL 3
98
99static int btrfs_log_inode(struct btrfs_trans_handle *trans,
100 struct btrfs_root *root, struct inode *inode,
101 int inode_only,
102 const loff_t start,
103 const loff_t end,
104 struct btrfs_log_ctx *ctx);
105static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
106 struct btrfs_root *root,
107 struct btrfs_path *path, u64 objectid);
108static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
109 struct btrfs_root *root,
110 struct btrfs_root *log,
111 struct btrfs_path *path,
112 u64 dirid, int del_all);
113
114/*
115 * tree logging is a special write ahead log used to make sure that
116 * fsyncs and O_SYNCs can happen without doing full tree commits.
117 *
118 * Full tree commits are expensive because they require commonly
119 * modified blocks to be recowed, creating many dirty pages in the
120 * extent tree and a 4x-6x higher write load than ext3.
121 *
122 * Instead of doing a tree commit on every fsync, we use the
123 * key ranges and transaction ids to find items for a given file or directory
124 * that have changed in this transaction. Those items are copied into
125 * a special tree (one per subvolume root), that tree is written to disk
126 * and then the fsync is considered complete.
127 *
128 * After a crash, items are copied out of the log-tree back into the
129 * subvolume tree. Any file data extents found are recorded in the extent
130 * allocation tree, and the log-tree freed.
131 *
132 * The log tree is read three times: once to pin down all the extents it is
133 * using in ram, once to create all the inodes logged in the tree,
134 * and once to do all the other items.
135 */
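/*
 * Very rough shape of the fast path this enables (illustrative only;
 * the actual entry point is the fsync code in file.c):
 *
 *	fsync(file)
 *	  -> copy the items the file changed into root->log_root
 *	  -> write the log root and the log root tree to disk
 *	  -> point the superblock at the new log root
 *	(no commit of the running transaction is required)
 */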
136
137/*
138 * start a sub transaction and set up the log tree.
139 * this increments the log tree writer count to make the people
140 * syncing the tree wait for us to finish
141 */
142static int start_log_trans(struct btrfs_trans_handle *trans,
143 struct btrfs_root *root,
144 struct btrfs_log_ctx *ctx)
145{
146 struct btrfs_fs_info *fs_info = root->fs_info;
147 int ret = 0;
148
149 mutex_lock(&root->log_mutex);
150
151 if (root->log_root) {
152 if (btrfs_need_log_full_commit(fs_info, trans)) {
153 ret = -EAGAIN;
154 goto out;
155 }
156
157 if (!root->log_start_pid) {
158 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
159 root->log_start_pid = current->pid;
160 } else if (root->log_start_pid != current->pid) {
161 set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
162 }
163 } else {
164 mutex_lock(&fs_info->tree_log_mutex);
165 if (!fs_info->log_root_tree)
166 ret = btrfs_init_log_root_tree(trans, fs_info);
167 mutex_unlock(&fs_info->tree_log_mutex);
168 if (ret)
169 goto out;
170
171 ret = btrfs_add_log_tree(trans, root);
172 if (ret)
173 goto out;
174
175 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
176 root->log_start_pid = current->pid;
177 }
178
179 atomic_inc(&root->log_batch);
180 atomic_inc(&root->log_writers);
181 if (ctx) {
182 int index = root->log_transid % 2;
183 list_add_tail(&ctx->list, &root->log_ctxs[index]);
184 ctx->log_transid = root->log_transid;
185 }
186
187out:
188 mutex_unlock(&root->log_mutex);
189 return ret;
190}
191
192/*
193 * returns 0 if there was a log transaction running and we were able
194 * to join, or returns -ENOENT if there were no transactions
195 * in progress
196 */
197static int join_running_log_trans(struct btrfs_root *root)
198{
199 int ret = -ENOENT;
200
201 smp_mb();
202 if (!root->log_root)
203 return -ENOENT;
204
205 mutex_lock(&root->log_mutex);
206 if (root->log_root) {
207 ret = 0;
208 atomic_inc(&root->log_writers);
209 }
210 mutex_unlock(&root->log_mutex);
211 return ret;
212}
213
214/*
215 * This either makes the current running log transaction wait
216 * until you call btrfs_end_log_trans() or it makes any future
217 * log transactions wait until you call btrfs_end_log_trans()
218 */
219int btrfs_pin_log_trans(struct btrfs_root *root)
220{
221 int ret = -ENOENT;
222
223 mutex_lock(&root->log_mutex);
224 atomic_inc(&root->log_writers);
225 mutex_unlock(&root->log_mutex);
226 return ret;
227}
228
229/*
230 * indicate we're done making changes to the log tree
231 * and wake up anyone waiting to do a sync
232 */
233void btrfs_end_log_trans(struct btrfs_root *root)
234{
235 if (atomic_dec_and_test(&root->log_writers)) {
236 /*
237 * Implicit memory barrier after atomic_dec_and_test
238 */
239 if (waitqueue_active(&root->log_writer_wait))
240 wake_up(&root->log_writer_wait);
241 }
242}
243
244
245/*
246 * the walk control struct is used to pass state down the chain when
247 * processing the log tree. The stage field tells us which part
248 * of the log tree processing we are currently doing. The others
249 * are state fields used for that specific part
250 */
251struct walk_control {
252 /* should we free the extent on disk when done? This is used
253 * at transaction commit time while freeing a log tree
254 */
255 int free;
256
257 /* should we write out the extent buffer? This is used
258 * while flushing the log tree to disk during a sync
259 */
260 int write;
261
262 /* should we wait for the extent buffer io to finish? Also used
263 * while flushing the log tree to disk for a sync
264 */
265 int wait;
266
267 /* pin only walk, we record which extents on disk belong to the
268 * log trees
269 */
270 int pin;
271
272 /* what stage of the replay code we're currently in */
273 int stage;
274
275 /* the root we are currently replaying */
276 struct btrfs_root *replay_dest;
277
278 /* the trans handle for the current replay */
279 struct btrfs_trans_handle *trans;
280
281 /* the function that gets used to process blocks we find in the
282 * tree. Note the extent_buffer might not be up to date when it is
283 * passed in, and it must be checked or read if you need the data
284 * inside it
285 */
286 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
287 struct walk_control *wc, u64 gen);
288};
289
290/*
291 * process_func used to pin down extents, write them or wait on them
292 */
293static int process_one_buffer(struct btrfs_root *log,
294 struct extent_buffer *eb,
295 struct walk_control *wc, u64 gen)
296{
297 struct btrfs_fs_info *fs_info = log->fs_info;
298 int ret = 0;
299
300 /*
301 * If this fs is mixed then we need to be able to process the leaves to
302 * pin down any logged extents, so we have to read the block.
303 */
304 if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
305 ret = btrfs_read_buffer(eb, gen);
306 if (ret)
307 return ret;
308 }
309
310 if (wc->pin)
311 ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start,
312 eb->len);
313
314 if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
315 if (wc->pin && btrfs_header_level(eb) == 0)
316 ret = btrfs_exclude_logged_extents(fs_info, eb);
317 if (wc->write)
318 btrfs_write_tree_block(eb);
319 if (wc->wait)
320 btrfs_wait_tree_block_writeback(eb);
321 }
322 return ret;
323}
324
325/*
326 * Item overwrite used by replay and tree logging. eb, slot and key all refer
327 * to the src data we are copying out.
328 *
329 * root is the tree we are copying into, and path is a scratch
330 * path for use in this function (it should be released on entry and
331 * will be released on exit).
332 *
333 * If the key is already in the destination tree the existing item is
334 * overwritten. If the existing item isn't big enough, it is extended.
335 * If it is too large, it is truncated.
336 *
337 * If the key isn't in the destination yet, a new item is inserted.
338 */
339static noinline int overwrite_item(struct btrfs_trans_handle *trans,
340 struct btrfs_root *root,
341 struct btrfs_path *path,
342 struct extent_buffer *eb, int slot,
343 struct btrfs_key *key)
344{
345 struct btrfs_fs_info *fs_info = root->fs_info;
346 int ret;
347 u32 item_size;
348 u64 saved_i_size = 0;
349 int save_old_i_size = 0;
350 unsigned long src_ptr;
351 unsigned long dst_ptr;
352 int overwrite_root = 0;
353 bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
354
355 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
356 overwrite_root = 1;
357
358 item_size = btrfs_item_size_nr(eb, slot);
359 src_ptr = btrfs_item_ptr_offset(eb, slot);
360
361 /* look for the key in the destination tree */
362 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
363 if (ret < 0)
364 return ret;
365
366 if (ret == 0) {
367 char *src_copy;
368 char *dst_copy;
369 u32 dst_size = btrfs_item_size_nr(path->nodes[0],
370 path->slots[0]);
371 if (dst_size != item_size)
372 goto insert;
373
374 if (item_size == 0) {
375 btrfs_release_path(path);
376 return 0;
377 }
378 dst_copy = kmalloc(item_size, GFP_NOFS);
379 src_copy = kmalloc(item_size, GFP_NOFS);
380 if (!dst_copy || !src_copy) {
381 btrfs_release_path(path);
382 kfree(dst_copy);
383 kfree(src_copy);
384 return -ENOMEM;
385 }
386
387 read_extent_buffer(eb, src_copy, src_ptr, item_size);
388
389 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
390 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
391 item_size);
392 ret = memcmp(dst_copy, src_copy, item_size);
393
394 kfree(dst_copy);
395 kfree(src_copy);
396 /*
397		 * they have the same contents, just return. This saves
398 * us from cowing blocks in the destination tree and doing
399 * extra writes that may not have been done by a previous
400 * sync
401 */
402 if (ret == 0) {
403 btrfs_release_path(path);
404 return 0;
405 }
406
407 /*
408 * We need to load the old nbytes into the inode so when we
409 * replay the extents we've logged we get the right nbytes.
410 */
411 if (inode_item) {
412 struct btrfs_inode_item *item;
413 u64 nbytes;
414 u32 mode;
415
416 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
417 struct btrfs_inode_item);
418 nbytes = btrfs_inode_nbytes(path->nodes[0], item);
419 item = btrfs_item_ptr(eb, slot,
420 struct btrfs_inode_item);
421 btrfs_set_inode_nbytes(eb, item, nbytes);
422
423 /*
424 * If this is a directory we need to reset the i_size to
425 * 0 so that we can set it up properly when replaying
426 * the rest of the items in this log.
427 */
428 mode = btrfs_inode_mode(eb, item);
429 if (S_ISDIR(mode))
430 btrfs_set_inode_size(eb, item, 0);
431 }
432 } else if (inode_item) {
433 struct btrfs_inode_item *item;
434 u32 mode;
435
436 /*
437 * New inode, set nbytes to 0 so that the nbytes comes out
438 * properly when we replay the extents.
439 */
440 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
441 btrfs_set_inode_nbytes(eb, item, 0);
442
443 /*
444 * If this is a directory we need to reset the i_size to 0 so
445 * that we can set it up properly when replaying the rest of
446 * the items in this log.
447 */
448 mode = btrfs_inode_mode(eb, item);
449 if (S_ISDIR(mode))
450 btrfs_set_inode_size(eb, item, 0);
451 }
452insert:
453 btrfs_release_path(path);
454 /* try to insert the key into the destination tree */
455 path->skip_release_on_error = 1;
456 ret = btrfs_insert_empty_item(trans, root, path,
457 key, item_size);
458 path->skip_release_on_error = 0;
459
460 /* make sure any existing item is the correct size */
461 if (ret == -EEXIST || ret == -EOVERFLOW) {
462 u32 found_size;
463 found_size = btrfs_item_size_nr(path->nodes[0],
464 path->slots[0]);
465 if (found_size > item_size)
466 btrfs_truncate_item(fs_info, path, item_size, 1);
467 else if (found_size < item_size)
468 btrfs_extend_item(fs_info, path,
469 item_size - found_size);
470 } else if (ret) {
471 return ret;
472 }
473 dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
474 path->slots[0]);
475
476 /* don't overwrite an existing inode if the generation number
477 * was logged as zero. This is done when the tree logging code
478 * is just logging an inode to make sure it exists after recovery.
479 *
480 * Also, don't overwrite i_size on directories during replay.
481 * log replay inserts and removes directory items based on the
482 * state of the tree found in the subvolume, and i_size is modified
483 * as it goes
484 */
485 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
486 struct btrfs_inode_item *src_item;
487 struct btrfs_inode_item *dst_item;
488
489 src_item = (struct btrfs_inode_item *)src_ptr;
490 dst_item = (struct btrfs_inode_item *)dst_ptr;
491
492 if (btrfs_inode_generation(eb, src_item) == 0) {
493 struct extent_buffer *dst_eb = path->nodes[0];
494 const u64 ino_size = btrfs_inode_size(eb, src_item);
495
496 /*
497 * For regular files an ino_size == 0 is used only when
498 * logging that an inode exists, as part of a directory
499 * fsync, and the inode wasn't fsynced before. In this
500 * case don't set the size of the inode in the fs/subvol
501 * tree, otherwise we would be throwing valid data away.
502 */
503 if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
504 S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
505 ino_size != 0) {
506 struct btrfs_map_token token;
507
508 btrfs_init_map_token(&token);
509 btrfs_set_token_inode_size(dst_eb, dst_item,
510 ino_size, &token);
511 }
512 goto no_copy;
513 }
514
515 if (overwrite_root &&
516 S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
517 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
518 save_old_i_size = 1;
519 saved_i_size = btrfs_inode_size(path->nodes[0],
520 dst_item);
521 }
522 }
523
524 copy_extent_buffer(path->nodes[0], eb, dst_ptr,
525 src_ptr, item_size);
526
527 if (save_old_i_size) {
528 struct btrfs_inode_item *dst_item;
529 dst_item = (struct btrfs_inode_item *)dst_ptr;
530 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
531 }
532
533 /* make sure the generation is filled in */
534 if (key->type == BTRFS_INODE_ITEM_KEY) {
535 struct btrfs_inode_item *dst_item;
536 dst_item = (struct btrfs_inode_item *)dst_ptr;
537 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
538 btrfs_set_inode_generation(path->nodes[0], dst_item,
539 trans->transid);
540 }
541 }
542no_copy:
543 btrfs_mark_buffer_dirty(path->nodes[0]);
544 btrfs_release_path(path);
545 return 0;
546}
547
548/*
549 * simple helper to read an inode off the disk from a given root
550 * This can only be called for subvolume roots and not for the log
551 */
552static noinline struct inode *read_one_inode(struct btrfs_root *root,
553 u64 objectid)
554{
555 struct btrfs_key key;
556 struct inode *inode;
557
558 key.objectid = objectid;
559 key.type = BTRFS_INODE_ITEM_KEY;
560 key.offset = 0;
561 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
562 if (IS_ERR(inode)) {
563 inode = NULL;
564 } else if (is_bad_inode(inode)) {
565 iput(inode);
566 inode = NULL;
567 }
568 return inode;
569}
570
571/* replays a single extent in 'eb' at 'slot' with 'key' into the
572 * subvolume 'root'. path is released on entry and should be released
573 * on exit.
574 *
575 * extents in the log tree have not been allocated out of the extent
576 * tree yet. So, this completes the allocation, taking a reference
577 * as required if the extent already exists or creating a new extent
578 * if it isn't in the extent allocation tree yet.
579 *
580 * The extent is inserted into the file, dropping any existing extents
581 * from the file that overlap the new one.
582 */
583static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
584 struct btrfs_root *root,
585 struct btrfs_path *path,
586 struct extent_buffer *eb, int slot,
587 struct btrfs_key *key)
588{
589 struct btrfs_fs_info *fs_info = root->fs_info;
590 int found_type;
591 u64 extent_end;
592 u64 start = key->offset;
593 u64 nbytes = 0;
594 struct btrfs_file_extent_item *item;
595 struct inode *inode = NULL;
596 unsigned long size;
597 int ret = 0;
598
599 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
600 found_type = btrfs_file_extent_type(eb, item);
601
602 if (found_type == BTRFS_FILE_EXTENT_REG ||
603 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
604 nbytes = btrfs_file_extent_num_bytes(eb, item);
605 extent_end = start + nbytes;
606
607 /*
608 		 * We don't add to the inode's nbytes if we are prealloc or a
609 * hole.
610 */
611 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
612 nbytes = 0;
613 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
614 size = btrfs_file_extent_inline_len(eb, slot, item);
615 nbytes = btrfs_file_extent_ram_bytes(eb, item);
616 extent_end = ALIGN(start + size,
617 fs_info->sectorsize);
618 } else {
619 ret = 0;
620 goto out;
621 }
622
623 inode = read_one_inode(root, key->objectid);
624 if (!inode) {
625 ret = -EIO;
626 goto out;
627 }
628
629 /*
630 * first check to see if we already have this extent in the
631 	 * file. This must be done before the call to btrfs_drop_extents
632 * so we don't try to drop this extent.
633 */
634 ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
635 start, 0);
636
637 if (ret == 0 &&
638 (found_type == BTRFS_FILE_EXTENT_REG ||
639 found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
640 struct btrfs_file_extent_item cmp1;
641 struct btrfs_file_extent_item cmp2;
642 struct btrfs_file_extent_item *existing;
643 struct extent_buffer *leaf;
644
645 leaf = path->nodes[0];
646 existing = btrfs_item_ptr(leaf, path->slots[0],
647 struct btrfs_file_extent_item);
648
649 read_extent_buffer(eb, &cmp1, (unsigned long)item,
650 sizeof(cmp1));
651 read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
652 sizeof(cmp2));
653
654 /*
655 * we already have a pointer to this exact extent,
656 * we don't have to do anything
657 */
658 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
659 btrfs_release_path(path);
660 goto out;
661 }
662 }
663 btrfs_release_path(path);
664
665 /* drop any overlapping extents */
666 ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
667 if (ret)
668 goto out;
669
670 if (found_type == BTRFS_FILE_EXTENT_REG ||
671 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
672 u64 offset;
673 unsigned long dest_offset;
674 struct btrfs_key ins;
675
676 ret = btrfs_insert_empty_item(trans, root, path, key,
677 sizeof(*item));
678 if (ret)
679 goto out;
680 dest_offset = btrfs_item_ptr_offset(path->nodes[0],
681 path->slots[0]);
682 copy_extent_buffer(path->nodes[0], eb, dest_offset,
683 (unsigned long)item, sizeof(*item));
684
685 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
686 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
687 ins.type = BTRFS_EXTENT_ITEM_KEY;
688 offset = key->offset - btrfs_file_extent_offset(eb, item);
689
690 		/*
691 		 * Manually record the dirty extent for qgroup here, as we did a
692 		 * shallow copy of the file extent item and skipped the normal
693 		 * backref update, modifying the extent tree all by ourselves.
694 		 * The qgroup record is needed because the owner of the file
695 		 * extent changed from the log tree (which doesn't affect qgroup)
696 		 * to the fs/file tree (which does).
697 		 */
698 ret = btrfs_qgroup_trace_extent(trans, fs_info,
699 btrfs_file_extent_disk_bytenr(eb, item),
700 btrfs_file_extent_disk_num_bytes(eb, item),
701 GFP_NOFS);
702 if (ret < 0)
703 goto out;
704
705 if (ins.objectid > 0) {
706 u64 csum_start;
707 u64 csum_end;
708 LIST_HEAD(ordered_sums);
709 /*
710 * is this extent already allocated in the extent
711 * allocation tree? If so, just add a reference
712 */
713 ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
714 ins.offset);
715 if (ret == 0) {
716 ret = btrfs_inc_extent_ref(trans, fs_info,
717 ins.objectid, ins.offset,
718 0, root->root_key.objectid,
719 key->objectid, offset);
720 if (ret)
721 goto out;
722 } else {
723 /*
724 * insert the extent pointer in the extent
725 * allocation tree
726 */
727 ret = btrfs_alloc_logged_file_extent(trans,
728 fs_info,
729 root->root_key.objectid,
730 key->objectid, offset, &ins);
731 if (ret)
732 goto out;
733 }
734 btrfs_release_path(path);
735
736 if (btrfs_file_extent_compression(eb, item)) {
737 csum_start = ins.objectid;
738 csum_end = csum_start + ins.offset;
739 } else {
740 csum_start = ins.objectid +
741 btrfs_file_extent_offset(eb, item);
742 csum_end = csum_start +
743 btrfs_file_extent_num_bytes(eb, item);
744 }
745
746 ret = btrfs_lookup_csums_range(root->log_root,
747 csum_start, csum_end - 1,
748 &ordered_sums, 0);
749 if (ret)
750 goto out;
751 /*
752 			 * Now delete all existing csums in the csum root that
753 * cover our range. We do this because we can have an
754 * extent that is completely referenced by one file
755 * extent item and partially referenced by another
756 * file extent item (like after using the clone or
757 * extent_same ioctls). In this case if we end up doing
758 * the replay of the one that partially references the
759 * extent first, and we do not do the csum deletion
760 * below, we can get 2 csum items in the csum tree that
761 * overlap each other. For example, imagine our log has
762 * the two following file extent items:
763 *
764 * key (257 EXTENT_DATA 409600)
765 * extent data disk byte 12845056 nr 102400
766 * extent data offset 20480 nr 20480 ram 102400
767 *
768 * key (257 EXTENT_DATA 819200)
769 * extent data disk byte 12845056 nr 102400
770 * extent data offset 0 nr 102400 ram 102400
771 *
772 * Where the second one fully references the 100K extent
773 * that starts at disk byte 12845056, and the log tree
774 * has a single csum item that covers the entire range
775 * of the extent:
776 *
777 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
778 *
779 * After the first file extent item is replayed, the
780 * csum tree gets the following csum item:
781 *
782 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
783 *
784 * Which covers the 20K sub-range starting at offset 20K
785 * of our extent. Now when we replay the second file
786 * extent item, if we do not delete existing csum items
787 * that cover any of its blocks, we end up getting two
788 * csum items in our csum tree that overlap each other:
789 *
790 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
791 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
792 *
793 * Which is a problem, because after this anyone trying
794 			 * to look up the checksum of any block of our
795 * extent starting at an offset of 40K or higher, will
796 * end up looking at the second csum item only, which
797 * does not contain the checksum for any block starting
798 * at offset 40K or higher of our extent.
799 */
800 while (!list_empty(&ordered_sums)) {
801 struct btrfs_ordered_sum *sums;
802 sums = list_entry(ordered_sums.next,
803 struct btrfs_ordered_sum,
804 list);
805 if (!ret)
806 ret = btrfs_del_csums(trans, fs_info,
807 sums->bytenr,
808 sums->len);
809 if (!ret)
810 ret = btrfs_csum_file_blocks(trans,
811 fs_info->csum_root, sums);
812 list_del(&sums->list);
813 kfree(sums);
814 }
815 if (ret)
816 goto out;
817 } else {
818 btrfs_release_path(path);
819 }
820 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
821 /* inline extents are easy, we just overwrite them */
822 ret = overwrite_item(trans, root, path, eb, slot, key);
823 if (ret)
824 goto out;
825 }
826
827 inode_add_bytes(inode, nbytes);
828 ret = btrfs_update_inode(trans, root, inode);
829out:
830 if (inode)
831 iput(inode);
832 return ret;
833}
834
835/*
836 * when cleaning up conflicts between the directory names in the
837 * subvolume, directory names in the log and directory names in the
838 * inode back references, we may have to unlink inodes from directories.
839 *
840 * This is a helper function to do the unlink of a specific directory
841 * item
842 */
843static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
844 struct btrfs_root *root,
845 struct btrfs_path *path,
846 struct inode *dir,
847 struct btrfs_dir_item *di)
848{
849 struct btrfs_fs_info *fs_info = root->fs_info;
850 struct inode *inode;
851 char *name;
852 int name_len;
853 struct extent_buffer *leaf;
854 struct btrfs_key location;
855 int ret;
856
857 leaf = path->nodes[0];
858
859 btrfs_dir_item_key_to_cpu(leaf, di, &location);
860 name_len = btrfs_dir_name_len(leaf, di);
861 name = kmalloc(name_len, GFP_NOFS);
862 if (!name)
863 return -ENOMEM;
864
865 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
866 btrfs_release_path(path);
867
868 inode = read_one_inode(root, location.objectid);
869 if (!inode) {
870 ret = -EIO;
871 goto out;
872 }
873
874 ret = link_to_fixup_dir(trans, root, path, location.objectid);
875 if (ret)
876 goto out;
877
878 ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
879 if (ret)
880 goto out;
881 else
882 ret = btrfs_run_delayed_items(trans, fs_info);
883out:
884 kfree(name);
885 iput(inode);
886 return ret;
887}
888
889/*
890 * helper function to see if a given name and sequence number found
891 * in an inode back reference are already in a directory and correctly
892 * point to this inode
893 */
894static noinline int inode_in_dir(struct btrfs_root *root,
895 struct btrfs_path *path,
896 u64 dirid, u64 objectid, u64 index,
897 const char *name, int name_len)
898{
899 struct btrfs_dir_item *di;
900 struct btrfs_key location;
901 int match = 0;
902
903 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
904 index, name, name_len, 0);
905 if (di && !IS_ERR(di)) {
906 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
907 if (location.objectid != objectid)
908 goto out;
909 } else
910 goto out;
911 btrfs_release_path(path);
912
913 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
914 if (di && !IS_ERR(di)) {
915 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
916 if (location.objectid != objectid)
917 goto out;
918 } else
919 goto out;
920 match = 1;
921out:
922 btrfs_release_path(path);
923 return match;
924}
925
926/*
927 * helper function to check a log tree for a named back reference in
928 * an inode. This is used to decide if a back reference that is
929 * found in the subvolume conflicts with what we find in the log.
930 *
931  * inode backreferences may have multiple refs in a single item;
932 * during replay we process one reference at a time, and we don't
933 * want to delete valid links to a file from the subvolume if that
934 * link is also in the log.
935 */
936static noinline int backref_in_log(struct btrfs_root *log,
937 struct btrfs_key *key,
938 u64 ref_objectid,
939 const char *name, int namelen)
940{
941 struct btrfs_path *path;
942 struct btrfs_inode_ref *ref;
943 unsigned long ptr;
944 unsigned long ptr_end;
945 unsigned long name_ptr;
946 int found_name_len;
947 int item_size;
948 int ret;
949 int match = 0;
950
951 path = btrfs_alloc_path();
952 if (!path)
953 return -ENOMEM;
954
955 ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
956 if (ret != 0)
957 goto out;
958
959 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
960
961 if (key->type == BTRFS_INODE_EXTREF_KEY) {
962 if (btrfs_find_name_in_ext_backref(path, ref_objectid,
963 name, namelen, NULL))
964 match = 1;
965
966 goto out;
967 }
968
969 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
970 ptr_end = ptr + item_size;
971 while (ptr < ptr_end) {
972 ref = (struct btrfs_inode_ref *)ptr;
973 found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
974 if (found_name_len == namelen) {
975 name_ptr = (unsigned long)(ref + 1);
976 ret = memcmp_extent_buffer(path->nodes[0], name,
977 name_ptr, namelen);
978 if (ret == 0) {
979 match = 1;
980 goto out;
981 }
982 }
983 ptr = (unsigned long)(ref + 1) + found_name_len;
984 }
985out:
986 btrfs_free_path(path);
987 return match;
988}
989
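/*
 * Resolve conflicts between a back reference found in the log and the names
 * that already exist in the subvolume for the same inode and parent.  Any old
 * style or extended ref whose name is not present in the log is unlinked, as
 * is any directory entry that clashes with the name or index being replayed.
 * Sets *search_done once the existing refs have been scanned so the caller
 * can skip this work for later names in the same item.  Returns 1 if the back
 * ref is for the root directory (nothing to do), 0 on success, or a negative
 * errno.
 */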
990static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
991 struct btrfs_root *root,
992 struct btrfs_path *path,
993 struct btrfs_root *log_root,
994 struct inode *dir, struct inode *inode,
995 struct extent_buffer *eb,
996 u64 inode_objectid, u64 parent_objectid,
997 u64 ref_index, char *name, int namelen,
998 int *search_done)
999{
1000 struct btrfs_fs_info *fs_info = root->fs_info;
1001 int ret;
1002 char *victim_name;
1003 int victim_name_len;
1004 struct extent_buffer *leaf;
1005 struct btrfs_dir_item *di;
1006 struct btrfs_key search_key;
1007 struct btrfs_inode_extref *extref;
1008
1009again:
1010 /* Search old style refs */
1011 search_key.objectid = inode_objectid;
1012 search_key.type = BTRFS_INODE_REF_KEY;
1013 search_key.offset = parent_objectid;
1014 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
1015 if (ret == 0) {
1016 struct btrfs_inode_ref *victim_ref;
1017 unsigned long ptr;
1018 unsigned long ptr_end;
1019
1020 leaf = path->nodes[0];
1021
1022 /* are we trying to overwrite a back ref for the root directory
1023 * if so, just jump out, we're done
1024 */
1025 if (search_key.objectid == search_key.offset)
1026 return 1;
1027
1028 /* check all the names in this back reference to see
1029 		 * if they are in the log. If so, we allow them to stay;
1030 		 * otherwise they must be unlinked as a conflict
1031 */
1032 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1033 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
1034 while (ptr < ptr_end) {
1035 victim_ref = (struct btrfs_inode_ref *)ptr;
1036 victim_name_len = btrfs_inode_ref_name_len(leaf,
1037 victim_ref);
1038 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1039 if (!victim_name)
1040 return -ENOMEM;
1041
1042 read_extent_buffer(leaf, victim_name,
1043 (unsigned long)(victim_ref + 1),
1044 victim_name_len);
1045
1046 if (!backref_in_log(log_root, &search_key,
1047 parent_objectid,
1048 victim_name,
1049 victim_name_len)) {
1050 inc_nlink(inode);
1051 btrfs_release_path(path);
1052
1053 ret = btrfs_unlink_inode(trans, root, dir,
1054 inode, victim_name,
1055 victim_name_len);
1056 kfree(victim_name);
1057 if (ret)
1058 return ret;
1059 ret = btrfs_run_delayed_items(trans, fs_info);
1060 if (ret)
1061 return ret;
1062 *search_done = 1;
1063 goto again;
1064 }
1065 kfree(victim_name);
1066
1067 ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
1068 }
1069
1070 /*
1071 		 * NOTE: we have searched the root tree and checked the
1072 		 * corresponding ref, so there is no need to check it again.
1073 */
1074 *search_done = 1;
1075 }
1076 btrfs_release_path(path);
1077
1078 /* Same search but for extended refs */
1079 extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
1080 inode_objectid, parent_objectid, 0,
1081 0);
1082 if (!IS_ERR_OR_NULL(extref)) {
1083 u32 item_size;
1084 u32 cur_offset = 0;
1085 unsigned long base;
1086 struct inode *victim_parent;
1087
1088 leaf = path->nodes[0];
1089
1090 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1091 base = btrfs_item_ptr_offset(leaf, path->slots[0]);
1092
1093 while (cur_offset < item_size) {
1094 extref = (struct btrfs_inode_extref *)(base + cur_offset);
1095
1096 victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
1097
1098 if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
1099 goto next;
1100
1101 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1102 if (!victim_name)
1103 return -ENOMEM;
1104 read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
1105 victim_name_len);
1106
1107 search_key.objectid = inode_objectid;
1108 search_key.type = BTRFS_INODE_EXTREF_KEY;
1109 search_key.offset = btrfs_extref_hash(parent_objectid,
1110 victim_name,
1111 victim_name_len);
1112 ret = 0;
1113 if (!backref_in_log(log_root, &search_key,
1114 parent_objectid, victim_name,
1115 victim_name_len)) {
1116 ret = -ENOENT;
1117 victim_parent = read_one_inode(root,
1118 parent_objectid);
1119 if (victim_parent) {
1120 inc_nlink(inode);
1121 btrfs_release_path(path);
1122
1123 ret = btrfs_unlink_inode(trans, root,
1124 victim_parent,
1125 inode,
1126 victim_name,
1127 victim_name_len);
1128 if (!ret)
1129 ret = btrfs_run_delayed_items(
1130 trans,
1131 fs_info);
1132 }
1133 iput(victim_parent);
1134 kfree(victim_name);
1135 if (ret)
1136 return ret;
1137 *search_done = 1;
1138 goto again;
1139 }
1140 kfree(victim_name);
1141 if (ret)
1142 return ret;
1143next:
1144 cur_offset += victim_name_len + sizeof(*extref);
1145 }
1146 *search_done = 1;
1147 }
1148 btrfs_release_path(path);
1149
1150 /* look for a conflicting sequence number */
1151 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
1152 ref_index, name, namelen, 0);
1153 if (di && !IS_ERR(di)) {
1154 ret = drop_one_dir_item(trans, root, path, dir, di);
1155 if (ret)
1156 return ret;
1157 }
1158 btrfs_release_path(path);
1159
1160 	/* look for a conflicting name */
1161 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
1162 name, namelen, 0);
1163 if (di && !IS_ERR(di)) {
1164 ret = drop_one_dir_item(trans, root, path, dir, di);
1165 if (ret)
1166 return ret;
1167 }
1168 btrfs_release_path(path);
1169
1170 return 0;
1171}
1172
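/*
 * Pull the name, directory index and (optionally) the parent directory
 * objectid out of the extended inode ref at 'ref_ptr' in 'eb'.  The name is
 * copied into a freshly allocated buffer that the caller must kfree().
 */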
1173static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1174 u32 *namelen, char **name, u64 *index,
1175 u64 *parent_objectid)
1176{
1177 struct btrfs_inode_extref *extref;
1178
1179 extref = (struct btrfs_inode_extref *)ref_ptr;
1180
1181 *namelen = btrfs_inode_extref_name_len(eb, extref);
1182 *name = kmalloc(*namelen, GFP_NOFS);
1183 if (*name == NULL)
1184 return -ENOMEM;
1185
1186 read_extent_buffer(eb, *name, (unsigned long)&extref->name,
1187 *namelen);
1188
1189 *index = btrfs_inode_extref_index(eb, extref);
1190 if (parent_objectid)
1191 *parent_objectid = btrfs_inode_extref_parent(eb, extref);
1192
1193 return 0;
1194}
1195
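/*
 * Same as extref_get_fields() but for an old style inode ref.  The parent
 * directory is implied by the item's key offset, so only the name and the
 * directory index are returned.
 */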
1196static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1197 u32 *namelen, char **name, u64 *index)
1198{
1199 struct btrfs_inode_ref *ref;
1200
1201 ref = (struct btrfs_inode_ref *)ref_ptr;
1202
1203 *namelen = btrfs_inode_ref_name_len(eb, ref);
1204 *name = kmalloc(*namelen, GFP_NOFS);
1205 if (*name == NULL)
1206 return -ENOMEM;
1207
1208 read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);
1209
1210 *index = btrfs_inode_ref_index(eb, ref);
1211
1212 return 0;
1213}
1214
1215/*
1216 * replay one inode back reference item found in the log tree.
1217 * eb, slot and key refer to the buffer and key found in the log tree.
1218 * root is the destination we are replaying into, and path is for temp
1219 * use by this function. (it should be released on return).
1220 */
1221static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
1222 struct btrfs_root *root,
1223 struct btrfs_root *log,
1224 struct btrfs_path *path,
1225 struct extent_buffer *eb, int slot,
1226 struct btrfs_key *key)
1227{
1228 struct inode *dir = NULL;
1229 struct inode *inode = NULL;
1230 unsigned long ref_ptr;
1231 unsigned long ref_end;
1232 char *name = NULL;
1233 int namelen;
1234 int ret;
1235 int search_done = 0;
1236 int log_ref_ver = 0;
1237 u64 parent_objectid;
1238 u64 inode_objectid;
1239 u64 ref_index = 0;
1240 int ref_struct_size;
1241
1242 ref_ptr = btrfs_item_ptr_offset(eb, slot);
1243 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
1244
1245 if (key->type == BTRFS_INODE_EXTREF_KEY) {
1246 struct btrfs_inode_extref *r;
1247
1248 ref_struct_size = sizeof(struct btrfs_inode_extref);
1249 log_ref_ver = 1;
1250 r = (struct btrfs_inode_extref *)ref_ptr;
1251 parent_objectid = btrfs_inode_extref_parent(eb, r);
1252 } else {
1253 ref_struct_size = sizeof(struct btrfs_inode_ref);
1254 parent_objectid = key->offset;
1255 }
1256 inode_objectid = key->objectid;
1257
1258 /*
1259 * it is possible that we didn't log all the parent directories
1260 * for a given inode. If we don't find the dir, just don't
1261 * copy the back ref in. The link count fixup code will take
1262 * care of the rest
1263 */
1264 dir = read_one_inode(root, parent_objectid);
1265 if (!dir) {
1266 ret = -ENOENT;
1267 goto out;
1268 }
1269
1270 inode = read_one_inode(root, inode_objectid);
1271 if (!inode) {
1272 ret = -EIO;
1273 goto out;
1274 }
1275
1276 while (ref_ptr < ref_end) {
1277 if (log_ref_ver) {
1278 ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
1279 &ref_index, &parent_objectid);
1280 /*
1281 * parent object can change from one array
1282 * item to another.
1283 */
1284 if (!dir)
1285 dir = read_one_inode(root, parent_objectid);
1286 if (!dir) {
1287 ret = -ENOENT;
1288 goto out;
1289 }
1290 } else {
1291 ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
1292 &ref_index);
1293 }
1294 if (ret)
1295 goto out;
1296
1297 /* if we already have a perfect match, we're done */
1298 if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
1299 ref_index, name, namelen)) {
1300 /*
1301 * look for a conflicting back reference in the
1302 * metadata. if we find one we have to unlink that name
1303 * of the file before we add our new link. Later on, we
1304 * overwrite any existing back reference, and we don't
1305 * want to create dangling pointers in the directory.
1306 */
1307
1308 if (!search_done) {
1309 ret = __add_inode_ref(trans, root, path, log,
1310 dir, inode, eb,
1311 inode_objectid,
1312 parent_objectid,
1313 ref_index, name, namelen,
1314 &search_done);
1315 if (ret) {
1316 if (ret == 1)
1317 ret = 0;
1318 goto out;
1319 }
1320 }
1321
1322 /* insert our name */
1323 ret = btrfs_add_link(trans, dir, inode, name, namelen,
1324 0, ref_index);
1325 if (ret)
1326 goto out;
1327
1328 btrfs_update_inode(trans, root, inode);
1329 }
1330
1331 ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
1332 kfree(name);
1333 name = NULL;
1334 if (log_ref_ver) {
1335 iput(dir);
1336 dir = NULL;
1337 }
1338 }
1339
1340 /* finally write the back reference in the inode */
1341 ret = overwrite_item(trans, root, path, eb, slot, key);
1342out:
1343 btrfs_release_path(path);
1344 kfree(name);
1345 iput(dir);
1346 iput(inode);
1347 return ret;
1348}
1349
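/*
 * Insert an orphan item for the given inode so that orphan cleanup can finish
 * the work (truncates, final unlinks) after replay.  An orphan item that
 * already exists (-EEXIST) is not an error.
 */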
1350static int insert_orphan_item(struct btrfs_trans_handle *trans,
1351 struct btrfs_root *root, u64 ino)
1352{
1353 int ret;
1354
1355 ret = btrfs_insert_orphan_item(trans, root, ino);
1356 if (ret == -EEXIST)
1357 ret = 0;
1358
1359 return ret;
1360}
1361
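/*
 * Count how many names for 'inode' are recorded in extended inode ref items
 * in 'root'.  Returns the number of names found, or a negative errno for any
 * failure other than -ENOENT.
 */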
1362static int count_inode_extrefs(struct btrfs_root *root,
1363 struct inode *inode, struct btrfs_path *path)
1364{
1365 int ret = 0;
1366 int name_len;
1367 unsigned int nlink = 0;
1368 u32 item_size;
1369 u32 cur_offset = 0;
1370 u64 inode_objectid = btrfs_ino(inode);
1371 u64 offset = 0;
1372 unsigned long ptr;
1373 struct btrfs_inode_extref *extref;
1374 struct extent_buffer *leaf;
1375
1376 while (1) {
1377 ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
1378 &extref, &offset);
1379 if (ret)
1380 break;
1381
1382 leaf = path->nodes[0];
1383 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1384 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1385 cur_offset = 0;
1386
1387 while (cur_offset < item_size) {
1388 extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
1389 name_len = btrfs_inode_extref_name_len(leaf, extref);
1390
1391 nlink++;
1392
1393 cur_offset += name_len + sizeof(*extref);
1394 }
1395
1396 offset++;
1397 btrfs_release_path(path);
1398 }
1399 btrfs_release_path(path);
1400
1401 if (ret < 0 && ret != -ENOENT)
1402 return ret;
1403 return nlink;
1404}
1405
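/*
 * Count how many names for 'inode' are recorded in old style inode ref items
 * in 'root'.  Together with count_inode_extrefs() this gives the link count
 * the inode should have after replay.
 */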
1406static int count_inode_refs(struct btrfs_root *root,
1407 struct inode *inode, struct btrfs_path *path)
1408{
1409 int ret;
1410 struct btrfs_key key;
1411 unsigned int nlink = 0;
1412 unsigned long ptr;
1413 unsigned long ptr_end;
1414 int name_len;
1415 u64 ino = btrfs_ino(inode);
1416
1417 key.objectid = ino;
1418 key.type = BTRFS_INODE_REF_KEY;
1419 key.offset = (u64)-1;
1420
1421 while (1) {
1422 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1423 if (ret < 0)
1424 break;
1425 if (ret > 0) {
1426 if (path->slots[0] == 0)
1427 break;
1428 path->slots[0]--;
1429 }
1430process_slot:
1431 btrfs_item_key_to_cpu(path->nodes[0], &key,
1432 path->slots[0]);
1433 if (key.objectid != ino ||
1434 key.type != BTRFS_INODE_REF_KEY)
1435 break;
1436 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
1437 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
1438 path->slots[0]);
1439 while (ptr < ptr_end) {
1440 struct btrfs_inode_ref *ref;
1441
1442 ref = (struct btrfs_inode_ref *)ptr;
1443 name_len = btrfs_inode_ref_name_len(path->nodes[0],
1444 ref);
1445 ptr = (unsigned long)(ref + 1) + name_len;
1446 nlink++;
1447 }
1448
1449 if (key.offset == 0)
1450 break;
1451 if (path->slots[0] > 0) {
1452 path->slots[0]--;
1453 goto process_slot;
1454 }
1455 key.offset--;
1456 btrfs_release_path(path);
1457 }
1458 btrfs_release_path(path);
1459
1460 return nlink;
1461}
1462
1463/*
1464  * There are a few corner cases where the link count of the file can't
1465 * be properly maintained during replay. So, instead of adding
1466 * lots of complexity to the log code, we just scan the backrefs
1467 * for any file that has been through replay.
1468 *
1469 * The scan will update the link count on the inode to reflect the
1470 * number of back refs found. If it goes down to zero, the iput
1471 * will free the inode.
1472 */
1473static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
1474 struct btrfs_root *root,
1475 struct inode *inode)
1476{
1477 struct btrfs_path *path;
1478 int ret;
1479 u64 nlink = 0;
1480 u64 ino = btrfs_ino(inode);
1481
1482 path = btrfs_alloc_path();
1483 if (!path)
1484 return -ENOMEM;
1485
1486 ret = count_inode_refs(root, inode, path);
1487 if (ret < 0)
1488 goto out;
1489
1490 nlink = ret;
1491
1492 ret = count_inode_extrefs(root, inode, path);
1493 if (ret < 0)
1494 goto out;
1495
1496 nlink += ret;
1497
1498 ret = 0;
1499
1500 if (nlink != inode->i_nlink) {
1501 set_nlink(inode, nlink);
1502 btrfs_update_inode(trans, root, inode);
1503 }
1504 BTRFS_I(inode)->index_cnt = (u64)-1;
1505
1506 if (inode->i_nlink == 0) {
1507 if (S_ISDIR(inode->i_mode)) {
1508 ret = replay_dir_deletes(trans, root, NULL, path,
1509 ino, 1);
1510 if (ret)
1511 goto out;
1512 }
1513 ret = insert_orphan_item(trans, root, ino);
1514 }
1515
1516out:
1517 btrfs_free_path(path);
1518 return ret;
1519}
1520
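/*
 * Walk all the orphan items queued under BTRFS_TREE_LOG_FIXUP_OBJECTID (added
 * by link_to_fixup_dir) and recompute the link count of every inode they
 * reference.  Directories that end up with a link count of zero also have
 * their remaining entries removed via replay_dir_deletes().
 */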
1521static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1522 struct btrfs_root *root,
1523 struct btrfs_path *path)
1524{
1525 int ret;
1526 struct btrfs_key key;
1527 struct inode *inode;
1528
1529 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1530 key.type = BTRFS_ORPHAN_ITEM_KEY;
1531 key.offset = (u64)-1;
1532 while (1) {
1533 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1534 if (ret < 0)
1535 break;
1536
1537 if (ret == 1) {
1538 if (path->slots[0] == 0)
1539 break;
1540 path->slots[0]--;
1541 }
1542
1543 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1544 if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
1545 key.type != BTRFS_ORPHAN_ITEM_KEY)
1546 break;
1547
1548 ret = btrfs_del_item(trans, root, path);
1549 if (ret)
1550 goto out;
1551
1552 btrfs_release_path(path);
1553 inode = read_one_inode(root, key.offset);
1554 if (!inode)
1555 return -EIO;
1556
1557 ret = fixup_inode_link_count(trans, root, inode);
1558 iput(inode);
1559 if (ret)
1560 goto out;
1561
1562 /*
1563 * fixup on a directory may create new entries,
1564 		 * make sure we always look for the highest possible
1565 * offset
1566 */
1567 key.offset = (u64)-1;
1568 }
1569 ret = 0;
1570out:
1571 btrfs_release_path(path);
1572 return ret;
1573}
1574
1575
1576/*
1577 * record a given inode in the fixup dir so we can check its link
1578 * count when replay is done. The link count is incremented here
1579 * so the inode won't go away until we check it
1580 */
1581static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1582 struct btrfs_root *root,
1583 struct btrfs_path *path,
1584 u64 objectid)
1585{
1586 struct btrfs_key key;
1587 int ret = 0;
1588 struct inode *inode;
1589
1590 inode = read_one_inode(root, objectid);
1591 if (!inode)
1592 return -EIO;
1593
1594 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1595 key.type = BTRFS_ORPHAN_ITEM_KEY;
1596 key.offset = objectid;
1597
1598 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1599
1600 btrfs_release_path(path);
1601 if (ret == 0) {
1602 if (!inode->i_nlink)
1603 set_nlink(inode, 1);
1604 else
1605 inc_nlink(inode);
1606 ret = btrfs_update_inode(trans, root, inode);
1607 } else if (ret == -EEXIST) {
1608 ret = 0;
1609 } else {
1610 BUG(); /* Logic Error */
1611 }
1612 iput(inode);
1613
1614 return ret;
1615}
1616
1617/*
1618 * when replaying the log for a directory, we only insert names
1619 * for inodes that actually exist. This means an fsync on a directory
1620 * does not implicitly fsync all the new files in it
1621 */
1622static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1623 struct btrfs_root *root,
1624 u64 dirid, u64 index,
1625 char *name, int name_len,
1626 struct btrfs_key *location)
1627{
1628 struct inode *inode;
1629 struct inode *dir;
1630 int ret;
1631
1632 inode = read_one_inode(root, location->objectid);
1633 if (!inode)
1634 return -ENOENT;
1635
1636 dir = read_one_inode(root, dirid);
1637 if (!dir) {
1638 iput(inode);
1639 return -EIO;
1640 }
1641
1642 ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);
1643
1644 /* FIXME, put inode into FIXUP list */
1645
1646 iput(inode);
1647 iput(dir);
1648 return ret;
1649}
1650
1651/*
1652 * Return true if an inode reference exists in the log for the given name,
1653 * inode and parent inode.
1654 */
1655static bool name_in_log_ref(struct btrfs_root *log_root,
1656 const char *name, const int name_len,
1657 const u64 dirid, const u64 ino)
1658{
1659 struct btrfs_key search_key;
1660
1661 search_key.objectid = ino;
1662 search_key.type = BTRFS_INODE_REF_KEY;
1663 search_key.offset = dirid;
1664 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1665 return true;
1666
1667 search_key.type = BTRFS_INODE_EXTREF_KEY;
1668 search_key.offset = btrfs_extref_hash(dirid, name, name_len);
1669 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1670 return true;
1671
1672 return false;
1673}
1674
1675/*
1676 * take a single entry in a log directory item and replay it into
1677 * the subvolume.
1678 *
1679 * if a conflicting item exists in the subdirectory already,
1680 * the inode it points to is unlinked and put into the link count
1681 * fix up tree.
1682 *
1683 * If a name from the log points to a file or directory that does
1684 * not exist in the FS, it is skipped. fsyncs on directories
1685  * do not force down the inodes inside that directory, only the
1686  * name changes and unlinks done in that directory.
1687 *
1688 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
1689 * non-existing inode) and 1 if the name was replayed.
1690 */
1691static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1692 struct btrfs_root *root,
1693 struct btrfs_path *path,
1694 struct extent_buffer *eb,
1695 struct btrfs_dir_item *di,
1696 struct btrfs_key *key)
1697{
1698 char *name;
1699 int name_len;
1700 struct btrfs_dir_item *dst_di;
1701 struct btrfs_key found_key;
1702 struct btrfs_key log_key;
1703 struct inode *dir;
1704 u8 log_type;
1705 int exists;
1706 int ret = 0;
1707 bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
1708 bool name_added = false;
1709
1710 dir = read_one_inode(root, key->objectid);
1711 if (!dir)
1712 return -EIO;
1713
1714 name_len = btrfs_dir_name_len(eb, di);
1715 name = kmalloc(name_len, GFP_NOFS);
1716 if (!name) {
1717 ret = -ENOMEM;
1718 goto out;
1719 }
1720
1721 log_type = btrfs_dir_type(eb, di);
1722 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1723 name_len);
1724
1725 btrfs_dir_item_key_to_cpu(eb, di, &log_key);
1726 exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
1727 if (exists == 0)
1728 exists = 1;
1729 else
1730 exists = 0;
1731 btrfs_release_path(path);
1732
1733 if (key->type == BTRFS_DIR_ITEM_KEY) {
1734 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
1735 name, name_len, 1);
1736 } else if (key->type == BTRFS_DIR_INDEX_KEY) {
1737 dst_di = btrfs_lookup_dir_index_item(trans, root, path,
1738 key->objectid,
1739 key->offset, name,
1740 name_len, 1);
1741 } else {
1742 /* Corruption */
1743 ret = -EINVAL;
1744 goto out;
1745 }
1746 if (IS_ERR_OR_NULL(dst_di)) {
1747 /* we need a sequence number to insert, so we only
1748 * do inserts for the BTRFS_DIR_INDEX_KEY types
1749 */
1750 if (key->type != BTRFS_DIR_INDEX_KEY)
1751 goto out;
1752 goto insert;
1753 }
1754
1755 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
1756 /* the existing item matches the logged item */
1757 if (found_key.objectid == log_key.objectid &&
1758 found_key.type == log_key.type &&
1759 found_key.offset == log_key.offset &&
1760 btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
1761 update_size = false;
1762 goto out;
1763 }
1764
1765 /*
1766 * don't drop the conflicting directory entry if the inode
1767 * for the new entry doesn't exist
1768 */
1769 if (!exists)
1770 goto out;
1771
1772 ret = drop_one_dir_item(trans, root, path, dir, dst_di);
1773 if (ret)
1774 goto out;
1775
1776 if (key->type == BTRFS_DIR_INDEX_KEY)
1777 goto insert;
1778out:
1779 btrfs_release_path(path);
1780 if (!ret && update_size) {
1781 btrfs_i_size_write(dir, dir->i_size + name_len * 2);
1782 ret = btrfs_update_inode(trans, root, dir);
1783 }
1784 kfree(name);
1785 iput(dir);
1786 if (!ret && name_added)
1787 ret = 1;
1788 return ret;
1789
1790insert:
1791 if (name_in_log_ref(root->log_root, name, name_len,
1792 key->objectid, log_key.objectid)) {
1793 /* The dentry will be added later. */
1794 ret = 0;
1795 update_size = false;
1796 goto out;
1797 }
1798 btrfs_release_path(path);
1799 ret = insert_one_name(trans, root, key->objectid, key->offset,
1800 name, name_len, &log_key);
1801 if (ret && ret != -ENOENT && ret != -EEXIST)
1802 goto out;
1803 if (!ret)
1804 name_added = true;
1805 update_size = false;
1806 ret = 0;
1807 goto out;
1808}
1809
1810/*
1811 * find all the names in a directory item and reconcile them into
1812 * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than
1813 * one name in a directory item, but the same code gets used for
1814 * both directory index types
1815 */
1816static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
1817 struct btrfs_root *root,
1818 struct btrfs_path *path,
1819 struct extent_buffer *eb, int slot,
1820 struct btrfs_key *key)
1821{
1822 struct btrfs_fs_info *fs_info = root->fs_info;
1823 int ret = 0;
1824 u32 item_size = btrfs_item_size_nr(eb, slot);
1825 struct btrfs_dir_item *di;
1826 int name_len;
1827 unsigned long ptr;
1828 unsigned long ptr_end;
1829 struct btrfs_path *fixup_path = NULL;
1830
1831 ptr = btrfs_item_ptr_offset(eb, slot);
1832 ptr_end = ptr + item_size;
1833 while (ptr < ptr_end) {
1834 di = (struct btrfs_dir_item *)ptr;
1835 		if (verify_dir_item(fs_info, eb, di)) {
1836 			ret = -EIO;
			break;
		}
1837 name_len = btrfs_dir_name_len(eb, di);
1838 ret = replay_one_name(trans, root, path, eb, di, key);
1839 if (ret < 0)
1840 break;
1841 ptr = (unsigned long)(di + 1);
1842 ptr += name_len;
1843
1844 /*
1845 * If this entry refers to a non-directory (directories can not
1846 * have a link count > 1) and it was added in the transaction
1847 		 * that was not committed, make sure we fix up the link count of
1848 		 * the inode the entry points to. Otherwise something like
1849 * the following would result in a directory pointing to an
1850 		 * inode with a wrong link count that does not account for this dir
1851 * entry:
1852 *
1853 * mkdir testdir
1854 * touch testdir/foo
1855 * touch testdir/bar
1856 * sync
1857 *
1858 * ln testdir/bar testdir/bar_link
1859 * ln testdir/foo testdir/foo_link
1860 * xfs_io -c "fsync" testdir/bar
1861 *
1862 * <power failure>
1863 *
1864 * mount fs, log replay happens
1865 *
1866 * File foo would remain with a link count of 1 when it has two
1867 * entries pointing to it in the directory testdir. This would
1868 		 * make it impossible to ever delete the parent directory, as
1869 * it would result in stale dentries that can never be deleted.
1870 */
1871 if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
1872 struct btrfs_key di_key;
1873
1874 if (!fixup_path) {
1875 fixup_path = btrfs_alloc_path();
1876 if (!fixup_path) {
1877 ret = -ENOMEM;
1878 break;
1879 }
1880 }
1881
1882 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
1883 ret = link_to_fixup_dir(trans, root, fixup_path,
1884 di_key.objectid);
1885 if (ret)
1886 break;
1887 }
1888 ret = 0;
1889 }
1890 btrfs_free_path(fixup_path);
1891 return ret;
1892}
1893
1894/*
1895 * directory replay has two parts. There are the standard directory
1896 * items in the log copied from the subvolume, and range items
1897 * created in the log while the subvolume was logged.
1898 *
1899 * The range items tell us which parts of the key space the log
1900 * is authoritative for. During replay, if a key in the subvolume
1901  * directory is in a logged range item, but not actually in the log,
1902 * that means it was deleted from the directory before the fsync
1903 * and should be removed.
1904 */
1905static noinline int find_dir_range(struct btrfs_root *root,
1906 struct btrfs_path *path,
1907 u64 dirid, int key_type,
1908 u64 *start_ret, u64 *end_ret)
1909{
1910 struct btrfs_key key;
1911 u64 found_end;
1912 struct btrfs_dir_log_item *item;
1913 int ret;
1914 int nritems;
1915
1916 if (*start_ret == (u64)-1)
1917 return 1;
1918
1919 key.objectid = dirid;
1920 key.type = key_type;
1921 key.offset = *start_ret;
1922
1923 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1924 if (ret < 0)
1925 goto out;
1926 if (ret > 0) {
1927 if (path->slots[0] == 0)
1928 goto out;
1929 path->slots[0]--;
1930 }
1931 if (ret != 0)
1932 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1933
1934 if (key.type != key_type || key.objectid != dirid) {
1935 ret = 1;
1936 goto next;
1937 }
1938 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1939 struct btrfs_dir_log_item);
1940 found_end = btrfs_dir_log_end(path->nodes[0], item);
1941
1942 if (*start_ret >= key.offset && *start_ret <= found_end) {
1943 ret = 0;
1944 *start_ret = key.offset;
1945 *end_ret = found_end;
1946 goto out;
1947 }
1948 ret = 1;
1949next:
1950 /* check the next slot in the tree to see if it is a valid item */
1951 nritems = btrfs_header_nritems(path->nodes[0]);
1952 path->slots[0]++;
1953 if (path->slots[0] >= nritems) {
1954 ret = btrfs_next_leaf(root, path);
1955 if (ret)
1956 goto out;
1957 }
1958
1959 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1960
1961 if (key.type != key_type || key.objectid != dirid) {
1962 ret = 1;
1963 goto out;
1964 }
1965 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1966 struct btrfs_dir_log_item);
1967 found_end = btrfs_dir_log_end(path->nodes[0], item);
1968 *start_ret = key.offset;
1969 *end_ret = found_end;
1970 ret = 0;
1971out:
1972 btrfs_release_path(path);
1973 return ret;
1974}
1975
1976/*
1977 * this looks for a given directory item in the log. If the directory
1978 * item is not in the log, the item is removed and the inode it points
1979 * to is unlinked
1980 */
1981static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
1982 struct btrfs_root *root,
1983 struct btrfs_root *log,
1984 struct btrfs_path *path,
1985 struct btrfs_path *log_path,
1986 struct inode *dir,
1987 struct btrfs_key *dir_key)
1988{
1989 struct btrfs_fs_info *fs_info = root->fs_info;
1990 int ret;
1991 struct extent_buffer *eb;
1992 int slot;
1993 u32 item_size;
1994 struct btrfs_dir_item *di;
1995 struct btrfs_dir_item *log_di;
1996 int name_len;
1997 unsigned long ptr;
1998 unsigned long ptr_end;
1999 char *name;
2000 struct inode *inode;
2001 struct btrfs_key location;
2002
2003again:
2004 eb = path->nodes[0];
2005 slot = path->slots[0];
2006 item_size = btrfs_item_size_nr(eb, slot);
2007 ptr = btrfs_item_ptr_offset(eb, slot);
2008 ptr_end = ptr + item_size;
2009 while (ptr < ptr_end) {
2010 di = (struct btrfs_dir_item *)ptr;
2011 if (verify_dir_item(fs_info, eb, di)) {
2012 ret = -EIO;
2013 goto out;
2014 }
2015
2016 name_len = btrfs_dir_name_len(eb, di);
2017 name = kmalloc(name_len, GFP_NOFS);
2018 if (!name) {
2019 ret = -ENOMEM;
2020 goto out;
2021 }
2022 read_extent_buffer(eb, name, (unsigned long)(di + 1),
2023 name_len);
2024 log_di = NULL;
2025 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
2026 log_di = btrfs_lookup_dir_item(trans, log, log_path,
2027 dir_key->objectid,
2028 name, name_len, 0);
2029 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
2030 log_di = btrfs_lookup_dir_index_item(trans, log,
2031 log_path,
2032 dir_key->objectid,
2033 dir_key->offset,
2034 name, name_len, 0);
2035 }
2036 if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
2037 btrfs_dir_item_key_to_cpu(eb, di, &location);
2038 btrfs_release_path(path);
2039 btrfs_release_path(log_path);
2040 inode = read_one_inode(root, location.objectid);
2041 if (!inode) {
2042 kfree(name);
2043 return -EIO;
2044 }
2045
2046 ret = link_to_fixup_dir(trans, root,
2047 path, location.objectid);
2048 if (ret) {
2049 kfree(name);
2050 iput(inode);
2051 goto out;
2052 }
2053
2054 inc_nlink(inode);
2055 ret = btrfs_unlink_inode(trans, root, dir, inode,
2056 name, name_len);
2057 if (!ret)
2058 ret = btrfs_run_delayed_items(trans, fs_info);
2059 kfree(name);
2060 iput(inode);
2061 if (ret)
2062 goto out;
2063
2064 			/* there might still be more names under this key;
2065 * check and repeat if required
2066 */
2067 ret = btrfs_search_slot(NULL, root, dir_key, path,
2068 0, 0);
2069 if (ret == 0)
2070 goto again;
2071 ret = 0;
2072 goto out;
2073 } else if (IS_ERR(log_di)) {
2074 kfree(name);
2075 return PTR_ERR(log_di);
2076 }
2077 btrfs_release_path(log_path);
2078 kfree(name);
2079
2080 ptr = (unsigned long)(di + 1);
2081 ptr += name_len;
2082 }
2083 ret = 0;
2084out:
2085 btrfs_release_path(path);
2086 btrfs_release_path(log_path);
2087 return ret;
2088}
2089
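/*
 * Delete any xattrs on 'ino' in the subvolume that are not present in the log
 * tree.  The log is expected to carry the full set of xattrs for a logged
 * inode, so anything missing from it was deleted before the fsync and must
 * not survive replay.
 */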
2090static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
2091 struct btrfs_root *root,
2092 struct btrfs_root *log,
2093 struct btrfs_path *path,
2094 const u64 ino)
2095{
2096 struct btrfs_key search_key;
2097 struct btrfs_path *log_path;
2098 int i;
2099 int nritems;
2100 int ret;
2101
2102 log_path = btrfs_alloc_path();
2103 if (!log_path)
2104 return -ENOMEM;
2105
2106 search_key.objectid = ino;
2107 search_key.type = BTRFS_XATTR_ITEM_KEY;
2108 search_key.offset = 0;
2109again:
2110 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
2111 if (ret < 0)
2112 goto out;
2113process_leaf:
2114 nritems = btrfs_header_nritems(path->nodes[0]);
2115 for (i = path->slots[0]; i < nritems; i++) {
2116 struct btrfs_key key;
2117 struct btrfs_dir_item *di;
2118 struct btrfs_dir_item *log_di;
2119 u32 total_size;
2120 u32 cur;
2121
2122 btrfs_item_key_to_cpu(path->nodes[0], &key, i);
2123 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
2124 ret = 0;
2125 goto out;
2126 }
2127
2128 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
2129 total_size = btrfs_item_size_nr(path->nodes[0], i);
2130 cur = 0;
2131 while (cur < total_size) {
2132 u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
2133 u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
2134 u32 this_len = sizeof(*di) + name_len + data_len;
2135 char *name;
2136
2137 name = kmalloc(name_len, GFP_NOFS);
2138 if (!name) {
2139 ret = -ENOMEM;
2140 goto out;
2141 }
2142 read_extent_buffer(path->nodes[0], name,
2143 (unsigned long)(di + 1), name_len);
2144
2145 log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
2146 name, name_len, 0);
2147 btrfs_release_path(log_path);
2148 if (!log_di) {
2149 /* Doesn't exist in log tree, so delete it. */
2150 btrfs_release_path(path);
2151 di = btrfs_lookup_xattr(trans, root, path, ino,
2152 name, name_len, -1);
2153 kfree(name);
2154 if (IS_ERR(di)) {
2155 ret = PTR_ERR(di);
2156 goto out;
2157 }
2158 ASSERT(di);
2159 ret = btrfs_delete_one_dir_name(trans, root,
2160 path, di);
2161 if (ret)
2162 goto out;
2163 btrfs_release_path(path);
2164 search_key = key;
2165 goto again;
2166 }
2167 kfree(name);
2168 if (IS_ERR(log_di)) {
2169 ret = PTR_ERR(log_di);
2170 goto out;
2171 }
2172 cur += this_len;
2173 di = (struct btrfs_dir_item *)((char *)di + this_len);
2174 }
2175 }
2176 ret = btrfs_next_leaf(root, path);
2177 if (ret > 0)
2178 ret = 0;
2179 else if (ret == 0)
2180 goto process_leaf;
2181out:
2182 btrfs_free_path(log_path);
2183 btrfs_release_path(path);
2184 return ret;
2185}
2186
2187
2188/*
2189 * deletion replay happens before we copy any new directory items
2190 * out of the log or out of backreferences from inodes. It
2191  * scans the log to find ranges of keys that the log is authoritative for,
2192 * and then scans the directory to find items in those ranges that are
2193 * not present in the log.
2194 *
2195 * Anything we don't find in the log is unlinked and removed from the
2196 * directory.
2197 */
2198static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
2199 struct btrfs_root *root,
2200 struct btrfs_root *log,
2201 struct btrfs_path *path,
2202 u64 dirid, int del_all)
2203{
2204 u64 range_start;
2205 u64 range_end;
2206 int key_type = BTRFS_DIR_LOG_ITEM_KEY;
2207 int ret = 0;
2208 struct btrfs_key dir_key;
2209 struct btrfs_key found_key;
2210 struct btrfs_path *log_path;
2211 struct inode *dir;
2212
2213 dir_key.objectid = dirid;
2214 dir_key.type = BTRFS_DIR_ITEM_KEY;
2215 log_path = btrfs_alloc_path();
2216 if (!log_path)
2217 return -ENOMEM;
2218
2219 dir = read_one_inode(root, dirid);
2220 /* it isn't an error if the inode isn't there, that can happen
2221 * because we replay the deletes before we copy in the inode item
2222 * from the log
2223 */
2224 if (!dir) {
2225 btrfs_free_path(log_path);
2226 return 0;
2227 }
2228again:
2229 range_start = 0;
2230 range_end = 0;
2231 while (1) {
2232 if (del_all)
2233 range_end = (u64)-1;
2234 else {
2235 ret = find_dir_range(log, path, dirid, key_type,
2236 &range_start, &range_end);
2237 if (ret != 0)
2238 break;
2239 }
2240
2241 dir_key.offset = range_start;
2242 while (1) {
2243 int nritems;
2244 ret = btrfs_search_slot(NULL, root, &dir_key, path,
2245 0, 0);
2246 if (ret < 0)
2247 goto out;
2248
2249 nritems = btrfs_header_nritems(path->nodes[0]);
2250 if (path->slots[0] >= nritems) {
2251 ret = btrfs_next_leaf(root, path);
2252 if (ret)
2253 break;
2254 }
2255 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2256 path->slots[0]);
2257 if (found_key.objectid != dirid ||
2258 found_key.type != dir_key.type)
2259 goto next_type;
2260
2261 if (found_key.offset > range_end)
2262 break;
2263
2264 ret = check_item_in_log(trans, root, log, path,
2265 log_path, dir,
2266 &found_key);
2267 if (ret)
2268 goto out;
2269 if (found_key.offset == (u64)-1)
2270 break;
2271 dir_key.offset = found_key.offset + 1;
2272 }
2273 btrfs_release_path(path);
2274 if (range_end == (u64)-1)
2275 break;
2276 range_start = range_end + 1;
2277 }
2278
2279next_type:
2280 ret = 0;
2281 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
2282 key_type = BTRFS_DIR_LOG_INDEX_KEY;
2283 dir_key.type = BTRFS_DIR_INDEX_KEY;
2284 btrfs_release_path(path);
2285 goto again;
2286 }
2287out:
2288 btrfs_release_path(path);
2289 btrfs_free_path(log_path);
2290 iput(dir);
2291 return ret;
2292}
2293
2294/*
2295 * the process_func used to replay items from the log tree. This
2296 * gets called in two different stages. The first stage just looks
2297 * for inodes and makes sure they are all copied into the subvolume.
2298 *
2299 * The second stage copies all the other item types from the log into
2300 * the subvolume. The two stage approach is slower, but gets rid of
2301 * lots of complexity around inodes referencing other inodes that exist
2302 * only in the log (references come from either directory items or inode
2303 * back refs).
2304 */
2305static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2306 struct walk_control *wc, u64 gen)
2307{
2308 int nritems;
2309 struct btrfs_path *path;
2310 struct btrfs_root *root = wc->replay_dest;
2311 struct btrfs_key key;
2312 int level;
2313 int i;
2314 int ret;
2315
2316 ret = btrfs_read_buffer(eb, gen);
2317 if (ret)
2318 return ret;
2319
2320 level = btrfs_header_level(eb);
2321
2322 if (level != 0)
2323 return 0;
2324
2325 path = btrfs_alloc_path();
2326 if (!path)
2327 return -ENOMEM;
2328
2329 nritems = btrfs_header_nritems(eb);
2330 for (i = 0; i < nritems; i++) {
2331 btrfs_item_key_to_cpu(eb, &key, i);
2332
2333 /* inode keys are done during the first stage */
2334 if (key.type == BTRFS_INODE_ITEM_KEY &&
2335 wc->stage == LOG_WALK_REPLAY_INODES) {
2336 struct btrfs_inode_item *inode_item;
2337 u32 mode;
2338
2339 inode_item = btrfs_item_ptr(eb, i,
2340 struct btrfs_inode_item);
2341 ret = replay_xattr_deletes(wc->trans, root, log,
2342 path, key.objectid);
2343 if (ret)
2344 break;
2345 mode = btrfs_inode_mode(eb, inode_item);
2346 if (S_ISDIR(mode)) {
2347 ret = replay_dir_deletes(wc->trans,
2348 root, log, path, key.objectid, 0);
2349 if (ret)
2350 break;
2351 }
2352 ret = overwrite_item(wc->trans, root, path,
2353 eb, i, &key);
2354 if (ret)
2355 break;
2356
2357 			/* for regular files, make sure the corresponding
2358 			 * orphan item exists. Extents past the new EOF
2359 * will be truncated later by orphan cleanup.
2360 */
2361 if (S_ISREG(mode)) {
2362 ret = insert_orphan_item(wc->trans, root,
2363 key.objectid);
2364 if (ret)
2365 break;
2366 }
2367
2368 ret = link_to_fixup_dir(wc->trans, root,
2369 path, key.objectid);
2370 if (ret)
2371 break;
2372 }
2373
2374 if (key.type == BTRFS_DIR_INDEX_KEY &&
2375 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
2376 ret = replay_one_dir_item(wc->trans, root, path,
2377 eb, i, &key);
2378 if (ret)
2379 break;
2380 }
2381
2382 if (wc->stage < LOG_WALK_REPLAY_ALL)
2383 continue;
2384
2385 /* these keys are simply copied */
2386 if (key.type == BTRFS_XATTR_ITEM_KEY) {
2387 ret = overwrite_item(wc->trans, root, path,
2388 eb, i, &key);
2389 if (ret)
2390 break;
2391 } else if (key.type == BTRFS_INODE_REF_KEY ||
2392 key.type == BTRFS_INODE_EXTREF_KEY) {
2393 ret = add_inode_ref(wc->trans, root, log, path,
2394 eb, i, &key);
2395 if (ret && ret != -ENOENT)
2396 break;
2397 ret = 0;
2398 } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
2399 ret = replay_one_extent(wc->trans, root, path,
2400 eb, i, &key);
2401 if (ret)
2402 break;
2403 } else if (key.type == BTRFS_DIR_ITEM_KEY) {
2404 ret = replay_one_dir_item(wc->trans, root, path,
2405 eb, i, &key);
2406 if (ret)
2407 break;
2408 }
2409 }
2410 btrfs_free_path(path);
2411 return ret;
2412}
2413
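/*
 * Walk the children of path->nodes[*level] in the log tree, descending into
 * lower level nodes and running wc->process_func on every leaf that is
 * reached.  When wc->free is set, processed leaves are also cleaned and their
 * reserved extents released.
 */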
2414static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2415 struct btrfs_root *root,
2416 struct btrfs_path *path, int *level,
2417 struct walk_control *wc)
2418{
2419 struct btrfs_fs_info *fs_info = root->fs_info;
2420 u64 root_owner;
2421 u64 bytenr;
2422 u64 ptr_gen;
2423 struct extent_buffer *next;
2424 struct extent_buffer *cur;
2425 struct extent_buffer *parent;
2426 u32 blocksize;
2427 int ret = 0;
2428
2429 WARN_ON(*level < 0);
2430 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2431
2432 while (*level > 0) {
2433 WARN_ON(*level < 0);
2434 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2435 cur = path->nodes[*level];
2436
2437 WARN_ON(btrfs_header_level(cur) != *level);
2438
2439 if (path->slots[*level] >=
2440 btrfs_header_nritems(cur))
2441 break;
2442
2443 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2444 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2445 blocksize = fs_info->nodesize;
2446
2447 parent = path->nodes[*level];
2448 root_owner = btrfs_header_owner(parent);
2449
2450 next = btrfs_find_create_tree_block(fs_info, bytenr);
2451 if (IS_ERR(next))
2452 return PTR_ERR(next);
2453
2454 if (*level == 1) {
2455 ret = wc->process_func(root, next, wc, ptr_gen);
2456 if (ret) {
2457 free_extent_buffer(next);
2458 return ret;
2459 }
2460
2461 path->slots[*level]++;
2462 if (wc->free) {
2463 ret = btrfs_read_buffer(next, ptr_gen);
2464 if (ret) {
2465 free_extent_buffer(next);
2466 return ret;
2467 }
2468
2469 if (trans) {
2470 btrfs_tree_lock(next);
2471 btrfs_set_lock_blocking(next);
2472 clean_tree_block(trans, fs_info, next);
2473 btrfs_wait_tree_block_writeback(next);
2474 btrfs_tree_unlock(next);
2475 }
2476
2477 WARN_ON(root_owner !=
2478 BTRFS_TREE_LOG_OBJECTID);
2479 ret = btrfs_free_and_pin_reserved_extent(
2480 fs_info, bytenr,
2481 blocksize);
2482 if (ret) {
2483 free_extent_buffer(next);
2484 return ret;
2485 }
2486 }
2487 free_extent_buffer(next);
2488 continue;
2489 }
2490 ret = btrfs_read_buffer(next, ptr_gen);
2491 if (ret) {
2492 free_extent_buffer(next);
2493 return ret;
2494 }
2495
2496 WARN_ON(*level <= 0);
2497 if (path->nodes[*level-1])
2498 free_extent_buffer(path->nodes[*level-1]);
2499 path->nodes[*level-1] = next;
2500 *level = btrfs_header_level(next);
2501 path->slots[*level] = 0;
2502 cond_resched();
2503 }
2504 WARN_ON(*level < 0);
2505 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2506
2507 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
2508
2509 cond_resched();
2510 return 0;
2511}
2512
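/*
 * Counterpart of walk_down_log_tree(): once all children of a node have been
 * visited, run wc->process_func on the node itself (freeing it if wc->free is
 * set) and move back up towards the root.  Returns 0 if there is a sibling
 * slot left to walk, 1 when the whole tree has been walked.
 */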
2513static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2514 struct btrfs_root *root,
2515 struct btrfs_path *path, int *level,
2516 struct walk_control *wc)
2517{
2518 struct btrfs_fs_info *fs_info = root->fs_info;
2519 u64 root_owner;
2520 int i;
2521 int slot;
2522 int ret;
2523
2524 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2525 slot = path->slots[i];
2526 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2527 path->slots[i]++;
2528 *level = i;
2529 WARN_ON(*level == 0);
2530 return 0;
2531 } else {
2532 struct extent_buffer *parent;
2533 if (path->nodes[*level] == root->node)
2534 parent = path->nodes[*level];
2535 else
2536 parent = path->nodes[*level + 1];
2537
2538 root_owner = btrfs_header_owner(parent);
2539 ret = wc->process_func(root, path->nodes[*level], wc,
2540 btrfs_header_generation(path->nodes[*level]));
2541 if (ret)
2542 return ret;
2543
2544 if (wc->free) {
2545 struct extent_buffer *next;
2546
2547 next = path->nodes[*level];
2548
2549 if (trans) {
2550 btrfs_tree_lock(next);
2551 btrfs_set_lock_blocking(next);
2552 clean_tree_block(trans, fs_info, next);
2553 btrfs_wait_tree_block_writeback(next);
2554 btrfs_tree_unlock(next);
2555 }
2556
2557 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
2558 ret = btrfs_free_and_pin_reserved_extent(
2559 fs_info,
2560 path->nodes[*level]->start,
2561 path->nodes[*level]->len);
2562 if (ret)
2563 return ret;
2564 }
2565 free_extent_buffer(path->nodes[*level]);
2566 path->nodes[*level] = NULL;
2567 *level = i + 1;
2568 }
2569 }
2570 return 1;
2571}
2572
2573/*
2574  * drop the reference count on the log tree rooted at 'log'. This traverses
2575 * the tree freeing any blocks that have a ref count of zero after being
2576 * decremented.
2577 */
2578static int walk_log_tree(struct btrfs_trans_handle *trans,
2579 struct btrfs_root *log, struct walk_control *wc)
2580{
2581 struct btrfs_fs_info *fs_info = log->fs_info;
2582 int ret = 0;
2583 int wret;
2584 int level;
2585 struct btrfs_path *path;
2586 int orig_level;
2587
2588 path = btrfs_alloc_path();
2589 if (!path)
2590 return -ENOMEM;
2591
2592 level = btrfs_header_level(log->node);
2593 orig_level = level;
2594 path->nodes[level] = log->node;
2595 extent_buffer_get(log->node);
2596 path->slots[level] = 0;
2597
2598 while (1) {
2599 wret = walk_down_log_tree(trans, log, path, &level, wc);
2600 if (wret > 0)
2601 break;
2602 if (wret < 0) {
2603 ret = wret;
2604 goto out;
2605 }
2606
2607 wret = walk_up_log_tree(trans, log, path, &level, wc);
2608 if (wret > 0)
2609 break;
2610 if (wret < 0) {
2611 ret = wret;
2612 goto out;
2613 }
2614 }
2615
2616 /* was the root node processed? if not, catch it here */
2617 if (path->nodes[orig_level]) {
2618 ret = wc->process_func(log, path->nodes[orig_level], wc,
2619 btrfs_header_generation(path->nodes[orig_level]));
2620 if (ret)
2621 goto out;
2622 if (wc->free) {
2623 struct extent_buffer *next;
2624
2625 next = path->nodes[orig_level];
2626
2627 if (trans) {
2628 btrfs_tree_lock(next);
2629 btrfs_set_lock_blocking(next);
2630 clean_tree_block(trans, fs_info, next);
2631 btrfs_wait_tree_block_writeback(next);
2632 btrfs_tree_unlock(next);
2633 }
2634
2635 WARN_ON(log->root_key.objectid !=
2636 BTRFS_TREE_LOG_OBJECTID);
2637 ret = btrfs_free_and_pin_reserved_extent(fs_info,
2638 next->start, next->len);
2639 if (ret)
2640 goto out;
2641 }
2642 }
2643
2644out:
2645 btrfs_free_path(path);
2646 return ret;
2647}
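
/*
 * Typical usage is to fill in a walk_control and let walk_log_tree() visit
 * every block of the log. For instance, free_log_tree() further below drives
 * it with:
 *
 *	struct walk_control wc = {
 *		.free = 1,
 *		.process_func = process_one_buffer
 *	};
 *	ret = walk_log_tree(trans, log, &wc);
 *
 * (Illustrative sketch only; walk_control has additional fields that other
 * callers, such as the log replay code, fill in as well.)
 */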
2648
2649/*
2650 * helper function to update the item for a given subvolumes log root
2651 * in the tree of log roots
2652 */
2653static int update_log_root(struct btrfs_trans_handle *trans,
2654 struct btrfs_root *log)
2655{
2656 struct btrfs_fs_info *fs_info = log->fs_info;
2657 int ret;
2658
2659 if (log->log_transid == 1) {
2660 /* insert root item on the first sync */
2661 ret = btrfs_insert_root(trans, fs_info->log_root_tree,
2662 &log->root_key, &log->root_item);
2663 } else {
2664 ret = btrfs_update_root(trans, fs_info->log_root_tree,
2665 &log->root_key, &log->root_item);
2666 }
2667 return ret;
2668}
2669
2670static void wait_log_commit(struct btrfs_root *root, int transid)
2671{
2672 DEFINE_WAIT(wait);
2673 int index = transid % 2;
2674
2675 /*
2676 * we only allow two pending log transactions at a time,
2677 * so we know that if ours is more than 2 behind the
2678 * current transaction, it has already committed and we're done
2679 */
2680 do {
2681 prepare_to_wait(&root->log_commit_wait[index],
2682 &wait, TASK_UNINTERRUPTIBLE);
2683 mutex_unlock(&root->log_mutex);
2684
2685 if (root->log_transid_committed < transid &&
2686 atomic_read(&root->log_commit[index]))
2687 schedule();
2688
2689 finish_wait(&root->log_commit_wait[index], &wait);
2690 mutex_lock(&root->log_mutex);
2691 } while (root->log_transid_committed < transid &&
2692 atomic_read(&root->log_commit[index]));
2693}
2694
2695static void wait_for_writer(struct btrfs_root *root)
2696{
2697 DEFINE_WAIT(wait);
2698
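	/*
	 * Classic sleep/recheck pattern: register on the waitqueue before
	 * dropping log_mutex and re-check log_writers afterwards, so a wakeup
	 * that races with the unlock cannot be missed.
	 */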
2699 while (atomic_read(&root->log_writers)) {
2700 prepare_to_wait(&root->log_writer_wait,
2701 &wait, TASK_UNINTERRUPTIBLE);
2702 mutex_unlock(&root->log_mutex);
2703 if (atomic_read(&root->log_writers))
2704 schedule();
2705 finish_wait(&root->log_writer_wait, &wait);
2706 mutex_lock(&root->log_mutex);
2707 }
2708}
2709
2710static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
2711 struct btrfs_log_ctx *ctx)
2712{
2713 if (!ctx)
2714 return;
2715
2716 mutex_lock(&root->log_mutex);
2717 list_del_init(&ctx->list);
2718 mutex_unlock(&root->log_mutex);
2719}
2720
2721/*
2722 * Invoked with the log mutex held, or by a caller that has made sure
2723 * no other task can access the list.
2724 */
2725static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
2726 int index, int error)
2727{
2728 struct btrfs_log_ctx *ctx;
2729 struct btrfs_log_ctx *safe;
2730
2731 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
2732 list_del_init(&ctx->list);
2733 ctx->log_ret = error;
2734 }
2735
2736 INIT_LIST_HEAD(&root->log_ctxs[index]);
2737}
2738
2739/*
2740 * btrfs_sync_log sends a given tree log down to the disk and
2741 * updates the super blocks to record it. When this call is done,
2742 * you know that any inodes previously logged are safely on disk only
2743 * if it returns 0.
2744 *
2745 * Any other return value means you need to call btrfs_commit_transaction.
2746 * Some of the edge cases for fsyncing directories that have had unlinks
2747 * or renames done in the past mean that sometimes the only safe
2748 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
2749 * that has happened.
2750 */
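/*
 * Rough sketch of how the fsync path is expected to react to the return
 * value (illustrative only -- not the verbatim fsync code, and the exact
 * helper signatures differ between kernel versions):
 *
 *	ret = btrfs_sync_log(trans, root, &ctx);
 *	if (ret == 0)
 *		ret = btrfs_end_transaction(trans);	(the log alone is enough)
 *	else
 *		ret = btrfs_commit_transaction(trans);	(fall back to a full commit)
 */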
2751int btrfs_sync_log(struct btrfs_trans_handle *trans,
2752 struct btrfs_root *root, struct btrfs_log_ctx *ctx)
2753{
2754 int index1;
2755 int index2;
2756 int mark;
2757 int ret;
2758 struct btrfs_fs_info *fs_info = root->fs_info;
2759 struct btrfs_root *log = root->log_root;
2760 struct btrfs_root *log_root_tree = fs_info->log_root_tree;
2761 int log_transid = 0;
2762 struct btrfs_log_ctx root_log_ctx;
2763 struct blk_plug plug;
2764
2765 mutex_lock(&root->log_mutex);
2766 log_transid = ctx->log_transid;
2767 if (root->log_transid_committed >= log_transid) {
2768 mutex_unlock(&root->log_mutex);
2769 return ctx->log_ret;
2770 }
2771
2772 index1 = log_transid % 2;
2773 if (atomic_read(&root->log_commit[index1])) {
2774 wait_log_commit(root, log_transid);
2775 mutex_unlock(&root->log_mutex);
2776 return ctx->log_ret;
2777 }
2778 ASSERT(log_transid == root->log_transid);
2779 atomic_set(&root->log_commit[index1], 1);
2780
2781 /* wait for previous tree log sync to complete */
2782 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
2783 wait_log_commit(root, log_transid - 1);
2784
2785 while (1) {
2786 int batch = atomic_read(&root->log_batch);
2787 /* when we're on an ssd, just kick the log commit out */
2788 if (!btrfs_test_opt(fs_info, SSD) &&
2789 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
2790 mutex_unlock(&root->log_mutex);
2791 schedule_timeout_uninterruptible(1);
2792 mutex_lock(&root->log_mutex);
2793 }
2794 wait_for_writer(root);
2795 if (batch == atomic_read(&root->log_batch))
2796 break;
2797 }
2798
2799 /* bail out if we need to do a full commit */
2800 if (btrfs_need_log_full_commit(fs_info, trans)) {
2801 ret = -EAGAIN;
2802 btrfs_free_logged_extents(log, log_transid);
2803 mutex_unlock(&root->log_mutex);
2804 goto out;
2805 }
2806
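	/*
	 * The dirty pages of the two in-flight log transactions are tracked
	 * with different extent bits (EXTENT_DIRTY for even log transids,
	 * EXTENT_NEW for odd ones), so each log commit writes and waits on
	 * only its own blocks.
	 */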
2807 if (log_transid % 2 == 0)
2808 mark = EXTENT_DIRTY;
2809 else
2810 mark = EXTENT_NEW;
2811
2812 /* we start IO on all the marked extents here, but we don't actually
2813 * wait for them until later.
2814 */
2815 blk_start_plug(&plug);
2816 ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
2817 if (ret) {
2818 blk_finish_plug(&plug);
2819 btrfs_abort_transaction(trans, ret);
2820 btrfs_free_logged_extents(log, log_transid);
2821 btrfs_set_log_full_commit(fs_info, trans);
2822 mutex_unlock(&root->log_mutex);
2823 goto out;
2824 }
2825
2826 btrfs_set_root_node(&log->root_item, log->node);
2827
2828 root->log_transid++;
2829 log->log_transid = root->log_transid;
2830 root->log_start_pid = 0;
2831 /*
2832 * IO has been started, blocks of the log tree have the WRITTEN flag set
2833 * in their headers. New modifications of the log will be written to
2834 * new positions. So it's safe to allow log writers to go in.
2835 */
2836 mutex_unlock(&root->log_mutex);
2837
2838 btrfs_init_log_ctx(&root_log_ctx, NULL);
2839
2840 mutex_lock(&log_root_tree->log_mutex);
2841 atomic_inc(&log_root_tree->log_batch);
2842 atomic_inc(&log_root_tree->log_writers);
2843
2844 index2 = log_root_tree->log_transid % 2;
2845 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
2846 root_log_ctx.log_transid = log_root_tree->log_transid;
2847
2848 mutex_unlock(&log_root_tree->log_mutex);
2849
2850 ret = update_log_root(trans, log);
2851
2852 mutex_lock(&log_root_tree->log_mutex);
2853 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
2854 /*
2855 * Implicit memory barrier after atomic_dec_and_test
2856 */
2857 if (waitqueue_active(&log_root_tree->log_writer_wait))
2858 wake_up(&log_root_tree->log_writer_wait);
2859 }
2860
2861 if (ret) {
2862 if (!list_empty(&root_log_ctx.list))
2863 list_del_init(&root_log_ctx.list);
2864
2865 blk_finish_plug(&plug);
2866 btrfs_set_log_full_commit(fs_info, trans);
2867
2868 if (ret != -ENOSPC) {
2869 btrfs_abort_transaction(trans, ret);
2870 mutex_unlock(&log_root_tree->log_mutex);
2871 goto out;
2872 }
2873 btrfs_wait_tree_log_extents(log, mark);
2874 btrfs_free_logged_extents(log, log_transid);
2875 mutex_unlock(&log_root_tree->log_mutex);
2876 ret = -EAGAIN;
2877 goto out;
2878 }
2879
2880 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
2881 blk_finish_plug(&plug);
2882 list_del_init(&root_log_ctx.list);
2883 mutex_unlock(&log_root_tree->log_mutex);
2884 ret = root_log_ctx.log_ret;
2885 goto out;
2886 }
2887
2888 index2 = root_log_ctx.log_transid % 2;
2889 if (atomic_read(&log_root_tree->log_commit[index2])) {
2890 blk_finish_plug(&plug);
2891 ret = btrfs_wait_tree_log_extents(log, mark);
2892 btrfs_wait_logged_extents(trans, log, log_transid);
2893 wait_log_commit(log_root_tree,
2894 root_log_ctx.log_transid);
2895 mutex_unlock(&log_root_tree->log_mutex);
2896 if (!ret)
2897 ret = root_log_ctx.log_ret;
2898 goto out;
2899 }
2900 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
2901 atomic_set(&log_root_tree->log_commit[index2], 1);
2902
2903 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
2904 wait_log_commit(log_root_tree,
2905 root_log_ctx.log_transid - 1);
2906 }
2907
2908 wait_for_writer(log_root_tree);
2909
2910 /*
2911 * now that we've moved on to the tree of log tree roots,
2912 * check the full commit flag again
2913 */
2914 if (btrfs_need_log_full_commit(fs_info, trans)) {
2915 blk_finish_plug(&plug);
2916 btrfs_wait_tree_log_extents(log, mark);
2917 btrfs_free_logged_extents(log, log_transid);
2918 mutex_unlock(&log_root_tree->log_mutex);
2919 ret = -EAGAIN;
2920 goto out_wake_log_root;
2921 }
2922
2923 ret = btrfs_write_marked_extents(fs_info,
2924 &log_root_tree->dirty_log_pages,
2925 EXTENT_DIRTY | EXTENT_NEW);
2926 blk_finish_plug(&plug);
2927 if (ret) {
2928 btrfs_set_log_full_commit(fs_info, trans);
2929 btrfs_abort_transaction(trans, ret);
2930 btrfs_free_logged_extents(log, log_transid);
2931 mutex_unlock(&log_root_tree->log_mutex);
2932 goto out_wake_log_root;
2933 }
2934 ret = btrfs_wait_tree_log_extents(log, mark);
2935 if (!ret)
2936 ret = btrfs_wait_tree_log_extents(log_root_tree,
2937 EXTENT_NEW | EXTENT_DIRTY);
2938 if (ret) {
2939 btrfs_set_log_full_commit(fs_info, trans);
2940 btrfs_free_logged_extents(log, log_transid);
2941 mutex_unlock(&log_root_tree->log_mutex);
2942 goto out_wake_log_root;
2943 }
2944 btrfs_wait_logged_extents(trans, log, log_transid);
2945
2946 btrfs_set_super_log_root(fs_info->super_for_commit,
2947 log_root_tree->node->start);
2948 btrfs_set_super_log_root_level(fs_info->super_for_commit,
2949 btrfs_header_level(log_root_tree->node));
2950
2951 log_root_tree->log_transid++;
2952 mutex_unlock(&log_root_tree->log_mutex);
2953
2954 /*
2955 * nobody else is going to jump in and write the ctree
2956 * super here because the log_commit atomic below is protecting
2957 * us. We must be called with a transaction handle pinning
2958 * the running transaction open, so a full commit can't hop
2959 * in and cause problems either.
2960 */
2961 ret = write_ctree_super(trans, fs_info, 1);
2962 if (ret) {
2963 btrfs_set_log_full_commit(fs_info, trans);
2964 btrfs_abort_transaction(trans, ret);
2965 goto out_wake_log_root;
2966 }
2967
2968 mutex_lock(&root->log_mutex);
2969 if (root->last_log_commit < log_transid)
2970 root->last_log_commit = log_transid;
2971 mutex_unlock(&root->log_mutex);
2972
2973out_wake_log_root:
2974 mutex_lock(&log_root_tree->log_mutex);
2975 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
2976
2977 log_root_tree->log_transid_committed++;
2978 atomic_set(&log_root_tree->log_commit[index2], 0);
2979 mutex_unlock(&log_root_tree->log_mutex);
2980
2981 /*
2982 * The barrier before waitqueue_active is implied by mutex_unlock
2983 */
2984 if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
2985 wake_up(&log_root_tree->log_commit_wait[index2]);
2986out:
2987 mutex_lock(&root->log_mutex);
2988 btrfs_remove_all_log_ctxs(root, index1, ret);
2989 root->log_transid_committed++;
2990 atomic_set(&root->log_commit[index1], 0);
2991 mutex_unlock(&root->log_mutex);
2992
2993 /*
2994 * The barrier before waitqueue_active is implied by mutex_unlock
2995 */
2996 if (waitqueue_active(&root->log_commit_wait[index1]))
2997 wake_up(&root->log_commit_wait[index1]);
2998 return ret;
2999}
3000
3001static void free_log_tree(struct btrfs_trans_handle *trans,
3002 struct btrfs_root *log)
3003{
3004 int ret;
3005 u64 start;
3006 u64 end;
3007 struct walk_control wc = {
3008 .free = 1,
3009 .process_func = process_one_buffer
3010 };
3011
3012 ret = walk_log_tree(trans, log, &wc);
3013 /* I don't think this can happen but just in case */
3014 if (ret)
3015 btrfs_abort_transaction(trans, ret);
3016
3017 while (1) {
3018 ret = find_first_extent_bit(&log->dirty_log_pages,
3019 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
3020 NULL);
3021 if (ret)
3022 break;
3023
3024 clear_extent_bits(&log->dirty_log_pages, start, end,
3025 EXTENT_DIRTY | EXTENT_NEW);
3026 }
3027
3028 /*
3029 * We may have short-circuited the log tree with the full commit logic
3030 * and left ordered extents on our list, so clear these out to keep us
3031 * from leaking inodes and memory.
3032 */
3033 btrfs_free_logged_extents(log, 0);
3034 btrfs_free_logged_extents(log, 1);
3035
3036 free_extent_buffer(log->node);
3037 kfree(log);
3038}
3039
3040/*
3041 * free all the extents used by the tree log. This should be called
3042 * at commit time of the full transaction
3043 */
3044int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3045{
3046 if (root->log_root) {
3047 free_log_tree(trans, root->log_root);
3048 root->log_root = NULL;
3049 }
3050 return 0;
3051}
3052
3053int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
3054 struct btrfs_fs_info *fs_info)
3055{
3056 if (fs_info->log_root_tree) {
3057 free_log_tree(trans, fs_info->log_root_tree);
3058 fs_info->log_root_tree = NULL;
3059 }
3060 return 0;
3061}
3062
3063/*
3064 * If both a file and directory are logged, and unlinks or renames are
3065 * mixed in, we have a few interesting corners:
3066 *
3067 * create file X in dir Y
3068 * link file X to X.link in dir Y
3069 * fsync file X
3070 * unlink file X but leave X.link
3071 * fsync dir Y
3072 *
3073 * After a crash we would expect only X.link to exist. But file X
3074 * didn't get fsync'd again so the log has back refs for X and X.link.
3075 *
3076 * We solve this by removing directory entries and inode backrefs from the
3077 * log when a file that was logged in the current transaction is
3078 * unlinked. Any later fsync will include the updated log entries, and
3079 * we'll be able to reconstruct the proper directory items from backrefs.
3080 *
3081 * This optimization allows us to avoid relogging the entire inode
3082 * or the entire directory.
3083 */
3084int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
3085 struct btrfs_root *root,
3086 const char *name, int name_len,
3087 struct inode *dir, u64 index)
3088{
3089 struct btrfs_root *log;
3090 struct btrfs_dir_item *di;
3091 struct btrfs_path *path;
3092 int ret;
3093 int err = 0;
3094 int bytes_del = 0;
3095 u64 dir_ino = btrfs_ino(dir);
3096
3097 if (BTRFS_I(dir)->logged_trans < trans->transid)
3098 return 0;
3099
3100 ret = join_running_log_trans(root);
3101 if (ret)
3102 return 0;
3103
3104 mutex_lock(&BTRFS_I(dir)->log_mutex);
3105
3106 log = root->log_root;
3107 path = btrfs_alloc_path();
3108 if (!path) {
3109 err = -ENOMEM;
3110 goto out_unlock;
3111 }
3112
3113 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
3114 name, name_len, -1);
3115 if (IS_ERR(di)) {
3116 err = PTR_ERR(di);
3117 goto fail;
3118 }
3119 if (di) {
3120 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3121 bytes_del += name_len;
3122 if (ret) {
3123 err = ret;
3124 goto fail;
3125 }
3126 }
3127 btrfs_release_path(path);
3128 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
3129 index, name, name_len, -1);
3130 if (IS_ERR(di)) {
3131 err = PTR_ERR(di);
3132 goto fail;
3133 }
3134 if (di) {
3135 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3136 bytes_del += name_len;
3137 if (ret) {
3138 err = ret;
3139 goto fail;
3140 }
3141 }
3142
3143 /* update the directory size in the log to reflect the names
3144 * we have removed
3145 */
3146 if (bytes_del) {
3147 struct btrfs_key key;
3148
3149 key.objectid = dir_ino;
3150 key.offset = 0;
3151 key.type = BTRFS_INODE_ITEM_KEY;
3152 btrfs_release_path(path);
3153
3154 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
3155 if (ret < 0) {
3156 err = ret;
3157 goto fail;
3158 }
3159 if (ret == 0) {
3160 struct btrfs_inode_item *item;
3161 u64 i_size;
3162
3163 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3164 struct btrfs_inode_item);
3165 i_size = btrfs_inode_size(path->nodes[0], item);
3166 if (i_size > bytes_del)
3167 i_size -= bytes_del;
3168 else
3169 i_size = 0;
3170 btrfs_set_inode_size(path->nodes[0], item, i_size);
3171 btrfs_mark_buffer_dirty(path->nodes[0]);
3172 } else
3173 ret = 0;
3174 btrfs_release_path(path);
3175 }
3176fail:
3177 btrfs_free_path(path);
3178out_unlock:
3179 mutex_unlock(&BTRFS_I(dir)->log_mutex);
3180 if (ret == -ENOSPC) {
3181 btrfs_set_log_full_commit(root->fs_info, trans);
3182 ret = 0;
3183 } else if (ret < 0)
3184 btrfs_abort_transaction(trans, ret);
3185
3186 btrfs_end_log_trans(root);
3187
3188 return err;
3189}
3190
3191/* see comments for btrfs_del_dir_entries_in_log */
3192int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
3193 struct btrfs_root *root,
3194 const char *name, int name_len,
3195 struct inode *inode, u64 dirid)
3196{
3197 struct btrfs_fs_info *fs_info = root->fs_info;
3198 struct btrfs_root *log;
3199 u64 index;
3200 int ret;
3201
3202 if (BTRFS_I(inode)->logged_trans < trans->transid)
3203 return 0;
3204
3205 ret = join_running_log_trans(root);
3206 if (ret)
3207 return 0;
3208 log = root->log_root;
3209 mutex_lock(&BTRFS_I(inode)->log_mutex);
3210
3211 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
3212 dirid, &index);
3213 mutex_unlock(&BTRFS_I(inode)->log_mutex);
3214 if (ret == -ENOSPC) {
3215 btrfs_set_log_full_commit(fs_info, trans);
3216 ret = 0;
3217 } else if (ret < 0 && ret != -ENOENT)
3218 btrfs_abort_transaction(trans, ret);
3219 btrfs_end_log_trans(root);
3220
3221 return ret;
3222}
3223
3224/*
3225 * creates a range item in the log for 'dirid'. first_offset and
3226 * last_offset tell us which parts of the key space the log should
3227 * be considered authoritative for.
3228 */
3229static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
3230 struct btrfs_root *log,
3231 struct btrfs_path *path,
3232 int key_type, u64 dirid,
3233 u64 first_offset, u64 last_offset)
3234{
3235 int ret;
3236 struct btrfs_key key;
3237 struct btrfs_dir_log_item *item;
3238
3239 key.objectid = dirid;
3240 key.offset = first_offset;
3241 if (key_type == BTRFS_DIR_ITEM_KEY)
3242 key.type = BTRFS_DIR_LOG_ITEM_KEY;
3243 else
3244 key.type = BTRFS_DIR_LOG_INDEX_KEY;
3245 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
3246 if (ret)
3247 return ret;
3248
3249 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3250 struct btrfs_dir_log_item);
3251 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3252 btrfs_mark_buffer_dirty(path->nodes[0]);
3253 btrfs_release_path(path);
3254 return 0;
3255}
3256
3257/*
3258 * log all the items included in the current transaction for a given
3259 * directory. This also creates the range items in the log tree required
3260 * to replay anything deleted before the fsync
3261 */
3262static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3263 struct btrfs_root *root, struct inode *inode,
3264 struct btrfs_path *path,
3265 struct btrfs_path *dst_path, int key_type,
3266 struct btrfs_log_ctx *ctx,
3267 u64 min_offset, u64 *last_offset_ret)
3268{
3269 struct btrfs_key min_key;
3270 struct btrfs_root *log = root->log_root;
3271 struct extent_buffer *src;
3272 int err = 0;
3273 int ret;
3274 int i;
3275 int nritems;
3276 u64 first_offset = min_offset;
3277 u64 last_offset = (u64)-1;
3278 u64 ino = btrfs_ino(inode);
3279
3280 log = root->log_root;
3281
3282 min_key.objectid = ino;
3283 min_key.type = key_type;
3284 min_key.offset = min_offset;
3285
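	/*
	 * btrfs_search_forward() skips tree blocks older than trans->transid,
	 * so this lands on the first key of this directory/key type that the
	 * current transaction could have touched.
	 */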
3286 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3287
3288 /*
3289 * we didn't find anything from this transaction, see if there
3290 * is anything at all
3291 */
3292 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
3293 min_key.objectid = ino;
3294 min_key.type = key_type;
3295 min_key.offset = (u64)-1;
3296 btrfs_release_path(path);
3297 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3298 if (ret < 0) {
3299 btrfs_release_path(path);
3300 return ret;
3301 }
3302 ret = btrfs_previous_item(root, path, ino, key_type);
3303
3304 /* if ret == 0 there are items for this type,
3305 * create a range to tell us the last key of this type.
3306 * otherwise, there are no items in this directory after
3307 * *min_offset, and we create a range to indicate that.
3308 */
3309 if (ret == 0) {
3310 struct btrfs_key tmp;
3311 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3312 path->slots[0]);
3313 if (key_type == tmp.type)
3314 first_offset = max(min_offset, tmp.offset) + 1;
3315 }
3316 goto done;
3317 }
3318
3319 /* go backward to find any previous key */
3320 ret = btrfs_previous_item(root, path, ino, key_type);
3321 if (ret == 0) {
3322 struct btrfs_key tmp;
3323 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3324 if (key_type == tmp.type) {
3325 first_offset = tmp.offset;
3326 ret = overwrite_item(trans, log, dst_path,
3327 path->nodes[0], path->slots[0],
3328 &tmp);
3329 if (ret) {
3330 err = ret;
3331 goto done;
3332 }
3333 }
3334 }
3335 btrfs_release_path(path);
3336
3337 /* find the first key from this transaction again */
3338 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3339 if (WARN_ON(ret != 0))
3340 goto done;
3341
3342 /*
3343 * we have a block from this transaction, log every item in it
3344 * from our directory
3345 */
3346 while (1) {
3347 struct btrfs_key tmp;
3348 src = path->nodes[0];
3349 nritems = btrfs_header_nritems(src);
3350 for (i = path->slots[0]; i < nritems; i++) {
3351 struct btrfs_dir_item *di;
3352
3353 btrfs_item_key_to_cpu(src, &min_key, i);
3354
3355 if (min_key.objectid != ino || min_key.type != key_type)
3356 goto done;
3357 ret = overwrite_item(trans, log, dst_path, src, i,
3358 &min_key);
3359 if (ret) {
3360 err = ret;
3361 goto done;
3362 }
3363
3364 /*
3365 * We must make sure that when we log a directory entry,
3366 * the corresponding inode, after log replay, has a
3367 * matching link count. For example:
3368 *
3369 * touch foo
3370 * mkdir mydir
3371 * sync
3372 * ln foo mydir/bar
3373 * xfs_io -c "fsync" mydir
3374 * <crash>
3375 * <mount fs and log replay>
3376 *
3377 * Would result in an fsync log that, when replayed, leaves our
3378 * file inode with a link count of 1 but with two directory
3379 * entries pointing to the same inode.
3380 * After removing one of the names, it would not be
3381 * possible to remove the other name, which always resulted
3382 * in stale file handle errors, and it would not
3383 * be possible to rmdir the parent directory, since
3384 * its i_size could never decrement to the value
3385 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors.
3386 */
3387 di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
3388 btrfs_dir_item_key_to_cpu(src, di, &tmp);
3389 if (ctx &&
3390 (btrfs_dir_transid(src, di) == trans->transid ||
3391 btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
3392 tmp.type != BTRFS_ROOT_ITEM_KEY)
3393 ctx->log_new_dentries = true;
3394 }
3395 path->slots[0] = nritems;
3396
3397 /*
3398 * look ahead to the next item and see if it is also
3399 * from this directory and from this transaction
3400 */
3401 ret = btrfs_next_leaf(root, path);
3402 if (ret == 1) {
3403 last_offset = (u64)-1;
3404 goto done;
3405 }
3406 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3407 if (tmp.objectid != ino || tmp.type != key_type) {
3408 last_offset = (u64)-1;
3409 goto done;
3410 }
3411 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3412 ret = overwrite_item(trans, log, dst_path,
3413 path->nodes[0], path->slots[0],
3414 &tmp);
3415 if (ret)
3416 err = ret;
3417 else
3418 last_offset = tmp.offset;
3419 goto done;
3420 }
3421 }
3422done:
3423 btrfs_release_path(path);
3424 btrfs_release_path(dst_path);
3425
3426 if (err == 0) {
3427 *last_offset_ret = last_offset;
3428 /*
3429 * insert the log range keys to indicate where the log
3430 * is valid
3431 */
3432 ret = insert_dir_log_key(trans, log, path, key_type,
3433 ino, first_offset, last_offset);
3434 if (ret)
3435 err = ret;
3436 }
3437 return err;
3438}
3439
3440/*
3441 * logging directories is very similar to logging inodes; we find all the items
3442 * from the current transaction and write them to the log.
3443 *
3444 * The recovery code scans the directory in the subvolume, and if it finds a
3445 * key in the range logged that is not present in the log tree, then it means
3446 * that dir entry was unlinked during the transaction.
3447 *
3448 * In order for that scan to work, we must include one key smaller than
3449 * the smallest logged by this transaction and one key larger than the largest
3450 * key logged by this transaction.
3451 */
3452static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3453 struct btrfs_root *root, struct inode *inode,
3454 struct btrfs_path *path,
3455 struct btrfs_path *dst_path,
3456 struct btrfs_log_ctx *ctx)
3457{
3458 u64 min_key;
3459 u64 max_key;
3460 int ret;
3461 int key_type = BTRFS_DIR_ITEM_KEY;
3462
3463again:
3464 min_key = 0;
3465 max_key = 0;
3466 while (1) {
3467 ret = log_dir_items(trans, root, inode, path,
3468 dst_path, key_type, ctx, min_key,
3469 &max_key);
3470 if (ret)
3471 return ret;
3472 if (max_key == (u64)-1)
3473 break;
3474 min_key = max_key + 1;
3475 }
3476
3477 if (key_type == BTRFS_DIR_ITEM_KEY) {
3478 key_type = BTRFS_DIR_INDEX_KEY;
3479 goto again;
3480 }
3481 return 0;
3482}
3483
3484/*
3485 * a helper function to drop items from the log before we relog an
3486 * inode. max_key_type indicates the highest item type to remove.
3487 * This cannot be run for file data extents because it does not
3488 * free the extents they point to.
3489 */
3490static int drop_objectid_items(struct btrfs_trans_handle *trans,
3491 struct btrfs_root *log,
3492 struct btrfs_path *path,
3493 u64 objectid, int max_key_type)
3494{
3495 int ret;
3496 struct btrfs_key key;
3497 struct btrfs_key found_key;
3498 int start_slot;
3499
3500 key.objectid = objectid;
3501 key.type = max_key_type;
3502 key.offset = (u64)-1;
3503
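	/*
	 * Repeatedly search for the last key of interest (objectid,
	 * max_key_type, -1ULL), step back one slot onto the last matching
	 * item, bin-search for the first slot with this objectid and delete
	 * the whole run with a single btrfs_del_items() call.
	 */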
3504 while (1) {
3505 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
3506 BUG_ON(ret == 0); /* Logic error */
3507 if (ret < 0)
3508 break;
3509
3510 if (path->slots[0] == 0)
3511 break;
3512
3513 path->slots[0]--;
3514 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3515 path->slots[0]);
3516
3517 if (found_key.objectid != objectid)
3518 break;
3519
3520 found_key.offset = 0;
3521 found_key.type = 0;
3522 ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
3523 &start_slot);
3524
3525 ret = btrfs_del_items(trans, log, path, start_slot,
3526 path->slots[0] - start_slot + 1);
3527 /*
3528 * If start slot isn't 0 then we don't need to re-search, we've
3529 * found the last guy with the objectid in this tree.
3530 */
3531 if (ret || start_slot != 0)
3532 break;
3533 btrfs_release_path(path);
3534 }
3535 btrfs_release_path(path);
3536 if (ret > 0)
3537 ret = 0;
3538 return ret;
3539}
3540
3541static void fill_inode_item(struct btrfs_trans_handle *trans,
3542 struct extent_buffer *leaf,
3543 struct btrfs_inode_item *item,
3544 struct inode *inode, int log_inode_only,
3545 u64 logged_isize)
3546{
3547 struct btrfs_map_token token;
3548
3549 btrfs_init_map_token(&token);
3550
3551 if (log_inode_only) {
3552 /* set the generation to zero so the recovery code
3553 * can tell the difference between logging
3554 * just to say 'this inode exists' and logging
3555 * to say 'update this inode with these values'
3556 */
3557 btrfs_set_token_inode_generation(leaf, item, 0, &token);
3558 btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
3559 } else {
3560 btrfs_set_token_inode_generation(leaf, item,
3561 BTRFS_I(inode)->generation,
3562 &token);
3563 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
3564 }
3565
3566 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3567 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3568 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3569 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3570
3571 btrfs_set_token_timespec_sec(leaf, &item->atime,
3572 inode->i_atime.tv_sec, &token);
3573 btrfs_set_token_timespec_nsec(leaf, &item->atime,
3574 inode->i_atime.tv_nsec, &token);
3575
3576 btrfs_set_token_timespec_sec(leaf, &item->mtime,
3577 inode->i_mtime.tv_sec, &token);
3578 btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3579 inode->i_mtime.tv_nsec, &token);
3580
3581 btrfs_set_token_timespec_sec(leaf, &item->ctime,
3582 inode->i_ctime.tv_sec, &token);
3583 btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3584 inode->i_ctime.tv_nsec, &token);
3585
3586 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3587 &token);
3588
3589 btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3590 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3591 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3592 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3593 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3594}
3595
3596static int log_inode_item(struct btrfs_trans_handle *trans,
3597 struct btrfs_root *log, struct btrfs_path *path,
3598 struct inode *inode)
3599{
3600 struct btrfs_inode_item *inode_item;
3601 int ret;
3602
3603 ret = btrfs_insert_empty_item(trans, log, path,
3604 &BTRFS_I(inode)->location,
3605 sizeof(*inode_item));
3606 if (ret && ret != -EEXIST)
3607 return ret;
3608 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3609 struct btrfs_inode_item);
3610 fill_inode_item(trans, path->nodes[0], inode_item, inode, 0, 0);
3611 btrfs_release_path(path);
3612 return 0;
3613}
3614
3615static noinline int copy_items(struct btrfs_trans_handle *trans,
3616 struct inode *inode,
3617 struct btrfs_path *dst_path,
3618 struct btrfs_path *src_path, u64 *last_extent,
3619 int start_slot, int nr, int inode_only,
3620 u64 logged_isize)
3621{
3622 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3623 unsigned long src_offset;
3624 unsigned long dst_offset;
3625 struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
3626 struct btrfs_file_extent_item *extent;
3627 struct btrfs_inode_item *inode_item;
3628 struct extent_buffer *src = src_path->nodes[0];
3629 struct btrfs_key first_key, last_key, key;
3630 int ret;
3631 struct btrfs_key *ins_keys;
3632 u32 *ins_sizes;
3633 char *ins_data;
3634 int i;
3635 struct list_head ordered_sums;
3636 int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3637 bool has_extents = false;
3638 bool need_find_last_extent = true;
3639 bool done = false;
3640
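	/*
	 * Batch-copy the nr items starting at start_slot from the source leaf
	 * into the log, collect checksums for any file extent items copied,
	 * and finally make sure explicit hole extents are logged for the gaps
	 * between them.
	 */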
3641 INIT_LIST_HEAD(&ordered_sums);
3642
3643 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
3644 nr * sizeof(u32), GFP_NOFS);
3645 if (!ins_data)
3646 return -ENOMEM;
3647
3648 first_key.objectid = (u64)-1;
3649
3650 ins_sizes = (u32 *)ins_data;
3651 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
3652
3653 for (i = 0; i < nr; i++) {
3654 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
3655 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
3656 }
3657 ret = btrfs_insert_empty_items(trans, log, dst_path,
3658 ins_keys, ins_sizes, nr);
3659 if (ret) {
3660 kfree(ins_data);
3661 return ret;
3662 }
3663
3664 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
3665 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
3666 dst_path->slots[0]);
3667
3668 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
3669
3670 if ((i == (nr - 1)))
3671 last_key = ins_keys[i];
3672
3673 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
3674 inode_item = btrfs_item_ptr(dst_path->nodes[0],
3675 dst_path->slots[0],
3676 struct btrfs_inode_item);
3677 fill_inode_item(trans, dst_path->nodes[0], inode_item,
3678 inode, inode_only == LOG_INODE_EXISTS,
3679 logged_isize);
3680 } else {
3681 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
3682 src_offset, ins_sizes[i]);
3683 }
3684
3685 /*
3686 * We set need_find_last_extent here in case we know we were
3687 * processing other items and then walk into the first extent in
3688 * the inode. If we don't hit an extent then nothing changes,
3689 * we'll do the last search the next time around.
3690 */
3691 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
3692 has_extents = true;
3693 if (first_key.objectid == (u64)-1)
3694 first_key = ins_keys[i];
3695 } else {
3696 need_find_last_extent = false;
3697 }
3698
3699 /* take a reference on file data extents so that truncates
3700 * or deletes of this inode don't have to relog the inode
3701 * again
3702 */
3703 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
3704 !skip_csum) {
3705 int found_type;
3706 extent = btrfs_item_ptr(src, start_slot + i,
3707 struct btrfs_file_extent_item);
3708
3709 if (btrfs_file_extent_generation(src, extent) < trans->transid)
3710 continue;
3711
3712 found_type = btrfs_file_extent_type(src, extent);
3713 if (found_type == BTRFS_FILE_EXTENT_REG) {
3714 u64 ds, dl, cs, cl;
3715 ds = btrfs_file_extent_disk_bytenr(src,
3716 extent);
3717 /* ds == 0 is a hole */
3718 if (ds == 0)
3719 continue;
3720
3721 dl = btrfs_file_extent_disk_num_bytes(src,
3722 extent);
3723 cs = btrfs_file_extent_offset(src, extent);
3724 cl = btrfs_file_extent_num_bytes(src,
3725 extent);
3726 if (btrfs_file_extent_compression(src,
3727 extent)) {
3728 cs = 0;
3729 cl = dl;
3730 }
3731
3732 ret = btrfs_lookup_csums_range(
3733 fs_info->csum_root,
3734 ds + cs, ds + cs + cl - 1,
3735 &ordered_sums, 0);
3736 if (ret) {
3737 btrfs_release_path(dst_path);
3738 kfree(ins_data);
3739 return ret;
3740 }
3741 }
3742 }
3743 }
3744
3745 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
3746 btrfs_release_path(dst_path);
3747 kfree(ins_data);
3748
3749 /*
3750 * we have to do this after the loop above to avoid changing the
3751 * log tree while trying to change the log tree.
3752 */
3753 ret = 0;
3754 while (!list_empty(&ordered_sums)) {
3755 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3756 struct btrfs_ordered_sum,
3757 list);
3758 if (!ret)
3759 ret = btrfs_csum_file_blocks(trans, log, sums);
3760 list_del(&sums->list);
3761 kfree(sums);
3762 }
3763
3764 if (!has_extents)
3765 return ret;
3766
3767 if (need_find_last_extent && *last_extent == first_key.offset) {
3768 /*
3769 * We don't have any leaves between our current one and the one
3770 * we processed before that can have file extent items for our
3771 * inode (and have a generation number smaller than our current
3772 * transaction id).
3773 */
3774 need_find_last_extent = false;
3775 }
3776
3777 /*
3778 * Because we use btrfs_search_forward we could skip leaves that were
3779 * not modified and then assume *last_extent is valid when it really
3780 * isn't. So back up to the previous leaf and read the end of the last
3781 * extent before we go and fill in holes.
3782 */
3783 if (need_find_last_extent) {
3784 u64 len;
3785
3786 ret = btrfs_prev_leaf(BTRFS_I(inode)->root, src_path);
3787 if (ret < 0)
3788 return ret;
3789 if (ret)
3790 goto fill_holes;
3791 if (src_path->slots[0])
3792 src_path->slots[0]--;
3793 src = src_path->nodes[0];
3794 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
3795 if (key.objectid != btrfs_ino(inode) ||
3796 key.type != BTRFS_EXTENT_DATA_KEY)
3797 goto fill_holes;
3798 extent = btrfs_item_ptr(src, src_path->slots[0],
3799 struct btrfs_file_extent_item);
3800 if (btrfs_file_extent_type(src, extent) ==
3801 BTRFS_FILE_EXTENT_INLINE) {
3802 len = btrfs_file_extent_inline_len(src,
3803 src_path->slots[0],
3804 extent);
3805 *last_extent = ALIGN(key.offset + len,
3806 fs_info->sectorsize);
3807 } else {
3808 len = btrfs_file_extent_num_bytes(src, extent);
3809 *last_extent = key.offset + len;
3810 }
3811 }
3812fill_holes:
3813 /* So we did prev_leaf; now we need to move to the next leaf, but a few
3814 * things could have happened
3815 *
3816 * 1) A merge could have happened, so we could currently be on a leaf
3817 * that holds what we were copying in the first place.
3818 * 2) A split could have happened, and now not all of the items we want
3819 * are on the same leaf.
3820 *
3821 * So we need to adjust how we search for holes, we need to drop the
3822 * path and re-search for the first extent key we found, and then walk
3823 * forward until we hit the last one we copied.
3824 */
3825 if (need_find_last_extent) {
3826 /* btrfs_prev_leaf could return 1 without releasing the path */
3827 btrfs_release_path(src_path);
3828 ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &first_key,
3829 src_path, 0, 0);
3830 if (ret < 0)
3831 return ret;
3832 ASSERT(ret == 0);
3833 src = src_path->nodes[0];
3834 i = src_path->slots[0];
3835 } else {
3836 i = start_slot;
3837 }
3838
3839 /*
3840 * Ok so here we need to go through and fill in any holes we may have
3841 * to make sure that holes are punched for those areas in case they had
3842 * extents previously.
3843 */
3844 while (!done) {
3845 u64 offset, len;
3846 u64 extent_end;
3847
3848 if (i >= btrfs_header_nritems(src_path->nodes[0])) {
3849 ret = btrfs_next_leaf(BTRFS_I(inode)->root, src_path);
3850 if (ret < 0)
3851 return ret;
3852 ASSERT(ret == 0);
3853 src = src_path->nodes[0];
3854 i = 0;
3855 }
3856
3857 btrfs_item_key_to_cpu(src, &key, i);
3858 if (!btrfs_comp_cpu_keys(&key, &last_key))
3859 done = true;
3860 if (key.objectid != btrfs_ino(inode) ||
3861 key.type != BTRFS_EXTENT_DATA_KEY) {
3862 i++;
3863 continue;
3864 }
3865 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
3866 if (btrfs_file_extent_type(src, extent) ==
3867 BTRFS_FILE_EXTENT_INLINE) {
3868 len = btrfs_file_extent_inline_len(src, i, extent);
3869 extent_end = ALIGN(key.offset + len,
3870 fs_info->sectorsize);
3871 } else {
3872 len = btrfs_file_extent_num_bytes(src, extent);
3873 extent_end = key.offset + len;
3874 }
3875 i++;
3876
3877 if (*last_extent == key.offset) {
3878 *last_extent = extent_end;
3879 continue;
3880 }
3881 offset = *last_extent;
3882 len = key.offset - *last_extent;
3883 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
3884 offset, 0, 0, len, 0, len, 0,
3885 0, 0);
3886 if (ret)
3887 break;
3888 *last_extent = extent_end;
3889 }
3890 /*
3891 * Need to let the callers know we dropped the path so they should
3892 * re-search.
3893 */
3894 if (!ret && need_find_last_extent)
3895 ret = 1;
3896 return ret;
3897}
3898
3899static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
3900{
3901 struct extent_map *em1, *em2;
3902
3903 em1 = list_entry(a, struct extent_map, list);
3904 em2 = list_entry(b, struct extent_map, list);
3905
3906 if (em1->start < em2->start)
3907 return -1;
3908 else if (em1->start > em2->start)
3909 return 1;
3910 return 0;
3911}
3912
3913static int wait_ordered_extents(struct btrfs_trans_handle *trans,
3914 struct inode *inode,
3915 struct btrfs_root *root,
3916 const struct extent_map *em,
3917 const struct list_head *logged_list,
3918 bool *ordered_io_error)
3919{
3920 struct btrfs_fs_info *fs_info = root->fs_info;
3921 struct btrfs_ordered_extent *ordered;
3922 struct btrfs_root *log = root->log_root;
3923 u64 mod_start = em->mod_start;
3924 u64 mod_len = em->mod_len;
3925 const bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3926 u64 csum_offset;
3927 u64 csum_len;
3928 LIST_HEAD(ordered_sums);
3929 int ret = 0;
3930
3931 *ordered_io_error = false;
3932
3933 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
3934 em->block_start == EXTENT_MAP_HOLE)
3935 return 0;
3936
3937 /*
3938 * Wait for any ordered extent that covers our extent map. If it
3939 * finishes without an error, first check and see if our csums are on
3940 * our outstanding ordered extents.
3941 */
3942 list_for_each_entry(ordered, logged_list, log_list) {
3943 struct btrfs_ordered_sum *sum;
3944
3945 if (!mod_len)
3946 break;
3947
3948 if (ordered->file_offset + ordered->len <= mod_start ||
3949 mod_start + mod_len <= ordered->file_offset)
3950 continue;
3951
3952 if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
3953 !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
3954 !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
3955 const u64 start = ordered->file_offset;
3956 const u64 end = ordered->file_offset + ordered->len - 1;
3957
3958 WARN_ON(ordered->inode != inode);
3959 filemap_fdatawrite_range(inode->i_mapping, start, end);
3960 }
3961
3962 wait_event(ordered->wait,
3963 (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) ||
3964 test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)));
3965
3966 if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) {
3967 /*
3968 * Clear the AS_EIO/AS_ENOSPC flags from the inode's
3969 * i_mapping flags, so that the next fsync won't get
3970 * an outdated io error too.
3971 */
3972 filemap_check_errors(inode->i_mapping);
3973 *ordered_io_error = true;
3974 break;
3975 }
3976 /*
3977 * We are going to copy all the csums on this ordered extent, so
3978 * go ahead and adjust mod_start and mod_len in case this
3979 * ordered extent has already been logged.
3980 */
3981 if (ordered->file_offset > mod_start) {
3982 if (ordered->file_offset + ordered->len >=
3983 mod_start + mod_len)
3984 mod_len = ordered->file_offset - mod_start;
3985 /*
3986 * If we have this case
3987 *
3988 * |--------- logged extent ---------|
3989 * |----- ordered extent ----|
3990 *
3991 * Just don't mess with mod_start and mod_len, we'll
3992 * just end up logging more csums than we need and it
3993 * will be ok.
3994 */
3995 } else {
3996 if (ordered->file_offset + ordered->len <
3997 mod_start + mod_len) {
3998 mod_len = (mod_start + mod_len) -
3999 (ordered->file_offset + ordered->len);
4000 mod_start = ordered->file_offset +
4001 ordered->len;
4002 } else {
4003 mod_len = 0;
4004 }
4005 }
4006
4007 if (skip_csum)
4008 continue;
4009
4010 /*
4011 * To keep us from looping for the above case of an ordered
4012 * extent that falls inside of the logged extent.
4013 */
4014 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
4015 &ordered->flags))
4016 continue;
4017
4018 list_for_each_entry(sum, &ordered->list, list) {
4019 ret = btrfs_csum_file_blocks(trans, log, sum);
4020 if (ret)
4021 break;
4022 }
4023 }
4024
4025 if (*ordered_io_error || !mod_len || ret || skip_csum)
4026 return ret;
4027
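	/*
	 * For compressed extents the checksums cover the entire compressed
	 * extent on disk, so look up the full on-disk length; otherwise only
	 * the modified part of the extent needs its checksums copied.
	 */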
4028 if (em->compress_type) {
4029 csum_offset = 0;
4030 csum_len = max(em->block_len, em->orig_block_len);
4031 } else {
4032 csum_offset = mod_start - em->start;
4033 csum_len = mod_len;
4034 }
4035
4036 /* block start is already adjusted for the file extent offset. */
4037 ret = btrfs_lookup_csums_range(fs_info->csum_root,
4038 em->block_start + csum_offset,
4039 em->block_start + csum_offset +
4040 csum_len - 1, &ordered_sums, 0);
4041 if (ret)
4042 return ret;
4043
4044 while (!list_empty(&ordered_sums)) {
4045 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4046 struct btrfs_ordered_sum,
4047 list);
4048 if (!ret)
4049 ret = btrfs_csum_file_blocks(trans, log, sums);
4050 list_del(&sums->list);
4051 kfree(sums);
4052 }
4053
4054 return ret;
4055}
4056
4057static int log_one_extent(struct btrfs_trans_handle *trans,
4058 struct inode *inode, struct btrfs_root *root,
4059 const struct extent_map *em,
4060 struct btrfs_path *path,
4061 const struct list_head *logged_list,
4062 struct btrfs_log_ctx *ctx)
4063{
4064 struct btrfs_root *log = root->log_root;
4065 struct btrfs_file_extent_item *fi;
4066 struct extent_buffer *leaf;
4067 struct btrfs_map_token token;
4068 struct btrfs_key key;
4069 u64 extent_offset = em->start - em->orig_start;
4070 u64 block_len;
4071 int ret;
4072 int extent_inserted = 0;
4073 bool ordered_io_err = false;
4074
4075 ret = wait_ordered_extents(trans, inode, root, em, logged_list,
4076 &ordered_io_err);
4077 if (ret)
4078 return ret;
4079
4080 if (ordered_io_err) {
4081 ctx->io_err = -EIO;
4082 return 0;
4083 }
4084
4085 btrfs_init_map_token(&token);
4086
4087 ret = __btrfs_drop_extents(trans, log, inode, path, em->start,
4088 em->start + em->len, NULL, 0, 1,
4089 sizeof(*fi), &extent_inserted);
4090 if (ret)
4091 return ret;
4092
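	/*
	 * __btrfs_drop_extents() above removed any file extent items in the
	 * log overlapping [em->start, em->start + em->len) and, when it
	 * could, already reserved a slot for the replacement item
	 * (extent_inserted); only insert a fresh item if it did not.
	 */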
4093 if (!extent_inserted) {
4094 key.objectid = btrfs_ino(inode);
4095 key.type = BTRFS_EXTENT_DATA_KEY;
4096 key.offset = em->start;
4097
4098 ret = btrfs_insert_empty_item(trans, log, path, &key,
4099 sizeof(*fi));
4100 if (ret)
4101 return ret;
4102 }
4103 leaf = path->nodes[0];
4104 fi = btrfs_item_ptr(leaf, path->slots[0],
4105 struct btrfs_file_extent_item);
4106
4107 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
4108 &token);
4109 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4110 btrfs_set_token_file_extent_type(leaf, fi,
4111 BTRFS_FILE_EXTENT_PREALLOC,
4112 &token);
4113 else
4114 btrfs_set_token_file_extent_type(leaf, fi,
4115 BTRFS_FILE_EXTENT_REG,
4116 &token);
4117
4118 block_len = max(em->block_len, em->orig_block_len);
4119 if (em->compress_type != BTRFS_COMPRESS_NONE) {
4120 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4121 em->block_start,
4122 &token);
4123 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4124 &token);
4125 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
4126 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4127 em->block_start -
4128 extent_offset, &token);
4129 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4130 &token);
4131 } else {
4132 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
4133 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
4134 &token);
4135 }
4136
4137 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
4138 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
4139 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
4140 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
4141 &token);
4142 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
4143 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
4144 btrfs_mark_buffer_dirty(leaf);
4145
4146 btrfs_release_path(path);
4147
4148 return ret;
4149}
4150
4151static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4152 struct btrfs_root *root,
4153 struct inode *inode,
4154 struct btrfs_path *path,
4155 struct list_head *logged_list,
4156 struct btrfs_log_ctx *ctx,
4157 const u64 start,
4158 const u64 end)
4159{
4160 struct extent_map *em, *n;
4161 struct list_head extents;
4162 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
4163 u64 test_gen;
4164 int ret = 0;
4165 int num = 0;
4166
4167 INIT_LIST_HEAD(&extents);
4168
4169 down_write(&BTRFS_I(inode)->dio_sem);
4170 write_lock(&tree->lock);
4171 test_gen = root->fs_info->last_trans_committed;
4172
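	/*
	 * Only extent maps with a generation newer than the last committed
	 * transaction need logging; older ones were already persisted by a
	 * full transaction commit.
	 */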
4173 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4174 list_del_init(&em->list);
4175
4176 /*
4177 * Just an arbitrary number; logging can get really CPU intensive
4178 * once we start getting a lot of extents, and once we have that
4179 * many extents we just want to commit the transaction since it
4180 * will be faster.
4181 */
4182 if (++num > 32768) {
4183 list_del_init(&tree->modified_extents);
4184 ret = -EFBIG;
4185 goto process;
4186 }
4187
4188 if (em->generation <= test_gen)
4189 continue;
4190 /* Need a ref to keep it from getting evicted from cache */
4191 atomic_inc(&em->refs);
4192 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
4193 list_add_tail(&em->list, &extents);
4194 num++;
4195 }
4196
4197 list_sort(NULL, &extents, extent_cmp);
4198 btrfs_get_logged_extents(inode, logged_list, start, end);
4199 /*
4200 * Some ordered extents started by fsync might have completed
4201 * before we could collect them into the list logged_list, which
4202 * means they're gone, not in our logged_list nor in the inode's
4203 * ordered tree. We want the application/user space to know an
4204 * error happened while attempting to persist file data so that
4205 * it can take proper action. If such error happened, we leave
4206 * without writing to the log tree and the fsync must report the
4207 * file data write error and not commit the current transaction.
4208 */
4209 ret = filemap_check_errors(inode->i_mapping);
4210 if (ret)
4211 ctx->io_err = ret;
4212process:
4213 while (!list_empty(&extents)) {
4214 em = list_entry(extents.next, struct extent_map, list);
4215
4216 list_del_init(&em->list);
4217
4218 /*
4219 * If we had an error we just need to delete everybody from our
4220 * private list.
4221 */
4222 if (ret) {
4223 clear_em_logging(tree, em);
4224 free_extent_map(em);
4225 continue;
4226 }
4227
4228 write_unlock(&tree->lock);
4229
4230 ret = log_one_extent(trans, inode, root, em, path, logged_list,
4231 ctx);
4232 write_lock(&tree->lock);
4233 clear_em_logging(tree, em);
4234 free_extent_map(em);
4235 }
4236 WARN_ON(!list_empty(&extents));
4237 write_unlock(&tree->lock);
4238 up_write(&BTRFS_I(inode)->dio_sem);
4239
4240 btrfs_release_path(path);
4241 return ret;
4242}
4243
4244static int logged_inode_size(struct btrfs_root *log, struct inode *inode,
4245 struct btrfs_path *path, u64 *size_ret)
4246{
4247 struct btrfs_key key;
4248 int ret;
4249
4250 key.objectid = btrfs_ino(inode);
4251 key.type = BTRFS_INODE_ITEM_KEY;
4252 key.offset = 0;
4253
4254 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
4255 if (ret < 0) {
4256 return ret;
4257 } else if (ret > 0) {
4258 *size_ret = 0;
4259 } else {
4260 struct btrfs_inode_item *item;
4261
4262 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4263 struct btrfs_inode_item);
4264 *size_ret = btrfs_inode_size(path->nodes[0], item);
4265 }
4266
4267 btrfs_release_path(path);
4268 return 0;
4269}
4270
4271/*
4272 * At the moment we always log all xattrs. This is to figure out at log replay
4273 * time which xattrs must have their deletion replayed. If an xattr is missing
4274 * in the log tree and exists in the fs/subvol tree, we delete it. This is
4275 * because if an xattr is deleted, the inode is fsynced and a power failure
4276 * happens, causing the log to be replayed the next time the fs is mounted,
4277 * we want the xattr to not exist anymore (same behaviour as other filesystems
4278 * with a journal, ext3/4, xfs, f2fs, etc).
4279 */
4280static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4281 struct btrfs_root *root,
4282 struct inode *inode,
4283 struct btrfs_path *path,
4284 struct btrfs_path *dst_path)
4285{
4286 int ret;
4287 struct btrfs_key key;
4288 const u64 ino = btrfs_ino(inode);
4289 int ins_nr = 0;
4290 int start_slot = 0;
4291
4292 key.objectid = ino;
4293 key.type = BTRFS_XATTR_ITEM_KEY;
4294 key.offset = 0;
4295
4296 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4297 if (ret < 0)
4298 return ret;
4299
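	/*
	 * Walk every BTRFS_XATTR_ITEM_KEY item of this inode, batching runs
	 * of contiguous slots via start_slot/ins_nr and flushing each run to
	 * the log with copy_items() at leaf boundaries and at the end.
	 */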
4300 while (true) {
4301 int slot = path->slots[0];
4302 struct extent_buffer *leaf = path->nodes[0];
4303 int nritems = btrfs_header_nritems(leaf);
4304
4305 if (slot >= nritems) {
4306 if (ins_nr > 0) {
4307 u64 last_extent = 0;
4308
4309 ret = copy_items(trans, inode, dst_path, path,
4310 &last_extent, start_slot,
4311 ins_nr, 1, 0);
4312 /* can't be 1, extent items aren't processed */
4313 ASSERT(ret <= 0);
4314 if (ret < 0)
4315 return ret;
4316 ins_nr = 0;
4317 }
4318 ret = btrfs_next_leaf(root, path);
4319 if (ret < 0)
4320 return ret;
4321 else if (ret > 0)
4322 break;
4323 continue;
4324 }
4325
4326 btrfs_item_key_to_cpu(leaf, &key, slot);
4327 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
4328 break;
4329
4330 if (ins_nr == 0)
4331 start_slot = slot;
4332 ins_nr++;
4333 path->slots[0]++;
4334 cond_resched();
4335 }
4336 if (ins_nr > 0) {
4337 u64 last_extent = 0;
4338
4339 ret = copy_items(trans, inode, dst_path, path,
4340 &last_extent, start_slot,
4341 ins_nr, 1, 0);
4342 /* can't be 1, extent items aren't processed */
4343 ASSERT(ret <= 0);
4344 if (ret < 0)
4345 return ret;
4346 }
4347
4348 return 0;
4349}
4350
4351/*
4352 * If the no holes feature is enabled we need to make sure any hole between the
4353 * last extent and the i_size of our inode is explicitly marked in the log. This
4354 * is to make sure that doing something like:
4355 *
4356 * 1) create file with 128Kb of data
4357 * 2) truncate file to 64Kb
4358 * 3) truncate file to 256Kb
4359 * 4) fsync file
4360 * 5) <crash/power failure>
4361 * 6) mount fs and trigger log replay
4362 *
4363 * Will give us a file with a size of 256Kb whose first 64Kb of data match what
4364 * the file had at step 1, while the last 192Kb of the
4365 * file correspond to a hole. The presence of explicit holes in a log tree is
4366 * what guarantees that log replay will remove/adjust file extent items in the
4367 * fs/subvol tree.
4368 *
4369 * Here we do not need to care about holes between extents; that is already done
4370 * by copy_items(). We also only need to do this in the full sync path, where we
4371 * lookup for extents from the fs/subvol tree only. In the fast path case, we
4372 * lookup the list of modified extent maps and if any represents a hole, we
4373 * insert a corresponding extent representing a hole in the log tree.
4374 */
4375static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
4376 struct btrfs_root *root,
4377 struct inode *inode,
4378 struct btrfs_path *path)
4379{
4380 struct btrfs_fs_info *fs_info = root->fs_info;
4381 int ret;
4382 struct btrfs_key key;
4383 u64 hole_start;
4384 u64 hole_size;
4385 struct extent_buffer *leaf;
4386 struct btrfs_root *log = root->log_root;
4387 const u64 ino = btrfs_ino(inode);
4388 const u64 i_size = i_size_read(inode);
4389
4390 if (!btrfs_fs_incompat(fs_info, NO_HOLES))
4391 return 0;
4392
4393 key.objectid = ino;
4394 key.type = BTRFS_EXTENT_DATA_KEY;
4395 key.offset = (u64)-1;
4396
4397 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4398 ASSERT(ret != 0);
4399 if (ret < 0)
4400 return ret;
4401
4402 ASSERT(path->slots[0] > 0);
4403 path->slots[0]--;
4404 leaf = path->nodes[0];
4405 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4406
4407 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
4408 /* inode does not have any extents */
4409 hole_start = 0;
4410 hole_size = i_size;
4411 } else {
4412 struct btrfs_file_extent_item *extent;
4413 u64 len;
4414
4415 /*
4416 * If there's an extent beyond i_size, an explicit hole was
4417 * already inserted by copy_items().
4418 */
4419 if (key.offset >= i_size)
4420 return 0;
4421
4422 extent = btrfs_item_ptr(leaf, path->slots[0],
4423 struct btrfs_file_extent_item);
4424
4425 if (btrfs_file_extent_type(leaf, extent) ==
4426 BTRFS_FILE_EXTENT_INLINE) {
4427 len = btrfs_file_extent_inline_len(leaf,
4428 path->slots[0],
4429 extent);
4430 ASSERT(len == i_size);
4431 return 0;
4432 }
4433
4434 len = btrfs_file_extent_num_bytes(leaf, extent);
4435 /* Last extent goes beyond i_size, no need to log a hole. */
4436 if (key.offset + len > i_size)
4437 return 0;
4438 hole_start = key.offset + len;
4439 hole_size = i_size - hole_start;
4440 }
4441 btrfs_release_path(path);
4442
4443 /* Last extent ends at i_size. */
4444 if (hole_size == 0)
4445 return 0;
4446
4447 hole_size = ALIGN(hole_size, fs_info->sectorsize);
4448 ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
4449 hole_size, 0, hole_size, 0, 0, 0);
4450 return ret;
4451}
4452
4453/*
4454 * When we are logging a new inode X, check whether it has a reference that
4455 * matches a reference from some other inode Y created in a past transaction
4456 * and that was renamed in the current transaction. If we don't do this, then at
4457 * log replay time we can lose inode Y (and all its files if it's a directory):
4458 *
4459 * mkdir /mnt/x
4460 * echo "hello world" > /mnt/x/foobar
4461 * sync
4462 * mv /mnt/x /mnt/y
4463 * mkdir /mnt/x # or touch /mnt/x
4464 * xfs_io -c fsync /mnt/x
4465 * <power fail>
4466 * mount fs, trigger log replay
4467 *
4468 * After the log replay procedure, we would lose the first directory and all its
4469 * files (file foobar).
4470 * For the case where inode Y is not a directory we simply end up losing it:
4471 *
4472 * echo "123" > /mnt/foo
4473 * sync
4474 * mv /mnt/foo /mnt/bar
4475 * echo "abc" > /mnt/foo
4476 * xfs_io -c fsync /mnt/foo
4477 * <power fail>
4478 *
4479 * We also need this for cases where a snapshot entry is replaced by some other
4480 * entry (file or directory) otherwise we end up with an unreplayable log due to
4481 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
4482 * if it were a regular entry:
4483 *
4484 * mkdir /mnt/x
4485 * btrfs subvolume snapshot /mnt /mnt/x/snap
4486 * btrfs subvolume delete /mnt/x/snap
4487 * rmdir /mnt/x
4488 * mkdir /mnt/x
4489 * fsync /mnt/x or fsync some new file inside it
4490 * <power fail>
4491 *
4492 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
4493 * the same transaction.
4494 */
4495static int btrfs_check_ref_name_override(struct extent_buffer *eb,
4496 const int slot,
4497 const struct btrfs_key *key,
4498 struct inode *inode,
4499 u64 *other_ino)
4500{
4501 int ret;
4502 struct btrfs_path *search_path;
4503 char *name = NULL;
4504 u32 name_len = 0;
4505 u32 item_size = btrfs_item_size_nr(eb, slot);
4506 u32 cur_offset = 0;
4507 unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
4508
4509 search_path = btrfs_alloc_path();
4510 if (!search_path)
4511 return -ENOMEM;
4512 search_path->search_commit_root = 1;
4513 search_path->skip_locking = 1;
4514
4515 while (cur_offset < item_size) {
4516 u64 parent;
4517 u32 this_name_len;
4518 u32 this_len;
4519 unsigned long name_ptr;
4520 struct btrfs_dir_item *di;
4521
4522 if (key->type == BTRFS_INODE_REF_KEY) {
4523 struct btrfs_inode_ref *iref;
4524
4525 iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
4526 parent = key->offset;
4527 this_name_len = btrfs_inode_ref_name_len(eb, iref);
4528 name_ptr = (unsigned long)(iref + 1);
4529 this_len = sizeof(*iref) + this_name_len;
4530 } else {
4531 struct btrfs_inode_extref *extref;
4532
4533 extref = (struct btrfs_inode_extref *)(ptr +
4534 cur_offset);
4535 parent = btrfs_inode_extref_parent(eb, extref);
4536 this_name_len = btrfs_inode_extref_name_len(eb, extref);
4537 name_ptr = (unsigned long)&extref->name;
4538 this_len = sizeof(*extref) + this_name_len;
4539 }
4540
4541 if (this_name_len > name_len) {
4542 char *new_name;
4543
4544 new_name = krealloc(name, this_name_len, GFP_NOFS);
4545 if (!new_name) {
4546 ret = -ENOMEM;
4547 goto out;
4548 }
4549 name_len = this_name_len;
4550 name = new_name;
4551 }
4552
4553 read_extent_buffer(eb, name, name_ptr, this_name_len);
4554 di = btrfs_lookup_dir_item(NULL, BTRFS_I(inode)->root,
4555 search_path, parent,
4556 name, this_name_len, 0);
4557 if (di && !IS_ERR(di)) {
4558 struct btrfs_key di_key;
4559
4560 btrfs_dir_item_key_to_cpu(search_path->nodes[0],
4561 di, &di_key);
4562 if (di_key.type == BTRFS_INODE_ITEM_KEY) {
4563 ret = 1;
4564 *other_ino = di_key.objectid;
4565 } else {
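			/*
			 * The conflicting directory entry does not point to an
			 * inode item, so it refers to something like a
			 * subvolume/snapshot root (BTRFS_ROOT_ITEM_KEY). We
			 * can not deal with that here, so return -EAGAIN and
			 * let the error propagate, which ends up forcing a
			 * full transaction commit for the fsync.
			 */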
4566 ret = -EAGAIN;
4567 }
4568 goto out;
4569 } else if (IS_ERR(di)) {
4570 ret = PTR_ERR(di);
4571 goto out;
4572 }
4573 btrfs_release_path(search_path);
4574
4575 cur_offset += this_len;
4576 }
4577 ret = 0;
4578out:
4579 btrfs_free_path(search_path);
4580 kfree(name);
4581 return ret;
4582}
4583
4584/* log a single inode in the tree log.
4585 * At least one parent directory for this inode must exist in the tree
4586 * or be logged already.
4587 *
4588 * Any items from this inode changed by the current transaction are copied
4589 * to the log tree. An extra reference is taken on any extents in this
4590 * file, allowing us to avoid a whole pile of corner cases around logging
4591 * blocks that have been removed from the tree.
4592 *
4593 * See LOG_INODE_ALL and related defines for a description of what inode_only
4594 * does.
4595 *
4596 * This handles both files and directories.
4597 */
4598static int btrfs_log_inode(struct btrfs_trans_handle *trans,
4599 struct btrfs_root *root, struct inode *inode,
4600 int inode_only,
4601 const loff_t start,
4602 const loff_t end,
4603 struct btrfs_log_ctx *ctx)
4604{
4605 struct btrfs_fs_info *fs_info = root->fs_info;
4606 struct btrfs_path *path;
4607 struct btrfs_path *dst_path;
4608 struct btrfs_key min_key;
4609 struct btrfs_key max_key;
4610 struct btrfs_root *log = root->log_root;
4611 struct extent_buffer *src = NULL;
4612 LIST_HEAD(logged_list);
4613 u64 last_extent = 0;
4614 int err = 0;
4615 int ret;
4616 int nritems;
4617 int ins_start_slot = 0;
4618 int ins_nr;
4619 bool fast_search = false;
4620 u64 ino = btrfs_ino(inode);
4621 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4622 u64 logged_isize = 0;
4623 bool need_log_inode_item = true;
4624
4625 path = btrfs_alloc_path();
4626 if (!path)
4627 return -ENOMEM;
4628 dst_path = btrfs_alloc_path();
4629 if (!dst_path) {
4630 btrfs_free_path(path);
4631 return -ENOMEM;
4632 }
4633
4634 min_key.objectid = ino;
4635 min_key.type = BTRFS_INODE_ITEM_KEY;
4636 min_key.offset = 0;
4637
4638 max_key.objectid = ino;
4639
4640
4641 /* today the code can only do partial logging of directories */
4642 if (S_ISDIR(inode->i_mode) ||
4643 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4644 &BTRFS_I(inode)->runtime_flags) &&
4645 inode_only >= LOG_INODE_EXISTS))
4646 max_key.type = BTRFS_XATTR_ITEM_KEY;
4647 else
4648 max_key.type = (u8)-1;
4649 max_key.offset = (u64)-1;
4650
4651 /*
4652 * Only run delayed items if we are a dir or a new file.
4653 * Otherwise commit the delayed inode only, which is needed in
4654 * order for the log replay code to mark inodes for link count
4655 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
4656 */
4657 if (S_ISDIR(inode->i_mode) ||
4658 BTRFS_I(inode)->generation > fs_info->last_trans_committed)
4659 ret = btrfs_commit_inode_delayed_items(trans, inode);
4660 else
4661 ret = btrfs_commit_inode_delayed_inode(inode);
4662
4663 if (ret) {
4664 btrfs_free_path(path);
4665 btrfs_free_path(dst_path);
4666 return ret;
4667 }
4668
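	/*
	 * LOG_OTHER_INODE means we are being called from btrfs_log_inode()
	 * itself, while the log_mutex of the inode that triggered the fsync is
	 * held. Use a nested lock class to keep lockdep happy and log only
	 * what is needed to recreate this inode (LOG_INODE_EXISTS).
	 */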
4669 if (inode_only == LOG_OTHER_INODE) {
4670 inode_only = LOG_INODE_EXISTS;
4671 mutex_lock_nested(&BTRFS_I(inode)->log_mutex,
4672 SINGLE_DEPTH_NESTING);
4673 } else {
4674 mutex_lock(&BTRFS_I(inode)->log_mutex);
4675 }
4676
4677 /*
4678 * a brute force approach to making sure we get the most uptodate
4679 * copies of everything.
4680 */
4681 if (S_ISDIR(inode->i_mode)) {
4682 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
4683
4684 if (inode_only == LOG_INODE_EXISTS)
4685 max_key_type = BTRFS_XATTR_ITEM_KEY;
4686 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
4687 } else {
4688 if (inode_only == LOG_INODE_EXISTS) {
4689 /*
4690 * Make sure the new inode item we write to the log has
4691 * the same isize as the current one (if it exists).
4692 * This is necessary to prevent data loss after log
4693 * replay, and also to prevent doing a wrong expanding
4694 * truncate - for e.g. create file, write 4K into offset
4695 * 0, fsync, write 4K into offset 4096, add hard link,
4696 * fsync some other file (to sync log), power fail - if
4697 * we use the inode's current i_size, after log replay
4698 * we get a 8Kb file, with the last 4Kb extent as a hole
4699 * (zeroes), as if an expanding truncate happened,
4700 * instead of getting a file of 4Kb only.
4701 */
4702 err = logged_inode_size(log, inode, path,
4703 &logged_isize);
4704 if (err)
4705 goto out_unlock;
4706 }
4707 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4708 &BTRFS_I(inode)->runtime_flags)) {
4709 if (inode_only == LOG_INODE_EXISTS) {
4710 max_key.type = BTRFS_XATTR_ITEM_KEY;
4711 ret = drop_objectid_items(trans, log, path, ino,
4712 max_key.type);
4713 } else {
4714 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4715 &BTRFS_I(inode)->runtime_flags);
4716 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
4717 &BTRFS_I(inode)->runtime_flags);
4718 while (1) {
4719 ret = btrfs_truncate_inode_items(trans,
4720 log, inode, 0, 0);
4721 if (ret != -EAGAIN)
4722 break;
4723 }
4724 }
4725 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
4726 &BTRFS_I(inode)->runtime_flags) ||
4727 inode_only == LOG_INODE_EXISTS) {
4728 if (inode_only == LOG_INODE_ALL)
4729 fast_search = true;
4730 max_key.type = BTRFS_XATTR_ITEM_KEY;
4731 ret = drop_objectid_items(trans, log, path, ino,
4732 max_key.type);
4733 } else {
4734 if (inode_only == LOG_INODE_ALL)
4735 fast_search = true;
4736 goto log_extents;
4737 }
4738
4739 }
4740 if (ret) {
4741 err = ret;
4742 goto out_unlock;
4743 }
4744
4745 while (1) {
4746 ins_nr = 0;
4747 ret = btrfs_search_forward(root, &min_key,
4748 path, trans->transid);
4749 if (ret < 0) {
4750 err = ret;
4751 goto out_unlock;
4752 }
4753 if (ret != 0)
4754 break;
4755again:
4756 /* note, ins_nr might be > 0 here, cleanup outside the loop */
4757 if (min_key.objectid != ino)
4758 break;
4759 if (min_key.type > max_key.type)
4760 break;
4761
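		/*
		 * If the inode item is copied by copy_items() below, we don't
		 * need to log it explicitly again later (see the
		 * need_log_inode_item check further down).
		 */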
4762 if (min_key.type == BTRFS_INODE_ITEM_KEY)
4763 need_log_inode_item = false;
4764
4765 if ((min_key.type == BTRFS_INODE_REF_KEY ||
4766 min_key.type == BTRFS_INODE_EXTREF_KEY) &&
4767 BTRFS_I(inode)->generation == trans->transid) {
4768 u64 other_ino = 0;
4769
4770 ret = btrfs_check_ref_name_override(path->nodes[0],
4771 path->slots[0],
4772 &min_key, inode,
4773 &other_ino);
4774 if (ret < 0) {
4775 err = ret;
4776 goto out_unlock;
4777 } else if (ret > 0 && ctx &&
4778 other_ino != btrfs_ino(ctx->inode)) {
4779 struct btrfs_key inode_key;
4780 struct inode *other_inode;
4781
4782 if (ins_nr > 0) {
4783 ins_nr++;
4784 } else {
4785 ins_nr = 1;
4786 ins_start_slot = path->slots[0];
4787 }
4788 ret = copy_items(trans, inode, dst_path, path,
4789 &last_extent, ins_start_slot,
4790 ins_nr, inode_only,
4791 logged_isize);
4792 if (ret < 0) {
4793 err = ret;
4794 goto out_unlock;
4795 }
4796 ins_nr = 0;
4797 btrfs_release_path(path);
4798 inode_key.objectid = other_ino;
4799 inode_key.type = BTRFS_INODE_ITEM_KEY;
4800 inode_key.offset = 0;
4801 other_inode = btrfs_iget(fs_info->sb,
4802 &inode_key, root,
4803 NULL);
4804 /*
4805 * If the other inode that had a conflicting dir
4806 * entry was deleted in the current transaction,
4807 * we don't need to do more work nor fallback to
4808 * a transaction commit.
4809 */
4810 if (IS_ERR(other_inode) &&
4811 PTR_ERR(other_inode) == -ENOENT) {
4812 goto next_key;
4813 } else if (IS_ERR(other_inode)) {
4814 err = PTR_ERR(other_inode);
4815 goto out_unlock;
4816 }
4817 /*
4818 * We are safe logging the other inode without
4819 * acquiring its i_mutex as long as we log with
4820 * the LOG_INODE_EXISTS mode. We're safe against
4821 * concurrent renames of the other inode as well
4822 * because during a rename we pin the log and
4823 * update the log with the new name before we
4824 * unpin it.
4825 */
4826 err = btrfs_log_inode(trans, root, other_inode,
4827 LOG_OTHER_INODE,
4828 0, LLONG_MAX, ctx);
4829 iput(other_inode);
4830 if (err)
4831 goto out_unlock;
4832 else
4833 goto next_key;
4834 }
4835 }
4836
4837 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
4838 if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
4839 if (ins_nr == 0)
4840 goto next_slot;
4841 ret = copy_items(trans, inode, dst_path, path,
4842 &last_extent, ins_start_slot,
4843 ins_nr, inode_only, logged_isize);
4844 if (ret < 0) {
4845 err = ret;
4846 goto out_unlock;
4847 }
4848 ins_nr = 0;
4849 if (ret) {
4850 btrfs_release_path(path);
4851 continue;
4852 }
4853 goto next_slot;
4854 }
4855
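		/*
		 * Batch up runs of contiguous leaf slots so that they can be
		 * copied to the log tree with a single copy_items() call.
		 */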
4856 src = path->nodes[0];
4857 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
4858 ins_nr++;
4859 goto next_slot;
4860 } else if (!ins_nr) {
4861 ins_start_slot = path->slots[0];
4862 ins_nr = 1;
4863 goto next_slot;
4864 }
4865
4866 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4867 ins_start_slot, ins_nr, inode_only,
4868 logged_isize);
4869 if (ret < 0) {
4870 err = ret;
4871 goto out_unlock;
4872 }
4873 if (ret) {
4874 ins_nr = 0;
4875 btrfs_release_path(path);
4876 continue;
4877 }
4878 ins_nr = 1;
4879 ins_start_slot = path->slots[0];
4880next_slot:
4881
4882 nritems = btrfs_header_nritems(path->nodes[0]);
4883 path->slots[0]++;
4884 if (path->slots[0] < nritems) {
4885 btrfs_item_key_to_cpu(path->nodes[0], &min_key,
4886 path->slots[0]);
4887 goto again;
4888 }
4889 if (ins_nr) {
4890 ret = copy_items(trans, inode, dst_path, path,
4891 &last_extent, ins_start_slot,
4892 ins_nr, inode_only, logged_isize);
4893 if (ret < 0) {
4894 err = ret;
4895 goto out_unlock;
4896 }
4897 ret = 0;
4898 ins_nr = 0;
4899 }
4900 btrfs_release_path(path);
4901next_key:
4902 if (min_key.offset < (u64)-1) {
4903 min_key.offset++;
4904 } else if (min_key.type < max_key.type) {
4905 min_key.type++;
4906 min_key.offset = 0;
4907 } else {
4908 break;
4909 }
4910 }
4911 if (ins_nr) {
4912 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4913 ins_start_slot, ins_nr, inode_only,
4914 logged_isize);
4915 if (ret < 0) {
4916 err = ret;
4917 goto out_unlock;
4918 }
4919 ret = 0;
4920 ins_nr = 0;
4921 }
4922
4923 btrfs_release_path(path);
4924 btrfs_release_path(dst_path);
4925 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
4926 if (err)
4927 goto out_unlock;
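	/*
	 * In the slow (full sync) path, and only when the NO_HOLES feature is
	 * enabled, explicitly log a hole spanning from the end of the last
	 * extent to i_size, since no file extent item in the fs/subvol tree
	 * describes it.
	 */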
4928 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
4929 btrfs_release_path(path);
4930 btrfs_release_path(dst_path);
4931 err = btrfs_log_trailing_hole(trans, root, inode, path);
4932 if (err)
4933 goto out_unlock;
4934 }
4935log_extents:
4936 btrfs_release_path(path);
4937 btrfs_release_path(dst_path);
4938 if (need_log_inode_item) {
4939 err = log_inode_item(trans, log, dst_path, inode);
4940 if (err)
4941 goto out_unlock;
4942 }
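	/*
	 * Fast fsync path: log file extent items based on the inode's list of
	 * modified extent maps instead of scanning the fs/subvol tree.
	 */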
4943 if (fast_search) {
4944 ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
4945 &logged_list, ctx, start, end);
4946 if (ret) {
4947 err = ret;
4948 goto out_unlock;
4949 }
4950 } else if (inode_only == LOG_INODE_ALL) {
4951 struct extent_map *em, *n;
4952
4953 write_lock(&em_tree->lock);
4954 /*
4955 * We can't just remove every em if we're called for a ranged
4956 * fsync - that is, one that doesn't cover the whole possible
4957 * file range (0 to LLONG_MAX). This is because we can have
4958 * em's that fall outside the range we're logging and therefore
4959 * their ordered operations haven't completed yet
4960 * (btrfs_finish_ordered_io() not invoked yet). This means we
4961 * didn't get their respective file extent item in the fs/subvol
4962 * tree yet, and need to let the next fast fsync (one which
4963 * consults the list of modified extent maps) find the em so
4964 * that it logs a matching file extent item and waits for the
4965 * respective ordered operation to complete (if it's still
4966 * running).
4967 *
4968 * Removing every em outside the range we're logging would make
4969 * the next fast fsync not log their matching file extent items,
4970 * therefore making us lose data after a log replay.
4971 */
4972 list_for_each_entry_safe(em, n, &em_tree->modified_extents,
4973 list) {
4974 const u64 mod_end = em->mod_start + em->mod_len - 1;
4975
4976 if (em->mod_start >= start && mod_end <= end)
4977 list_del_init(&em->list);
4978 }
4979 write_unlock(&em_tree->lock);
4980 }
4981
4982 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
4983 ret = log_directory_changes(trans, root, inode, path, dst_path,
4984 ctx);
4985 if (ret) {
4986 err = ret;
4987 goto out_unlock;
4988 }
4989 }
4990
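	/*
	 * Record that the inode was logged in this transaction, so that later
	 * fsyncs can detect it through btrfs_inode_in_log() and avoid doing
	 * the work all over again.
	 */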
4991 spin_lock(&BTRFS_I(inode)->lock);
4992 BTRFS_I(inode)->logged_trans = trans->transid;
4993 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
4994 spin_unlock(&BTRFS_I(inode)->lock);
4995out_unlock:
4996 if (unlikely(err))
4997 btrfs_put_logged_extents(&logged_list);
4998 else
4999 btrfs_submit_logged_extents(&logged_list, log);
5000 mutex_unlock(&BTRFS_I(inode)->log_mutex);
5001
5002 btrfs_free_path(path);
5003 btrfs_free_path(dst_path);
5004 return err;
5005}
5006
5007/*
5008 * Check if we must fall back to a transaction commit when logging an inode.
5009 * This must be called after logging the inode and is used only in the context
5010 * when fsyncing an inode requires logging some other inode - in which
5011 * case we can't lock the i_mutex of each other inode we need to log as that
5012 * can lead to deadlocks with concurrent fsync against other inodes (as we can
5013 * log inodes up or down in the hierarchy) or rename operations for example. So
5014 * we take the log_mutex of the inode after we have logged it and then check for
5015 * its last_unlink_trans value - this is safe because any task setting
5016 * last_unlink_trans must take the log_mutex and it must do this before it does
5017 * the actual unlink operation, so if we do this check before a concurrent task
5018 * sets last_unlink_trans it means we've logged a consistent version/state of
5019 * all the inode items, otherwise we are not sure and must do a transaction
5020 * commit (the concurrent task might have only updated last_unlink_trans before
5021 * we logged the inode or it might have also done the unlink).
5022 */
5023static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
5024 struct inode *inode)
5025{
5026 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
5027 bool ret = false;
5028
5029 mutex_lock(&BTRFS_I(inode)->log_mutex);
5030 if (BTRFS_I(inode)->last_unlink_trans > fs_info->last_trans_committed) {
5031 /*
5032 * Make sure any commits to the log are forced to be full
5033 * commits.
5034 */
5035 btrfs_set_log_full_commit(fs_info, trans);
5036 ret = true;
5037 }
5038 mutex_unlock(&BTRFS_I(inode)->log_mutex);
5039
5040 return ret;
5041}
5042
5043/*
5044 * follow the dentry parent pointers up the chain and see if any
5045 * of the directories in it require a full commit before they can
5046 * be logged. Returns zero if nothing special needs to be done or 1 if
5047 * a full commit is required.
5048 */
5049static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
5050 struct inode *inode,
5051 struct dentry *parent,
5052 struct super_block *sb,
5053 u64 last_committed)
5054{
5055 int ret = 0;
5056 struct dentry *old_parent = NULL;
5057 struct inode *orig_inode = inode;
5058
5059 /*
5060 * for regular files, if its inode is already on disk, we don't
5061 * have to worry about the parents at all. This is because
5062 * we can use the last_unlink_trans field to record renames
5063 * and other fun in this file.
5064 */
5065 if (S_ISREG(inode->i_mode) &&
5066 BTRFS_I(inode)->generation <= last_committed &&
5067 BTRFS_I(inode)->last_unlink_trans <= last_committed)
5068 goto out;
5069
5070 if (!S_ISDIR(inode->i_mode)) {
5071 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5072 goto out;
5073 inode = d_inode(parent);
5074 }
5075
5076 while (1) {
5077 /*
5078 * If we are logging a directory then we start with our inode,
5079 * not our parent's inode, so we need to skip setting the
5080 * logged_trans so that further down in the log code we don't
5081 * think this inode has already been logged.
5082 */
5083 if (inode != orig_inode)
5084 BTRFS_I(inode)->logged_trans = trans->transid;
5085 smp_mb();
5086
5087 if (btrfs_must_commit_transaction(trans, inode)) {
5088 ret = 1;
5089 break;
5090 }
5091
5092 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5093 break;
5094
5095 if (IS_ROOT(parent)) {
5096 inode = d_inode(parent);
5097 if (btrfs_must_commit_transaction(trans, inode))
5098 ret = 1;
5099 break;
5100 }
5101
5102 parent = dget_parent(parent);
5103 dput(old_parent);
5104 old_parent = parent;
5105 inode = d_inode(parent);
5106
5107 }
5108 dput(old_parent);
5109out:
5110 return ret;
5111}
5112
5113struct btrfs_dir_list {
5114 u64 ino;
5115 struct list_head list;
5116};
5117
5118/*
5119 * Log the inodes of the new dentries of a directory. See log_dir_items() for
5120 * details about why it is needed.
5121 * This is a recursive operation - if an existing dentry corresponds to a
5122 * directory, that directory's new entries are logged too (same behaviour as
5123 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
5124 * the dentries point to we do not lock their i_mutex, otherwise lockdep
5125 * complains about the following circular lock dependency / possible deadlock:
5126 *
5127 * CPU0 CPU1
5128 * ---- ----
5129 * lock(&type->i_mutex_dir_key#3/2);
5130 * lock(sb_internal#2);
5131 * lock(&type->i_mutex_dir_key#3/2);
5132 * lock(&sb->s_type->i_mutex_key#14);
5133 *
5134 * Where sb_internal is the lock (a counter that works as a lock) acquired by
5135 * sb_start_intwrite() in btrfs_start_transaction().
5136 * Not locking i_mutex of the inodes is still safe because:
5137 *
5138 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
5139 * that while logging the inode new references (names) are added or removed
5140 * from the inode, leaving the logged inode item with a link count that does
5141 * not match the number of logged inode reference items. This is fine because
5142 * at log replay time we compute the real number of links and correct the
5143 * link count in the inode item (see replay_one_buffer() and
5144 * link_to_fixup_dir());
5145 *
5146 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
5147 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
5148 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
5149 * has a size that doesn't match the sum of the lengths of all the logged
5150 * names. This does not result in a problem because if a dir_item key is
5151 * logged but its matching dir_index key is not logged, at log replay time we
5152 * don't use it to replay the respective name (see replay_one_name()). On the
5153 * other hand if only the dir_index key ends up being logged, the respective
5154 * name is added to the fs/subvol tree with both the dir_item and dir_index
5155 * keys created (see replay_one_name()).
5156 * The directory's inode item with a wrong i_size is not a problem as well,
5157 * since we don't use it at log replay time to set the i_size in the inode
5158 * item of the fs/subvol tree (see overwrite_item()).
5159 */
5160static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
5161 struct btrfs_root *root,
5162 struct inode *start_inode,
5163 struct btrfs_log_ctx *ctx)
5164{
5165 struct btrfs_fs_info *fs_info = root->fs_info;
5166 struct btrfs_root *log = root->log_root;
5167 struct btrfs_path *path;
5168 LIST_HEAD(dir_list);
5169 struct btrfs_dir_list *dir_elem;
5170 int ret = 0;
5171
5172 path = btrfs_alloc_path();
5173 if (!path)
5174 return -ENOMEM;
5175
5176 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
5177 if (!dir_elem) {
5178 btrfs_free_path(path);
5179 return -ENOMEM;
5180 }
5181 dir_elem->ino = btrfs_ino(start_inode);
5182 list_add_tail(&dir_elem->list, &dir_list);
5183
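	/*
	 * Breadth-first walk: for each directory in the list, log the inodes
	 * that its new dentries point to and queue any directories found among
	 * them so that their new dentries get processed too.
	 */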
5184 while (!list_empty(&dir_list)) {
5185 struct extent_buffer *leaf;
5186 struct btrfs_key min_key;
5187 int nritems;
5188 int i;
5189
5190 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
5191 list);
5192 if (ret)
5193 goto next_dir_inode;
5194
5195 min_key.objectid = dir_elem->ino;
5196 min_key.type = BTRFS_DIR_ITEM_KEY;
5197 min_key.offset = 0;
5198again:
5199 btrfs_release_path(path);
5200 ret = btrfs_search_forward(log, &min_key, path, trans->transid);
5201 if (ret < 0) {
5202 goto next_dir_inode;
5203 } else if (ret > 0) {
5204 ret = 0;
5205 goto next_dir_inode;
5206 }
5207
5208process_leaf:
5209 leaf = path->nodes[0];
5210 nritems = btrfs_header_nritems(leaf);
5211 for (i = path->slots[0]; i < nritems; i++) {
5212 struct btrfs_dir_item *di;
5213 struct btrfs_key di_key;
5214 struct inode *di_inode;
5215 struct btrfs_dir_list *new_dir_elem;
5216 int log_mode = LOG_INODE_EXISTS;
5217 int type;
5218
5219 btrfs_item_key_to_cpu(leaf, &min_key, i);
5220 if (min_key.objectid != dir_elem->ino ||
5221 min_key.type != BTRFS_DIR_ITEM_KEY)
5222 goto next_dir_inode;
5223
5224 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
5225 type = btrfs_dir_type(leaf, di);
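			/*
			 * Old (already committed) entries can be skipped
			 * unless they refer to a directory, which may itself
			 * contain new dentries that need logging.
			 */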
5226 if (btrfs_dir_transid(leaf, di) < trans->transid &&
5227 type != BTRFS_FT_DIR)
5228 continue;
5229 btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
5230 if (di_key.type == BTRFS_ROOT_ITEM_KEY)
5231 continue;
5232
5233 btrfs_release_path(path);
5234 di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL);
5235 if (IS_ERR(di_inode)) {
5236 ret = PTR_ERR(di_inode);
5237 goto next_dir_inode;
5238 }
5239
5240 if (btrfs_inode_in_log(di_inode, trans->transid)) {
5241 iput(di_inode);
5242 break;
5243 }
5244
5245 ctx->log_new_dentries = false;
5246 if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
5247 log_mode = LOG_INODE_ALL;
5248 ret = btrfs_log_inode(trans, root, di_inode,
5249 log_mode, 0, LLONG_MAX, ctx);
5250 if (!ret &&
5251 btrfs_must_commit_transaction(trans, di_inode))
5252 ret = 1;
5253 iput(di_inode);
5254 if (ret)
5255 goto next_dir_inode;
5256 if (ctx->log_new_dentries) {
5257 new_dir_elem = kmalloc(sizeof(*new_dir_elem),
5258 GFP_NOFS);
5259 if (!new_dir_elem) {
5260 ret = -ENOMEM;
5261 goto next_dir_inode;
5262 }
5263 new_dir_elem->ino = di_key.objectid;
5264 list_add_tail(&new_dir_elem->list, &dir_list);
5265 }
5266 break;
5267 }
5268 if (i == nritems) {
5269 ret = btrfs_next_leaf(log, path);
5270 if (ret < 0) {
5271 goto next_dir_inode;
5272 } else if (ret > 0) {
5273 ret = 0;
5274 goto next_dir_inode;
5275 }
5276 goto process_leaf;
5277 }
5278 if (min_key.offset < (u64)-1) {
5279 min_key.offset++;
5280 goto again;
5281 }
5282next_dir_inode:
5283 list_del(&dir_elem->list);
5284 kfree(dir_elem);
5285 }
5286
5287 btrfs_free_path(path);
5288 return ret;
5289}
5290
5291static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
5292 struct inode *inode,
5293 struct btrfs_log_ctx *ctx)
5294{
5295 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5296 int ret;
5297 struct btrfs_path *path;
5298 struct btrfs_key key;
5299 struct btrfs_root *root = BTRFS_I(inode)->root;
5300 const u64 ino = btrfs_ino(inode);
5301
5302 path = btrfs_alloc_path();
5303 if (!path)
5304 return -ENOMEM;
5305 path->skip_locking = 1;
5306 path->search_commit_root = 1;
5307
5308 key.objectid = ino;
5309 key.type = BTRFS_INODE_REF_KEY;
5310 key.offset = 0;
5311 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5312 if (ret < 0)
5313 goto out;
5314
5315 while (true) {
5316 struct extent_buffer *leaf = path->nodes[0];
5317 int slot = path->slots[0];
5318 u32 cur_offset = 0;
5319 u32 item_size;
5320 unsigned long ptr;
5321
5322 if (slot >= btrfs_header_nritems(leaf)) {
5323 ret = btrfs_next_leaf(root, path);
5324 if (ret < 0)
5325 goto out;
5326 else if (ret > 0)
5327 break;
5328 continue;
5329 }
5330
5331 btrfs_item_key_to_cpu(leaf, &key, slot);
5332 /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
5333 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
5334 break;
5335
5336 item_size = btrfs_item_size_nr(leaf, slot);
5337 ptr = btrfs_item_ptr_offset(leaf, slot);
5338 while (cur_offset < item_size) {
5339 struct btrfs_key inode_key;
5340 struct inode *dir_inode;
5341
5342 inode_key.type = BTRFS_INODE_ITEM_KEY;
5343 inode_key.offset = 0;
5344
5345 if (key.type == BTRFS_INODE_EXTREF_KEY) {
5346 struct btrfs_inode_extref *extref;
5347
5348 extref = (struct btrfs_inode_extref *)
5349 (ptr + cur_offset);
5350 inode_key.objectid = btrfs_inode_extref_parent(
5351 leaf, extref);
5352 cur_offset += sizeof(*extref);
5353 cur_offset += btrfs_inode_extref_name_len(leaf,
5354 extref);
5355 } else {
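				/*
				 * For BTRFS_INODE_REF_KEY items the parent
				 * directory is given by the key's offset and
				 * it is the same for every name packed in the
				 * item, so a single iteration handles the
				 * whole item.
				 */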
5356 inode_key.objectid = key.offset;
5357 cur_offset = item_size;
5358 }
5359
5360 dir_inode = btrfs_iget(fs_info->sb, &inode_key,
5361 root, NULL);
5362 /* If parent inode was deleted, skip it. */
5363 if (IS_ERR(dir_inode))
5364 continue;
5365
5366 if (ctx)
5367 ctx->log_new_dentries = false;
5368 ret = btrfs_log_inode(trans, root, dir_inode,
5369 LOG_INODE_ALL, 0, LLONG_MAX, ctx);
5370 if (!ret &&
5371 btrfs_must_commit_transaction(trans, dir_inode))
5372 ret = 1;
5373 if (!ret && ctx && ctx->log_new_dentries)
5374 ret = log_new_dir_dentries(trans, root,
5375 dir_inode, ctx);
5376 iput(dir_inode);
5377 if (ret)
5378 goto out;
5379 }
5380 path->slots[0]++;
5381 }
5382 ret = 0;
5383out:
5384 btrfs_free_path(path);
5385 return ret;
5386}
5387
5388/*
5389 * helper function around btrfs_log_inode to make sure newly created
5390 * parent directories also end up in the log. A minimal inode and backref
5391 * only logging is done of any parent directories that are newer than
5392 * the last committed transaction
5393 */
5394static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
5395 struct btrfs_root *root, struct inode *inode,
5396 struct dentry *parent,
5397 const loff_t start,
5398 const loff_t end,
5399 int exists_only,
5400 struct btrfs_log_ctx *ctx)
5401{
5402 struct btrfs_fs_info *fs_info = root->fs_info;
5403 int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
5404 struct super_block *sb;
5405 struct dentry *old_parent = NULL;
5406 int ret = 0;
5407 u64 last_committed = fs_info->last_trans_committed;
5408 bool log_dentries = false;
5409 struct inode *orig_inode = inode;
5410
5411 sb = inode->i_sb;
5412
5413 if (btrfs_test_opt(fs_info, NOTREELOG)) {
5414 ret = 1;
5415 goto end_no_trans;
5416 }
5417
5418 /*
5419 * If the previous transaction commit didn't complete, we have to do a
5420 * full commit ourselves.
5421 */
5422 if (fs_info->last_trans_log_full_commit >
5423 fs_info->last_trans_committed) {
5424 ret = 1;
5425 goto end_no_trans;
5426 }
5427
5428 if (root != BTRFS_I(inode)->root ||
5429 btrfs_root_refs(&root->root_item) == 0) {
5430 ret = 1;
5431 goto end_no_trans;
5432 }
5433
5434 ret = check_parent_dirs_for_sync(trans, inode, parent,
5435 sb, last_committed);
5436 if (ret)
5437 goto end_no_trans;
5438
5439 if (btrfs_inode_in_log(inode, trans->transid)) {
5440 ret = BTRFS_NO_LOG_SYNC;
5441 goto end_no_trans;
5442 }
5443
5444 ret = start_log_trans(trans, root, ctx);
5445 if (ret)
5446 goto end_no_trans;
5447
5448 ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
5449 if (ret)
5450 goto end_trans;
5451
5452 /*
5453 * for regular files, if its inode is already on disk, we don't
5454 * have to worry about the parents at all. This is because
5455 * we can use the last_unlink_trans field to record renames
5456 * and other fun in this file.
5457 */
5458 if (S_ISREG(inode->i_mode) &&
5459 BTRFS_I(inode)->generation <= last_committed &&
5460 BTRFS_I(inode)->last_unlink_trans <= last_committed) {
5461 ret = 0;
5462 goto end_trans;
5463 }
5464
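	/*
	 * If we logged a directory and the directory logging code found new
	 * dentries (ctx->log_new_dentries), remember to log the inodes those
	 * dentries point to after the parent directories are dealt with below.
	 */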
5465 if (S_ISDIR(inode->i_mode) && ctx && ctx->log_new_dentries)
5466 log_dentries = true;
5467
5468 /*
5469 * On unlink we must make sure all our current and old parent directory
5470 * inodes are fully logged. This is to prevent leaving dangling
5471 * directory index entries in directories that were our parents but are
5472 * not anymore. Not doing this results in old parent directory being
5473 * impossible to delete after log replay (rmdir will always fail with
5474 * error -ENOTEMPTY).
5475 *
5476 * Example 1:
5477 *
5478 * mkdir testdir
5479 * touch testdir/foo
5480 * ln testdir/foo testdir/bar
5481 * sync
5482 * unlink testdir/bar
5483 * xfs_io -c fsync testdir/foo
5484 * <power failure>
5485 * mount fs, triggers log replay
5486 *
5487 * If we don't log the parent directory (testdir), after log replay the
5488 * directory still has an entry pointing to the file inode using the bar
5489 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
5490 * the file inode has a link count of 1.
5491 *
5492 * Example 2:
5493 *
5494 * mkdir testdir
5495 * touch foo
5496 * ln foo testdir/foo2
5497 * ln foo testdir/foo3
5498 * sync
5499 * unlink testdir/foo3
5500 * xfs_io -c fsync foo
5501 * <power failure>
5502 * mount fs, triggers log replay
5503 *
5504 * As in the first example, after log replay the parent directory
5505 * testdir still has an entry pointing to the file inode using the name foo3
5506 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
5507 * and has a link count of 2.
5508 */
5509 if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
5510 ret = btrfs_log_all_parents(trans, orig_inode, ctx);
5511 if (ret)
5512 goto end_trans;
5513 }
5514
5515 while (1) {
5516 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5517 break;
5518
5519 inode = d_inode(parent);
5520 if (root != BTRFS_I(inode)->root)
5521 break;
5522
5523 if (BTRFS_I(inode)->generation > last_committed) {
5524 ret = btrfs_log_inode(trans, root, inode,
5525 LOG_INODE_EXISTS,
5526 0, LLONG_MAX, ctx);
5527 if (ret)
5528 goto end_trans;
5529 }
5530 if (IS_ROOT(parent))
5531 break;
5532
5533 parent = dget_parent(parent);
5534 dput(old_parent);
5535 old_parent = parent;
5536 }
5537 if (log_dentries)
5538 ret = log_new_dir_dentries(trans, root, orig_inode, ctx);
5539 else
5540 ret = 0;
5541end_trans:
5542 dput(old_parent);
5543 if (ret < 0) {
5544 btrfs_set_log_full_commit(fs_info, trans);
5545 ret = 1;
5546 }
5547
5548 if (ret)
5549 btrfs_remove_log_ctx(root, ctx);
5550 btrfs_end_log_trans(root);
5551end_no_trans:
5552 return ret;
5553}
5554
5555/*
5556 * it is not safe to log the dentry if the chunk root has added new
5557 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
5558 * If this returns 1, you must commit the transaction to safely get your
5559 * data on disk.
5560 */
5561int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
5562 struct btrfs_root *root, struct dentry *dentry,
5563 const loff_t start,
5564 const loff_t end,
5565 struct btrfs_log_ctx *ctx)
5566{
5567 struct dentry *parent = dget_parent(dentry);
5568 int ret;
5569
5570 ret = btrfs_log_inode_parent(trans, root, d_inode(dentry), parent,
5571 start, end, 0, ctx);
5572 dput(parent);
5573
5574 return ret;
5575}
5576
5577/*
5578 * should be called during mount to recover and replay any log trees
5579 * from the FS
5580 */
5581int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
5582{
5583 int ret;
5584 struct btrfs_path *path;
5585 struct btrfs_trans_handle *trans;
5586 struct btrfs_key key;
5587 struct btrfs_key found_key;
5588 struct btrfs_key tmp_key;
5589 struct btrfs_root *log;
5590 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
5591 struct walk_control wc = {
5592 .process_func = process_one_buffer,
5593 .stage = 0,
5594 };
5595
5596 path = btrfs_alloc_path();
5597 if (!path)
5598 return -ENOMEM;
5599
5600 set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
5601
5602 trans = btrfs_start_transaction(fs_info->tree_root, 0);
5603 if (IS_ERR(trans)) {
5604 ret = PTR_ERR(trans);
5605 goto error;
5606 }
5607
5608 wc.trans = trans;
5609 wc.pin = 1;
5610
5611 ret = walk_log_tree(trans, log_root_tree, &wc);
5612 if (ret) {
5613 btrfs_handle_fs_error(fs_info, ret,
5614 "Failed to pin buffers while recovering log root tree.");
5615 goto error;
5616 }
5617
5618again:
5619 key.objectid = BTRFS_TREE_LOG_OBJECTID;
5620 key.offset = (u64)-1;
5621 key.type = BTRFS_ROOT_ITEM_KEY;
5622
5623 while (1) {
5624 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
5625
5626 if (ret < 0) {
5627 btrfs_handle_fs_error(fs_info, ret,
5628 "Couldn't find tree log root.");
5629 goto error;
5630 }
5631 if (ret > 0) {
5632 if (path->slots[0] == 0)
5633 break;
5634 path->slots[0]--;
5635 }
5636 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
5637 path->slots[0]);
5638 btrfs_release_path(path);
5639 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
5640 break;
5641
5642 log = btrfs_read_fs_root(log_root_tree, &found_key);
5643 if (IS_ERR(log)) {
5644 ret = PTR_ERR(log);
5645 btrfs_handle_fs_error(fs_info, ret,
5646 "Couldn't read tree log root.");
5647 goto error;
5648 }
5649
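		/*
		 * The offset in the log root's key is the objectid of the
		 * fs/subvol tree the log belongs to, so look up that tree and
		 * use it as the destination for replaying the log's items.
		 */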
5650 tmp_key.objectid = found_key.offset;
5651 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
5652 tmp_key.offset = (u64)-1;
5653
5654 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
5655 if (IS_ERR(wc.replay_dest)) {
5656 ret = PTR_ERR(wc.replay_dest);
5657 free_extent_buffer(log->node);
5658 free_extent_buffer(log->commit_root);
5659 kfree(log);
5660 btrfs_handle_fs_error(fs_info, ret,
5661 "Couldn't read target root for tree log recovery.");
5662 goto error;
5663 }
5664
5665 wc.replay_dest->log_root = log;
5666 btrfs_record_root_in_trans(trans, wc.replay_dest);
5667 ret = walk_log_tree(trans, log, &wc);
5668
5669 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
5670 ret = fixup_inode_link_counts(trans, wc.replay_dest,
5671 path);
5672 }
5673
5674 key.offset = found_key.offset - 1;
5675 wc.replay_dest->log_root = NULL;
5676 free_extent_buffer(log->node);
5677 free_extent_buffer(log->commit_root);
5678 kfree(log);
5679
5680 if (ret)
5681 goto error;
5682
5683 if (found_key.offset == 0)
5684 break;
5685 }
5686 btrfs_release_path(path);
5687
5688 /* step one is to pin it all, step two is to replay just inodes */
5689 if (wc.pin) {
5690 wc.pin = 0;
5691 wc.process_func = replay_one_buffer;
5692 wc.stage = LOG_WALK_REPLAY_INODES;
5693 goto again;
5694 }
5695 /* step three is to replay everything */
5696 if (wc.stage < LOG_WALK_REPLAY_ALL) {
5697 wc.stage++;
5698 goto again;
5699 }
5700
5701 btrfs_free_path(path);
5702
5703 /* step 4: commit the transaction, which also unpins the blocks */
5704 ret = btrfs_commit_transaction(trans);
5705 if (ret)
5706 return ret;
5707
5708 free_extent_buffer(log_root_tree->node);
5709 log_root_tree->log_root = NULL;
5710 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
5711 kfree(log_root_tree);
5712
5713 return 0;
5714error:
5715 if (wc.trans)
5716 btrfs_end_transaction(wc.trans);
5717 btrfs_free_path(path);
5718 return ret;
5719}
5720
5721/*
5722 * there are some corner cases where we want to force a full
5723 * commit instead of allowing a directory to be logged.
5724 *
5725 * They revolve around files that were unlinked from the directory, and
5726 * this function updates the parent directory so that a full commit is
5727 * properly done if it is fsync'd later after the unlinks are done.
5728 *
5729 * Must be called before the unlink operations (updates to the subvolume tree,
5730 * inodes, etc) are done.
5731 */
5732void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
5733 struct inode *dir, struct inode *inode,
5734 int for_rename)
5735{
5736 /*
5737 * when we're logging a file, if it hasn't been renamed
5738 * or unlinked, and its inode is fully committed on disk,
5739 * we don't have to worry about walking up the directory chain
5740 * to log its parents.
5741 *
5742 * So, we use the last_unlink_trans field to put this transid
5743 * into the file. When the file is logged we check it and
5744 * don't log the parents if the file is fully on disk.
5745 */
5746 mutex_lock(&BTRFS_I(inode)->log_mutex);
5747 BTRFS_I(inode)->last_unlink_trans = trans->transid;
5748 mutex_unlock(&BTRFS_I(inode)->log_mutex);
5749
5750 /*
5751 * if this directory was already logged any new
5752 * names for this file/dir will get recorded
5753 */
5754 smp_mb();
5755 if (BTRFS_I(dir)->logged_trans == trans->transid)
5756 return;
5757
5758 /*
5759 * if the inode we're about to unlink was logged,
5760 * the log will be properly updated for any new names
5761 */
5762 if (BTRFS_I(inode)->logged_trans == trans->transid)
5763 return;
5764
5765 /*
5766 * when renaming files across directories, if the directory
5767 * we're unlinking from gets fsync'd later on, there's
5768 * no way to find the destination directory later and fsync it
5769 * properly. So, we have to be conservative and force commits
5770 * so the new name gets discovered.
5771 */
5772 if (for_rename)
5773 goto record;
5774
5775 /* we can safely do the unlink without any special recording */
5776 return;
5777
5778record:
5779 mutex_lock(&BTRFS_I(dir)->log_mutex);
5780 BTRFS_I(dir)->last_unlink_trans = trans->transid;
5781 mutex_unlock(&BTRFS_I(dir)->log_mutex);
5782}
5783
5784/*
5785 * Make sure that if someone attempts to fsync the parent directory of a deleted
5786 * snapshot, it ends up triggering a transaction commit. This is to guarantee
5787 * that after replaying the log tree of the parent directory's root we will not
5788 * see the snapshot anymore and at log replay time we will not see any log tree
5789 * corresponding to the deleted snapshot's root, which could lead to replaying
5790 * it after replaying the log tree of the parent directory (which would replay
5791 * the snapshot delete operation).
5792 *
5793 * Must be called before the actual snapshot destroy operation (updates to the
5794 * parent root and tree of tree roots trees, etc) are done.
5795 */
5796void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
5797 struct inode *dir)
5798{
5799 mutex_lock(&BTRFS_I(dir)->log_mutex);
5800 BTRFS_I(dir)->last_unlink_trans = trans->transid;
5801 mutex_unlock(&BTRFS_I(dir)->log_mutex);
5802}
5803
5804/*
5805 * Call this after adding a new name for a file and it will properly
5806 * update the log to reflect the new name.
5807 *
5808 * It will return zero if all goes well, and it will return 1 if a
5809 * full transaction commit is required.
5810 */
5811int btrfs_log_new_name(struct btrfs_trans_handle *trans,
5812 struct inode *inode, struct inode *old_dir,
5813 struct dentry *parent)
5814{
5815 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
5816 struct btrfs_root * root = BTRFS_I(inode)->root;
5817
5818 /*
5819 * this will force the logging code to walk the dentry chain
5820 * up for the file
5821 */
5822 if (S_ISREG(inode->i_mode))
5823 BTRFS_I(inode)->last_unlink_trans = trans->transid;
5824
5825 /*
5826 * if this inode hasn't been logged and the directory we're renaming it
5827 * from hasn't been logged, we don't need to log it
5828 */
5829 if (BTRFS_I(inode)->logged_trans <=
5830 fs_info->last_trans_committed &&
5831 (!old_dir || BTRFS_I(old_dir)->logged_trans <=
5832 fs_info->last_trans_committed))
5833 return 0;
5834
5835 return btrfs_log_inode_parent(trans, root, inode, parent, 0,
5836 LLONG_MAX, 1, NULL);
5837}
5838