1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19#include <linux/bio.h>
20#include <linux/slab.h>
21#include <linux/pagemap.h>
22#include <linux/highmem.h>
23#include "ctree.h"
24#include "disk-io.h"
25#include "transaction.h"
26#include "print-tree.h"
27
28#define __MAX_CSUM_ITEMS(r, size) ((((BTRFS_LEAF_DATA_SIZE(r) - \
29 sizeof(struct btrfs_item) * 2) / \
30 size) - 1))
31
32#define MAX_CSUM_ITEMS(r, size) (min(__MAX_CSUM_ITEMS(r, size), PAGE_CACHE_SIZE))
33
34#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
35 sizeof(struct btrfs_ordered_sum)) / \
36 sizeof(struct btrfs_sector_sum) * \
37 (r)->sectorsize - (r)->sectorsize)
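/*
 * MAX_CSUM_ITEMS caps how many checksums fit in one csum item: the leaf
 * data area minus room for two item headers, divided by the checksum size.
 * As an illustrative example, with 4K leaves and 4 byte crc32c checksums a
 * single item can hold on the order of a thousand checksums, covering a few
 * megabytes of file data.  MAX_ORDERED_SUM_BYTES is the analogous cap for an
 * in-memory btrfs_ordered_sum, sized so the struct plus its array of
 * btrfs_sector_sum entries stays within one page.
 */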
38
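/*
 * insert a new file extent item into the fs tree for the given inode
 * (objectid) at file offset pos, describing a regular extent that starts
 * at disk_offset on disk.
 */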
39int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
40 struct btrfs_root *root,
41 u64 objectid, u64 pos,
42 u64 disk_offset, u64 disk_num_bytes,
43 u64 num_bytes, u64 offset, u64 ram_bytes,
44 u8 compression, u8 encryption, u16 other_encoding)
45{
46 int ret = 0;
47 struct btrfs_file_extent_item *item;
48 struct btrfs_key file_key;
49 struct btrfs_path *path;
50 struct extent_buffer *leaf;
51
52 path = btrfs_alloc_path();
53 if (!path)
54 return -ENOMEM;
55 file_key.objectid = objectid;
56 file_key.offset = pos;
57 btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY);
58
59 path->leave_spinning = 1;
60 ret = btrfs_insert_empty_item(trans, root, path, &file_key,
61 sizeof(*item));
62 if (ret < 0)
63 goto out;
64 BUG_ON(ret); /* Can't happen */
65 leaf = path->nodes[0];
66 item = btrfs_item_ptr(leaf, path->slots[0],
67 struct btrfs_file_extent_item);
68 btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
69 btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
70 btrfs_set_file_extent_offset(leaf, item, offset);
71 btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
72 btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
73 btrfs_set_file_extent_generation(leaf, item, trans->transid);
74 btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
75 btrfs_set_file_extent_compression(leaf, item, compression);
76 btrfs_set_file_extent_encryption(leaf, item, encryption);
77 btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);
78
79 btrfs_mark_buffer_dirty(leaf);
80out:
81 btrfs_free_path(path);
82 return ret;
83}
84
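/*
 * find the csum item in the csum tree that covers the given disk bytenr and
 * return a pointer to the matching checksum slot inside it.  Returns
 * ERR_PTR(-ENOENT) when no item covers the bytenr and ERR_PTR(-EFBIG) when
 * an item starts before it but ends too early.
 */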
85struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans,
86 struct btrfs_root *root,
87 struct btrfs_path *path,
88 u64 bytenr, int cow)
89{
90 int ret;
91 struct btrfs_key file_key;
92 struct btrfs_key found_key;
93 struct btrfs_csum_item *item;
94 struct extent_buffer *leaf;
95 u64 csum_offset = 0;
96 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
97 int csums_in_item;
98
99 file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
100 file_key.offset = bytenr;
101 btrfs_set_key_type(&file_key, BTRFS_EXTENT_CSUM_KEY);
102 ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
103 if (ret < 0)
104 goto fail;
105 leaf = path->nodes[0];
106 if (ret > 0) {
107 ret = 1;
108 if (path->slots[0] == 0)
109 goto fail;
110 path->slots[0]--;
111 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
112 if (btrfs_key_type(&found_key) != BTRFS_EXTENT_CSUM_KEY)
113 goto fail;
114
115 csum_offset = (bytenr - found_key.offset) >>
116 root->fs_info->sb->s_blocksize_bits;
117 csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
118 csums_in_item /= csum_size;
119
120 if (csum_offset >= csums_in_item) {
121 ret = -EFBIG;
122 goto fail;
123 }
124 }
125 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
126 item = (struct btrfs_csum_item *)((unsigned char *)item +
127 csum_offset * csum_size);
128 return item;
129fail:
130 if (ret > 0)
131 ret = -ENOENT;
132 return ERR_PTR(ret);
133}
134
135
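/*
 * position the path at the file extent item for the given inode and file
 * offset.  mod < 0 sets the search up for a deletion, any non-zero mod cows
 * the path.
 */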
136int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
137 struct btrfs_root *root,
138 struct btrfs_path *path, u64 objectid,
139 u64 offset, int mod)
140{
141 int ret;
142 struct btrfs_key file_key;
143 int ins_len = mod < 0 ? -1 : 0;
144 int cow = mod != 0;
145
146 file_key.objectid = objectid;
147 file_key.offset = offset;
148 btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY);
149 ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
150 return ret;
151}
152
153
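/*
 * walk every page in a read bio and fill in the expected checksum for each
 * block, taken either from the still pending ordered sums or from the csum
 * tree.  The results go into *dst when it is provided, otherwise they are
 * stashed in the io_tree private for verification at end_io time.
 */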
154static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
155 struct inode *inode, struct bio *bio,
156 u64 logical_offset, u32 *dst, int dio)
157{
158 u32 sum;
159 struct bio_vec *bvec = bio->bi_io_vec;
160 int bio_index = 0;
161 u64 offset = 0;
162 u64 item_start_offset = 0;
163 u64 item_last_offset = 0;
164 u64 disk_bytenr;
165 u32 diff;
166 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
167 int ret;
168 struct btrfs_path *path;
169 struct btrfs_csum_item *item = NULL;
170 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
171
172 path = btrfs_alloc_path();
173 if (!path)
174 return -ENOMEM;
175 if (bio->bi_size > PAGE_CACHE_SIZE * 8)
176 path->reada = 2;
177
178 WARN_ON(bio->bi_vcnt <= 0);
179
180 /*
181 * the free space stuff is only read when it hasn't been
182 * updated in the current transaction. So, we can safely
183 * read from the commit root and sidestep a nasty deadlock
184 * between reading the free space cache and updating the csum tree.
185 */
186 if (btrfs_is_free_space_inode(root, inode)) {
187 path->search_commit_root = 1;
188 path->skip_locking = 1;
189 }
190
191 disk_bytenr = (u64)bio->bi_sector << 9;
192 if (dio)
193 offset = logical_offset;
194 while (bio_index < bio->bi_vcnt) {
195 if (!dio)
196 offset = page_offset(bvec->bv_page) + bvec->bv_offset;
197 ret = btrfs_find_ordered_sum(inode, offset, disk_bytenr, &sum);
198 if (ret == 0)
199 goto found;
200
201 if (!item || disk_bytenr < item_start_offset ||
202 disk_bytenr >= item_last_offset) {
203 struct btrfs_key found_key;
204 u32 item_size;
205
206 if (item)
207 btrfs_release_path(path);
208 item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
209 path, disk_bytenr, 0);
210 if (IS_ERR(item)) {
211 ret = PTR_ERR(item);
212 if (ret == -ENOENT || ret == -EFBIG)
213 ret = 0;
214 sum = 0;
215 if (BTRFS_I(inode)->root->root_key.objectid ==
216 BTRFS_DATA_RELOC_TREE_OBJECTID) {
217 set_extent_bits(io_tree, offset,
218 offset + bvec->bv_len - 1,
219 EXTENT_NODATASUM, GFP_NOFS);
220 } else {
221 printk(KERN_INFO "btrfs no csum found "
222 "for inode %llu start %llu\n",
223 (unsigned long long)
224 btrfs_ino(inode),
225 (unsigned long long)offset);
226 }
227 item = NULL;
228 btrfs_release_path(path);
229 goto found;
230 }
231 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
232 path->slots[0]);
233
234 item_start_offset = found_key.offset;
235 item_size = btrfs_item_size_nr(path->nodes[0],
236 path->slots[0]);
237 item_last_offset = item_start_offset +
238 (item_size / csum_size) *
239 root->sectorsize;
240 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
241 struct btrfs_csum_item);
242 }
243 /*
244 * this byte range must be able to fit inside
245 * a single leaf so it will also fit inside a u32
246 */
247 diff = disk_bytenr - item_start_offset;
248 diff = diff / root->sectorsize;
249 diff = diff * csum_size;
250
251 read_extent_buffer(path->nodes[0], &sum,
252 ((unsigned long)item) + diff,
253 csum_size);
254found:
255 if (dst)
256 *dst++ = sum;
257 else
258 set_state_private(io_tree, offset, sum);
259 disk_bytenr += bvec->bv_len;
260 offset += bvec->bv_len;
261 bio_index++;
262 bvec++;
263 }
264 btrfs_free_path(path);
265 return 0;
266}
267
268int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
269 struct bio *bio, u32 *dst)
270{
271 return __btrfs_lookup_bio_sums(root, inode, bio, 0, dst, 0);
272}
273
274int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
275 struct bio *bio, u64 offset, u32 *dst)
276{
277 return __btrfs_lookup_bio_sums(root, inode, bio, offset, dst, 1);
278}
279
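/*
 * collect all checksums stored for the disk range [start, end] into a list
 * of btrfs_ordered_sum structures.  Used when existing csums have to be
 * copied, e.g. while relocating extents or logging them.
 */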
280int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
281 struct list_head *list, int search_commit)
282{
283 struct btrfs_key key;
284 struct btrfs_path *path;
285 struct extent_buffer *leaf;
286 struct btrfs_ordered_sum *sums;
287 struct btrfs_sector_sum *sector_sum;
288 struct btrfs_csum_item *item;
289 LIST_HEAD(tmplist);
290 unsigned long offset;
291 int ret;
292 size_t size;
293 u64 csum_end;
294 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
295
296 path = btrfs_alloc_path();
297 if (!path)
298 return -ENOMEM;
299
300 if (search_commit) {
301 path->skip_locking = 1;
302 path->reada = 2;
303 path->search_commit_root = 1;
304 }
305
306 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
307 key.offset = start;
308 key.type = BTRFS_EXTENT_CSUM_KEY;
309
310 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
311 if (ret < 0)
312 goto fail;
313 if (ret > 0 && path->slots[0] > 0) {
314 leaf = path->nodes[0];
315 btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
316 if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
317 key.type == BTRFS_EXTENT_CSUM_KEY) {
318 offset = (start - key.offset) >>
319 root->fs_info->sb->s_blocksize_bits;
320 if (offset * csum_size <
321 btrfs_item_size_nr(leaf, path->slots[0] - 1))
322 path->slots[0]--;
323 }
324 }
325
326 while (start <= end) {
327 leaf = path->nodes[0];
328 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
329 ret = btrfs_next_leaf(root, path);
330 if (ret < 0)
331 goto fail;
332 if (ret > 0)
333 break;
334 leaf = path->nodes[0];
335 }
336
337 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
338 if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
339 key.type != BTRFS_EXTENT_CSUM_KEY)
340 break;
341
342 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
343 if (key.offset > end)
344 break;
345
346 if (key.offset > start)
347 start = key.offset;
348
349 size = btrfs_item_size_nr(leaf, path->slots[0]);
350 csum_end = key.offset + (size / csum_size) * root->sectorsize;
351 if (csum_end <= start) {
352 path->slots[0]++;
353 continue;
354 }
355
356 csum_end = min(csum_end, end + 1);
357 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
358 struct btrfs_csum_item);
359 while (start < csum_end) {
360 size = min_t(size_t, csum_end - start,
361 MAX_ORDERED_SUM_BYTES(root));
362 sums = kzalloc(btrfs_ordered_sum_size(root, size),
363 GFP_NOFS);
364 if (!sums) {
365 ret = -ENOMEM;
366 goto fail;
367 }
368
369 sector_sum = sums->sums;
370 sums->bytenr = start;
371 sums->len = size;
372
373 offset = (start - key.offset) >>
374 root->fs_info->sb->s_blocksize_bits;
375 offset *= csum_size;
376
377 while (size > 0) {
378 read_extent_buffer(path->nodes[0],
379 &sector_sum->sum,
380 ((unsigned long)item) +
381 offset, csum_size);
382 sector_sum->bytenr = start;
383
384 size -= root->sectorsize;
385 start += root->sectorsize;
386 offset += csum_size;
387 sector_sum++;
388 }
389 list_add_tail(&sums->list, &tmplist);
390 }
391 path->slots[0]++;
392 }
393 ret = 0;
394fail:
395 while (ret < 0 && !list_empty(&tmplist)) {
396 sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
397 list_del(&sums->list);
398 kfree(sums);
399 }
400 list_splice_tail(&tmplist, list);
401
402 btrfs_free_path(path);
403 return ret;
404}
405
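/*
 * checksum every block of data in a write bio and attach the resulting sums
 * to the ordered extent(s) covering it.  With contig set the bio is assumed
 * to be one contiguous range starting at file_start, otherwise the file
 * offset is taken from each page.
 */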
406int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
407 struct bio *bio, u64 file_start, int contig)
408{
409 struct btrfs_ordered_sum *sums;
410 struct btrfs_sector_sum *sector_sum;
411 struct btrfs_ordered_extent *ordered;
412 char *data;
413 struct bio_vec *bvec = bio->bi_io_vec;
414 int bio_index = 0;
415 unsigned long total_bytes = 0;
416 unsigned long this_sum_bytes = 0;
417 u64 offset;
418 u64 disk_bytenr;
419
420 WARN_ON(bio->bi_vcnt <= 0);
421 sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_size), GFP_NOFS);
422 if (!sums)
423 return -ENOMEM;
424
425 sector_sum = sums->sums;
426 disk_bytenr = (u64)bio->bi_sector << 9;
427 sums->len = bio->bi_size;
428 INIT_LIST_HEAD(&sums->list);
429
430 if (contig)
431 offset = file_start;
432 else
433 offset = page_offset(bvec->bv_page) + bvec->bv_offset;
434
435 ordered = btrfs_lookup_ordered_extent(inode, offset);
436 BUG_ON(!ordered); /* Logic error */
437 sums->bytenr = ordered->start;
438
439 while (bio_index < bio->bi_vcnt) {
440 if (!contig)
441 offset = page_offset(bvec->bv_page) + bvec->bv_offset;
442
443 if (!contig && (offset >= ordered->file_offset + ordered->len ||
444 offset < ordered->file_offset)) {
445 unsigned long bytes_left;
446 sums->len = this_sum_bytes;
447 this_sum_bytes = 0;
448 btrfs_add_ordered_sum(inode, ordered, sums);
449 btrfs_put_ordered_extent(ordered);
450
451 bytes_left = bio->bi_size - total_bytes;
452
453 sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
454 GFP_NOFS);
455 BUG_ON(!sums); /* -ENOMEM */
456 sector_sum = sums->sums;
457 sums->len = bytes_left;
458 ordered = btrfs_lookup_ordered_extent(inode, offset);
459 BUG_ON(!ordered); /* Logic error */
460 sums->bytenr = ordered->start;
461 }
462
463 data = kmap_atomic(bvec->bv_page);
464 sector_sum->sum = ~(u32)0;
465 sector_sum->sum = btrfs_csum_data(root,
466 data + bvec->bv_offset,
467 sector_sum->sum,
468 bvec->bv_len);
469 kunmap_atomic(data);
470 btrfs_csum_final(sector_sum->sum,
471 (char *)&sector_sum->sum);
472 sector_sum->bytenr = disk_bytenr;
473
474 sector_sum++;
475 bio_index++;
476 total_bytes += bvec->bv_len;
477 this_sum_bytes += bvec->bv_len;
478 disk_bytenr += bvec->bv_len;
479 offset += bvec->bv_len;
480 bvec++;
481 }
482 this_sum_bytes = 0;
483 btrfs_add_ordered_sum(inode, ordered, sums);
484 btrfs_put_ordered_extent(ordered);
485 return 0;
486}
487
488/*
489 * helper function for csum removal, this expects the
490 * key to describe the csum pointed to by the path, and it expects
491 * the csum to overlap the range [bytenr, len]
492 *
493 * The csum should not be entirely contained in the range and the
494 * range should not be entirely contained in the csum.
495 *
496 * This calls btrfs_truncate_item with the correct args based on the
497 * overlap, and fixes up the key as required.
498 */
499static noinline void truncate_one_csum(struct btrfs_trans_handle *trans,
500 struct btrfs_root *root,
501 struct btrfs_path *path,
502 struct btrfs_key *key,
503 u64 bytenr, u64 len)
504{
505 struct extent_buffer *leaf;
506 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
507 u64 csum_end;
508 u64 end_byte = bytenr + len;
509 u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits;
510
511 leaf = path->nodes[0];
512 csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
513 csum_end <<= root->fs_info->sb->s_blocksize_bits;
514 csum_end += key->offset;
515
516 if (key->offset < bytenr && csum_end <= end_byte) {
517 /*
518 * [ bytenr - len ]
519 * [ ]
520 * [csum ]
521 * A simple truncate off the end of the item
522 */
523 u32 new_size = (bytenr - key->offset) >> blocksize_bits;
524 new_size *= csum_size;
525 btrfs_truncate_item(trans, root, path, new_size, 1);
526 } else if (key->offset >= bytenr && csum_end > end_byte &&
527 end_byte > key->offset) {
528 /*
529 * [ bytenr - len ]
530 * [ ]
531 * [csum ]
532 * we need to truncate from the beginning of the csum
533 */
534 u32 new_size = (csum_end - end_byte) >> blocksize_bits;
535 new_size *= csum_size;
536
537 btrfs_truncate_item(trans, root, path, new_size, 0);
538
539 key->offset = end_byte;
540 btrfs_set_item_key_safe(trans, root, path, key);
541 } else {
542 BUG();
543 }
544}
545
546/*
547 * deletes the csum items from the csum tree for a given
548 * range of bytes.
549 */
550int btrfs_del_csums(struct btrfs_trans_handle *trans,
551 struct btrfs_root *root, u64 bytenr, u64 len)
552{
553 struct btrfs_path *path;
554 struct btrfs_key key;
555 u64 end_byte = bytenr + len;
556 u64 csum_end;
557 struct extent_buffer *leaf;
558 int ret;
559 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
560 int blocksize_bits = root->fs_info->sb->s_blocksize_bits;
561
562 root = root->fs_info->csum_root;
563
564 path = btrfs_alloc_path();
565 if (!path)
566 return -ENOMEM;
567
568 while (1) {
569 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
570 key.offset = end_byte - 1;
571 key.type = BTRFS_EXTENT_CSUM_KEY;
572
573 path->leave_spinning = 1;
574 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
575 if (ret > 0) {
576 if (path->slots[0] == 0)
577 break;
578 path->slots[0]--;
579 } else if (ret < 0) {
580 break;
581 }
582
583 leaf = path->nodes[0];
584 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
585
586 if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
587 key.type != BTRFS_EXTENT_CSUM_KEY) {
588 break;
589 }
590
591 if (key.offset >= end_byte)
592 break;
593
594 csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
595 csum_end <<= blocksize_bits;
596 csum_end += key.offset;
597
598 /* this csum ends before we start, we're done */
599 if (csum_end <= bytenr)
600 break;
601
602 /* delete the entire item, it is inside our range */
603 if (key.offset >= bytenr && csum_end <= end_byte) {
604 ret = btrfs_del_item(trans, root, path);
605 if (ret)
606 goto out;
607 if (key.offset == bytenr)
608 break;
609 } else if (key.offset < bytenr && csum_end > end_byte) {
610 unsigned long offset;
611 unsigned long shift_len;
612 unsigned long item_offset;
613 /*
614 * [ bytenr - len ]
615 * [csum ]
616 *
617 * Our bytes are in the middle of the csum,
618 * we need to split this item and insert a new one.
619 *
620 * But we can't drop the path because the
621 * csum could change, get removed, extended etc.
622 *
623 * The trick here is the max size of a csum item leaves
624 * enough room in the tree block for a single
625 * item header. So, we split the item in place,
626 * adding a new header pointing to the existing
627 * bytes. Then we loop around again and we have
628 * a nicely formed csum item that we can neatly
629 * truncate.
630 */
631 offset = (bytenr - key.offset) >> blocksize_bits;
632 offset *= csum_size;
633
634 shift_len = (len >> blocksize_bits) * csum_size;
635
636 item_offset = btrfs_item_ptr_offset(leaf,
637 path->slots[0]);
638
639 memset_extent_buffer(leaf, 0, item_offset + offset,
640 shift_len);
641 key.offset = bytenr;
642
643 /*
644 * btrfs_split_item returns -EAGAIN when the
645 * item changed size or key
646 */
647 ret = btrfs_split_item(trans, root, path, &key, offset);
648 if (ret && ret != -EAGAIN) {
649 btrfs_abort_transaction(trans, root, ret);
650 goto out;
651 }
652
653 key.offset = end_byte - 1;
654 } else {
655 truncate_one_csum(trans, root, path, &key, bytenr, len);
656 if (key.offset < bytenr)
657 break;
658 }
659 btrfs_release_path(path);
660 }
661 ret = 0;
662out:
663 btrfs_free_path(path);
664 return ret;
665}
666
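/*
 * write the checksums collected in an ordered sum into the csum tree,
 * extending an existing csum item when the new sums directly follow it and
 * inserting new items otherwise.
 */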
667int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
668 struct btrfs_root *root,
669 struct btrfs_ordered_sum *sums)
670{
671 u64 bytenr;
672 int ret;
673 struct btrfs_key file_key;
674 struct btrfs_key found_key;
675 u64 next_offset;
676 u64 total_bytes = 0;
677 int found_next;
678 struct btrfs_path *path;
679 struct btrfs_csum_item *item;
680 struct btrfs_csum_item *item_end;
681 struct extent_buffer *leaf = NULL;
682 u64 csum_offset;
683 struct btrfs_sector_sum *sector_sum;
684 u32 nritems;
685 u32 ins_size;
686 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
687
688 path = btrfs_alloc_path();
689 if (!path)
690 return -ENOMEM;
691
692 sector_sum = sums->sums;
693again:
694 next_offset = (u64)-1;
695 found_next = 0;
696 file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
697 file_key.offset = sector_sum->bytenr;
698 bytenr = sector_sum->bytenr;
699 btrfs_set_key_type(&file_key, BTRFS_EXTENT_CSUM_KEY);
700
701 item = btrfs_lookup_csum(trans, root, path, sector_sum->bytenr, 1);
702 if (!IS_ERR(item)) {
703 leaf = path->nodes[0];
704 ret = 0;
705 goto found;
706 }
707 ret = PTR_ERR(item);
708 if (ret != -EFBIG && ret != -ENOENT)
709 goto fail_unlock;
710
711 if (ret == -EFBIG) {
712 u32 item_size;
713 /* we found one, but it isn't big enough yet */
714 leaf = path->nodes[0];
715 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
716 if ((item_size / csum_size) >=
717 MAX_CSUM_ITEMS(root, csum_size)) {
718 /* already at max size, make a new one */
719 goto insert;
720 }
721 } else {
722 int slot = path->slots[0] + 1;
723 /* we didn't find a csum item, insert one */
724 nritems = btrfs_header_nritems(path->nodes[0]);
725 if (path->slots[0] >= nritems - 1) {
726 ret = btrfs_next_leaf(root, path);
727 if (ret == 1)
728 found_next = 1;
729 if (ret != 0)
730 goto insert;
731 slot = 0;
732 }
733 btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
734 if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
735 found_key.type != BTRFS_EXTENT_CSUM_KEY) {
736 found_next = 1;
737 goto insert;
738 }
739 next_offset = found_key.offset;
740 found_next = 1;
741 goto insert;
742 }
743
744 /*
745 * at this point, we know the tree has an item, but it isn't big
746 * enough yet to put our csum in. Grow it
747 */
748 btrfs_release_path(path);
749 ret = btrfs_search_slot(trans, root, &file_key, path,
750 csum_size, 1);
751 if (ret < 0)
752 goto fail_unlock;
753
754 if (ret > 0) {
755 if (path->slots[0] == 0)
756 goto insert;
757 path->slots[0]--;
758 }
759
760 leaf = path->nodes[0];
761 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
762 csum_offset = (bytenr - found_key.offset) >>
763 root->fs_info->sb->s_blocksize_bits;
764
765 if (btrfs_key_type(&found_key) != BTRFS_EXTENT_CSUM_KEY ||
766 found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
767 csum_offset >= MAX_CSUM_ITEMS(root, csum_size)) {
768 goto insert;
769 }
770
771 if (csum_offset >= btrfs_item_size_nr(leaf, path->slots[0]) /
772 csum_size) {
773 u32 diff = (csum_offset + 1) * csum_size;
774
775 /*
776 * is the item big enough already? we dropped our lock
777 * before and need to recheck
778 */
779 if (diff < btrfs_item_size_nr(leaf, path->slots[0]))
780 goto csum;
781
782 diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
783 if (diff != csum_size)
784 goto insert;
785
786 btrfs_extend_item(trans, root, path, diff);
787 goto csum;
788 }
789
790insert:
791 btrfs_release_path(path);
792 csum_offset = 0;
793 if (found_next) {
794 u64 tmp = total_bytes + root->sectorsize;
795 u64 next_sector = sector_sum->bytenr;
796 struct btrfs_sector_sum *next = sector_sum + 1;
797
798 while (tmp < sums->len) {
799 if (next_sector + root->sectorsize != next->bytenr)
800 break;
801 tmp += root->sectorsize;
802 next_sector = next->bytenr;
803 next++;
804 }
805 tmp = min(tmp, next_offset - file_key.offset);
806 tmp >>= root->fs_info->sb->s_blocksize_bits;
807 tmp = max((u64)1, tmp);
808 tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size));
809 ins_size = csum_size * tmp;
810 } else {
811 ins_size = csum_size;
812 }
813 path->leave_spinning = 1;
814 ret = btrfs_insert_empty_item(trans, root, path, &file_key,
815 ins_size);
816 path->leave_spinning = 0;
817 if (ret < 0)
818 goto fail_unlock;
819 if (ret != 0) {
820 WARN_ON(1);
821 goto fail_unlock;
822 }
823csum:
824 leaf = path->nodes[0];
825 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
826 ret = 0;
827 item = (struct btrfs_csum_item *)((unsigned char *)item +
828 csum_offset * csum_size);
829found:
830 item_end = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
831 item_end = (struct btrfs_csum_item *)((unsigned char *)item_end +
832 btrfs_item_size_nr(leaf, path->slots[0]));
833next_sector:
834
835 write_extent_buffer(leaf, &sector_sum->sum, (unsigned long)item, csum_size);
836
837 total_bytes += root->sectorsize;
838 sector_sum++;
839 if (total_bytes < sums->len) {
840 item = (struct btrfs_csum_item *)((char *)item +
841 csum_size);
842 if (item < item_end && bytenr + PAGE_CACHE_SIZE ==
843 sector_sum->bytenr) {
844 bytenr = sector_sum->bytenr;
845 goto next_sector;
846 }
847 }
848
849 btrfs_mark_buffer_dirty(path->nodes[0]);
850 if (total_bytes < sums->len) {
851 btrfs_release_path(path);
852 cond_resched();
853 goto again;
854 }
855out:
856 btrfs_free_path(path);
857 return ret;
858
859fail_unlock:
860 goto out;
861}
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19#include <linux/bio.h>
20#include <linux/slab.h>
21#include <linux/pagemap.h>
22#include <linux/highmem.h>
23#include "ctree.h"
24#include "disk-io.h"
25#include "transaction.h"
26#include "volumes.h"
27#include "print-tree.h"
28#include "compression.h"
29
30#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
31 sizeof(struct btrfs_item) * 2) / \
32 size) - 1))
33
34#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
35 PAGE_SIZE))
36
37#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
38 sizeof(struct btrfs_ordered_sum)) / \
39 sizeof(u32) * (r)->sectorsize)
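/*
 * MAX_CSUM_ITEMS bounds the checksums per csum item by the leaf data area
 * (minus room for two item headers), and MAX_ORDERED_SUM_BYTES bounds how
 * much data a single btrfs_ordered_sum may describe (one u32 csum per
 * sector) so the allocation fits in a page.
 */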
40
41int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
42 struct btrfs_root *root,
43 u64 objectid, u64 pos,
44 u64 disk_offset, u64 disk_num_bytes,
45 u64 num_bytes, u64 offset, u64 ram_bytes,
46 u8 compression, u8 encryption, u16 other_encoding)
47{
48 int ret = 0;
49 struct btrfs_file_extent_item *item;
50 struct btrfs_key file_key;
51 struct btrfs_path *path;
52 struct extent_buffer *leaf;
53
54 path = btrfs_alloc_path();
55 if (!path)
56 return -ENOMEM;
57 file_key.objectid = objectid;
58 file_key.offset = pos;
59 file_key.type = BTRFS_EXTENT_DATA_KEY;
60
61 path->leave_spinning = 1;
62 ret = btrfs_insert_empty_item(trans, root, path, &file_key,
63 sizeof(*item));
64 if (ret < 0)
65 goto out;
66 BUG_ON(ret); /* Can't happen */
67 leaf = path->nodes[0];
68 item = btrfs_item_ptr(leaf, path->slots[0],
69 struct btrfs_file_extent_item);
70 btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
71 btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
72 btrfs_set_file_extent_offset(leaf, item, offset);
73 btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
74 btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
75 btrfs_set_file_extent_generation(leaf, item, trans->transid);
76 btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
77 btrfs_set_file_extent_compression(leaf, item, compression);
78 btrfs_set_file_extent_encryption(leaf, item, encryption);
79 btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);
80
81 btrfs_mark_buffer_dirty(leaf);
82out:
83 btrfs_free_path(path);
84 return ret;
85}
86
87static struct btrfs_csum_item *
88btrfs_lookup_csum(struct btrfs_trans_handle *trans,
89 struct btrfs_root *root,
90 struct btrfs_path *path,
91 u64 bytenr, int cow)
92{
93 int ret;
94 struct btrfs_key file_key;
95 struct btrfs_key found_key;
96 struct btrfs_csum_item *item;
97 struct extent_buffer *leaf;
98 u64 csum_offset = 0;
99 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
100 int csums_in_item;
101
102 file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
103 file_key.offset = bytenr;
104 file_key.type = BTRFS_EXTENT_CSUM_KEY;
105 ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
106 if (ret < 0)
107 goto fail;
108 leaf = path->nodes[0];
109 if (ret > 0) {
110 ret = 1;
111 if (path->slots[0] == 0)
112 goto fail;
113 path->slots[0]--;
114 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
115 if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
116 goto fail;
117
118 csum_offset = (bytenr - found_key.offset) >>
119 root->fs_info->sb->s_blocksize_bits;
120 csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
121 csums_in_item /= csum_size;
122
123 if (csum_offset == csums_in_item) {
124 ret = -EFBIG;
125 goto fail;
126 } else if (csum_offset > csums_in_item) {
127 goto fail;
128 }
129 }
130 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
131 item = (struct btrfs_csum_item *)((unsigned char *)item +
132 csum_offset * csum_size);
133 return item;
134fail:
135 if (ret > 0)
136 ret = -ENOENT;
137 return ERR_PTR(ret);
138}
139
140int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
141 struct btrfs_root *root,
142 struct btrfs_path *path, u64 objectid,
143 u64 offset, int mod)
144{
145 int ret;
146 struct btrfs_key file_key;
147 int ins_len = mod < 0 ? -1 : 0;
148 int cow = mod != 0;
149
150 file_key.objectid = objectid;
151 file_key.offset = offset;
152 file_key.type = BTRFS_EXTENT_DATA_KEY;
153 ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
154 return ret;
155}
156
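/*
 * end_io helper for read bios whose checksum buffer had to be allocated
 * separately; frees that buffer.
 */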
157static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
158{
159 kfree(bio->csum_allocated);
160}
161
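/*
 * walk every block covered by a read bio and fill in its expected checksum,
 * taken either from the still pending ordered sums or from the csum tree.
 * With dst the csums go straight into the caller's buffer, otherwise they
 * are stored in the btrfs_io_bio (inline or in a buffer allocated here) for
 * verification at end_io time.
 */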
162static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
163 struct inode *inode, struct bio *bio,
164 u64 logical_offset, u32 *dst, int dio)
165{
166 struct bio_vec *bvec = bio->bi_io_vec;
167 struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
168 struct btrfs_csum_item *item = NULL;
169 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
170 struct btrfs_path *path;
171 u8 *csum;
172 u64 offset = 0;
173 u64 item_start_offset = 0;
174 u64 item_last_offset = 0;
175 u64 disk_bytenr;
176 u64 page_bytes_left;
177 u32 diff;
178 int nblocks;
179 int bio_index = 0;
180 int count;
181 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
182
183 path = btrfs_alloc_path();
184 if (!path)
185 return -ENOMEM;
186
187 nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
188 if (!dst) {
189 if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
190 btrfs_bio->csum_allocated = kmalloc_array(nblocks,
191 csum_size, GFP_NOFS);
192 if (!btrfs_bio->csum_allocated) {
193 btrfs_free_path(path);
194 return -ENOMEM;
195 }
196 btrfs_bio->csum = btrfs_bio->csum_allocated;
197 btrfs_bio->end_io = btrfs_io_bio_endio_readpage;
198 } else {
199 btrfs_bio->csum = btrfs_bio->csum_inline;
200 }
201 csum = btrfs_bio->csum;
202 } else {
203 csum = (u8 *)dst;
204 }
205
206 if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
207 path->reada = READA_FORWARD;
208
209 WARN_ON(bio->bi_vcnt <= 0);
210
211 /*
212 * the free space stuff is only read when it hasn't been
213 * updated in the current transaction. So, we can safely
214 * read from the commit root and sidestep a nasty deadlock
215 * between reading the free space cache and updating the csum tree.
216 */
217 if (btrfs_is_free_space_inode(inode)) {
218 path->search_commit_root = 1;
219 path->skip_locking = 1;
220 }
221
222 disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
223 if (dio)
224 offset = logical_offset;
225
226 page_bytes_left = bvec->bv_len;
227 while (bio_index < bio->bi_vcnt) {
228 if (!dio)
229 offset = page_offset(bvec->bv_page) + bvec->bv_offset;
230 count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
231 (u32 *)csum, nblocks);
232 if (count)
233 goto found;
234
235 if (!item || disk_bytenr < item_start_offset ||
236 disk_bytenr >= item_last_offset) {
237 struct btrfs_key found_key;
238 u32 item_size;
239
240 if (item)
241 btrfs_release_path(path);
242 item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
243 path, disk_bytenr, 0);
244 if (IS_ERR(item)) {
245 count = 1;
246 memset(csum, 0, csum_size);
247 if (BTRFS_I(inode)->root->root_key.objectid ==
248 BTRFS_DATA_RELOC_TREE_OBJECTID) {
249 set_extent_bits(io_tree, offset,
250 offset + root->sectorsize - 1,
251 EXTENT_NODATASUM, GFP_NOFS);
252 } else {
253 btrfs_info(BTRFS_I(inode)->root->fs_info,
254 "no csum found for inode %llu start %llu",
255 btrfs_ino(inode), offset);
256 }
257 item = NULL;
258 btrfs_release_path(path);
259 goto found;
260 }
261 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
262 path->slots[0]);
263
264 item_start_offset = found_key.offset;
265 item_size = btrfs_item_size_nr(path->nodes[0],
266 path->slots[0]);
267 item_last_offset = item_start_offset +
268 (item_size / csum_size) *
269 root->sectorsize;
270 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
271 struct btrfs_csum_item);
272 }
273 /*
274 * this byte range must be able to fit inside
275 * a single leaf so it will also fit inside a u32
276 */
277 diff = disk_bytenr - item_start_offset;
278 diff = diff / root->sectorsize;
279 diff = diff * csum_size;
280 count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
281 inode->i_sb->s_blocksize_bits);
282 read_extent_buffer(path->nodes[0], csum,
283 ((unsigned long)item) + diff,
284 csum_size * count);
285found:
286 csum += count * csum_size;
287 nblocks -= count;
288
289 while (count--) {
290 disk_bytenr += root->sectorsize;
291 offset += root->sectorsize;
292 page_bytes_left -= root->sectorsize;
293 if (!page_bytes_left) {
294 bio_index++;
295 /*
296 * make sure we're still inside the
297 * bio before we update page_bytes_left
298 */
299 if (bio_index >= bio->bi_vcnt) {
300 WARN_ON_ONCE(count);
301 goto done;
302 }
303 bvec++;
304 page_bytes_left = bvec->bv_len;
305 }
306
307 }
308 }
309
310done:
311 btrfs_free_path(path);
312 return 0;
313}
314
315int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
316 struct bio *bio, u32 *dst)
317{
318 return __btrfs_lookup_bio_sums(root, inode, bio, 0, dst, 0);
319}
320
321int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
322 struct bio *bio, u64 offset)
323{
324 return __btrfs_lookup_bio_sums(root, inode, bio, offset, NULL, 1);
325}
326
327int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
328 struct list_head *list, int search_commit)
329{
330 struct btrfs_key key;
331 struct btrfs_path *path;
332 struct extent_buffer *leaf;
333 struct btrfs_ordered_sum *sums;
334 struct btrfs_csum_item *item;
335 LIST_HEAD(tmplist);
336 unsigned long offset;
337 int ret;
338 size_t size;
339 u64 csum_end;
340 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
341
342 ASSERT(IS_ALIGNED(start, root->sectorsize) &&
343 IS_ALIGNED(end + 1, root->sectorsize));
344
345 path = btrfs_alloc_path();
346 if (!path)
347 return -ENOMEM;
348
349 if (search_commit) {
350 path->skip_locking = 1;
351 path->reada = READA_FORWARD;
352 path->search_commit_root = 1;
353 }
354
355 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
356 key.offset = start;
357 key.type = BTRFS_EXTENT_CSUM_KEY;
358
359 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
360 if (ret < 0)
361 goto fail;
362 if (ret > 0 && path->slots[0] > 0) {
363 leaf = path->nodes[0];
364 btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
365 if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
366 key.type == BTRFS_EXTENT_CSUM_KEY) {
367 offset = (start - key.offset) >>
368 root->fs_info->sb->s_blocksize_bits;
369 if (offset * csum_size <
370 btrfs_item_size_nr(leaf, path->slots[0] - 1))
371 path->slots[0]--;
372 }
373 }
374
375 while (start <= end) {
376 leaf = path->nodes[0];
377 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
378 ret = btrfs_next_leaf(root, path);
379 if (ret < 0)
380 goto fail;
381 if (ret > 0)
382 break;
383 leaf = path->nodes[0];
384 }
385
386 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
387 if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
388 key.type != BTRFS_EXTENT_CSUM_KEY ||
389 key.offset > end)
390 break;
391
392 if (key.offset > start)
393 start = key.offset;
394
395 size = btrfs_item_size_nr(leaf, path->slots[0]);
396 csum_end = key.offset + (size / csum_size) * root->sectorsize;
397 if (csum_end <= start) {
398 path->slots[0]++;
399 continue;
400 }
401
402 csum_end = min(csum_end, end + 1);
403 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
404 struct btrfs_csum_item);
405 while (start < csum_end) {
406 size = min_t(size_t, csum_end - start,
407 MAX_ORDERED_SUM_BYTES(root));
408 sums = kzalloc(btrfs_ordered_sum_size(root, size),
409 GFP_NOFS);
410 if (!sums) {
411 ret = -ENOMEM;
412 goto fail;
413 }
414
415 sums->bytenr = start;
416 sums->len = (int)size;
417
418 offset = (start - key.offset) >>
419 root->fs_info->sb->s_blocksize_bits;
420 offset *= csum_size;
421 size >>= root->fs_info->sb->s_blocksize_bits;
422
423 read_extent_buffer(path->nodes[0],
424 sums->sums,
425 ((unsigned long)item) + offset,
426 csum_size * size);
427
428 start += root->sectorsize * size;
429 list_add_tail(&sums->list, &tmplist);
430 }
431 path->slots[0]++;
432 }
433 ret = 0;
434fail:
435 while (ret < 0 && !list_empty(&tmplist)) {
436 sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
437 list_del(&sums->list);
438 kfree(sums);
439 }
440 list_splice_tail(&tmplist, list);
441
442 btrfs_free_path(path);
443 return ret;
444}
445
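/*
 * checksum a write bio one sector at a time and attach the resulting sums
 * to the ordered extent(s) covering it.  With contig set the bio is one
 * contiguous range starting at file_start, otherwise the file offset comes
 * from each page.
 */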
446int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
447 struct bio *bio, u64 file_start, int contig)
448{
449 struct btrfs_ordered_sum *sums;
450 struct btrfs_ordered_extent *ordered;
451 char *data;
452 struct bio_vec *bvec = bio->bi_io_vec;
453 int bio_index = 0;
454 int index;
455 int nr_sectors;
456 int i;
457 unsigned long total_bytes = 0;
458 unsigned long this_sum_bytes = 0;
459 u64 offset;
460
461 WARN_ON(bio->bi_vcnt <= 0);
462 sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_iter.bi_size),
463 GFP_NOFS);
464 if (!sums)
465 return -ENOMEM;
466
467 sums->len = bio->bi_iter.bi_size;
468 INIT_LIST_HEAD(&sums->list);
469
470 if (contig)
471 offset = file_start;
472 else
473 offset = page_offset(bvec->bv_page) + bvec->bv_offset;
474
475 ordered = btrfs_lookup_ordered_extent(inode, offset);
476 BUG_ON(!ordered); /* Logic error */
477 sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
478 index = 0;
479
480 while (bio_index < bio->bi_vcnt) {
481 if (!contig)
482 offset = page_offset(bvec->bv_page) + bvec->bv_offset;
483
484 data = kmap_atomic(bvec->bv_page);
485
486 nr_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
487 bvec->bv_len + root->sectorsize
488 - 1);
489
490 for (i = 0; i < nr_sectors; i++) {
491 if (offset >= ordered->file_offset + ordered->len ||
492 offset < ordered->file_offset) {
493 unsigned long bytes_left;
494
495 kunmap_atomic(data);
496 sums->len = this_sum_bytes;
497 this_sum_bytes = 0;
498 btrfs_add_ordered_sum(inode, ordered, sums);
499 btrfs_put_ordered_extent(ordered);
500
501 bytes_left = bio->bi_iter.bi_size - total_bytes;
502
503 sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
504 GFP_NOFS);
505 BUG_ON(!sums); /* -ENOMEM */
506 sums->len = bytes_left;
507 ordered = btrfs_lookup_ordered_extent(inode,
508 offset);
509 ASSERT(ordered); /* Logic error */
510 sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
511 + total_bytes;
512 index = 0;
513
514 data = kmap_atomic(bvec->bv_page);
515 }
516
517 sums->sums[index] = ~(u32)0;
518 sums->sums[index]
519 = btrfs_csum_data(data + bvec->bv_offset
520 + (i * root->sectorsize),
521 sums->sums[index],
522 root->sectorsize);
523 btrfs_csum_final(sums->sums[index],
524 (char *)(sums->sums + index));
525 index++;
526 offset += root->sectorsize;
527 this_sum_bytes += root->sectorsize;
528 total_bytes += root->sectorsize;
529 }
530
531 kunmap_atomic(data);
532
533 bio_index++;
534 bvec++;
535 }
536 this_sum_bytes = 0;
537 btrfs_add_ordered_sum(inode, ordered, sums);
538 btrfs_put_ordered_extent(ordered);
539 return 0;
540}
541
542/*
543 * helper function for csum removal, this expects the
544 * key to describe the csum pointed to by the path, and it expects
545 * the csum to overlap the range [bytenr, len]
546 *
547 * The csum should not be entirely contained in the range and the
548 * range should not be entirely contained in the csum.
549 *
550 * This calls btrfs_truncate_item with the correct args based on the
551 * overlap, and fixes up the key as required.
552 */
553static noinline void truncate_one_csum(struct btrfs_root *root,
554 struct btrfs_path *path,
555 struct btrfs_key *key,
556 u64 bytenr, u64 len)
557{
558 struct extent_buffer *leaf;
559 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
560 u64 csum_end;
561 u64 end_byte = bytenr + len;
562 u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits;
563
564 leaf = path->nodes[0];
565 csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
566 csum_end <<= root->fs_info->sb->s_blocksize_bits;
567 csum_end += key->offset;
568
569 if (key->offset < bytenr && csum_end <= end_byte) {
570 /*
571 * [ bytenr - len ]
572 * [ ]
573 * [csum ]
574 * A simple truncate off the end of the item
575 */
576 u32 new_size = (bytenr - key->offset) >> blocksize_bits;
577 new_size *= csum_size;
578 btrfs_truncate_item(root, path, new_size, 1);
579 } else if (key->offset >= bytenr && csum_end > end_byte &&
580 end_byte > key->offset) {
581 /*
582 * [ bytenr - len ]
583 * [ ]
584 * [csum ]
585 * we need to truncate from the beginning of the csum
586 */
587 u32 new_size = (csum_end - end_byte) >> blocksize_bits;
588 new_size *= csum_size;
589
590 btrfs_truncate_item(root, path, new_size, 0);
591
592 key->offset = end_byte;
593 btrfs_set_item_key_safe(root->fs_info, path, key);
594 } else {
595 BUG();
596 }
597}
598
599/*
600 * deletes the csum items from the csum tree for a given
601 * range of bytes.
602 */
603int btrfs_del_csums(struct btrfs_trans_handle *trans,
604 struct btrfs_root *root, u64 bytenr, u64 len)
605{
606 struct btrfs_path *path;
607 struct btrfs_key key;
608 u64 end_byte = bytenr + len;
609 u64 csum_end;
610 struct extent_buffer *leaf;
611 int ret;
612 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
613 int blocksize_bits = root->fs_info->sb->s_blocksize_bits;
614
615 root = root->fs_info->csum_root;
616
617 path = btrfs_alloc_path();
618 if (!path)
619 return -ENOMEM;
620
621 while (1) {
622 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
623 key.offset = end_byte - 1;
624 key.type = BTRFS_EXTENT_CSUM_KEY;
625
626 path->leave_spinning = 1;
627 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
628 if (ret > 0) {
629 if (path->slots[0] == 0)
630 break;
631 path->slots[0]--;
632 } else if (ret < 0) {
633 break;
634 }
635
636 leaf = path->nodes[0];
637 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
638
639 if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
640 key.type != BTRFS_EXTENT_CSUM_KEY) {
641 break;
642 }
643
644 if (key.offset >= end_byte)
645 break;
646
647 csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
648 csum_end <<= blocksize_bits;
649 csum_end += key.offset;
650
651 /* this csum ends before we start, we're done */
652 if (csum_end <= bytenr)
653 break;
654
655 /* delete the entire item, it is inside our range */
656 if (key.offset >= bytenr && csum_end <= end_byte) {
657 ret = btrfs_del_item(trans, root, path);
658 if (ret)
659 goto out;
660 if (key.offset == bytenr)
661 break;
662 } else if (key.offset < bytenr && csum_end > end_byte) {
663 unsigned long offset;
664 unsigned long shift_len;
665 unsigned long item_offset;
666 /*
667 * [ bytenr - len ]
668 * [csum ]
669 *
670 * Our bytes are in the middle of the csum,
671 * we need to split this item and insert a new one.
672 *
673 * But we can't drop the path because the
674 * csum could change, get removed, extended etc.
675 *
676 * The trick here is the max size of a csum item leaves
677 * enough room in the tree block for a single
678 * item header. So, we split the item in place,
679 * adding a new header pointing to the existing
680 * bytes. Then we loop around again and we have
681 * a nicely formed csum item that we can neatly
682 * truncate.
683 */
684 offset = (bytenr - key.offset) >> blocksize_bits;
685 offset *= csum_size;
686
687 shift_len = (len >> blocksize_bits) * csum_size;
688
689 item_offset = btrfs_item_ptr_offset(leaf,
690 path->slots[0]);
691
692 memset_extent_buffer(leaf, 0, item_offset + offset,
693 shift_len);
694 key.offset = bytenr;
695
696 /*
697 * btrfs_split_item returns -EAGAIN when the
698 * item changed size or key
699 */
700 ret = btrfs_split_item(trans, root, path, &key, offset);
701 if (ret && ret != -EAGAIN) {
702 btrfs_abort_transaction(trans, root, ret);
703 goto out;
704 }
705
706 key.offset = end_byte - 1;
707 } else {
708 truncate_one_csum(root, path, &key, bytenr, len);
709 if (key.offset < bytenr)
710 break;
711 }
712 btrfs_release_path(path);
713 }
714 ret = 0;
715out:
716 btrfs_free_path(path);
717 return ret;
718}
719
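/*
 * write the checksums from an ordered sum into the csum tree, extending an
 * existing csum item when possible and inserting new items otherwise.
 */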
720int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
721 struct btrfs_root *root,
722 struct btrfs_ordered_sum *sums)
723{
724 struct btrfs_key file_key;
725 struct btrfs_key found_key;
726 struct btrfs_path *path;
727 struct btrfs_csum_item *item;
728 struct btrfs_csum_item *item_end;
729 struct extent_buffer *leaf = NULL;
730 u64 next_offset;
731 u64 total_bytes = 0;
732 u64 csum_offset;
733 u64 bytenr;
734 u32 nritems;
735 u32 ins_size;
736 int index = 0;
737 int found_next;
738 int ret;
739 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
740
741 path = btrfs_alloc_path();
742 if (!path)
743 return -ENOMEM;
744again:
745 next_offset = (u64)-1;
746 found_next = 0;
747 bytenr = sums->bytenr + total_bytes;
748 file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
749 file_key.offset = bytenr;
750 file_key.type = BTRFS_EXTENT_CSUM_KEY;
751
752 item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
753 if (!IS_ERR(item)) {
754 ret = 0;
755 leaf = path->nodes[0];
756 item_end = btrfs_item_ptr(leaf, path->slots[0],
757 struct btrfs_csum_item);
758 item_end = (struct btrfs_csum_item *)((char *)item_end +
759 btrfs_item_size_nr(leaf, path->slots[0]));
760 goto found;
761 }
762 ret = PTR_ERR(item);
763 if (ret != -EFBIG && ret != -ENOENT)
764 goto fail_unlock;
765
766 if (ret == -EFBIG) {
767 u32 item_size;
768 /* we found one, but it isn't big enough yet */
769 leaf = path->nodes[0];
770 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
771 if ((item_size / csum_size) >=
772 MAX_CSUM_ITEMS(root, csum_size)) {
773 /* already at max size, make a new one */
774 goto insert;
775 }
776 } else {
777 int slot = path->slots[0] + 1;
778 /* we didn't find a csum item, insert one */
779 nritems = btrfs_header_nritems(path->nodes[0]);
780 if (!nritems || (path->slots[0] >= nritems - 1)) {
781 ret = btrfs_next_leaf(root, path);
782 if (ret == 1)
783 found_next = 1;
784 if (ret != 0)
785 goto insert;
786 slot = path->slots[0];
787 }
788 btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
789 if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
790 found_key.type != BTRFS_EXTENT_CSUM_KEY) {
791 found_next = 1;
792 goto insert;
793 }
794 next_offset = found_key.offset;
795 found_next = 1;
796 goto insert;
797 }
798
799 /*
800 * at this point, we know the tree has an item, but it isn't big
801 * enough yet to put our csum in. Grow it
802 */
803 btrfs_release_path(path);
804 ret = btrfs_search_slot(trans, root, &file_key, path,
805 csum_size, 1);
806 if (ret < 0)
807 goto fail_unlock;
808
809 if (ret > 0) {
810 if (path->slots[0] == 0)
811 goto insert;
812 path->slots[0]--;
813 }
814
815 leaf = path->nodes[0];
816 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
817 csum_offset = (bytenr - found_key.offset) >>
818 root->fs_info->sb->s_blocksize_bits;
819
820 if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
821 found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
822 csum_offset >= MAX_CSUM_ITEMS(root, csum_size)) {
823 goto insert;
824 }
825
826 if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
827 csum_size) {
828 int extend_nr;
829 u64 tmp;
830 u32 diff;
831 u32 free_space;
832
833 if (btrfs_leaf_free_space(root, leaf) <
834 sizeof(struct btrfs_item) + csum_size * 2)
835 goto insert;
836
837 free_space = btrfs_leaf_free_space(root, leaf) -
838 sizeof(struct btrfs_item) - csum_size;
839 tmp = sums->len - total_bytes;
840 tmp >>= root->fs_info->sb->s_blocksize_bits;
841 WARN_ON(tmp < 1);
842
843 extend_nr = max_t(int, 1, (int)tmp);
844 diff = (csum_offset + extend_nr) * csum_size;
845 diff = min(diff, MAX_CSUM_ITEMS(root, csum_size) * csum_size);
846
847 diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
848 diff = min(free_space, diff);
849 diff /= csum_size;
850 diff *= csum_size;
851
852 btrfs_extend_item(root, path, diff);
853 ret = 0;
854 goto csum;
855 }
856
857insert:
858 btrfs_release_path(path);
859 csum_offset = 0;
860 if (found_next) {
861 u64 tmp;
862
863 tmp = sums->len - total_bytes;
864 tmp >>= root->fs_info->sb->s_blocksize_bits;
865 tmp = min(tmp, (next_offset - file_key.offset) >>
866 root->fs_info->sb->s_blocksize_bits);
867
868 tmp = max((u64)1, tmp);
869 tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size));
870 ins_size = csum_size * tmp;
871 } else {
872 ins_size = csum_size;
873 }
874 path->leave_spinning = 1;
875 ret = btrfs_insert_empty_item(trans, root, path, &file_key,
876 ins_size);
877 path->leave_spinning = 0;
878 if (ret < 0)
879 goto fail_unlock;
880 if (WARN_ON(ret != 0))
881 goto fail_unlock;
882 leaf = path->nodes[0];
883csum:
884 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
885 item_end = (struct btrfs_csum_item *)((unsigned char *)item +
886 btrfs_item_size_nr(leaf, path->slots[0]));
887 item = (struct btrfs_csum_item *)((unsigned char *)item +
888 csum_offset * csum_size);
889found:
890 ins_size = (u32)(sums->len - total_bytes) >>
891 root->fs_info->sb->s_blocksize_bits;
892 ins_size *= csum_size;
893 ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
894 ins_size);
895 write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
896 ins_size);
897
898 ins_size /= csum_size;
899 total_bytes += ins_size * root->sectorsize;
900 index += ins_size;
901
902 btrfs_mark_buffer_dirty(path->nodes[0]);
903 if (total_bytes < sums->len) {
904 btrfs_release_path(path);
905 cond_resched();
906 goto again;
907 }
908out:
909 btrfs_free_path(path);
910 return ret;
911
912fail_unlock:
913 goto out;
914}
915
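/*
 * translate an on-disk btrfs_file_extent_item into the in-memory extent_map
 * representation used by the extent map tree.
 */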
916void btrfs_extent_item_to_extent_map(struct inode *inode,
917 const struct btrfs_path *path,
918 struct btrfs_file_extent_item *fi,
919 const bool new_inline,
920 struct extent_map *em)
921{
922 struct btrfs_root *root = BTRFS_I(inode)->root;
923 struct extent_buffer *leaf = path->nodes[0];
924 const int slot = path->slots[0];
925 struct btrfs_key key;
926 u64 extent_start, extent_end;
927 u64 bytenr;
928 u8 type = btrfs_file_extent_type(leaf, fi);
929 int compress_type = btrfs_file_extent_compression(leaf, fi);
930
931 em->bdev = root->fs_info->fs_devices->latest_bdev;
932 btrfs_item_key_to_cpu(leaf, &key, slot);
933 extent_start = key.offset;
934
935 if (type == BTRFS_FILE_EXTENT_REG ||
936 type == BTRFS_FILE_EXTENT_PREALLOC) {
937 extent_end = extent_start +
938 btrfs_file_extent_num_bytes(leaf, fi);
939 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
940 size_t size;
941 size = btrfs_file_extent_inline_len(leaf, slot, fi);
942 extent_end = ALIGN(extent_start + size, root->sectorsize);
943 }
944
945 em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
946 if (type == BTRFS_FILE_EXTENT_REG ||
947 type == BTRFS_FILE_EXTENT_PREALLOC) {
948 em->start = extent_start;
949 em->len = extent_end - extent_start;
950 em->orig_start = extent_start -
951 btrfs_file_extent_offset(leaf, fi);
952 em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
953 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
954 if (bytenr == 0) {
955 em->block_start = EXTENT_MAP_HOLE;
956 return;
957 }
958 if (compress_type != BTRFS_COMPRESS_NONE) {
959 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
960 em->compress_type = compress_type;
961 em->block_start = bytenr;
962 em->block_len = em->orig_block_len;
963 } else {
964 bytenr += btrfs_file_extent_offset(leaf, fi);
965 em->block_start = bytenr;
966 em->block_len = em->len;
967 if (type == BTRFS_FILE_EXTENT_PREALLOC)
968 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
969 }
970 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
971 em->block_start = EXTENT_MAP_INLINE;
972 em->start = extent_start;
973 em->len = extent_end - extent_start;
974 /*
975 * Initialize orig_start and block_len with the same values
976 * as in inode.c:btrfs_get_extent().
977 */
978 em->orig_start = EXTENT_MAP_HOLE;
979 em->block_len = (u64)-1;
980 if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
981 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
982 em->compress_type = compress_type;
983 }
984 } else {
985 btrfs_err(root->fs_info,
986 "unknown file extent item type %d, inode %llu, offset %llu, root %llu",
987 type, btrfs_ino(inode), extent_start,
988 root->root_key.objectid);
989 }
990}