// SPDX-License-Identifier: GPL-2.0

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/rwsem.h>
#include <linux/xattr.h>
#include <linux/security.h>
#include <linux/posix_acl_xattr.h>
#include <linux/iversion.h>
#include <linux/fsverity.h>
#include <linux/sched/mm.h>
#include "messages.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "fs.h"
#include "accessors.h"
#include "ioctl.h"
#include "verity.h"
#include "orphan.h"

/*
 * Implementation of the interface defined in struct fsverity_operations.
 *
 * The main question is how and where to store the verity descriptor and the
 * Merkle tree. We store both in dedicated btree items in the filesystem tree,
 * together with the rest of the inode metadata. This means we'll need to do
 * extra work to encrypt them once encryption is supported in btrfs, but btrfs
 * has a lot of careful code around i_size and it seems better to make a new key
 * type than try and adjust all of our expectations for i_size.
 *
 * Note that this differs from the implementation in ext4 and f2fs, where
 * this data is stored as if it were in the file, but past EOF. However, btrfs
 * does not have a widespread mechanism for caching opaque metadata pages, so we
 * do pretend that the Merkle tree pages themselves are past EOF for the
 * purposes of caching them (as opposed to creating a virtual inode).
 *
 * fs verity items are stored under two different key types on disk.
 * The descriptor items:
 * [ inode objectid, BTRFS_VERITY_DESC_ITEM_KEY, offset ]
 *
 * At offset 0, we store a btrfs_verity_descriptor_item which tracks the
 * size of the descriptor item and some extra data for encryption.
 * Starting at offset 1, these hold the generic fs verity descriptor.
 * The latter are opaque to btrfs, we just read and write them as a blob for
 * the higher level verity code. The most common descriptor size is 256 bytes.
 *
 * The merkle tree items:
 * [ inode objectid, BTRFS_VERITY_MERKLE_ITEM_KEY, offset ]
 *
 * These also start at offset 0, and correspond to the merkle tree bytes.
 * So when fsverity asks for page 0 of the merkle tree, we pull up one page
 * starting at offset 0 for this key type. These are also opaque to btrfs,
 * we're blindly storing whatever fsverity sends down.
 *
 * Another important consideration is the fact that the Merkle tree data scales
 * linearly with the size of the file (with 4K pages/blocks and SHA-256, it's
 * ~1/127th the size) so for large files, writing the tree can be a lengthy
 * operation. For that reason, we guard the whole enable verity operation
 * (between begin_enable_verity and end_enable_verity) with an orphan item.
 * Again, because the data can be pretty large, it's quite possible that we
 * could run out of space writing it, so we try our best to handle errors by
 * stopping and rolling back rather than aborting the victim transaction.
 */

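/*
 * As an illustration of the layout described above (the sizes here are just an
 * example, not a requirement): a file with a 256 byte descriptor and an 8KiB
 * Merkle tree ends up with the following items, given that write_key_bytes()
 * below splits the data into items of at most 2K:
 *
 *   [ ino, BTRFS_VERITY_DESC_ITEM_KEY, 0 ]      struct btrfs_verity_descriptor_item
 *   [ ino, BTRFS_VERITY_DESC_ITEM_KEY, 1 ]      256 bytes of raw fsverity descriptor
 *   [ ino, BTRFS_VERITY_MERKLE_ITEM_KEY, 0 ]    first 2048 bytes of the tree
 *   [ ino, BTRFS_VERITY_MERKLE_ITEM_KEY, 2048 ] ... and so on at 4096 and 6144
 */
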
#define MERKLE_START_ALIGN 65536

/*
 * Compute the logical file offset where we cache the Merkle tree.
 *
 * @inode: inode of the verity file
 *
 * For the purposes of caching the Merkle tree pages, as required by
 * fs-verity, it is convenient to do size computations in terms of a file
 * offset, rather than in terms of page indices.
 *
 * Use 64K to be sure it's past the last page in the file, even with 64K pages.
 * That rounding operation itself can overflow loff_t, so we do it in u64 and
 * check.
 *
 * Returns the file offset on success, negative error code on failure.
 */
static loff_t merkle_file_pos(const struct inode *inode)
{
	u64 sz = inode->i_size;
	u64 rounded = round_up(sz, MERKLE_START_ALIGN);

	if (rounded > inode->i_sb->s_maxbytes)
		return -EFBIG;

	return rounded;
}

/*
 * Drop all the items for this inode with this key_type.
 *
 * @inode: inode to drop items for
 * @key_type: type of items to drop (BTRFS_VERITY_DESC_ITEM or
 *            BTRFS_VERITY_MERKLE_ITEM)
 *
 * Before doing a verity enable we clean up any existing verity items.
 * This is also used to clean up if a verity enable failed halfway through.
 *
 * Returns number of dropped items on success, negative error code on failure.
 */
static int drop_verity_items(struct btrfs_inode *inode, u8 key_type)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = inode->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	int count = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		/* 1 for the item being dropped */
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			goto out;
		}

		/*
		 * Walk backwards through all the items until we find one that
		 * isn't from our key type or objectid
		 */
		key.objectid = btrfs_ino(inode);
		key.type = key_type;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			ret = 0;
			/* No more keys of this type, we're done */
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		} else if (ret < 0) {
			btrfs_end_transaction(trans);
			goto out;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

		/* No more keys of this type, we're done */
		if (key.objectid != btrfs_ino(inode) || key.type != key_type)
			break;

		/*
		 * This shouldn't be a performance sensitive function because
		 * it's not used as part of truncate. If it ever becomes
		 * perf sensitive, change this to walk forward and bulk delete
		 * items
		 */
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
		if (ret) {
			btrfs_end_transaction(trans);
			goto out;
		}
		count++;
		btrfs_release_path(path);
		btrfs_end_transaction(trans);
	}
	ret = count;
	btrfs_end_transaction(trans);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Drop all verity items
 *
 * @inode: inode to drop verity items for
 *
 * In most contexts where we are dropping verity items, we want to do it for all
 * the types of verity items, not a particular one.
 *
 * Returns: 0 on success, negative error code on failure.
 */
int btrfs_drop_verity_items(struct btrfs_inode *inode)
{
	int ret;

	ret = drop_verity_items(inode, BTRFS_VERITY_DESC_ITEM_KEY);
	if (ret < 0)
		return ret;
	ret = drop_verity_items(inode, BTRFS_VERITY_MERKLE_ITEM_KEY);
	if (ret < 0)
		return ret;

	return 0;
}

/*
 * Insert and write inode items with a given key type and offset.
 *
 * @inode: inode to insert for
 * @key_type: key type to insert
 * @offset: item offset to insert at
 * @src: source data to write
 * @len: length of source data to write
 *
 * Write len bytes from src into items of up to 2K length.
 * The inserted items will have key (ino, key_type, offset + off) where off is
 * consecutively increasing from 0 up to the last item ending at offset + len.
 *
 * Returns 0 on success and a negative error code on failure.
 */
static int write_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
			   const char *src, u64 len)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long copy_bytes;
	unsigned long src_offset = 0;
	void *data;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (len > 0) {
		/* 1 for the new item being inserted */
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}

		key.objectid = btrfs_ino(inode);
		key.type = key_type;
		key.offset = offset;

		/*
		 * Insert 2K at a time mostly to be friendly for smaller leaf
		 * size filesystems
		 */
		copy_bytes = min_t(u64, len, 2048);

		ret = btrfs_insert_empty_item(trans, root, path, &key, copy_bytes);
		if (ret) {
			btrfs_end_transaction(trans);
			break;
		}

		leaf = path->nodes[0];

		data = btrfs_item_ptr(leaf, path->slots[0], void);
		write_extent_buffer(leaf, src + src_offset,
				    (unsigned long)data, copy_bytes);
		offset += copy_bytes;
		src_offset += copy_bytes;
		len -= copy_bytes;

		btrfs_release_path(path);
		btrfs_end_transaction(trans);
	}

	btrfs_free_path(path);
	return ret;
}

/*
 * Read inode items of the given key type and offset from the btree.
 *
 * @inode: inode to read items of
 * @key_type: key type to read
 * @offset: item offset to read from
 * @dest: Buffer to read into. This parameter has slightly tricky
 *        semantics. If it is NULL, the function will not do any copying
 *        and will just return the size of all the items up to len bytes.
 *        If dest_page is passed, then the function will kmap_local the
 *        page and ignore dest, but it must still be non-NULL to avoid the
 *        counting-only behavior.
 * @len: length in bytes to read
 * @dest_page: copy into this page instead of the dest buffer
 *
 * Helper function to read items from the btree. This returns the number of
 * bytes read or < 0 for errors. We can return short reads if the items don't
 * exist on disk or aren't big enough to fill the desired length. Supports
 * reading into a provided buffer (dest) or into the page cache (dest_page).
 *
 * Returns number of bytes read or a negative error code on failure.
 */
static int read_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
			  char *dest, u64 len, struct page *dest_page)
{
	struct btrfs_path *path;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 item_end;
	u64 copy_end;
	int copied = 0;
	u32 copy_offset;
	unsigned long copy_bytes;
	unsigned long dest_offset = 0;
	void *data;
	char *kaddr = dest;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (dest_page)
		path->reada = READA_FORWARD;

	key.objectid = btrfs_ino(inode);
	key.type = key_type;
	key.offset = offset;

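	/*
	 * Note on the lookup below: if the exact key is not found,
	 * btrfs_search_slot() returns > 0 and leaves the path at the slot
	 * where the key would be inserted, so we step back one slot to land
	 * on the item that may contain our starting offset.
	 */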
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		ret = 0;
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}

	while (len > 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid != btrfs_ino(inode) || key.type != key_type)
			break;

		item_end = btrfs_item_size(leaf, path->slots[0]) + key.offset;

		if (copied > 0) {
			/*
			 * Once we've copied something, we want all of the items
			 * to be sequential
			 */
			if (key.offset != offset)
				break;
		} else {
			/*
			 * Our initial offset might be in the middle of an
			 * item. Make sure it all makes sense.
			 */
			if (key.offset > offset)
				break;
			if (item_end <= offset)
				break;
		}

		/* dest = NULL to just sum all the item lengths */
		if (!dest)
			copy_end = item_end;
		else
			copy_end = min(offset + len, item_end);

		/* Number of bytes in this item we want to copy */
		copy_bytes = copy_end - offset;

		/* Offset from the start of item for copying */
		copy_offset = offset - key.offset;

		if (dest) {
			if (dest_page)
				kaddr = kmap_local_page(dest_page);

			data = btrfs_item_ptr(leaf, path->slots[0], void);
			read_extent_buffer(leaf, kaddr + dest_offset,
					   (unsigned long)data + copy_offset,
					   copy_bytes);

			if (dest_page)
				kunmap_local(kaddr);
		}

		offset += copy_bytes;
		dest_offset += copy_bytes;
		len -= copy_bytes;
		copied += copy_bytes;

		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			/*
			 * We've reached the last slot in this leaf and we need
			 * to go to the next leaf.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				break;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
		}
	}
out:
	btrfs_free_path(path);
	if (!ret)
		ret = copied;
	return ret;
}

/*
 * Delete an fsverity orphan
 *
 * @trans: transaction to do the delete in
 * @inode: inode whose orphan item we are deleting
 *
 * Capture verity orphan specific logic that is repeated in the couple of places
 * we delete verity orphans. Specifically, handling ENOENT and ignoring inodes
 * with 0 links.
 *
 * Returns zero on success or a negative error code on failure.
 */
static int del_orphan(struct btrfs_trans_handle *trans, struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	int ret;

	/*
	 * If the inode has no links, it is either already unlinked, or was
	 * created with O_TMPFILE. In either case, it should have an orphan from
	 * that other operation. Rather than reference count the orphans, we
	 * simply ignore them here, because we only invoke the verity path in
	 * the orphan logic when i_nlink is 1.
	 */
	if (!inode->vfs_inode.i_nlink)
		return 0;

	ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
	if (ret == -ENOENT)
		ret = 0;
	return ret;
}

/*
 * Rollback in-progress verity if we encounter an error.
 *
 * @inode: inode verity had an error for
 *
 * We try to handle recoverable errors while enabling verity by rolling it back
 * and just failing the operation, rather than having an fs level error no
 * matter what. However, any error in rollback is unrecoverable.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int rollback_verity(struct btrfs_inode *inode)
{
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *root = inode->root;
	int ret;

	ASSERT(inode_is_locked(&inode->vfs_inode));
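	/*
	 * Drop any Merkle tree pages that were cached past EOF (see
	 * merkle_file_pos()); truncating the mapping at i_size discards them.
	 */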
	truncate_inode_pages(inode->vfs_inode.i_mapping, inode->vfs_inode.i_size);
	clear_bit(BTRFS_INODE_VERITY_IN_PROGRESS, &inode->runtime_flags);
	ret = btrfs_drop_verity_items(inode);
	if (ret) {
		btrfs_handle_fs_error(root->fs_info, ret,
				      "failed to drop verity items in rollback %llu",
				      (u64)inode->vfs_inode.i_ino);
		goto out;
	}

	/*
	 * 1 for updating the inode flag
	 * 1 for deleting the orphan
	 */
	trans = btrfs_start_transaction(root, 2);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		btrfs_handle_fs_error(root->fs_info, ret,
				      "failed to start transaction in verity rollback %llu",
				      (u64)inode->vfs_inode.i_ino);
		goto out;
	}
	inode->ro_flags &= ~BTRFS_INODE_RO_VERITY;
	btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
	ret = btrfs_update_inode(trans, inode);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	ret = del_orphan(trans, inode);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
out:
	if (trans)
		btrfs_end_transaction(trans);
	return ret;
}

/*
 * Finalize making the file a valid verity file
 *
 * @inode: inode to be marked as verity
 * @desc: contents of the verity descriptor to write (not NULL)
 * @desc_size: size of the verity descriptor
 *
 * Do the actual work of finalizing verity after successfully writing the Merkle
 * tree:
 *
 * - write out the descriptor items
 * - mark the inode with the verity flag
 * - delete the orphan item
 * - mark the ro compat bit
 * - clear the in progress bit
 *
 * Returns 0 on success, negative error code on failure.
 */
static int finish_verity(struct btrfs_inode *inode, const void *desc,
			 size_t desc_size)
{
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *root = inode->root;
	struct btrfs_verity_descriptor_item item;
	int ret;

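	/*
	 * The descriptor is stored in two parts, matching the layout described
	 * at the top of this file: a btrfs_verity_descriptor_item recording the
	 * size at key offset 0, and the raw fsverity descriptor bytes starting
	 * at key offset 1.
	 */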
	/* Write out the descriptor item */
	memset(&item, 0, sizeof(item));
	btrfs_set_stack_verity_descriptor_size(&item, desc_size);
	ret = write_key_bytes(inode, BTRFS_VERITY_DESC_ITEM_KEY, 0,
			      (const char *)&item, sizeof(item));
	if (ret)
		goto out;

	/* Write out the descriptor itself */
	ret = write_key_bytes(inode, BTRFS_VERITY_DESC_ITEM_KEY, 1,
			      desc, desc_size);
	if (ret)
		goto out;

	/*
	 * 1 for updating the inode flag
	 * 1 for deleting the orphan
	 */
	trans = btrfs_start_transaction(root, 2);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}
	inode->ro_flags |= BTRFS_INODE_RO_VERITY;
	btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
	ret = btrfs_update_inode(trans, inode);
	if (ret)
		goto end_trans;
	ret = del_orphan(trans, inode);
	if (ret)
		goto end_trans;
	clear_bit(BTRFS_INODE_VERITY_IN_PROGRESS, &inode->runtime_flags);
	btrfs_set_fs_compat_ro(root->fs_info, VERITY);
end_trans:
	btrfs_end_transaction(trans);
out:
	return ret;
}

/*
 * fsverity op that begins enabling verity.
 *
 * @filp: file to enable verity on
 *
 * Begin enabling fsverity for the file. We drop any existing verity items, add
 * an orphan and set the in progress bit.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int btrfs_begin_enable_verity(struct file *filp)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(filp));
	struct btrfs_root *root = inode->root;
	struct btrfs_trans_handle *trans;
	int ret;

	ASSERT(inode_is_locked(file_inode(filp)));

	if (test_bit(BTRFS_INODE_VERITY_IN_PROGRESS, &inode->runtime_flags))
		return -EBUSY;

	/*
	 * This should almost never do anything, but theoretically, it's
	 * possible that we failed to enable verity on a file, then were
	 * interrupted or failed while rolling back, failed to clean up the
	 * orphan, and finally attempt to enable verity again.
	 */
	ret = btrfs_drop_verity_items(inode);
	if (ret)
		return ret;

	/* 1 for the orphan item */
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_orphan_add(trans, inode);
	if (!ret)
		set_bit(BTRFS_INODE_VERITY_IN_PROGRESS, &inode->runtime_flags);
	btrfs_end_transaction(trans);

	return 0;
}

/*
 * fsverity op that ends enabling verity.
 *
 * @filp: file we are finishing enabling verity on
 * @desc: verity descriptor to write out (NULL in error conditions)
 * @desc_size: size of the verity descriptor (variable with signatures)
 * @merkle_tree_size: size of the merkle tree in bytes
 *
 * If desc is NULL, then the VFS is signaling that an error occurred during
 * verity enable, and we should try to roll back. Otherwise, attempt to finish
 * verity.
 *
 * Returns 0 on success, negative error code on error.
 */
static int btrfs_end_enable_verity(struct file *filp, const void *desc,
				   size_t desc_size, u64 merkle_tree_size)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(filp));
	int ret = 0;
	int rollback_ret;

	ASSERT(inode_is_locked(file_inode(filp)));

	if (desc == NULL)
		goto rollback;

	ret = finish_verity(inode, desc, desc_size);
	if (ret)
		goto rollback;
	return ret;

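	/*
	 * Return the original ret: 0 when desc was NULL (the generic fsverity
	 * code is already failing the operation), or the finish_verity() error
	 * otherwise. A failure of the rollback itself is only logged.
	 */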
rollback:
	rollback_ret = rollback_verity(inode);
	if (rollback_ret)
		btrfs_err(inode->root->fs_info,
			  "failed to rollback verity items: %d", rollback_ret);
	return ret;
}

/*
 * fsverity op that gets the struct fsverity_descriptor.
 *
 * @inode: inode to get the descriptor of
 * @buf: output buffer for the descriptor contents
 * @buf_size: size of the output buffer. 0 to query the size
 *
 * fsverity does a two-pass setup for reading the descriptor: in the first pass
 * it calls with buf_size = 0 to query the size of the descriptor, and then in
 * the second pass it actually reads the descriptor off disk.
 *
 * Returns the size on success or a negative error code on failure.
 */
int btrfs_get_verity_descriptor(struct inode *inode, void *buf, size_t buf_size)
{
	u64 true_size;
	int ret = 0;
	struct btrfs_verity_descriptor_item item;

	memset(&item, 0, sizeof(item));
	ret = read_key_bytes(BTRFS_I(inode), BTRFS_VERITY_DESC_ITEM_KEY, 0,
			     (char *)&item, sizeof(item), NULL);
	if (ret < 0)
		return ret;

	if (item.reserved[0] != 0 || item.reserved[1] != 0)
		return -EUCLEAN;

	true_size = btrfs_stack_verity_descriptor_size(&item);
	if (true_size > INT_MAX)
		return -EUCLEAN;

	if (buf_size == 0)
		return true_size;
	if (buf_size < true_size)
		return -ERANGE;

	ret = read_key_bytes(BTRFS_I(inode), BTRFS_VERITY_DESC_ITEM_KEY, 1,
			     buf, buf_size, NULL);
	if (ret < 0)
		return ret;
	if (ret != true_size)
		return -EIO;

	return true_size;
}

/*
 * fsverity op that reads and caches a merkle tree page.
 *
 * @inode: inode to read a merkle tree page for
 * @index: page index relative to the start of the merkle tree
 * @num_ra_pages: number of pages to readahead. Optional, we ignore it
 *
 * The Merkle tree is stored in the filesystem btree, but its pages are cached
 * with a logical position past EOF in the inode's mapping.
 *
 * Returns the page we read, or an ERR_PTR on error.
 */
static struct page *btrfs_read_merkle_tree_page(struct inode *inode,
						pgoff_t index,
						unsigned long num_ra_pages)
{
	struct folio *folio;
	u64 off = (u64)index << PAGE_SHIFT;
	loff_t merkle_pos = merkle_file_pos(inode);
	int ret;

	if (merkle_pos < 0)
		return ERR_PTR(merkle_pos);
	if (merkle_pos > inode->i_sb->s_maxbytes - off - PAGE_SIZE)
		return ERR_PTR(-EFBIG);
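	/*
	 * The page cache index of a Merkle tree page is its index within the
	 * tree shifted up past the (64K aligned) end of the file, so remap it
	 * before looking it up in the inode's mapping.
	 */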
	index += merkle_pos >> PAGE_SHIFT;
again:
	folio = __filemap_get_folio(inode->i_mapping, index, FGP_ACCESSED, 0);
	if (!IS_ERR(folio)) {
		if (folio_test_uptodate(folio))
			goto out;

		folio_lock(folio);
		/* If it's not uptodate after we have the lock, we got a read error. */
		if (!folio_test_uptodate(folio)) {
			folio_unlock(folio);
			folio_put(folio);
			return ERR_PTR(-EIO);
		}
		folio_unlock(folio);
		goto out;
	}

	folio = filemap_alloc_folio(mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS),
				    0);
	if (!folio)
		return ERR_PTR(-ENOMEM);

	ret = filemap_add_folio(inode->i_mapping, folio, index, GFP_NOFS);
	if (ret) {
		folio_put(folio);
		/* Did someone else insert a folio here? */
		if (ret == -EEXIST)
			goto again;
		return ERR_PTR(ret);
	}

	/*
	 * Merkle item keys are indexed from byte 0 in the merkle tree.
	 * They have the form:
	 *
	 * [ inode objectid, BTRFS_MERKLE_ITEM_KEY, offset in bytes ]
	 */
	ret = read_key_bytes(BTRFS_I(inode), BTRFS_VERITY_MERKLE_ITEM_KEY, off,
			     folio_address(folio), PAGE_SIZE, &folio->page);
	if (ret < 0) {
		folio_put(folio);
		return ERR_PTR(ret);
	}
	if (ret < PAGE_SIZE)
		folio_zero_segment(folio, ret, PAGE_SIZE);

	folio_mark_uptodate(folio);
	folio_unlock(folio);

out:
	return folio_file_page(folio, index);
}

/*
 * fsverity op that writes a Merkle tree block into the btree.
 *
 * @inode: inode to write a Merkle tree block for
 * @buf: Merkle tree block to write
 * @pos: the position of the block in the Merkle tree (in bytes)
 * @size: the Merkle tree block size (in bytes)
 *
 * Returns 0 on success or negative error code on failure
 */
static int btrfs_write_merkle_tree_block(struct inode *inode, const void *buf,
					 u64 pos, unsigned int size)
{
	loff_t merkle_pos = merkle_file_pos(inode);

	if (merkle_pos < 0)
		return merkle_pos;
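	/*
	 * Reject the write if caching this block past EOF (see the read path
	 * above) would put it beyond the maximum file size the VFS allows.
	 */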
	if (merkle_pos > inode->i_sb->s_maxbytes - pos - size)
		return -EFBIG;

	return write_key_bytes(BTRFS_I(inode), BTRFS_VERITY_MERKLE_ITEM_KEY,
			       pos, buf, size);
}

const struct fsverity_operations btrfs_verityops = {
	.begin_enable_verity     = btrfs_begin_enable_verity,
	.end_enable_verity       = btrfs_end_enable_verity,
	.get_verity_descriptor   = btrfs_get_verity_descriptor,
	.read_merkle_tree_page   = btrfs_read_merkle_tree_page,
	.write_merkle_tree_block = btrfs_write_merkle_tree_block,
};