// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include <linux/uio.h>
#include <linux/iversion.h>
#include <linux/fsverity.h>
#include <linux/iomap.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "compression.h"
#include "delalloc-space.h"
#include "reflink.h"
#include "subpage.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "file-item.h"
#include "ioctl.h"
#include "file.h"
#include "super.h"

/*
 * Simple helper to fault in pages and copy.  This should go away and
 * be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = offset_in_page(pos);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 */
		copied = copy_page_from_iter_atomic(page, offset, count, i);

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * If we get a partial write, we can end up with partially
		 * up to date pages.  These add a lot of complexity, so make
		 * sure they don't happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall back to
		 * page at a time copies after we return 0.
		 */
		if (unlikely(copied < count)) {
			if (!PageUptodate(page)) {
				iov_iter_revert(i, copied);
				copied = 0;
			}
			if (!copied)
				break;
		}

		write_bytes -= copied;
		total_copied += copied;
		offset += copied;
		if (offset == PAGE_SIZE) {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}

/*
 * Unlocks pages after btrfs_file_write() is done with them.
 */
static void btrfs_drop_pages(struct btrfs_fs_info *fs_info,
			     struct page **pages, size_t num_pages,
			     u64 pos, u64 copied)
{
	size_t i;
	u64 block_start = round_down(pos, fs_info->sectorsize);
	u64 block_len = round_up(pos + copied, fs_info->sectorsize) - block_start;

	ASSERT(block_len <= U32_MAX);
	for (i = 0; i < num_pages; i++) {
		/*
		 * The checked flag is some magic around finding pages that
		 * were modified without going through btrfs_set_page_dirty(),
		 * so clear it here.  There should be no need to mark the
		 * pages accessed, as prepare_pages() should have already
		 * marked them accessed via find_or_create_page().
		 */
		btrfs_folio_clamp_clear_checked(fs_info, page_folio(pages[i]),
						block_start, block_len);
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
}

/*
 * After btrfs_copy_from_user(), update the following things for delalloc:
 * - Mark newly dirtied pages as DELALLOC in the io tree.
 *   Used to advise which range is to be written back.
 * - Mark modified pages as Uptodate/Dirty and not needing COW fixup
 * - Update inode size for past EOF write
 */
int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
		      size_t num_pages, loff_t pos, size_t write_bytes,
		      struct extent_state **cached, bool noreserve)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(&inode->vfs_inode);
	unsigned int extra_bits = 0;

	if (write_bytes == 0)
		return 0;

	if (noreserve)
		extra_bits |= EXTENT_NORESERVE;

	start_pos = round_down(pos, fs_info->sectorsize);
	num_bytes = round_up(write_bytes + pos - start_pos,
			     fs_info->sectorsize);
	ASSERT(num_bytes <= U32_MAX);

	end_of_last_block = start_pos + num_bytes - 1;

	/*
	 * The pages may have already been dirty, clear out old accounting so
	 * we can set things up properly
	 */
	clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 cached);

	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					extra_bits, cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];

		btrfs_folio_clamp_set_uptodate(fs_info, page_folio(p),
					       start_pos, num_bytes);
		btrfs_folio_clamp_clear_checked(fs_info, page_folio(p),
						start_pos, num_bytes);
		btrfs_folio_clamp_set_dirty(fs_info, page_folio(p),
					    start_pos, num_bytes);
	}

	/*
	 * We've only changed i_size in ram, and we haven't updated the disk
	 * i_size.  There is no need to log the inode at this time.
	 */
	if (end_pos > isize)
		i_size_write(&inode->vfs_inode, end_pos);
	return 0;
}

/*
 * This is very complex, but the basic idea is to drop all extents in the
 * range start - end.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 *
 * Note: the VFS' inode number of bytes is not updated, it's up to the caller
 * to deal with that.  We set the field 'bytes_found' of the arguments structure
 * with the number of allocated bytes found in the target range, so that the
 * caller can update the inode's number of bytes in an atomic way when
 * replacing extents in a range to avoid races with stat(2).
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_inode *inode,
		       struct btrfs_drop_extents_args *args)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = args->start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	u64 last_end = args->start;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs;
	int found = 0;
	struct btrfs_path *path = args->path;

	args->bytes_found = 0;
	args->extent_inserted = false;

	/* Must always have a path if ->replace_extent is true */
	ASSERT(!(args->replace_extent && !args->path));

	if (!path) {
		path = btrfs_alloc_path();
		if (!path) {
			ret = -ENOMEM;
			goto out;
		}
	}

	if (args->drop_cache)
		btrfs_drop_extent_map_range(inode, args->start, args->end - 1, false);

	if (args->start >= inode->disk_i_size && !args->replace_extent)
		modify_tree = 0;

	update_refs = (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID);
	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == args->start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid > ino)
			break;
		if (WARN_ON_ONCE(key.objectid < ino) ||
		    key.type < BTRFS_EXTENT_DATA_KEY) {
			ASSERT(del_nr == 0);
			path->slots[0]++;
			goto next_slot;
		}
		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= args->end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_ram_bytes(leaf, fi);
		} else {
			/* can't happen */
			BUG();
		}

		/*
		 * Don't skip extent items representing 0 byte lengths.  They
		 * used to be created (due to a bug) when punching holes hit
		 * an -ENOSPC condition.  So if we find one here, just ensure
		 * we delete it, otherwise we would insert a new file extent
		 * item with the same key (offset) as that 0 bytes length
		 * file extent item in the call to setup_items_for_insert()
		 * later in this function.
		 */
		if (extent_end == key.offset && extent_end >= search_start) {
			last_end = extent_end;
			goto delete_extent_item;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		found = 1;
		search_start = max(key.offset, args->start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (args->start > key.offset && args->end < extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = args->start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							args->start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += args->start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - args->start);
			btrfs_mark_buffer_dirty(trans, leaf);

			if (update_refs && disk_bytenr > 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_ADD_DELAYED_REF,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						new_key.objectid,
						args->start - extent_offset,
						0, false);
				ret = btrfs_inc_extent_ref(trans, &ref);
				if (ret) {
					btrfs_abort_transaction(trans, ret);
					break;
				}
			}
			key.offset = args->start;
		}
		/*
		 * From here on out we will have actually dropped something, so
		 * last_end can be updated.
		 */
		last_end = extent_end;

		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (args->start <= key.offset && args->end < extent_end) {
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = args->end;
			btrfs_set_item_key_safe(trans, path, &new_key);

			extent_offset += args->end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - args->end);
			btrfs_mark_buffer_dirty(trans, leaf);
			if (update_refs && disk_bytenr > 0)
				args->bytes_found += args->end - key.offset;
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (args->start > key.offset && args->end >= extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			btrfs_set_file_extent_num_bytes(leaf, fi,
							args->start - key.offset);
			btrfs_mark_buffer_dirty(trans, leaf);
			if (update_refs && disk_bytenr > 0)
				args->bytes_found += extent_end - args->start;
			if (args->end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (args->start <= key.offset && args->end >= extent_end) {
delete_extent_item:
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				args->bytes_found += extent_end - key.offset;
				extent_end = ALIGN(extent_end,
						   fs_info->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_DROP_DELAYED_REF,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						key.objectid,
						key.offset - extent_offset, 0,
						false);
				ret = btrfs_free_extent(trans, &ref);
				if (ret) {
					btrfs_abort_transaction(trans, ret);
					break;
				}
				args->bytes_found += extent_end - key.offset;
			}

			if (args->end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG();
	}

	if (!ret && del_nr > 0) {
		/*
		 * Set path->slots[0] to first slot, so that after the delete
		 * if items are moved off from our leaf to its immediate left
		 * or right neighbour leaves, we end up with a correct and
		 * adjusted path->slots[0] for our insertion (if
		 * args->replace_extent).
		 */
		path->slots[0] = del_slot;
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, ret);
	}

	leaf = path->nodes[0];
	/*
	 * If btrfs_del_items() was called, it might have deleted a leaf, in
	 * which case it unlocked our path, so check path->locks[0] matches a
	 * write lock.
	 */
	if (!ret && args->replace_extent &&
	    path->locks[0] == BTRFS_WRITE_LOCK &&
	    btrfs_leaf_free_space(leaf) >=
	    sizeof(struct btrfs_item) + args->extent_item_size) {

		key.objectid = ino;
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = args->start;
		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
			struct btrfs_key slot_key;

			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
				path->slots[0]++;
		}
		btrfs_setup_item_for_insert(trans, root, path, &key,
					    args->extent_item_size);
		args->extent_inserted = true;
	}

	if (!args->path)
		btrfs_free_path(path);
	else if (!args->extent_inserted)
		btrfs_release_path(path);
out:
	args->drop_end = found ? min(args->end, last_end) : args->end;

	return ret;
}

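/*
 * Check whether the file extent item at @slot points to the same unencoded
 * and uncompressed extent (disk bytenr @bytenr with original file offset
 * @orig_offset) so that it can be merged with a neighbouring extent.  If it
 * matches (and any previously recorded *start/*end agree), store the item's
 * file range in *start/*end and return 1, otherwise return 0.
 */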
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret = 0;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != ino ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (key.offset > start || extent_end < end) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(trans, leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(trans, leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(trans, leaf);

		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
				       num_bytes, 0, root->root_key.objectid);
		btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
				    orig_offset, 0, false);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		if (split == start) {
			key.offset = start;
		} else {
			if (start != key.offset) {
				ret = -EINVAL;
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
			       num_bytes, 0, root->root_key.objectid);
	btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset,
			    0, false);
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(trans, leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(trans, leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * On error we return an unlocked page and the error value.
 * On success we return a locked page and 0.
 */
static int prepare_uptodate_page(struct inode *inode,
				 struct page *page, u64 pos,
				 bool force_uptodate)
{
	struct folio *folio = page_folio(page);
	int ret = 0;

	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_read_folio(NULL, folio);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}

		/*
		 * Since btrfs_read_folio() will unlock the folio before it
		 * returns, there is a window where btrfs_release_folio() can be
		 * called to release the page.  Here we check both inode
		 * mapping and PagePrivate() to make sure the page was not
		 * released.
		 *
		 * The private flag check is essential for subpage as we need
		 * to store extra bitmap using folio private.
		 */
		if (page->mapping != inode->i_mapping || !folio_test_private(folio)) {
			unlock_page(page);
			return -EAGAIN;
		}
	}
	return 0;
}

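/*
 * Helpers to compute the pagecache lookup (fgf_t) and allocation (gfp_t)
 * flags used by prepare_pages(): in nowait mode we must not sleep on page
 * locks (FGP_NOWAIT) and must avoid direct reclaim when allocating pages.
 */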
static fgf_t get_prepare_fgp_flags(bool nowait)
{
	fgf_t fgp_flags = FGP_LOCK | FGP_ACCESSED | FGP_CREAT;

	if (nowait)
		fgp_flags |= FGP_NOWAIT;

	return fgp_flags;
}

static gfp_t get_prepare_gfp_flags(struct inode *inode, bool nowait)
{
	gfp_t gfp;

	gfp = btrfs_alloc_write_mask(inode->i_mapping);
	if (nowait) {
		gfp &= ~__GFP_DIRECT_RECLAIM;
		gfp |= GFP_NOWAIT;
	}

	return gfp;
}

/*
 * Get pages into the page cache and lock them down.  The first and last
 * page are read in via prepare_uptodate_page() when the write does not
 * cover them entirely, so partially written pages stay consistent.
 */
static noinline int prepare_pages(struct inode *inode, struct page **pages,
				  size_t num_pages, loff_t pos,
				  size_t write_bytes, bool force_uptodate,
				  bool nowait)
{
	int i;
	unsigned long index = pos >> PAGE_SHIFT;
	gfp_t mask = get_prepare_gfp_flags(inode, nowait);
	fgf_t fgp_flags = get_prepare_fgp_flags(nowait);
	int err = 0;
	int faili;

	for (i = 0; i < num_pages; i++) {
again:
		pages[i] = pagecache_get_page(inode->i_mapping, index + i,
					      fgp_flags, mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			if (nowait)
				err = -EAGAIN;
			else
				err = -ENOMEM;
			goto fail;
		}

		err = set_page_extent_mapped(pages[i]);
		if (err < 0) {
			faili = i;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(inode, pages[i], pos,
						    force_uptodate);
		if (!err && i == num_pages - 1)
			err = prepare_uptodate_page(inode, pages[i],
						    pos + write_bytes, false);
		if (err) {
			put_page(pages[i]);
			if (!nowait && err == -EAGAIN) {
				err = 0;
				goto again;
			}
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}

	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		put_page(pages[faili]);
		faili--;
	}
	return err;
}

/*
 * This function locks the extent and properly waits for data=ordered extents
 * to finish before allowing the pages to be modified if needed.
 *
 * The return value:
 * 1 - the extent is locked
 * 0 - the extent is not locked, and everything is OK
 * -EAGAIN - need to prepare the pages again
 * any other < 0 value - something went wrong
 */
static noinline int
lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
				size_t num_pages, loff_t pos,
				size_t write_bytes,
				u64 *lockstart, u64 *lockend, bool nowait,
				struct extent_state **cached_state)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 start_pos;
	u64 last_pos;
	int i;
	int ret = 0;

	start_pos = round_down(pos, fs_info->sectorsize);
	last_pos = round_up(pos + write_bytes, fs_info->sectorsize) - 1;

	if (start_pos < inode->vfs_inode.i_size) {
		struct btrfs_ordered_extent *ordered;

		if (nowait) {
			if (!try_lock_extent(&inode->io_tree, start_pos, last_pos,
					     cached_state)) {
				for (i = 0; i < num_pages; i++) {
					unlock_page(pages[i]);
					put_page(pages[i]);
					pages[i] = NULL;
				}

				return -EAGAIN;
			}
		} else {
			lock_extent(&inode->io_tree, start_pos, last_pos, cached_state);
		}

		ordered = btrfs_lookup_ordered_range(inode, start_pos,
						     last_pos - start_pos + 1);
		if (ordered &&
		    ordered->file_offset + ordered->num_bytes > start_pos &&
		    ordered->file_offset <= last_pos) {
			unlock_extent(&inode->io_tree, start_pos, last_pos,
				      cached_state);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				put_page(pages[i]);
			}
			btrfs_start_ordered_extent(ordered);
			btrfs_put_ordered_extent(ordered);
			return -EAGAIN;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		*lockstart = start_pos;
		*lockend = last_pos;
		ret = 1;
	}

	/*
	 * We should be called after prepare_pages() which should have locked
	 * all pages in the range.
	 */
	for (i = 0; i < num_pages; i++)
		WARN_ON(!PageLocked(pages[i]));

	return ret;
}

/*
 * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
 *
 * @pos:         File offset.
 * @write_bytes: The length to write, will be updated to the nocow writeable
 *               range.
 *
 * This function will flush ordered extents in the range to ensure proper
 * nocow checks.
 *
 * Return:
 * > 0          If we can nocow, and updates @write_bytes.
 * 0            If we can't do a nocow write.
 * -EAGAIN      If we can't do a nocow write because snapshotting of the
 *              inode's root is in progress.
 * < 0          If an error happened.
 *
 * NOTE: Callers need to call btrfs_check_nocow_unlock() if we return > 0.
 */
int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
			   size_t *write_bytes, bool nowait)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_state *cached_state = NULL;
	u64 lockstart, lockend;
	u64 num_bytes;
	int ret;

	if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
		return 0;

	if (!btrfs_drew_try_write_lock(&root->snapshot_lock))
		return -EAGAIN;

	lockstart = round_down(pos, fs_info->sectorsize);
	lockend = round_up(pos + *write_bytes,
			   fs_info->sectorsize) - 1;
	num_bytes = lockend - lockstart + 1;

	if (nowait) {
		if (!btrfs_try_lock_ordered_range(inode, lockstart, lockend,
						  &cached_state)) {
			btrfs_drew_write_unlock(&root->snapshot_lock);
			return -EAGAIN;
		}
	} else {
		btrfs_lock_and_flush_ordered_range(inode, lockstart, lockend,
						   &cached_state);
	}
	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
			       NULL, NULL, NULL, nowait, false);
	if (ret <= 0)
		btrfs_drew_write_unlock(&root->snapshot_lock);
	else
		*write_bytes = min_t(size_t, *write_bytes,
				     num_bytes - pos + lockstart);
	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);

	return ret;
}

void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
{
	btrfs_drew_write_unlock(&inode->root->snapshot_lock);
}

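/*
 * Update mtime and ctime to the current time before a write, unless the
 * inode is flagged NOCMTIME, and bump the inode version if versioning is
 * enabled.
 */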
static void update_time_for_write(struct inode *inode)
{
	struct timespec64 now, ts;

	if (IS_NOCMTIME(inode))
		return;

	now = current_time(inode);
	ts = inode_get_mtime(inode);
	if (!timespec64_equal(&ts, &now))
		inode_set_mtime_to_ts(inode, now);

	ts = inode_get_ctime(inode);
	if (!timespec64_equal(&ts, &now))
		inode_set_ctime_to_ts(inode, now);

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);
}

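/*
 * Generic checks done before every write (buffered, direct and encoded):
 * bail out early on NOWAIT writes that would always have to COW, remove
 * privileges, update timestamps and expand the hole when writing past EOF.
 */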
static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
			     size_t count)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	loff_t pos = iocb->ki_pos;
	int ret;
	loff_t oldsize;
	loff_t start_pos;

	/*
	 * Quickly bail out on NOWAIT writes if we don't have the nodatacow or
	 * prealloc flags, as without those flags we always have to COW. We will
	 * later check if we can really COW into the target range (using
	 * can_nocow_extent() at btrfs_get_blocks_direct_write()).
	 */
	if ((iocb->ki_flags & IOCB_NOWAIT) &&
	    !(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
		return -EAGAIN;

	ret = file_remove_privs(file);
	if (ret)
		return ret;

	/*
	 * We reserve space for updating the inode when we reserve space for the
	 * extent we are going to write, so we will enospc out there. We don't
	 * need to start yet another transaction to update the inode as we will
	 * update the inode when we finish writing whatever data we write.
	 */
	update_time_for_write(inode);

	start_pos = round_down(pos, fs_info->sectorsize);
	oldsize = i_size_read(inode);
	if (start_pos > oldsize) {
		/* Expand hole size to cover write data, preventing empty gap */
		loff_t end_pos = round_up(pos + count, fs_info->sectorsize);

		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, end_pos);
		if (ret)
			return ret;
	}

	return 0;
}

static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
					     struct iov_iter *i)
{
	struct file *file = iocb->ki_filp;
	loff_t pos;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct page **pages = NULL;
	struct extent_changeset *data_reserved = NULL;
	u64 release_bytes = 0;
	u64 lockstart;
	u64 lockend;
	size_t num_written = 0;
	int nrptrs;
	ssize_t ret;
	bool only_release_metadata = false;
	bool force_page_uptodate = false;
	loff_t old_isize = i_size_read(inode);
	unsigned int ilock_flags = 0;
	const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
	unsigned int bdp_flags = (nowait ? BDP_ASYNC : 0);

	if (nowait)
		ilock_flags |= BTRFS_ILOCK_TRY;

	ret = btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
	if (ret < 0)
		return ret;

	ret = generic_write_checks(iocb, i);
	if (ret <= 0)
		goto out;

	ret = btrfs_write_check(iocb, i, ret);
	if (ret < 0)
		goto out;

	pos = iocb->ki_pos;
	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
		     PAGE_SIZE / (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	while (iov_iter_count(i) > 0) {
		struct extent_state *cached_state = NULL;
		size_t offset = offset_in_page(pos);
		size_t sector_offset;
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_SIZE -
					 offset);
		size_t num_pages;
		size_t reserve_bytes;
		size_t dirty_pages;
		size_t copied;
		size_t dirty_sectors;
		size_t num_sectors;
		int extents_locked;

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(fault_in_iov_iter_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		only_release_metadata = false;
		sector_offset = pos & (fs_info->sectorsize - 1);

		extent_changeset_release(data_reserved);
		ret = btrfs_check_data_free_space(BTRFS_I(inode),
						  &data_reserved, pos,
						  write_bytes, nowait);
		if (ret < 0) {
			int can_nocow;

			if (nowait && (ret == -ENOSPC || ret == -EAGAIN)) {
				ret = -EAGAIN;
				break;
			}

			/*
			 * If we don't have to COW at the offset, reserve
			 * metadata only. write_bytes may get smaller than
			 * requested here.
			 */
			can_nocow = btrfs_check_nocow_lock(BTRFS_I(inode), pos,
							   &write_bytes, nowait);
			if (can_nocow < 0)
				ret = can_nocow;
			if (can_nocow > 0)
				ret = 0;
			if (ret)
				break;
			only_release_metadata = true;
		}

		num_pages = DIV_ROUND_UP(write_bytes + offset, PAGE_SIZE);
		WARN_ON(num_pages > nrptrs);
		reserve_bytes = round_up(write_bytes + sector_offset,
					 fs_info->sectorsize);
		WARN_ON(reserve_bytes == 0);
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
						      reserve_bytes,
						      reserve_bytes, nowait);
		if (ret) {
			if (!only_release_metadata)
				btrfs_free_reserved_data_space(BTRFS_I(inode),
						data_reserved, pos,
						write_bytes);
			else
				btrfs_check_nocow_unlock(BTRFS_I(inode));

			if (nowait && ret == -ENOSPC)
				ret = -EAGAIN;
			break;
		}

		release_bytes = reserve_bytes;
again:
		ret = balance_dirty_pages_ratelimited_flags(inode->i_mapping, bdp_flags);
		if (ret) {
			btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
			break;
		}

		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(inode, pages, num_pages,
				    pos, write_bytes, force_page_uptodate, false);
		if (ret) {
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       reserve_bytes);
			break;
		}

		extents_locked = lock_and_cleanup_extent_if_need(
				BTRFS_I(inode), pages,
				num_pages, pos, write_bytes, &lockstart,
				&lockend, nowait, &cached_state);
		if (extents_locked < 0) {
			if (!nowait && extents_locked == -EAGAIN)
				goto again;

			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       reserve_bytes);
			ret = extents_locked;
			break;
		}

		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);

		num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
		dirty_sectors = round_up(copied + sector_offset,
					 fs_info->sectorsize);
		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);

		/*
		 * If we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_sectors = 0;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = DIV_ROUND_UP(copied + offset,
						   PAGE_SIZE);
		}

		if (num_sectors > dirty_sectors) {
			/* release everything except the sectors we dirtied */
			release_bytes -= dirty_sectors << fs_info->sectorsize_bits;
			if (only_release_metadata) {
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
							release_bytes, true);
			} else {
				u64 __pos;

				__pos = round_down(pos,
						   fs_info->sectorsize) +
					(dirty_pages << PAGE_SHIFT);
				btrfs_delalloc_release_space(BTRFS_I(inode),
						data_reserved, __pos,
						release_bytes, true);
			}
		}

		release_bytes = round_up(copied + sector_offset,
					 fs_info->sectorsize);

		ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
					dirty_pages, pos, copied,
					&cached_state, only_release_metadata);

		/*
		 * If we have not locked the extent range, because the range's
		 * start offset is >= i_size, we might still have a non-NULL
		 * cached extent state, acquired while marking the extent range
		 * as delalloc through btrfs_dirty_pages(). Therefore free any
		 * possible cached extent state to avoid a memory leak.
		 */
		if (extents_locked)
			unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
				      lockend, &cached_state);
		else
			free_extent_state(cached_state);

		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
		if (ret) {
			btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
			break;
		}

		release_bytes = 0;
		if (only_release_metadata)
			btrfs_check_nocow_unlock(BTRFS_I(inode));

		btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);

		cond_resched();

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	if (release_bytes) {
		if (only_release_metadata) {
			btrfs_check_nocow_unlock(BTRFS_I(inode));
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
					release_bytes, true);
		} else {
			btrfs_delalloc_release_space(BTRFS_I(inode),
					data_reserved,
					round_down(pos, fs_info->sectorsize),
					release_bytes, true);
		}
	}

	extent_changeset_free(data_reserved);
	if (num_written > 0) {
		pagecache_isize_extended(inode, old_isize, iocb->ki_pos);
		iocb->ki_pos += num_written;
	}
out:
	btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
	return num_written ? num_written : ret;
}

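/*
 * Direct IO requires the file offset and the iov memory to be aligned to
 * the sector size.  Return -EINVAL otherwise, which makes the caller fall
 * back to buffered IO.
 */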
static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
			       const struct iov_iter *iter, loff_t offset)
{
	const u32 blocksize_mask = fs_info->sectorsize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	loff_t pos;
	ssize_t written = 0;
	ssize_t written_buffered;
	size_t prev_left = 0;
	loff_t endbyte;
	ssize_t err;
	unsigned int ilock_flags = 0;
	struct iomap_dio *dio;

	if (iocb->ki_flags & IOCB_NOWAIT)
		ilock_flags |= BTRFS_ILOCK_TRY;

	/*
	 * If the write DIO is within EOF, use a shared lock and also only if
	 * security bits will likely not be dropped by file_remove_privs() called
	 * from btrfs_write_check(). Either will need to be rechecked after the
	 * lock was acquired.
	 */
	if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode) && IS_NOSEC(inode))
		ilock_flags |= BTRFS_ILOCK_SHARED;

relock:
	err = btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
	if (err < 0)
		return err;

	/* Shared lock cannot be used with security bits set. */
	if ((ilock_flags & BTRFS_ILOCK_SHARED) && !IS_NOSEC(inode)) {
		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
		ilock_flags &= ~BTRFS_ILOCK_SHARED;
		goto relock;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0) {
		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
		return err;
	}

	err = btrfs_write_check(iocb, from, err);
	if (err < 0) {
		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
		goto out;
	}

	pos = iocb->ki_pos;
	/*
	 * Re-check since the file size may have changed just before taking
	 * the lock, or pos may have changed because of O_APPEND in
	 * generic_write_checks().
	 */
	if ((ilock_flags & BTRFS_ILOCK_SHARED) &&
	    pos + iov_iter_count(from) > i_size_read(inode)) {
		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
		ilock_flags &= ~BTRFS_ILOCK_SHARED;
		goto relock;
	}

	if (check_direct_IO(fs_info, from, pos)) {
		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
		goto buffered;
	}

	/*
	 * The iov_iter can be mapped to the same file range we are writing to.
	 * If that's the case, then we will deadlock in the iomap code, because
	 * it first calls our callback btrfs_dio_iomap_begin(), which will create
	 * an ordered extent, and after that it will fault in the pages that the
	 * iov_iter refers to. During the fault in we end up in the readahead
	 * pages code (starting at btrfs_readahead()), which will lock the range,
	 * find that ordered extent and then wait for it to complete (at
	 * btrfs_lock_and_flush_ordered_range()), resulting in a deadlock since
	 * obviously the ordered extent can never complete as we didn't submit
	 * yet the respective bio(s). This always happens when the buffer is
	 * memory mapped to the same file range, since the iomap DIO code always
	 * invalidates pages in the target file range (after starting and waiting
	 * for any writeback).
	 *
	 * So here we disable page faults in the iov_iter and then retry if we
	 * got -EFAULT, faulting in the pages before the retry.
	 */
	from->nofault = true;
	dio = btrfs_dio_write(iocb, from, written);
	from->nofault = false;

	/*
	 * iomap_dio_complete() will call btrfs_sync_file() if we have a dsync
	 * iocb, and that needs to lock the inode. So unlock it before calling
	 * iomap_dio_complete() to avoid a deadlock.
	 */
	btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);

	if (IS_ERR_OR_NULL(dio))
		err = PTR_ERR_OR_ZERO(dio);
	else
		err = iomap_dio_complete(dio);

	/* No increment (+=) because iomap returns a cumulative value. */
	if (err > 0)
		written = err;

	if (iov_iter_count(from) > 0 && (err == -EFAULT || err > 0)) {
		const size_t left = iov_iter_count(from);
		/*
		 * We have more data left to write. Try to fault in as many as
		 * possible of the remainder pages and retry. We do this without
		 * releasing and locking again the inode, to prevent races with
		 * truncate.
		 *
		 * Also, in case the iov refers to pages in the file range of the
		 * file we want to write to (due to a mmap), we could enter an
		 * infinite loop if we retry after faulting the pages in, since
		 * iomap will invalidate any pages in the range early on, before
		 * it tries to fault in the pages of the iov. So we keep track of
		 * how much was left of iov in the previous EFAULT and fallback
		 * to buffered IO in case we haven't made any progress.
		 */
		if (left == prev_left) {
			err = -ENOTBLK;
		} else {
			fault_in_iov_iter_readable(from, left);
			prev_left = left;
			goto relock;
		}
	}

	/*
	 * If 'err' is -ENOTBLK or we have not written all data, then it means
	 * we must fallback to buffered IO.
	 */
	if ((err < 0 && err != -ENOTBLK) || !iov_iter_count(from))
		goto out;

buffered:
	/*
	 * If we are in a NOWAIT context, then return -EAGAIN to signal the caller
	 * it must retry the operation in a context where blocking is acceptable,
	 * because even if we end up not blocking during the buffered IO attempt
	 * below, we will block when flushing and waiting for the IO.
	 */
	if (iocb->ki_flags & IOCB_NOWAIT) {
		err = -EAGAIN;
		goto out;
	}

	pos = iocb->ki_pos;
	written_buffered = btrfs_buffered_write(iocb, from);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	/*
	 * Ensure all data is persisted. We want the next direct IO read to be
	 * able to read what was just written.
	 */
	endbyte = pos + written_buffered - 1;
	err = btrfs_fdatawrite_range(inode, pos, endbyte);
	if (err)
		goto out;
	err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	iocb->ki_pos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
				 endbyte >> PAGE_SHIFT);
out:
	return err < 0 ? err : written;
}

static ssize_t btrfs_encoded_write(struct kiocb *iocb, struct iov_iter *from,
			const struct btrfs_ioctl_encoded_io_args *encoded)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	loff_t count;
	ssize_t ret;

	btrfs_inode_lock(BTRFS_I(inode), 0);
	count = encoded->len;
	ret = generic_write_checks_count(iocb, &count);
	if (ret == 0 && count != encoded->len) {
		/*
		 * The write got truncated by generic_write_checks_count(). We
		 * can't do a partial encoded write.
		 */
		ret = -EFBIG;
	}
	if (ret || encoded->len == 0)
		goto out;

	ret = btrfs_write_check(iocb, from, encoded->len);
	if (ret < 0)
		goto out;

	ret = btrfs_do_encoded_write(iocb, from, encoded);
out:
	btrfs_inode_unlock(BTRFS_I(inode), 0);
	return ret;
}

ssize_t btrfs_do_write_iter(struct kiocb *iocb, struct iov_iter *from,
			    const struct btrfs_ioctl_encoded_io_args *encoded)
{
	struct file *file = iocb->ki_filp;
	struct btrfs_inode *inode = BTRFS_I(file_inode(file));
	ssize_t num_written, num_sync;

	/*
	 * If the fs flips readonly due to some impossible error, although we
	 * have opened a file as writable, we have to stop this write operation
	 * to ensure consistency.
	 */
	if (BTRFS_FS_ERROR(inode->root->fs_info))
		return -EROFS;

	if (encoded && (iocb->ki_flags & IOCB_NOWAIT))
		return -EOPNOTSUPP;

	if (encoded) {
		num_written = btrfs_encoded_write(iocb, from, encoded);
		num_sync = encoded->len;
	} else if (iocb->ki_flags & IOCB_DIRECT) {
		num_written = btrfs_direct_write(iocb, from);
		num_sync = num_written;
	} else {
		num_written = btrfs_buffered_write(iocb, from);
		num_sync = num_written;
	}

	btrfs_set_inode_last_sub_trans(inode);

	if (num_sync > 0) {
		num_sync = generic_write_sync(iocb, num_sync);
		if (num_sync < 0)
			num_written = num_sync;
	}

	return num_written;
}

static ssize_t btrfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	return btrfs_do_write_iter(iocb, from, NULL);
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	struct btrfs_file_private *private = filp->private_data;

	if (private) {
		kfree(private->filldir_buf);
		free_extent_state(private->llseek_cached_state);
		kfree(private);
		filp->private_data = NULL;
	}

	/*
	 * Set by setattr when we are about to truncate a file from a non-zero
	 * size to a zero size.  This tries to flush down new bytes that may
	 * have been written if the application were using truncate to replace
	 * a file in place.
	 */
	if (test_and_clear_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
			       &BTRFS_I(inode)->runtime_flags))
		filemap_flush(inode->i_mapping);
	return 0;
}

static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
{
	int ret;
	struct blk_plug plug;

	/*
	 * This is only called in fsync, which would do synchronous writes, so
	 * a plug can merge adjacent IOs as much as possible.  Esp. in case of
	 * multiple disks using raid profile, a large IO can be split to
	 * several segments of stripe length (currently 64K).
	 */
	blk_start_plug(&plug);
	ret = btrfs_fdatawrite_range(inode, start, end);
	blk_finish_plug(&plug);

	return ret;
}

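/*
 * Check if we can skip logging the inode for this fsync: either the inode
 * was already logged in the current transaction, or nothing relevant has
 * changed since the last transaction commit.
 */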
static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
{
	struct btrfs_inode *inode = BTRFS_I(ctx->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (btrfs_inode_in_log(inode, btrfs_get_fs_generation(fs_info)) &&
	    list_empty(&ctx->ordered_extents))
		return true;

	/*
	 * If we are doing a fast fsync we can not bail out if the inode's
	 * last_trans is <= the last committed transaction, because we only
	 * update the last_trans of the inode during ordered extent completion,
	 * and for a fast fsync we don't wait for that, we only wait for the
	 * writeback to complete.
	 */
	if (inode->last_trans <= btrfs_get_last_trans_committed(fs_info) &&
	    (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
	     list_empty(&ctx->ordered_extents)))
		return true;

	return false;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates in
 * the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_log_ctx ctx;
	int ret = 0, err;
	u64 len;
	bool full_sync;

	trace_btrfs_sync_file(file, datasync);

	btrfs_init_log_ctx(&ctx, inode);

	/*
	 * Always set the range to a full range, otherwise we can get into
	 * several problems, from missing file extent items to represent holes
	 * when not using the NO_HOLES feature, to log tree corruption due to
	 * races between hole detection during logging and completion of ordered
	 * extents outside the range, to missing checksums due to ordered extents
	 * for which we flushed only a subset of their pages.
	 */
	start = 0;
	end = LLONG_MAX;
	len = (u64)LLONG_MAX + 1;

	/*
	 * We write the dirty pages in the range and wait until they complete
	 * outside of the ->i_mutex, so the dirty pages can be flushed by
	 * multiple tasks, improving performance.  See
	 * btrfs_wait_ordered_range() for an explanation of the ASYNC check.
	 */
1820 ret = start_ordered_ops(inode, start, end);
1821 if (ret)
1822 goto out;
1823
1824 btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
1825
1826 atomic_inc(&root->log_batch);
1827
1828 /*
1829 * Before we acquired the inode's lock and the mmap lock, someone may
1830 * have dirtied more pages in the target range. We need to make sure
1831 * that writeback for any such pages does not start while we are logging
1832 * the inode, because if it does, any of the following might happen when
1833 * we are not doing a full inode sync:
1834 *
1835 * 1) We log an extent after its writeback finishes but before its
1836 * checksums are added to the csum tree, leading to -EIO errors
1837 * when attempting to read the extent after a log replay.
1838 *
1839 * 2) We can end up logging an extent before its writeback finishes.
1840 * Therefore after the log replay we will have a file extent item
1841 * pointing to an unwritten extent (and no data checksums as well).
1842 *
1843 * So trigger writeback for any eventual new dirty pages and then we
1844 * wait for all ordered extents to complete below.
1845 */
1846 ret = start_ordered_ops(inode, start, end);
1847 if (ret) {
1848 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
1849 goto out;
1850 }
1851
1852 /*
1853 * Always check for the full sync flag while holding the inode's lock,
1854 * to avoid races with other tasks. The flag must be either set all the
1855 * time during logging or always off all the time while logging.
1856 * We check the flag here after starting delalloc above, because when
1857 * running delalloc the full sync flag may be set if we need to drop
1858 * extra extent map ranges due to temporary memory allocation failures.
1859 */
1860 full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1861 &BTRFS_I(inode)->runtime_flags);
1862
1863 /*
1864 * We have to do this here to avoid the priority inversion of waiting on
1865 * IO of a lower priority task while holding a transaction open.
1866 *
1867 * For a full fsync we wait for the ordered extents to complete while
1868 * for a fast fsync we wait just for writeback to complete, and then
1869 * attach the ordered extents to the transaction so that a transaction
1870 * commit waits for their completion, to avoid data loss if we fsync,
1871 * the current transaction commits before the ordered extents complete
1872 * and a power failure happens right after that.
1873 *
1874 * For zoned filesystem, if a write IO uses a ZONE_APPEND command, the
1875 * logical address recorded in the ordered extent may change. We need
1876 * to wait for the IO to stabilize the logical address.
1877 */
1878 if (full_sync || btrfs_is_zoned(fs_info)) {
1879 ret = btrfs_wait_ordered_range(inode, start, len);
1880 } else {
1881 /*
1882 * Get our ordered extents as soon as possible to avoid doing
1883 * checksum lookups in the csum tree, and use instead the
1884 * checksums attached to the ordered extents.
1885 */
1886 btrfs_get_ordered_extents_for_logging(BTRFS_I(inode),
1887 &ctx.ordered_extents);
1888 ret = filemap_fdatawait_range(inode->i_mapping, start, end);
1889 }
1890
1891 if (ret)
1892 goto out_release_extents;
1893
1894 atomic_inc(&root->log_batch);
1895
1896 if (skip_inode_logging(&ctx)) {
1897 /*
1898 * We've had everything committed since the last time we were
1899 * modified, so clear this flag in case it was set for whatever
1900 * reason; it's no longer relevant.
1901 */
1902 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1903 &BTRFS_I(inode)->runtime_flags);
1904 /*
1905 * An ordered extent might have started before and completed
1906 * already with io errors, in which case the inode was not
1907 * updated and we end up here. So check the inode's mapping
1908 * for any errors that might have happened since we last
1909 * called fsync.
1910 */
1911 ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
1912 goto out_release_extents;
1913 }
1914
1915 /*
1916 * We use start here because we will need to wait on the IO to complete
1917 * in btrfs_sync_log, which could require joining a transaction (for
1918 * example checking cross references in the nocow path). If we use join
1919 * here we could get into a situation where we're waiting on IO to
1920 * happen that is blocked on a transaction trying to commit. With start
1921 * we inc the extwriter counter, so we wait for all extwriters to exit
1922 * before we start blocking joiners. This comment is to keep somebody
1923 * from thinking they are super smart and changing this to
1924 * btrfs_join_transaction *cough*Josef*cough*.
1925 */
1926 trans = btrfs_start_transaction(root, 0);
1927 if (IS_ERR(trans)) {
1928 ret = PTR_ERR(trans);
1929 goto out_release_extents;
1930 }
1931 trans->in_fsync = true;
1932
1933 ret = btrfs_log_dentry_safe(trans, dentry, &ctx);
1934 btrfs_release_log_ctx_extents(&ctx);
1935 if (ret < 0) {
1936 /* Fallthrough and commit/free transaction. */
1937 ret = BTRFS_LOG_FORCE_COMMIT;
1938 }
1939
1940 /*
1941 * We've logged all the items and now have a consistent version of the
1942 * file in the log. It is possible that someone will come in and modify
1943 * the file, but that's fine because the log is consistent on disk, and
1944 * we have references to all of the file's extents.
1945 *
1946 * It is possible that someone will come in and log the file again, but
1947 * that will end up using the synchronization inside btrfs_sync_log to
1948 * keep things safe.
1949 */
1950 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
1951
1952 if (ret == BTRFS_NO_LOG_SYNC) {
1953 ret = btrfs_end_transaction(trans);
1954 goto out;
1955 }
1956
1957 /* We successfully logged the inode, attempt to sync the log. */
1958 if (!ret) {
1959 ret = btrfs_sync_log(trans, root, &ctx);
1960 if (!ret) {
1961 ret = btrfs_end_transaction(trans);
1962 goto out;
1963 }
1964 }
1965
1966 /*
1967 * At this point we need to commit the transaction because we had
1968 * btrfs_need_log_full_commit() or some other error.
1969 *
1970 * If we didn't do a full sync we have to stop the trans handle, wait on
1971 * the ordered extents, start it again and commit the transaction. If
1972 * we attempt to wait on the ordered extents here we could deadlock with
1973 * something like fallocate() that is holding the extent lock trying to
1974 * start a transaction while some other thread is trying to commit the
1975 * transaction while we (fsync) are currently holding the transaction
1976 * open.
1977 */
1978 if (!full_sync) {
1979 ret = btrfs_end_transaction(trans);
1980 if (ret)
1981 goto out;
1982 ret = btrfs_wait_ordered_range(inode, start, len);
1983 if (ret)
1984 goto out;
1985
1986 /*
1987 * This is safe to use here because we're only interested in
1988 * making sure the transaction that had the ordered extents is
1989 * committed. We aren't waiting on anything past this point,
1990 * we're purely getting the transaction and committing it.
1991 */
1992 trans = btrfs_attach_transaction_barrier(root);
1993 if (IS_ERR(trans)) {
1994 ret = PTR_ERR(trans);
1995
1996 /*
1997 * We committed the transaction and there's no currently
1998 * running transaction, which means everything we care
1999 * about made it to disk and we are done.
2000 */
2001 if (ret == -ENOENT)
2002 ret = 0;
2003 goto out;
2004 }
2005 }
2006
2007 ret = btrfs_commit_transaction(trans);
2008out:
2009 ASSERT(list_empty(&ctx.list));
2010 ASSERT(list_empty(&ctx.conflict_inodes));
2011 err = file_check_and_advance_wb_err(file);
2012 if (!ret)
2013 ret = err;
2014 return ret > 0 ? -EIO : ret;
2015
2016out_release_extents:
2017 btrfs_release_log_ctx_extents(&ctx);
2018 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2019 goto out;
2020}
2021
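/*
 * btrfs relies on the generic handlers for faulting in and mapping page
 * cache pages, but needs its own page_mkwrite handler so that space can
 * be reserved and delalloc state set up before a shared mapping is
 * written to.
 */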
2022static const struct vm_operations_struct btrfs_file_vm_ops = {
2023 .fault = filemap_fault,
2024 .map_pages = filemap_map_pages,
2025 .page_mkwrite = btrfs_page_mkwrite,
2026};
2027
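/*
 * mmap() requires the ->read_folio address space operation, since pages
 * faulted in are read through the page cache. Returning -ENOEXEC here
 * matches what generic_file_mmap() does for mappings that cannot read
 * folios.
 */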
2028static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
2029{
2030 struct address_space *mapping = filp->f_mapping;
2031
2032 if (!mapping->a_ops->read_folio)
2033 return -ENOEXEC;
2034
2035 file_accessed(filp);
2036 vma->vm_ops = &btrfs_file_vm_ops;
2037
2038 return 0;
2039}
2040
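/*
 * Check if the file extent item at @slot is an existing hole (a regular
 * file extent item with a zero disk_bytenr) immediately adjacent to the
 * range [start, end), in which case the hole we are inserting can be
 * merged with it instead of adding a new item.
 */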
2041static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
2042 int slot, u64 start, u64 end)
2043{
2044 struct btrfs_file_extent_item *fi;
2045 struct btrfs_key key;
2046
2047 if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2048 return 0;
2049
2050 btrfs_item_key_to_cpu(leaf, &key, slot);
2051 if (key.objectid != btrfs_ino(inode) ||
2052 key.type != BTRFS_EXTENT_DATA_KEY)
2053 return 0;
2054
2055 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2056
2057 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2058 return 0;
2059
2060 if (btrfs_file_extent_disk_bytenr(leaf, fi))
2061 return 0;
2062
2063 if (key.offset == end)
2064 return 1;
2065 if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2066 return 1;
2067 return 0;
2068}
2069
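/*
 * Make sure the file range [offset, end) is represented as a hole: extend
 * an adjacent hole extent item, or insert a new one, unless the NO_HOLES
 * feature is enabled, in which case no item is needed. In all cases update
 * the extent map tree so the fast fsync path knows about the hole.
 */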
2070static int fill_holes(struct btrfs_trans_handle *trans,
2071 struct btrfs_inode *inode,
2072 struct btrfs_path *path, u64 offset, u64 end)
2073{
2074 struct btrfs_fs_info *fs_info = trans->fs_info;
2075 struct btrfs_root *root = inode->root;
2076 struct extent_buffer *leaf;
2077 struct btrfs_file_extent_item *fi;
2078 struct extent_map *hole_em;
2079 struct btrfs_key key;
2080 int ret;
2081
2082 if (btrfs_fs_incompat(fs_info, NO_HOLES))
2083 goto out;
2084
2085 key.objectid = btrfs_ino(inode);
2086 key.type = BTRFS_EXTENT_DATA_KEY;
2087 key.offset = offset;
2088
2089 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2090 if (ret <= 0) {
2091 /*
2092 * We should have dropped this offset, so if we find it then
2093 * something has gone horribly wrong.
2094 */
2095 if (ret == 0)
2096 ret = -EINVAL;
2097 return ret;
2098 }
2099
2100 leaf = path->nodes[0];
2101 if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2102 u64 num_bytes;
2103
2104 path->slots[0]--;
2105 fi = btrfs_item_ptr(leaf, path->slots[0],
2106 struct btrfs_file_extent_item);
2107 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2108 end - offset;
2109 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2110 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2111 btrfs_set_file_extent_offset(leaf, fi, 0);
2112 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2113 btrfs_mark_buffer_dirty(trans, leaf);
2114 goto out;
2115 }
2116
2117 if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2118 u64 num_bytes;
2119
2120 key.offset = offset;
2121 btrfs_set_item_key_safe(trans, path, &key);
2122 fi = btrfs_item_ptr(leaf, path->slots[0],
2123 struct btrfs_file_extent_item);
2124 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2125 offset;
2126 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2127 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2128 btrfs_set_file_extent_offset(leaf, fi, 0);
2129 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2130 btrfs_mark_buffer_dirty(trans, leaf);
2131 goto out;
2132 }
2133 btrfs_release_path(path);
2134
2135 ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset,
2136 end - offset);
2137 if (ret)
2138 return ret;
2139
2140out:
2141 btrfs_release_path(path);
2142
2143 hole_em = alloc_extent_map();
2144 if (!hole_em) {
2145 btrfs_drop_extent_map_range(inode, offset, end - 1, false);
2146 btrfs_set_inode_full_sync(inode);
2147 } else {
2148 hole_em->start = offset;
2149 hole_em->len = end - offset;
2150 hole_em->ram_bytes = hole_em->len;
2151 hole_em->orig_start = offset;
2152
2153 hole_em->block_start = EXTENT_MAP_HOLE;
2154 hole_em->block_len = 0;
2155 hole_em->orig_block_len = 0;
2156 hole_em->generation = trans->transid;
2157
2158 ret = btrfs_replace_extent_map_range(inode, hole_em, true);
2159 free_extent_map(hole_em);
2160 if (ret)
2161 btrfs_set_inode_full_sync(inode);
2162 }
2163
2164 return 0;
2165}
2166
2167/*
2168 * Find a hole extent on the given inode and change start/len to the end
2169 * of the hole extent (a hole/vacuum extent is one whose em->start <= start
2170 * && em->start + em->len > start).
2171 * When a hole extent is found, return 1 and modify start/len.
2172 */
2173static int find_first_non_hole(struct btrfs_inode *inode, u64 *start, u64 *len)
2174{
2175 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2176 struct extent_map *em;
2177 int ret = 0;
2178
2179 em = btrfs_get_extent(inode, NULL, 0,
2180 round_down(*start, fs_info->sectorsize),
2181 round_up(*len, fs_info->sectorsize));
2182 if (IS_ERR(em))
2183 return PTR_ERR(em);
2184
2185 /* Hole or vacuum extent (the latter only exists in no-holes mode) */
2186 if (em->block_start == EXTENT_MAP_HOLE) {
2187 ret = 1;
2188 *len = em->start + em->len > *start + *len ?
2189 0 : *start + *len - em->start - em->len;
2190 *start = em->start + em->len;
2191 }
2192 free_extent_map(em);
2193 return ret;
2194}
2195
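/*
 * Truncate the page cache for the given range and lock the range in the
 * inode's io tree, retrying until no pages are left in it, since readers
 * can race with us and bring pages back in after the truncation.
 */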
2196static void btrfs_punch_hole_lock_range(struct inode *inode,
2197 const u64 lockstart,
2198 const u64 lockend,
2199 struct extent_state **cached_state)
2200{
2201 /*
2202 * For the subpage case, if the range is not aligned to the page
2203 * boundary, we could have pages at the leading/trailing parts of
2204 * the range. This could lead to an infinite loop, since
2205 * filemap_range_has_page() would then always return true.
2206 * So here we need to do extra page alignment before calling
2207 * filemap_range_has_page().
2208 */
2209 const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
2210 const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
2211
2212 while (1) {
2213 truncate_pagecache_range(inode, lockstart, lockend);
2214
2215 lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2216 cached_state);
2217 /*
2218 * We can't have ordered extents in the range, nor dirty/writeback
2219 * pages, because we have locked the inode's VFS lock in exclusive
2220 * mode, we have locked the inode's i_mmap_lock in exclusive mode,
2221 * we have flushed all delalloc in the range and we have waited
2222 * for any ordered extents in the range to complete.
2223 * We can race with anyone reading pages from this range, so after
2224 * locking the range check if we have pages in the range, and if
2225 * we do, unlock the range and retry.
2226 */
2227 if (!filemap_range_has_page(inode->i_mapping, page_lockstart,
2228 page_lockend))
2229 break;
2230
2231 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2232 cached_state);
2233 }
2234
2235 btrfs_assert_inode_range_clean(BTRFS_I(inode), lockstart, lockend);
2236}
2237
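/*
 * Insert a file extent item for the extent (or hole) described by
 * @extent_info, covering @replace_len bytes starting at its file_offset.
 * The inode's byte counts are updated and, for a real extent, either a
 * reserved extent is added to the extent tree (first insertion of a newly
 * allocated extent) or the reference count of the existing extent is
 * incremented.
 */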
2238static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
2239 struct btrfs_inode *inode,
2240 struct btrfs_path *path,
2241 struct btrfs_replace_extent_info *extent_info,
2242 const u64 replace_len,
2243 const u64 bytes_to_drop)
2244{
2245 struct btrfs_fs_info *fs_info = trans->fs_info;
2246 struct btrfs_root *root = inode->root;
2247 struct btrfs_file_extent_item *extent;
2248 struct extent_buffer *leaf;
2249 struct btrfs_key key;
2250 int slot;
2251 struct btrfs_ref ref = { 0 };
2252 int ret;
2253
2254 if (replace_len == 0)
2255 return 0;
2256
2257 if (extent_info->disk_offset == 0 &&
2258 btrfs_fs_incompat(fs_info, NO_HOLES)) {
2259 btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2260 return 0;
2261 }
2262
2263 key.objectid = btrfs_ino(inode);
2264 key.type = BTRFS_EXTENT_DATA_KEY;
2265 key.offset = extent_info->file_offset;
2266 ret = btrfs_insert_empty_item(trans, root, path, &key,
2267 sizeof(struct btrfs_file_extent_item));
2268 if (ret)
2269 return ret;
2270 leaf = path->nodes[0];
2271 slot = path->slots[0];
2272 write_extent_buffer(leaf, extent_info->extent_buf,
2273 btrfs_item_ptr_offset(leaf, slot),
2274 sizeof(struct btrfs_file_extent_item));
2275 extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2276 ASSERT(btrfs_file_extent_type(leaf, extent) != BTRFS_FILE_EXTENT_INLINE);
2277 btrfs_set_file_extent_offset(leaf, extent, extent_info->data_offset);
2278 btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
2279 if (extent_info->is_new_extent)
2280 btrfs_set_file_extent_generation(leaf, extent, trans->transid);
2281 btrfs_mark_buffer_dirty(trans, leaf);
2282 btrfs_release_path(path);
2283
2284 ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
2285 replace_len);
2286 if (ret)
2287 return ret;
2288
2289 /* If it's a hole, nothing more needs to be done. */
2290 if (extent_info->disk_offset == 0) {
2291 btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2292 return 0;
2293 }
2294
2295 btrfs_update_inode_bytes(inode, replace_len, bytes_to_drop);
2296
2297 if (extent_info->is_new_extent && extent_info->insertions == 0) {
2298 key.objectid = extent_info->disk_offset;
2299 key.type = BTRFS_EXTENT_ITEM_KEY;
2300 key.offset = extent_info->disk_len;
2301 ret = btrfs_alloc_reserved_file_extent(trans, root,
2302 btrfs_ino(inode),
2303 extent_info->file_offset,
2304 extent_info->qgroup_reserved,
2305 &key);
2306 } else {
2307 u64 ref_offset;
2308
2309 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2310 extent_info->disk_offset,
2311 extent_info->disk_len, 0,
2312 root->root_key.objectid);
2313 ref_offset = extent_info->file_offset - extent_info->data_offset;
2314 btrfs_init_data_ref(&ref, root->root_key.objectid,
2315 btrfs_ino(inode), ref_offset, 0, false);
2316 ret = btrfs_inc_extent_ref(trans, &ref);
2317 }
2318
2319 extent_info->insertions++;
2320
2321 return ret;
2322}
2323
2324/*
2325 * The respective range must have been previously locked, as well as the inode.
2326 * The end offset is inclusive (last byte of the range).
2327 * @extent_info is NULL for fallocate's hole punching and non-NULL when replacing
2328 * the file range with an extent.
2329 * When not punching a hole, we don't want to end up in a state where we dropped
2330 * extents without inserting a new one, so we must abort the transaction to avoid
2331 * a corruption.
2332 */
2333int btrfs_replace_file_extents(struct btrfs_inode *inode,
2334 struct btrfs_path *path, const u64 start,
2335 const u64 end,
2336 struct btrfs_replace_extent_info *extent_info,
2337 struct btrfs_trans_handle **trans_out)
2338{
2339 struct btrfs_drop_extents_args drop_args = { 0 };
2340 struct btrfs_root *root = inode->root;
2341 struct btrfs_fs_info *fs_info = root->fs_info;
2342 u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
2343 u64 ino_size = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
2344 struct btrfs_trans_handle *trans = NULL;
2345 struct btrfs_block_rsv *rsv;
2346 unsigned int rsv_count;
2347 u64 cur_offset;
2348 u64 len = end - start;
2349 int ret = 0;
2350
2351 if (end <= start)
2352 return -EINVAL;
2353
2354 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
2355 if (!rsv) {
2356 ret = -ENOMEM;
2357 goto out;
2358 }
2359 rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
2360 rsv->failfast = true;
2361
2362 /*
2363 * 1 - update the inode
2364 * 1 - removing the extents in the range
2365 * 1 - adding the hole extent if no_holes isn't set or if we are
2366 * replacing the range with a new extent
2367 */
2368 if (!btrfs_fs_incompat(fs_info, NO_HOLES) || extent_info)
2369 rsv_count = 3;
2370 else
2371 rsv_count = 2;
2372
2373 trans = btrfs_start_transaction(root, rsv_count);
2374 if (IS_ERR(trans)) {
2375 ret = PTR_ERR(trans);
2376 trans = NULL;
2377 goto out_free;
2378 }
2379
2380 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
2381 min_size, false);
2382 if (WARN_ON(ret))
2383 goto out_trans;
2384 trans->block_rsv = rsv;
2385
2386 cur_offset = start;
2387 drop_args.path = path;
2388 drop_args.end = end + 1;
2389 drop_args.drop_cache = true;
2390 while (cur_offset < end) {
2391 drop_args.start = cur_offset;
2392 ret = btrfs_drop_extents(trans, root, inode, &drop_args);
2393 /* If we are punching a hole decrement the inode's byte count */
2394 if (!extent_info)
2395 btrfs_update_inode_bytes(inode, 0,
2396 drop_args.bytes_found);
2397 if (ret != -ENOSPC) {
2398 /*
2399 * The only time we don't want to abort is if we are
2400 * attempting to clone a partial inline extent, in which
2401 * case we'll get EOPNOTSUPP. However if we aren't
2402 * cloning we need to abort no matter what, because if we
2403 * got EOPNOTSUPP via prealloc then we messed up and
2404 * need to abort.
2405 */
2406 if (ret &&
2407 (ret != -EOPNOTSUPP ||
2408 (extent_info && extent_info->is_new_extent)))
2409 btrfs_abort_transaction(trans, ret);
2410 break;
2411 }
2412
2413 trans->block_rsv = &fs_info->trans_block_rsv;
2414
2415 if (!extent_info && cur_offset < drop_args.drop_end &&
2416 cur_offset < ino_size) {
2417 ret = fill_holes(trans, inode, path, cur_offset,
2418 drop_args.drop_end);
2419 if (ret) {
2420 /*
2421 * If we failed then we didn't insert our hole
2422 * entries for the area we dropped, which leaves
2423 * the fs corrupted, so we must abort the
2424 * transaction.
2425 */
2426 btrfs_abort_transaction(trans, ret);
2427 break;
2428 }
2429 } else if (!extent_info && cur_offset < drop_args.drop_end) {
2430 /*
2431 * We are past the i_size here, but since we didn't
2432 * insert holes we need to clear the mapped area so we
2433 * know to not set disk_i_size in this area until a new
2434 * file extent is inserted here.
2435 */
2436 ret = btrfs_inode_clear_file_extent_range(inode,
2437 cur_offset,
2438 drop_args.drop_end - cur_offset);
2439 if (ret) {
2440 /*
2441 * We couldn't clear our area, so disk_i_size could
2442 * presumably be adjusted up over it and corrupt the
2443 * fs, so we need to abort.
2444 */
2445 btrfs_abort_transaction(trans, ret);
2446 break;
2447 }
2448 }
2449
2450 if (extent_info &&
2451 drop_args.drop_end > extent_info->file_offset) {
2452 u64 replace_len = drop_args.drop_end -
2453 extent_info->file_offset;
2454
2455 ret = btrfs_insert_replace_extent(trans, inode, path,
2456 extent_info, replace_len,
2457 drop_args.bytes_found);
2458 if (ret) {
2459 btrfs_abort_transaction(trans, ret);
2460 break;
2461 }
2462 extent_info->data_len -= replace_len;
2463 extent_info->data_offset += replace_len;
2464 extent_info->file_offset += replace_len;
2465 }
2466
2467 /*
2468 * We are releasing our handle on the transaction, balance the
2469 * dirty pages of the btree inode and flush delayed items, and
2470 * then get a new transaction handle, which may now point to a
2471 * new transaction in case someone else may have committed the
2472 * transaction we used to replace/drop file extent items. So
2473 * bump the inode's iversion and update mtime and ctime except
2474 * if we are called from a dedupe context. This is because a
2475 * power failure/crash may happen after the transaction is
2476 * committed and before we finish replacing/dropping all the
2477 * file extent items we need.
2478 */
2479 inode_inc_iversion(&inode->vfs_inode);
2480
2481 if (!extent_info || extent_info->update_times)
2482 inode_set_mtime_to_ts(&inode->vfs_inode,
2483 inode_set_ctime_current(&inode->vfs_inode));
2484
2485 ret = btrfs_update_inode(trans, inode);
2486 if (ret)
2487 break;
2488
2489 btrfs_end_transaction(trans);
2490 btrfs_btree_balance_dirty(fs_info);
2491
2492 trans = btrfs_start_transaction(root, rsv_count);
2493 if (IS_ERR(trans)) {
2494 ret = PTR_ERR(trans);
2495 trans = NULL;
2496 break;
2497 }
2498
2499 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
2500 rsv, min_size, false);
2501 if (WARN_ON(ret))
2502 break;
2503 trans->block_rsv = rsv;
2504
2505 cur_offset = drop_args.drop_end;
2506 len = end - cur_offset;
2507 if (!extent_info && len) {
2508 ret = find_first_non_hole(inode, &cur_offset, &len);
2509 if (unlikely(ret < 0))
2510 break;
2511 if (ret && !len) {
2512 ret = 0;
2513 break;
2514 }
2515 }
2516 }
2517
2518 /*
2519 * If we were cloning, force the next fsync to be a full one since we
2520 * replaced (or just dropped in the case of cloning holes when
2521 * NO_HOLES is enabled) file extent items and did not set up new extent
2522 * maps for the replacement extents (or holes).
2523 */
2524 if (extent_info && !extent_info->is_new_extent)
2525 btrfs_set_inode_full_sync(inode);
2526
2527 if (ret)
2528 goto out_trans;
2529
2530 trans->block_rsv = &fs_info->trans_block_rsv;
2531 /*
2532 * If we are using the NO_HOLES feature we might already have had a
2533 * hole that overlaps a part of the region [lockstart, lockend] and
2534 * ends at (or beyond) lockend. Since we have no file extent items to
2535 * represent holes, drop_end can be less than lockend and so we must
2536 * make sure we have an extent map representing the existing hole (the
2537 * call to __btrfs_drop_extents() might have dropped the existing extent
2538 * map representing the existing hole), otherwise the fast fsync path
2539 * will not record the existence of the hole region
2540 * [existing_hole_start, lockend].
2541 */
2542 if (drop_args.drop_end <= end)
2543 drop_args.drop_end = end + 1;
2544 /*
2545 * Don't insert file hole extent item if it's for a range beyond eof
2546 * (because it's useless) or if it represents a 0 bytes range (when
2547 * cur_offset == drop_end).
2548 */
2549 if (!extent_info && cur_offset < ino_size &&
2550 cur_offset < drop_args.drop_end) {
2551 ret = fill_holes(trans, inode, path, cur_offset,
2552 drop_args.drop_end);
2553 if (ret) {
2554 /* Same comment as above. */
2555 btrfs_abort_transaction(trans, ret);
2556 goto out_trans;
2557 }
2558 } else if (!extent_info && cur_offset < drop_args.drop_end) {
2559 /* See the comment in the loop above for the reasoning here. */
2560 ret = btrfs_inode_clear_file_extent_range(inode, cur_offset,
2561 drop_args.drop_end - cur_offset);
2562 if (ret) {
2563 btrfs_abort_transaction(trans, ret);
2564 goto out_trans;
2565 }
2566
2567 }
2568 if (extent_info) {
2569 ret = btrfs_insert_replace_extent(trans, inode, path,
2570 extent_info, extent_info->data_len,
2571 drop_args.bytes_found);
2572 if (ret) {
2573 btrfs_abort_transaction(trans, ret);
2574 goto out_trans;
2575 }
2576 }
2577
2578out_trans:
2579 if (!trans)
2580 goto out_free;
2581
2582 trans->block_rsv = &fs_info->trans_block_rsv;
2583 if (ret)
2584 btrfs_end_transaction(trans);
2585 else
2586 *trans_out = trans;
2587out_free:
2588 btrfs_free_block_rsv(fs_info, rsv);
2589out:
2590 return ret;
2591}
2592
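/*
 * Punch a hole in the range [offset, offset + len). Partial blocks at the
 * edges of the range are zeroed in the page cache, while for the block
 * aligned part the existing extents are dropped and, unless the NO_HOLES
 * feature is enabled, replaced with hole extent items.
 */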
2593static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
2594{
2595 struct inode *inode = file_inode(file);
2596 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2597 struct btrfs_root *root = BTRFS_I(inode)->root;
2598 struct extent_state *cached_state = NULL;
2599 struct btrfs_path *path;
2600 struct btrfs_trans_handle *trans = NULL;
2601 u64 lockstart;
2602 u64 lockend;
2603 u64 tail_start;
2604 u64 tail_len;
2605 u64 orig_start = offset;
2606 int ret = 0;
2607 bool same_block;
2608 u64 ino_size;
2609 bool truncated_block = false;
2610 bool updated_inode = false;
2611
2612 btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2613
2614 ret = btrfs_wait_ordered_range(inode, offset, len);
2615 if (ret)
2616 goto out_only_mutex;
2617
2618 ino_size = round_up(inode->i_size, fs_info->sectorsize);
2619 ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2620 if (ret < 0)
2621 goto out_only_mutex;
2622 if (ret && !len) {
2623 /* Already in a large hole */
2624 ret = 0;
2625 goto out_only_mutex;
2626 }
2627
2628 ret = file_modified(file);
2629 if (ret)
2630 goto out_only_mutex;
2631
2632 lockstart = round_up(offset, fs_info->sectorsize);
2633 lockend = round_down(offset + len, fs_info->sectorsize) - 1;
2634 same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
2635 == (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
2636 /*
2637 * We needn't truncate any block which is beyond the end of the file
2638 * because we are sure there is no data there.
2639 */
2640 /*
2641 * Only do this if we are in the same block and we aren't doing the
2642 * entire block.
2643 */
2644 if (same_block && len < fs_info->sectorsize) {
2645 if (offset < ino_size) {
2646 truncated_block = true;
2647 ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
2648 0);
2649 } else {
2650 ret = 0;
2651 }
2652 goto out_only_mutex;
2653 }
2654
2655 /* zero back part of the first block */
2656 if (offset < ino_size) {
2657 truncated_block = true;
2658 ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
2659 if (ret) {
2660 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2661 return ret;
2662 }
2663 }
2664
2665 /* Check the aligned pages after the first unaligned page if
2666 * offset == orig_start; otherwise the first unaligned page and
2667 * several following pages are already inside holes and the
2668 * extra check can be skipped. */
2669 if (offset == orig_start) {
2670 /* after truncate page, check hole again */
2671 len = offset + len - lockstart;
2672 offset = lockstart;
2673 ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2674 if (ret < 0)
2675 goto out_only_mutex;
2676 if (ret && !len) {
2677 ret = 0;
2678 goto out_only_mutex;
2679 }
2680 lockstart = offset;
2681 }
2682
2683 /* Check the tail unaligned part is in a hole */
2684 tail_start = lockend + 1;
2685 tail_len = offset + len - tail_start;
2686 if (tail_len) {
2687 ret = find_first_non_hole(BTRFS_I(inode), &tail_start, &tail_len);
2688 if (unlikely(ret < 0))
2689 goto out_only_mutex;
2690 if (!ret) {
2691 /* zero the front end of the last page */
2692 if (tail_start + tail_len < ino_size) {
2693 truncated_block = true;
2694 ret = btrfs_truncate_block(BTRFS_I(inode),
2695 tail_start + tail_len,
2696 0, 1);
2697 if (ret)
2698 goto out_only_mutex;
2699 }
2700 }
2701 }
2702
2703 if (lockend < lockstart) {
2704 ret = 0;
2705 goto out_only_mutex;
2706 }
2707
2708 btrfs_punch_hole_lock_range(inode, lockstart, lockend, &cached_state);
2709
2710 path = btrfs_alloc_path();
2711 if (!path) {
2712 ret = -ENOMEM;
2713 goto out;
2714 }
2715
2716 ret = btrfs_replace_file_extents(BTRFS_I(inode), path, lockstart,
2717 lockend, NULL, &trans);
2718 btrfs_free_path(path);
2719 if (ret)
2720 goto out;
2721
2722 ASSERT(trans != NULL);
2723 inode_inc_iversion(inode);
2724 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
2725 ret = btrfs_update_inode(trans, BTRFS_I(inode));
2726 updated_inode = true;
2727 btrfs_end_transaction(trans);
2728 btrfs_btree_balance_dirty(fs_info);
2729out:
2730 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2731 &cached_state);
2732out_only_mutex:
2733 if (!updated_inode && truncated_block && !ret) {
2734 /*
2735 * If we only end up zeroing part of a page, we still need to
2736 * update the inode item, so that all the time fields are
2737 * updated as well as the necessary btrfs inode in memory fields
2738 * for detecting, at fsync time, if the inode isn't yet in the
2739 * log tree or it's there but not up to date.
2740 */
2741 struct timespec64 now = inode_set_ctime_current(inode);
2742
2743 inode_inc_iversion(inode);
2744 inode_set_mtime_to_ts(inode, now);
2745 trans = btrfs_start_transaction(root, 1);
2746 if (IS_ERR(trans)) {
2747 ret = PTR_ERR(trans);
2748 } else {
2749 int ret2;
2750
2751 ret = btrfs_update_inode(trans, BTRFS_I(inode));
2752 ret2 = btrfs_end_transaction(trans);
2753 if (!ret)
2754 ret = ret2;
2755 }
2756 }
2757 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2758 return ret;
2759}
2760
2761/* Helper structure to record which range is already reserved */
2762struct falloc_range {
2763 struct list_head list;
2764 u64 start;
2765 u64 len;
2766};
2767
2768/*
2769 * Helper function to add falloc range
2770 *
2771 * The caller should have locked the larger extent range containing
2772 * [start, start + len).
2773 */
2774static int add_falloc_range(struct list_head *head, u64 start, u64 len)
2775{
2776 struct falloc_range *range = NULL;
2777
2778 if (!list_empty(head)) {
2779 /*
2780 * As fallocate iterates by bytenr order, we only need to check
2781 * the last range.
2782 */
2783 range = list_last_entry(head, struct falloc_range, list);
2784 if (range->start + range->len == start) {
2785 range->len += len;
2786 return 0;
2787 }
2788 }
2789
2790 range = kmalloc(sizeof(*range), GFP_KERNEL);
2791 if (!range)
2792 return -ENOMEM;
2793 range->start = start;
2794 range->len = len;
2795 list_add_tail(&range->list, head);
2796 return 0;
2797}
2798
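/*
 * After extending a file through fallocate, update the inode's i_size and
 * its on-disk copy in a transaction, unless FALLOC_FL_KEEP_SIZE was given
 * or the new end does not go beyond the current i_size.
 */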
2799static int btrfs_fallocate_update_isize(struct inode *inode,
2800 const u64 end,
2801 const int mode)
2802{
2803 struct btrfs_trans_handle *trans;
2804 struct btrfs_root *root = BTRFS_I(inode)->root;
2805 int ret;
2806 int ret2;
2807
2808 if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
2809 return 0;
2810
2811 trans = btrfs_start_transaction(root, 1);
2812 if (IS_ERR(trans))
2813 return PTR_ERR(trans);
2814
2815 inode_set_ctime_current(inode);
2816 i_size_write(inode, end);
2817 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
2818 ret = btrfs_update_inode(trans, BTRFS_I(inode));
2819 ret2 = btrfs_end_transaction(trans);
2820
2821 return ret ? ret : ret2;
2822}
2823
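/*
 * Classification of the block containing an unaligned boundary of a zero
 * range operation, used to decide whether to zero the block in the page
 * cache or to extend the allocation range to cover it.
 */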
2824enum {
2825 RANGE_BOUNDARY_WRITTEN_EXTENT,
2826 RANGE_BOUNDARY_PREALLOC_EXTENT,
2827 RANGE_BOUNDARY_HOLE,
2828};
2829
2830static int btrfs_zero_range_check_range_boundary(struct btrfs_inode *inode,
2831 u64 offset)
2832{
2833 const u64 sectorsize = inode->root->fs_info->sectorsize;
2834 struct extent_map *em;
2835 int ret;
2836
2837 offset = round_down(offset, sectorsize);
2838 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize);
2839 if (IS_ERR(em))
2840 return PTR_ERR(em);
2841
2842 if (em->block_start == EXTENT_MAP_HOLE)
2843 ret = RANGE_BOUNDARY_HOLE;
2844 else if (em->flags & EXTENT_FLAG_PREALLOC)
2845 ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
2846 else
2847 ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
2848
2849 free_extent_map(em);
2850 return ret;
2851}
2852
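/*
 * Implement fallocate's FALLOC_FL_ZERO_RANGE mode: zero out any unaligned
 * boundaries of the range in the page cache and allocate a prealloc
 * (unwritten) extent for the block aligned part, dropping whatever extents
 * previously existed in it.
 */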
2853static int btrfs_zero_range(struct inode *inode,
2854 loff_t offset,
2855 loff_t len,
2856 const int mode)
2857{
2858 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2859 struct extent_map *em;
2860 struct extent_changeset *data_reserved = NULL;
2861 int ret;
2862 u64 alloc_hint = 0;
2863 const u64 sectorsize = fs_info->sectorsize;
2864 u64 alloc_start = round_down(offset, sectorsize);
2865 u64 alloc_end = round_up(offset + len, sectorsize);
2866 u64 bytes_to_reserve = 0;
2867 bool space_reserved = false;
2868
2869 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
2870 alloc_end - alloc_start);
2871 if (IS_ERR(em)) {
2872 ret = PTR_ERR(em);
2873 goto out;
2874 }
2875
2876 /*
2877 * Avoid hole punching and extent allocation for some cases. More cases
2878 * could be considered, but these are unlikely to be common and we keep things
2879 * as simple as possible for now. Also, intentionally, if the target
2880 * range contains one or more prealloc extents together with regular
2881 * extents and holes, we drop all the existing extents and allocate a
2882 * new prealloc extent, so that we get a larger contiguous disk extent.
2883 */
2884 if (em->start <= alloc_start && (em->flags & EXTENT_FLAG_PREALLOC)) {
2885 const u64 em_end = em->start + em->len;
2886
2887 if (em_end >= offset + len) {
2888 /*
2889 * The whole range is already a prealloc extent,
2890 * do nothing except updating the inode's i_size if
2891 * needed.
2892 */
2893 free_extent_map(em);
2894 ret = btrfs_fallocate_update_isize(inode, offset + len,
2895 mode);
2896 goto out;
2897 }
2898 /*
2899 * Part of the range is already a prealloc extent, so operate
2900 * only on the remaining part of the range.
2901 */
2902 alloc_start = em_end;
2903 ASSERT(IS_ALIGNED(alloc_start, sectorsize));
2904 len = offset + len - alloc_start;
2905 offset = alloc_start;
2906 alloc_hint = em->block_start + em->len;
2907 }
2908 free_extent_map(em);
2909
2910 if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
2911 BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
2912 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
2913 sectorsize);
2914 if (IS_ERR(em)) {
2915 ret = PTR_ERR(em);
2916 goto out;
2917 }
2918
2919 if (em->flags & EXTENT_FLAG_PREALLOC) {
2920 free_extent_map(em);
2921 ret = btrfs_fallocate_update_isize(inode, offset + len,
2922 mode);
2923 goto out;
2924 }
2925 if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
2926 free_extent_map(em);
2927 ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
2928 0);
2929 if (!ret)
2930 ret = btrfs_fallocate_update_isize(inode,
2931 offset + len,
2932 mode);
2933 return ret;
2934 }
2935 free_extent_map(em);
2936 alloc_start = round_down(offset, sectorsize);
2937 alloc_end = alloc_start + sectorsize;
2938 goto reserve_space;
2939 }
2940
2941 alloc_start = round_up(offset, sectorsize);
2942 alloc_end = round_down(offset + len, sectorsize);
2943
2944 /*
2945 * For unaligned ranges, check the pages at the boundaries, they might
2946 * map to an extent, in which case we need to partially zero them, or
2947 * they might map to a hole, in which case we need our allocation range
2948 * to cover them.
2949 */
2950 if (!IS_ALIGNED(offset, sectorsize)) {
2951 ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
2952 offset);
2953 if (ret < 0)
2954 goto out;
2955 if (ret == RANGE_BOUNDARY_HOLE) {
2956 alloc_start = round_down(offset, sectorsize);
2957 ret = 0;
2958 } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
2959 ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
2960 if (ret)
2961 goto out;
2962 } else {
2963 ret = 0;
2964 }
2965 }
2966
2967 if (!IS_ALIGNED(offset + len, sectorsize)) {
2968 ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
2969 offset + len);
2970 if (ret < 0)
2971 goto out;
2972 if (ret == RANGE_BOUNDARY_HOLE) {
2973 alloc_end = round_up(offset + len, sectorsize);
2974 ret = 0;
2975 } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
2976 ret = btrfs_truncate_block(BTRFS_I(inode), offset + len,
2977 0, 1);
2978 if (ret)
2979 goto out;
2980 } else {
2981 ret = 0;
2982 }
2983 }
2984
2985reserve_space:
2986 if (alloc_start < alloc_end) {
2987 struct extent_state *cached_state = NULL;
2988 const u64 lockstart = alloc_start;
2989 const u64 lockend = alloc_end - 1;
2990
2991 bytes_to_reserve = alloc_end - alloc_start;
2992 ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
2993 bytes_to_reserve);
2994 if (ret < 0)
2995 goto out;
2996 space_reserved = true;
2997 btrfs_punch_hole_lock_range(inode, lockstart, lockend,
2998 &cached_state);
2999 ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
3000 alloc_start, bytes_to_reserve);
3001 if (ret) {
3002 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
3003 lockend, &cached_state);
3004 goto out;
3005 }
3006 ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
3007 alloc_end - alloc_start,
3008 i_blocksize(inode),
3009 offset + len, &alloc_hint);
3010 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3011 &cached_state);
3012 /* btrfs_prealloc_file_range releases reserved space on error */
3013 if (ret) {
3014 space_reserved = false;
3015 goto out;
3016 }
3017 }
3018 ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
3019 out:
3020 if (ret && space_reserved)
3021 btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
3022 alloc_start, bytes_to_reserve);
3023 extent_changeset_free(data_reserved);
3024
3025 return ret;
3026}
3027
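/*
 * Preallocate extents for the range [offset, offset + len), reserving data
 * and qgroup space only for the subranges that are not already backed by
 * an allocated extent. Hole punching and range zeroing are dispatched to
 * btrfs_punch_hole() and btrfs_zero_range() respectively.
 */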
3028static long btrfs_fallocate(struct file *file, int mode,
3029 loff_t offset, loff_t len)
3030{
3031 struct inode *inode = file_inode(file);
3032 struct extent_state *cached_state = NULL;
3033 struct extent_changeset *data_reserved = NULL;
3034 struct falloc_range *range;
3035 struct falloc_range *tmp;
3036 LIST_HEAD(reserve_list);
3037 u64 cur_offset;
3038 u64 last_byte;
3039 u64 alloc_start;
3040 u64 alloc_end;
3041 u64 alloc_hint = 0;
3042 u64 locked_end;
3043 u64 actual_end = 0;
3044 u64 data_space_needed = 0;
3045 u64 data_space_reserved = 0;
3046 u64 qgroup_reserved = 0;
3047 struct extent_map *em;
3048 int blocksize = BTRFS_I(inode)->root->fs_info->sectorsize;
3049 int ret;
3050
3051 /* Do not allow fallocate in ZONED mode */
3052 if (btrfs_is_zoned(btrfs_sb(inode->i_sb)))
3053 return -EOPNOTSUPP;
3054
3055 alloc_start = round_down(offset, blocksize);
3056 alloc_end = round_up(offset + len, blocksize);
3057 cur_offset = alloc_start;
3058
3059 /* Make sure we aren't being given some crap mode */
3060 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3061 FALLOC_FL_ZERO_RANGE))
3062 return -EOPNOTSUPP;
3063
3064 if (mode & FALLOC_FL_PUNCH_HOLE)
3065 return btrfs_punch_hole(file, offset, len);
3066
3067 btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
3068
3069 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
3070 ret = inode_newsize_ok(inode, offset + len);
3071 if (ret)
3072 goto out;
3073 }
3074
3075 ret = file_modified(file);
3076 if (ret)
3077 goto out;
3078
3079 /*
3080 * TODO: Move these two operations after we have checked
3081 * accurate reserved space, or fallocate can still fail but
3082 * with page truncated or size expanded.
3083 *
3084 * But that's a minor problem and won't do much harm BTW.
3085 */
3086 if (alloc_start > inode->i_size) {
3087 ret = btrfs_cont_expand(BTRFS_I(inode), i_size_read(inode),
3088 alloc_start);
3089 if (ret)
3090 goto out;
3091 } else if (offset + len > inode->i_size) {
3092 /*
3093 * If we are fallocating from the end of the file onward we
3094 * need to zero out the end of the block if i_size lands in the
3095 * middle of a block.
3096 */
3097 ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
3098 if (ret)
3099 goto out;
3100 }
3101
3102 /*
3103 * We have locked the inode at the VFS level (in exclusive mode) and we
3104 * have locked the i_mmap_lock lock (in exclusive mode). Now before
3105 * locking the file range, flush all dealloc in the range and wait for
3106 * all ordered extents in the range to complete. After this we can lock
3107 * the file range and, due to the previous locking we did, we know there
3108 * can't be more delalloc or ordered extents in the range.
3109 */
3110 ret = btrfs_wait_ordered_range(inode, alloc_start,
3111 alloc_end - alloc_start);
3112 if (ret)
3113 goto out;
3114
3115 if (mode & FALLOC_FL_ZERO_RANGE) {
3116 ret = btrfs_zero_range(inode, offset, len, mode);
3117 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
3118 return ret;
3119 }
3120
3121 locked_end = alloc_end - 1;
3122 lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3123 &cached_state);
3124
3125 btrfs_assert_inode_range_clean(BTRFS_I(inode), alloc_start, locked_end);
3126
3127 /* First, check if we exceed the qgroup limit */
3128 while (cur_offset < alloc_end) {
3129 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
3130 alloc_end - cur_offset);
3131 if (IS_ERR(em)) {
3132 ret = PTR_ERR(em);
3133 break;
3134 }
3135 last_byte = min(extent_map_end(em), alloc_end);
3136 actual_end = min_t(u64, extent_map_end(em), offset + len);
3137 last_byte = ALIGN(last_byte, blocksize);
3138 if (em->block_start == EXTENT_MAP_HOLE ||
3139 (cur_offset >= inode->i_size &&
3140 !(em->flags & EXTENT_FLAG_PREALLOC))) {
3141 const u64 range_len = last_byte - cur_offset;
3142
3143 ret = add_falloc_range(&reserve_list, cur_offset, range_len);
3144 if (ret < 0) {
3145 free_extent_map(em);
3146 break;
3147 }
3148 ret = btrfs_qgroup_reserve_data(BTRFS_I(inode),
3149 &data_reserved, cur_offset, range_len);
3150 if (ret < 0) {
3151 free_extent_map(em);
3152 break;
3153 }
3154 qgroup_reserved += range_len;
3155 data_space_needed += range_len;
3156 }
3157 free_extent_map(em);
3158 cur_offset = last_byte;
3159 }
3160
3161 if (!ret && data_space_needed > 0) {
3162 /*
3163 * We are safe to reserve space here as we can't have delalloc
3164 * in the range, see above.
3165 */
3166 ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3167 data_space_needed);
3168 if (!ret)
3169 data_space_reserved = data_space_needed;
3170 }
3171
3172 /*
3173 * If ret is still 0, it means we're OK to fallocate.
3174 * Otherwise just clean up the list and exit.
3175 */
3176 list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3177 if (!ret) {
3178 ret = btrfs_prealloc_file_range(inode, mode,
3179 range->start,
3180 range->len, i_blocksize(inode),
3181 offset + len, &alloc_hint);
3182 /*
3183 * btrfs_prealloc_file_range() releases space even
3184 * if it returns an error.
3185 */
3186 data_space_reserved -= range->len;
3187 qgroup_reserved -= range->len;
3188 } else if (data_space_reserved > 0) {
3189 btrfs_free_reserved_data_space(BTRFS_I(inode),
3190 data_reserved, range->start,
3191 range->len);
3192 data_space_reserved -= range->len;
3193 qgroup_reserved -= range->len;
3194 } else if (qgroup_reserved > 0) {
3195 btrfs_qgroup_free_data(BTRFS_I(inode), data_reserved,
3196 range->start, range->len, NULL);
3197 qgroup_reserved -= range->len;
3198 }
3199 list_del(&range->list);
3200 kfree(range);
3201 }
3202 if (ret < 0)
3203 goto out_unlock;
3204
3205 /*
3206 * We didn't need to allocate any more space, but we still extended the
3207 * size of the file so we need to update i_size and the inode item.
3208 */
3209 ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
3210out_unlock:
3211 unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3212 &cached_state);
3213out:
3214 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
3215 extent_changeset_free(data_reserved);
3216 return ret;
3217}
3218
3219/*
3220 * Helper for btrfs_find_delalloc_in_range(). Find a subrange in a given range
3221 * that has unflushed and/or flushing delalloc. There might be other adjacent
3222 * subranges after the one it found, so btrfs_find_delalloc_in_range() keeps
3223 * looping while it gets adjacent subranges, merging them together.
3224 */
3225static bool find_delalloc_subrange(struct btrfs_inode *inode, u64 start, u64 end,
3226 struct extent_state **cached_state,
3227 bool *search_io_tree,
3228 u64 *delalloc_start_ret, u64 *delalloc_end_ret)
3229{
3230 u64 len = end + 1 - start;
3231 u64 delalloc_len = 0;
3232 struct btrfs_ordered_extent *oe;
3233 u64 oe_start;
3234 u64 oe_end;
3235
3236 /*
3237 * Search the io tree first for EXTENT_DELALLOC. If we find any, it
3238 * means we have delalloc (dirty pages) for which writeback has not
3239 * started yet.
3240 */
3241 if (*search_io_tree) {
3242 spin_lock(&inode->lock);
3243 if (inode->delalloc_bytes > 0) {
3244 spin_unlock(&inode->lock);
3245 *delalloc_start_ret = start;
3246 delalloc_len = count_range_bits(&inode->io_tree,
3247 delalloc_start_ret, end,
3248 len, EXTENT_DELALLOC, 1,
3249 cached_state);
3250 } else {
3251 spin_unlock(&inode->lock);
3252 }
3253 }
3254
3255 if (delalloc_len > 0) {
3256 /*
3257 * If delalloc was found then *delalloc_start_ret has a sector size
3258 * aligned value (rounded down).
3259 */
3260 *delalloc_end_ret = *delalloc_start_ret + delalloc_len - 1;
3261
3262 if (*delalloc_start_ret == start) {
3263 /* Delalloc for the whole range, nothing more to do. */
3264 if (*delalloc_end_ret == end)
3265 return true;
3266 /* Else trim our search range for ordered extents. */
3267 start = *delalloc_end_ret + 1;
3268 len = end + 1 - start;
3269 }
3270 } else {
3271 /* No delalloc, future calls don't need to search again. */
3272 *search_io_tree = false;
3273 }
3274
3275 /*
3276 * Now also check if there's any ordered extent in the range.
3277 * We do this because:
3278 *
3279 * 1) When delalloc is flushed, the file range is locked, we clear the
3280 * EXTENT_DELALLOC bit from the io tree and create an extent map and
3281 * an ordered extent for the write. So we might just have been called
3282 * after delalloc is flushed and before the ordered extent completes
3283 * and inserts the new file extent item in the subvolume's btree;
3284 *
3285 * 2) We may have an ordered extent created by flushing delalloc for a
3286 * subrange that starts before the subrange we found marked with
3287 * EXTENT_DELALLOC in the io tree.
3288 *
3289 * We could also use the extent map tree to find such delalloc that is
3290 * being flushed, but using the ordered extents tree is more efficient
3291 * because it's usually much smaller as ordered extents are removed from
3292 * the tree once they complete. With the extent maps, we may have them
3293 * in the extent map tree for a very long time, and they were either
3294 * created by previous writes or loaded by read operations.
3295 */
3296 oe = btrfs_lookup_first_ordered_range(inode, start, len);
3297 if (!oe)
3298 return (delalloc_len > 0);
3299
3300 /* The ordered extent may span beyond our search range. */
3301 oe_start = max(oe->file_offset, start);
3302 oe_end = min(oe->file_offset + oe->num_bytes - 1, end);
3303
3304 btrfs_put_ordered_extent(oe);
3305
3306 /* Don't have unflushed delalloc, return the ordered extent range. */
3307 if (delalloc_len == 0) {
3308 *delalloc_start_ret = oe_start;
3309 *delalloc_end_ret = oe_end;
3310 return true;
3311 }
3312
3313 /*
3314 * We have both unflushed delalloc (io_tree) and an ordered extent.
3315 * If the ranges are adjacent, return a combined range, otherwise
3316 * return the leftmost range.
3317 */
3318 if (oe_start < *delalloc_start_ret) {
3319 if (oe_end < *delalloc_start_ret)
3320 *delalloc_end_ret = oe_end;
3321 *delalloc_start_ret = oe_start;
3322 } else if (*delalloc_end_ret + 1 == oe_start) {
3323 *delalloc_end_ret = oe_end;
3324 }
3325
3326 return true;
3327}
3328
3329/*
3330 * Check if there's delalloc in a given range.
3331 *
3332 * @inode: The inode.
3333 * @start: The start offset of the range. It does not need to be
3334 * sector size aligned.
3335 * @end: The end offset (inclusive value) of the search range.
3336 * It does not need to be sector size aligned.
3337 * @cached_state: Extent state record used for speeding up delalloc
3338 * searches in the inode's io_tree. Can be NULL.
3339 * @delalloc_start_ret: Output argument, set to the start offset of the
3340 * subrange found with delalloc (may not be sector size
3341 * aligned).
3342 * @delalloc_end_ret: Output argument, set to the end offset (inclusive value)
3343 * of the subrange found with delalloc.
3344 *
3345 * Returns true if a subrange with delalloc is found within the given range, and
3346 * if so it sets @delalloc_start_ret and @delalloc_end_ret with the start and
3347 * end offsets of the subrange.
3348 */
3349bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end,
3350 struct extent_state **cached_state,
3351 u64 *delalloc_start_ret, u64 *delalloc_end_ret)
3352{
3353 u64 cur_offset = round_down(start, inode->root->fs_info->sectorsize);
3354 u64 prev_delalloc_end = 0;
3355 bool search_io_tree = true;
3356 bool ret = false;
3357
3358 while (cur_offset <= end) {
3359 u64 delalloc_start;
3360 u64 delalloc_end;
3361 bool delalloc;
3362
3363 delalloc = find_delalloc_subrange(inode, cur_offset, end,
3364 cached_state, &search_io_tree,
3365 &delalloc_start,
3366 &delalloc_end);
3367 if (!delalloc)
3368 break;
3369
3370 if (prev_delalloc_end == 0) {
3371 /* First subrange found. */
3372 *delalloc_start_ret = max(delalloc_start, start);
3373 *delalloc_end_ret = delalloc_end;
3374 ret = true;
3375 } else if (delalloc_start == prev_delalloc_end + 1) {
3376 /* Subrange adjacent to the previous one, merge them. */
3377 *delalloc_end_ret = delalloc_end;
3378 } else {
3379 /* Subrange not adjacent to the previous one, exit. */
3380 break;
3381 }
3382
3383 prev_delalloc_end = delalloc_end;
3384 cur_offset = delalloc_end + 1;
3385 cond_resched();
3386 }
3387
3388 return ret;
3389}
3390
3391/*
3392 * Check if there's a hole or delalloc range in a range representing a hole (or
3393 * prealloc extent) found in the inode's subvolume btree.
3394 *
3395 * @inode: The inode.
3396 * @whence: Seek mode (SEEK_DATA or SEEK_HOLE).
3397 * @start: Start offset of the hole region. It does not need to be sector
3398 * size aligned.
3399 * @end: End offset (inclusive value) of the hole region. It does not
3400 * need to be sector size aligned.
3401 * @start_ret: Return parameter, used to set the start of the subrange in the
3402 * hole that matches the search criteria (seek mode), if such
3403 * subrange is found (return value of the function is true).
3404 * The value returned here may not be sector size aligned.
3405 *
3406 * Returns true if a subrange matching the given seek mode is found, and if one
3407 * is found, it updates @start_ret with the start of the subrange.
3408 */
3409static bool find_desired_extent_in_hole(struct btrfs_inode *inode, int whence,
3410 struct extent_state **cached_state,
3411 u64 start, u64 end, u64 *start_ret)
3412{
3413 u64 delalloc_start;
3414 u64 delalloc_end;
3415 bool delalloc;
3416
3417 delalloc = btrfs_find_delalloc_in_range(inode, start, end, cached_state,
3418 &delalloc_start, &delalloc_end);
3419 if (delalloc && whence == SEEK_DATA) {
3420 *start_ret = delalloc_start;
3421 return true;
3422 }
3423
3424 if (delalloc && whence == SEEK_HOLE) {
3425 /*
3426 * We found delalloc but it starts after our start offset. So we
3427 * have a hole between our start offset and the delalloc start.
3428 */
3429 if (start < delalloc_start) {
3430 *start_ret = start;
3431 return true;
3432 }
3433 /*
3434 * Delalloc range starts at our start offset.
3435 * If the delalloc range's length is smaller than our range,
3436 * then it means we have a hole that starts where the delalloc
3437 * subrange ends.
3438 */
3439 if (delalloc_end < end) {
3440 *start_ret = delalloc_end + 1;
3441 return true;
3442 }
3443
3444 /* There's delalloc for the whole range. */
3445 return false;
3446 }
3447
3448 if (!delalloc && whence == SEEK_HOLE) {
3449 *start_ret = start;
3450 return true;
3451 }
3452
3453 /*
3454 * No delalloc in the range and we are seeking for data. The caller has
3455 * to iterate to the next extent item in the subvolume btree.
3456 */
3457 return false;
3458}
3459
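/*
 * Implement SEEK_DATA and SEEK_HOLE by walking the file extent items of
 * the inode's subvolume tree. Implicit holes, hole extent items and
 * prealloc extents are treated as holes, except where delalloc (dirty
 * pages or ordered extents that have not completed yet) turns part of
 * such a range into data.
 */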
3460static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
3461{
3462 struct btrfs_inode *inode = BTRFS_I(file->f_mapping->host);
3463 struct btrfs_file_private *private = file->private_data;
3464 struct btrfs_fs_info *fs_info = inode->root->fs_info;
3465 struct extent_state *cached_state = NULL;
3466 struct extent_state **delalloc_cached_state;
3467 const loff_t i_size = i_size_read(&inode->vfs_inode);
3468 const u64 ino = btrfs_ino(inode);
3469 struct btrfs_root *root = inode->root;
3470 struct btrfs_path *path;
3471 struct btrfs_key key;
3472 u64 last_extent_end;
3473 u64 lockstart;
3474 u64 lockend;
3475 u64 start;
3476 int ret;
3477 bool found = false;
3478
3479 if (i_size == 0 || offset >= i_size)
3480 return -ENXIO;
3481
3482 /*
3483 * Quick path. If the inode has no prealloc extents and its number of
3484 * bytes used matches its i_size, then it cannot have holes.
3485 */
3486 if (whence == SEEK_HOLE &&
3487 !(inode->flags & BTRFS_INODE_PREALLOC) &&
3488 inode_get_bytes(&inode->vfs_inode) == i_size)
3489 return i_size;
3490
3491 if (!private) {
3492 private = kzalloc(sizeof(*private), GFP_KERNEL);
3493 /*
3494 * No worries if memory allocation failed.
3495 * The private structure is used only for speeding up multiple
3496 * lseek SEEK_HOLE/DATA calls to a file when there's delalloc,
3497 * so everything will still be correct.
3498 */
3499 file->private_data = private;
3500 }
3501
3502 if (private)
3503 delalloc_cached_state = &private->llseek_cached_state;
3504 else
3505 delalloc_cached_state = NULL;
3506
3507 /*
3508 * offset can be negative, in which case we start finding DATA/HOLE from
3509 * the very start of the file.
3510 */
3511 start = max_t(loff_t, 0, offset);
3512
3513 lockstart = round_down(start, fs_info->sectorsize);
3514 lockend = round_up(i_size, fs_info->sectorsize);
3515 if (lockend <= lockstart)
3516 lockend = lockstart + fs_info->sectorsize;
3517 lockend--;
3518
3519 path = btrfs_alloc_path();
3520 if (!path)
3521 return -ENOMEM;
3522 path->reada = READA_FORWARD;
3523
3524 key.objectid = ino;
3525 key.type = BTRFS_EXTENT_DATA_KEY;
3526 key.offset = start;
3527
3528 last_extent_end = lockstart;
3529
3530 lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3531
3532 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3533 if (ret < 0) {
3534 goto out;
3535 } else if (ret > 0 && path->slots[0] > 0) {
3536 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
3537 if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
3538 path->slots[0]--;
3539 }
3540
3541 while (start < i_size) {
3542 struct extent_buffer *leaf = path->nodes[0];
3543 struct btrfs_file_extent_item *extent;
3544 u64 extent_end;
3545 u8 type;
3546
3547 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3548 ret = btrfs_next_leaf(root, path);
3549 if (ret < 0)
3550 goto out;
3551 else if (ret > 0)
3552 break;
3553
3554 leaf = path->nodes[0];
3555 }
3556
3557 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3558 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
3559 break;
3560
3561 extent_end = btrfs_file_extent_end(path);
3562
3563 /*
3564 * In the first iteration we may have a slot that points to an
3565 * extent that ends before our start offset, so skip it.
3566 */
3567 if (extent_end <= start) {
3568 path->slots[0]++;
3569 continue;
3570 }
3571
3572 /* We have an implicit hole, NO_HOLES feature is likely set. */
3573 if (last_extent_end < key.offset) {
3574 u64 search_start = last_extent_end;
3575 u64 found_start;
3576
3577 /*
3578 * First iteration, @start matches @offset and it's
3579 * within the hole.
3580 */
3581 if (start == offset)
3582 search_start = offset;
3583
3584 found = find_desired_extent_in_hole(inode, whence,
3585 delalloc_cached_state,
3586 search_start,
3587 key.offset - 1,
3588 &found_start);
3589 if (found) {
3590 start = found_start;
3591 break;
3592 }
3593 /*
3594 * Didn't find data or a hole (due to delalloc) in the
3595 * implicit hole range, so we need to analyze the extent.
3596 */
3597 }
3598
3599 extent = btrfs_item_ptr(leaf, path->slots[0],
3600 struct btrfs_file_extent_item);
3601 type = btrfs_file_extent_type(leaf, extent);
3602
3603 /*
3604 * Can't access the extent's disk_bytenr field if this is an
3605 * inline extent, since at that offset, it's where the extent
3606 * data starts.
3607 */
3608 if (type == BTRFS_FILE_EXTENT_PREALLOC ||
3609 (type == BTRFS_FILE_EXTENT_REG &&
3610 btrfs_file_extent_disk_bytenr(leaf, extent) == 0)) {
3611 /*
3612 * Explicit hole or prealloc extent, search for delalloc.
3613 * A prealloc extent is treated like a hole.
3614 */
3615 u64 search_start = key.offset;
3616 u64 found_start;
3617
3618 /*
3619 * First iteration, @start matches @offset and it's
3620 * within the hole.
3621 */
3622 if (start == offset)
3623 search_start = offset;
3624
3625 found = find_desired_extent_in_hole(inode, whence,
3626 delalloc_cached_state,
3627 search_start,
3628 extent_end - 1,
3629 &found_start);
3630 if (found) {
3631 start = found_start;
3632 break;
3633 }
3634 /*
3635 * Didn't find data or a hole (due to delalloc) in the
3636 * implicit hole range, so we need to analyze the next
3637 * extent item.
3638 */
3639 } else {
3640 /*
3641 * Found a regular or inline extent.
3642 * If we are seeking for data, adjust the start offset
3643 * and stop, we're done.
3644 */
3645 if (whence == SEEK_DATA) {
3646 start = max_t(u64, key.offset, offset);
3647 found = true;
3648 break;
3649 }
3650 /*
3651 * Else, we are seeking for a hole, check the next file
3652 * extent item.
3653 */
3654 }
3655
3656 start = extent_end;
3657 last_extent_end = extent_end;
3658 path->slots[0]++;
3659 if (fatal_signal_pending(current)) {
3660 ret = -EINTR;
3661 goto out;
3662 }
3663 cond_resched();
3664 }
3665
3666 /* We have an implicit hole from the last extent found up to i_size. */
3667 if (!found && start < i_size) {
3668 found = find_desired_extent_in_hole(inode, whence,
3669 delalloc_cached_state, start,
3670 i_size - 1, &start);
3671 if (!found)
3672 start = i_size;
3673 }
3674
3675out:
3676 unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3677 btrfs_free_path(path);
3678
3679 if (ret < 0)
3680 return ret;
3681
3682 if (whence == SEEK_DATA && start >= i_size)
3683 return -ENXIO;
3684
3685 return min_t(loff_t, start, i_size);
3686}

static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;

	switch (whence) {
	default:
		return generic_file_llseek(file, offset, whence);
	case SEEK_DATA:
	case SEEK_HOLE:
		btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
		offset = find_desired_extent(file, offset, whence);
		btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
		break;
	}

	if (offset < 0)
		return offset;

	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}
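
/*
 * Illustrative sketch only, kept out of the build with #if 0: how the
 * SEEK_DATA/SEEK_HOLE cases handled by btrfs_file_llseek() above look from
 * userspace. This is a hypothetical standalone program (the file argument
 * and the lack of error handling are placeholders); it walks every data
 * extent of a file as [start, end) byte ranges.
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd = open(argv[1], O_RDONLY);
	off_t end = lseek(fd, 0, SEEK_END);
	off_t data = 0;

	/* Both seeks below are resolved by find_desired_extent(). */
	while ((data = lseek(fd, data, SEEK_DATA)) >= 0 && data < end) {
		off_t hole = lseek(fd, data, SEEK_HOLE);

		printf("data: [%lld, %lld)\n", (long long)data, (long long)hole);
		data = hole;
	}
	close(fd);
	return 0;
}
#endif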

static int btrfs_file_open(struct inode *inode, struct file *filp)
{
	int ret;

	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC | FMODE_BUF_WASYNC |
			FMODE_CAN_ODIRECT;

	ret = fsverity_file_open(inode, filp);
	if (ret)
		return ret;
	return generic_file_open(inode, filp);
}

static int check_direct_read(struct btrfs_fs_info *fs_info,
			     const struct iov_iter *iter, loff_t offset)
{
	int ret;
	int i, seg;

	ret = check_direct_IO(fs_info, iter, offset);
	if (ret < 0)
		return ret;

	if (!iter_is_iovec(iter))
		return 0;

	for (seg = 0; seg < iter->nr_segs; seg++) {
		for (i = seg + 1; i < iter->nr_segs; i++) {
			const struct iovec *iov1 = iter_iov(iter) + seg;
			const struct iovec *iov2 = iter_iov(iter) + i;

			if (iov1->iov_base == iov2->iov_base)
				return -EINVAL;
		}
	}
	return 0;
}
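
/*
 * Illustrative sketch only, kept out of the build with #if 0: a hypothetical
 * userspace readv() that check_direct_read() above rejects, because two
 * O_DIRECT segments alias the same base address. The file name and sizes are
 * placeholders; 4096 is used so the segments pass the alignment checks and
 * actually reach the duplicate-base check.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("some-file", O_RDONLY | O_DIRECT);
	void *buf = NULL;

	/* O_DIRECT also requires sector alignment, see check_direct_IO(). */
	if (posix_memalign(&buf, 4096, 4096))
		return 1;

	struct iovec iov[2] = {
		{ .iov_base = buf, .iov_len = 4096 },
		{ .iov_base = buf, .iov_len = 4096 },	/* duplicate base */
	};

	/* Fails with EINVAL instead of silently overlapping the copies. */
	return readv(fd, iov, 2) < 0 ? 0 : 1;
}
#endif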

static ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t prev_left = 0;
	ssize_t read = 0;
	ssize_t ret;

	if (fsverity_active(inode))
		return 0;

	if (check_direct_read(btrfs_sb(inode->i_sb), to, iocb->ki_pos))
		return 0;

	btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
again:
	/*
	 * This is similar to what we do for direct IO writes, see the comment
	 * at btrfs_direct_write(), but we also disable page faults in addition
	 * to disabling them only at the iov_iter level. This is because when
	 * reading from a hole or prealloc extent, iomap calls iov_iter_zero(),
	 * which can still trigger page fault-ins despite having set ->nofault
	 * to true on our 'to' iov_iter.
	 *
	 * The difference to direct IO writes is that we deadlock when trying
	 * to lock the extent range in the inode's tree during the page reads
	 * triggered by the fault in (while for writes it is due to waiting for
	 * our own ordered extent). This is because for direct IO reads,
	 * btrfs_dio_iomap_begin() returns with the extent range locked, which
	 * is only unlocked in the endio callback (end_bio_extent_readpage()).
	 */
	pagefault_disable();
	to->nofault = true;
	ret = btrfs_dio_read(iocb, to, read);
	to->nofault = false;
	pagefault_enable();

	/* No increment (+=) because iomap returns a cumulative value. */
	if (ret > 0)
		read = ret;

	if (iov_iter_count(to) > 0 && (ret == -EFAULT || ret > 0)) {
		const size_t left = iov_iter_count(to);

		if (left == prev_left) {
			/*
			 * We didn't make any progress since the last attempt,
			 * fall back to a buffered read for the remainder of the
			 * range. This is just to avoid any possibility of looping
			 * for too long.
			 */
			ret = read;
		} else {
			/*
			 * We made some progress since the last retry or this is
			 * the first time we are retrying. Fault in as many pages
			 * as possible and retry.
			 */
			fault_in_iov_iter_writeable(to, left);
			prev_left = left;
			goto again;
		}
	}
	btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
	return ret < 0 ? ret : read;
}
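
/*
 * Illustrative sketch only, kept out of the build with #if 0: a hypothetical
 * read that can exercise the -EFAULT retry loop above. The destination
 * buffer is a fresh anonymous mapping with no pages faulted in yet, so with
 * page faults disabled the first btrfs_dio_read() pass may stop early, after
 * which fault_in_iov_iter_writeable() populates the pages and the read is
 * retried. File name and sizes are placeholders.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const size_t len = 1 << 20;
	int fd = open("some-file", O_RDONLY | O_DIRECT);
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* No page behind @buf is present until it is first touched. */
	ssize_t n = read(fd, buf, len);

	munmap(buf, len);
	close(fd);
	return n < 0;
}
#endif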

static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret = 0;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = btrfs_direct_read(iocb, to);
		if (ret < 0 || !iov_iter_count(to) ||
		    iocb->ki_pos >= i_size_read(file_inode(iocb->ki_filp)))
			return ret;
	}

	return filemap_read(iocb, to, ret);
}

const struct file_operations btrfs_file_operations = {
	.llseek		= btrfs_file_llseek,
	.read_iter	= btrfs_file_read_iter,
	.splice_read	= filemap_splice_read,
	.write_iter	= btrfs_file_write_iter,
	.splice_write	= iter_file_splice_write,
	.mmap		= btrfs_file_mmap,
	.open		= btrfs_file_open,
	.release	= btrfs_release_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_compat_ioctl,
#endif
	.remap_file_range = btrfs_remap_file_range,
};

int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
{
	int ret;

	/*
	 * So with compression we will find and lock a dirty page and clear the
	 * first one as dirty, set up an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked with
	 * writeback. So we can't just filemap_write_and_wait_range() and
	 * expect it to work since it will just kick off a thread to do the
	 * actual work. So we need to call filemap_fdatawrite_range _again_
	 * since it will wait on the page lock, which won't be unlocked until
	 * after the pages have been marked as writeback and so we're good to go
	 * from there. We have to do this otherwise we'll miss the ordered
	 * extents and that results in badness. Please Josef, do not think you
	 * know better and pull this out at some point in the future, it is
	 * right and you are wrong.
	 */
	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			     &BTRFS_I(inode)->runtime_flags))
		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);

	return ret;
}
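
/*
 * Illustrative sketch only, kept out of the build with #if 0: a hypothetical
 * caller showing why the second filemap_fdatawrite_range() pass above
 * matters. Waiting on writeback is only safe once the writeback bits are
 * guaranteed to be set, which the second pass ensures for async compressed
 * extents.
 */
#if 0
static int example_flush_and_wait(struct inode *inode, loff_t start, loff_t end)
{
	int ret;

	/* Starts writeback, including the async-extent second pass. */
	ret = btrfs_fdatawrite_range(inode, start, end);
	if (ret)
		return ret;

	/* Now every dirty page in the range is tagged for writeback. */
	return filemap_fdatawait_range(inode->i_mapping, start, end);
}
#endif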
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2007 Oracle. All rights reserved.
4 */
5
6#include <linux/fs.h>
7#include <linux/pagemap.h>
8#include <linux/time.h>
9#include <linux/init.h>
10#include <linux/string.h>
11#include <linux/backing-dev.h>
12#include <linux/falloc.h>
13#include <linux/writeback.h>
14#include <linux/compat.h>
15#include <linux/slab.h>
16#include <linux/btrfs.h>
17#include <linux/uio.h>
18#include <linux/iversion.h>
19#include "ctree.h"
20#include "disk-io.h"
21#include "transaction.h"
22#include "btrfs_inode.h"
23#include "print-tree.h"
24#include "tree-log.h"
25#include "locking.h"
26#include "volumes.h"
27#include "qgroup.h"
28#include "compression.h"
29#include "delalloc-space.h"
30#include "reflink.h"
31#include "subpage.h"
32
33static struct kmem_cache *btrfs_inode_defrag_cachep;
34/*
35 * when auto defrag is enabled we
36 * queue up these defrag structs to remember which
37 * inodes need defragging passes
38 */
39struct inode_defrag {
40 struct rb_node rb_node;
41 /* objectid */
42 u64 ino;
43 /*
44 * transid where the defrag was added, we search for
45 * extents newer than this
46 */
47 u64 transid;
48
49 /* root objectid */
50 u64 root;
51
52 /* last offset we were able to defrag */
53 u64 last_offset;
54
55 /* if we've wrapped around back to zero once already */
56 int cycled;
57};
58
59static int __compare_inode_defrag(struct inode_defrag *defrag1,
60 struct inode_defrag *defrag2)
61{
62 if (defrag1->root > defrag2->root)
63 return 1;
64 else if (defrag1->root < defrag2->root)
65 return -1;
66 else if (defrag1->ino > defrag2->ino)
67 return 1;
68 else if (defrag1->ino < defrag2->ino)
69 return -1;
70 else
71 return 0;
72}
73
74/* pop a record for an inode into the defrag tree. The lock
75 * must be held already
76 *
77 * If you're inserting a record for an older transid than an
78 * existing record, the transid already in the tree is lowered
79 *
80 * If an existing record is found the defrag item you
81 * pass in is freed
82 */
83static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
84 struct inode_defrag *defrag)
85{
86 struct btrfs_fs_info *fs_info = inode->root->fs_info;
87 struct inode_defrag *entry;
88 struct rb_node **p;
89 struct rb_node *parent = NULL;
90 int ret;
91
92 p = &fs_info->defrag_inodes.rb_node;
93 while (*p) {
94 parent = *p;
95 entry = rb_entry(parent, struct inode_defrag, rb_node);
96
97 ret = __compare_inode_defrag(defrag, entry);
98 if (ret < 0)
99 p = &parent->rb_left;
100 else if (ret > 0)
101 p = &parent->rb_right;
102 else {
103 /* if we're reinserting an entry for
104 * an old defrag run, make sure to
105 * lower the transid of our existing record
106 */
107 if (defrag->transid < entry->transid)
108 entry->transid = defrag->transid;
109 if (defrag->last_offset > entry->last_offset)
110 entry->last_offset = defrag->last_offset;
111 return -EEXIST;
112 }
113 }
114 set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
115 rb_link_node(&defrag->rb_node, parent, p);
116 rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
117 return 0;
118}
119
120static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
121{
122 if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
123 return 0;
124
125 if (btrfs_fs_closing(fs_info))
126 return 0;
127
128 return 1;
129}
130
131/*
132 * insert a defrag record for this inode if auto defrag is
133 * enabled
134 */
135int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
136 struct btrfs_inode *inode)
137{
138 struct btrfs_root *root = inode->root;
139 struct btrfs_fs_info *fs_info = root->fs_info;
140 struct inode_defrag *defrag;
141 u64 transid;
142 int ret;
143
144 if (!__need_auto_defrag(fs_info))
145 return 0;
146
147 if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
148 return 0;
149
150 if (trans)
151 transid = trans->transid;
152 else
153 transid = inode->root->last_trans;
154
155 defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
156 if (!defrag)
157 return -ENOMEM;
158
159 defrag->ino = btrfs_ino(inode);
160 defrag->transid = transid;
161 defrag->root = root->root_key.objectid;
162
163 spin_lock(&fs_info->defrag_inodes_lock);
164 if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
165 /*
166 * If we set IN_DEFRAG flag and evict the inode from memory,
167 * and then re-read this inode, this new inode doesn't have
168 * IN_DEFRAG flag. At the case, we may find the existed defrag.
169 */
170 ret = __btrfs_add_inode_defrag(inode, defrag);
171 if (ret)
172 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
173 } else {
174 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
175 }
176 spin_unlock(&fs_info->defrag_inodes_lock);
177 return 0;
178}
179
180/*
181 * Requeue the defrag object. If there is a defrag object that points to
182 * the same inode in the tree, we will merge them together (by
183 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
184 */
185static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
186 struct inode_defrag *defrag)
187{
188 struct btrfs_fs_info *fs_info = inode->root->fs_info;
189 int ret;
190
191 if (!__need_auto_defrag(fs_info))
192 goto out;
193
194 /*
195 * Here we don't check the IN_DEFRAG flag, because we need merge
196 * them together.
197 */
198 spin_lock(&fs_info->defrag_inodes_lock);
199 ret = __btrfs_add_inode_defrag(inode, defrag);
200 spin_unlock(&fs_info->defrag_inodes_lock);
201 if (ret)
202 goto out;
203 return;
204out:
205 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
206}
207
208/*
209 * pick the defragable inode that we want, if it doesn't exist, we will get
210 * the next one.
211 */
212static struct inode_defrag *
213btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
214{
215 struct inode_defrag *entry = NULL;
216 struct inode_defrag tmp;
217 struct rb_node *p;
218 struct rb_node *parent = NULL;
219 int ret;
220
221 tmp.ino = ino;
222 tmp.root = root;
223
224 spin_lock(&fs_info->defrag_inodes_lock);
225 p = fs_info->defrag_inodes.rb_node;
226 while (p) {
227 parent = p;
228 entry = rb_entry(parent, struct inode_defrag, rb_node);
229
230 ret = __compare_inode_defrag(&tmp, entry);
231 if (ret < 0)
232 p = parent->rb_left;
233 else if (ret > 0)
234 p = parent->rb_right;
235 else
236 goto out;
237 }
238
239 if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
240 parent = rb_next(parent);
241 if (parent)
242 entry = rb_entry(parent, struct inode_defrag, rb_node);
243 else
244 entry = NULL;
245 }
246out:
247 if (entry)
248 rb_erase(parent, &fs_info->defrag_inodes);
249 spin_unlock(&fs_info->defrag_inodes_lock);
250 return entry;
251}
252
253void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
254{
255 struct inode_defrag *defrag;
256 struct rb_node *node;
257
258 spin_lock(&fs_info->defrag_inodes_lock);
259 node = rb_first(&fs_info->defrag_inodes);
260 while (node) {
261 rb_erase(node, &fs_info->defrag_inodes);
262 defrag = rb_entry(node, struct inode_defrag, rb_node);
263 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
264
265 cond_resched_lock(&fs_info->defrag_inodes_lock);
266
267 node = rb_first(&fs_info->defrag_inodes);
268 }
269 spin_unlock(&fs_info->defrag_inodes_lock);
270}
271
272#define BTRFS_DEFRAG_BATCH 1024
273
274static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
275 struct inode_defrag *defrag)
276{
277 struct btrfs_root *inode_root;
278 struct inode *inode;
279 struct btrfs_ioctl_defrag_range_args range;
280 int num_defrag;
281 int ret;
282
283 /* get the inode */
284 inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
285 if (IS_ERR(inode_root)) {
286 ret = PTR_ERR(inode_root);
287 goto cleanup;
288 }
289
290 inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
291 btrfs_put_root(inode_root);
292 if (IS_ERR(inode)) {
293 ret = PTR_ERR(inode);
294 goto cleanup;
295 }
296
297 /* do a chunk of defrag */
298 clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
299 memset(&range, 0, sizeof(range));
300 range.len = (u64)-1;
301 range.start = defrag->last_offset;
302
303 sb_start_write(fs_info->sb);
304 num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
305 BTRFS_DEFRAG_BATCH);
306 sb_end_write(fs_info->sb);
307 /*
308 * if we filled the whole defrag batch, there
309 * must be more work to do. Queue this defrag
310 * again
311 */
312 if (num_defrag == BTRFS_DEFRAG_BATCH) {
313 defrag->last_offset = range.start;
314 btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
315 } else if (defrag->last_offset && !defrag->cycled) {
316 /*
317 * we didn't fill our defrag batch, but
318 * we didn't start at zero. Make sure we loop
319 * around to the start of the file.
320 */
321 defrag->last_offset = 0;
322 defrag->cycled = 1;
323 btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
324 } else {
325 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
326 }
327
328 iput(inode);
329 return 0;
330cleanup:
331 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
332 return ret;
333}
334
335/*
336 * run through the list of inodes in the FS that need
337 * defragging
338 */
339int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
340{
341 struct inode_defrag *defrag;
342 u64 first_ino = 0;
343 u64 root_objectid = 0;
344
345 atomic_inc(&fs_info->defrag_running);
346 while (1) {
347 /* Pause the auto defragger. */
348 if (test_bit(BTRFS_FS_STATE_REMOUNTING,
349 &fs_info->fs_state))
350 break;
351
352 if (!__need_auto_defrag(fs_info))
353 break;
354
355 /* find an inode to defrag */
356 defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
357 first_ino);
358 if (!defrag) {
359 if (root_objectid || first_ino) {
360 root_objectid = 0;
361 first_ino = 0;
362 continue;
363 } else {
364 break;
365 }
366 }
367
368 first_ino = defrag->ino + 1;
369 root_objectid = defrag->root;
370
371 __btrfs_run_defrag_inode(fs_info, defrag);
372 }
373 atomic_dec(&fs_info->defrag_running);
374
375 /*
376 * during unmount, we use the transaction_wait queue to
377 * wait for the defragger to stop
378 */
379 wake_up(&fs_info->transaction_wait);
380 return 0;
381}
382
383/* simple helper to fault in pages and copy. This should go away
384 * and be replaced with calls into generic code.
385 */
386static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
387 struct page **prepared_pages,
388 struct iov_iter *i)
389{
390 size_t copied = 0;
391 size_t total_copied = 0;
392 int pg = 0;
393 int offset = offset_in_page(pos);
394
395 while (write_bytes > 0) {
396 size_t count = min_t(size_t,
397 PAGE_SIZE - offset, write_bytes);
398 struct page *page = prepared_pages[pg];
399 /*
400 * Copy data from userspace to the current page
401 */
402 copied = copy_page_from_iter_atomic(page, offset, count, i);
403
404 /* Flush processor's dcache for this page */
405 flush_dcache_page(page);
406
407 /*
408 * if we get a partial write, we can end up with
409 * partially up to date pages. These add
410 * a lot of complexity, so make sure they don't
411 * happen by forcing this copy to be retried.
412 *
413 * The rest of the btrfs_file_write code will fall
414 * back to page at a time copies after we return 0.
415 */
416 if (unlikely(copied < count)) {
417 if (!PageUptodate(page)) {
418 iov_iter_revert(i, copied);
419 copied = 0;
420 }
421 if (!copied)
422 break;
423 }
424
425 write_bytes -= copied;
426 total_copied += copied;
427 offset += copied;
428 if (offset == PAGE_SIZE) {
429 pg++;
430 offset = 0;
431 }
432 }
433 return total_copied;
434}
435
436/*
437 * unlocks pages after btrfs_file_write is done with them
438 */
439static void btrfs_drop_pages(struct page **pages, size_t num_pages)
440{
441 size_t i;
442 for (i = 0; i < num_pages; i++) {
443 /* page checked is some magic around finding pages that
444 * have been modified without going through btrfs_set_page_dirty
445 * clear it here. There should be no need to mark the pages
446 * accessed as prepare_pages should have marked them accessed
447 * in prepare_pages via find_or_create_page()
448 */
449 ClearPageChecked(pages[i]);
450 unlock_page(pages[i]);
451 put_page(pages[i]);
452 }
453}
454
455/*
456 * After btrfs_copy_from_user(), update the following things for delalloc:
457 * - Mark newly dirtied pages as DELALLOC in the io tree.
458 * Used to advise which range is to be written back.
459 * - Mark modified pages as Uptodate/Dirty and not needing COW fixup
460 * - Update inode size for past EOF write
461 */
462int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
463 size_t num_pages, loff_t pos, size_t write_bytes,
464 struct extent_state **cached, bool noreserve)
465{
466 struct btrfs_fs_info *fs_info = inode->root->fs_info;
467 int err = 0;
468 int i;
469 u64 num_bytes;
470 u64 start_pos;
471 u64 end_of_last_block;
472 u64 end_pos = pos + write_bytes;
473 loff_t isize = i_size_read(&inode->vfs_inode);
474 unsigned int extra_bits = 0;
475
476 if (write_bytes == 0)
477 return 0;
478
479 if (noreserve)
480 extra_bits |= EXTENT_NORESERVE;
481
482 start_pos = round_down(pos, fs_info->sectorsize);
483 num_bytes = round_up(write_bytes + pos - start_pos,
484 fs_info->sectorsize);
485 ASSERT(num_bytes <= U32_MAX);
486
487 end_of_last_block = start_pos + num_bytes - 1;
488
489 /*
490 * The pages may have already been dirty, clear out old accounting so
491 * we can set things up properly
492 */
493 clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
494 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
495 0, 0, cached);
496
497 err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
498 extra_bits, cached);
499 if (err)
500 return err;
501
502 for (i = 0; i < num_pages; i++) {
503 struct page *p = pages[i];
504
505 btrfs_page_clamp_set_uptodate(fs_info, p, start_pos, num_bytes);
506 ClearPageChecked(p);
507 btrfs_page_clamp_set_dirty(fs_info, p, start_pos, num_bytes);
508 }
509
510 /*
511 * we've only changed i_size in ram, and we haven't updated
512 * the disk i_size. There is no need to log the inode
513 * at this time.
514 */
515 if (end_pos > isize)
516 i_size_write(&inode->vfs_inode, end_pos);
517 return 0;
518}
519
520/*
521 * this drops all the extents in the cache that intersect the range
522 * [start, end]. Existing extents are split as required.
523 */
524void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
525 int skip_pinned)
526{
527 struct extent_map *em;
528 struct extent_map *split = NULL;
529 struct extent_map *split2 = NULL;
530 struct extent_map_tree *em_tree = &inode->extent_tree;
531 u64 len = end - start + 1;
532 u64 gen;
533 int ret;
534 int testend = 1;
535 unsigned long flags;
536 int compressed = 0;
537 bool modified;
538
539 WARN_ON(end < start);
540 if (end == (u64)-1) {
541 len = (u64)-1;
542 testend = 0;
543 }
544 while (1) {
545 int no_splits = 0;
546
547 modified = false;
548 if (!split)
549 split = alloc_extent_map();
550 if (!split2)
551 split2 = alloc_extent_map();
552 if (!split || !split2)
553 no_splits = 1;
554
555 write_lock(&em_tree->lock);
556 em = lookup_extent_mapping(em_tree, start, len);
557 if (!em) {
558 write_unlock(&em_tree->lock);
559 break;
560 }
561 flags = em->flags;
562 gen = em->generation;
563 if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
564 if (testend && em->start + em->len >= start + len) {
565 free_extent_map(em);
566 write_unlock(&em_tree->lock);
567 break;
568 }
569 start = em->start + em->len;
570 if (testend)
571 len = start + len - (em->start + em->len);
572 free_extent_map(em);
573 write_unlock(&em_tree->lock);
574 continue;
575 }
576 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
577 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
578 clear_bit(EXTENT_FLAG_LOGGING, &flags);
579 modified = !list_empty(&em->list);
580 if (no_splits)
581 goto next;
582
583 if (em->start < start) {
584 split->start = em->start;
585 split->len = start - em->start;
586
587 if (em->block_start < EXTENT_MAP_LAST_BYTE) {
588 split->orig_start = em->orig_start;
589 split->block_start = em->block_start;
590
591 if (compressed)
592 split->block_len = em->block_len;
593 else
594 split->block_len = split->len;
595 split->orig_block_len = max(split->block_len,
596 em->orig_block_len);
597 split->ram_bytes = em->ram_bytes;
598 } else {
599 split->orig_start = split->start;
600 split->block_len = 0;
601 split->block_start = em->block_start;
602 split->orig_block_len = 0;
603 split->ram_bytes = split->len;
604 }
605
606 split->generation = gen;
607 split->flags = flags;
608 split->compress_type = em->compress_type;
609 replace_extent_mapping(em_tree, em, split, modified);
610 free_extent_map(split);
611 split = split2;
612 split2 = NULL;
613 }
614 if (testend && em->start + em->len > start + len) {
615 u64 diff = start + len - em->start;
616
617 split->start = start + len;
618 split->len = em->start + em->len - (start + len);
619 split->flags = flags;
620 split->compress_type = em->compress_type;
621 split->generation = gen;
622
623 if (em->block_start < EXTENT_MAP_LAST_BYTE) {
624 split->orig_block_len = max(em->block_len,
625 em->orig_block_len);
626
627 split->ram_bytes = em->ram_bytes;
628 if (compressed) {
629 split->block_len = em->block_len;
630 split->block_start = em->block_start;
631 split->orig_start = em->orig_start;
632 } else {
633 split->block_len = split->len;
634 split->block_start = em->block_start
635 + diff;
636 split->orig_start = em->orig_start;
637 }
638 } else {
639 split->ram_bytes = split->len;
640 split->orig_start = split->start;
641 split->block_len = 0;
642 split->block_start = em->block_start;
643 split->orig_block_len = 0;
644 }
645
646 if (extent_map_in_tree(em)) {
647 replace_extent_mapping(em_tree, em, split,
648 modified);
649 } else {
650 ret = add_extent_mapping(em_tree, split,
651 modified);
652 ASSERT(ret == 0); /* Logic error */
653 }
654 free_extent_map(split);
655 split = NULL;
656 }
657next:
658 if (extent_map_in_tree(em))
659 remove_extent_mapping(em_tree, em);
660 write_unlock(&em_tree->lock);
661
662 /* once for us */
663 free_extent_map(em);
664 /* once for the tree*/
665 free_extent_map(em);
666 }
667 if (split)
668 free_extent_map(split);
669 if (split2)
670 free_extent_map(split2);
671}
672
673/*
674 * this is very complex, but the basic idea is to drop all extents
675 * in the range start - end. hint_block is filled in with a block number
676 * that would be a good hint to the block allocator for this file.
677 *
678 * If an extent intersects the range but is not entirely inside the range
679 * it is either truncated or split. Anything entirely inside the range
680 * is deleted from the tree.
681 *
682 * Note: the VFS' inode number of bytes is not updated, it's up to the caller
683 * to deal with that. We set the field 'bytes_found' of the arguments structure
684 * with the number of allocated bytes found in the target range, so that the
685 * caller can update the inode's number of bytes in an atomic way when
686 * replacing extents in a range to avoid races with stat(2).
687 */
688int btrfs_drop_extents(struct btrfs_trans_handle *trans,
689 struct btrfs_root *root, struct btrfs_inode *inode,
690 struct btrfs_drop_extents_args *args)
691{
692 struct btrfs_fs_info *fs_info = root->fs_info;
693 struct extent_buffer *leaf;
694 struct btrfs_file_extent_item *fi;
695 struct btrfs_ref ref = { 0 };
696 struct btrfs_key key;
697 struct btrfs_key new_key;
698 u64 ino = btrfs_ino(inode);
699 u64 search_start = args->start;
700 u64 disk_bytenr = 0;
701 u64 num_bytes = 0;
702 u64 extent_offset = 0;
703 u64 extent_end = 0;
704 u64 last_end = args->start;
705 int del_nr = 0;
706 int del_slot = 0;
707 int extent_type;
708 int recow;
709 int ret;
710 int modify_tree = -1;
711 int update_refs;
712 int found = 0;
713 int leafs_visited = 0;
714 struct btrfs_path *path = args->path;
715
716 args->bytes_found = 0;
717 args->extent_inserted = false;
718
719 /* Must always have a path if ->replace_extent is true */
720 ASSERT(!(args->replace_extent && !args->path));
721
722 if (!path) {
723 path = btrfs_alloc_path();
724 if (!path) {
725 ret = -ENOMEM;
726 goto out;
727 }
728 }
729
730 if (args->drop_cache)
731 btrfs_drop_extent_cache(inode, args->start, args->end - 1, 0);
732
733 if (args->start >= inode->disk_i_size && !args->replace_extent)
734 modify_tree = 0;
735
736 update_refs = (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID);
737 while (1) {
738 recow = 0;
739 ret = btrfs_lookup_file_extent(trans, root, path, ino,
740 search_start, modify_tree);
741 if (ret < 0)
742 break;
743 if (ret > 0 && path->slots[0] > 0 && search_start == args->start) {
744 leaf = path->nodes[0];
745 btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
746 if (key.objectid == ino &&
747 key.type == BTRFS_EXTENT_DATA_KEY)
748 path->slots[0]--;
749 }
750 ret = 0;
751 leafs_visited++;
752next_slot:
753 leaf = path->nodes[0];
754 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
755 BUG_ON(del_nr > 0);
756 ret = btrfs_next_leaf(root, path);
757 if (ret < 0)
758 break;
759 if (ret > 0) {
760 ret = 0;
761 break;
762 }
763 leafs_visited++;
764 leaf = path->nodes[0];
765 recow = 1;
766 }
767
768 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
769
770 if (key.objectid > ino)
771 break;
772 if (WARN_ON_ONCE(key.objectid < ino) ||
773 key.type < BTRFS_EXTENT_DATA_KEY) {
774 ASSERT(del_nr == 0);
775 path->slots[0]++;
776 goto next_slot;
777 }
778 if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= args->end)
779 break;
780
781 fi = btrfs_item_ptr(leaf, path->slots[0],
782 struct btrfs_file_extent_item);
783 extent_type = btrfs_file_extent_type(leaf, fi);
784
785 if (extent_type == BTRFS_FILE_EXTENT_REG ||
786 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
787 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
788 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
789 extent_offset = btrfs_file_extent_offset(leaf, fi);
790 extent_end = key.offset +
791 btrfs_file_extent_num_bytes(leaf, fi);
792 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
793 extent_end = key.offset +
794 btrfs_file_extent_ram_bytes(leaf, fi);
795 } else {
796 /* can't happen */
797 BUG();
798 }
799
800 /*
801 * Don't skip extent items representing 0 byte lengths. They
802 * used to be created (bug) if while punching holes we hit
803 * -ENOSPC condition. So if we find one here, just ensure we
804 * delete it, otherwise we would insert a new file extent item
805 * with the same key (offset) as that 0 bytes length file
806 * extent item in the call to setup_items_for_insert() later
807 * in this function.
808 */
809 if (extent_end == key.offset && extent_end >= search_start) {
810 last_end = extent_end;
811 goto delete_extent_item;
812 }
813
814 if (extent_end <= search_start) {
815 path->slots[0]++;
816 goto next_slot;
817 }
818
819 found = 1;
820 search_start = max(key.offset, args->start);
821 if (recow || !modify_tree) {
822 modify_tree = -1;
823 btrfs_release_path(path);
824 continue;
825 }
826
827 /*
828 * | - range to drop - |
829 * | -------- extent -------- |
830 */
831 if (args->start > key.offset && args->end < extent_end) {
832 BUG_ON(del_nr > 0);
833 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
834 ret = -EOPNOTSUPP;
835 break;
836 }
837
838 memcpy(&new_key, &key, sizeof(new_key));
839 new_key.offset = args->start;
840 ret = btrfs_duplicate_item(trans, root, path,
841 &new_key);
842 if (ret == -EAGAIN) {
843 btrfs_release_path(path);
844 continue;
845 }
846 if (ret < 0)
847 break;
848
849 leaf = path->nodes[0];
850 fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
851 struct btrfs_file_extent_item);
852 btrfs_set_file_extent_num_bytes(leaf, fi,
853 args->start - key.offset);
854
855 fi = btrfs_item_ptr(leaf, path->slots[0],
856 struct btrfs_file_extent_item);
857
858 extent_offset += args->start - key.offset;
859 btrfs_set_file_extent_offset(leaf, fi, extent_offset);
860 btrfs_set_file_extent_num_bytes(leaf, fi,
861 extent_end - args->start);
862 btrfs_mark_buffer_dirty(leaf);
863
864 if (update_refs && disk_bytenr > 0) {
865 btrfs_init_generic_ref(&ref,
866 BTRFS_ADD_DELAYED_REF,
867 disk_bytenr, num_bytes, 0);
868 btrfs_init_data_ref(&ref,
869 root->root_key.objectid,
870 new_key.objectid,
871 args->start - extent_offset);
872 ret = btrfs_inc_extent_ref(trans, &ref);
873 BUG_ON(ret); /* -ENOMEM */
874 }
875 key.offset = args->start;
876 }
877 /*
878 * From here on out we will have actually dropped something, so
879 * last_end can be updated.
880 */
881 last_end = extent_end;
882
883 /*
884 * | ---- range to drop ----- |
885 * | -------- extent -------- |
886 */
887 if (args->start <= key.offset && args->end < extent_end) {
888 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
889 ret = -EOPNOTSUPP;
890 break;
891 }
892
893 memcpy(&new_key, &key, sizeof(new_key));
894 new_key.offset = args->end;
895 btrfs_set_item_key_safe(fs_info, path, &new_key);
896
897 extent_offset += args->end - key.offset;
898 btrfs_set_file_extent_offset(leaf, fi, extent_offset);
899 btrfs_set_file_extent_num_bytes(leaf, fi,
900 extent_end - args->end);
901 btrfs_mark_buffer_dirty(leaf);
902 if (update_refs && disk_bytenr > 0)
903 args->bytes_found += args->end - key.offset;
904 break;
905 }
906
907 search_start = extent_end;
908 /*
909 * | ---- range to drop ----- |
910 * | -------- extent -------- |
911 */
912 if (args->start > key.offset && args->end >= extent_end) {
913 BUG_ON(del_nr > 0);
914 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
915 ret = -EOPNOTSUPP;
916 break;
917 }
918
919 btrfs_set_file_extent_num_bytes(leaf, fi,
920 args->start - key.offset);
921 btrfs_mark_buffer_dirty(leaf);
922 if (update_refs && disk_bytenr > 0)
923 args->bytes_found += extent_end - args->start;
924 if (args->end == extent_end)
925 break;
926
927 path->slots[0]++;
928 goto next_slot;
929 }
930
931 /*
932 * | ---- range to drop ----- |
933 * | ------ extent ------ |
934 */
935 if (args->start <= key.offset && args->end >= extent_end) {
936delete_extent_item:
937 if (del_nr == 0) {
938 del_slot = path->slots[0];
939 del_nr = 1;
940 } else {
941 BUG_ON(del_slot + del_nr != path->slots[0]);
942 del_nr++;
943 }
944
945 if (update_refs &&
946 extent_type == BTRFS_FILE_EXTENT_INLINE) {
947 args->bytes_found += extent_end - key.offset;
948 extent_end = ALIGN(extent_end,
949 fs_info->sectorsize);
950 } else if (update_refs && disk_bytenr > 0) {
951 btrfs_init_generic_ref(&ref,
952 BTRFS_DROP_DELAYED_REF,
953 disk_bytenr, num_bytes, 0);
954 btrfs_init_data_ref(&ref,
955 root->root_key.objectid,
956 key.objectid,
957 key.offset - extent_offset);
958 ret = btrfs_free_extent(trans, &ref);
959 BUG_ON(ret); /* -ENOMEM */
960 args->bytes_found += extent_end - key.offset;
961 }
962
963 if (args->end == extent_end)
964 break;
965
966 if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
967 path->slots[0]++;
968 goto next_slot;
969 }
970
971 ret = btrfs_del_items(trans, root, path, del_slot,
972 del_nr);
973 if (ret) {
974 btrfs_abort_transaction(trans, ret);
975 break;
976 }
977
978 del_nr = 0;
979 del_slot = 0;
980
981 btrfs_release_path(path);
982 continue;
983 }
984
985 BUG();
986 }
987
988 if (!ret && del_nr > 0) {
989 /*
990 * Set path->slots[0] to first slot, so that after the delete
991 * if items are move off from our leaf to its immediate left or
992 * right neighbor leafs, we end up with a correct and adjusted
993 * path->slots[0] for our insertion (if args->replace_extent).
994 */
995 path->slots[0] = del_slot;
996 ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
997 if (ret)
998 btrfs_abort_transaction(trans, ret);
999 }
1000
1001 leaf = path->nodes[0];
1002 /*
1003 * If btrfs_del_items() was called, it might have deleted a leaf, in
1004 * which case it unlocked our path, so check path->locks[0] matches a
1005 * write lock.
1006 */
1007 if (!ret && args->replace_extent && leafs_visited == 1 &&
1008 path->locks[0] == BTRFS_WRITE_LOCK &&
1009 btrfs_leaf_free_space(leaf) >=
1010 sizeof(struct btrfs_item) + args->extent_item_size) {
1011
1012 key.objectid = ino;
1013 key.type = BTRFS_EXTENT_DATA_KEY;
1014 key.offset = args->start;
1015 if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
1016 struct btrfs_key slot_key;
1017
1018 btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
1019 if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
1020 path->slots[0]++;
1021 }
1022 setup_items_for_insert(root, path, &key,
1023 &args->extent_item_size, 1);
1024 args->extent_inserted = true;
1025 }
1026
1027 if (!args->path)
1028 btrfs_free_path(path);
1029 else if (!args->extent_inserted)
1030 btrfs_release_path(path);
1031out:
1032 args->drop_end = found ? min(args->end, last_end) : args->end;
1033
1034 return ret;
1035}
1036
1037static int extent_mergeable(struct extent_buffer *leaf, int slot,
1038 u64 objectid, u64 bytenr, u64 orig_offset,
1039 u64 *start, u64 *end)
1040{
1041 struct btrfs_file_extent_item *fi;
1042 struct btrfs_key key;
1043 u64 extent_end;
1044
1045 if (slot < 0 || slot >= btrfs_header_nritems(leaf))
1046 return 0;
1047
1048 btrfs_item_key_to_cpu(leaf, &key, slot);
1049 if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
1050 return 0;
1051
1052 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
1053 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
1054 btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
1055 btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
1056 btrfs_file_extent_compression(leaf, fi) ||
1057 btrfs_file_extent_encryption(leaf, fi) ||
1058 btrfs_file_extent_other_encoding(leaf, fi))
1059 return 0;
1060
1061 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
1062 if ((*start && *start != key.offset) || (*end && *end != extent_end))
1063 return 0;
1064
1065 *start = key.offset;
1066 *end = extent_end;
1067 return 1;
1068}
1069
1070/*
1071 * Mark extent in the range start - end as written.
1072 *
1073 * This changes extent type from 'pre-allocated' to 'regular'. If only
1074 * part of extent is marked as written, the extent will be split into
1075 * two or three.
1076 */
1077int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
1078 struct btrfs_inode *inode, u64 start, u64 end)
1079{
1080 struct btrfs_fs_info *fs_info = trans->fs_info;
1081 struct btrfs_root *root = inode->root;
1082 struct extent_buffer *leaf;
1083 struct btrfs_path *path;
1084 struct btrfs_file_extent_item *fi;
1085 struct btrfs_ref ref = { 0 };
1086 struct btrfs_key key;
1087 struct btrfs_key new_key;
1088 u64 bytenr;
1089 u64 num_bytes;
1090 u64 extent_end;
1091 u64 orig_offset;
1092 u64 other_start;
1093 u64 other_end;
1094 u64 split;
1095 int del_nr = 0;
1096 int del_slot = 0;
1097 int recow;
1098 int ret = 0;
1099 u64 ino = btrfs_ino(inode);
1100
1101 path = btrfs_alloc_path();
1102 if (!path)
1103 return -ENOMEM;
1104again:
1105 recow = 0;
1106 split = start;
1107 key.objectid = ino;
1108 key.type = BTRFS_EXTENT_DATA_KEY;
1109 key.offset = split;
1110
1111 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1112 if (ret < 0)
1113 goto out;
1114 if (ret > 0 && path->slots[0] > 0)
1115 path->slots[0]--;
1116
1117 leaf = path->nodes[0];
1118 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1119 if (key.objectid != ino ||
1120 key.type != BTRFS_EXTENT_DATA_KEY) {
1121 ret = -EINVAL;
1122 btrfs_abort_transaction(trans, ret);
1123 goto out;
1124 }
1125 fi = btrfs_item_ptr(leaf, path->slots[0],
1126 struct btrfs_file_extent_item);
1127 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
1128 ret = -EINVAL;
1129 btrfs_abort_transaction(trans, ret);
1130 goto out;
1131 }
1132 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
1133 if (key.offset > start || extent_end < end) {
1134 ret = -EINVAL;
1135 btrfs_abort_transaction(trans, ret);
1136 goto out;
1137 }
1138
1139 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1140 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1141 orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
1142 memcpy(&new_key, &key, sizeof(new_key));
1143
1144 if (start == key.offset && end < extent_end) {
1145 other_start = 0;
1146 other_end = start;
1147 if (extent_mergeable(leaf, path->slots[0] - 1,
1148 ino, bytenr, orig_offset,
1149 &other_start, &other_end)) {
1150 new_key.offset = end;
1151 btrfs_set_item_key_safe(fs_info, path, &new_key);
1152 fi = btrfs_item_ptr(leaf, path->slots[0],
1153 struct btrfs_file_extent_item);
1154 btrfs_set_file_extent_generation(leaf, fi,
1155 trans->transid);
1156 btrfs_set_file_extent_num_bytes(leaf, fi,
1157 extent_end - end);
1158 btrfs_set_file_extent_offset(leaf, fi,
1159 end - orig_offset);
1160 fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1161 struct btrfs_file_extent_item);
1162 btrfs_set_file_extent_generation(leaf, fi,
1163 trans->transid);
1164 btrfs_set_file_extent_num_bytes(leaf, fi,
1165 end - other_start);
1166 btrfs_mark_buffer_dirty(leaf);
1167 goto out;
1168 }
1169 }
1170
1171 if (start > key.offset && end == extent_end) {
1172 other_start = end;
1173 other_end = 0;
1174 if (extent_mergeable(leaf, path->slots[0] + 1,
1175 ino, bytenr, orig_offset,
1176 &other_start, &other_end)) {
1177 fi = btrfs_item_ptr(leaf, path->slots[0],
1178 struct btrfs_file_extent_item);
1179 btrfs_set_file_extent_num_bytes(leaf, fi,
1180 start - key.offset);
1181 btrfs_set_file_extent_generation(leaf, fi,
1182 trans->transid);
1183 path->slots[0]++;
1184 new_key.offset = start;
1185 btrfs_set_item_key_safe(fs_info, path, &new_key);
1186
1187 fi = btrfs_item_ptr(leaf, path->slots[0],
1188 struct btrfs_file_extent_item);
1189 btrfs_set_file_extent_generation(leaf, fi,
1190 trans->transid);
1191 btrfs_set_file_extent_num_bytes(leaf, fi,
1192 other_end - start);
1193 btrfs_set_file_extent_offset(leaf, fi,
1194 start - orig_offset);
1195 btrfs_mark_buffer_dirty(leaf);
1196 goto out;
1197 }
1198 }
1199
1200 while (start > key.offset || end < extent_end) {
1201 if (key.offset == start)
1202 split = end;
1203
1204 new_key.offset = split;
1205 ret = btrfs_duplicate_item(trans, root, path, &new_key);
1206 if (ret == -EAGAIN) {
1207 btrfs_release_path(path);
1208 goto again;
1209 }
1210 if (ret < 0) {
1211 btrfs_abort_transaction(trans, ret);
1212 goto out;
1213 }
1214
1215 leaf = path->nodes[0];
1216 fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1217 struct btrfs_file_extent_item);
1218 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1219 btrfs_set_file_extent_num_bytes(leaf, fi,
1220 split - key.offset);
1221
1222 fi = btrfs_item_ptr(leaf, path->slots[0],
1223 struct btrfs_file_extent_item);
1224
1225 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1226 btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
1227 btrfs_set_file_extent_num_bytes(leaf, fi,
1228 extent_end - split);
1229 btrfs_mark_buffer_dirty(leaf);
1230
1231 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
1232 num_bytes, 0);
1233 btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
1234 orig_offset);
1235 ret = btrfs_inc_extent_ref(trans, &ref);
1236 if (ret) {
1237 btrfs_abort_transaction(trans, ret);
1238 goto out;
1239 }
1240
1241 if (split == start) {
1242 key.offset = start;
1243 } else {
1244 if (start != key.offset) {
1245 ret = -EINVAL;
1246 btrfs_abort_transaction(trans, ret);
1247 goto out;
1248 }
1249 path->slots[0]--;
1250 extent_end = end;
1251 }
1252 recow = 1;
1253 }
1254
1255 other_start = end;
1256 other_end = 0;
1257 btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
1258 num_bytes, 0);
1259 btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset);
1260 if (extent_mergeable(leaf, path->slots[0] + 1,
1261 ino, bytenr, orig_offset,
1262 &other_start, &other_end)) {
1263 if (recow) {
1264 btrfs_release_path(path);
1265 goto again;
1266 }
1267 extent_end = other_end;
1268 del_slot = path->slots[0] + 1;
1269 del_nr++;
1270 ret = btrfs_free_extent(trans, &ref);
1271 if (ret) {
1272 btrfs_abort_transaction(trans, ret);
1273 goto out;
1274 }
1275 }
1276 other_start = 0;
1277 other_end = start;
1278 if (extent_mergeable(leaf, path->slots[0] - 1,
1279 ino, bytenr, orig_offset,
1280 &other_start, &other_end)) {
1281 if (recow) {
1282 btrfs_release_path(path);
1283 goto again;
1284 }
1285 key.offset = other_start;
1286 del_slot = path->slots[0];
1287 del_nr++;
1288 ret = btrfs_free_extent(trans, &ref);
1289 if (ret) {
1290 btrfs_abort_transaction(trans, ret);
1291 goto out;
1292 }
1293 }
1294 if (del_nr == 0) {
1295 fi = btrfs_item_ptr(leaf, path->slots[0],
1296 struct btrfs_file_extent_item);
1297 btrfs_set_file_extent_type(leaf, fi,
1298 BTRFS_FILE_EXTENT_REG);
1299 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1300 btrfs_mark_buffer_dirty(leaf);
1301 } else {
1302 fi = btrfs_item_ptr(leaf, del_slot - 1,
1303 struct btrfs_file_extent_item);
1304 btrfs_set_file_extent_type(leaf, fi,
1305 BTRFS_FILE_EXTENT_REG);
1306 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1307 btrfs_set_file_extent_num_bytes(leaf, fi,
1308 extent_end - key.offset);
1309 btrfs_mark_buffer_dirty(leaf);
1310
1311 ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
1312 if (ret < 0) {
1313 btrfs_abort_transaction(trans, ret);
1314 goto out;
1315 }
1316 }
1317out:
1318 btrfs_free_path(path);
1319 return ret;
1320}
1321
1322/*
1323 * on error we return an unlocked page and the error value
1324 * on success we return a locked page and 0
1325 */
1326static int prepare_uptodate_page(struct inode *inode,
1327 struct page *page, u64 pos,
1328 bool force_uptodate)
1329{
1330 int ret = 0;
1331
1332 if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
1333 !PageUptodate(page)) {
1334 ret = btrfs_readpage(NULL, page);
1335 if (ret)
1336 return ret;
1337 lock_page(page);
1338 if (!PageUptodate(page)) {
1339 unlock_page(page);
1340 return -EIO;
1341 }
1342 if (page->mapping != inode->i_mapping) {
1343 unlock_page(page);
1344 return -EAGAIN;
1345 }
1346 }
1347 return 0;
1348}
1349
1350/*
1351 * this just gets pages into the page cache and locks them down.
1352 */
1353static noinline int prepare_pages(struct inode *inode, struct page **pages,
1354 size_t num_pages, loff_t pos,
1355 size_t write_bytes, bool force_uptodate)
1356{
1357 int i;
1358 unsigned long index = pos >> PAGE_SHIFT;
1359 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
1360 int err = 0;
1361 int faili;
1362
1363 for (i = 0; i < num_pages; i++) {
1364again:
1365 pages[i] = find_or_create_page(inode->i_mapping, index + i,
1366 mask | __GFP_WRITE);
1367 if (!pages[i]) {
1368 faili = i - 1;
1369 err = -ENOMEM;
1370 goto fail;
1371 }
1372
1373 err = set_page_extent_mapped(pages[i]);
1374 if (err < 0) {
1375 faili = i;
1376 goto fail;
1377 }
1378
1379 if (i == 0)
1380 err = prepare_uptodate_page(inode, pages[i], pos,
1381 force_uptodate);
1382 if (!err && i == num_pages - 1)
1383 err = prepare_uptodate_page(inode, pages[i],
1384 pos + write_bytes, false);
1385 if (err) {
1386 put_page(pages[i]);
1387 if (err == -EAGAIN) {
1388 err = 0;
1389 goto again;
1390 }
1391 faili = i - 1;
1392 goto fail;
1393 }
1394 wait_on_page_writeback(pages[i]);
1395 }
1396
1397 return 0;
1398fail:
1399 while (faili >= 0) {
1400 unlock_page(pages[faili]);
1401 put_page(pages[faili]);
1402 faili--;
1403 }
1404 return err;
1405
1406}
1407
1408/*
1409 * This function locks the extent and properly waits for data=ordered extents
1410 * to finish before allowing the pages to be modified if need.
1411 *
1412 * The return value:
1413 * 1 - the extent is locked
1414 * 0 - the extent is not locked, and everything is OK
1415 * -EAGAIN - need re-prepare the pages
1416 * the other < 0 number - Something wrong happens
1417 */
1418static noinline int
1419lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
1420 size_t num_pages, loff_t pos,
1421 size_t write_bytes,
1422 u64 *lockstart, u64 *lockend,
1423 struct extent_state **cached_state)
1424{
1425 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1426 u64 start_pos;
1427 u64 last_pos;
1428 int i;
1429 int ret = 0;
1430
1431 start_pos = round_down(pos, fs_info->sectorsize);
1432 last_pos = round_up(pos + write_bytes, fs_info->sectorsize) - 1;
1433
1434 if (start_pos < inode->vfs_inode.i_size) {
1435 struct btrfs_ordered_extent *ordered;
1436
1437 lock_extent_bits(&inode->io_tree, start_pos, last_pos,
1438 cached_state);
1439 ordered = btrfs_lookup_ordered_range(inode, start_pos,
1440 last_pos - start_pos + 1);
1441 if (ordered &&
1442 ordered->file_offset + ordered->num_bytes > start_pos &&
1443 ordered->file_offset <= last_pos) {
1444 unlock_extent_cached(&inode->io_tree, start_pos,
1445 last_pos, cached_state);
1446 for (i = 0; i < num_pages; i++) {
1447 unlock_page(pages[i]);
1448 put_page(pages[i]);
1449 }
1450 btrfs_start_ordered_extent(ordered, 1);
1451 btrfs_put_ordered_extent(ordered);
1452 return -EAGAIN;
1453 }
1454 if (ordered)
1455 btrfs_put_ordered_extent(ordered);
1456
1457 *lockstart = start_pos;
1458 *lockend = last_pos;
1459 ret = 1;
1460 }
1461
1462 /*
1463 * We should be called after prepare_pages() which should have locked
1464 * all pages in the range.
1465 */
1466 for (i = 0; i < num_pages; i++)
1467 WARN_ON(!PageLocked(pages[i]));
1468
1469 return ret;
1470}
1471
1472static int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
1473 size_t *write_bytes, bool nowait)
1474{
1475 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1476 struct btrfs_root *root = inode->root;
1477 u64 lockstart, lockend;
1478 u64 num_bytes;
1479 int ret;
1480
1481 if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
1482 return 0;
1483
1484 if (!nowait && !btrfs_drew_try_write_lock(&root->snapshot_lock))
1485 return -EAGAIN;
1486
1487 lockstart = round_down(pos, fs_info->sectorsize);
1488 lockend = round_up(pos + *write_bytes,
1489 fs_info->sectorsize) - 1;
1490 num_bytes = lockend - lockstart + 1;
1491
1492 if (nowait) {
1493 struct btrfs_ordered_extent *ordered;
1494
1495 if (!try_lock_extent(&inode->io_tree, lockstart, lockend))
1496 return -EAGAIN;
1497
1498 ordered = btrfs_lookup_ordered_range(inode, lockstart,
1499 num_bytes);
1500 if (ordered) {
1501 btrfs_put_ordered_extent(ordered);
1502 ret = -EAGAIN;
1503 goto out_unlock;
1504 }
1505 } else {
1506 btrfs_lock_and_flush_ordered_range(inode, lockstart,
1507 lockend, NULL);
1508 }
1509
1510 ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
1511 NULL, NULL, NULL, false);
1512 if (ret <= 0) {
1513 ret = 0;
1514 if (!nowait)
1515 btrfs_drew_write_unlock(&root->snapshot_lock);
1516 } else {
1517 *write_bytes = min_t(size_t, *write_bytes ,
1518 num_bytes - pos + lockstart);
1519 }
1520out_unlock:
1521 unlock_extent(&inode->io_tree, lockstart, lockend);
1522
1523 return ret;
1524}
1525
1526static int check_nocow_nolock(struct btrfs_inode *inode, loff_t pos,
1527 size_t *write_bytes)
1528{
1529 return check_can_nocow(inode, pos, write_bytes, true);
1530}
1531
1532/*
1533 * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
1534 *
1535 * @pos: File offset
1536 * @write_bytes: The length to write, will be updated to the nocow writeable
1537 * range
1538 *
1539 * This function will flush ordered extents in the range to ensure proper
1540 * nocow checks.
1541 *
1542 * Return:
1543 * >0 and update @write_bytes if we can do nocow write
1544 * 0 if we can't do nocow write
1545 * -EAGAIN if we can't get the needed lock or there are ordered extents
1546 * for * (nowait == true) case
1547 * <0 if other error happened
1548 *
1549 * NOTE: Callers need to release the lock by btrfs_check_nocow_unlock().
1550 */
1551int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
1552 size_t *write_bytes)
1553{
1554 return check_can_nocow(inode, pos, write_bytes, false);
1555}
1556
1557void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
1558{
1559 btrfs_drew_write_unlock(&inode->root->snapshot_lock);
1560}
1561
1562static void update_time_for_write(struct inode *inode)
1563{
1564 struct timespec64 now;
1565
1566 if (IS_NOCMTIME(inode))
1567 return;
1568
1569 now = current_time(inode);
1570 if (!timespec64_equal(&inode->i_mtime, &now))
1571 inode->i_mtime = now;
1572
1573 if (!timespec64_equal(&inode->i_ctime, &now))
1574 inode->i_ctime = now;
1575
1576 if (IS_I_VERSION(inode))
1577 inode_inc_iversion(inode);
1578}
1579
1580static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
1581 size_t count)
1582{
1583 struct file *file = iocb->ki_filp;
1584 struct inode *inode = file_inode(file);
1585 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1586 loff_t pos = iocb->ki_pos;
1587 int ret;
1588 loff_t oldsize;
1589 loff_t start_pos;
1590
1591 if (iocb->ki_flags & IOCB_NOWAIT) {
1592 size_t nocow_bytes = count;
1593
1594 /* We will allocate space in case nodatacow is not set, so bail */
1595 if (check_nocow_nolock(BTRFS_I(inode), pos, &nocow_bytes) <= 0)
1596 return -EAGAIN;
1597 /*
1598 * There are holes in the range or parts of the range that must
1599 * be COWed (shared extents, RO block groups, etc), so just bail
1600 * out.
1601 */
1602 if (nocow_bytes < count)
1603 return -EAGAIN;
1604 }
1605
1606 current->backing_dev_info = inode_to_bdi(inode);
1607 ret = file_remove_privs(file);
1608 if (ret)
1609 return ret;
1610
1611 /*
1612 * We reserve space for updating the inode when we reserve space for the
1613 * extent we are going to write, so we will enospc out there. We don't
1614 * need to start yet another transaction to update the inode as we will
1615 * update the inode when we finish writing whatever data we write.
1616 */
1617 update_time_for_write(inode);
1618
1619 start_pos = round_down(pos, fs_info->sectorsize);
1620 oldsize = i_size_read(inode);
1621 if (start_pos > oldsize) {
1622 /* Expand hole size to cover write data, preventing empty gap */
1623 loff_t end_pos = round_up(pos + count, fs_info->sectorsize);
1624
1625 ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, end_pos);
1626 if (ret) {
1627 current->backing_dev_info = NULL;
1628 return ret;
1629 }
1630 }
1631
1632 return 0;
1633}
1634
1635static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
1636 struct iov_iter *i)
1637{
1638 struct file *file = iocb->ki_filp;
1639 loff_t pos;
1640 struct inode *inode = file_inode(file);
1641 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1642 struct page **pages = NULL;
1643 struct extent_changeset *data_reserved = NULL;
1644 u64 release_bytes = 0;
1645 u64 lockstart;
1646 u64 lockend;
1647 size_t num_written = 0;
1648 int nrptrs;
1649 ssize_t ret;
1650 bool only_release_metadata = false;
1651 bool force_page_uptodate = false;
1652 loff_t old_isize = i_size_read(inode);
1653 unsigned int ilock_flags = 0;
1654
1655 if (iocb->ki_flags & IOCB_NOWAIT)
1656 ilock_flags |= BTRFS_ILOCK_TRY;
1657
1658 ret = btrfs_inode_lock(inode, ilock_flags);
1659 if (ret < 0)
1660 return ret;
1661
1662 ret = generic_write_checks(iocb, i);
1663 if (ret <= 0)
1664 goto out;
1665
1666 ret = btrfs_write_check(iocb, i, ret);
1667 if (ret < 0)
1668 goto out;
1669
1670 pos = iocb->ki_pos;
1671 nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
1672 PAGE_SIZE / (sizeof(struct page *)));
1673 nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1674 nrptrs = max(nrptrs, 8);
1675 pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
1676 if (!pages) {
1677 ret = -ENOMEM;
1678 goto out;
1679 }
1680
1681 while (iov_iter_count(i) > 0) {
1682 struct extent_state *cached_state = NULL;
1683 size_t offset = offset_in_page(pos);
1684 size_t sector_offset;
1685 size_t write_bytes = min(iov_iter_count(i),
1686 nrptrs * (size_t)PAGE_SIZE -
1687 offset);
1688 size_t num_pages;
1689 size_t reserve_bytes;
1690 size_t dirty_pages;
1691 size_t copied;
1692 size_t dirty_sectors;
1693 size_t num_sectors;
1694 int extents_locked;
1695
1696 /*
1697 * Fault pages before locking them in prepare_pages
1698 * to avoid recursive lock
1699 */
1700 if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
1701 ret = -EFAULT;
1702 break;
1703 }
1704
1705 only_release_metadata = false;
1706 sector_offset = pos & (fs_info->sectorsize - 1);
1707
1708 extent_changeset_release(data_reserved);
1709 ret = btrfs_check_data_free_space(BTRFS_I(inode),
1710 &data_reserved, pos,
1711 write_bytes);
1712 if (ret < 0) {
1713 /*
1714 * If we don't have to COW at the offset, reserve
1715 * metadata only. write_bytes may get smaller than
1716 * requested here.
1717 */
1718 if (btrfs_check_nocow_lock(BTRFS_I(inode), pos,
1719 &write_bytes) > 0)
1720 only_release_metadata = true;
1721 else
1722 break;
1723 }
1724
1725 num_pages = DIV_ROUND_UP(write_bytes + offset, PAGE_SIZE);
1726 WARN_ON(num_pages > nrptrs);
1727 reserve_bytes = round_up(write_bytes + sector_offset,
1728 fs_info->sectorsize);
1729 WARN_ON(reserve_bytes == 0);
1730 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
1731 reserve_bytes);
1732 if (ret) {
1733 if (!only_release_metadata)
1734 btrfs_free_reserved_data_space(BTRFS_I(inode),
1735 data_reserved, pos,
1736 write_bytes);
1737 else
1738 btrfs_check_nocow_unlock(BTRFS_I(inode));
1739 break;
1740 }
1741
1742 release_bytes = reserve_bytes;
1743again:
1744 /*
1745 * This is going to setup the pages array with the number of
1746 * pages we want, so we don't really need to worry about the
1747 * contents of pages from loop to loop
1748 */
1749 ret = prepare_pages(inode, pages, num_pages,
1750 pos, write_bytes,
1751 force_page_uptodate);
1752 if (ret) {
1753 btrfs_delalloc_release_extents(BTRFS_I(inode),
1754 reserve_bytes);
1755 break;
1756 }
1757
1758 extents_locked = lock_and_cleanup_extent_if_need(
1759 BTRFS_I(inode), pages,
1760 num_pages, pos, write_bytes, &lockstart,
1761 &lockend, &cached_state);
1762 if (extents_locked < 0) {
1763 if (extents_locked == -EAGAIN)
1764 goto again;
1765 btrfs_delalloc_release_extents(BTRFS_I(inode),
1766 reserve_bytes);
1767 ret = extents_locked;
1768 break;
1769 }
1770
1771 copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
1772
1773 num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
1774 dirty_sectors = round_up(copied + sector_offset,
1775 fs_info->sectorsize);
1776 dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
1777
1778 /*
1779 * if we have trouble faulting in the pages, fall
1780 * back to one page at a time
1781 */
1782 if (copied < write_bytes)
1783 nrptrs = 1;
1784
1785 if (copied == 0) {
1786 force_page_uptodate = true;
1787 dirty_sectors = 0;
1788 dirty_pages = 0;
1789 } else {
1790 force_page_uptodate = false;
1791 dirty_pages = DIV_ROUND_UP(copied + offset,
1792 PAGE_SIZE);
1793 }
1794
1795 if (num_sectors > dirty_sectors) {
1796 /* release everything except the sectors we dirtied */
1797 release_bytes -= dirty_sectors << fs_info->sectorsize_bits;
1798 if (only_release_metadata) {
1799 btrfs_delalloc_release_metadata(BTRFS_I(inode),
1800 release_bytes, true);
1801 } else {
1802 u64 __pos;
1803
1804 __pos = round_down(pos,
1805 fs_info->sectorsize) +
1806 (dirty_pages << PAGE_SHIFT);
1807 btrfs_delalloc_release_space(BTRFS_I(inode),
1808 data_reserved, __pos,
1809 release_bytes, true);
1810 }
1811 }
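/*
 * Illustrative numbers for the release above (assuming a 4K
 * sectorsize): if we reserved two sectors (reserve_bytes == 8192) but
 * the copy faulted after 3000 bytes at sector_offset 0, dirty_sectors
 * is 1, so the 4096 bytes reserved for the untouched second sector are
 * returned here, as metadata only in the nocow case, or as data plus
 * metadata otherwise.
 */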
1812
1813 release_bytes = round_up(copied + sector_offset,
1814 fs_info->sectorsize);
1815
1816 ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
1817 dirty_pages, pos, copied,
1818 &cached_state, only_release_metadata);
1819
1820 /*
1821 * If we have not locked the extent range, because the range's
1822 * start offset is >= i_size, we might still have a non-NULL
1823 * cached extent state, acquired while marking the extent range
1824 * as delalloc through btrfs_dirty_pages(). Therefore free any
1825 * possible cached extent state to avoid a memory leak.
1826 */
1827 if (extents_locked)
1828 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1829 lockstart, lockend, &cached_state);
1830 else
1831 free_extent_state(cached_state);
1832
1833 btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
1834 if (ret) {
1835 btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
1836 break;
1837 }
1838
1839 release_bytes = 0;
1840 if (only_release_metadata)
1841 btrfs_check_nocow_unlock(BTRFS_I(inode));
1842
1843 btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
1844
1845 cond_resched();
1846
1847 balance_dirty_pages_ratelimited(inode->i_mapping);
1848
1849 pos += copied;
1850 num_written += copied;
1851 }
1852
1853 kfree(pages);
1854
1855 if (release_bytes) {
1856 if (only_release_metadata) {
1857 btrfs_check_nocow_unlock(BTRFS_I(inode));
1858 btrfs_delalloc_release_metadata(BTRFS_I(inode),
1859 release_bytes, true);
1860 } else {
1861 btrfs_delalloc_release_space(BTRFS_I(inode),
1862 data_reserved,
1863 round_down(pos, fs_info->sectorsize),
1864 release_bytes, true);
1865 }
1866 }
1867
1868 extent_changeset_free(data_reserved);
1869 if (num_written > 0) {
1870 pagecache_isize_extended(inode, old_isize, iocb->ki_pos);
1871 iocb->ki_pos += num_written;
1872 }
1873out:
1874 btrfs_inode_unlock(inode, ilock_flags);
1875 return num_written ? num_written : ret;
1876}
1877
1878static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
1879 const struct iov_iter *iter, loff_t offset)
1880{
1881 const u32 blocksize_mask = fs_info->sectorsize - 1;
1882
1883 if (offset & blocksize_mask)
1884 return -EINVAL;
1885
1886 if (iov_iter_alignment(iter) & blocksize_mask)
1887 return -EINVAL;
1888
1889 return 0;
1890}
1891
1892static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
1893{
1894 struct file *file = iocb->ki_filp;
1895 struct inode *inode = file_inode(file);
1896 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1897 loff_t pos;
1898 ssize_t written = 0;
1899 ssize_t written_buffered;
1900 loff_t endbyte;
1901 ssize_t err;
1902 unsigned int ilock_flags = 0;
1903 struct iomap_dio *dio = NULL;
1904
1905 if (iocb->ki_flags & IOCB_NOWAIT)
1906 ilock_flags |= BTRFS_ILOCK_TRY;
1907
1908 /* If the write DIO is within EOF, use a shared lock */
1909 if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode))
1910 ilock_flags |= BTRFS_ILOCK_SHARED;
1911
1912relock:
1913 err = btrfs_inode_lock(inode, ilock_flags);
1914 if (err < 0)
1915 return err;
1916
1917 err = generic_write_checks(iocb, from);
1918 if (err <= 0) {
1919 btrfs_inode_unlock(inode, ilock_flags);
1920 return err;
1921 }
1922
1923 err = btrfs_write_check(iocb, from, err);
1924 if (err < 0) {
1925 btrfs_inode_unlock(inode, ilock_flags);
1926 goto out;
1927 }
1928
1929 pos = iocb->ki_pos;
1930 /*
1931 * Re-check since the file size may have changed just before taking the
1932 * lock, or pos may have changed because of O_APPEND in generic_write_checks()
1933 */
1934 if ((ilock_flags & BTRFS_ILOCK_SHARED) &&
1935 pos + iov_iter_count(from) > i_size_read(inode)) {
1936 btrfs_inode_unlock(inode, ilock_flags);
1937 ilock_flags &= ~BTRFS_ILOCK_SHARED;
1938 goto relock;
1939 }
1940
1941 if (check_direct_IO(fs_info, from, pos)) {
1942 btrfs_inode_unlock(inode, ilock_flags);
1943 goto buffered;
1944 }
1945
1946 dio = __iomap_dio_rw(iocb, from, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
1947 0);
1948
1949 btrfs_inode_unlock(inode, ilock_flags);
1950
1951 if (IS_ERR_OR_NULL(dio)) {
1952 err = PTR_ERR_OR_ZERO(dio);
1953 if (err < 0 && err != -ENOTBLK)
1954 goto out;
1955 } else {
1956 written = iomap_dio_complete(dio);
1957 }
1958
1959 if (written < 0 || !iov_iter_count(from)) {
1960 err = written;
1961 goto out;
1962 }
1963
1964buffered:
1965 pos = iocb->ki_pos;
1966 written_buffered = btrfs_buffered_write(iocb, from);
1967 if (written_buffered < 0) {
1968 err = written_buffered;
1969 goto out;
1970 }
1971 /*
1972 * Ensure all data is persisted. We want the next direct IO read to be
1973 * able to read what was just written.
1974 */
1975 endbyte = pos + written_buffered - 1;
1976 err = btrfs_fdatawrite_range(inode, pos, endbyte);
1977 if (err)
1978 goto out;
1979 err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
1980 if (err)
1981 goto out;
1982 written += written_buffered;
1983 iocb->ki_pos = pos + written_buffered;
1984 invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
1985 endbyte >> PAGE_SHIFT);
1986out:
1987 return written ? written : err;
1988}
1989
1990static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
1991 struct iov_iter *from)
1992{
1993 struct file *file = iocb->ki_filp;
1994 struct btrfs_inode *inode = BTRFS_I(file_inode(file));
1995 ssize_t num_written = 0;
1996 const bool sync = iocb->ki_flags & IOCB_DSYNC;
1997
1998 /*
1999 * If the fs flips read-only due to an unrecoverable error, we have to
2000 * stop this write operation to ensure consistency, even though we
2001 * have opened the file as writable.
2002 */
2003 if (test_bit(BTRFS_FS_STATE_ERROR, &inode->root->fs_info->fs_state))
2004 return -EROFS;
2005
2006 if (!(iocb->ki_flags & IOCB_DIRECT) &&
2007 (iocb->ki_flags & IOCB_NOWAIT))
2008 return -EOPNOTSUPP;
2009
2010 if (sync)
2011 atomic_inc(&inode->sync_writers);
2012
2013 if (iocb->ki_flags & IOCB_DIRECT)
2014 num_written = btrfs_direct_write(iocb, from);
2015 else
2016 num_written = btrfs_buffered_write(iocb, from);
2017
2018 btrfs_set_inode_last_sub_trans(inode);
2019
2020 if (num_written > 0)
2021 num_written = generic_write_sync(iocb, num_written);
2022
2023 if (sync)
2024 atomic_dec(&inode->sync_writers);
2025
2026 current->backing_dev_info = NULL;
2027 return num_written;
2028}
2029
2030int btrfs_release_file(struct inode *inode, struct file *filp)
2031{
2032 struct btrfs_file_private *private = filp->private_data;
2033
2034 if (private)
2035 kfree(private->filldir_buf);
2036 kfree(private);
2037 filp->private_data = NULL;
2038
2039 /*
2040 * Set by setattr when we are about to truncate a file from a non-zero
2041 * size to a zero size. This tries to flush down new bytes that may
2042 * have been written if the application were using truncate to replace
2043 * a file in place.
2044 */
2045 if (test_and_clear_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
2046 &BTRFS_I(inode)->runtime_flags))
2047 filemap_flush(inode->i_mapping);
2048 return 0;
2049}
2050
2051static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
2052{
2053 int ret;
2054 struct blk_plug plug;
2055
2056 /*
2057 * This is only called from fsync, which does synchronous writes, so
2058 * a plug can merge adjacent IOs as much as possible. Especially with
2059 * multiple disks using a raid profile, a large IO can be split into
2060 * several segments of stripe length (currently 64K).
2061 */
2062 blk_start_plug(&plug);
2063 atomic_inc(&BTRFS_I(inode)->sync_writers);
2064 ret = btrfs_fdatawrite_range(inode, start, end);
2065 atomic_dec(&BTRFS_I(inode)->sync_writers);
2066 blk_finish_plug(&plug);
2067
2068 return ret;
2069}
2070
2071static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
2072{
2073 struct btrfs_inode *inode = BTRFS_I(ctx->inode);
2074 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2075
2076 if (btrfs_inode_in_log(inode, fs_info->generation) &&
2077 list_empty(&ctx->ordered_extents))
2078 return true;
2079
2080 /*
2081 * If we are doing a fast fsync we cannot bail out if the inode's
2082 * last_trans is <= the last committed transaction, because we only
2083 * update the last_trans of the inode during ordered extent completion,
2084 * and for a fast fsync we don't wait for that, we only wait for the
2085 * writeback to complete.
2086 */
2087 if (inode->last_trans <= fs_info->last_trans_committed &&
2088 (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
2089 list_empty(&ctx->ordered_extents)))
2090 return true;
2091
2092 return false;
2093}
2094
2095/*
2096 * fsync call for both files and directories. This logs the inode into
2097 * the tree log instead of forcing full commits whenever possible.
2098 *
2099 * It needs to call filemap_fdatawait so that all ordered extent updates
2100 * in the metadata btree are up to date for copying to the log.
2101 *
2102 * It drops the inode mutex before doing the tree log commit. This is an
2103 * important optimization for directories because holding the mutex prevents
2104 * new operations on the dir while we write to disk.
2105 */
2106int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
2107{
2108 struct dentry *dentry = file_dentry(file);
2109 struct inode *inode = d_inode(dentry);
2110 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2111 struct btrfs_root *root = BTRFS_I(inode)->root;
2112 struct btrfs_trans_handle *trans;
2113 struct btrfs_log_ctx ctx;
2114 int ret = 0, err;
2115 u64 len;
2116 bool full_sync;
2117
2118 trace_btrfs_sync_file(file, datasync);
2119
2120 btrfs_init_log_ctx(&ctx, inode);
2121
2122 /*
2123 * Always set the range to a full range, otherwise we can get into
2124 * several problems, from missing file extent items to represent holes
2125 * when not using the NO_HOLES feature, to log tree corruption due to
2126 * races between hole detection during logging and completion of ordered
2127 * extents outside the range, to missing checksums due to ordered extents
2128 * for which we flushed only a subset of their pages.
2129 */
2130 start = 0;
2131 end = LLONG_MAX;
2132 len = (u64)LLONG_MAX + 1;
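/* (u64)LLONG_MAX + 1 == 2^63, a length covering the whole possible file range. */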
2133
2134 /*
2135 * We write the dirty pages in the range and wait until they complete
2136 * outside of the ->i_mutex, so that multiple tasks can flush dirty
2137 * pages concurrently and improve performance. See
2138 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
2139 */
2140 ret = start_ordered_ops(inode, start, end);
2141 if (ret)
2142 goto out;
2143
2144 btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
2145
2146 atomic_inc(&root->log_batch);
2147
2148 /*
2149 * Always check for the full sync flag while holding the inode's lock,
2150 * to avoid races with other tasks. The flag must either stay set for
2151 * the whole duration of the logging or stay off the whole time.
2152 */
2153 full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2154 &BTRFS_I(inode)->runtime_flags);
2155
2156 /*
2157 * Before we acquired the inode's lock and the mmap lock, someone may
2158 * have dirtied more pages in the target range. We need to make sure
2159 * that writeback for any such pages does not start while we are logging
2160 * the inode, because if it does, any of the following might happen when
2161 * we are not doing a full inode sync:
2162 *
2163 * 1) We log an extent after its writeback finishes but before its
2164 * checksums are added to the csum tree, leading to -EIO errors
2165 * when attempting to read the extent after a log replay.
2166 *
2167 * 2) We can end up logging an extent before its writeback finishes.
2168 * Therefore after the log replay we will have a file extent item
2169 * pointing to an unwritten extent (and no data checksums as well).
2170 *
2171 * So trigger writeback for any eventual new dirty pages and then we
2172 * wait for all ordered extents to complete below.
2173 */
2174 ret = start_ordered_ops(inode, start, end);
2175 if (ret) {
2176 btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
2177 goto out;
2178 }
2179
2180 /*
2181 * We have to do this here to avoid the priority inversion of waiting on
2182 * IO of a lower priority task while holding a transaction open.
2183 *
2184 * For a full fsync we wait for the ordered extents to complete while
2185 * for a fast fsync we wait just for writeback to complete, and then
2186 * attach the ordered extents to the transaction so that a transaction
2187 * commit waits for their completion, to avoid data loss if we fsync,
2188 * the current transaction commits before the ordered extents complete
2189 * and a power failure happens right after that.
2190 *
2191 * For zoned filesystem, if a write IO uses a ZONE_APPEND command, the
2192 * logical address recorded in the ordered extent may change. We need
2193 * to wait for the IO to stabilize the logical address.
2194 */
2195 if (full_sync || btrfs_is_zoned(fs_info)) {
2196 ret = btrfs_wait_ordered_range(inode, start, len);
2197 } else {
2198 /*
2199 * Get our ordered extents as soon as possible to avoid doing
2200 * checksum lookups in the csum tree, and use instead the
2201 * checksums attached to the ordered extents.
2202 */
2203 btrfs_get_ordered_extents_for_logging(BTRFS_I(inode),
2204 &ctx.ordered_extents);
2205 ret = filemap_fdatawait_range(inode->i_mapping, start, end);
2206 }
2207
2208 if (ret)
2209 goto out_release_extents;
2210
2211 atomic_inc(&root->log_batch);
2212
2213 smp_mb();
2214 if (skip_inode_logging(&ctx)) {
2215 /*
2216 * We've had everything committed since the last time we were
2217 * modified so clear this flag in case it was set for whatever
2218 * reason, it's no longer relevant.
2219 */
2220 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2221 &BTRFS_I(inode)->runtime_flags);
2222 /*
2223 * An ordered extent might have started before and completed
2224 * already with io errors, in which case the inode was not
2225 * updated and we end up here. So check the inode's mapping
2226 * for any errors that might have happened since we last
2227 * called fsync.
2228 */
2229 ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
2230 goto out_release_extents;
2231 }
2232
2233 /*
2234 * We use start here because we will need to wait on the IO to complete
2235 * in btrfs_sync_log, which could require joining a transaction (for
2236 * example checking cross references in the nocow path). If we use join
2237 * here we could get into a situation where we're waiting on IO to
2238 * happen that is blocked on a transaction trying to commit. With start
2239 * we inc the extwriter counter, so we wait for all extwriters to exit
2240 * before we start blocking joiners. This comment is to keep somebody
2241 * from thinking they are super smart and changing this to
2242 * btrfs_join_transaction *cough*Josef*cough*.
2243 */
2244 trans = btrfs_start_transaction(root, 0);
2245 if (IS_ERR(trans)) {
2246 ret = PTR_ERR(trans);
2247 goto out_release_extents;
2248 }
2249 trans->in_fsync = true;
2250
2251 ret = btrfs_log_dentry_safe(trans, dentry, &ctx);
2252 btrfs_release_log_ctx_extents(&ctx);
2253 if (ret < 0) {
2254 /* Fallthrough and commit/free transaction. */
2255 ret = 1;
2256 }
2257
2258 /* we've logged all the items and now have a consistent
2259 * version of the file in the log. It is possible that
2260 * someone will come in and modify the file, but that's
2261 * fine because the log is consistent on disk, and we
2262 * have references to all of the file's extents
2263 *
2264 * It is possible that someone will come in and log the
2265 * file again, but that will end up using the synchronization
2266 * inside btrfs_sync_log to keep things safe.
2267 */
2268 btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
2269
2270 if (ret != BTRFS_NO_LOG_SYNC) {
2271 if (!ret) {
2272 ret = btrfs_sync_log(trans, root, &ctx);
2273 if (!ret) {
2274 ret = btrfs_end_transaction(trans);
2275 goto out;
2276 }
2277 }
2278 if (!full_sync) {
2279 ret = btrfs_wait_ordered_range(inode, start, len);
2280 if (ret) {
2281 btrfs_end_transaction(trans);
2282 goto out;
2283 }
2284 }
2285 ret = btrfs_commit_transaction(trans);
2286 } else {
2287 ret = btrfs_end_transaction(trans);
2288 }
2289out:
2290 ASSERT(list_empty(&ctx.list));
2291 err = file_check_and_advance_wb_err(file);
2292 if (!ret)
2293 ret = err;
2294 return ret > 0 ? -EIO : ret;
2295
2296out_release_extents:
2297 btrfs_release_log_ctx_extents(&ctx);
2298 btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
2299 goto out;
2300}
2301
2302static const struct vm_operations_struct btrfs_file_vm_ops = {
2303 .fault = filemap_fault,
2304 .map_pages = filemap_map_pages,
2305 .page_mkwrite = btrfs_page_mkwrite,
2306};
2307
2308static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
2309{
2310 struct address_space *mapping = filp->f_mapping;
2311
2312 if (!mapping->a_ops->readpage)
2313 return -ENOEXEC;
2314
2315 file_accessed(filp);
2316 vma->vm_ops = &btrfs_file_vm_ops;
2317
2318 return 0;
2319}
2320
2321static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
2322 int slot, u64 start, u64 end)
2323{
2324 struct btrfs_file_extent_item *fi;
2325 struct btrfs_key key;
2326
2327 if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2328 return 0;
2329
2330 btrfs_item_key_to_cpu(leaf, &key, slot);
2331 if (key.objectid != btrfs_ino(inode) ||
2332 key.type != BTRFS_EXTENT_DATA_KEY)
2333 return 0;
2334
2335 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2336
2337 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2338 return 0;
2339
2340 if (btrfs_file_extent_disk_bytenr(leaf, fi))
2341 return 0;
2342
2343 if (key.offset == end)
2344 return 1;
2345 if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2346 return 1;
2347 return 0;
2348}
2349
2350static int fill_holes(struct btrfs_trans_handle *trans,
2351 struct btrfs_inode *inode,
2352 struct btrfs_path *path, u64 offset, u64 end)
2353{
2354 struct btrfs_fs_info *fs_info = trans->fs_info;
2355 struct btrfs_root *root = inode->root;
2356 struct extent_buffer *leaf;
2357 struct btrfs_file_extent_item *fi;
2358 struct extent_map *hole_em;
2359 struct extent_map_tree *em_tree = &inode->extent_tree;
2360 struct btrfs_key key;
2361 int ret;
2362
2363 if (btrfs_fs_incompat(fs_info, NO_HOLES))
2364 goto out;
2365
2366 key.objectid = btrfs_ino(inode);
2367 key.type = BTRFS_EXTENT_DATA_KEY;
2368 key.offset = offset;
2369
2370 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2371 if (ret <= 0) {
2372 /*
2373 * We should have dropped this offset, so if we find it then
2374 * something has gone horribly wrong.
2375 */
2376 if (ret == 0)
2377 ret = -EINVAL;
2378 return ret;
2379 }
2380
2381 leaf = path->nodes[0];
2382 if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2383 u64 num_bytes;
2384
2385 path->slots[0]--;
2386 fi = btrfs_item_ptr(leaf, path->slots[0],
2387 struct btrfs_file_extent_item);
2388 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2389 end - offset;
2390 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2391 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2392 btrfs_set_file_extent_offset(leaf, fi, 0);
2393 btrfs_mark_buffer_dirty(leaf);
2394 goto out;
2395 }
2396
2397 if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2398 u64 num_bytes;
2399
2400 key.offset = offset;
2401 btrfs_set_item_key_safe(fs_info, path, &key);
2402 fi = btrfs_item_ptr(leaf, path->slots[0],
2403 struct btrfs_file_extent_item);
2404 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2405 offset;
2406 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2407 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2408 btrfs_set_file_extent_offset(leaf, fi, 0);
2409 btrfs_mark_buffer_dirty(leaf);
2410 goto out;
2411 }
2412 btrfs_release_path(path);
2413
2414 ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
2415 offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0);
2416 if (ret)
2417 return ret;
2418
2419out:
2420 btrfs_release_path(path);
2421
2422 hole_em = alloc_extent_map();
2423 if (!hole_em) {
2424 btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2425 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
2426 } else {
2427 hole_em->start = offset;
2428 hole_em->len = end - offset;
2429 hole_em->ram_bytes = hole_em->len;
2430 hole_em->orig_start = offset;
2431
2432 hole_em->block_start = EXTENT_MAP_HOLE;
2433 hole_em->block_len = 0;
2434 hole_em->orig_block_len = 0;
2435 hole_em->compress_type = BTRFS_COMPRESS_NONE;
2436 hole_em->generation = trans->transid;
2437
2438 do {
2439 btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2440 write_lock(&em_tree->lock);
2441 ret = add_extent_mapping(em_tree, hole_em, 1);
2442 write_unlock(&em_tree->lock);
2443 } while (ret == -EEXIST);
2444 free_extent_map(hole_em);
2445 if (ret)
2446 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2447 &inode->runtime_flags);
2448 }
2449
2450 return 0;
2451}
2452
2453/*
2454 * Find a hole extent on the given inode and change start/len to the end of
2455 * the hole extent (a hole/vacuum extent is one whose em->start <= start &&
2456 * em->start + em->len > start).
2457 * When a hole extent is found, return 1 and modify start/len.
2458 */
2459static int find_first_non_hole(struct btrfs_inode *inode, u64 *start, u64 *len)
2460{
2461 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2462 struct extent_map *em;
2463 int ret = 0;
2464
2465 em = btrfs_get_extent(inode, NULL, 0,
2466 round_down(*start, fs_info->sectorsize),
2467 round_up(*len, fs_info->sectorsize));
2468 if (IS_ERR(em))
2469 return PTR_ERR(em);
2470
2471 /* Hole or vacuum extent (the latter only exists in NO_HOLES mode) */
2472 if (em->block_start == EXTENT_MAP_HOLE) {
2473 ret = 1;
2474 *len = em->start + em->len > *start + *len ?
2475 0 : *start + *len - em->start - em->len;
2476 *start = em->start + em->len;
2477 }
2478 free_extent_map(em);
2479 return ret;
2480}
2481
2482static int btrfs_punch_hole_lock_range(struct inode *inode,
2483 const u64 lockstart,
2484 const u64 lockend,
2485 struct extent_state **cached_state)
2486{
2487 /*
2488 * For the subpage case, if the range is not at a page boundary, we could
2489 * have pages at the leading/trailing part of the range.
2490 * This could lead to an infinite loop since filemap_range_has_page()
2491 * would always return true.
2492 * So here we need to do extra page alignment for
2493 * filemap_range_has_page().
2494 */
2495 const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
2496 const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
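/*
 * Worked example with hypothetical sizes (64K pages, 4K sectors):
 * for lockstart == 4096 and lockend == 131071, page_lockstart is
 * 65536 and page_lockend is 131071, so the page check below only
 * covers pages fully inside the locked range and ignores the
 * partially covered first page, which may legitimately keep
 * subpage data cached outside [lockstart, lockend].
 */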
2497
2498 while (1) {
2499 struct btrfs_ordered_extent *ordered;
2500 int ret;
2501
2502 truncate_pagecache_range(inode, lockstart, lockend);
2503
2504 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2505 cached_state);
2506 ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode),
2507 lockend);
2508
2509 /*
2510 * We need to make sure we have no ordered extents in this range
2511 * and nobody raced in and read a page in this range, if we did
2512 * we need to try again.
2513 */
2514 if ((!ordered ||
2515 (ordered->file_offset + ordered->num_bytes <= lockstart ||
2516 ordered->file_offset > lockend)) &&
2517 !filemap_range_has_page(inode->i_mapping,
2518 page_lockstart, page_lockend)) {
2519 if (ordered)
2520 btrfs_put_ordered_extent(ordered);
2521 break;
2522 }
2523 if (ordered)
2524 btrfs_put_ordered_extent(ordered);
2525 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
2526 lockend, cached_state);
2527 ret = btrfs_wait_ordered_range(inode, lockstart,
2528 lockend - lockstart + 1);
2529 if (ret)
2530 return ret;
2531 }
2532 return 0;
2533}
2534
2535static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
2536 struct btrfs_inode *inode,
2537 struct btrfs_path *path,
2538 struct btrfs_replace_extent_info *extent_info,
2539 const u64 replace_len,
2540 const u64 bytes_to_drop)
2541{
2542 struct btrfs_fs_info *fs_info = trans->fs_info;
2543 struct btrfs_root *root = inode->root;
2544 struct btrfs_file_extent_item *extent;
2545 struct extent_buffer *leaf;
2546 struct btrfs_key key;
2547 int slot;
2548 struct btrfs_ref ref = { 0 };
2549 int ret;
2550
2551 if (replace_len == 0)
2552 return 0;
2553
2554 if (extent_info->disk_offset == 0 &&
2555 btrfs_fs_incompat(fs_info, NO_HOLES)) {
2556 btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2557 return 0;
2558 }
2559
2560 key.objectid = btrfs_ino(inode);
2561 key.type = BTRFS_EXTENT_DATA_KEY;
2562 key.offset = extent_info->file_offset;
2563 ret = btrfs_insert_empty_item(trans, root, path, &key,
2564 sizeof(struct btrfs_file_extent_item));
2565 if (ret)
2566 return ret;
2567 leaf = path->nodes[0];
2568 slot = path->slots[0];
2569 write_extent_buffer(leaf, extent_info->extent_buf,
2570 btrfs_item_ptr_offset(leaf, slot),
2571 sizeof(struct btrfs_file_extent_item));
2572 extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2573 ASSERT(btrfs_file_extent_type(leaf, extent) != BTRFS_FILE_EXTENT_INLINE);
2574 btrfs_set_file_extent_offset(leaf, extent, extent_info->data_offset);
2575 btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
2576 if (extent_info->is_new_extent)
2577 btrfs_set_file_extent_generation(leaf, extent, trans->transid);
2578 btrfs_mark_buffer_dirty(leaf);
2579 btrfs_release_path(path);
2580
2581 ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
2582 replace_len);
2583 if (ret)
2584 return ret;
2585
2586 /* If it's a hole, nothing more needs to be done. */
2587 if (extent_info->disk_offset == 0) {
2588 btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2589 return 0;
2590 }
2591
2592 btrfs_update_inode_bytes(inode, replace_len, bytes_to_drop);
2593
2594 if (extent_info->is_new_extent && extent_info->insertions == 0) {
2595 key.objectid = extent_info->disk_offset;
2596 key.type = BTRFS_EXTENT_ITEM_KEY;
2597 key.offset = extent_info->disk_len;
2598 ret = btrfs_alloc_reserved_file_extent(trans, root,
2599 btrfs_ino(inode),
2600 extent_info->file_offset,
2601 extent_info->qgroup_reserved,
2602 &key);
2603 } else {
2604 u64 ref_offset;
2605
2606 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2607 extent_info->disk_offset,
2608 extent_info->disk_len, 0);
2609 ref_offset = extent_info->file_offset - extent_info->data_offset;
2610 btrfs_init_data_ref(&ref, root->root_key.objectid,
2611 btrfs_ino(inode), ref_offset);
2612 ret = btrfs_inc_extent_ref(trans, &ref);
2613 }
2614
2615 extent_info->insertions++;
2616
2617 return ret;
2618}
2619
2620/*
2621 * The respective range must have been previously locked, as well as the inode.
2622 * The end offset is inclusive (last byte of the range).
2623 * @extent_info is NULL for fallocate's hole punching and non-NULL when replacing
2624 * the file range with an extent.
2625 * When not punching a hole, we don't want to end up in a state where we dropped
2626 * extents without inserting a new one, so we must abort the transaction to avoid
2627 * a corruption.
2628 */
2629int btrfs_replace_file_extents(struct btrfs_inode *inode,
2630 struct btrfs_path *path, const u64 start,
2631 const u64 end,
2632 struct btrfs_replace_extent_info *extent_info,
2633 struct btrfs_trans_handle **trans_out)
2634{
2635 struct btrfs_drop_extents_args drop_args = { 0 };
2636 struct btrfs_root *root = inode->root;
2637 struct btrfs_fs_info *fs_info = root->fs_info;
2638 u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
2639 u64 ino_size = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
2640 struct btrfs_trans_handle *trans = NULL;
2641 struct btrfs_block_rsv *rsv;
2642 unsigned int rsv_count;
2643 u64 cur_offset;
2644 u64 len = end - start;
2645 int ret = 0;
2646
2647 if (end <= start)
2648 return -EINVAL;
2649
2650 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
2651 if (!rsv) {
2652 ret = -ENOMEM;
2653 goto out;
2654 }
2655 rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
2656 rsv->failfast = 1;
2657
2658 /*
2659 * 1 - update the inode
2660 * 1 - removing the extents in the range
2661 * 1 - adding the hole extent if no_holes isn't set or if we are
2662 * replacing the range with a new extent
2663 */
2664 if (!btrfs_fs_incompat(fs_info, NO_HOLES) || extent_info)
2665 rsv_count = 3;
2666 else
2667 rsv_count = 2;
2668
2669 trans = btrfs_start_transaction(root, rsv_count);
2670 if (IS_ERR(trans)) {
2671 ret = PTR_ERR(trans);
2672 trans = NULL;
2673 goto out_free;
2674 }
2675
2676 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
2677 min_size, false);
2678 BUG_ON(ret);
2679 trans->block_rsv = rsv;
2680
2681 cur_offset = start;
2682 drop_args.path = path;
2683 drop_args.end = end + 1;
2684 drop_args.drop_cache = true;
2685 while (cur_offset < end) {
2686 drop_args.start = cur_offset;
2687 ret = btrfs_drop_extents(trans, root, inode, &drop_args);
2688 /* If we are punching a hole decrement the inode's byte count */
2689 if (!extent_info)
2690 btrfs_update_inode_bytes(inode, 0,
2691 drop_args.bytes_found);
2692 if (ret != -ENOSPC) {
2693 /*
2694 * The only time we don't want to abort is if we are
2695 * attempting to clone a partial inline extent, in which
2696 * case we'll get EOPNOTSUPP. However if we aren't
2697 * cloning we need to abort no matter what, because if we
2698 * got EOPNOTSUPP via prealloc then we messed up and
2699 * need to abort.
2700 */
2701 if (ret &&
2702 (ret != -EOPNOTSUPP ||
2703 (extent_info && extent_info->is_new_extent)))
2704 btrfs_abort_transaction(trans, ret);
2705 break;
2706 }
2707
2708 trans->block_rsv = &fs_info->trans_block_rsv;
2709
2710 if (!extent_info && cur_offset < drop_args.drop_end &&
2711 cur_offset < ino_size) {
2712 ret = fill_holes(trans, inode, path, cur_offset,
2713 drop_args.drop_end);
2714 if (ret) {
2715 /*
2716 * If we failed then we didn't insert our hole
2717 * entries for the area we dropped, so now the
2718 * fs is corrupted, so we must abort the
2719 * transaction.
2720 */
2721 btrfs_abort_transaction(trans, ret);
2722 break;
2723 }
2724 } else if (!extent_info && cur_offset < drop_args.drop_end) {
2725 /*
2726 * We are past the i_size here, but since we didn't
2727 * insert holes we need to clear the mapped area so we
2728 * know to not set disk_i_size in this area until a new
2729 * file extent is inserted here.
2730 */
2731 ret = btrfs_inode_clear_file_extent_range(inode,
2732 cur_offset,
2733 drop_args.drop_end - cur_offset);
2734 if (ret) {
2735 /*
2736 * We couldn't clear our area, so we could
2737 * presumably adjust up and corrupt the fs, so
2738 * we need to abort.
2739 */
2740 btrfs_abort_transaction(trans, ret);
2741 break;
2742 }
2743 }
2744
2745 if (extent_info &&
2746 drop_args.drop_end > extent_info->file_offset) {
2747 u64 replace_len = drop_args.drop_end -
2748 extent_info->file_offset;
2749
2750 ret = btrfs_insert_replace_extent(trans, inode, path,
2751 extent_info, replace_len,
2752 drop_args.bytes_found);
2753 if (ret) {
2754 btrfs_abort_transaction(trans, ret);
2755 break;
2756 }
2757 extent_info->data_len -= replace_len;
2758 extent_info->data_offset += replace_len;
2759 extent_info->file_offset += replace_len;
2760 }
2761
2762 ret = btrfs_update_inode(trans, root, inode);
2763 if (ret)
2764 break;
2765
2766 btrfs_end_transaction(trans);
2767 btrfs_btree_balance_dirty(fs_info);
2768
2769 trans = btrfs_start_transaction(root, rsv_count);
2770 if (IS_ERR(trans)) {
2771 ret = PTR_ERR(trans);
2772 trans = NULL;
2773 break;
2774 }
2775
2776 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
2777 rsv, min_size, false);
2778 BUG_ON(ret); /* shouldn't happen */
2779 trans->block_rsv = rsv;
2780
2781 cur_offset = drop_args.drop_end;
2782 len = end - cur_offset;
2783 if (!extent_info && len) {
2784 ret = find_first_non_hole(inode, &cur_offset, &len);
2785 if (unlikely(ret < 0))
2786 break;
2787 if (ret && !len) {
2788 ret = 0;
2789 break;
2790 }
2791 }
2792 }
2793
2794 /*
2795 * If we were cloning, force the next fsync to be a full one since we
2796 * replaced (or just dropped in the case of cloning holes when
2797 * NO_HOLES is enabled) file extent items and did not setup new extent
2798 * maps for the replacement extents (or holes).
2799 */
2800 if (extent_info && !extent_info->is_new_extent)
2801 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
2802
2803 if (ret)
2804 goto out_trans;
2805
2806 trans->block_rsv = &fs_info->trans_block_rsv;
2807 /*
2808 * If we are using the NO_HOLES feature we might already have had a
2809 * hole that overlaps a part of the region [lockstart, lockend] and
2810 * ends at (or beyond) lockend. Since we have no file extent items to
2811 * represent holes, drop_end can be less than lockend and so we must
2812 * make sure we have an extent map representing the existing hole (the
2813 * call to __btrfs_drop_extents() might have dropped the existing extent
2814 * map representing the existing hole), otherwise the fast fsync path
2815 * will not record the existence of the hole region
2816 * [existing_hole_start, lockend].
2817 */
2818 if (drop_args.drop_end <= end)
2819 drop_args.drop_end = end + 1;
2820 /*
2821 * Don't insert file hole extent item if it's for a range beyond eof
2822 * (because it's useless) or if it represents a 0 byte range (when
2823 * cur_offset == drop_end).
2824 */
2825 if (!extent_info && cur_offset < ino_size &&
2826 cur_offset < drop_args.drop_end) {
2827 ret = fill_holes(trans, inode, path, cur_offset,
2828 drop_args.drop_end);
2829 if (ret) {
2830 /* Same comment as above. */
2831 btrfs_abort_transaction(trans, ret);
2832 goto out_trans;
2833 }
2834 } else if (!extent_info && cur_offset < drop_args.drop_end) {
2835 /* See the comment in the loop above for the reasoning here. */
2836 ret = btrfs_inode_clear_file_extent_range(inode, cur_offset,
2837 drop_args.drop_end - cur_offset);
2838 if (ret) {
2839 btrfs_abort_transaction(trans, ret);
2840 goto out_trans;
2841 }
2843 }
2844 if (extent_info) {
2845 ret = btrfs_insert_replace_extent(trans, inode, path,
2846 extent_info, extent_info->data_len,
2847 drop_args.bytes_found);
2848 if (ret) {
2849 btrfs_abort_transaction(trans, ret);
2850 goto out_trans;
2851 }
2852 }
2853
2854out_trans:
2855 if (!trans)
2856 goto out_free;
2857
2858 trans->block_rsv = &fs_info->trans_block_rsv;
2859 if (ret)
2860 btrfs_end_transaction(trans);
2861 else
2862 *trans_out = trans;
2863out_free:
2864 btrfs_free_block_rsv(fs_info, rsv);
2865out:
2866 return ret;
2867}
2868
2869static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
2870{
2871 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2872 struct btrfs_root *root = BTRFS_I(inode)->root;
2873 struct extent_state *cached_state = NULL;
2874 struct btrfs_path *path;
2875 struct btrfs_trans_handle *trans = NULL;
2876 u64 lockstart;
2877 u64 lockend;
2878 u64 tail_start;
2879 u64 tail_len;
2880 u64 orig_start = offset;
2881 int ret = 0;
2882 bool same_block;
2883 u64 ino_size;
2884 bool truncated_block = false;
2885 bool updated_inode = false;
2886
2887 ret = btrfs_wait_ordered_range(inode, offset, len);
2888 if (ret)
2889 return ret;
2890
2891 btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
2892 ino_size = round_up(inode->i_size, fs_info->sectorsize);
2893 ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2894 if (ret < 0)
2895 goto out_only_mutex;
2896 if (ret && !len) {
2897 /* Already in a large hole */
2898 ret = 0;
2899 goto out_only_mutex;
2900 }
2901
2902 lockstart = round_up(offset, btrfs_inode_sectorsize(BTRFS_I(inode)));
2903 lockend = round_down(offset + len,
2904 btrfs_inode_sectorsize(BTRFS_I(inode))) - 1;
2905 same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
2906 == (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
2907 /*
2908 * We needn't truncate any block which is beyond the end of the file
2909 * because we are sure there is no data there.
2910 */
2911 /*
2912 * Only do this if we are in the same block and we aren't doing the
2913 * entire block.
2914 */
2915 if (same_block && len < fs_info->sectorsize) {
2916 if (offset < ino_size) {
2917 truncated_block = true;
2918 ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
2919 0);
2920 } else {
2921 ret = 0;
2922 }
2923 goto out_only_mutex;
2924 }
2925
2926 /* zero back part of the first block */
2927 if (offset < ino_size) {
2928 truncated_block = true;
2929 ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
2930 if (ret) {
2931 btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
2932 return ret;
2933 }
2934 }
2935
2936 /* Check the aligned pages after the first unaligned page.
2937 * If offset != orig_start, the first unaligned page and several
2938 * following pages are already in holes, so the extra
2939 * check can be skipped. */
2940 if (offset == orig_start) {
2941 /* after truncate page, check hole again */
2942 len = offset + len - lockstart;
2943 offset = lockstart;
2944 ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2945 if (ret < 0)
2946 goto out_only_mutex;
2947 if (ret && !len) {
2948 ret = 0;
2949 goto out_only_mutex;
2950 }
2951 lockstart = offset;
2952 }
2953
2954 /* Check the tail unaligned part is in a hole */
2955 tail_start = lockend + 1;
2956 tail_len = offset + len - tail_start;
2957 if (tail_len) {
2958 ret = find_first_non_hole(BTRFS_I(inode), &tail_start, &tail_len);
2959 if (unlikely(ret < 0))
2960 goto out_only_mutex;
2961 if (!ret) {
2962 /* zero the front end of the last page */
2963 if (tail_start + tail_len < ino_size) {
2964 truncated_block = true;
2965 ret = btrfs_truncate_block(BTRFS_I(inode),
2966 tail_start + tail_len,
2967 0, 1);
2968 if (ret)
2969 goto out_only_mutex;
2970 }
2971 }
2972 }
2973
2974 if (lockend < lockstart) {
2975 ret = 0;
2976 goto out_only_mutex;
2977 }
2978
2979 ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
2980 &cached_state);
2981 if (ret)
2982 goto out_only_mutex;
2983
2984 path = btrfs_alloc_path();
2985 if (!path) {
2986 ret = -ENOMEM;
2987 goto out;
2988 }
2989
2990 ret = btrfs_replace_file_extents(BTRFS_I(inode), path, lockstart,
2991 lockend, NULL, &trans);
2992 btrfs_free_path(path);
2993 if (ret)
2994 goto out;
2995
2996 ASSERT(trans != NULL);
2997 inode_inc_iversion(inode);
2998 inode->i_mtime = inode->i_ctime = current_time(inode);
2999 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
3000 updated_inode = true;
3001 btrfs_end_transaction(trans);
3002 btrfs_btree_balance_dirty(fs_info);
3003out:
3004 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3005 &cached_state);
3006out_only_mutex:
3007 if (!updated_inode && truncated_block && !ret) {
3008 /*
3009 * If we only end up zeroing part of a page, we still need to
3010 * update the inode item, so that all the time fields are
3011 * updated as well as the necessary in-memory btrfs inode fields
3012 * used for detecting, at fsync time, whether the inode isn't yet
3013 * in the log tree or is there but not up to date.
3014 */
3015 struct timespec64 now = current_time(inode);
3016
3017 inode_inc_iversion(inode);
3018 inode->i_mtime = now;
3019 inode->i_ctime = now;
3020 trans = btrfs_start_transaction(root, 1);
3021 if (IS_ERR(trans)) {
3022 ret = PTR_ERR(trans);
3023 } else {
3024 int ret2;
3025
3026 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
3027 ret2 = btrfs_end_transaction(trans);
3028 if (!ret)
3029 ret = ret2;
3030 }
3031 }
3032 btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
3033 return ret;
3034}
3035
3036/* Helper structure to record which range is already reserved */
3037struct falloc_range {
3038 struct list_head list;
3039 u64 start;
3040 u64 len;
3041};
3042
3043/*
3044 * Helper function to add falloc range
3045 *
3046 * The caller should have locked the larger extent range containing
3047 * [start, start + len)
3048 */
3049static int add_falloc_range(struct list_head *head, u64 start, u64 len)
3050{
3051 struct falloc_range *range = NULL;
3052
3053 if (!list_empty(head)) {
3054 /*
3055 * As fallocate iterates in increasing file offset order, we only
3056 * need to check the last range.
3057 */
3058 range = list_last_entry(head, struct falloc_range, list);
3059 if (range->start + range->len == start) {
3060 range->len += len;
3061 return 0;
3062 }
3063 }
3064
3065 range = kmalloc(sizeof(*range), GFP_KERNEL);
3066 if (!range)
3067 return -ENOMEM;
3068 range->start = start;
3069 range->len = len;
3070 list_add_tail(&range->list, head);
3071 return 0;
3072}
3073
3074static int btrfs_fallocate_update_isize(struct inode *inode,
3075 const u64 end,
3076 const int mode)
3077{
3078 struct btrfs_trans_handle *trans;
3079 struct btrfs_root *root = BTRFS_I(inode)->root;
3080 int ret;
3081 int ret2;
3082
3083 if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
3084 return 0;
3085
3086 trans = btrfs_start_transaction(root, 1);
3087 if (IS_ERR(trans))
3088 return PTR_ERR(trans);
3089
3090 inode->i_ctime = current_time(inode);
3091 i_size_write(inode, end);
3092 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
3093 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
3094 ret2 = btrfs_end_transaction(trans);
3095
3096 return ret ? ret : ret2;
3097}
3098
3099enum {
3100 RANGE_BOUNDARY_WRITTEN_EXTENT,
3101 RANGE_BOUNDARY_PREALLOC_EXTENT,
3102 RANGE_BOUNDARY_HOLE,
3103};
3104
3105static int btrfs_zero_range_check_range_boundary(struct btrfs_inode *inode,
3106 u64 offset)
3107{
3108 const u64 sectorsize = btrfs_inode_sectorsize(inode);
3109 struct extent_map *em;
3110 int ret;
3111
3112 offset = round_down(offset, sectorsize);
3113 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize);
3114 if (IS_ERR(em))
3115 return PTR_ERR(em);
3116
3117 if (em->block_start == EXTENT_MAP_HOLE)
3118 ret = RANGE_BOUNDARY_HOLE;
3119 else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3120 ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
3121 else
3122 ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
3123
3124 free_extent_map(em);
3125 return ret;
3126}
3127
3128static int btrfs_zero_range(struct inode *inode,
3129 loff_t offset,
3130 loff_t len,
3131 const int mode)
3132{
3133 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
3134 struct extent_map *em;
3135 struct extent_changeset *data_reserved = NULL;
3136 int ret;
3137 u64 alloc_hint = 0;
3138 const u64 sectorsize = btrfs_inode_sectorsize(BTRFS_I(inode));
3139 u64 alloc_start = round_down(offset, sectorsize);
3140 u64 alloc_end = round_up(offset + len, sectorsize);
3141 u64 bytes_to_reserve = 0;
3142 bool space_reserved = false;
3143
3144 inode_dio_wait(inode);
3145
3146 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
3147 alloc_end - alloc_start);
3148 if (IS_ERR(em)) {
3149 ret = PTR_ERR(em);
3150 goto out;
3151 }
3152
3153 /*
3154 * Avoid hole punching and extent allocation for some cases. More cases
3155 * could be considered, but these are unlikely to be common and we keep things
3156 * as simple as possible for now. Also, intentionally, if the target
3157 * range contains one or more prealloc extents together with regular
3158 * extents and holes, we drop all the existing extents and allocate a
3159 * new prealloc extent, so that we get a larger contiguous disk extent.
3160 */
3161 if (em->start <= alloc_start &&
3162 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3163 const u64 em_end = em->start + em->len;
3164
3165 if (em_end >= offset + len) {
3166 /*
3167 * The whole range is already a prealloc extent,
3168 * do nothing except updating the inode's i_size if
3169 * needed.
3170 */
3171 free_extent_map(em);
3172 ret = btrfs_fallocate_update_isize(inode, offset + len,
3173 mode);
3174 goto out;
3175 }
3176 /*
3177 * Part of the range is already a prealloc extent, so operate
3178 * only on the remaining part of the range.
3179 */
3180 alloc_start = em_end;
3181 ASSERT(IS_ALIGNED(alloc_start, sectorsize));
3182 len = offset + len - alloc_start;
3183 offset = alloc_start;
3184 alloc_hint = em->block_start + em->len;
3185 }
3186 free_extent_map(em);
3187
3188 if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
3189 BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
3190 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
3191 sectorsize);
3192 if (IS_ERR(em)) {
3193 ret = PTR_ERR(em);
3194 goto out;
3195 }
3196
3197 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
3198 free_extent_map(em);
3199 ret = btrfs_fallocate_update_isize(inode, offset + len,
3200 mode);
3201 goto out;
3202 }
3203 if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
3204 free_extent_map(em);
3205 ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
3206 0);
3207 if (!ret)
3208 ret = btrfs_fallocate_update_isize(inode,
3209 offset + len,
3210 mode);
3211 return ret;
3212 }
3213 free_extent_map(em);
3214 alloc_start = round_down(offset, sectorsize);
3215 alloc_end = alloc_start + sectorsize;
3216 goto reserve_space;
3217 }
3218
3219 alloc_start = round_up(offset, sectorsize);
3220 alloc_end = round_down(offset + len, sectorsize);
3221
3222 /*
3223 * For unaligned ranges, check the pages at the boundaries, they might
3224 * map to an extent, in which case we need to partially zero them, or
3225 * they might map to a hole, in which case we need our allocation range
3226 * to cover them.
3227 */
3228 if (!IS_ALIGNED(offset, sectorsize)) {
3229 ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
3230 offset);
3231 if (ret < 0)
3232 goto out;
3233 if (ret == RANGE_BOUNDARY_HOLE) {
3234 alloc_start = round_down(offset, sectorsize);
3235 ret = 0;
3236 } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
3237 ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
3238 if (ret)
3239 goto out;
3240 } else {
3241 ret = 0;
3242 }
3243 }
3244
3245 if (!IS_ALIGNED(offset + len, sectorsize)) {
3246 ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
3247 offset + len);
3248 if (ret < 0)
3249 goto out;
3250 if (ret == RANGE_BOUNDARY_HOLE) {
3251 alloc_end = round_up(offset + len, sectorsize);
3252 ret = 0;
3253 } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
3254 ret = btrfs_truncate_block(BTRFS_I(inode), offset + len,
3255 0, 1);
3256 if (ret)
3257 goto out;
3258 } else {
3259 ret = 0;
3260 }
3261 }
3262
3263reserve_space:
3264 if (alloc_start < alloc_end) {
3265 struct extent_state *cached_state = NULL;
3266 const u64 lockstart = alloc_start;
3267 const u64 lockend = alloc_end - 1;
3268
3269 bytes_to_reserve = alloc_end - alloc_start;
3270 ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3271 bytes_to_reserve);
3272 if (ret < 0)
3273 goto out;
3274 space_reserved = true;
3275 ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
3276 &cached_state);
3277 if (ret)
3278 goto out;
3279 ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
3280 alloc_start, bytes_to_reserve);
3281 if (ret) {
3282 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
3283 lockend, &cached_state);
3284 goto out;
3285 }
3286 ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
3287 alloc_end - alloc_start,
3288 i_blocksize(inode),
3289 offset + len, &alloc_hint);
3290 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
3291 lockend, &cached_state);
3292 /* btrfs_prealloc_file_range releases reserved space on error */
3293 if (ret) {
3294 space_reserved = false;
3295 goto out;
3296 }
3297 }
3298 ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
3299 out:
3300 if (ret && space_reserved)
3301 btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
3302 alloc_start, bytes_to_reserve);
3303 extent_changeset_free(data_reserved);
3304
3305 return ret;
3306}
3307
3308static long btrfs_fallocate(struct file *file, int mode,
3309 loff_t offset, loff_t len)
3310{
3311 struct inode *inode = file_inode(file);
3312 struct extent_state *cached_state = NULL;
3313 struct extent_changeset *data_reserved = NULL;
3314 struct falloc_range *range;
3315 struct falloc_range *tmp;
3316 struct list_head reserve_list;
3317 u64 cur_offset;
3318 u64 last_byte;
3319 u64 alloc_start;
3320 u64 alloc_end;
3321 u64 alloc_hint = 0;
3322 u64 locked_end;
3323 u64 actual_end = 0;
3324 struct extent_map *em;
3325 int blocksize = btrfs_inode_sectorsize(BTRFS_I(inode));
3326 int ret;
3327
3328 /* Do not allow fallocate in ZONED mode */
3329 if (btrfs_is_zoned(btrfs_sb(inode->i_sb)))
3330 return -EOPNOTSUPP;
3331
3332 alloc_start = round_down(offset, blocksize);
3333 alloc_end = round_up(offset + len, blocksize);
3334 cur_offset = alloc_start;
3335
3336 /* Make sure we aren't being given some invalid mode */
3337 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3338 FALLOC_FL_ZERO_RANGE))
3339 return -EOPNOTSUPP;
3340
3341 if (mode & FALLOC_FL_PUNCH_HOLE)
3342 return btrfs_punch_hole(inode, offset, len);
3343
3344 /*
3345 * Only trigger disk allocation here, don't trigger qgroup reserve.
3346 *
3347 * Qgroup space is checked later.
3348 */
3349 if (!(mode & FALLOC_FL_ZERO_RANGE)) {
3350 ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3351 alloc_end - alloc_start);
3352 if (ret < 0)
3353 return ret;
3354 }
3355
3356 btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
3357
3358 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
3359 ret = inode_newsize_ok(inode, offset + len);
3360 if (ret)
3361 goto out;
3362 }
3363
3364 /*
3365 * TODO: Move these two operations after we have checked
3366 * accurate reserved space, or fallocate can still fail but
3367 * with the page truncated or the size expanded.
3368 *
3369 * But that's a minor problem and won't do much harm anyway.
3370 */
3371 if (alloc_start > inode->i_size) {
3372 ret = btrfs_cont_expand(BTRFS_I(inode), i_size_read(inode),
3373 alloc_start);
3374 if (ret)
3375 goto out;
3376 } else if (offset + len > inode->i_size) {
3377 /*
3378 * If we are fallocating from the end of the file onward we
3379 * need to zero out the end of the block if i_size lands in the
3380 * middle of a block.
3381 */
3382 ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
3383 if (ret)
3384 goto out;
3385 }
3386
3387 /*
3388 * wait for ordered IO before we have any locks. We'll loop again
3389 * below with the locks held.
3390 */
3391 ret = btrfs_wait_ordered_range(inode, alloc_start,
3392 alloc_end - alloc_start);
3393 if (ret)
3394 goto out;
3395
3396 if (mode & FALLOC_FL_ZERO_RANGE) {
3397 ret = btrfs_zero_range(inode, offset, len, mode);
3398 btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
3399 return ret;
3400 }
3401
3402 locked_end = alloc_end - 1;
3403 while (1) {
3404 struct btrfs_ordered_extent *ordered;
3405
3406 /* the extent lock is ordered inside the running
3407 * transaction
3408 */
3409 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
3410 locked_end, &cached_state);
3411 ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode),
3412 locked_end);
3413
3414 if (ordered &&
3415 ordered->file_offset + ordered->num_bytes > alloc_start &&
3416 ordered->file_offset < alloc_end) {
3417 btrfs_put_ordered_extent(ordered);
3418 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
3419 alloc_start, locked_end,
3420 &cached_state);
3421 /*
3422 * we can't wait on the range with the transaction
3423 * running or with the extent lock held
3424 */
3425 ret = btrfs_wait_ordered_range(inode, alloc_start,
3426 alloc_end - alloc_start);
3427 if (ret)
3428 goto out;
3429 } else {
3430 if (ordered)
3431 btrfs_put_ordered_extent(ordered);
3432 break;
3433 }
3434 }
3435
3436 /* First, check if we exceed the qgroup limit */
3437 INIT_LIST_HEAD(&reserve_list);
3438 while (cur_offset < alloc_end) {
3439 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
3440 alloc_end - cur_offset);
3441 if (IS_ERR(em)) {
3442 ret = PTR_ERR(em);
3443 break;
3444 }
3445 last_byte = min(extent_map_end(em), alloc_end);
3446 actual_end = min_t(u64, extent_map_end(em), offset + len);
3447 last_byte = ALIGN(last_byte, blocksize);
3448 if (em->block_start == EXTENT_MAP_HOLE ||
3449 (cur_offset >= inode->i_size &&
3450 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
3451 ret = add_falloc_range(&reserve_list, cur_offset,
3452 last_byte - cur_offset);
3453 if (ret < 0) {
3454 free_extent_map(em);
3455 break;
3456 }
3457 ret = btrfs_qgroup_reserve_data(BTRFS_I(inode),
3458 &data_reserved, cur_offset,
3459 last_byte - cur_offset);
3460 if (ret < 0) {
3461 cur_offset = last_byte;
3462 free_extent_map(em);
3463 break;
3464 }
3465 } else {
3466 /*
3467 * We do not need to reserve an unwritten extent for this
3468 * range; free the reserved data space first, otherwise
3469 * it'll result in a false ENOSPC error.
3470 */
3471 btrfs_free_reserved_data_space(BTRFS_I(inode),
3472 data_reserved, cur_offset,
3473 last_byte - cur_offset);
3474 }
3475 free_extent_map(em);
3476 cur_offset = last_byte;
3477 }
3478
3479 /*
3480 * If ret is still 0, it means we're OK to fallocate.
3481 * Otherwise just clean up the list and exit.
3482 */
3483 list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3484 if (!ret)
3485 ret = btrfs_prealloc_file_range(inode, mode,
3486 range->start,
3487 range->len, i_blocksize(inode),
3488 offset + len, &alloc_hint);
3489 else
3490 btrfs_free_reserved_data_space(BTRFS_I(inode),
3491 data_reserved, range->start,
3492 range->len);
3493 list_del(&range->list);
3494 kfree(range);
3495 }
3496 if (ret < 0)
3497 goto out_unlock;
3498
3499 /*
3500 * We didn't need to allocate any more space, but we still extended the
3501 * size of the file so we need to update i_size and the inode item.
3502 */
3503 ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
3504out_unlock:
3505 unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3506 &cached_state);
3507out:
3508 btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
3509 /* Let go of our reservation. */
3510 if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
3511 btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
3512 cur_offset, alloc_end - cur_offset);
3513 extent_changeset_free(data_reserved);
3514 return ret;
3515}
3516
3517static loff_t find_desired_extent(struct btrfs_inode *inode, loff_t offset,
3518 int whence)
3519{
3520 struct btrfs_fs_info *fs_info = inode->root->fs_info;
3521 struct extent_map *em = NULL;
3522 struct extent_state *cached_state = NULL;
3523 loff_t i_size = inode->vfs_inode.i_size;
3524 u64 lockstart;
3525 u64 lockend;
3526 u64 start;
3527 u64 len;
3528 int ret = 0;
3529
3530 if (i_size == 0 || offset >= i_size)
3531 return -ENXIO;
3532
3533 /*
3534 * offset can be negative; in this case we start finding DATA/HOLE from
3535 * the very start of the file.
3536 */
3537 start = max_t(loff_t, 0, offset);
3538
3539 lockstart = round_down(start, fs_info->sectorsize);
3540 lockend = round_up(i_size, fs_info->sectorsize);
3541 if (lockend <= lockstart)
3542 lockend = lockstart + fs_info->sectorsize;
3543 lockend--;
3544 len = lockend - lockstart + 1;
3545
3546 lock_extent_bits(&inode->io_tree, lockstart, lockend, &cached_state);
3547
3548 while (start < i_size) {
3549 em = btrfs_get_extent_fiemap(inode, start, len);
3550 if (IS_ERR(em)) {
3551 ret = PTR_ERR(em);
3552 em = NULL;
3553 break;
3554 }
3555
3556 if (whence == SEEK_HOLE &&
3557 (em->block_start == EXTENT_MAP_HOLE ||
3558 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3559 break;
3560 else if (whence == SEEK_DATA &&
3561 (em->block_start != EXTENT_MAP_HOLE &&
3562 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3563 break;
3564
3565 start = em->start + em->len;
3566 free_extent_map(em);
3567 em = NULL;
3568 cond_resched();
3569 }
3570 free_extent_map(em);
3571 unlock_extent_cached(&inode->io_tree, lockstart, lockend,
3572 &cached_state);
3573 if (ret) {
3574 offset = ret;
3575 } else {
3576 if (whence == SEEK_DATA && start >= i_size)
3577 offset = -ENXIO;
3578 else
3579 offset = min_t(loff_t, start, i_size);
3580 }
3581
3582 return offset;
3583}
3584
3585static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
3586{
3587 struct inode *inode = file->f_mapping->host;
3588
3589 switch (whence) {
3590 default:
3591 return generic_file_llseek(file, offset, whence);
3592 case SEEK_DATA:
3593 case SEEK_HOLE:
3594 btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
3595 offset = find_desired_extent(BTRFS_I(inode), offset, whence);
3596 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
3597 break;
3598 }
3599
3600 if (offset < 0)
3601 return offset;
3602
3603 return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3604}
3605
3606static int btrfs_file_open(struct inode *inode, struct file *filp)
3607{
3608 filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
3609 return generic_file_open(inode, filp);
3610}
3611
3612static int check_direct_read(struct btrfs_fs_info *fs_info,
3613 const struct iov_iter *iter, loff_t offset)
3614{
3615 int ret;
3616 int i, seg;
3617
3618 ret = check_direct_IO(fs_info, iter, offset);
3619 if (ret < 0)
3620 return ret;
3621
3622 if (!iter_is_iovec(iter))
3623 return 0;
3624
3625 for (seg = 0; seg < iter->nr_segs; seg++)
3626 for (i = seg + 1; i < iter->nr_segs; i++)
3627 if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
3628 return -EINVAL;
3629 return 0;
3630}
3631
3632static ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to)
3633{
3634 struct inode *inode = file_inode(iocb->ki_filp);
3635 ssize_t ret;
3636
3637 if (check_direct_read(btrfs_sb(inode->i_sb), to, iocb->ki_pos))
3638 return 0;
3639
3640 btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
3641 ret = iomap_dio_rw(iocb, to, &btrfs_dio_iomap_ops, &btrfs_dio_ops, 0);
3642 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
3643 return ret;
3644}
3645
3646static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
3647{
3648 ssize_t ret = 0;
3649
3650 if (iocb->ki_flags & IOCB_DIRECT) {
3651 ret = btrfs_direct_read(iocb, to);
3652 if (ret < 0 || !iov_iter_count(to) ||
3653 iocb->ki_pos >= i_size_read(file_inode(iocb->ki_filp)))
3654 return ret;
3655 }
3656
3657 return filemap_read(iocb, to, ret);
3658}
3659
3660const struct file_operations btrfs_file_operations = {
3661 .llseek = btrfs_file_llseek,
3662 .read_iter = btrfs_file_read_iter,
3663 .splice_read = generic_file_splice_read,
3664 .write_iter = btrfs_file_write_iter,
3665 .splice_write = iter_file_splice_write,
3666 .mmap = btrfs_file_mmap,
3667 .open = btrfs_file_open,
3668 .release = btrfs_release_file,
3669 .fsync = btrfs_sync_file,
3670 .fallocate = btrfs_fallocate,
3671 .unlocked_ioctl = btrfs_ioctl,
3672#ifdef CONFIG_COMPAT
3673 .compat_ioctl = btrfs_compat_ioctl,
3674#endif
3675 .remap_file_range = btrfs_remap_file_range,
3676};
3677
3678void __cold btrfs_auto_defrag_exit(void)
3679{
3680 kmem_cache_destroy(btrfs_inode_defrag_cachep);
3681}
3682
3683int __init btrfs_auto_defrag_init(void)
3684{
3685 btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
3686 sizeof(struct inode_defrag), 0,
3687 SLAB_MEM_SPREAD,
3688 NULL);
3689 if (!btrfs_inode_defrag_cachep)
3690 return -ENOMEM;
3691
3692 return 0;
3693}
3694
3695int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
3696{
3697 int ret;
3698
3699 /*
3700 * So with compression we will find and lock a dirty page and clear the
3701 * first one as dirty, setup an async extent, and immediately return
3702 * with the entire range locked but with nobody actually marked with
3703 * writeback. So we can't just filemap_write_and_wait_range() and
3704 * expect it to work since it will just kick off a thread to do the
3705 * actual work. So we need to call filemap_fdatawrite_range _again_
3706 * since it will wait on the page lock, which won't be unlocked until
3707 * after the pages have been marked as writeback and so we're good to go
3708 * from there. We have to do this otherwise we'll miss the ordered
3709 * extents and that results in badness. Please Josef, do not think you
3710 * know better and pull this out at some point in the future, it is
3711 * right and you are wrong.
3712 */
3713 ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3714 if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
3715 &BTRFS_I(inode)->runtime_flags))
3716 ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3717
3718 return ret;
3719}