// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include <linux/uio.h>
#include <linux/iversion.h>
#include <linux/fsverity.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
#include "compression.h"
#include "delalloc-space.h"
#include "reflink.h"
#include "subpage.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "file-item.h"
#include "ioctl.h"
#include "file.h"
#include "super.h"

/*
 * Simple helper to fault in pages and copy. This should go away and be
 * replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = offset_in_page(pos);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 */
		copied = copy_page_from_iter_atomic(page, offset, count, i);

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * If we get a partial write, we can end up with partially
		 * up to date pages. These add a lot of complexity, so make
		 * sure they don't happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall back to
		 * page at a time copies after we return 0.
		 */
		if (unlikely(copied < count)) {
			if (!PageUptodate(page)) {
				iov_iter_revert(i, copied);
				copied = 0;
			}
			if (!copied)
				break;
		}

		write_bytes -= copied;
		total_copied += copied;
		offset += copied;
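		/* Move on to the next prepared page once this one is full. */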
		if (offset == PAGE_SIZE) {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}

/*
 * Unlocks pages after btrfs_file_write is done with them.
 */
static void btrfs_drop_pages(struct btrfs_fs_info *fs_info,
			     struct page **pages, size_t num_pages,
			     u64 pos, u64 copied)
{
	size_t i;
	u64 block_start = round_down(pos, fs_info->sectorsize);
	u64 block_len = round_up(pos + copied, fs_info->sectorsize) - block_start;

	ASSERT(block_len <= U32_MAX);
	for (i = 0; i < num_pages; i++) {
		/*
		 * The "checked" page flag is some magic around finding pages
		 * that have been modified without going through
		 * btrfs_set_page_dirty, so clear it here. There should be no
		 * need to mark the pages accessed, as prepare_pages should
		 * have marked them accessed via find_or_create_page().
		 */
		btrfs_page_clamp_clear_checked(fs_info, pages[i], block_start,
					       block_len);
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
}

/*
 * After btrfs_copy_from_user(), update the following things for delalloc:
 * - Mark newly dirtied pages as DELALLOC in the io tree.
 *   Used to advise which range is to be written back.
 * - Mark modified pages as Uptodate/Dirty and not needing COW fixup
 * - Update inode size for past EOF write
 */
int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
		      size_t num_pages, loff_t pos, size_t write_bytes,
		      struct extent_state **cached, bool noreserve)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(&inode->vfs_inode);
	unsigned int extra_bits = 0;

	if (write_bytes == 0)
		return 0;

	if (noreserve)
		extra_bits |= EXTENT_NORESERVE;

	start_pos = round_down(pos, fs_info->sectorsize);
	num_bytes = round_up(write_bytes + pos - start_pos,
			     fs_info->sectorsize);
	ASSERT(num_bytes <= U32_MAX);

	end_of_last_block = start_pos + num_bytes - 1;

	/*
	 * The pages may have already been dirty, clear out old accounting so
	 * we can set things up properly
	 */
	clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 cached);

	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					extra_bits, cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];

		btrfs_page_clamp_set_uptodate(fs_info, p, start_pos, num_bytes);
		btrfs_page_clamp_clear_checked(fs_info, p, start_pos, num_bytes);
		btrfs_page_clamp_set_dirty(fs_info, p, start_pos, num_bytes);
	}

	/*
	 * We've only changed i_size in ram, and we haven't updated the disk
	 * i_size. There is no need to log the inode at this time.
	 */
	if (end_pos > isize)
		i_size_write(&inode->vfs_inode, end_pos);
	return 0;
}

/*
 * This is very complex, but the basic idea is to drop all extents in the
 * range start - end. hint_block is filled in with a block number that
 * would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split. Anything entirely inside the range
 * is deleted from the tree.
 *
 * Note: the VFS' inode number of bytes is not updated, it's up to the caller
 * to deal with that. We set the field 'bytes_found' of the arguments structure
 * with the number of allocated bytes found in the target range, so that the
 * caller can update the inode's number of bytes in an atomic way when
 * replacing extents in a range to avoid races with stat(2).
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_inode *inode,
		       struct btrfs_drop_extents_args *args)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = args->start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	u64 last_end = args->start;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs;
	int found = 0;
	struct btrfs_path *path = args->path;

	args->bytes_found = 0;
	args->extent_inserted = false;

	/* Must always have a path if ->replace_extent is true */
	ASSERT(!(args->replace_extent && !args->path));

	if (!path) {
		path = btrfs_alloc_path();
		if (!path) {
			ret = -ENOMEM;
			goto out;
		}
	}

	if (args->drop_cache)
		btrfs_drop_extent_map_range(inode, args->start, args->end - 1, false);

	if (args->start >= inode->disk_i_size && !args->replace_extent)
		modify_tree = 0;

	update_refs = (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID);
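	/*
	 * Walk all file extent items overlapping the target range and
	 * truncate, split or delete each one according to how it intersects
	 * the range (see the diagrams below).
	 */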
	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == args->start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid > ino)
			break;
		if (WARN_ON_ONCE(key.objectid < ino) ||
		    key.type < BTRFS_EXTENT_DATA_KEY) {
			ASSERT(del_nr == 0);
			path->slots[0]++;
			goto next_slot;
		}
		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= args->end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_ram_bytes(leaf, fi);
		} else {
			/* can't happen */
			BUG();
		}

		/*
		 * Don't skip extent items representing 0 byte lengths. They
		 * used to be created (bug) if, while punching holes, we hit an
		 * -ENOSPC condition. So if we find one here, just ensure we
		 * delete it, otherwise we would insert a new file extent item
		 * with the same key (offset) as that 0 bytes length file
		 * extent item in the call to setup_items_for_insert() later
		 * in this function.
		 */
		if (extent_end == key.offset && extent_end >= search_start) {
			last_end = extent_end;
			goto delete_extent_item;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		found = 1;
		search_start = max(key.offset, args->start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (args->start > key.offset && args->end < extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = args->start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							args->start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += args->start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - args->start);
			btrfs_mark_buffer_dirty(leaf);

			if (update_refs && disk_bytenr > 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_ADD_DELAYED_REF,
						disk_bytenr, num_bytes, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						new_key.objectid,
						args->start - extent_offset,
						0, false);
				ret = btrfs_inc_extent_ref(trans, &ref);
				if (ret) {
					btrfs_abort_transaction(trans, ret);
					break;
				}
			}
			key.offset = args->start;
		}
		/*
		 * From here on out we will have actually dropped something, so
		 * last_end can be updated.
		 */
		last_end = extent_end;

		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (args->start <= key.offset && args->end < extent_end) {
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = args->end;
			btrfs_set_item_key_safe(fs_info, path, &new_key);

			extent_offset += args->end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - args->end);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				args->bytes_found += args->end - key.offset;
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (args->start > key.offset && args->end >= extent_end) {
			BUG_ON(del_nr > 0);
			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				ret = -EOPNOTSUPP;
				break;
			}

			btrfs_set_file_extent_num_bytes(leaf, fi,
							args->start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				args->bytes_found += extent_end - args->start;
			if (args->end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (args->start <= key.offset && args->end >= extent_end) {
delete_extent_item:
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				args->bytes_found += extent_end - key.offset;
				extent_end = ALIGN(extent_end,
						   fs_info->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_DROP_DELAYED_REF,
						disk_bytenr, num_bytes, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						key.objectid,
						key.offset - extent_offset, 0,
						false);
				ret = btrfs_free_extent(trans, &ref);
				if (ret) {
					btrfs_abort_transaction(trans, ret);
					break;
				}
				args->bytes_found += extent_end - key.offset;
			}

			if (args->end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG();
	}

	if (!ret && del_nr > 0) {
		/*
		 * Set path->slots[0] to first slot, so that after the delete
		 * if items are moved off from our leaf to its immediate left
		 * or right neighbor leafs, we end up with a correct and
		 * adjusted path->slots[0] for our insertion (if
		 * args->replace_extent).
		 */
		path->slots[0] = del_slot;
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, ret);
	}

	leaf = path->nodes[0];
	/*
	 * If btrfs_del_items() was called, it might have deleted a leaf, in
	 * which case it unlocked our path, so check path->locks[0] matches a
	 * write lock.
	 */
	if (!ret && args->replace_extent &&
	    path->locks[0] == BTRFS_WRITE_LOCK &&
	    btrfs_leaf_free_space(leaf) >=
	    sizeof(struct btrfs_item) + args->extent_item_size) {

		key.objectid = ino;
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = args->start;
		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
			struct btrfs_key slot_key;

			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
				path->slots[0]++;
		}
		btrfs_setup_item_for_insert(root, path, &key, args->extent_item_size);
		args->extent_inserted = true;
	}

	if (!args->path)
		btrfs_free_path(path);
	else if (!args->extent_inserted)
		btrfs_release_path(path);
out:
	args->drop_end = found ? min(args->end, last_end) : args->end;

	return ret;
}

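/*
 * Helper for btrfs_mark_extent_written(): check if the regular file extent
 * item at @slot points to the same physical extent (@bytenr) at a contiguous
 * offset (relative to @orig_offset), with no compression or encryption, and
 * if so return the file range it covers through @start and @end.
 */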
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret = 0;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != ino ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (key.offset > start || extent_end < end) {
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(fs_info, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(fs_info, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

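	/*
	 * No merge with an adjacent extent item was possible, so split the
	 * extent item (once or twice) until the item at path->slots[0]
	 * covers exactly the range [start, end).
	 */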
	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
				       num_bytes, 0);
		btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
				    orig_offset, 0, false);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		if (split == start) {
			key.offset = start;
		} else {
			if (start != key.offset) {
				ret = -EINVAL;
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
			       num_bytes, 0);
	btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset,
			    0, false);
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * On error we return an unlocked page and the error value; on success we
 * return a locked page and 0.
 */
static int prepare_uptodate_page(struct inode *inode,
				 struct page *page, u64 pos,
				 bool force_uptodate)
{
	struct folio *folio = page_folio(page);
	int ret = 0;

	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_read_folio(NULL, folio);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}

		/*
		 * Since btrfs_read_folio() will unlock the folio before it
		 * returns, there is a window where btrfs_release_folio() can
		 * be called to release the page. Here we check both the inode
		 * mapping and PagePrivate() to make sure the page was not
		 * released.
		 *
		 * The private flag check is essential for subpage as we need
		 * to store extra bitmap using page->private.
		 */
		if (page->mapping != inode->i_mapping || !PagePrivate(page)) {
			unlock_page(page);
			return -EAGAIN;
		}
	}
	return 0;
}

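/*
 * Page cache lookup flags for prepare_pages(): always lock the page, mark
 * it accessed and create it if missing; in nowait mode also pass FGP_NOWAIT
 * so the lookup never blocks.
 */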
static unsigned int get_prepare_fgp_flags(bool nowait)
{
	unsigned int fgp_flags = FGP_LOCK | FGP_ACCESSED | FGP_CREAT;

	if (nowait)
		fgp_flags |= FGP_NOWAIT;

	return fgp_flags;
}

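/*
 * GFP mask for page allocations in prepare_pages(): derived from the
 * mapping's write mask, with direct reclaim disabled in nowait mode so
 * that allocations fail fast instead of blocking.
 */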
static gfp_t get_prepare_gfp_flags(struct inode *inode, bool nowait)
{
	gfp_t gfp;

	gfp = btrfs_alloc_write_mask(inode->i_mapping);
	if (nowait) {
		gfp &= ~__GFP_DIRECT_RECLAIM;
		gfp |= GFP_NOWAIT;
	}

	return gfp;
}

/*
 * This just gets pages into the page cache and locks them down.
 */
static noinline int prepare_pages(struct inode *inode, struct page **pages,
				  size_t num_pages, loff_t pos,
				  size_t write_bytes, bool force_uptodate,
				  bool nowait)
{
	int i;
	unsigned long index = pos >> PAGE_SHIFT;
	gfp_t mask = get_prepare_gfp_flags(inode, nowait);
	unsigned int fgp_flags = get_prepare_fgp_flags(nowait);
	int err = 0;
	int faili;

	for (i = 0; i < num_pages; i++) {
again:
		pages[i] = pagecache_get_page(inode->i_mapping, index + i,
					      fgp_flags, mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			if (nowait)
				err = -EAGAIN;
			else
				err = -ENOMEM;
			goto fail;
		}

		err = set_page_extent_mapped(pages[i]);
		if (err < 0) {
			faili = i;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(inode, pages[i], pos,
						    force_uptodate);
		if (!err && i == num_pages - 1)
			err = prepare_uptodate_page(inode, pages[i],
						    pos + write_bytes, false);
		if (err) {
			put_page(pages[i]);
			if (!nowait && err == -EAGAIN) {
				err = 0;
				goto again;
			}
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}

	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		put_page(pages[faili]);
		faili--;
	}
	return err;
}

/*
 * This function locks the extent and properly waits for data=ordered extents
 * to finish before allowing the pages to be modified if needed.
 *
 * The return value:
 * 1 - the extent is locked
 * 0 - the extent is not locked, and everything is OK
 * -EAGAIN - need to re-prepare the pages
 * any other < 0 value - something went wrong
 */
static noinline int
lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
				size_t num_pages, loff_t pos,
				size_t write_bytes,
				u64 *lockstart, u64 *lockend, bool nowait,
				struct extent_state **cached_state)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 start_pos;
	u64 last_pos;
	int i;
	int ret = 0;

	start_pos = round_down(pos, fs_info->sectorsize);
	last_pos = round_up(pos + write_bytes, fs_info->sectorsize) - 1;

	if (start_pos < inode->vfs_inode.i_size) {
		struct btrfs_ordered_extent *ordered;

		if (nowait) {
			if (!try_lock_extent(&inode->io_tree, start_pos, last_pos,
					     cached_state)) {
				for (i = 0; i < num_pages; i++) {
					unlock_page(pages[i]);
					put_page(pages[i]);
					pages[i] = NULL;
				}

				return -EAGAIN;
			}
		} else {
			lock_extent(&inode->io_tree, start_pos, last_pos, cached_state);
		}

		ordered = btrfs_lookup_ordered_range(inode, start_pos,
						     last_pos - start_pos + 1);
		if (ordered &&
		    ordered->file_offset + ordered->num_bytes > start_pos &&
		    ordered->file_offset <= last_pos) {
			unlock_extent(&inode->io_tree, start_pos, last_pos,
				      cached_state);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				put_page(pages[i]);
			}
			btrfs_start_ordered_extent(ordered, 1);
			btrfs_put_ordered_extent(ordered);
			return -EAGAIN;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		*lockstart = start_pos;
		*lockend = last_pos;
		ret = 1;
	}

	/*
	 * We should be called after prepare_pages() which should have locked
	 * all pages in the range.
	 */
	for (i = 0; i < num_pages; i++)
		WARN_ON(!PageLocked(pages[i]));

	return ret;
}

/*
 * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
 *
 * @pos:         File offset.
 * @write_bytes: The length to write, will be updated to the nocow writeable
 *               range.
 *
 * This function will flush ordered extents in the range to ensure proper
 * nocow checks.
 *
 * Return:
 * > 0          If we can nocow, and updates @write_bytes.
 * 0            If we can't do a nocow write.
 * -EAGAIN      If we can't do a nocow write because snapshotting of the
 *              inode's root is in progress.
 * < 0          If an error happened.
 *
 * NOTE: Callers need to call btrfs_check_nocow_unlock() if we return > 0.
 */
int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
			   size_t *write_bytes, bool nowait)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_state *cached_state = NULL;
	u64 lockstart, lockend;
	u64 num_bytes;
	int ret;

	if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
		return 0;

	if (!btrfs_drew_try_write_lock(&root->snapshot_lock))
		return -EAGAIN;

	lockstart = round_down(pos, fs_info->sectorsize);
	lockend = round_up(pos + *write_bytes,
			   fs_info->sectorsize) - 1;
	num_bytes = lockend - lockstart + 1;

	if (nowait) {
		if (!btrfs_try_lock_ordered_range(inode, lockstart, lockend,
						  &cached_state)) {
			btrfs_drew_write_unlock(&root->snapshot_lock);
			return -EAGAIN;
		}
	} else {
		btrfs_lock_and_flush_ordered_range(inode, lockstart, lockend,
						   &cached_state);
	}
	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
			       NULL, NULL, NULL, nowait, false);
	if (ret <= 0)
		btrfs_drew_write_unlock(&root->snapshot_lock);
	else
		*write_bytes = min_t(size_t, *write_bytes,
				     num_bytes - pos + lockstart);
	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);

	return ret;
}

void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
{
	btrfs_drew_write_unlock(&inode->root->snapshot_lock);
}

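/*
 * Update mtime/ctime (and bump the inode version) for a write, unless the
 * inode is flagged to not have its c/mtime updated (NOCMTIME).
 */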
static void update_time_for_write(struct inode *inode)
{
	struct timespec64 now;

	if (IS_NOCMTIME(inode))
		return;

	now = current_time(inode);
	if (!timespec64_equal(&inode->i_mtime, &now))
		inode->i_mtime = now;

	if (!timespec64_equal(&inode->i_ctime, &now))
		inode->i_ctime = now;

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);
}

static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
			     size_t count)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	loff_t pos = iocb->ki_pos;
	int ret;
	loff_t oldsize;
	loff_t start_pos;

	/*
	 * Quickly bail out on NOWAIT writes if we don't have the nodatacow or
	 * prealloc flags, as without those flags we always have to COW. We will
	 * later check if we can really NOCOW write into the target range (using
	 * can_nocow_extent() at btrfs_get_blocks_direct_write()).
	 */
	if ((iocb->ki_flags & IOCB_NOWAIT) &&
	    !(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
		return -EAGAIN;

	current->backing_dev_info = inode_to_bdi(inode);
	ret = file_remove_privs(file);
	if (ret)
		return ret;

	/*
	 * We reserve space for updating the inode when we reserve space for the
	 * extent we are going to write, so we will enospc out there. We don't
	 * need to start yet another transaction to update the inode as we will
	 * update the inode when we finish writing whatever data we write.
	 */
	update_time_for_write(inode);

	start_pos = round_down(pos, fs_info->sectorsize);
	oldsize = i_size_read(inode);
	if (start_pos > oldsize) {
		/* Expand hole size to cover write data, preventing empty gap */
		loff_t end_pos = round_up(pos + count, fs_info->sectorsize);

		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, end_pos);
		if (ret) {
			current->backing_dev_info = NULL;
			return ret;
		}
	}

	return 0;
}

static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
					     struct iov_iter *i)
{
	struct file *file = iocb->ki_filp;
	loff_t pos;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct page **pages = NULL;
	struct extent_changeset *data_reserved = NULL;
	u64 release_bytes = 0;
	u64 lockstart;
	u64 lockend;
	size_t num_written = 0;
	int nrptrs;
	ssize_t ret;
	bool only_release_metadata = false;
	bool force_page_uptodate = false;
	loff_t old_isize = i_size_read(inode);
	unsigned int ilock_flags = 0;
	const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
	unsigned int bdp_flags = (nowait ? BDP_ASYNC : 0);

	if (nowait)
		ilock_flags |= BTRFS_ILOCK_TRY;

	ret = btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
	if (ret < 0)
		return ret;

	ret = generic_write_checks(iocb, i);
	if (ret <= 0)
		goto out;

	ret = btrfs_write_check(iocb, i, ret);
	if (ret < 0)
		goto out;

	pos = iocb->ki_pos;
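	/*
	 * Size the pages array to cover the whole write, capped at one page
	 * worth of page pointers and the process' remaining dirty page
	 * allowance, with a minimum of 8 entries.
	 */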
	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
		     PAGE_SIZE / (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	while (iov_iter_count(i) > 0) {
		struct extent_state *cached_state = NULL;
		size_t offset = offset_in_page(pos);
		size_t sector_offset;
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_SIZE -
					 offset);
		size_t num_pages;
		size_t reserve_bytes;
		size_t dirty_pages;
		size_t copied;
		size_t dirty_sectors;
		size_t num_sectors;
		int extents_locked;

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(fault_in_iov_iter_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		only_release_metadata = false;
		sector_offset = pos & (fs_info->sectorsize - 1);

		extent_changeset_release(data_reserved);
		ret = btrfs_check_data_free_space(BTRFS_I(inode),
						  &data_reserved, pos,
						  write_bytes, nowait);
		if (ret < 0) {
			int can_nocow;

			if (nowait && (ret == -ENOSPC || ret == -EAGAIN)) {
				ret = -EAGAIN;
				break;
			}

			/*
			 * If we don't have to COW at the offset, reserve
			 * metadata only. write_bytes may get smaller than
			 * requested here.
			 */
			can_nocow = btrfs_check_nocow_lock(BTRFS_I(inode), pos,
							   &write_bytes, nowait);
			if (can_nocow < 0)
				ret = can_nocow;
			if (can_nocow > 0)
				ret = 0;
			if (ret)
				break;
			only_release_metadata = true;
		}

		num_pages = DIV_ROUND_UP(write_bytes + offset, PAGE_SIZE);
		WARN_ON(num_pages > nrptrs);
		reserve_bytes = round_up(write_bytes + sector_offset,
					 fs_info->sectorsize);
		WARN_ON(reserve_bytes == 0);
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
						      reserve_bytes,
						      reserve_bytes, nowait);
		if (ret) {
			if (!only_release_metadata)
				btrfs_free_reserved_data_space(BTRFS_I(inode),
						data_reserved, pos,
						write_bytes);
			else
				btrfs_check_nocow_unlock(BTRFS_I(inode));

			if (nowait && ret == -ENOSPC)
				ret = -EAGAIN;
			break;
		}

		release_bytes = reserve_bytes;
again:
		ret = balance_dirty_pages_ratelimited_flags(inode->i_mapping, bdp_flags);
		if (ret) {
			btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
			break;
		}

		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(inode, pages, num_pages,
				    pos, write_bytes, force_page_uptodate, false);
		if (ret) {
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       reserve_bytes);
			break;
		}

		extents_locked = lock_and_cleanup_extent_if_need(
				BTRFS_I(inode), pages,
				num_pages, pos, write_bytes, &lockstart,
				&lockend, nowait, &cached_state);
		if (extents_locked < 0) {
			if (!nowait && extents_locked == -EAGAIN)
				goto again;

			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       reserve_bytes);
			ret = extents_locked;
			break;
		}

		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);

		num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
		dirty_sectors = round_up(copied + sector_offset,
					 fs_info->sectorsize);
		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);

		/*
		 * If we have trouble faulting in the pages, fall back to one
		 * page at a time.
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_sectors = 0;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = DIV_ROUND_UP(copied + offset,
						   PAGE_SIZE);
		}

		if (num_sectors > dirty_sectors) {
			/* release everything except the sectors we dirtied */
			release_bytes -= dirty_sectors << fs_info->sectorsize_bits;
			if (only_release_metadata) {
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
							release_bytes, true);
			} else {
				u64 __pos;

				__pos = round_down(pos,
						   fs_info->sectorsize) +
					(dirty_pages << PAGE_SHIFT);
				btrfs_delalloc_release_space(BTRFS_I(inode),
						data_reserved, __pos,
						release_bytes, true);
			}
		}

		release_bytes = round_up(copied + sector_offset,
					 fs_info->sectorsize);

		ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
					dirty_pages, pos, copied,
					&cached_state, only_release_metadata);

		/*
		 * If we have not locked the extent range, because the range's
		 * start offset is >= i_size, we might still have a non-NULL
		 * cached extent state, acquired while marking the extent range
		 * as delalloc through btrfs_dirty_pages(). Therefore free any
		 * possible cached extent state to avoid a memory leak.
		 */
		if (extents_locked)
			unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
				      lockend, &cached_state);
		else
			free_extent_state(cached_state);

		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
		if (ret) {
			btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
			break;
		}

		release_bytes = 0;
		if (only_release_metadata)
			btrfs_check_nocow_unlock(BTRFS_I(inode));

		btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);

		cond_resched();

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	if (release_bytes) {
		if (only_release_metadata) {
			btrfs_check_nocow_unlock(BTRFS_I(inode));
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
					release_bytes, true);
		} else {
			btrfs_delalloc_release_space(BTRFS_I(inode),
					data_reserved,
					round_down(pos, fs_info->sectorsize),
					release_bytes, true);
		}
	}

	extent_changeset_free(data_reserved);
	if (num_written > 0) {
		pagecache_isize_extended(inode, old_isize, iocb->ki_pos);
		iocb->ki_pos += num_written;
	}
out:
	btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
	return num_written ? num_written : ret;
}

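/*
 * Direct IO on btrfs requires sector alignment: both the file offset and
 * the memory alignment of the iov entries must be multiples of the sector
 * size, otherwise the caller falls back to buffered IO.
 */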
static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
			       const struct iov_iter *iter, loff_t offset)
{
	const u32 blocksize_mask = fs_info->sectorsize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	loff_t pos;
	ssize_t written = 0;
	ssize_t written_buffered;
	size_t prev_left = 0;
	loff_t endbyte;
	ssize_t err;
	unsigned int ilock_flags = 0;
	struct iomap_dio *dio;

	if (iocb->ki_flags & IOCB_NOWAIT)
		ilock_flags |= BTRFS_ILOCK_TRY;

	/* If the write DIO is within EOF, use a shared lock */
	if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode))
		ilock_flags |= BTRFS_ILOCK_SHARED;

relock:
	err = btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
	if (err < 0)
		return err;

	err = generic_write_checks(iocb, from);
	if (err <= 0) {
		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
		return err;
	}

	err = btrfs_write_check(iocb, from, err);
	if (err < 0) {
		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
		goto out;
	}

	pos = iocb->ki_pos;
	/*
	 * Re-check since file size may have changed just before taking the
	 * lock or pos may have changed because of O_APPEND in
	 * generic_write_checks().
	 */
	if ((ilock_flags & BTRFS_ILOCK_SHARED) &&
	    pos + iov_iter_count(from) > i_size_read(inode)) {
		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
		ilock_flags &= ~BTRFS_ILOCK_SHARED;
		goto relock;
	}

	if (check_direct_IO(fs_info, from, pos)) {
		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
		goto buffered;
	}

	/*
	 * The iov_iter can be mapped to the same file range we are writing to.
	 * If that's the case, then we will deadlock in the iomap code, because
	 * it first calls our callback btrfs_dio_iomap_begin(), which will create
	 * an ordered extent, and after that it will fault in the pages that the
	 * iov_iter refers to. During the fault in we end up in the readahead
	 * pages code (starting at btrfs_readahead()), which will lock the range,
	 * find that ordered extent and then wait for it to complete (at
	 * btrfs_lock_and_flush_ordered_range()), resulting in a deadlock since
	 * obviously the ordered extent can never complete as we didn't submit
	 * yet the respective bio(s). This always happens when the buffer is
	 * memory mapped to the same file range, since the iomap DIO code always
	 * invalidates pages in the target file range (after starting and waiting
	 * for any writeback).
	 *
	 * So here we disable page faults in the iov_iter and then retry if we
	 * got -EFAULT, faulting in the pages before the retry.
	 */
	from->nofault = true;
	dio = btrfs_dio_write(iocb, from, written);
	from->nofault = false;

	/*
	 * iomap_dio_complete() will call btrfs_sync_file() if we have a dsync
	 * iocb, and that needs to lock the inode. So unlock it before calling
	 * iomap_dio_complete() to avoid a deadlock.
	 */
	btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);

	if (IS_ERR_OR_NULL(dio))
		err = PTR_ERR_OR_ZERO(dio);
	else
		err = iomap_dio_complete(dio);

	/* No increment (+=) because iomap returns a cumulative value. */
	if (err > 0)
		written = err;

	if (iov_iter_count(from) > 0 && (err == -EFAULT || err > 0)) {
		const size_t left = iov_iter_count(from);
		/*
		 * We have more data left to write. Try to fault in as many as
		 * possible of the remainder pages and retry. We do this without
		 * releasing and locking again the inode, to prevent races with
		 * truncate.
		 *
		 * Also, in case the iov refers to pages in the file range of the
		 * file we want to write to (due to a mmap), we could enter an
		 * infinite loop if we retry after faulting the pages in, since
		 * iomap will invalidate any pages in the range early on, before
		 * it tries to fault in the pages of the iov. So we keep track of
		 * how much was left of iov in the previous EFAULT and fallback
		 * to buffered IO in case we haven't made any progress.
		 */
		if (left == prev_left) {
			err = -ENOTBLK;
		} else {
			fault_in_iov_iter_readable(from, left);
			prev_left = left;
			goto relock;
		}
	}

	/*
	 * If 'err' is -ENOTBLK or we have not written all data, then it means
	 * we must fallback to buffered IO.
	 */
	if ((err < 0 && err != -ENOTBLK) || !iov_iter_count(from))
		goto out;

buffered:
	/*
	 * If we are in a NOWAIT context, then return -EAGAIN to signal the caller
	 * it must retry the operation in a context where blocking is acceptable,
	 * because even if we end up not blocking during the buffered IO attempt
	 * below, we will block when flushing and waiting for the IO.
	 */
	if (iocb->ki_flags & IOCB_NOWAIT) {
		err = -EAGAIN;
		goto out;
	}

	pos = iocb->ki_pos;
	written_buffered = btrfs_buffered_write(iocb, from);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	/*
	 * Ensure all data is persisted. We want the next direct IO read to be
	 * able to read what was just written.
	 */
	endbyte = pos + written_buffered - 1;
	err = btrfs_fdatawrite_range(inode, pos, endbyte);
	if (err)
		goto out;
	err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	iocb->ki_pos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
				 endbyte >> PAGE_SHIFT);
out:
	return err < 0 ? err : written;
}

static ssize_t btrfs_encoded_write(struct kiocb *iocb, struct iov_iter *from,
			const struct btrfs_ioctl_encoded_io_args *encoded)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	loff_t count;
	ssize_t ret;

	btrfs_inode_lock(BTRFS_I(inode), 0);
	count = encoded->len;
	ret = generic_write_checks_count(iocb, &count);
	if (ret == 0 && count != encoded->len) {
		/*
		 * The write got truncated by generic_write_checks_count(). We
		 * can't do a partial encoded write.
		 */
		ret = -EFBIG;
	}
	if (ret || encoded->len == 0)
		goto out;

	ret = btrfs_write_check(iocb, from, encoded->len);
	if (ret < 0)
		goto out;

	ret = btrfs_do_encoded_write(iocb, from, encoded);
out:
	btrfs_inode_unlock(BTRFS_I(inode), 0);
	return ret;
}

ssize_t btrfs_do_write_iter(struct kiocb *iocb, struct iov_iter *from,
			    const struct btrfs_ioctl_encoded_io_args *encoded)
{
	struct file *file = iocb->ki_filp;
	struct btrfs_inode *inode = BTRFS_I(file_inode(file));
	ssize_t num_written, num_sync;
	const bool sync = iocb_is_dsync(iocb);

	/*
	 * If the fs flips readonly due to some impossible error, although we
	 * have opened a file as writable, we have to stop this write operation
	 * to ensure consistency.
	 */
	if (BTRFS_FS_ERROR(inode->root->fs_info))
		return -EROFS;

	if (encoded && (iocb->ki_flags & IOCB_NOWAIT))
		return -EOPNOTSUPP;

	if (sync)
		atomic_inc(&inode->sync_writers);

	if (encoded) {
		num_written = btrfs_encoded_write(iocb, from, encoded);
		num_sync = encoded->len;
	} else if (iocb->ki_flags & IOCB_DIRECT) {
		num_written = btrfs_direct_write(iocb, from);
		num_sync = num_written;
	} else {
		num_written = btrfs_buffered_write(iocb, from);
		num_sync = num_written;
	}

	btrfs_set_inode_last_sub_trans(inode);

	if (num_sync > 0) {
		num_sync = generic_write_sync(iocb, num_sync);
		if (num_sync < 0)
			num_written = num_sync;
	}

	if (sync)
		atomic_dec(&inode->sync_writers);

	current->backing_dev_info = NULL;
	return num_written;
}

static ssize_t btrfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	return btrfs_do_write_iter(iocb, from, NULL);
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	struct btrfs_file_private *private = filp->private_data;

	if (private) {
		kfree(private->filldir_buf);
		free_extent_state(private->llseek_cached_state);
		kfree(private);
		filp->private_data = NULL;
	}

	/*
	 * Set by setattr when we are about to truncate a file from a non-zero
	 * size to a zero size. This tries to flush down new bytes that may
	 * have been written if the application were using truncate to replace
	 * a file in place.
	 */
	if (test_and_clear_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
			       &BTRFS_I(inode)->runtime_flags))
		filemap_flush(inode->i_mapping);
	return 0;
}

static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
{
	int ret;
	struct blk_plug plug;

	/*
	 * This is only called in fsync, which would do synchronous writes, so
	 * a plug can merge adjacent IOs as much as possible. Esp. in case of
	 * multiple disks using raid profile, a large IO can be split to
	 * several segments of stripe length (currently 64K).
	 */
	blk_start_plug(&plug);
	atomic_inc(&BTRFS_I(inode)->sync_writers);
	ret = btrfs_fdatawrite_range(inode, start, end);
	atomic_dec(&BTRFS_I(inode)->sync_writers);
	blk_finish_plug(&plug);

	return ret;
}

static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
{
	struct btrfs_inode *inode = BTRFS_I(ctx->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

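	/*
	 * The inode was fully logged in the current transaction and has no
	 * pending ordered extents, so there is nothing new to log.
	 */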
	if (btrfs_inode_in_log(inode, fs_info->generation) &&
	    list_empty(&ctx->ordered_extents))
		return true;

	/*
	 * If we are doing a fast fsync we can not bail out if the inode's
	 * last_trans is <= the last committed transaction, because we only
	 * update the last_trans of the inode during ordered extent completion,
	 * and for a fast fsync we don't wait for that, we only wait for the
	 * writeback to complete.
	 */
	if (inode->last_trans <= fs_info->last_trans_committed &&
	    (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
	     list_empty(&ctx->ordered_extents)))
		return true;

	return false;
}

/*
 * fsync call for both files and directories. This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit. This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_log_ctx ctx;
	int ret = 0, err;
	u64 len;
	bool full_sync;

	trace_btrfs_sync_file(file, datasync);

	btrfs_init_log_ctx(&ctx, inode);

	/*
	 * Always set the range to a full range, otherwise we can get into
	 * several problems, from missing file extent items to represent holes
	 * when not using the NO_HOLES feature, to log tree corruption due to
	 * races between hole detection during logging and completion of ordered
	 * extents outside the range, to missing checksums due to ordered extents
	 * for which we flushed only a subset of their pages.
	 */
	start = 0;
	end = LLONG_MAX;
	len = (u64)LLONG_MAX + 1;

	/*
	 * We write the dirty pages in the range and wait until they complete
	 * outside of the ->i_mutex, so the dirty pages can be flushed by
	 * multiple tasks, which improves performance. See
	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
	 */
	ret = start_ordered_ops(inode, start, end);
	if (ret)
		goto out;

1817 btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
1818
1819 atomic_inc(&root->log_batch);
1820
1821 /*
1822 * Before we acquired the inode's lock and the mmap lock, someone may
1823 * have dirtied more pages in the target range. We need to make sure
1824 * that writeback for any such pages does not start while we are logging
1825 * the inode, because if it does, any of the following might happen when
1826 * we are not doing a full inode sync:
1827 *
1828 * 1) We log an extent after its writeback finishes but before its
1829 * checksums are added to the csum tree, leading to -EIO errors
1830 * when attempting to read the extent after a log replay.
1831 *
1832 * 2) We can end up logging an extent before its writeback finishes.
1833 * Therefore after the log replay we will have a file extent item
1834 * pointing to an unwritten extent (and no data checksums as well).
1835 *
1836 * So trigger writeback for any eventual new dirty pages and then we
1837 * wait for all ordered extents to complete below.
1838 */
1839 ret = start_ordered_ops(inode, start, end);
1840 if (ret) {
1841 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
1842 goto out;
1843 }
1844
1845 /*
1846 * Always check for the full sync flag while holding the inode's lock,
1847 * to avoid races with other tasks. The flag must be either set all the
1848 * time during logging or always off all the time while logging.
1849 * We check the flag here after starting delalloc above, because when
1850 * running delalloc the full sync flag may be set if we need to drop
1851 * extra extent map ranges due to temporary memory allocation failures.
1852 */
1853 full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1854 &BTRFS_I(inode)->runtime_flags);
1855
1856 /*
1857 * We have to do this here to avoid the priority inversion of waiting on
1858 * IO of a lower priority task while holding a transaction open.
1859 *
1860 * For a full fsync we wait for the ordered extents to complete while
1861 * for a fast fsync we wait just for writeback to complete, and then
1862 * attach the ordered extents to the transaction so that a transaction
1863 * commit waits for their completion, to avoid data loss if we fsync,
1864 * the current transaction commits before the ordered extents complete
1865 * and a power failure happens right after that.
1866 *
1867 * For zoned filesystem, if a write IO uses a ZONE_APPEND command, the
1868 * logical address recorded in the ordered extent may change. We need
1869 * to wait for the IO to stabilize the logical address.
1870 */
1871 if (full_sync || btrfs_is_zoned(fs_info)) {
1872 ret = btrfs_wait_ordered_range(inode, start, len);
1873 } else {
1874 /*
1875 * Get our ordered extents as soon as possible to avoid doing
1876 * checksum lookups in the csum tree, and use instead the
1877 * checksums attached to the ordered extents.
1878 */
1879 btrfs_get_ordered_extents_for_logging(BTRFS_I(inode),
1880 &ctx.ordered_extents);
1881 ret = filemap_fdatawait_range(inode->i_mapping, start, end);
1882 }
1883
1884 if (ret)
1885 goto out_release_extents;
1886
1887 atomic_inc(&root->log_batch);
1888
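	/*
	 * Full memory barrier: presumably this pairs with updates to the
	 * inode's logging related state, so that skip_inode_logging() below
	 * sees their most recent values.
	 */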
1889 smp_mb();
1890 if (skip_inode_logging(&ctx)) {
1891 /*
1892 * We've had everything committed since the last time we were
1893 * modified so clear this flag in case it was set for whatever
1894 * reason, it's no longer relevant.
1895 */
1896 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1897 &BTRFS_I(inode)->runtime_flags);
1898 /*
1899 * An ordered extent might have started before and completed
1900 * already with io errors, in which case the inode was not
1901 * updated and we end up here. So check the inode's mapping
		 * for any errors that might have happened since fsync was
		 * last called.
1904 */
1905 ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
1906 goto out_release_extents;
1907 }
1908
1909 /*
1910 * We use start here because we will need to wait on the IO to complete
1911 * in btrfs_sync_log, which could require joining a transaction (for
1912 * example checking cross references in the nocow path). If we use join
1913 * here we could get into a situation where we're waiting on IO to
1914 * happen that is blocked on a transaction trying to commit. With start
1915 * we inc the extwriter counter, so we wait for all extwriters to exit
1916 * before we start blocking joiners. This comment is to keep somebody
1917 * from thinking they are super smart and changing this to
1918 * btrfs_join_transaction *cough*Josef*cough*.
1919 */
1920 trans = btrfs_start_transaction(root, 0);
1921 if (IS_ERR(trans)) {
1922 ret = PTR_ERR(trans);
1923 goto out_release_extents;
1924 }
1925 trans->in_fsync = true;
1926
1927 ret = btrfs_log_dentry_safe(trans, dentry, &ctx);
1928 btrfs_release_log_ctx_extents(&ctx);
1929 if (ret < 0) {
1930 /* Fallthrough and commit/free transaction. */
1931 ret = BTRFS_LOG_FORCE_COMMIT;
1932 }
1933
	/*
	 * We've logged all the items and now have a consistent version of the
	 * file in the log. It is possible that someone will come in and
	 * modify the file, but that's fine because the log is consistent on
	 * disk, and we have references to all of the file's extents.
	 *
	 * It is possible that someone will come in and log the file again,
	 * but that will end up using the synchronization inside
	 * btrfs_sync_log to keep things safe.
	 */
1944 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
1945
1946 if (ret == BTRFS_NO_LOG_SYNC) {
1947 ret = btrfs_end_transaction(trans);
1948 goto out;
1949 }
1950
1951 /* We successfully logged the inode, attempt to sync the log. */
1952 if (!ret) {
1953 ret = btrfs_sync_log(trans, root, &ctx);
1954 if (!ret) {
1955 ret = btrfs_end_transaction(trans);
1956 goto out;
1957 }
1958 }
1959
1960 /*
1961 * At this point we need to commit the transaction because we had
1962 * btrfs_need_log_full_commit() or some other error.
1963 *
1964 * If we didn't do a full sync we have to stop the trans handle, wait on
1965 * the ordered extents, start it again and commit the transaction. If
1966 * we attempt to wait on the ordered extents here we could deadlock with
1967 * something like fallocate() that is holding the extent lock trying to
1968 * start a transaction while some other thread is trying to commit the
1969 * transaction while we (fsync) are currently holding the transaction
1970 * open.
1971 */
1972 if (!full_sync) {
1973 ret = btrfs_end_transaction(trans);
1974 if (ret)
1975 goto out;
1976 ret = btrfs_wait_ordered_range(inode, start, len);
1977 if (ret)
1978 goto out;
1979
1980 /*
1981 * This is safe to use here because we're only interested in
1982 * making sure the transaction that had the ordered extents is
1983 * committed. We aren't waiting on anything past this point,
1984 * we're purely getting the transaction and committing it.
1985 */
1986 trans = btrfs_attach_transaction_barrier(root);
1987 if (IS_ERR(trans)) {
1988 ret = PTR_ERR(trans);
1989
1990 /*
			 * We committed the transaction and there's no currently
			 * running transaction, which means everything we care
			 * about made it to disk and we are done.
1994 */
1995 if (ret == -ENOENT)
1996 ret = 0;
1997 goto out;
1998 }
1999 }
2000
2001 ret = btrfs_commit_transaction(trans);
2002out:
2003 ASSERT(list_empty(&ctx.list));
2004 ASSERT(list_empty(&ctx.conflict_inodes));
2005 err = file_check_and_advance_wb_err(file);
2006 if (!ret)
2007 ret = err;
2008 return ret > 0 ? -EIO : ret;
2009
2010out_release_extents:
2011 btrfs_release_log_ctx_extents(&ctx);
2012 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2013 goto out;
2014}
2015
2016static const struct vm_operations_struct btrfs_file_vm_ops = {
2017 .fault = filemap_fault,
2018 .map_pages = filemap_map_pages,
2019 .page_mkwrite = btrfs_page_mkwrite,
2020};
2021
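/*
 * mmap() support: require the mapping to provide a read_folio operation (we
 * need it to fault pages in), then just install btrfs' vm_ops.
 */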
2022static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
2023{
2024 struct address_space *mapping = filp->f_mapping;
2025
2026 if (!mapping->a_ops->read_folio)
2027 return -ENOEXEC;
2028
2029 file_accessed(filp);
2030 vma->vm_ops = &btrfs_file_vm_ops;
2031
2032 return 0;
2033}
2034
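/*
 * Check if the file extent item at @slot is a hole (a regular extent with a
 * zero disk_bytenr) that is immediately adjacent to the range [start, end),
 * and can therefore be merged with a new hole covering that range.
 */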
2035static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
2036 int slot, u64 start, u64 end)
2037{
2038 struct btrfs_file_extent_item *fi;
2039 struct btrfs_key key;
2040
2041 if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2042 return 0;
2043
2044 btrfs_item_key_to_cpu(leaf, &key, slot);
2045 if (key.objectid != btrfs_ino(inode) ||
2046 key.type != BTRFS_EXTENT_DATA_KEY)
2047 return 0;
2048
2049 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2050
2051 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2052 return 0;
2053
2054 if (btrfs_file_extent_disk_bytenr(leaf, fi))
2055 return 0;
2056
2057 if (key.offset == end)
2058 return 1;
2059 if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2060 return 1;
2061 return 0;
2062}
2063
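/*
 * Insert a file extent item representing a hole for the range [offset, end),
 * merging it with an adjacent existing hole extent when possible. With the
 * NO_HOLES feature enabled no item is needed, and only the extent map cache
 * is updated.
 */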
2064static int fill_holes(struct btrfs_trans_handle *trans,
2065 struct btrfs_inode *inode,
2066 struct btrfs_path *path, u64 offset, u64 end)
2067{
2068 struct btrfs_fs_info *fs_info = trans->fs_info;
2069 struct btrfs_root *root = inode->root;
2070 struct extent_buffer *leaf;
2071 struct btrfs_file_extent_item *fi;
2072 struct extent_map *hole_em;
2073 struct btrfs_key key;
2074 int ret;
2075
2076 if (btrfs_fs_incompat(fs_info, NO_HOLES))
2077 goto out;
2078
2079 key.objectid = btrfs_ino(inode);
2080 key.type = BTRFS_EXTENT_DATA_KEY;
2081 key.offset = offset;
2082
2083 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2084 if (ret <= 0) {
2085 /*
2086 * We should have dropped this offset, so if we find it then
2087 * something has gone horribly wrong.
2088 */
2089 if (ret == 0)
2090 ret = -EINVAL;
2091 return ret;
2092 }
2093
2094 leaf = path->nodes[0];
2095 if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2096 u64 num_bytes;
2097
2098 path->slots[0]--;
2099 fi = btrfs_item_ptr(leaf, path->slots[0],
2100 struct btrfs_file_extent_item);
2101 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2102 end - offset;
2103 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2104 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2105 btrfs_set_file_extent_offset(leaf, fi, 0);
2106 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2107 btrfs_mark_buffer_dirty(leaf);
2108 goto out;
2109 }
2110
2111 if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2112 u64 num_bytes;
2113
2114 key.offset = offset;
2115 btrfs_set_item_key_safe(fs_info, path, &key);
2116 fi = btrfs_item_ptr(leaf, path->slots[0],
2117 struct btrfs_file_extent_item);
2118 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2119 offset;
2120 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2121 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2122 btrfs_set_file_extent_offset(leaf, fi, 0);
2123 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2124 btrfs_mark_buffer_dirty(leaf);
2125 goto out;
2126 }
2127 btrfs_release_path(path);
2128
2129 ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset,
2130 end - offset);
2131 if (ret)
2132 return ret;
2133
2134out:
2135 btrfs_release_path(path);
2136
2137 hole_em = alloc_extent_map();
2138 if (!hole_em) {
2139 btrfs_drop_extent_map_range(inode, offset, end - 1, false);
2140 btrfs_set_inode_full_sync(inode);
2141 } else {
2142 hole_em->start = offset;
2143 hole_em->len = end - offset;
2144 hole_em->ram_bytes = hole_em->len;
2145 hole_em->orig_start = offset;
2146
2147 hole_em->block_start = EXTENT_MAP_HOLE;
2148 hole_em->block_len = 0;
2149 hole_em->orig_block_len = 0;
2150 hole_em->compress_type = BTRFS_COMPRESS_NONE;
2151 hole_em->generation = trans->transid;
2152
2153 ret = btrfs_replace_extent_map_range(inode, hole_em, true);
2154 free_extent_map(hole_em);
2155 if (ret)
2156 btrfs_set_inode_full_sync(inode);
2157 }
2158
2159 return 0;
2160}
2161
/*
 * Find a hole extent on the given inode and advance *start/*len past it.
 * A hole extent here is a hole or "vacuum" extent (an implicit hole that only
 * exists in NO_HOLES mode) whose em->start <= start &&
 * em->start + em->len > start.
 * When a hole extent is found, return 1 and modify *start/*len.
 */
2168static int find_first_non_hole(struct btrfs_inode *inode, u64 *start, u64 *len)
2169{
2170 struct btrfs_fs_info *fs_info = inode->root->fs_info;
2171 struct extent_map *em;
2172 int ret = 0;
2173
2174 em = btrfs_get_extent(inode, NULL, 0,
2175 round_down(*start, fs_info->sectorsize),
2176 round_up(*len, fs_info->sectorsize));
2177 if (IS_ERR(em))
2178 return PTR_ERR(em);
2179
	/* Hole or vacuum extent (the latter only exists in NO_HOLES mode) */
2181 if (em->block_start == EXTENT_MAP_HOLE) {
2182 ret = 1;
2183 *len = em->start + em->len > *start + *len ?
2184 0 : *start + *len - em->start - em->len;
2185 *start = em->start + em->len;
2186 }
2187 free_extent_map(em);
2188 return ret;
2189}
2190
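/*
 * Truncate the page cache for the given range and lock the range in the
 * inode's io tree, retrying as long as pages reappear in the range (see the
 * comment inside the retry loop below).
 */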
2191static void btrfs_punch_hole_lock_range(struct inode *inode,
2192 const u64 lockstart,
2193 const u64 lockend,
2194 struct extent_state **cached_state)
2195{
2196 /*
	 * For the subpage case, if the range is not at a page boundary, we
	 * could have pages at the leading/trailing parts of the range.
	 * This could lead to an infinite loop, since filemap_range_has_page()
	 * would then always return true.
2201 * So here we need to do extra page alignment for
2202 * filemap_range_has_page().
2203 */
2204 const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
2205 const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
2206
2207 while (1) {
2208 truncate_pagecache_range(inode, lockstart, lockend);
2209
2210 lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2211 cached_state);
2212 /*
2213 * We can't have ordered extents in the range, nor dirty/writeback
2214 * pages, because we have locked the inode's VFS lock in exclusive
2215 * mode, we have locked the inode's i_mmap_lock in exclusive mode,
2216 * we have flushed all delalloc in the range and we have waited
2217 * for any ordered extents in the range to complete.
2218 * We can race with anyone reading pages from this range, so after
2219 * locking the range check if we have pages in the range, and if
2220 * we do, unlock the range and retry.
2221 */
2222 if (!filemap_range_has_page(inode->i_mapping, page_lockstart,
2223 page_lockend))
2224 break;
2225
2226 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2227 cached_state);
2228 }
2229
2230 btrfs_assert_inode_range_clean(BTRFS_I(inode), lockstart, lockend);
2231}
2232
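/*
 * Insert a new file extent item for the range being replaced, based on the
 * item described by @extent_info, and update the inode's byte accounting.
 * For a newly allocated extent this consumes the reserved extent, otherwise
 * it adds a reference to the existing extent. A hole, with the NO_HOLES
 * feature enabled, needs no item at all.
 */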
2233static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
2234 struct btrfs_inode *inode,
2235 struct btrfs_path *path,
2236 struct btrfs_replace_extent_info *extent_info,
2237 const u64 replace_len,
2238 const u64 bytes_to_drop)
2239{
2240 struct btrfs_fs_info *fs_info = trans->fs_info;
2241 struct btrfs_root *root = inode->root;
2242 struct btrfs_file_extent_item *extent;
2243 struct extent_buffer *leaf;
2244 struct btrfs_key key;
2245 int slot;
2246 struct btrfs_ref ref = { 0 };
2247 int ret;
2248
2249 if (replace_len == 0)
2250 return 0;
2251
2252 if (extent_info->disk_offset == 0 &&
2253 btrfs_fs_incompat(fs_info, NO_HOLES)) {
2254 btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2255 return 0;
2256 }
2257
2258 key.objectid = btrfs_ino(inode);
2259 key.type = BTRFS_EXTENT_DATA_KEY;
2260 key.offset = extent_info->file_offset;
2261 ret = btrfs_insert_empty_item(trans, root, path, &key,
2262 sizeof(struct btrfs_file_extent_item));
2263 if (ret)
2264 return ret;
2265 leaf = path->nodes[0];
2266 slot = path->slots[0];
2267 write_extent_buffer(leaf, extent_info->extent_buf,
2268 btrfs_item_ptr_offset(leaf, slot),
2269 sizeof(struct btrfs_file_extent_item));
2270 extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2271 ASSERT(btrfs_file_extent_type(leaf, extent) != BTRFS_FILE_EXTENT_INLINE);
2272 btrfs_set_file_extent_offset(leaf, extent, extent_info->data_offset);
2273 btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
2274 if (extent_info->is_new_extent)
2275 btrfs_set_file_extent_generation(leaf, extent, trans->transid);
2276 btrfs_mark_buffer_dirty(leaf);
2277 btrfs_release_path(path);
2278
2279 ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
2280 replace_len);
2281 if (ret)
2282 return ret;
2283
2284 /* If it's a hole, nothing more needs to be done. */
2285 if (extent_info->disk_offset == 0) {
2286 btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2287 return 0;
2288 }
2289
2290 btrfs_update_inode_bytes(inode, replace_len, bytes_to_drop);
2291
2292 if (extent_info->is_new_extent && extent_info->insertions == 0) {
2293 key.objectid = extent_info->disk_offset;
2294 key.type = BTRFS_EXTENT_ITEM_KEY;
2295 key.offset = extent_info->disk_len;
2296 ret = btrfs_alloc_reserved_file_extent(trans, root,
2297 btrfs_ino(inode),
2298 extent_info->file_offset,
2299 extent_info->qgroup_reserved,
2300 &key);
2301 } else {
2302 u64 ref_offset;
2303
2304 btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2305 extent_info->disk_offset,
2306 extent_info->disk_len, 0);
2307 ref_offset = extent_info->file_offset - extent_info->data_offset;
2308 btrfs_init_data_ref(&ref, root->root_key.objectid,
2309 btrfs_ino(inode), ref_offset, 0, false);
2310 ret = btrfs_inc_extent_ref(trans, &ref);
2311 }
2312
2313 extent_info->insertions++;
2314
2315 return ret;
2316}
2317
2318/*
2319 * The respective range must have been previously locked, as well as the inode.
2320 * The end offset is inclusive (last byte of the range).
2321 * @extent_info is NULL for fallocate's hole punching and non-NULL when replacing
2322 * the file range with an extent.
2323 * When not punching a hole, we don't want to end up in a state where we dropped
2324 * extents without inserting a new one, so we must abort the transaction to avoid
2325 * a corruption.
2326 */
2327int btrfs_replace_file_extents(struct btrfs_inode *inode,
2328 struct btrfs_path *path, const u64 start,
2329 const u64 end,
2330 struct btrfs_replace_extent_info *extent_info,
2331 struct btrfs_trans_handle **trans_out)
2332{
2333 struct btrfs_drop_extents_args drop_args = { 0 };
2334 struct btrfs_root *root = inode->root;
2335 struct btrfs_fs_info *fs_info = root->fs_info;
2336 u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
2337 u64 ino_size = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
2338 struct btrfs_trans_handle *trans = NULL;
2339 struct btrfs_block_rsv *rsv;
2340 unsigned int rsv_count;
2341 u64 cur_offset;
2342 u64 len = end - start;
2343 int ret = 0;
2344
2345 if (end <= start)
2346 return -EINVAL;
2347
2348 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
2349 if (!rsv) {
2350 ret = -ENOMEM;
2351 goto out;
2352 }
2353 rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
2354 rsv->failfast = true;
2355
2356 /*
2357 * 1 - update the inode
	 * 1 - remove the extents in the range
	 * 1 - add the hole extent if no_holes isn't set or if we are
	 *     replacing the range with a new extent
2361 */
2362 if (!btrfs_fs_incompat(fs_info, NO_HOLES) || extent_info)
2363 rsv_count = 3;
2364 else
2365 rsv_count = 2;
2366
2367 trans = btrfs_start_transaction(root, rsv_count);
2368 if (IS_ERR(trans)) {
2369 ret = PTR_ERR(trans);
2370 trans = NULL;
2371 goto out_free;
2372 }
2373
2374 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
2375 min_size, false);
2376 if (WARN_ON(ret))
2377 goto out_trans;
2378 trans->block_rsv = rsv;
2379
2380 cur_offset = start;
2381 drop_args.path = path;
2382 drop_args.end = end + 1;
2383 drop_args.drop_cache = true;
2384 while (cur_offset < end) {
2385 drop_args.start = cur_offset;
2386 ret = btrfs_drop_extents(trans, root, inode, &drop_args);
2387 /* If we are punching a hole decrement the inode's byte count */
2388 if (!extent_info)
2389 btrfs_update_inode_bytes(inode, 0,
2390 drop_args.bytes_found);
2391 if (ret != -ENOSPC) {
2392 /*
2393 * The only time we don't want to abort is if we are
2394 * attempting to clone a partial inline extent, in which
2395 * case we'll get EOPNOTSUPP. However if we aren't
			 * cloning we need to abort no matter what, because if we
2397 * got EOPNOTSUPP via prealloc then we messed up and
2398 * need to abort.
2399 */
2400 if (ret &&
2401 (ret != -EOPNOTSUPP ||
2402 (extent_info && extent_info->is_new_extent)))
2403 btrfs_abort_transaction(trans, ret);
2404 break;
2405 }
2406
2407 trans->block_rsv = &fs_info->trans_block_rsv;
2408
2409 if (!extent_info && cur_offset < drop_args.drop_end &&
2410 cur_offset < ino_size) {
2411 ret = fill_holes(trans, inode, path, cur_offset,
2412 drop_args.drop_end);
2413 if (ret) {
2414 /*
2415 * If we failed then we didn't insert our hole
				 * entries for the area we dropped, so the fs
				 * is now corrupted and we must abort the
				 * transaction.
2419 */
2420 btrfs_abort_transaction(trans, ret);
2421 break;
2422 }
2423 } else if (!extent_info && cur_offset < drop_args.drop_end) {
2424 /*
2425 * We are past the i_size here, but since we didn't
2426 * insert holes we need to clear the mapped area so we
2427 * know to not set disk_i_size in this area until a new
2428 * file extent is inserted here.
2429 */
2430 ret = btrfs_inode_clear_file_extent_range(inode,
2431 cur_offset,
2432 drop_args.drop_end - cur_offset);
2433 if (ret) {
2434 /*
				 * We couldn't clear our area, so we could
				 * presumably later adjust disk_i_size up over
				 * it and corrupt the fs, so we need to abort.
2438 */
2439 btrfs_abort_transaction(trans, ret);
2440 break;
2441 }
2442 }
2443
2444 if (extent_info &&
2445 drop_args.drop_end > extent_info->file_offset) {
2446 u64 replace_len = drop_args.drop_end -
2447 extent_info->file_offset;
2448
2449 ret = btrfs_insert_replace_extent(trans, inode, path,
2450 extent_info, replace_len,
2451 drop_args.bytes_found);
2452 if (ret) {
2453 btrfs_abort_transaction(trans, ret);
2454 break;
2455 }
2456 extent_info->data_len -= replace_len;
2457 extent_info->data_offset += replace_len;
2458 extent_info->file_offset += replace_len;
2459 }
2460
2461 /*
2462 * We are releasing our handle on the transaction, balance the
2463 * dirty pages of the btree inode and flush delayed items, and
2464 * then get a new transaction handle, which may now point to a
	 * new transaction in case someone else has committed the
2466 * transaction we used to replace/drop file extent items. So
2467 * bump the inode's iversion and update mtime and ctime except
2468 * if we are called from a dedupe context. This is because a
2469 * power failure/crash may happen after the transaction is
2470 * committed and before we finish replacing/dropping all the
2471 * file extent items we need.
2472 */
2473 inode_inc_iversion(&inode->vfs_inode);
2474
2475 if (!extent_info || extent_info->update_times) {
2476 inode->vfs_inode.i_mtime = current_time(&inode->vfs_inode);
2477 inode->vfs_inode.i_ctime = inode->vfs_inode.i_mtime;
2478 }
2479
2480 ret = btrfs_update_inode(trans, root, inode);
2481 if (ret)
2482 break;
2483
2484 btrfs_end_transaction(trans);
2485 btrfs_btree_balance_dirty(fs_info);
2486
2487 trans = btrfs_start_transaction(root, rsv_count);
2488 if (IS_ERR(trans)) {
2489 ret = PTR_ERR(trans);
2490 trans = NULL;
2491 break;
2492 }
2493
2494 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
2495 rsv, min_size, false);
2496 if (WARN_ON(ret))
2497 break;
2498 trans->block_rsv = rsv;
2499
2500 cur_offset = drop_args.drop_end;
2501 len = end - cur_offset;
2502 if (!extent_info && len) {
2503 ret = find_first_non_hole(inode, &cur_offset, &len);
2504 if (unlikely(ret < 0))
2505 break;
2506 if (ret && !len) {
2507 ret = 0;
2508 break;
2509 }
2510 }
2511 }
2512
2513 /*
	 * If we were cloning, force the next fsync to be a full one since we
	 * replaced (or just dropped in the case of cloning holes when
2516 * NO_HOLES is enabled) file extent items and did not setup new extent
2517 * maps for the replacement extents (or holes).
2518 */
2519 if (extent_info && !extent_info->is_new_extent)
2520 btrfs_set_inode_full_sync(inode);
2521
2522 if (ret)
2523 goto out_trans;
2524
2525 trans->block_rsv = &fs_info->trans_block_rsv;
2526 /*
	 * If we are using the NO_HOLES feature we might already have had a
	 * hole that overlaps a part of the region [lockstart, lockend] and
2529 * ends at (or beyond) lockend. Since we have no file extent items to
2530 * represent holes, drop_end can be less than lockend and so we must
2531 * make sure we have an extent map representing the existing hole (the
	 * call to btrfs_drop_extents() might have dropped the existing extent
2533 * map representing the existing hole), otherwise the fast fsync path
2534 * will not record the existence of the hole region
2535 * [existing_hole_start, lockend].
2536 */
2537 if (drop_args.drop_end <= end)
2538 drop_args.drop_end = end + 1;
2539 /*
	 * Don't insert a file hole extent item if it's for a range beyond EOF
	 * (because it's useless) or if it represents a zero-byte range (when
2542 * cur_offset == drop_end).
2543 */
2544 if (!extent_info && cur_offset < ino_size &&
2545 cur_offset < drop_args.drop_end) {
2546 ret = fill_holes(trans, inode, path, cur_offset,
2547 drop_args.drop_end);
2548 if (ret) {
2549 /* Same comment as above. */
2550 btrfs_abort_transaction(trans, ret);
2551 goto out_trans;
2552 }
2553 } else if (!extent_info && cur_offset < drop_args.drop_end) {
2554 /* See the comment in the loop above for the reasoning here. */
2555 ret = btrfs_inode_clear_file_extent_range(inode, cur_offset,
2556 drop_args.drop_end - cur_offset);
2557 if (ret) {
2558 btrfs_abort_transaction(trans, ret);
2559 goto out_trans;
2560 }
2561
2562 }
2563 if (extent_info) {
2564 ret = btrfs_insert_replace_extent(trans, inode, path,
2565 extent_info, extent_info->data_len,
2566 drop_args.bytes_found);
2567 if (ret) {
2568 btrfs_abort_transaction(trans, ret);
2569 goto out_trans;
2570 }
2571 }
2572
2573out_trans:
2574 if (!trans)
2575 goto out_free;
2576
2577 trans->block_rsv = &fs_info->trans_block_rsv;
2578 if (ret)
2579 btrfs_end_transaction(trans);
2580 else
2581 *trans_out = trans;
2582out_free:
2583 btrfs_free_block_rsv(fs_info, rsv);
2584out:
2585 return ret;
2586}
2587
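/*
 * Punch a hole in the range [offset, offset + len): zero any partial blocks
 * at the edges, then drop the extents in the block aligned subrange and
 * replace them with hole extent items (or nothing, with NO_HOLES) through
 * btrfs_replace_file_extents(), updating the inode afterwards.
 */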
2588static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
2589{
2590 struct inode *inode = file_inode(file);
2591 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2592 struct btrfs_root *root = BTRFS_I(inode)->root;
2593 struct extent_state *cached_state = NULL;
2594 struct btrfs_path *path;
2595 struct btrfs_trans_handle *trans = NULL;
2596 u64 lockstart;
2597 u64 lockend;
2598 u64 tail_start;
2599 u64 tail_len;
2600 u64 orig_start = offset;
2601 int ret = 0;
2602 bool same_block;
2603 u64 ino_size;
2604 bool truncated_block = false;
2605 bool updated_inode = false;
2606
2607 btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2608
2609 ret = btrfs_wait_ordered_range(inode, offset, len);
2610 if (ret)
2611 goto out_only_mutex;
2612
2613 ino_size = round_up(inode->i_size, fs_info->sectorsize);
2614 ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2615 if (ret < 0)
2616 goto out_only_mutex;
2617 if (ret && !len) {
2618 /* Already in a large hole */
2619 ret = 0;
2620 goto out_only_mutex;
2621 }
2622
2623 ret = file_modified(file);
2624 if (ret)
2625 goto out_only_mutex;
2626
2627 lockstart = round_up(offset, fs_info->sectorsize);
2628 lockend = round_down(offset + len, fs_info->sectorsize) - 1;
2629 same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
2630 == (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
2631 /*
2632 * We needn't truncate any block which is beyond the end of the file
2633 * because we are sure there is no data there.
	 *
2636 * Only do this if we are in the same block and we aren't doing the
2637 * entire block.
2638 */
2639 if (same_block && len < fs_info->sectorsize) {
2640 if (offset < ino_size) {
2641 truncated_block = true;
2642 ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
2643 0);
2644 } else {
2645 ret = 0;
2646 }
2647 goto out_only_mutex;
2648 }
2649
2650 /* zero back part of the first block */
2651 if (offset < ino_size) {
2652 truncated_block = true;
2653 ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
2654 if (ret) {
2655 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2656 return ret;
2657 }
2658 }
2659
	/*
	 * Check the aligned pages after the first unaligned page. If
	 * offset != orig_start, the first unaligned page and possibly several
	 * following pages are already within holes, so the extra check can be
	 * skipped.
	 */
2664 if (offset == orig_start) {
2665 /* after truncate page, check hole again */
2666 len = offset + len - lockstart;
2667 offset = lockstart;
2668 ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2669 if (ret < 0)
2670 goto out_only_mutex;
2671 if (ret && !len) {
2672 ret = 0;
2673 goto out_only_mutex;
2674 }
2675 lockstart = offset;
2676 }
2677
2678 /* Check the tail unaligned part is in a hole */
2679 tail_start = lockend + 1;
2680 tail_len = offset + len - tail_start;
2681 if (tail_len) {
2682 ret = find_first_non_hole(BTRFS_I(inode), &tail_start, &tail_len);
2683 if (unlikely(ret < 0))
2684 goto out_only_mutex;
2685 if (!ret) {
2686 /* zero the front end of the last page */
2687 if (tail_start + tail_len < ino_size) {
2688 truncated_block = true;
2689 ret = btrfs_truncate_block(BTRFS_I(inode),
2690 tail_start + tail_len,
2691 0, 1);
2692 if (ret)
2693 goto out_only_mutex;
2694 }
2695 }
2696 }
2697
2698 if (lockend < lockstart) {
2699 ret = 0;
2700 goto out_only_mutex;
2701 }
2702
2703 btrfs_punch_hole_lock_range(inode, lockstart, lockend, &cached_state);
2704
2705 path = btrfs_alloc_path();
2706 if (!path) {
2707 ret = -ENOMEM;
2708 goto out;
2709 }
2710
2711 ret = btrfs_replace_file_extents(BTRFS_I(inode), path, lockstart,
2712 lockend, NULL, &trans);
2713 btrfs_free_path(path);
2714 if (ret)
2715 goto out;
2716
2717 ASSERT(trans != NULL);
2718 inode_inc_iversion(inode);
2719 inode->i_mtime = current_time(inode);
2720 inode->i_ctime = inode->i_mtime;
2721 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
2722 updated_inode = true;
2723 btrfs_end_transaction(trans);
2724 btrfs_btree_balance_dirty(fs_info);
2725out:
2726 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2727 &cached_state);
2728out_only_mutex:
2729 if (!updated_inode && truncated_block && !ret) {
2730 /*
2731 * If we only end up zeroing part of a page, we still need to
2732 * update the inode item, so that all the time fields are
2733 * updated as well as the necessary btrfs inode in memory fields
2734 * for detecting, at fsync time, if the inode isn't yet in the
2735 * log tree or it's there but not up to date.
2736 */
2737 struct timespec64 now = current_time(inode);
2738
2739 inode_inc_iversion(inode);
2740 inode->i_mtime = now;
2741 inode->i_ctime = now;
2742 trans = btrfs_start_transaction(root, 1);
2743 if (IS_ERR(trans)) {
2744 ret = PTR_ERR(trans);
2745 } else {
2746 int ret2;
2747
2748 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
2749 ret2 = btrfs_end_transaction(trans);
2750 if (!ret)
2751 ret = ret2;
2752 }
2753 }
2754 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2755 return ret;
2756}
2757
2758/* Helper structure to record which range is already reserved */
2759struct falloc_range {
2760 struct list_head list;
2761 u64 start;
2762 u64 len;
2763};
2764
/*
 * Helper function to add a falloc range.
 *
 * The caller should have locked a larger extent range that contains
 * [start, start + len).
 */
2771static int add_falloc_range(struct list_head *head, u64 start, u64 len)
2772{
2773 struct falloc_range *range = NULL;
2774
2775 if (!list_empty(head)) {
2776 /*
		 * As fallocate iterates in increasing offset order, we only
		 * need to check the last range.
2779 */
2780 range = list_last_entry(head, struct falloc_range, list);
2781 if (range->start + range->len == start) {
2782 range->len += len;
2783 return 0;
2784 }
2785 }
2786
2787 range = kmalloc(sizeof(*range), GFP_KERNEL);
2788 if (!range)
2789 return -ENOMEM;
2790 range->start = start;
2791 range->len = len;
2792 list_add_tail(&range->list, head);
2793 return 0;
2794}
2795
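/*
 * If the fallocate extended the file (and FALLOC_FL_KEEP_SIZE was not given),
 * update both i_size and the on-disk inode item.
 */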
2796static int btrfs_fallocate_update_isize(struct inode *inode,
2797 const u64 end,
2798 const int mode)
2799{
2800 struct btrfs_trans_handle *trans;
2801 struct btrfs_root *root = BTRFS_I(inode)->root;
2802 int ret;
2803 int ret2;
2804
2805 if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
2806 return 0;
2807
2808 trans = btrfs_start_transaction(root, 1);
2809 if (IS_ERR(trans))
2810 return PTR_ERR(trans);
2811
2812 inode->i_ctime = current_time(inode);
2813 i_size_write(inode, end);
2814 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
2815 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
2816 ret2 = btrfs_end_transaction(trans);
2817
2818 return ret ? ret : ret2;
2819}
2820
2821enum {
2822 RANGE_BOUNDARY_WRITTEN_EXTENT,
2823 RANGE_BOUNDARY_PREALLOC_EXTENT,
2824 RANGE_BOUNDARY_HOLE,
2825};
2826
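/*
 * Determine what the block containing @offset maps to: a hole, a prealloc
 * extent or a written extent. Returns one of the RANGE_BOUNDARY_* values
 * above, or a negative errno.
 */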
2827static int btrfs_zero_range_check_range_boundary(struct btrfs_inode *inode,
2828 u64 offset)
2829{
2830 const u64 sectorsize = inode->root->fs_info->sectorsize;
2831 struct extent_map *em;
2832 int ret;
2833
2834 offset = round_down(offset, sectorsize);
2835 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize);
2836 if (IS_ERR(em))
2837 return PTR_ERR(em);
2838
2839 if (em->block_start == EXTENT_MAP_HOLE)
2840 ret = RANGE_BOUNDARY_HOLE;
2841 else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2842 ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
2843 else
2844 ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
2845
2846 free_extent_map(em);
2847 return ret;
2848}
2849
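/*
 * Implementation of FALLOC_FL_ZERO_RANGE: zero partial blocks at the
 * boundaries of the range and allocate a prealloc (unwritten) extent for the
 * block aligned part, skipping over a leading part that is already a
 * prealloc extent.
 */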
2850static int btrfs_zero_range(struct inode *inode,
2851 loff_t offset,
2852 loff_t len,
2853 const int mode)
2854{
2855 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2856 struct extent_map *em;
2857 struct extent_changeset *data_reserved = NULL;
2858 int ret;
2859 u64 alloc_hint = 0;
2860 const u64 sectorsize = fs_info->sectorsize;
2861 u64 alloc_start = round_down(offset, sectorsize);
2862 u64 alloc_end = round_up(offset + len, sectorsize);
2863 u64 bytes_to_reserve = 0;
2864 bool space_reserved = false;
2865
2866 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
2867 alloc_end - alloc_start);
2868 if (IS_ERR(em)) {
2869 ret = PTR_ERR(em);
2870 goto out;
2871 }
2872
2873 /*
2874 * Avoid hole punching and extent allocation for some cases. More cases
	 * could be considered, but those are unlikely to be common and we keep
	 * things as simple as possible for now. Also, intentionally, if the target
2877 * range contains one or more prealloc extents together with regular
2878 * extents and holes, we drop all the existing extents and allocate a
2879 * new prealloc extent, so that we get a larger contiguous disk extent.
2880 */
2881 if (em->start <= alloc_start &&
2882 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
2883 const u64 em_end = em->start + em->len;
2884
2885 if (em_end >= offset + len) {
2886 /*
2887 * The whole range is already a prealloc extent,
2888 * do nothing except updating the inode's i_size if
2889 * needed.
2890 */
2891 free_extent_map(em);
2892 ret = btrfs_fallocate_update_isize(inode, offset + len,
2893 mode);
2894 goto out;
2895 }
2896 /*
2897 * Part of the range is already a prealloc extent, so operate
2898 * only on the remaining part of the range.
2899 */
2900 alloc_start = em_end;
2901 ASSERT(IS_ALIGNED(alloc_start, sectorsize));
2902 len = offset + len - alloc_start;
2903 offset = alloc_start;
2904 alloc_hint = em->block_start + em->len;
2905 }
2906 free_extent_map(em);
2907
2908 if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
2909 BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
2910 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
2911 sectorsize);
2912 if (IS_ERR(em)) {
2913 ret = PTR_ERR(em);
2914 goto out;
2915 }
2916
2917 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
2918 free_extent_map(em);
2919 ret = btrfs_fallocate_update_isize(inode, offset + len,
2920 mode);
2921 goto out;
2922 }
2923 if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
2924 free_extent_map(em);
2925 ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
2926 0);
2927 if (!ret)
2928 ret = btrfs_fallocate_update_isize(inode,
2929 offset + len,
2930 mode);
2931 return ret;
2932 }
2933 free_extent_map(em);
2934 alloc_start = round_down(offset, sectorsize);
2935 alloc_end = alloc_start + sectorsize;
2936 goto reserve_space;
2937 }
2938
2939 alloc_start = round_up(offset, sectorsize);
2940 alloc_end = round_down(offset + len, sectorsize);
2941
2942 /*
2943 * For unaligned ranges, check the pages at the boundaries, they might
2944 * map to an extent, in which case we need to partially zero them, or
2945 * they might map to a hole, in which case we need our allocation range
2946 * to cover them.
2947 */
2948 if (!IS_ALIGNED(offset, sectorsize)) {
2949 ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
2950 offset);
2951 if (ret < 0)
2952 goto out;
2953 if (ret == RANGE_BOUNDARY_HOLE) {
2954 alloc_start = round_down(offset, sectorsize);
2955 ret = 0;
2956 } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
2957 ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
2958 if (ret)
2959 goto out;
2960 } else {
2961 ret = 0;
2962 }
2963 }
2964
2965 if (!IS_ALIGNED(offset + len, sectorsize)) {
2966 ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
2967 offset + len);
2968 if (ret < 0)
2969 goto out;
2970 if (ret == RANGE_BOUNDARY_HOLE) {
2971 alloc_end = round_up(offset + len, sectorsize);
2972 ret = 0;
2973 } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
2974 ret = btrfs_truncate_block(BTRFS_I(inode), offset + len,
2975 0, 1);
2976 if (ret)
2977 goto out;
2978 } else {
2979 ret = 0;
2980 }
2981 }
2982
2983reserve_space:
2984 if (alloc_start < alloc_end) {
2985 struct extent_state *cached_state = NULL;
2986 const u64 lockstart = alloc_start;
2987 const u64 lockend = alloc_end - 1;
2988
2989 bytes_to_reserve = alloc_end - alloc_start;
2990 ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
2991 bytes_to_reserve);
2992 if (ret < 0)
2993 goto out;
2994 space_reserved = true;
2995 btrfs_punch_hole_lock_range(inode, lockstart, lockend,
2996 &cached_state);
2997 ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
2998 alloc_start, bytes_to_reserve);
2999 if (ret) {
3000 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
3001 lockend, &cached_state);
3002 goto out;
3003 }
3004 ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
3005 alloc_end - alloc_start,
3006 i_blocksize(inode),
3007 offset + len, &alloc_hint);
3008 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3009 &cached_state);
3010 /* btrfs_prealloc_file_range releases reserved space on error */
3011 if (ret) {
3012 space_reserved = false;
3013 goto out;
3014 }
3015 }
3016 ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
3017 out:
3018 if (ret && space_reserved)
3019 btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
3020 alloc_start, bytes_to_reserve);
3021 extent_changeset_free(data_reserved);
3022
3023 return ret;
3024}
3025
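/*
 * Reserve space and allocate prealloc (unwritten) extents for the block
 * aligned range of a plain fallocate, delegating to btrfs_punch_hole() and
 * btrfs_zero_range() for the FALLOC_FL_PUNCH_HOLE and FALLOC_FL_ZERO_RANGE
 * modes.
 */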
3026static long btrfs_fallocate(struct file *file, int mode,
3027 loff_t offset, loff_t len)
3028{
3029 struct inode *inode = file_inode(file);
3030 struct extent_state *cached_state = NULL;
3031 struct extent_changeset *data_reserved = NULL;
3032 struct falloc_range *range;
3033 struct falloc_range *tmp;
3034 struct list_head reserve_list;
3035 u64 cur_offset;
3036 u64 last_byte;
3037 u64 alloc_start;
3038 u64 alloc_end;
3039 u64 alloc_hint = 0;
3040 u64 locked_end;
3041 u64 actual_end = 0;
3042 u64 data_space_needed = 0;
3043 u64 data_space_reserved = 0;
3044 u64 qgroup_reserved = 0;
3045 struct extent_map *em;
3046 int blocksize = BTRFS_I(inode)->root->fs_info->sectorsize;
3047 int ret;
3048
3049 /* Do not allow fallocate in ZONED mode */
3050 if (btrfs_is_zoned(btrfs_sb(inode->i_sb)))
3051 return -EOPNOTSUPP;
3052
3053 alloc_start = round_down(offset, blocksize);
3054 alloc_end = round_up(offset + len, blocksize);
3055 cur_offset = alloc_start;
3056
	/* Make sure we aren't being given some crap mode. */
3058 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3059 FALLOC_FL_ZERO_RANGE))
3060 return -EOPNOTSUPP;
3061
3062 if (mode & FALLOC_FL_PUNCH_HOLE)
3063 return btrfs_punch_hole(file, offset, len);
3064
3065 btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
3066
3067 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
3068 ret = inode_newsize_ok(inode, offset + len);
3069 if (ret)
3070 goto out;
3071 }
3072
3073 ret = file_modified(file);
3074 if (ret)
3075 goto out;
3076
3077 /*
3078 * TODO: Move these two operations after we have checked
3079 * accurate reserved space, or fallocate can still fail but
	 * with pages truncated or the size expanded.
	 *
	 * But that's a minor problem and won't do much harm anyway.
3083 */
3084 if (alloc_start > inode->i_size) {
3085 ret = btrfs_cont_expand(BTRFS_I(inode), i_size_read(inode),
3086 alloc_start);
3087 if (ret)
3088 goto out;
3089 } else if (offset + len > inode->i_size) {
3090 /*
3091 * If we are fallocating from the end of the file onward we
3092 * need to zero out the end of the block if i_size lands in the
3093 * middle of a block.
3094 */
3095 ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
3096 if (ret)
3097 goto out;
3098 }
3099
3100 /*
3101 * We have locked the inode at the VFS level (in exclusive mode) and we
3102 * have locked the i_mmap_lock lock (in exclusive mode). Now before
	 * locking the file range, flush all delalloc in the range and wait for
3104 * all ordered extents in the range to complete. After this we can lock
3105 * the file range and, due to the previous locking we did, we know there
3106 * can't be more delalloc or ordered extents in the range.
3107 */
3108 ret = btrfs_wait_ordered_range(inode, alloc_start,
3109 alloc_end - alloc_start);
3110 if (ret)
3111 goto out;
3112
3113 if (mode & FALLOC_FL_ZERO_RANGE) {
3114 ret = btrfs_zero_range(inode, offset, len, mode);
3115 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
3116 return ret;
3117 }
3118
3119 locked_end = alloc_end - 1;
3120 lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3121 &cached_state);
3122
3123 btrfs_assert_inode_range_clean(BTRFS_I(inode), alloc_start, locked_end);
3124
3125 /* First, check if we exceed the qgroup limit */
3126 INIT_LIST_HEAD(&reserve_list);
3127 while (cur_offset < alloc_end) {
3128 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
3129 alloc_end - cur_offset);
3130 if (IS_ERR(em)) {
3131 ret = PTR_ERR(em);
3132 break;
3133 }
3134 last_byte = min(extent_map_end(em), alloc_end);
3135 actual_end = min_t(u64, extent_map_end(em), offset + len);
3136 last_byte = ALIGN(last_byte, blocksize);
3137 if (em->block_start == EXTENT_MAP_HOLE ||
3138 (cur_offset >= inode->i_size &&
3139 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
3140 const u64 range_len = last_byte - cur_offset;
3141
3142 ret = add_falloc_range(&reserve_list, cur_offset, range_len);
3143 if (ret < 0) {
3144 free_extent_map(em);
3145 break;
3146 }
3147 ret = btrfs_qgroup_reserve_data(BTRFS_I(inode),
3148 &data_reserved, cur_offset, range_len);
3149 if (ret < 0) {
3150 free_extent_map(em);
3151 break;
3152 }
3153 qgroup_reserved += range_len;
3154 data_space_needed += range_len;
3155 }
3156 free_extent_map(em);
3157 cur_offset = last_byte;
3158 }
3159
3160 if (!ret && data_space_needed > 0) {
3161 /*
3162 * We are safe to reserve space here as we can't have delalloc
3163 * in the range, see above.
3164 */
3165 ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3166 data_space_needed);
3167 if (!ret)
3168 data_space_reserved = data_space_needed;
3169 }
3170
3171 /*
	 * If ret is still 0, we are OK to fallocate.
	 * Otherwise just clean up the list and exit.
3174 */
3175 list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3176 if (!ret) {
3177 ret = btrfs_prealloc_file_range(inode, mode,
3178 range->start,
3179 range->len, i_blocksize(inode),
3180 offset + len, &alloc_hint);
3181 /*
3182 * btrfs_prealloc_file_range() releases space even
3183 * if it returns an error.
3184 */
3185 data_space_reserved -= range->len;
3186 qgroup_reserved -= range->len;
3187 } else if (data_space_reserved > 0) {
3188 btrfs_free_reserved_data_space(BTRFS_I(inode),
3189 data_reserved, range->start,
3190 range->len);
3191 data_space_reserved -= range->len;
3192 qgroup_reserved -= range->len;
3193 } else if (qgroup_reserved > 0) {
3194 btrfs_qgroup_free_data(BTRFS_I(inode), data_reserved,
3195 range->start, range->len);
3196 qgroup_reserved -= range->len;
3197 }
3198 list_del(&range->list);
3199 kfree(range);
3200 }
3201 if (ret < 0)
3202 goto out_unlock;
3203
3204 /*
3205 * We didn't need to allocate any more space, but we still extended the
3206 * size of the file so we need to update i_size and the inode item.
3207 */
3208 ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
3209out_unlock:
3210 unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3211 &cached_state);
3212out:
3213 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
3214 extent_changeset_free(data_reserved);
3215 return ret;
3216}
3217
3218/*
3219 * Helper for btrfs_find_delalloc_in_range(). Find a subrange in a given range
3220 * that has unflushed and/or flushing delalloc. There might be other adjacent
3221 * subranges after the one it found, so btrfs_find_delalloc_in_range() keeps
3222 * looping while it gets adjacent subranges, and merging them together.
3223 */
3224static bool find_delalloc_subrange(struct btrfs_inode *inode, u64 start, u64 end,
3225 struct extent_state **cached_state,
3226 bool *search_io_tree,
3227 u64 *delalloc_start_ret, u64 *delalloc_end_ret)
3228{
3229 u64 len = end + 1 - start;
3230 u64 delalloc_len = 0;
3231 struct btrfs_ordered_extent *oe;
3232 u64 oe_start;
3233 u64 oe_end;
3234
3235 /*
3236 * Search the io tree first for EXTENT_DELALLOC. If we find any, it
3237 * means we have delalloc (dirty pages) for which writeback has not
3238 * started yet.
3239 */
3240 if (*search_io_tree) {
3241 spin_lock(&inode->lock);
3242 if (inode->delalloc_bytes > 0) {
3243 spin_unlock(&inode->lock);
3244 *delalloc_start_ret = start;
3245 delalloc_len = count_range_bits(&inode->io_tree,
3246 delalloc_start_ret, end,
3247 len, EXTENT_DELALLOC, 1,
3248 cached_state);
3249 } else {
3250 spin_unlock(&inode->lock);
3251 }
3252 }
3253
3254 if (delalloc_len > 0) {
3255 /*
3256 * If delalloc was found then *delalloc_start_ret has a sector size
3257 * aligned value (rounded down).
3258 */
3259 *delalloc_end_ret = *delalloc_start_ret + delalloc_len - 1;
3260
3261 if (*delalloc_start_ret == start) {
3262 /* Delalloc for the whole range, nothing more to do. */
3263 if (*delalloc_end_ret == end)
3264 return true;
3265 /* Else trim our search range for ordered extents. */
3266 start = *delalloc_end_ret + 1;
3267 len = end + 1 - start;
3268 }
3269 } else {
3270 /* No delalloc, future calls don't need to search again. */
3271 *search_io_tree = false;
3272 }
3273
3274 /*
3275 * Now also check if there's any ordered extent in the range.
3276 * We do this because:
3277 *
3278 * 1) When delalloc is flushed, the file range is locked, we clear the
3279 * EXTENT_DELALLOC bit from the io tree and create an extent map and
3280 * an ordered extent for the write. So we might just have been called
3281 * after delalloc is flushed and before the ordered extent completes
3282 * and inserts the new file extent item in the subvolume's btree;
3283 *
3284 * 2) We may have an ordered extent created by flushing delalloc for a
3285 * subrange that starts before the subrange we found marked with
3286 * EXTENT_DELALLOC in the io tree.
3287 *
3288 * We could also use the extent map tree to find such delalloc that is
3289 * being flushed, but using the ordered extents tree is more efficient
3290 * because it's usually much smaller as ordered extents are removed from
	 * the tree once they complete. With the extent maps, we may have them
3292 * in the extent map tree for a very long time, and they were either
3293 * created by previous writes or loaded by read operations.
3294 */
3295 oe = btrfs_lookup_first_ordered_range(inode, start, len);
3296 if (!oe)
3297 return (delalloc_len > 0);
3298
3299 /* The ordered extent may span beyond our search range. */
3300 oe_start = max(oe->file_offset, start);
3301 oe_end = min(oe->file_offset + oe->num_bytes - 1, end);
3302
3303 btrfs_put_ordered_extent(oe);
3304
3305 /* Don't have unflushed delalloc, return the ordered extent range. */
3306 if (delalloc_len == 0) {
3307 *delalloc_start_ret = oe_start;
3308 *delalloc_end_ret = oe_end;
3309 return true;
3310 }
3311
3312 /*
3313 * We have both unflushed delalloc (io_tree) and an ordered extent.
	 * If the ranges are adjacent, return a combined range, otherwise
3315 * return the leftmost range.
3316 */
3317 if (oe_start < *delalloc_start_ret) {
3318 if (oe_end < *delalloc_start_ret)
3319 *delalloc_end_ret = oe_end;
3320 *delalloc_start_ret = oe_start;
3321 } else if (*delalloc_end_ret + 1 == oe_start) {
3322 *delalloc_end_ret = oe_end;
3323 }
3324
3325 return true;
3326}
3327
3328/*
3329 * Check if there's delalloc in a given range.
3330 *
3331 * @inode: The inode.
3332 * @start: The start offset of the range. It does not need to be
3333 * sector size aligned.
3334 * @end: The end offset (inclusive value) of the search range.
3335 * It does not need to be sector size aligned.
3336 * @cached_state: Extent state record used for speeding up delalloc
3337 * searches in the inode's io_tree. Can be NULL.
3338 * @delalloc_start_ret: Output argument, set to the start offset of the
3339 * subrange found with delalloc (may not be sector size
3340 * aligned).
 * @delalloc_end_ret: Output argument, set to the end offset (inclusive value)
3342 * of the subrange found with delalloc.
3343 *
3344 * Returns true if a subrange with delalloc is found within the given range, and
3345 * if so it sets @delalloc_start_ret and @delalloc_end_ret with the start and
3346 * end offsets of the subrange.
3347 */
3348bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end,
3349 struct extent_state **cached_state,
3350 u64 *delalloc_start_ret, u64 *delalloc_end_ret)
3351{
3352 u64 cur_offset = round_down(start, inode->root->fs_info->sectorsize);
3353 u64 prev_delalloc_end = 0;
3354 bool search_io_tree = true;
3355 bool ret = false;
3356
3357 while (cur_offset <= end) {
3358 u64 delalloc_start;
3359 u64 delalloc_end;
3360 bool delalloc;
3361
3362 delalloc = find_delalloc_subrange(inode, cur_offset, end,
3363 cached_state, &search_io_tree,
3364 &delalloc_start,
3365 &delalloc_end);
3366 if (!delalloc)
3367 break;
3368
3369 if (prev_delalloc_end == 0) {
3370 /* First subrange found. */
3371 *delalloc_start_ret = max(delalloc_start, start);
3372 *delalloc_end_ret = delalloc_end;
3373 ret = true;
3374 } else if (delalloc_start == prev_delalloc_end + 1) {
3375 /* Subrange adjacent to the previous one, merge them. */
3376 *delalloc_end_ret = delalloc_end;
3377 } else {
3378 /* Subrange not adjacent to the previous one, exit. */
3379 break;
3380 }
3381
3382 prev_delalloc_end = delalloc_end;
3383 cur_offset = delalloc_end + 1;
3384 cond_resched();
3385 }
3386
3387 return ret;
3388}
3389
3390/*
3391 * Check if there's a hole or delalloc range in a range representing a hole (or
3392 * prealloc extent) found in the inode's subvolume btree.
3393 *
3394 * @inode: The inode.
3395 * @whence: Seek mode (SEEK_DATA or SEEK_HOLE).
3396 * @start: Start offset of the hole region. It does not need to be sector
3397 * size aligned.
3398 * @end: End offset (inclusive value) of the hole region. It does not
3399 * need to be sector size aligned.
3400 * @start_ret: Return parameter, used to set the start of the subrange in the
3401 * hole that matches the search criteria (seek mode), if such
3402 * subrange is found (return value of the function is true).
3403 * The value returned here may not be sector size aligned.
3404 *
3405 * Returns true if a subrange matching the given seek mode is found, and if one
3406 * is found, it updates @start_ret with the start of the subrange.
3407 */
3408static bool find_desired_extent_in_hole(struct btrfs_inode *inode, int whence,
3409 struct extent_state **cached_state,
3410 u64 start, u64 end, u64 *start_ret)
3411{
3412 u64 delalloc_start;
3413 u64 delalloc_end;
3414 bool delalloc;
3415
3416 delalloc = btrfs_find_delalloc_in_range(inode, start, end, cached_state,
3417 &delalloc_start, &delalloc_end);
3418 if (delalloc && whence == SEEK_DATA) {
3419 *start_ret = delalloc_start;
3420 return true;
3421 }
3422
3423 if (delalloc && whence == SEEK_HOLE) {
3424 /*
		 * We found delalloc but it starts after our start offset. So we
3426 * have a hole between our start offset and the delalloc start.
3427 */
3428 if (start < delalloc_start) {
3429 *start_ret = start;
3430 return true;
3431 }
3432 /*
3433 * Delalloc range starts at our start offset.
3434 * If the delalloc range's length is smaller than our range,
3435 * then it means we have a hole that starts where the delalloc
3436 * subrange ends.
3437 */
3438 if (delalloc_end < end) {
3439 *start_ret = delalloc_end + 1;
3440 return true;
3441 }
3442
3443 /* There's delalloc for the whole range. */
3444 return false;
3445 }
3446
3447 if (!delalloc && whence == SEEK_HOLE) {
3448 *start_ret = start;
3449 return true;
3450 }
3451
3452 /*
3453 * No delalloc in the range and we are seeking for data. The caller has
3454 * to iterate to the next extent item in the subvolume btree.
3455 */
3456 return false;
3457}
3458
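/*
 * Implementation of SEEK_DATA/SEEK_HOLE: walk the inode's file extent items
 * and check for delalloc in the holes between them (and in prealloc extents),
 * returning the first offset that matches the seek mode, or a negative errno.
 */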
3459static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
3460{
3461 struct btrfs_inode *inode = BTRFS_I(file->f_mapping->host);
3462 struct btrfs_file_private *private = file->private_data;
3463 struct btrfs_fs_info *fs_info = inode->root->fs_info;
3464 struct extent_state *cached_state = NULL;
3465 struct extent_state **delalloc_cached_state;
3466 const loff_t i_size = i_size_read(&inode->vfs_inode);
3467 const u64 ino = btrfs_ino(inode);
3468 struct btrfs_root *root = inode->root;
3469 struct btrfs_path *path;
3470 struct btrfs_key key;
3471 u64 last_extent_end;
3472 u64 lockstart;
3473 u64 lockend;
3474 u64 start;
3475 int ret;
3476 bool found = false;
3477
3478 if (i_size == 0 || offset >= i_size)
3479 return -ENXIO;
3480
3481 /*
3482 * Quick path. If the inode has no prealloc extents and its number of
3483 * bytes used matches its i_size, then it can not have holes.
3484 */
3485 if (whence == SEEK_HOLE &&
3486 !(inode->flags & BTRFS_INODE_PREALLOC) &&
3487 inode_get_bytes(&inode->vfs_inode) == i_size)
3488 return i_size;
3489
3490 if (!private) {
3491 private = kzalloc(sizeof(*private), GFP_KERNEL);
3492 /*
3493 * No worries if memory allocation failed.
3494 * The private structure is used only for speeding up multiple
3495 * lseek SEEK_HOLE/DATA calls to a file when there's delalloc,
3496 * so everything will still be correct.
3497 */
3498 file->private_data = private;
3499 }
3500
3501 if (private)
3502 delalloc_cached_state = &private->llseek_cached_state;
3503 else
3504 delalloc_cached_state = NULL;
3505
3506 /*
	 * offset can be negative; in this case we start looking for DATA/HOLE from
3508 * the very start of the file.
3509 */
3510 start = max_t(loff_t, 0, offset);
3511
3512 lockstart = round_down(start, fs_info->sectorsize);
3513 lockend = round_up(i_size, fs_info->sectorsize);
3514 if (lockend <= lockstart)
3515 lockend = lockstart + fs_info->sectorsize;
3516 lockend--;
3517
3518 path = btrfs_alloc_path();
3519 if (!path)
3520 return -ENOMEM;
3521 path->reada = READA_FORWARD;
3522
3523 key.objectid = ino;
3524 key.type = BTRFS_EXTENT_DATA_KEY;
3525 key.offset = start;
3526
3527 last_extent_end = lockstart;
3528
3529 lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3530
3531 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3532 if (ret < 0) {
3533 goto out;
3534 } else if (ret > 0 && path->slots[0] > 0) {
3535 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
3536 if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
3537 path->slots[0]--;
3538 }
3539
3540 while (start < i_size) {
3541 struct extent_buffer *leaf = path->nodes[0];
3542 struct btrfs_file_extent_item *extent;
3543 u64 extent_end;
3544 u8 type;
3545
3546 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3547 ret = btrfs_next_leaf(root, path);
3548 if (ret < 0)
3549 goto out;
3550 else if (ret > 0)
3551 break;
3552
3553 leaf = path->nodes[0];
3554 }
3555
3556 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3557 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
3558 break;
3559
3560 extent_end = btrfs_file_extent_end(path);
3561
3562 /*
3563 * In the first iteration we may have a slot that points to an
3564 * extent that ends before our start offset, so skip it.
3565 */
3566 if (extent_end <= start) {
3567 path->slots[0]++;
3568 continue;
3569 }
3570
3571 /* We have an implicit hole, NO_HOLES feature is likely set. */
3572 if (last_extent_end < key.offset) {
3573 u64 search_start = last_extent_end;
3574 u64 found_start;
3575
3576 /*
3577 * First iteration, @start matches @offset and it's
3578 * within the hole.
3579 */
3580 if (start == offset)
3581 search_start = offset;
3582
3583 found = find_desired_extent_in_hole(inode, whence,
3584 delalloc_cached_state,
3585 search_start,
3586 key.offset - 1,
3587 &found_start);
3588 if (found) {
3589 start = found_start;
3590 break;
3591 }
3592 /*
3593 * Didn't find data or a hole (due to delalloc) in the
			 * implicit hole range, so we need to analyze the extent.
3595 */
3596 }
3597
3598 extent = btrfs_item_ptr(leaf, path->slots[0],
3599 struct btrfs_file_extent_item);
3600 type = btrfs_file_extent_type(leaf, extent);
3601
3602 /*
3603 * Can't access the extent's disk_bytenr field if this is an
3604 * inline extent, since at that offset, it's where the extent
3605 * data starts.
3606 */
3607 if (type == BTRFS_FILE_EXTENT_PREALLOC ||
3608 (type == BTRFS_FILE_EXTENT_REG &&
3609 btrfs_file_extent_disk_bytenr(leaf, extent) == 0)) {
3610 /*
3611 * Explicit hole or prealloc extent, search for delalloc.
3612 * A prealloc extent is treated like a hole.
3613 */
3614 u64 search_start = key.offset;
3615 u64 found_start;
3616
3617 /*
3618 * First iteration, @start matches @offset and it's
3619 * within the hole.
3620 */
3621 if (start == offset)
3622 search_start = offset;
3623
3624 found = find_desired_extent_in_hole(inode, whence,
3625 delalloc_cached_state,
3626 search_start,
3627 extent_end - 1,
3628 &found_start);
3629 if (found) {
3630 start = found_start;
3631 break;
3632 }
3633 /*
			 * Didn't find data or a hole (due to delalloc) in the
			 * explicit hole or prealloc extent's range, so we need
			 * to analyze the next extent item.
3637 */
3638 } else {
3639 /*
3640 * Found a regular or inline extent.
3641 * If we are seeking for data, adjust the start offset
3642 * and stop, we're done.
3643 */
3644 if (whence == SEEK_DATA) {
3645 start = max_t(u64, key.offset, offset);
3646 found = true;
3647 break;
3648 }
3649 /*
3650 * Else, we are seeking for a hole, check the next file
3651 * extent item.
3652 */
3653 }
3654
3655 start = extent_end;
3656 last_extent_end = extent_end;
3657 path->slots[0]++;
3658 if (fatal_signal_pending(current)) {
3659 ret = -EINTR;
3660 goto out;
3661 }
3662 cond_resched();
3663 }
3664
3665 /* We have an implicit hole from the last extent found up to i_size. */
3666 if (!found && start < i_size) {
3667 found = find_desired_extent_in_hole(inode, whence,
3668 delalloc_cached_state, start,
3669 i_size - 1, &start);
3670 if (!found)
3671 start = i_size;
3672 }
3673
3674out:
3675 unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3676 btrfs_free_path(path);
3677
3678 if (ret < 0)
3679 return ret;
3680
3681 if (whence == SEEK_DATA && start >= i_size)
3682 return -ENXIO;
3683
3684 return min_t(loff_t, start, i_size);
3685}
3686
3687static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
3688{
3689 struct inode *inode = file->f_mapping->host;
3690
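	/*
	 * SEEK_SET, SEEK_CUR and SEEK_END need no btrfs specific handling.
	 * Only SEEK_DATA and SEEK_HOLE have to inspect the extent layout,
	 * which is done under the shared inode lock.
	 */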
3691 switch (whence) {
3692 default:
3693 return generic_file_llseek(file, offset, whence);
3694 case SEEK_DATA:
3695 case SEEK_HOLE:
3696 btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
3697 offset = find_desired_extent(file, offset, whence);
3698 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
3699 break;
3700 }
3701
3702 if (offset < 0)
3703 return offset;
3704
3705 return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3706}
3707
3708static int btrfs_file_open(struct inode *inode, struct file *filp)
3709{
3710 int ret;
3711
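	/*
	 * Advertise support for non-blocking IO (RWF_NOWAIT / IOCB_NOWAIT)
	 * and for async buffered reads and writes (used by io_uring).
	 */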
3712 filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC | FMODE_BUF_WASYNC;
3713
3714 ret = fsverity_file_open(inode, filp);
3715 if (ret)
3716 return ret;
3717 return generic_file_open(inode, filp);
3718}
3719
3720static int check_direct_read(struct btrfs_fs_info *fs_info,
3721 const struct iov_iter *iter, loff_t offset)
3722{
3723 int ret;
3724 int i, seg;
3725
3726 ret = check_direct_IO(fs_info, iter, offset);
3727 if (ret < 0)
3728 return ret;
3729
3730 if (!iter_is_iovec(iter))
3731 return 0;
3732
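	/*
	 * Direct reads do not allow an iovec array where two segments share
	 * the same base address, so reject such iterators up front.
	 */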
3733 for (seg = 0; seg < iter->nr_segs; seg++)
3734 for (i = seg + 1; i < iter->nr_segs; i++)
3735 if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
3736 return -EINVAL;
3737 return 0;
3738}
3739
3740static ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to)
3741{
3742 struct inode *inode = file_inode(iocb->ki_filp);
3743 size_t prev_left = 0;
3744 ssize_t read = 0;
3745 ssize_t ret;
3746
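	/*
	 * Direct IO bypasses the page cache, but fsverity data must be read
	 * and verified through it, so return 0 to make the caller
	 * (btrfs_file_read_iter()) fall back to a buffered read.
	 */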
3747 if (fsverity_active(inode))
3748 return 0;
3749
3750 if (check_direct_read(btrfs_sb(inode->i_sb), to, iocb->ki_pos))
3751 return 0;
3752
3753 btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
3754again:
	/*
	 * This is similar to what we do for direct IO writes, see the comment
	 * at btrfs_direct_write(), but we also disable page faults in addition
	 * to disabling them only at the iov_iter level. This is because when
	 * reading from a hole or prealloc extent, iomap calls iov_iter_zero(),
	 * which can still fault in pages despite ->nofault being set to true
	 * on our 'to' iov_iter.
	 *
	 * The difference to direct IO writes is that we deadlock when trying
	 * to lock the extent range in the inode's tree during the page reads
	 * triggered by the fault in (while for writes it is due to waiting for
	 * our own ordered extent). This is because for direct IO reads,
	 * btrfs_dio_iomap_begin() returns with the extent range locked, which
	 * is only unlocked in the endio callback (end_bio_extent_readpage()).
	 */
3770 pagefault_disable();
3771 to->nofault = true;
3772 ret = btrfs_dio_read(iocb, to, read);
3773 to->nofault = false;
3774 pagefault_enable();
3775
3776 /* No increment (+=) because iomap returns a cumulative value. */
3777 if (ret > 0)
3778 read = ret;
3779
3780 if (iov_iter_count(to) > 0 && (ret == -EFAULT || ret > 0)) {
3781 const size_t left = iov_iter_count(to);
3782
3783 if (left == prev_left) {
			/*
			 * We didn't make any progress since the last attempt,
			 * fall back to a buffered read for the remainder of
			 * the range. This is just to avoid any possibility of
			 * looping for too long.
			 */
3790 ret = read;
3791 } else {
3792 /*
3793 * We made some progress since the last retry or this is
3794 * the first time we are retrying. Fault in as many pages
3795 * as possible and retry.
3796 */
3797 fault_in_iov_iter_writeable(to, left);
3798 prev_left = left;
3799 goto again;
3800 }
3801 }
3802 btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
3803 return ret < 0 ? ret : read;
3804}
3805
3806static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
3807{
3808 ssize_t ret = 0;
3809
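	/*
	 * Attempt a direct read first. If it was short, or it was rejected
	 * and returned 0 with bytes still left in the iterator, finish the
	 * remainder with a buffered read; @ret carries the bytes already
	 * read so that filemap_read() can account for them.
	 */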
3810 if (iocb->ki_flags & IOCB_DIRECT) {
3811 ret = btrfs_direct_read(iocb, to);
3812 if (ret < 0 || !iov_iter_count(to) ||
3813 iocb->ki_pos >= i_size_read(file_inode(iocb->ki_filp)))
3814 return ret;
3815 }
3816
3817 return filemap_read(iocb, to, ret);
3818}
3819
3820const struct file_operations btrfs_file_operations = {
3821 .llseek = btrfs_file_llseek,
3822 .read_iter = btrfs_file_read_iter,
3823 .splice_read = generic_file_splice_read,
3824 .write_iter = btrfs_file_write_iter,
3825 .splice_write = iter_file_splice_write,
3826 .mmap = btrfs_file_mmap,
3827 .open = btrfs_file_open,
3828 .release = btrfs_release_file,
3829 .get_unmapped_area = thp_get_unmapped_area,
3830 .fsync = btrfs_sync_file,
3831 .fallocate = btrfs_fallocate,
3832 .unlocked_ioctl = btrfs_ioctl,
3833#ifdef CONFIG_COMPAT
3834 .compat_ioctl = btrfs_compat_ioctl,
3835#endif
3836 .remap_file_range = btrfs_remap_file_range,
3837};
3838
3839int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
3840{
3841 int ret;
3842
	/*
	 * So with compression we will find and lock a dirty page and clear the
	 * first one as dirty, set up an async extent, and immediately return
	 * with the entire range locked but with nobody actually marked with
	 * writeback. So we can't just filemap_write_and_wait_range() and
	 * expect it to work since it will just kick off a thread to do the
	 * actual work. So we need to call filemap_fdatawrite_range _again_
	 * since it will wait on the page lock, which won't be unlocked until
	 * after the pages have been marked as writeback and so we're good to
	 * go from there. We have to do this otherwise we'll miss the ordered
	 * extents and that results in badness. Please Josef, do not think you
	 * know better and pull this out at some point in the future, it is
	 * right and you are wrong.
	 */
3857 ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3858 if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
3859 &BTRFS_I(inode)->runtime_flags))
3860 ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3861
3862 return ret;
3863}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2007 Oracle. All rights reserved.
4 */
5
6#include <linux/fs.h>
7#include <linux/pagemap.h>
8#include <linux/highmem.h>
9#include <linux/time.h>
10#include <linux/init.h>
11#include <linux/string.h>
12#include <linux/backing-dev.h>
13#include <linux/mpage.h>
14#include <linux/falloc.h>
15#include <linux/swap.h>
16#include <linux/writeback.h>
17#include <linux/compat.h>
18#include <linux/slab.h>
19#include <linux/btrfs.h>
20#include <linux/uio.h>
21#include <linux/iversion.h>
22#include "ctree.h"
23#include "disk-io.h"
24#include "transaction.h"
25#include "btrfs_inode.h"
26#include "print-tree.h"
27#include "tree-log.h"
28#include "locking.h"
29#include "volumes.h"
30#include "qgroup.h"
31#include "compression.h"
32
33static struct kmem_cache *btrfs_inode_defrag_cachep;
34/*
35 * when auto defrag is enabled we
36 * queue up these defrag structs to remember which
37 * inodes need defragging passes
38 */
39struct inode_defrag {
40 struct rb_node rb_node;
41 /* objectid */
42 u64 ino;
43 /*
44 * transid where the defrag was added, we search for
45 * extents newer than this
46 */
47 u64 transid;
48
49 /* root objectid */
50 u64 root;
51
52 /* last offset we were able to defrag */
53 u64 last_offset;
54
55 /* if we've wrapped around back to zero once already */
56 int cycled;
57};
58
59static int __compare_inode_defrag(struct inode_defrag *defrag1,
60 struct inode_defrag *defrag2)
61{
62 if (defrag1->root > defrag2->root)
63 return 1;
64 else if (defrag1->root < defrag2->root)
65 return -1;
66 else if (defrag1->ino > defrag2->ino)
67 return 1;
68 else if (defrag1->ino < defrag2->ino)
69 return -1;
70 else
71 return 0;
72}
73
74/* pop a record for an inode into the defrag tree. The lock
75 * must be held already
76 *
77 * If you're inserting a record for an older transid than an
78 * existing record, the transid already in the tree is lowered
79 *
80 * If an existing record is found the defrag item you
81 * pass in is freed
82 */
83static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
84 struct inode_defrag *defrag)
85{
86 struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
87 struct inode_defrag *entry;
88 struct rb_node **p;
89 struct rb_node *parent = NULL;
90 int ret;
91
92 p = &fs_info->defrag_inodes.rb_node;
93 while (*p) {
94 parent = *p;
95 entry = rb_entry(parent, struct inode_defrag, rb_node);
96
97 ret = __compare_inode_defrag(defrag, entry);
98 if (ret < 0)
99 p = &parent->rb_left;
100 else if (ret > 0)
101 p = &parent->rb_right;
102 else {
103 /* if we're reinserting an entry for
104 * an old defrag run, make sure to
105 * lower the transid of our existing record
106 */
107 if (defrag->transid < entry->transid)
108 entry->transid = defrag->transid;
109 if (defrag->last_offset > entry->last_offset)
110 entry->last_offset = defrag->last_offset;
111 return -EEXIST;
112 }
113 }
114 set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
115 rb_link_node(&defrag->rb_node, parent, p);
116 rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
117 return 0;
118}
119
120static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
121{
122 if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
123 return 0;
124
125 if (btrfs_fs_closing(fs_info))
126 return 0;
127
128 return 1;
129}
130
131/*
132 * insert a defrag record for this inode if auto defrag is
133 * enabled
134 */
135int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
136 struct btrfs_inode *inode)
137{
138 struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
139 struct btrfs_root *root = inode->root;
140 struct inode_defrag *defrag;
141 u64 transid;
142 int ret;
143
144 if (!__need_auto_defrag(fs_info))
145 return 0;
146
147 if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
148 return 0;
149
150 if (trans)
151 transid = trans->transid;
152 else
153 transid = inode->root->last_trans;
154
155 defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
156 if (!defrag)
157 return -ENOMEM;
158
159 defrag->ino = btrfs_ino(inode);
160 defrag->transid = transid;
161 defrag->root = root->root_key.objectid;
162
163 spin_lock(&fs_info->defrag_inodes_lock);
164 if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
165 /*
166 * If we set IN_DEFRAG flag and evict the inode from memory,
167 * and then re-read this inode, this new inode doesn't have
168 * IN_DEFRAG flag. At the case, we may find the existed defrag.
169 */
170 ret = __btrfs_add_inode_defrag(inode, defrag);
171 if (ret)
172 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
173 } else {
174 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
175 }
176 spin_unlock(&fs_info->defrag_inodes_lock);
177 return 0;
178}
179
180/*
181 * Requeue the defrag object. If there is a defrag object that points to
182 * the same inode in the tree, we will merge them together (by
183 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
184 */
185static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
186 struct inode_defrag *defrag)
187{
188 struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
189 int ret;
190
191 if (!__need_auto_defrag(fs_info))
192 goto out;
193
194 /*
195 * Here we don't check the IN_DEFRAG flag, because we need merge
196 * them together.
197 */
198 spin_lock(&fs_info->defrag_inodes_lock);
199 ret = __btrfs_add_inode_defrag(inode, defrag);
200 spin_unlock(&fs_info->defrag_inodes_lock);
201 if (ret)
202 goto out;
203 return;
204out:
205 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
206}
207
208/*
209 * pick the defragable inode that we want, if it doesn't exist, we will get
210 * the next one.
211 */
212static struct inode_defrag *
213btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
214{
215 struct inode_defrag *entry = NULL;
216 struct inode_defrag tmp;
217 struct rb_node *p;
218 struct rb_node *parent = NULL;
219 int ret;
220
221 tmp.ino = ino;
222 tmp.root = root;
223
224 spin_lock(&fs_info->defrag_inodes_lock);
225 p = fs_info->defrag_inodes.rb_node;
226 while (p) {
227 parent = p;
228 entry = rb_entry(parent, struct inode_defrag, rb_node);
229
230 ret = __compare_inode_defrag(&tmp, entry);
231 if (ret < 0)
232 p = parent->rb_left;
233 else if (ret > 0)
234 p = parent->rb_right;
235 else
236 goto out;
237 }
238
239 if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
240 parent = rb_next(parent);
241 if (parent)
242 entry = rb_entry(parent, struct inode_defrag, rb_node);
243 else
244 entry = NULL;
245 }
246out:
247 if (entry)
248 rb_erase(parent, &fs_info->defrag_inodes);
249 spin_unlock(&fs_info->defrag_inodes_lock);
250 return entry;
251}
252
253void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
254{
255 struct inode_defrag *defrag;
256 struct rb_node *node;
257
258 spin_lock(&fs_info->defrag_inodes_lock);
259 node = rb_first(&fs_info->defrag_inodes);
260 while (node) {
261 rb_erase(node, &fs_info->defrag_inodes);
262 defrag = rb_entry(node, struct inode_defrag, rb_node);
263 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
264
265 cond_resched_lock(&fs_info->defrag_inodes_lock);
266
267 node = rb_first(&fs_info->defrag_inodes);
268 }
269 spin_unlock(&fs_info->defrag_inodes_lock);
270}
271
272#define BTRFS_DEFRAG_BATCH 1024
273
274static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
275 struct inode_defrag *defrag)
276{
277 struct btrfs_root *inode_root;
278 struct inode *inode;
279 struct btrfs_key key;
280 struct btrfs_ioctl_defrag_range_args range;
281 int num_defrag;
282 int index;
283 int ret;
284
285 /* get the inode */
286 key.objectid = defrag->root;
287 key.type = BTRFS_ROOT_ITEM_KEY;
288 key.offset = (u64)-1;
289
290 index = srcu_read_lock(&fs_info->subvol_srcu);
291
292 inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
293 if (IS_ERR(inode_root)) {
294 ret = PTR_ERR(inode_root);
295 goto cleanup;
296 }
297
298 key.objectid = defrag->ino;
299 key.type = BTRFS_INODE_ITEM_KEY;
300 key.offset = 0;
301 inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
302 if (IS_ERR(inode)) {
303 ret = PTR_ERR(inode);
304 goto cleanup;
305 }
306 srcu_read_unlock(&fs_info->subvol_srcu, index);
307
308 /* do a chunk of defrag */
309 clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
310 memset(&range, 0, sizeof(range));
311 range.len = (u64)-1;
312 range.start = defrag->last_offset;
313
314 sb_start_write(fs_info->sb);
315 num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
316 BTRFS_DEFRAG_BATCH);
317 sb_end_write(fs_info->sb);
318 /*
319 * if we filled the whole defrag batch, there
320 * must be more work to do. Queue this defrag
321 * again
322 */
323 if (num_defrag == BTRFS_DEFRAG_BATCH) {
324 defrag->last_offset = range.start;
325 btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
326 } else if (defrag->last_offset && !defrag->cycled) {
327 /*
328 * we didn't fill our defrag batch, but
329 * we didn't start at zero. Make sure we loop
330 * around to the start of the file.
331 */
332 defrag->last_offset = 0;
333 defrag->cycled = 1;
334 btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
335 } else {
336 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
337 }
338
339 iput(inode);
340 return 0;
341cleanup:
342 srcu_read_unlock(&fs_info->subvol_srcu, index);
343 kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
344 return ret;
345}
346
347/*
348 * run through the list of inodes in the FS that need
349 * defragging
350 */
351int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
352{
353 struct inode_defrag *defrag;
354 u64 first_ino = 0;
355 u64 root_objectid = 0;
356
357 atomic_inc(&fs_info->defrag_running);
358 while (1) {
359 /* Pause the auto defragger. */
360 if (test_bit(BTRFS_FS_STATE_REMOUNTING,
361 &fs_info->fs_state))
362 break;
363
364 if (!__need_auto_defrag(fs_info))
365 break;
366
367 /* find an inode to defrag */
368 defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
369 first_ino);
370 if (!defrag) {
371 if (root_objectid || first_ino) {
372 root_objectid = 0;
373 first_ino = 0;
374 continue;
375 } else {
376 break;
377 }
378 }
379
380 first_ino = defrag->ino + 1;
381 root_objectid = defrag->root;
382
383 __btrfs_run_defrag_inode(fs_info, defrag);
384 }
385 atomic_dec(&fs_info->defrag_running);
386
387 /*
388 * during unmount, we use the transaction_wait queue to
389 * wait for the defragger to stop
390 */
391 wake_up(&fs_info->transaction_wait);
392 return 0;
393}
394
395/* simple helper to fault in pages and copy. This should go away
396 * and be replaced with calls into generic code.
397 */
398static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
399 struct page **prepared_pages,
400 struct iov_iter *i)
401{
402 size_t copied = 0;
403 size_t total_copied = 0;
404 int pg = 0;
405 int offset = pos & (PAGE_SIZE - 1);
406
407 while (write_bytes > 0) {
408 size_t count = min_t(size_t,
409 PAGE_SIZE - offset, write_bytes);
410 struct page *page = prepared_pages[pg];
411 /*
412 * Copy data from userspace to the current page
413 */
414 copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
415
416 /* Flush processor's dcache for this page */
417 flush_dcache_page(page);
418
419 /*
420 * if we get a partial write, we can end up with
421 * partially up to date pages. These add
422 * a lot of complexity, so make sure they don't
423 * happen by forcing this copy to be retried.
424 *
425 * The rest of the btrfs_file_write code will fall
426 * back to page at a time copies after we return 0.
427 */
428 if (!PageUptodate(page) && copied < count)
429 copied = 0;
430
431 iov_iter_advance(i, copied);
432 write_bytes -= copied;
433 total_copied += copied;
434
435 /* Return to btrfs_file_write_iter to fault page */
436 if (unlikely(copied == 0))
437 break;
438
439 if (copied < PAGE_SIZE - offset) {
440 offset += copied;
441 } else {
442 pg++;
443 offset = 0;
444 }
445 }
446 return total_copied;
447}
448
449/*
450 * unlocks pages after btrfs_file_write is done with them
451 */
452static void btrfs_drop_pages(struct page **pages, size_t num_pages)
453{
454 size_t i;
455 for (i = 0; i < num_pages; i++) {
456 /* page checked is some magic around finding pages that
457 * have been modified without going through btrfs_set_page_dirty
458 * clear it here. There should be no need to mark the pages
459 * accessed as prepare_pages should have marked them accessed
460 * in prepare_pages via find_or_create_page()
461 */
462 ClearPageChecked(pages[i]);
463 unlock_page(pages[i]);
464 put_page(pages[i]);
465 }
466}
467
468static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
469 const u64 start,
470 const u64 len,
471 struct extent_state **cached_state)
472{
473 u64 search_start = start;
474 const u64 end = start + len - 1;
475
476 while (search_start < end) {
477 const u64 search_len = end - search_start + 1;
478 struct extent_map *em;
479 u64 em_len;
480 int ret = 0;
481
482 em = btrfs_get_extent(inode, NULL, 0, search_start,
483 search_len, 0);
484 if (IS_ERR(em))
485 return PTR_ERR(em);
486
487 if (em->block_start != EXTENT_MAP_HOLE)
488 goto next;
489
490 em_len = em->len;
491 if (em->start < search_start)
492 em_len -= search_start - em->start;
493 if (em_len > search_len)
494 em_len = search_len;
495
496 ret = set_extent_bit(&inode->io_tree, search_start,
497 search_start + em_len - 1,
498 EXTENT_DELALLOC_NEW,
499 NULL, cached_state, GFP_NOFS);
500next:
501 search_start = extent_map_end(em);
502 free_extent_map(em);
503 if (ret)
504 return ret;
505 }
506 return 0;
507}
508
509/*
510 * after copy_from_user, pages need to be dirtied and we need to make
511 * sure holes are created between the current EOF and the start of
512 * any next extents (if required).
513 *
514 * this also makes the decision about creating an inline extent vs
515 * doing real data extents, marking pages dirty and delalloc as required.
516 */
517int btrfs_dirty_pages(struct inode *inode, struct page **pages,
518 size_t num_pages, loff_t pos, size_t write_bytes,
519 struct extent_state **cached)
520{
521 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
522 int err = 0;
523 int i;
524 u64 num_bytes;
525 u64 start_pos;
526 u64 end_of_last_block;
527 u64 end_pos = pos + write_bytes;
528 loff_t isize = i_size_read(inode);
529 unsigned int extra_bits = 0;
530
531 start_pos = pos & ~((u64) fs_info->sectorsize - 1);
532 num_bytes = round_up(write_bytes + pos - start_pos,
533 fs_info->sectorsize);
534
535 end_of_last_block = start_pos + num_bytes - 1;
536
537 if (!btrfs_is_free_space_inode(BTRFS_I(inode))) {
538 if (start_pos >= isize &&
539 !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) {
540 /*
541 * There can't be any extents following eof in this case
542 * so just set the delalloc new bit for the range
543 * directly.
544 */
545 extra_bits |= EXTENT_DELALLOC_NEW;
546 } else {
547 err = btrfs_find_new_delalloc_bytes(BTRFS_I(inode),
548 start_pos,
549 num_bytes, cached);
550 if (err)
551 return err;
552 }
553 }
554
555 err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
556 extra_bits, cached, 0);
557 if (err)
558 return err;
559
560 for (i = 0; i < num_pages; i++) {
561 struct page *p = pages[i];
562 SetPageUptodate(p);
563 ClearPageChecked(p);
564 set_page_dirty(p);
565 }
566
567 /*
568 * we've only changed i_size in ram, and we haven't updated
569 * the disk i_size. There is no need to log the inode
570 * at this time.
571 */
572 if (end_pos > isize)
573 i_size_write(inode, end_pos);
574 return 0;
575}
576
577/*
578 * this drops all the extents in the cache that intersect the range
579 * [start, end]. Existing extents are split as required.
580 */
581void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
582 int skip_pinned)
583{
584 struct extent_map *em;
585 struct extent_map *split = NULL;
586 struct extent_map *split2 = NULL;
587 struct extent_map_tree *em_tree = &inode->extent_tree;
588 u64 len = end - start + 1;
589 u64 gen;
590 int ret;
591 int testend = 1;
592 unsigned long flags;
593 int compressed = 0;
594 bool modified;
595
596 WARN_ON(end < start);
597 if (end == (u64)-1) {
598 len = (u64)-1;
599 testend = 0;
600 }
601 while (1) {
602 int no_splits = 0;
603
604 modified = false;
605 if (!split)
606 split = alloc_extent_map();
607 if (!split2)
608 split2 = alloc_extent_map();
609 if (!split || !split2)
610 no_splits = 1;
611
612 write_lock(&em_tree->lock);
613 em = lookup_extent_mapping(em_tree, start, len);
614 if (!em) {
615 write_unlock(&em_tree->lock);
616 break;
617 }
618 flags = em->flags;
619 gen = em->generation;
620 if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
621 if (testend && em->start + em->len >= start + len) {
622 free_extent_map(em);
623 write_unlock(&em_tree->lock);
624 break;
625 }
626 start = em->start + em->len;
627 if (testend)
628 len = start + len - (em->start + em->len);
629 free_extent_map(em);
630 write_unlock(&em_tree->lock);
631 continue;
632 }
633 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
634 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
635 clear_bit(EXTENT_FLAG_LOGGING, &flags);
636 modified = !list_empty(&em->list);
637 if (no_splits)
638 goto next;
639
640 if (em->start < start) {
641 split->start = em->start;
642 split->len = start - em->start;
643
644 if (em->block_start < EXTENT_MAP_LAST_BYTE) {
645 split->orig_start = em->orig_start;
646 split->block_start = em->block_start;
647
648 if (compressed)
649 split->block_len = em->block_len;
650 else
651 split->block_len = split->len;
652 split->orig_block_len = max(split->block_len,
653 em->orig_block_len);
654 split->ram_bytes = em->ram_bytes;
655 } else {
656 split->orig_start = split->start;
657 split->block_len = 0;
658 split->block_start = em->block_start;
659 split->orig_block_len = 0;
660 split->ram_bytes = split->len;
661 }
662
663 split->generation = gen;
664 split->bdev = em->bdev;
665 split->flags = flags;
666 split->compress_type = em->compress_type;
667 replace_extent_mapping(em_tree, em, split, modified);
668 free_extent_map(split);
669 split = split2;
670 split2 = NULL;
671 }
672 if (testend && em->start + em->len > start + len) {
673 u64 diff = start + len - em->start;
674
675 split->start = start + len;
676 split->len = em->start + em->len - (start + len);
677 split->bdev = em->bdev;
678 split->flags = flags;
679 split->compress_type = em->compress_type;
680 split->generation = gen;
681
682 if (em->block_start < EXTENT_MAP_LAST_BYTE) {
683 split->orig_block_len = max(em->block_len,
684 em->orig_block_len);
685
686 split->ram_bytes = em->ram_bytes;
687 if (compressed) {
688 split->block_len = em->block_len;
689 split->block_start = em->block_start;
690 split->orig_start = em->orig_start;
691 } else {
692 split->block_len = split->len;
693 split->block_start = em->block_start
694 + diff;
695 split->orig_start = em->orig_start;
696 }
697 } else {
698 split->ram_bytes = split->len;
699 split->orig_start = split->start;
700 split->block_len = 0;
701 split->block_start = em->block_start;
702 split->orig_block_len = 0;
703 }
704
705 if (extent_map_in_tree(em)) {
706 replace_extent_mapping(em_tree, em, split,
707 modified);
708 } else {
709 ret = add_extent_mapping(em_tree, split,
710 modified);
711 ASSERT(ret == 0); /* Logic error */
712 }
713 free_extent_map(split);
714 split = NULL;
715 }
716next:
717 if (extent_map_in_tree(em))
718 remove_extent_mapping(em_tree, em);
719 write_unlock(&em_tree->lock);
720
721 /* once for us */
722 free_extent_map(em);
723 /* once for the tree*/
724 free_extent_map(em);
725 }
726 if (split)
727 free_extent_map(split);
728 if (split2)
729 free_extent_map(split2);
730}
731
732/*
733 * this is very complex, but the basic idea is to drop all extents
734 * in the range start - end. hint_block is filled in with a block number
735 * that would be a good hint to the block allocator for this file.
736 *
737 * If an extent intersects the range but is not entirely inside the range
738 * it is either truncated or split. Anything entirely inside the range
739 * is deleted from the tree.
740 */
741int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
742 struct btrfs_root *root, struct inode *inode,
743 struct btrfs_path *path, u64 start, u64 end,
744 u64 *drop_end, int drop_cache,
745 int replace_extent,
746 u32 extent_item_size,
747 int *key_inserted)
748{
749 struct btrfs_fs_info *fs_info = root->fs_info;
750 struct extent_buffer *leaf;
751 struct btrfs_file_extent_item *fi;
752 struct btrfs_key key;
753 struct btrfs_key new_key;
754 u64 ino = btrfs_ino(BTRFS_I(inode));
755 u64 search_start = start;
756 u64 disk_bytenr = 0;
757 u64 num_bytes = 0;
758 u64 extent_offset = 0;
759 u64 extent_end = 0;
760 u64 last_end = start;
761 int del_nr = 0;
762 int del_slot = 0;
763 int extent_type;
764 int recow;
765 int ret;
766 int modify_tree = -1;
767 int update_refs;
768 int found = 0;
769 int leafs_visited = 0;
770
771 if (drop_cache)
772 btrfs_drop_extent_cache(BTRFS_I(inode), start, end - 1, 0);
773
774 if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent)
775 modify_tree = 0;
776
777 update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
778 root == fs_info->tree_root);
779 while (1) {
780 recow = 0;
781 ret = btrfs_lookup_file_extent(trans, root, path, ino,
782 search_start, modify_tree);
783 if (ret < 0)
784 break;
785 if (ret > 0 && path->slots[0] > 0 && search_start == start) {
786 leaf = path->nodes[0];
787 btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
788 if (key.objectid == ino &&
789 key.type == BTRFS_EXTENT_DATA_KEY)
790 path->slots[0]--;
791 }
792 ret = 0;
793 leafs_visited++;
794next_slot:
795 leaf = path->nodes[0];
796 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
797 BUG_ON(del_nr > 0);
798 ret = btrfs_next_leaf(root, path);
799 if (ret < 0)
800 break;
801 if (ret > 0) {
802 ret = 0;
803 break;
804 }
805 leafs_visited++;
806 leaf = path->nodes[0];
807 recow = 1;
808 }
809
810 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
811
812 if (key.objectid > ino)
813 break;
814 if (WARN_ON_ONCE(key.objectid < ino) ||
815 key.type < BTRFS_EXTENT_DATA_KEY) {
816 ASSERT(del_nr == 0);
817 path->slots[0]++;
818 goto next_slot;
819 }
820 if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
821 break;
822
823 fi = btrfs_item_ptr(leaf, path->slots[0],
824 struct btrfs_file_extent_item);
825 extent_type = btrfs_file_extent_type(leaf, fi);
826
827 if (extent_type == BTRFS_FILE_EXTENT_REG ||
828 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
829 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
830 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
831 extent_offset = btrfs_file_extent_offset(leaf, fi);
832 extent_end = key.offset +
833 btrfs_file_extent_num_bytes(leaf, fi);
834 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
835 extent_end = key.offset +
836 btrfs_file_extent_inline_len(leaf,
837 path->slots[0], fi);
838 } else {
839 /* can't happen */
840 BUG();
841 }
842
843 /*
844 * Don't skip extent items representing 0 byte lengths. They
845 * used to be created (bug) if while punching holes we hit
846 * -ENOSPC condition. So if we find one here, just ensure we
847 * delete it, otherwise we would insert a new file extent item
848 * with the same key (offset) as that 0 bytes length file
849 * extent item in the call to setup_items_for_insert() later
850 * in this function.
851 */
852 if (extent_end == key.offset && extent_end >= search_start) {
853 last_end = extent_end;
854 goto delete_extent_item;
855 }
856
857 if (extent_end <= search_start) {
858 path->slots[0]++;
859 goto next_slot;
860 }
861
862 found = 1;
863 search_start = max(key.offset, start);
864 if (recow || !modify_tree) {
865 modify_tree = -1;
866 btrfs_release_path(path);
867 continue;
868 }
869
870 /*
871 * | - range to drop - |
872 * | -------- extent -------- |
873 */
874 if (start > key.offset && end < extent_end) {
875 BUG_ON(del_nr > 0);
876 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
877 ret = -EOPNOTSUPP;
878 break;
879 }
880
881 memcpy(&new_key, &key, sizeof(new_key));
882 new_key.offset = start;
883 ret = btrfs_duplicate_item(trans, root, path,
884 &new_key);
885 if (ret == -EAGAIN) {
886 btrfs_release_path(path);
887 continue;
888 }
889 if (ret < 0)
890 break;
891
892 leaf = path->nodes[0];
893 fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
894 struct btrfs_file_extent_item);
895 btrfs_set_file_extent_num_bytes(leaf, fi,
896 start - key.offset);
897
898 fi = btrfs_item_ptr(leaf, path->slots[0],
899 struct btrfs_file_extent_item);
900
901 extent_offset += start - key.offset;
902 btrfs_set_file_extent_offset(leaf, fi, extent_offset);
903 btrfs_set_file_extent_num_bytes(leaf, fi,
904 extent_end - start);
905 btrfs_mark_buffer_dirty(leaf);
906
907 if (update_refs && disk_bytenr > 0) {
908 ret = btrfs_inc_extent_ref(trans, root,
909 disk_bytenr, num_bytes, 0,
910 root->root_key.objectid,
911 new_key.objectid,
912 start - extent_offset);
913 BUG_ON(ret); /* -ENOMEM */
914 }
915 key.offset = start;
916 }
917 /*
918 * From here on out we will have actually dropped something, so
919 * last_end can be updated.
920 */
921 last_end = extent_end;
922
923 /*
924 * | ---- range to drop ----- |
925 * | -------- extent -------- |
926 */
927 if (start <= key.offset && end < extent_end) {
928 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
929 ret = -EOPNOTSUPP;
930 break;
931 }
932
933 memcpy(&new_key, &key, sizeof(new_key));
934 new_key.offset = end;
935 btrfs_set_item_key_safe(fs_info, path, &new_key);
936
937 extent_offset += end - key.offset;
938 btrfs_set_file_extent_offset(leaf, fi, extent_offset);
939 btrfs_set_file_extent_num_bytes(leaf, fi,
940 extent_end - end);
941 btrfs_mark_buffer_dirty(leaf);
942 if (update_refs && disk_bytenr > 0)
943 inode_sub_bytes(inode, end - key.offset);
944 break;
945 }
946
947 search_start = extent_end;
948 /*
949 * | ---- range to drop ----- |
950 * | -------- extent -------- |
951 */
952 if (start > key.offset && end >= extent_end) {
953 BUG_ON(del_nr > 0);
954 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
955 ret = -EOPNOTSUPP;
956 break;
957 }
958
959 btrfs_set_file_extent_num_bytes(leaf, fi,
960 start - key.offset);
961 btrfs_mark_buffer_dirty(leaf);
962 if (update_refs && disk_bytenr > 0)
963 inode_sub_bytes(inode, extent_end - start);
964 if (end == extent_end)
965 break;
966
967 path->slots[0]++;
968 goto next_slot;
969 }
970
971 /*
972 * | ---- range to drop ----- |
973 * | ------ extent ------ |
974 */
975 if (start <= key.offset && end >= extent_end) {
976delete_extent_item:
977 if (del_nr == 0) {
978 del_slot = path->slots[0];
979 del_nr = 1;
980 } else {
981 BUG_ON(del_slot + del_nr != path->slots[0]);
982 del_nr++;
983 }
984
985 if (update_refs &&
986 extent_type == BTRFS_FILE_EXTENT_INLINE) {
987 inode_sub_bytes(inode,
988 extent_end - key.offset);
989 extent_end = ALIGN(extent_end,
990 fs_info->sectorsize);
991 } else if (update_refs && disk_bytenr > 0) {
992 ret = btrfs_free_extent(trans, root,
993 disk_bytenr, num_bytes, 0,
994 root->root_key.objectid,
995 key.objectid, key.offset -
996 extent_offset);
997 BUG_ON(ret); /* -ENOMEM */
998 inode_sub_bytes(inode,
999 extent_end - key.offset);
1000 }
1001
1002 if (end == extent_end)
1003 break;
1004
1005 if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
1006 path->slots[0]++;
1007 goto next_slot;
1008 }
1009
1010 ret = btrfs_del_items(trans, root, path, del_slot,
1011 del_nr);
1012 if (ret) {
1013 btrfs_abort_transaction(trans, ret);
1014 break;
1015 }
1016
1017 del_nr = 0;
1018 del_slot = 0;
1019
1020 btrfs_release_path(path);
1021 continue;
1022 }
1023
1024 BUG_ON(1);
1025 }
1026
1027 if (!ret && del_nr > 0) {
1028 /*
1029 * Set path->slots[0] to first slot, so that after the delete
1030 * if items are move off from our leaf to its immediate left or
1031 * right neighbor leafs, we end up with a correct and adjusted
1032 * path->slots[0] for our insertion (if replace_extent != 0).
1033 */
1034 path->slots[0] = del_slot;
1035 ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
1036 if (ret)
1037 btrfs_abort_transaction(trans, ret);
1038 }
1039
1040 leaf = path->nodes[0];
1041 /*
1042 * If btrfs_del_items() was called, it might have deleted a leaf, in
1043 * which case it unlocked our path, so check path->locks[0] matches a
1044 * write lock.
1045 */
1046 if (!ret && replace_extent && leafs_visited == 1 &&
1047 (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
1048 path->locks[0] == BTRFS_WRITE_LOCK) &&
1049 btrfs_leaf_free_space(fs_info, leaf) >=
1050 sizeof(struct btrfs_item) + extent_item_size) {
1051
1052 key.objectid = ino;
1053 key.type = BTRFS_EXTENT_DATA_KEY;
1054 key.offset = start;
1055 if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
1056 struct btrfs_key slot_key;
1057
1058 btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
1059 if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
1060 path->slots[0]++;
1061 }
1062 setup_items_for_insert(root, path, &key,
1063 &extent_item_size,
1064 extent_item_size,
1065 sizeof(struct btrfs_item) +
1066 extent_item_size, 1);
1067 *key_inserted = 1;
1068 }
1069
1070 if (!replace_extent || !(*key_inserted))
1071 btrfs_release_path(path);
1072 if (drop_end)
1073 *drop_end = found ? min(end, last_end) : end;
1074 return ret;
1075}
1076
1077int btrfs_drop_extents(struct btrfs_trans_handle *trans,
1078 struct btrfs_root *root, struct inode *inode, u64 start,
1079 u64 end, int drop_cache)
1080{
1081 struct btrfs_path *path;
1082 int ret;
1083
1084 path = btrfs_alloc_path();
1085 if (!path)
1086 return -ENOMEM;
1087 ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
1088 drop_cache, 0, 0, NULL);
1089 btrfs_free_path(path);
1090 return ret;
1091}
1092
1093static int extent_mergeable(struct extent_buffer *leaf, int slot,
1094 u64 objectid, u64 bytenr, u64 orig_offset,
1095 u64 *start, u64 *end)
1096{
1097 struct btrfs_file_extent_item *fi;
1098 struct btrfs_key key;
1099 u64 extent_end;
1100
1101 if (slot < 0 || slot >= btrfs_header_nritems(leaf))
1102 return 0;
1103
1104 btrfs_item_key_to_cpu(leaf, &key, slot);
1105 if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
1106 return 0;
1107
1108 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
1109 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
1110 btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
1111 btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
1112 btrfs_file_extent_compression(leaf, fi) ||
1113 btrfs_file_extent_encryption(leaf, fi) ||
1114 btrfs_file_extent_other_encoding(leaf, fi))
1115 return 0;
1116
1117 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
1118 if ((*start && *start != key.offset) || (*end && *end != extent_end))
1119 return 0;
1120
1121 *start = key.offset;
1122 *end = extent_end;
1123 return 1;
1124}
1125
1126/*
1127 * Mark extent in the range start - end as written.
1128 *
1129 * This changes extent type from 'pre-allocated' to 'regular'. If only
1130 * part of extent is marked as written, the extent will be split into
1131 * two or three.
1132 */
1133int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
1134 struct btrfs_inode *inode, u64 start, u64 end)
1135{
1136 struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
1137 struct btrfs_root *root = inode->root;
1138 struct extent_buffer *leaf;
1139 struct btrfs_path *path;
1140 struct btrfs_file_extent_item *fi;
1141 struct btrfs_key key;
1142 struct btrfs_key new_key;
1143 u64 bytenr;
1144 u64 num_bytes;
1145 u64 extent_end;
1146 u64 orig_offset;
1147 u64 other_start;
1148 u64 other_end;
1149 u64 split;
1150 int del_nr = 0;
1151 int del_slot = 0;
1152 int recow;
1153 int ret;
1154 u64 ino = btrfs_ino(inode);
1155
1156 path = btrfs_alloc_path();
1157 if (!path)
1158 return -ENOMEM;
1159again:
1160 recow = 0;
1161 split = start;
1162 key.objectid = ino;
1163 key.type = BTRFS_EXTENT_DATA_KEY;
1164 key.offset = split;
1165
1166 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1167 if (ret < 0)
1168 goto out;
1169 if (ret > 0 && path->slots[0] > 0)
1170 path->slots[0]--;
1171
1172 leaf = path->nodes[0];
1173 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1174 if (key.objectid != ino ||
1175 key.type != BTRFS_EXTENT_DATA_KEY) {
1176 ret = -EINVAL;
1177 btrfs_abort_transaction(trans, ret);
1178 goto out;
1179 }
1180 fi = btrfs_item_ptr(leaf, path->slots[0],
1181 struct btrfs_file_extent_item);
1182 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
1183 ret = -EINVAL;
1184 btrfs_abort_transaction(trans, ret);
1185 goto out;
1186 }
1187 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
1188 if (key.offset > start || extent_end < end) {
1189 ret = -EINVAL;
1190 btrfs_abort_transaction(trans, ret);
1191 goto out;
1192 }
1193
1194 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1195 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1196 orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
1197 memcpy(&new_key, &key, sizeof(new_key));
1198
1199 if (start == key.offset && end < extent_end) {
1200 other_start = 0;
1201 other_end = start;
1202 if (extent_mergeable(leaf, path->slots[0] - 1,
1203 ino, bytenr, orig_offset,
1204 &other_start, &other_end)) {
1205 new_key.offset = end;
1206 btrfs_set_item_key_safe(fs_info, path, &new_key);
1207 fi = btrfs_item_ptr(leaf, path->slots[0],
1208 struct btrfs_file_extent_item);
1209 btrfs_set_file_extent_generation(leaf, fi,
1210 trans->transid);
1211 btrfs_set_file_extent_num_bytes(leaf, fi,
1212 extent_end - end);
1213 btrfs_set_file_extent_offset(leaf, fi,
1214 end - orig_offset);
1215 fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1216 struct btrfs_file_extent_item);
1217 btrfs_set_file_extent_generation(leaf, fi,
1218 trans->transid);
1219 btrfs_set_file_extent_num_bytes(leaf, fi,
1220 end - other_start);
1221 btrfs_mark_buffer_dirty(leaf);
1222 goto out;
1223 }
1224 }
1225
1226 if (start > key.offset && end == extent_end) {
1227 other_start = end;
1228 other_end = 0;
1229 if (extent_mergeable(leaf, path->slots[0] + 1,
1230 ino, bytenr, orig_offset,
1231 &other_start, &other_end)) {
1232 fi = btrfs_item_ptr(leaf, path->slots[0],
1233 struct btrfs_file_extent_item);
1234 btrfs_set_file_extent_num_bytes(leaf, fi,
1235 start - key.offset);
1236 btrfs_set_file_extent_generation(leaf, fi,
1237 trans->transid);
1238 path->slots[0]++;
1239 new_key.offset = start;
1240 btrfs_set_item_key_safe(fs_info, path, &new_key);
1241
1242 fi = btrfs_item_ptr(leaf, path->slots[0],
1243 struct btrfs_file_extent_item);
1244 btrfs_set_file_extent_generation(leaf, fi,
1245 trans->transid);
1246 btrfs_set_file_extent_num_bytes(leaf, fi,
1247 other_end - start);
1248 btrfs_set_file_extent_offset(leaf, fi,
1249 start - orig_offset);
1250 btrfs_mark_buffer_dirty(leaf);
1251 goto out;
1252 }
1253 }
1254
1255 while (start > key.offset || end < extent_end) {
1256 if (key.offset == start)
1257 split = end;
1258
1259 new_key.offset = split;
1260 ret = btrfs_duplicate_item(trans, root, path, &new_key);
1261 if (ret == -EAGAIN) {
1262 btrfs_release_path(path);
1263 goto again;
1264 }
1265 if (ret < 0) {
1266 btrfs_abort_transaction(trans, ret);
1267 goto out;
1268 }
1269
1270 leaf = path->nodes[0];
1271 fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
1272 struct btrfs_file_extent_item);
1273 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1274 btrfs_set_file_extent_num_bytes(leaf, fi,
1275 split - key.offset);
1276
1277 fi = btrfs_item_ptr(leaf, path->slots[0],
1278 struct btrfs_file_extent_item);
1279
1280 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1281 btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
1282 btrfs_set_file_extent_num_bytes(leaf, fi,
1283 extent_end - split);
1284 btrfs_mark_buffer_dirty(leaf);
1285
1286 ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
1287 0, root->root_key.objectid,
1288 ino, orig_offset);
1289 if (ret) {
1290 btrfs_abort_transaction(trans, ret);
1291 goto out;
1292 }
1293
1294 if (split == start) {
1295 key.offset = start;
1296 } else {
1297 if (start != key.offset) {
1298 ret = -EINVAL;
1299 btrfs_abort_transaction(trans, ret);
1300 goto out;
1301 }
1302 path->slots[0]--;
1303 extent_end = end;
1304 }
1305 recow = 1;
1306 }
1307
1308 other_start = end;
1309 other_end = 0;
1310 if (extent_mergeable(leaf, path->slots[0] + 1,
1311 ino, bytenr, orig_offset,
1312 &other_start, &other_end)) {
1313 if (recow) {
1314 btrfs_release_path(path);
1315 goto again;
1316 }
1317 extent_end = other_end;
1318 del_slot = path->slots[0] + 1;
1319 del_nr++;
1320 ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
1321 0, root->root_key.objectid,
1322 ino, orig_offset);
1323 if (ret) {
1324 btrfs_abort_transaction(trans, ret);
1325 goto out;
1326 }
1327 }
1328 other_start = 0;
1329 other_end = start;
1330 if (extent_mergeable(leaf, path->slots[0] - 1,
1331 ino, bytenr, orig_offset,
1332 &other_start, &other_end)) {
1333 if (recow) {
1334 btrfs_release_path(path);
1335 goto again;
1336 }
1337 key.offset = other_start;
1338 del_slot = path->slots[0];
1339 del_nr++;
1340 ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
1341 0, root->root_key.objectid,
1342 ino, orig_offset);
1343 if (ret) {
1344 btrfs_abort_transaction(trans, ret);
1345 goto out;
1346 }
1347 }
1348 if (del_nr == 0) {
1349 fi = btrfs_item_ptr(leaf, path->slots[0],
1350 struct btrfs_file_extent_item);
1351 btrfs_set_file_extent_type(leaf, fi,
1352 BTRFS_FILE_EXTENT_REG);
1353 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1354 btrfs_mark_buffer_dirty(leaf);
1355 } else {
1356 fi = btrfs_item_ptr(leaf, del_slot - 1,
1357 struct btrfs_file_extent_item);
1358 btrfs_set_file_extent_type(leaf, fi,
1359 BTRFS_FILE_EXTENT_REG);
1360 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1361 btrfs_set_file_extent_num_bytes(leaf, fi,
1362 extent_end - key.offset);
1363 btrfs_mark_buffer_dirty(leaf);
1364
1365 ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
1366 if (ret < 0) {
1367 btrfs_abort_transaction(trans, ret);
1368 goto out;
1369 }
1370 }
1371out:
1372 btrfs_free_path(path);
1373 return 0;
1374}
1375
1376/*
1377 * on error we return an unlocked page and the error value
1378 * on success we return a locked page and 0
1379 */
1380static int prepare_uptodate_page(struct inode *inode,
1381 struct page *page, u64 pos,
1382 bool force_uptodate)
1383{
1384 int ret = 0;
1385
1386 if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
1387 !PageUptodate(page)) {
1388 ret = btrfs_readpage(NULL, page);
1389 if (ret)
1390 return ret;
1391 lock_page(page);
1392 if (!PageUptodate(page)) {
1393 unlock_page(page);
1394 return -EIO;
1395 }
1396 if (page->mapping != inode->i_mapping) {
1397 unlock_page(page);
1398 return -EAGAIN;
1399 }
1400 }
1401 return 0;
1402}
1403
1404/*
1405 * this just gets pages into the page cache and locks them down.
1406 */
1407static noinline int prepare_pages(struct inode *inode, struct page **pages,
1408 size_t num_pages, loff_t pos,
1409 size_t write_bytes, bool force_uptodate)
1410{
1411 int i;
1412 unsigned long index = pos >> PAGE_SHIFT;
1413 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
1414 int err = 0;
1415 int faili;
1416
1417 for (i = 0; i < num_pages; i++) {
1418again:
1419 pages[i] = find_or_create_page(inode->i_mapping, index + i,
1420 mask | __GFP_WRITE);
1421 if (!pages[i]) {
1422 faili = i - 1;
1423 err = -ENOMEM;
1424 goto fail;
1425 }
1426
1427 if (i == 0)
1428 err = prepare_uptodate_page(inode, pages[i], pos,
1429 force_uptodate);
1430 if (!err && i == num_pages - 1)
1431 err = prepare_uptodate_page(inode, pages[i],
1432 pos + write_bytes, false);
1433 if (err) {
1434 put_page(pages[i]);
1435 if (err == -EAGAIN) {
1436 err = 0;
1437 goto again;
1438 }
1439 faili = i - 1;
1440 goto fail;
1441 }
1442 wait_on_page_writeback(pages[i]);
1443 }
1444
1445 return 0;
1446fail:
1447 while (faili >= 0) {
1448 unlock_page(pages[faili]);
1449 put_page(pages[faili]);
1450 faili--;
1451 }
1452 return err;
1453
1454}
1455
1456/*
1457 * This function locks the extent and properly waits for data=ordered extents
1458 * to finish before allowing the pages to be modified if need.
1459 *
1460 * The return value:
1461 * 1 - the extent is locked
1462 * 0 - the extent is not locked, and everything is OK
1463 * -EAGAIN - need re-prepare the pages
1464 * the other < 0 number - Something wrong happens
1465 */
1466static noinline int
1467lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
1468 size_t num_pages, loff_t pos,
1469 size_t write_bytes,
1470 u64 *lockstart, u64 *lockend,
1471 struct extent_state **cached_state)
1472{
1473 struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
1474 u64 start_pos;
1475 u64 last_pos;
1476 int i;
1477 int ret = 0;
1478
1479 start_pos = round_down(pos, fs_info->sectorsize);
1480 last_pos = start_pos
1481 + round_up(pos + write_bytes - start_pos,
1482 fs_info->sectorsize) - 1;
1483
1484 if (start_pos < inode->vfs_inode.i_size) {
1485 struct btrfs_ordered_extent *ordered;
1486
1487 lock_extent_bits(&inode->io_tree, start_pos, last_pos,
1488 cached_state);
1489 ordered = btrfs_lookup_ordered_range(inode, start_pos,
1490 last_pos - start_pos + 1);
1491 if (ordered &&
1492 ordered->file_offset + ordered->len > start_pos &&
1493 ordered->file_offset <= last_pos) {
1494 unlock_extent_cached(&inode->io_tree, start_pos,
1495 last_pos, cached_state);
1496 for (i = 0; i < num_pages; i++) {
1497 unlock_page(pages[i]);
1498 put_page(pages[i]);
1499 }
1500 btrfs_start_ordered_extent(&inode->vfs_inode,
1501 ordered, 1);
1502 btrfs_put_ordered_extent(ordered);
1503 return -EAGAIN;
1504 }
1505 if (ordered)
1506 btrfs_put_ordered_extent(ordered);
1507 clear_extent_bit(&inode->io_tree, start_pos, last_pos,
1508 EXTENT_DIRTY | EXTENT_DELALLOC |
1509 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
1510 0, 0, cached_state);
1511 *lockstart = start_pos;
1512 *lockend = last_pos;
1513 ret = 1;
1514 }
1515
1516 for (i = 0; i < num_pages; i++) {
1517 if (clear_page_dirty_for_io(pages[i]))
1518 account_page_redirty(pages[i]);
1519 set_page_extent_mapped(pages[i]);
1520 WARN_ON(!PageLocked(pages[i]));
1521 }
1522
1523 return ret;
1524}
1525
1526static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
1527 size_t *write_bytes)
1528{
1529 struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
1530 struct btrfs_root *root = inode->root;
1531 struct btrfs_ordered_extent *ordered;
1532 u64 lockstart, lockend;
1533 u64 num_bytes;
1534 int ret;
1535
1536 ret = btrfs_start_write_no_snapshotting(root);
1537 if (!ret)
1538 return -ENOSPC;
1539
1540 lockstart = round_down(pos, fs_info->sectorsize);
1541 lockend = round_up(pos + *write_bytes,
1542 fs_info->sectorsize) - 1;
1543
1544 while (1) {
1545 lock_extent(&inode->io_tree, lockstart, lockend);
1546 ordered = btrfs_lookup_ordered_range(inode, lockstart,
1547 lockend - lockstart + 1);
1548 if (!ordered) {
1549 break;
1550 }
1551 unlock_extent(&inode->io_tree, lockstart, lockend);
1552 btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
1553 btrfs_put_ordered_extent(ordered);
1554 }
1555
1556 num_bytes = lockend - lockstart + 1;
1557 ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
1558 NULL, NULL, NULL);
1559 if (ret <= 0) {
1560 ret = 0;
1561 btrfs_end_write_no_snapshotting(root);
1562 } else {
1563 *write_bytes = min_t(size_t, *write_bytes ,
1564 num_bytes - pos + lockstart);
1565 }
1566
1567 unlock_extent(&inode->io_tree, lockstart, lockend);
1568
1569 return ret;
1570}
1571
1572static noinline ssize_t __btrfs_buffered_write(struct file *file,
1573 struct iov_iter *i,
1574 loff_t pos)
1575{
1576 struct inode *inode = file_inode(file);
1577 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1578 struct btrfs_root *root = BTRFS_I(inode)->root;
1579 struct page **pages = NULL;
1580 struct extent_state *cached_state = NULL;
1581 struct extent_changeset *data_reserved = NULL;
1582 u64 release_bytes = 0;
1583 u64 lockstart;
1584 u64 lockend;
1585 size_t num_written = 0;
1586 int nrptrs;
1587 int ret = 0;
1588 bool only_release_metadata = false;
1589 bool force_page_uptodate = false;
1590
1591 nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
1592 PAGE_SIZE / (sizeof(struct page *)));
1593 nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1594 nrptrs = max(nrptrs, 8);
1595 pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
1596 if (!pages)
1597 return -ENOMEM;
1598
1599 while (iov_iter_count(i) > 0) {
1600 size_t offset = pos & (PAGE_SIZE - 1);
1601 size_t sector_offset;
1602 size_t write_bytes = min(iov_iter_count(i),
1603 nrptrs * (size_t)PAGE_SIZE -
1604 offset);
1605 size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
1606 PAGE_SIZE);
1607 size_t reserve_bytes;
1608 size_t dirty_pages;
1609 size_t copied;
1610 size_t dirty_sectors;
1611 size_t num_sectors;
1612 int extents_locked;
1613
1614 WARN_ON(num_pages > nrptrs);
1615
1616 /*
1617 * Fault pages before locking them in prepare_pages
1618 * to avoid recursive lock
1619 */
1620 if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
1621 ret = -EFAULT;
1622 break;
1623 }
1624
1625 sector_offset = pos & (fs_info->sectorsize - 1);
1626 reserve_bytes = round_up(write_bytes + sector_offset,
1627 fs_info->sectorsize);
1628
1629 extent_changeset_release(data_reserved);
1630 ret = btrfs_check_data_free_space(inode, &data_reserved, pos,
1631 write_bytes);
1632 if (ret < 0) {
1633 if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
1634 BTRFS_INODE_PREALLOC)) &&
1635 check_can_nocow(BTRFS_I(inode), pos,
1636 &write_bytes) > 0) {
1637 /*
1638 * For nodata cow case, no need to reserve
1639 * data space.
1640 */
1641 only_release_metadata = true;
1642 /*
1643 * our prealloc extent may be smaller than
1644 * write_bytes, so scale down.
1645 */
1646 num_pages = DIV_ROUND_UP(write_bytes + offset,
1647 PAGE_SIZE);
1648 reserve_bytes = round_up(write_bytes +
1649 sector_offset,
1650 fs_info->sectorsize);
1651 } else {
1652 break;
1653 }
1654 }
1655
1656 WARN_ON(reserve_bytes == 0);
1657 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
1658 reserve_bytes);
1659 if (ret) {
1660 if (!only_release_metadata)
1661 btrfs_free_reserved_data_space(inode,
1662 data_reserved, pos,
1663 write_bytes);
1664 else
1665 btrfs_end_write_no_snapshotting(root);
1666 break;
1667 }
1668
1669 release_bytes = reserve_bytes;
1670again:
1671 /*
1672 * This is going to setup the pages array with the number of
1673 * pages we want, so we don't really need to worry about the
1674 * contents of pages from loop to loop
1675 */
1676 ret = prepare_pages(inode, pages, num_pages,
1677 pos, write_bytes,
1678 force_page_uptodate);
1679 if (ret) {
1680 btrfs_delalloc_release_extents(BTRFS_I(inode),
1681 reserve_bytes, true);
1682 break;
1683 }
1684
1685 extents_locked = lock_and_cleanup_extent_if_need(
1686 BTRFS_I(inode), pages,
1687 num_pages, pos, write_bytes, &lockstart,
1688 &lockend, &cached_state);
1689 if (extents_locked < 0) {
1690 if (extents_locked == -EAGAIN)
1691 goto again;
1692 btrfs_delalloc_release_extents(BTRFS_I(inode),
1693 reserve_bytes, true);
1694 ret = extents_locked;
1695 break;
1696 }
1697
1698 copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
1699
1700 num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
1701 dirty_sectors = round_up(copied + sector_offset,
1702 fs_info->sectorsize);
1703 dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
1704
1705 /*
1706 * if we have trouble faulting in the pages, fall
1707 * back to one page at a time
1708 */
1709 if (copied < write_bytes)
1710 nrptrs = 1;
1711
1712 if (copied == 0) {
1713 force_page_uptodate = true;
1714 dirty_sectors = 0;
1715 dirty_pages = 0;
1716 } else {
1717 force_page_uptodate = false;
1718 dirty_pages = DIV_ROUND_UP(copied + offset,
1719 PAGE_SIZE);
1720 }
1721
1722 if (num_sectors > dirty_sectors) {
1723 /* release everything except the sectors we dirtied */
1724 release_bytes -= dirty_sectors <<
1725 fs_info->sb->s_blocksize_bits;
1726 if (only_release_metadata) {
1727 btrfs_delalloc_release_metadata(BTRFS_I(inode),
1728 release_bytes, true);
1729 } else {
1730 u64 __pos;
1731
1732 __pos = round_down(pos,
1733 fs_info->sectorsize) +
1734 (dirty_pages << PAGE_SHIFT);
1735 btrfs_delalloc_release_space(inode,
1736 data_reserved, __pos,
1737 release_bytes, true);
1738 }
1739 }
1740
1741 release_bytes = round_up(copied + sector_offset,
1742 fs_info->sectorsize);
1743
1744 if (copied > 0)
1745 ret = btrfs_dirty_pages(inode, pages, dirty_pages,
1746 pos, copied, &cached_state);
1747 if (extents_locked)
1748 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1749 lockstart, lockend, &cached_state);
1750 btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes,
1751 true);
1752 if (ret) {
1753 btrfs_drop_pages(pages, num_pages);
1754 break;
1755 }
1756
1757 release_bytes = 0;
1758 if (only_release_metadata)
1759 btrfs_end_write_no_snapshotting(root);
1760
1761 if (only_release_metadata && copied > 0) {
1762 lockstart = round_down(pos,
1763 fs_info->sectorsize);
1764 lockend = round_up(pos + copied,
1765 fs_info->sectorsize) - 1;
1766
1767 set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
1768 lockend, EXTENT_NORESERVE, NULL,
1769 NULL, GFP_NOFS);
1770 only_release_metadata = false;
1771 }
1772
1773 btrfs_drop_pages(pages, num_pages);
1774
1775 cond_resched();
1776
1777 balance_dirty_pages_ratelimited(inode->i_mapping);
1778 if (dirty_pages < (fs_info->nodesize >> PAGE_SHIFT) + 1)
1779 btrfs_btree_balance_dirty(fs_info);
1780
1781 pos += copied;
1782 num_written += copied;
1783 }
1784
1785 kfree(pages);
1786
1787 if (release_bytes) {
1788 if (only_release_metadata) {
1789 btrfs_end_write_no_snapshotting(root);
1790 btrfs_delalloc_release_metadata(BTRFS_I(inode),
1791 release_bytes, true);
1792 } else {
1793 btrfs_delalloc_release_space(inode, data_reserved,
1794 round_down(pos, fs_info->sectorsize),
1795 release_bytes, true);
1796 }
1797 }
1798
1799 extent_changeset_free(data_reserved);
1800 return num_written ? num_written : ret;
1801}
1802
1803static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
1804{
1805 struct file *file = iocb->ki_filp;
1806 struct inode *inode = file_inode(file);
1807 loff_t pos = iocb->ki_pos;
1808 ssize_t written;
1809 ssize_t written_buffered;
1810 loff_t endbyte;
1811 int err;
1812
1813 written = generic_file_direct_write(iocb, from);
1814
1815 if (written < 0 || !iov_iter_count(from))
1816 return written;
1817
1818 pos += written;
1819 written_buffered = __btrfs_buffered_write(file, from, pos);
1820 if (written_buffered < 0) {
1821 err = written_buffered;
1822 goto out;
1823 }
1824 /*
1825 * Ensure all data is persisted. We want the next direct IO read to be
1826 * able to read what was just written.
1827 */
1828 endbyte = pos + written_buffered - 1;
1829 err = btrfs_fdatawrite_range(inode, pos, endbyte);
1830 if (err)
1831 goto out;
1832 err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
1833 if (err)
1834 goto out;
1835 written += written_buffered;
1836 iocb->ki_pos = pos + written_buffered;
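	/*
	 * The fallback dirtied the page cache; now that it is written back,
	 * invalidate those pages to preserve the expected O_DIRECT semantics.
	 */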
1837 invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
1838 endbyte >> PAGE_SHIFT);
1839out:
1840 return written ? written : err;
1841}
1842
1843static void update_time_for_write(struct inode *inode)
1844{
1845 struct timespec now;
1846
1847 if (IS_NOCMTIME(inode))
1848 return;
1849
1850 now = current_time(inode);
1851 if (!timespec_equal(&inode->i_mtime, &now))
1852 inode->i_mtime = now;
1853
1854 if (!timespec_equal(&inode->i_ctime, &now))
1855 inode->i_ctime = now;
1856
1857 if (IS_I_VERSION(inode))
1858 inode_inc_iversion(inode);
1859}
1860
1861static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
1862 struct iov_iter *from)
1863{
1864 struct file *file = iocb->ki_filp;
1865 struct inode *inode = file_inode(file);
1866 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1867 struct btrfs_root *root = BTRFS_I(inode)->root;
1868 u64 start_pos;
1869 u64 end_pos;
1870 ssize_t num_written = 0;
1871 bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
1872 ssize_t err;
1873 loff_t pos;
1874 size_t count = iov_iter_count(from);
1875 loff_t oldsize;
1876 int clean_page = 0;
1877
1878 if (!(iocb->ki_flags & IOCB_DIRECT) &&
1879 (iocb->ki_flags & IOCB_NOWAIT))
1880 return -EOPNOTSUPP;
1881
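	/*
	 * Try the inode lock without blocking first: an IOCB_NOWAIT write
	 * must not sleep here, all other writes fall back to a blocking
	 * inode_lock().
	 */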
1882 if (!inode_trylock(inode)) {
1883 if (iocb->ki_flags & IOCB_NOWAIT)
1884 return -EAGAIN;
1885 inode_lock(inode);
1886 }
1887
1888 err = generic_write_checks(iocb, from);
1889 if (err <= 0) {
1890 inode_unlock(inode);
1891 return err;
1892 }
1893
1894 pos = iocb->ki_pos;
1895 if (iocb->ki_flags & IOCB_NOWAIT) {
1896 /*
		 * We will allocate space if nodatacow is not set, so bail out.
1899 */
1900 if (!(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
1901 BTRFS_INODE_PREALLOC)) ||
1902 check_can_nocow(BTRFS_I(inode), pos, &count) <= 0) {
1903 inode_unlock(inode);
1904 return -EAGAIN;
1905 }
1906 }
1907
1908 current->backing_dev_info = inode_to_bdi(inode);
1909 err = file_remove_privs(file);
1910 if (err) {
1911 inode_unlock(inode);
1912 goto out;
1913 }
1914
1915 /*
1916 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_FS_STATE_ERROR),
1918 * although we have opened a file as writable, we have
1919 * to stop this write operation to ensure FS consistency.
1920 */
1921 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
1922 inode_unlock(inode);
1923 err = -EROFS;
1924 goto out;
1925 }
1926
1927 /*
1928 * We reserve space for updating the inode when we reserve space for the
	 * extent we are going to write, so we will get an ENOSPC there. We don't
1930 * need to start yet another transaction to update the inode as we will
1931 * update the inode when we finish writing whatever data we write.
1932 */
1933 update_time_for_write(inode);
1934
1935 start_pos = round_down(pos, fs_info->sectorsize);
1936 oldsize = i_size_read(inode);
1937 if (start_pos > oldsize) {
1938 /* Expand hole size to cover write data, preventing empty gap */
1939 end_pos = round_up(pos + count,
1940 fs_info->sectorsize);
1941 err = btrfs_cont_expand(inode, oldsize, end_pos);
1942 if (err) {
1943 inode_unlock(inode);
1944 goto out;
1945 }
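		/*
		 * The write starts past the block containing the old EOF, so
		 * the page straddling the old i_size needs
		 * pagecache_isize_extended() once a buffered write succeeds.
		 */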
1946 if (start_pos > round_up(oldsize, fs_info->sectorsize))
1947 clean_page = 1;
1948 }
1949
1950 if (sync)
1951 atomic_inc(&BTRFS_I(inode)->sync_writers);
1952
1953 if (iocb->ki_flags & IOCB_DIRECT) {
1954 num_written = __btrfs_direct_write(iocb, from);
1955 } else {
1956 num_written = __btrfs_buffered_write(file, from, pos);
1957 if (num_written > 0)
1958 iocb->ki_pos = pos + num_written;
1959 if (clean_page)
1960 pagecache_isize_extended(inode, oldsize,
1961 i_size_read(inode));
1962 }
1963
1964 inode_unlock(inode);
1965
1966 /*
1967 * We also have to set last_sub_trans to the current log transid,
1968 * otherwise subsequent syncs to a file that's been synced in this
1969 * transaction will appear to have already occurred.
1970 */
1971 spin_lock(&BTRFS_I(inode)->lock);
1972 BTRFS_I(inode)->last_sub_trans = root->log_transid;
1973 spin_unlock(&BTRFS_I(inode)->lock);
1974 if (num_written > 0)
1975 num_written = generic_write_sync(iocb, num_written);
1976
1977 if (sync)
1978 atomic_dec(&BTRFS_I(inode)->sync_writers);
1979out:
1980 current->backing_dev_info = NULL;
1981 return num_written ? num_written : err;
1982}
1983
1984int btrfs_release_file(struct inode *inode, struct file *filp)
1985{
1986 struct btrfs_file_private *private = filp->private_data;
1987
	if (private)
		kfree(private->filldir_buf);
1990 kfree(private);
1991 filp->private_data = NULL;
1992
1993 /*
	 * ordered_data_close is set by setattr when we are about to truncate
1995 * a file from a non-zero size to a zero size. This tries to
1996 * flush down new bytes that may have been written if the
1997 * application were using truncate to replace a file in place.
1998 */
1999 if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
2000 &BTRFS_I(inode)->runtime_flags))
2001 filemap_flush(inode->i_mapping);
2002 return 0;
2003}
2004
2005static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
2006{
2007 int ret;
2008 struct blk_plug plug;
2009
2010 /*
2011 * This is only called in fsync, which would do synchronous writes, so
2012 * a plug can merge adjacent IOs as much as possible. Esp. in case of
2013 * multiple disks using raid profile, a large IO can be split to
2014 * several segments of stripe length (currently 64K).
2015 */
2016 blk_start_plug(&plug);
2017 atomic_inc(&BTRFS_I(inode)->sync_writers);
2018 ret = btrfs_fdatawrite_range(inode, start, end);
2019 atomic_dec(&BTRFS_I(inode)->sync_writers);
2020 blk_finish_plug(&plug);
2021
2022 return ret;
2023}
2024
2025/*
2026 * fsync call for both files and directories. This logs the inode into
2027 * the tree log instead of forcing full commits whenever possible.
2028 *
 * It needs to call filemap_fdatawait so that all ordered extent updates in
 * the metadata btree are up to date for copying to the log.
2031 *
2032 * It drops the inode mutex before doing the tree log commit. This is an
2033 * important optimization for directories because holding the mutex prevents
2034 * new operations on the dir while we write to disk.
2035 */
2036int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
2037{
2038 struct dentry *dentry = file_dentry(file);
2039 struct inode *inode = d_inode(dentry);
2040 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2041 struct btrfs_root *root = BTRFS_I(inode)->root;
2042 struct btrfs_trans_handle *trans;
2043 struct btrfs_log_ctx ctx;
2044 int ret = 0, err;
2045 bool full_sync = false;
2046 u64 len;
2047
2048 /*
	 * The range length can be represented by a u64, we have to do the
	 * typecasts to avoid signed overflow if it's [0, LLONG_MAX], e.g.
	 * from fsync().
2051 */
2052 len = (u64)end - (u64)start + 1;
2053 trace_btrfs_sync_file(file, datasync);
2054
2055 btrfs_init_log_ctx(&ctx, inode);
2056
	/*
	 * We write the dirty pages in the range and wait until they complete
	 * outside of the ->i_mutex, so the flushing can be done concurrently
	 * by multiple tasks and performance improves. See
	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
	 */
2063 ret = start_ordered_ops(inode, start, end);
2064 if (ret)
2065 goto out;
2066
2067 inode_lock(inode);
2068 atomic_inc(&root->log_batch);
2069 full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2070 &BTRFS_I(inode)->runtime_flags);
2071 /*
	 * We might have had more pages made dirty after calling
2073 * start_ordered_ops and before acquiring the inode's i_mutex.
2074 */
2075 if (full_sync) {
2076 /*
2077 * For a full sync, we need to make sure any ordered operations
2078 * start and finish before we start logging the inode, so that
2079 * all extents are persisted and the respective file extent
2080 * items are in the fs/subvol btree.
2081 */
2082 ret = btrfs_wait_ordered_range(inode, start, len);
2083 } else {
2084 /*
2085 * Start any new ordered operations before starting to log the
2086 * inode. We will wait for them to finish in btrfs_sync_log().
2087 *
2088 * Right before acquiring the inode's mutex, we might have new
2089 * writes dirtying pages, which won't immediately start the
2090 * respective ordered operations - that is done through the
2091 * fill_delalloc callbacks invoked from the writepage and
2092 * writepages address space operations. So make sure we start
2093 * all ordered operations before starting to log our inode. Not
2094 * doing this means that while logging the inode, writeback
2095 * could start and invoke writepage/writepages, which would call
2096 * the fill_delalloc callbacks (cow_file_range,
2097 * submit_compressed_extents). These callbacks add first an
2098 * extent map to the modified list of extents and then create
2099 * the respective ordered operation, which means in
2100 * tree-log.c:btrfs_log_inode() we might capture all existing
2101 * ordered operations (with btrfs_get_logged_extents()) before
2102 * the fill_delalloc callback adds its ordered operation, and by
2103 * the time we visit the modified list of extent maps (with
2104 * btrfs_log_changed_extents()), we see and process the extent
2105 * map they created. We then use the extent map to construct a
2106 * file extent item for logging without waiting for the
2107 * respective ordered operation to finish - this file extent
2108 * item points to a disk location that might not have yet been
2109 * written to, containing random data - so after a crash a log
2110 * replay will make our inode have file extent items that point
2111 * to disk locations containing invalid data, as we returned
2112 * success to userspace without waiting for the respective
2113 * ordered operation to finish, because it wasn't captured by
2114 * btrfs_get_logged_extents().
2115 */
2116 ret = start_ordered_ops(inode, start, end);
2117 }
2118 if (ret) {
2119 inode_unlock(inode);
2120 goto out;
2121 }
2122 atomic_inc(&root->log_batch);
2123
2124 /*
2125 * If the last transaction that changed this file was before the current
2126 * transaction and we have the full sync flag set in our inode, we can
2127 * bail out now without any syncing.
2128 *
2129 * Note that we can't bail out if the full sync flag isn't set. This is
2130 * because when the full sync flag is set we start all ordered extents
2131 * and wait for them to fully complete - when they complete they update
2132 * the inode's last_trans field through:
2133 *
2134 * btrfs_finish_ordered_io() ->
2135 * btrfs_update_inode_fallback() ->
2136 * btrfs_update_inode() ->
2137 * btrfs_set_inode_last_trans()
2138 *
2139 * So we are sure that last_trans is up to date and can do this check to
2140 * bail out safely. For the fast path, when the full sync flag is not
2141 * set in our inode, we can not do it because we start only our ordered
2142 * extents and don't wait for them to complete (that is when
2143 * btrfs_finish_ordered_io runs), so here at this point their last_trans
	 * value might be less than or equal to fs_info->last_trans_committed,
2145 * and setting a speculative last_trans for an inode when a buffered
2146 * write is made (such as fs_info->generation + 1 for example) would not
2147 * be reliable since after setting the value and before fsync is called
2148 * any number of transactions can start and commit (transaction kthread
2149 * commits the current transaction periodically), and a transaction
2150 * commit does not start nor waits for ordered extents to complete.
2151 */
2152 smp_mb();
2153 if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
2154 (full_sync && BTRFS_I(inode)->last_trans <=
2155 fs_info->last_trans_committed) ||
2156 (!btrfs_have_ordered_extents_in_range(inode, start, len) &&
2157 BTRFS_I(inode)->last_trans
2158 <= fs_info->last_trans_committed)) {
2159 /*
2160 * We've had everything committed since the last time we were
2161 * modified so clear this flag in case it was set for whatever
2162 * reason, it's no longer relevant.
2163 */
2164 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2165 &BTRFS_I(inode)->runtime_flags);
2166 /*
2167 * An ordered extent might have started before and completed
2168 * already with io errors, in which case the inode was not
2169 * updated and we end up here. So check the inode's mapping
		 * for any errors that might have happened since we last
		 * called fsync.
2172 */
2173 ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
2174 inode_unlock(inode);
2175 goto out;
2176 }
2177
2178 /*
2179 * We use start here because we will need to wait on the IO to complete
2180 * in btrfs_sync_log, which could require joining a transaction (for
2181 * example checking cross references in the nocow path). If we use join
2182 * here we could get into a situation where we're waiting on IO to
2183 * happen that is blocked on a transaction trying to commit. With start
2184 * we inc the extwriter counter, so we wait for all extwriters to exit
2185 * before we start blocking join'ers. This comment is to keep somebody
2186 * from thinking they are super smart and changing this to
2187 * btrfs_join_transaction *cough*Josef*cough*.
2188 */
2189 trans = btrfs_start_transaction(root, 0);
2190 if (IS_ERR(trans)) {
2191 ret = PTR_ERR(trans);
2192 inode_unlock(inode);
2193 goto out;
2194 }
2195 trans->sync = true;
2196
2197 ret = btrfs_log_dentry_safe(trans, dentry, start, end, &ctx);
2198 if (ret < 0) {
2199 /* Fallthrough and commit/free transaction. */
2200 ret = 1;
2201 }
2202
	/*
	 * We've logged all the items and now have a consistent
2204 * version of the file in the log. It is possible that
2205 * someone will come in and modify the file, but that's
2206 * fine because the log is consistent on disk, and we
2207 * have references to all of the file's extents
2208 *
2209 * It is possible that someone will come in and log the
2210 * file again, but that will end up using the synchronization
2211 * inside btrfs_sync_log to keep things safe.
2212 */
2213 inode_unlock(inode);
2214
2215 /*
2216 * If any of the ordered extents had an error, just return it to user
2217 * space, so that the application knows some writes didn't succeed and
	 * can take proper action (e.g. retry). Blindly committing the
2219 * transaction in this case, would fool userspace that everything was
2220 * successful. And we also want to make sure our log doesn't contain
2221 * file extent items pointing to extents that weren't fully written to -
2222 * just like in the non fast fsync path, where we check for the ordered
2223 * operation's error flag before writing to the log tree and return -EIO
2224 * if any of them had this flag set (btrfs_wait_ordered_range) -
2225 * therefore we need to check for errors in the ordered operations,
2226 * which are indicated by ctx.io_err.
2227 */
2228 if (ctx.io_err) {
2229 btrfs_end_transaction(trans);
2230 ret = ctx.io_err;
2231 goto out;
2232 }
2233
2234 if (ret != BTRFS_NO_LOG_SYNC) {
2235 if (!ret) {
2236 ret = btrfs_sync_log(trans, root, &ctx);
2237 if (!ret) {
2238 ret = btrfs_end_transaction(trans);
2239 goto out;
2240 }
2241 }
2242 if (!full_sync) {
2243 ret = btrfs_wait_ordered_range(inode, start, len);
2244 if (ret) {
2245 btrfs_end_transaction(trans);
2246 goto out;
2247 }
2248 }
2249 ret = btrfs_commit_transaction(trans);
2250 } else {
2251 ret = btrfs_end_transaction(trans);
2252 }
2253out:
2254 ASSERT(list_empty(&ctx.list));
2255 err = file_check_and_advance_wb_err(file);
2256 if (!ret)
2257 ret = err;
2258 return ret > 0 ? -EIO : ret;
2259}
2260
2261static const struct vm_operations_struct btrfs_file_vm_ops = {
2262 .fault = filemap_fault,
2263 .map_pages = filemap_map_pages,
2264 .page_mkwrite = btrfs_page_mkwrite,
2265};
2266
2267static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
2268{
2269 struct address_space *mapping = filp->f_mapping;
2270
2271 if (!mapping->a_ops->readpage)
2272 return -ENOEXEC;
2273
2274 file_accessed(filp);
2275 vma->vm_ops = &btrfs_file_vm_ops;
2276
2277 return 0;
2278}
2279
2280static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
2281 int slot, u64 start, u64 end)
2282{
2283 struct btrfs_file_extent_item *fi;
2284 struct btrfs_key key;
2285
2286 if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2287 return 0;
2288
2289 btrfs_item_key_to_cpu(leaf, &key, slot);
2290 if (key.objectid != btrfs_ino(inode) ||
2291 key.type != BTRFS_EXTENT_DATA_KEY)
2292 return 0;
2293
2294 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2295
2296 if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2297 return 0;
2298
2299 if (btrfs_file_extent_disk_bytenr(leaf, fi))
2300 return 0;
2301
2302 if (key.offset == end)
2303 return 1;
2304 if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2305 return 1;
2306 return 0;
2307}
2308
2309static int fill_holes(struct btrfs_trans_handle *trans,
2310 struct btrfs_inode *inode,
2311 struct btrfs_path *path, u64 offset, u64 end)
2312{
2313 struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
2314 struct btrfs_root *root = inode->root;
2315 struct extent_buffer *leaf;
2316 struct btrfs_file_extent_item *fi;
2317 struct extent_map *hole_em;
2318 struct extent_map_tree *em_tree = &inode->extent_tree;
2319 struct btrfs_key key;
2320 int ret;
2321
2322 if (btrfs_fs_incompat(fs_info, NO_HOLES))
2323 goto out;
2324
2325 key.objectid = btrfs_ino(inode);
2326 key.type = BTRFS_EXTENT_DATA_KEY;
2327 key.offset = offset;
2328
2329 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2330 if (ret <= 0) {
2331 /*
2332 * We should have dropped this offset, so if we find it then
2333 * something has gone horribly wrong.
2334 */
2335 if (ret == 0)
2336 ret = -EINVAL;
2337 return ret;
2338 }
2339
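	/*
	 * The key was not found, so path->slots[0] points to where it would
	 * be inserted; check whether the neighbouring file extent items are
	 * holes we can extend instead of inserting a new one.
	 */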
2340 leaf = path->nodes[0];
2341 if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2342 u64 num_bytes;
2343
2344 path->slots[0]--;
2345 fi = btrfs_item_ptr(leaf, path->slots[0],
2346 struct btrfs_file_extent_item);
2347 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2348 end - offset;
2349 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2350 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2351 btrfs_set_file_extent_offset(leaf, fi, 0);
2352 btrfs_mark_buffer_dirty(leaf);
2353 goto out;
2354 }
2355
2356 if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2357 u64 num_bytes;
2358
2359 key.offset = offset;
2360 btrfs_set_item_key_safe(fs_info, path, &key);
2361 fi = btrfs_item_ptr(leaf, path->slots[0],
2362 struct btrfs_file_extent_item);
2363 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2364 offset;
2365 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2366 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2367 btrfs_set_file_extent_offset(leaf, fi, 0);
2368 btrfs_mark_buffer_dirty(leaf);
2369 goto out;
2370 }
2371 btrfs_release_path(path);
2372
2373 ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
2374 offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0);
2375 if (ret)
2376 return ret;
2377
2378out:
2379 btrfs_release_path(path);
2380
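	/*
	 * Record an extent map for the hole so the fast fsync path knows
	 * about it; if we can't, flag the inode for a full sync instead.
	 */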
2381 hole_em = alloc_extent_map();
2382 if (!hole_em) {
2383 btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2384 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
2385 } else {
2386 hole_em->start = offset;
2387 hole_em->len = end - offset;
2388 hole_em->ram_bytes = hole_em->len;
2389 hole_em->orig_start = offset;
2390
2391 hole_em->block_start = EXTENT_MAP_HOLE;
2392 hole_em->block_len = 0;
2393 hole_em->orig_block_len = 0;
2394 hole_em->bdev = fs_info->fs_devices->latest_bdev;
2395 hole_em->compress_type = BTRFS_COMPRESS_NONE;
2396 hole_em->generation = trans->transid;
2397
2398 do {
2399 btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2400 write_lock(&em_tree->lock);
2401 ret = add_extent_mapping(em_tree, hole_em, 1);
2402 write_unlock(&em_tree->lock);
2403 } while (ret == -EEXIST);
2404 free_extent_map(hole_em);
2405 if (ret)
2406 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2407 &inode->runtime_flags);
2408 }
2409
2410 return 0;
2411}
2412
2413/*
 * Find a hole extent on the given inode and change start/len to the end of
 * the hole extent (a hole/vacuum extent whose em->start <= start &&
 * em->start + em->len > start).
 * When a hole extent is found, return 1 and modify start/len.
2418 */
2419static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
2420{
2421 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2422 struct extent_map *em;
2423 int ret = 0;
2424
2425 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
2426 round_down(*start, fs_info->sectorsize),
2427 round_up(*len, fs_info->sectorsize), 0);
2428 if (IS_ERR(em))
2429 return PTR_ERR(em);
2430
	/* Hole or vacuum extent (only exists in no-holes mode) */
2432 if (em->block_start == EXTENT_MAP_HOLE) {
2433 ret = 1;
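		/*
		 * Trim the search range: nothing is left if the hole covers it
		 * completely, otherwise continue from the first byte after the
		 * hole.
		 */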
2434 *len = em->start + em->len > *start + *len ?
2435 0 : *start + *len - em->start - em->len;
2436 *start = em->start + em->len;
2437 }
2438 free_extent_map(em);
2439 return ret;
2440}
2441
2442static int btrfs_punch_hole_lock_range(struct inode *inode,
2443 const u64 lockstart,
2444 const u64 lockend,
2445 struct extent_state **cached_state)
2446{
2447 while (1) {
2448 struct btrfs_ordered_extent *ordered;
2449 int ret;
2450
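		/*
		 * Drop the page cache for the range first; if anybody reads a
		 * page back in before we manage to lock the extent range, the
		 * check below catches it and we retry.
		 */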
2451 truncate_pagecache_range(inode, lockstart, lockend);
2452
2453 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2454 cached_state);
2455 ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
2456
2457 /*
2458 * We need to make sure we have no ordered extents in this range
		 * and that nobody raced in and read a page in this range. If
		 * they did, we need to try again.
2461 */
2462 if ((!ordered ||
2463 (ordered->file_offset + ordered->len <= lockstart ||
2464 ordered->file_offset > lockend)) &&
2465 !filemap_range_has_page(inode->i_mapping,
2466 lockstart, lockend)) {
2467 if (ordered)
2468 btrfs_put_ordered_extent(ordered);
2469 break;
2470 }
2471 if (ordered)
2472 btrfs_put_ordered_extent(ordered);
2473 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
2474 lockend, cached_state);
2475 ret = btrfs_wait_ordered_range(inode, lockstart,
2476 lockend - lockstart + 1);
2477 if (ret)
2478 return ret;
2479 }
2480 return 0;
2481}
2482
2483static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
2484{
2485 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2486 struct btrfs_root *root = BTRFS_I(inode)->root;
2487 struct extent_state *cached_state = NULL;
2488 struct btrfs_path *path;
2489 struct btrfs_block_rsv *rsv;
2490 struct btrfs_trans_handle *trans;
2491 u64 lockstart;
2492 u64 lockend;
2493 u64 tail_start;
2494 u64 tail_len;
2495 u64 orig_start = offset;
2496 u64 cur_offset;
2497 u64 min_size = btrfs_calc_trans_metadata_size(fs_info, 1);
2498 u64 drop_end;
2499 int ret = 0;
2500 int err = 0;
2501 unsigned int rsv_count;
2502 bool same_block;
2503 bool no_holes = btrfs_fs_incompat(fs_info, NO_HOLES);
2504 u64 ino_size;
2505 bool truncated_block = false;
2506 bool updated_inode = false;
2507
2508 ret = btrfs_wait_ordered_range(inode, offset, len);
2509 if (ret)
2510 return ret;
2511
2512 inode_lock(inode);
2513 ino_size = round_up(inode->i_size, fs_info->sectorsize);
2514 ret = find_first_non_hole(inode, &offset, &len);
2515 if (ret < 0)
2516 goto out_only_mutex;
2517 if (ret && !len) {
2518 /* Already in a large hole */
2519 ret = 0;
2520 goto out_only_mutex;
2521 }
2522
2523 lockstart = round_up(offset, btrfs_inode_sectorsize(inode));
2524 lockend = round_down(offset + len,
2525 btrfs_inode_sectorsize(inode)) - 1;
2526 same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
2527 == (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
	/*
	 * We needn't truncate any block which is beyond the end of the file
	 * because we are sure there is no data there. Only do this if we are
	 * in the same block and we aren't doing the entire block.
	 */
2536 if (same_block && len < fs_info->sectorsize) {
2537 if (offset < ino_size) {
2538 truncated_block = true;
2539 ret = btrfs_truncate_block(inode, offset, len, 0);
2540 } else {
2541 ret = 0;
2542 }
2543 goto out_only_mutex;
2544 }
2545
2546 /* zero back part of the first block */
2547 if (offset < ino_size) {
2548 truncated_block = true;
2549 ret = btrfs_truncate_block(inode, offset, 0, 0);
2550 if (ret) {
2551 inode_unlock(inode);
2552 return ret;
2553 }
2554 }
2555
	/*
	 * Check the aligned pages after the first unaligned page. If
	 * offset != orig_start, the first unaligned page and several
	 * following pages are already in holes, so the extra check can
	 * be skipped.
	 */
2560 if (offset == orig_start) {
2561 /* after truncate page, check hole again */
2562 len = offset + len - lockstart;
2563 offset = lockstart;
2564 ret = find_first_non_hole(inode, &offset, &len);
2565 if (ret < 0)
2566 goto out_only_mutex;
2567 if (ret && !len) {
2568 ret = 0;
2569 goto out_only_mutex;
2570 }
2571 lockstart = offset;
2572 }
2573
2574 /* Check the tail unaligned part is in a hole */
2575 tail_start = lockend + 1;
2576 tail_len = offset + len - tail_start;
2577 if (tail_len) {
2578 ret = find_first_non_hole(inode, &tail_start, &tail_len);
2579 if (unlikely(ret < 0))
2580 goto out_only_mutex;
2581 if (!ret) {
2582 /* zero the front end of the last page */
2583 if (tail_start + tail_len < ino_size) {
2584 truncated_block = true;
2585 ret = btrfs_truncate_block(inode,
2586 tail_start + tail_len,
2587 0, 1);
2588 if (ret)
2589 goto out_only_mutex;
2590 }
2591 }
2592 }
2593
2594 if (lockend < lockstart) {
2595 ret = 0;
2596 goto out_only_mutex;
2597 }
2598
2599 ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
2600 &cached_state);
2601 if (ret) {
2602 inode_unlock(inode);
2603 goto out_only_mutex;
2604 }
2605
2606 path = btrfs_alloc_path();
2607 if (!path) {
2608 ret = -ENOMEM;
2609 goto out;
2610 }
2611
2612 rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
2613 if (!rsv) {
2614 ret = -ENOMEM;
2615 goto out_free;
2616 }
2617 rsv->size = btrfs_calc_trans_metadata_size(fs_info, 1);
2618 rsv->failfast = 1;
2619
2620 /*
2621 * 1 - update the inode
2622 * 1 - removing the extents in the range
2623 * 1 - adding the hole extent if no_holes isn't set
2624 */
2625 rsv_count = no_holes ? 2 : 3;
2626 trans = btrfs_start_transaction(root, rsv_count);
2627 if (IS_ERR(trans)) {
2628 err = PTR_ERR(trans);
2629 goto out_free;
2630 }
2631
2632 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
2633 min_size, 0);
2634 BUG_ON(ret);
2635 trans->block_rsv = rsv;
2636
2637 cur_offset = lockstart;
2638 len = lockend - cur_offset;
2639 while (cur_offset < lockend) {
2640 ret = __btrfs_drop_extents(trans, root, inode, path,
2641 cur_offset, lockend + 1,
2642 &drop_end, 1, 0, 0, NULL);
2643 if (ret != -ENOSPC)
2644 break;
2645
2646 trans->block_rsv = &fs_info->trans_block_rsv;
2647
2648 if (cur_offset < drop_end && cur_offset < ino_size) {
2649 ret = fill_holes(trans, BTRFS_I(inode), path,
2650 cur_offset, drop_end);
2651 if (ret) {
2652 /*
				 * If we failed then we didn't insert our hole
				 * entries for the area we dropped, so now the
				 * fs is corrupted and we must abort the
				 * transaction.
2657 */
2658 btrfs_abort_transaction(trans, ret);
2659 err = ret;
2660 break;
2661 }
2662 }
2663
2664 cur_offset = drop_end;
2665
2666 ret = btrfs_update_inode(trans, root, inode);
2667 if (ret) {
2668 err = ret;
2669 break;
2670 }
2671
2672 btrfs_end_transaction(trans);
2673 btrfs_btree_balance_dirty(fs_info);
2674
2675 trans = btrfs_start_transaction(root, rsv_count);
2676 if (IS_ERR(trans)) {
2677 ret = PTR_ERR(trans);
2678 trans = NULL;
2679 break;
2680 }
2681
2682 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
2683 rsv, min_size, 0);
2684 BUG_ON(ret); /* shouldn't happen */
2685 trans->block_rsv = rsv;
2686
2687 ret = find_first_non_hole(inode, &cur_offset, &len);
2688 if (unlikely(ret < 0))
2689 break;
2690 if (ret && !len) {
2691 ret = 0;
2692 break;
2693 }
2694 }
2695
2696 if (ret) {
2697 err = ret;
2698 goto out_trans;
2699 }
2700
2701 trans->block_rsv = &fs_info->trans_block_rsv;
2702 /*
	 * If we are using the NO_HOLES feature we might already have had a
	 * hole that overlaps part of the region [lockstart, lockend] and
2705 * ends at (or beyond) lockend. Since we have no file extent items to
2706 * represent holes, drop_end can be less than lockend and so we must
2707 * make sure we have an extent map representing the existing hole (the
2708 * call to __btrfs_drop_extents() might have dropped the existing extent
2709 * map representing the existing hole), otherwise the fast fsync path
2710 * will not record the existence of the hole region
2711 * [existing_hole_start, lockend].
2712 */
2713 if (drop_end <= lockend)
2714 drop_end = lockend + 1;
2715 /*
2716 * Don't insert file hole extent item if it's for a range beyond eof
	 * (because it's useless) or if it represents a 0 byte range (when
2718 * cur_offset == drop_end).
2719 */
2720 if (cur_offset < ino_size && cur_offset < drop_end) {
2721 ret = fill_holes(trans, BTRFS_I(inode), path,
2722 cur_offset, drop_end);
2723 if (ret) {
2724 /* Same comment as above. */
2725 btrfs_abort_transaction(trans, ret);
2726 err = ret;
2727 goto out_trans;
2728 }
2729 }
2730
2731out_trans:
2732 if (!trans)
2733 goto out_free;
2734
2735 inode_inc_iversion(inode);
2736 inode->i_mtime = inode->i_ctime = current_time(inode);
2737
2738 trans->block_rsv = &fs_info->trans_block_rsv;
2739 ret = btrfs_update_inode(trans, root, inode);
2740 updated_inode = true;
2741 btrfs_end_transaction(trans);
2742 btrfs_btree_balance_dirty(fs_info);
2743out_free:
2744 btrfs_free_path(path);
2745 btrfs_free_block_rsv(fs_info, rsv);
2746out:
2747 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2748 &cached_state);
2749out_only_mutex:
2750 if (!updated_inode && truncated_block && !ret && !err) {
2751 /*
2752 * If we only end up zeroing part of a page, we still need to
2753 * update the inode item, so that all the time fields are
2754 * updated as well as the necessary btrfs inode in memory fields
2755 * for detecting, at fsync time, if the inode isn't yet in the
2756 * log tree or it's there but not up to date.
2757 */
2758 trans = btrfs_start_transaction(root, 1);
2759 if (IS_ERR(trans)) {
2760 err = PTR_ERR(trans);
2761 } else {
2762 err = btrfs_update_inode(trans, root, inode);
2763 ret = btrfs_end_transaction(trans);
2764 }
2765 }
2766 inode_unlock(inode);
2767 if (ret && !err)
2768 err = ret;
2769 return err;
2770}
2771
2772/* Helper structure to record which range is already reserved */
2773struct falloc_range {
2774 struct list_head list;
2775 u64 start;
2776 u64 len;
2777};
2778
2779/*
2780 * Helper function to add falloc range
2781 *
 * The caller should have locked the larger extent range containing
 * [start, len).
2784 */
2785static int add_falloc_range(struct list_head *head, u64 start, u64 len)
2786{
2787 struct falloc_range *prev = NULL;
2788 struct falloc_range *range = NULL;
2789
2790 if (list_empty(head))
2791 goto insert;
2792
2793 /*
	 * As fallocate iterates by bytenr order, we only need to check
2795 * the last range.
2796 */
2797 prev = list_entry(head->prev, struct falloc_range, list);
2798 if (prev->start + prev->len == start) {
2799 prev->len += len;
2800 return 0;
2801 }
2802insert:
2803 range = kmalloc(sizeof(*range), GFP_KERNEL);
2804 if (!range)
2805 return -ENOMEM;
2806 range->start = start;
2807 range->len = len;
2808 list_add_tail(&range->list, head);
2809 return 0;
2810}
2811
2812static int btrfs_fallocate_update_isize(struct inode *inode,
2813 const u64 end,
2814 const int mode)
2815{
2816 struct btrfs_trans_handle *trans;
2817 struct btrfs_root *root = BTRFS_I(inode)->root;
2818 int ret;
2819 int ret2;
2820
2821 if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
2822 return 0;
2823
2824 trans = btrfs_start_transaction(root, 1);
2825 if (IS_ERR(trans))
2826 return PTR_ERR(trans);
2827
2828 inode->i_ctime = current_time(inode);
2829 i_size_write(inode, end);
2830 btrfs_ordered_update_i_size(inode, end, NULL);
2831 ret = btrfs_update_inode(trans, root, inode);
2832 ret2 = btrfs_end_transaction(trans);
2833
2834 return ret ? ret : ret2;
2835}
2836
2837enum {
2838 RANGE_BOUNDARY_WRITTEN_EXTENT = 0,
2839 RANGE_BOUNDARY_PREALLOC_EXTENT = 1,
2840 RANGE_BOUNDARY_HOLE = 2,
2841};
2842
2843static int btrfs_zero_range_check_range_boundary(struct inode *inode,
2844 u64 offset)
2845{
2846 const u64 sectorsize = btrfs_inode_sectorsize(inode);
2847 struct extent_map *em;
2848 int ret;
2849
2850 offset = round_down(offset, sectorsize);
2851 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0);
2852 if (IS_ERR(em))
2853 return PTR_ERR(em);
2854
2855 if (em->block_start == EXTENT_MAP_HOLE)
2856 ret = RANGE_BOUNDARY_HOLE;
2857 else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2858 ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
2859 else
2860 ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
2861
2862 free_extent_map(em);
2863 return ret;
2864}
2865
2866static int btrfs_zero_range(struct inode *inode,
2867 loff_t offset,
2868 loff_t len,
2869 const int mode)
2870{
2871 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2872 struct extent_map *em;
2873 struct extent_changeset *data_reserved = NULL;
2874 int ret;
2875 u64 alloc_hint = 0;
2876 const u64 sectorsize = btrfs_inode_sectorsize(inode);
2877 u64 alloc_start = round_down(offset, sectorsize);
2878 u64 alloc_end = round_up(offset + len, sectorsize);
2879 u64 bytes_to_reserve = 0;
2880 bool space_reserved = false;
2881
2882 inode_dio_wait(inode);
2883
2884 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
2885 alloc_start, alloc_end - alloc_start, 0);
2886 if (IS_ERR(em)) {
2887 ret = PTR_ERR(em);
2888 goto out;
2889 }
2890
2891 /*
	 * could be considered, but these are unlikely to be common and we keep
	 * things as simple as possible for now. Also, intentionally, if the target
2894 * as simple as possible for now. Also, intentionally, if the target
2895 * range contains one or more prealloc extents together with regular
2896 * extents and holes, we drop all the existing extents and allocate a
2897 * new prealloc extent, so that we get a larger contiguous disk extent.
2898 */
2899 if (em->start <= alloc_start &&
2900 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
2901 const u64 em_end = em->start + em->len;
2902
2903 if (em_end >= offset + len) {
2904 /*
2905 * The whole range is already a prealloc extent,
2906 * do nothing except updating the inode's i_size if
2907 * needed.
2908 */
2909 free_extent_map(em);
2910 ret = btrfs_fallocate_update_isize(inode, offset + len,
2911 mode);
2912 goto out;
2913 }
2914 /*
2915 * Part of the range is already a prealloc extent, so operate
2916 * only on the remaining part of the range.
2917 */
2918 alloc_start = em_end;
2919 ASSERT(IS_ALIGNED(alloc_start, sectorsize));
2920 len = offset + len - alloc_start;
2921 offset = alloc_start;
2922 alloc_hint = em->block_start + em->len;
2923 }
2924 free_extent_map(em);
2925
2926 if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
2927 BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
2928 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
2929 alloc_start, sectorsize, 0);
2930 if (IS_ERR(em)) {
2931 ret = PTR_ERR(em);
2932 goto out;
2933 }
2934
2935 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
2936 free_extent_map(em);
2937 ret = btrfs_fallocate_update_isize(inode, offset + len,
2938 mode);
2939 goto out;
2940 }
2941 if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
2942 free_extent_map(em);
2943 ret = btrfs_truncate_block(inode, offset, len, 0);
2944 if (!ret)
2945 ret = btrfs_fallocate_update_isize(inode,
2946 offset + len,
2947 mode);
2948 return ret;
2949 }
2950 free_extent_map(em);
2951 alloc_start = round_down(offset, sectorsize);
2952 alloc_end = alloc_start + sectorsize;
2953 goto reserve_space;
2954 }
2955
2956 alloc_start = round_up(offset, sectorsize);
2957 alloc_end = round_down(offset + len, sectorsize);
2958
2959 /*
2960 * For unaligned ranges, check the pages at the boundaries, they might
2961 * map to an extent, in which case we need to partially zero them, or
2962 * they might map to a hole, in which case we need our allocation range
2963 * to cover them.
2964 */
2965 if (!IS_ALIGNED(offset, sectorsize)) {
2966 ret = btrfs_zero_range_check_range_boundary(inode, offset);
2967 if (ret < 0)
2968 goto out;
2969 if (ret == RANGE_BOUNDARY_HOLE) {
2970 alloc_start = round_down(offset, sectorsize);
2971 ret = 0;
2972 } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
2973 ret = btrfs_truncate_block(inode, offset, 0, 0);
2974 if (ret)
2975 goto out;
2976 } else {
2977 ret = 0;
2978 }
2979 }
2980
2981 if (!IS_ALIGNED(offset + len, sectorsize)) {
2982 ret = btrfs_zero_range_check_range_boundary(inode,
2983 offset + len);
2984 if (ret < 0)
2985 goto out;
2986 if (ret == RANGE_BOUNDARY_HOLE) {
2987 alloc_end = round_up(offset + len, sectorsize);
2988 ret = 0;
2989 } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
2990 ret = btrfs_truncate_block(inode, offset + len, 0, 1);
2991 if (ret)
2992 goto out;
2993 } else {
2994 ret = 0;
2995 }
2996 }
2997
2998reserve_space:
2999 if (alloc_start < alloc_end) {
3000 struct extent_state *cached_state = NULL;
3001 const u64 lockstart = alloc_start;
3002 const u64 lockend = alloc_end - 1;
3003
3004 bytes_to_reserve = alloc_end - alloc_start;
3005 ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3006 bytes_to_reserve);
3007 if (ret < 0)
3008 goto out;
3009 space_reserved = true;
3010 ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
3011 alloc_start, bytes_to_reserve);
3012 if (ret)
3013 goto out;
3014 ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
3015 &cached_state);
3016 if (ret)
3017 goto out;
3018 ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
3019 alloc_end - alloc_start,
3020 i_blocksize(inode),
3021 offset + len, &alloc_hint);
3022 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
3023 lockend, &cached_state);
3024 /* btrfs_prealloc_file_range releases reserved space on error */
3025 if (ret) {
3026 space_reserved = false;
3027 goto out;
3028 }
3029 }
3030 ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
3031 out:
3032 if (ret && space_reserved)
3033 btrfs_free_reserved_data_space(inode, data_reserved,
3034 alloc_start, bytes_to_reserve);
3035 extent_changeset_free(data_reserved);
3036
3037 return ret;
3038}
3039
3040static long btrfs_fallocate(struct file *file, int mode,
3041 loff_t offset, loff_t len)
3042{
3043 struct inode *inode = file_inode(file);
3044 struct extent_state *cached_state = NULL;
3045 struct extent_changeset *data_reserved = NULL;
3046 struct falloc_range *range;
3047 struct falloc_range *tmp;
3048 struct list_head reserve_list;
3049 u64 cur_offset;
3050 u64 last_byte;
3051 u64 alloc_start;
3052 u64 alloc_end;
3053 u64 alloc_hint = 0;
3054 u64 locked_end;
3055 u64 actual_end = 0;
3056 struct extent_map *em;
3057 int blocksize = btrfs_inode_sectorsize(inode);
3058 int ret;
3059
3060 alloc_start = round_down(offset, blocksize);
3061 alloc_end = round_up(offset + len, blocksize);
3062 cur_offset = alloc_start;
3063
	/* Make sure we aren't being given some crap mode */
3065 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3066 FALLOC_FL_ZERO_RANGE))
3067 return -EOPNOTSUPP;
3068
3069 if (mode & FALLOC_FL_PUNCH_HOLE)
3070 return btrfs_punch_hole(inode, offset, len);
3071
3072 /*
3073 * Only trigger disk allocation, don't trigger qgroup reserve
3074 *
3075 * For qgroup space, it will be checked later.
3076 */
3077 if (!(mode & FALLOC_FL_ZERO_RANGE)) {
3078 ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3079 alloc_end - alloc_start);
3080 if (ret < 0)
3081 return ret;
3082 }
3083
3084 inode_lock(inode);
3085
3086 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
3087 ret = inode_newsize_ok(inode, offset + len);
3088 if (ret)
3089 goto out;
3090 }
3091
3092 /*
3093 * TODO: Move these two operations after we have checked
3094 * accurate reserved space, or fallocate can still fail but
	 * with the page truncated or the size expanded.
	 *
	 * But that's a minor problem and won't do much harm.
3098 */
3099 if (alloc_start > inode->i_size) {
3100 ret = btrfs_cont_expand(inode, i_size_read(inode),
3101 alloc_start);
3102 if (ret)
3103 goto out;
3104 } else if (offset + len > inode->i_size) {
3105 /*
3106 * If we are fallocating from the end of the file onward we
3107 * need to zero out the end of the block if i_size lands in the
3108 * middle of a block.
3109 */
3110 ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
3111 if (ret)
3112 goto out;
3113 }
3114
3115 /*
3116 * wait for ordered IO before we have any locks. We'll loop again
3117 * below with the locks held.
3118 */
3119 ret = btrfs_wait_ordered_range(inode, alloc_start,
3120 alloc_end - alloc_start);
3121 if (ret)
3122 goto out;
3123
3124 if (mode & FALLOC_FL_ZERO_RANGE) {
3125 ret = btrfs_zero_range(inode, offset, len, mode);
3126 inode_unlock(inode);
3127 return ret;
3128 }
3129
3130 locked_end = alloc_end - 1;
3131 while (1) {
3132 struct btrfs_ordered_extent *ordered;
3133
3134 /* the extent lock is ordered inside the running
3135 * transaction
3136 */
3137 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
3138 locked_end, &cached_state);
3139 ordered = btrfs_lookup_first_ordered_extent(inode, locked_end);
3140
3141 if (ordered &&
3142 ordered->file_offset + ordered->len > alloc_start &&
3143 ordered->file_offset < alloc_end) {
3144 btrfs_put_ordered_extent(ordered);
3145 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
3146 alloc_start, locked_end,
3147 &cached_state);
3148 /*
3149 * we can't wait on the range with the transaction
3150 * running or with the extent lock held
3151 */
3152 ret = btrfs_wait_ordered_range(inode, alloc_start,
3153 alloc_end - alloc_start);
3154 if (ret)
3155 goto out;
3156 } else {
3157 if (ordered)
3158 btrfs_put_ordered_extent(ordered);
3159 break;
3160 }
3161 }
3162
3163 /* First, check if we exceed the qgroup limit */
3164 INIT_LIST_HEAD(&reserve_list);
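	/*
	 * Walk the range one extent map at a time: holes (and ranges beyond
	 * EOF that aren't prealloc) are queued for allocation and get a
	 * qgroup reservation, everything else keeps its existing allocation.
	 */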
3165 while (cur_offset < alloc_end) {
3166 em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
3167 alloc_end - cur_offset, 0);
3168 if (IS_ERR(em)) {
3169 ret = PTR_ERR(em);
3170 break;
3171 }
3172 last_byte = min(extent_map_end(em), alloc_end);
3173 actual_end = min_t(u64, extent_map_end(em), offset + len);
3174 last_byte = ALIGN(last_byte, blocksize);
3175 if (em->block_start == EXTENT_MAP_HOLE ||
3176 (cur_offset >= inode->i_size &&
3177 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
3178 ret = add_falloc_range(&reserve_list, cur_offset,
3179 last_byte - cur_offset);
3180 if (ret < 0) {
3181 free_extent_map(em);
3182 break;
3183 }
3184 ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
3185 cur_offset, last_byte - cur_offset);
3186 if (ret < 0) {
3187 free_extent_map(em);
3188 break;
3189 }
3190 } else {
3191 /*
			 * We don't need to reserve an unwritten extent for this
			 * range; free the reserved data space first, otherwise
			 * it'll result in a false ENOSPC error.
3195 */
3196 btrfs_free_reserved_data_space(inode, data_reserved,
3197 cur_offset, last_byte - cur_offset);
3198 }
3199 free_extent_map(em);
3200 cur_offset = last_byte;
3201 }
3202
3203 /*
	 * If ret is still 0, we're OK to fallocate.
	 * Otherwise just clean up the list and exit.
3206 */
3207 list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3208 if (!ret)
3209 ret = btrfs_prealloc_file_range(inode, mode,
3210 range->start,
3211 range->len, i_blocksize(inode),
3212 offset + len, &alloc_hint);
3213 else
3214 btrfs_free_reserved_data_space(inode,
3215 data_reserved, range->start,
3216 range->len);
3217 list_del(&range->list);
3218 kfree(range);
3219 }
3220 if (ret < 0)
3221 goto out_unlock;
3222
3223 /*
3224 * We didn't need to allocate any more space, but we still extended the
3225 * size of the file so we need to update i_size and the inode item.
3226 */
3227 ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
3228out_unlock:
3229 unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3230 &cached_state);
3231out:
3232 inode_unlock(inode);
3233 /* Let go of our reservation. */
3234 if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
3235 btrfs_free_reserved_data_space(inode, data_reserved,
3236 alloc_start, alloc_end - cur_offset);
3237 extent_changeset_free(data_reserved);
3238 return ret;
3239}
3240
3241static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
3242{
3243 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3244 struct extent_map *em = NULL;
3245 struct extent_state *cached_state = NULL;
3246 u64 lockstart;
3247 u64 lockend;
3248 u64 start;
3249 u64 len;
3250 int ret = 0;
3251
3252 if (inode->i_size == 0)
3253 return -ENXIO;
3254
3255 /*
	 * *offset can be negative, in which case we start finding DATA/HOLE from
3257 * the very start of the file.
3258 */
3259 start = max_t(loff_t, 0, *offset);
3260
3261 lockstart = round_down(start, fs_info->sectorsize);
3262 lockend = round_up(i_size_read(inode),
3263 fs_info->sectorsize);
3264 if (lockend <= lockstart)
3265 lockend = lockstart + fs_info->sectorsize;
3266 lockend--;
3267 len = lockend - lockstart + 1;
3268
3269 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3270 &cached_state);
3271
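	/*
	 * Walk the extent maps from the search start: the first hole or
	 * prealloc extent ends a SEEK_HOLE search, the first written extent
	 * ends a SEEK_DATA search.
	 */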
3272 while (start < inode->i_size) {
3273 em = btrfs_get_extent_fiemap(BTRFS_I(inode), NULL, 0,
3274 start, len, 0);
3275 if (IS_ERR(em)) {
3276 ret = PTR_ERR(em);
3277 em = NULL;
3278 break;
3279 }
3280
3281 if (whence == SEEK_HOLE &&
3282 (em->block_start == EXTENT_MAP_HOLE ||
3283 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3284 break;
3285 else if (whence == SEEK_DATA &&
3286 (em->block_start != EXTENT_MAP_HOLE &&
3287 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
3288 break;
3289
3290 start = em->start + em->len;
3291 free_extent_map(em);
3292 em = NULL;
3293 cond_resched();
3294 }
3295 free_extent_map(em);
3296 if (!ret) {
3297 if (whence == SEEK_DATA && start >= inode->i_size)
3298 ret = -ENXIO;
3299 else
3300 *offset = min_t(loff_t, start, inode->i_size);
3301 }
3302 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3303 &cached_state);
3304 return ret;
3305}
3306
3307static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
3308{
3309 struct inode *inode = file->f_mapping->host;
3310 int ret;
3311
3312 inode_lock(inode);
3313 switch (whence) {
3314 case SEEK_END:
3315 case SEEK_CUR:
3316 offset = generic_file_llseek(file, offset, whence);
3317 goto out;
3318 case SEEK_DATA:
3319 case SEEK_HOLE:
3320 if (offset >= i_size_read(inode)) {
3321 inode_unlock(inode);
3322 return -ENXIO;
3323 }
3324
3325 ret = find_desired_extent(inode, &offset, whence);
3326 if (ret) {
3327 inode_unlock(inode);
3328 return ret;
3329 }
3330 }
3331
3332 offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3333out:
3334 inode_unlock(inode);
3335 return offset;
3336}
3337
3338static int btrfs_file_open(struct inode *inode, struct file *filp)
3339{
3340 filp->f_mode |= FMODE_NOWAIT;
3341 return generic_file_open(inode, filp);
3342}
3343
3344const struct file_operations btrfs_file_operations = {
3345 .llseek = btrfs_file_llseek,
3346 .read_iter = generic_file_read_iter,
3347 .splice_read = generic_file_splice_read,
3348 .write_iter = btrfs_file_write_iter,
3349 .mmap = btrfs_file_mmap,
3350 .open = btrfs_file_open,
3351 .release = btrfs_release_file,
3352 .fsync = btrfs_sync_file,
3353 .fallocate = btrfs_fallocate,
3354 .unlocked_ioctl = btrfs_ioctl,
3355#ifdef CONFIG_COMPAT
3356 .compat_ioctl = btrfs_compat_ioctl,
3357#endif
3358 .clone_file_range = btrfs_clone_file_range,
3359 .dedupe_file_range = btrfs_dedupe_file_range,
3360};
3361
3362void __cold btrfs_auto_defrag_exit(void)
3363{
3364 kmem_cache_destroy(btrfs_inode_defrag_cachep);
3365}
3366
3367int __init btrfs_auto_defrag_init(void)
3368{
3369 btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
3370 sizeof(struct inode_defrag), 0,
3371 SLAB_MEM_SPREAD,
3372 NULL);
3373 if (!btrfs_inode_defrag_cachep)
3374 return -ENOMEM;
3375
3376 return 0;
3377}
3378
3379int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
3380{
3381 int ret;
3382
3383 /*
3384 * So with compression we will find and lock a dirty page and clear the
3385 * first one as dirty, setup an async extent, and immediately return
3386 * with the entire range locked but with nobody actually marked with
3387 * writeback. So we can't just filemap_write_and_wait_range() and
3388 * expect it to work since it will just kick off a thread to do the
3389 * actual work. So we need to call filemap_fdatawrite_range _again_
3390 * since it will wait on the page lock, which won't be unlocked until
3391 * after the pages have been marked as writeback and so we're good to go
3392 * from there. We have to do this otherwise we'll miss the ordered
3393 * extents and that results in badness. Please Josef, do not think you
3394 * know better and pull this out at some point in the future, it is
3395 * right and you are wrong.
3396 */
3397 ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3398 if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
3399 &BTRFS_I(inode)->runtime_flags))
3400 ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3401
3402 return ret;
3403}