Loading...
1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/bitops.h>
4#include <linux/slab.h>
5#include <linux/bio.h>
6#include <linux/mm.h>
7#include <linux/pagemap.h>
8#include <linux/page-flags.h>
9#include <linux/sched/mm.h>
10#include <linux/spinlock.h>
11#include <linux/blkdev.h>
12#include <linux/swap.h>
13#include <linux/writeback.h>
14#include <linux/pagevec.h>
15#include <linux/prefetch.h>
16#include <linux/fsverity.h>
17#include "misc.h"
18#include "extent_io.h"
19#include "extent-io-tree.h"
20#include "extent_map.h"
21#include "ctree.h"
22#include "btrfs_inode.h"
23#include "bio.h"
24#include "locking.h"
25#include "rcu-string.h"
26#include "backref.h"
27#include "disk-io.h"
28#include "subpage.h"
29#include "zoned.h"
30#include "block-group.h"
31#include "compression.h"
32#include "fs.h"
33#include "accessors.h"
34#include "file-item.h"
35#include "file.h"
36#include "dev-replace.h"
37#include "super.h"
38#include "transaction.h"
39
40static struct kmem_cache *extent_buffer_cache;
41
42#ifdef CONFIG_BTRFS_DEBUG
43static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
44{
45 struct btrfs_fs_info *fs_info = eb->fs_info;
46 unsigned long flags;
47
48 spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
49 list_add(&eb->leak_list, &fs_info->allocated_ebs);
50 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
51}
52
53static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
54{
55 struct btrfs_fs_info *fs_info = eb->fs_info;
56 unsigned long flags;
57
58 spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
59 list_del(&eb->leak_list);
60 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
61}
62
63void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
64{
65 struct extent_buffer *eb;
66 unsigned long flags;
67
68 /*
69 * If we didn't get into open_ctree our allocated_ebs will not be
70 * initialized, so just skip this.
71 */
72 if (!fs_info->allocated_ebs.next)
73 return;
74
75 WARN_ON(!list_empty(&fs_info->allocated_ebs));
76 spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
77 while (!list_empty(&fs_info->allocated_ebs)) {
78 eb = list_first_entry(&fs_info->allocated_ebs,
79 struct extent_buffer, leak_list);
80 pr_err(
81 "BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n",
82 eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
83 btrfs_header_owner(eb));
84 list_del(&eb->leak_list);
85 kmem_cache_free(extent_buffer_cache, eb);
86 }
87 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
88}
89#else
90#define btrfs_leak_debug_add_eb(eb) do {} while (0)
91#define btrfs_leak_debug_del_eb(eb) do {} while (0)
92#endif
93
94/*
95 * Structure to record info about the bio being assembled, and other info like
96 * how many bytes are there before stripe/ordered extent boundary.
97 */
98struct btrfs_bio_ctrl {
99 struct btrfs_bio *bbio;
100 enum btrfs_compression_type compress_type;
101 u32 len_to_oe_boundary;
102 blk_opf_t opf;
103 btrfs_bio_end_io_t end_io_func;
104 struct writeback_control *wbc;
105};
106
107static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
108{
109 struct btrfs_bio *bbio = bio_ctrl->bbio;
110
111 if (!bbio)
112 return;
113
114 /* Caller should ensure the bio has at least some range added */
115 ASSERT(bbio->bio.bi_iter.bi_size);
116
117 if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
118 bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
119 btrfs_submit_compressed_read(bbio);
120 else
121 btrfs_submit_bio(bbio, 0);
122
123 /* The bbio is owned by the end_io handler now */
124 bio_ctrl->bbio = NULL;
125}
126
127/*
128 * Submit or fail the current bio in the bio_ctrl structure.
129 */
130static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
131{
132 struct btrfs_bio *bbio = bio_ctrl->bbio;
133
134 if (!bbio)
135 return;
136
137 if (ret) {
138 ASSERT(ret < 0);
139 btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
140 /* The bio is owned by the end_io handler now */
141 bio_ctrl->bbio = NULL;
142 } else {
143 submit_one_bio(bio_ctrl);
144 }
145}
146
147int __init extent_buffer_init_cachep(void)
148{
149 extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
150 sizeof(struct extent_buffer), 0,
151 SLAB_MEM_SPREAD, NULL);
152 if (!extent_buffer_cache)
153 return -ENOMEM;
154
155 return 0;
156}
157
158void __cold extent_buffer_free_cachep(void)
159{
160 /*
161 * Make sure all delayed rcu free are flushed before we
162 * destroy caches.
163 */
164 rcu_barrier();
165 kmem_cache_destroy(extent_buffer_cache);
166}
167
168void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
169{
170 unsigned long index = start >> PAGE_SHIFT;
171 unsigned long end_index = end >> PAGE_SHIFT;
172 struct page *page;
173
174 while (index <= end_index) {
175 page = find_get_page(inode->i_mapping, index);
176 BUG_ON(!page); /* Pages should be in the extent_io_tree */
177 clear_page_dirty_for_io(page);
178 put_page(page);
179 index++;
180 }
181}
182
183static void process_one_page(struct btrfs_fs_info *fs_info,
184 struct page *page, struct page *locked_page,
185 unsigned long page_ops, u64 start, u64 end)
186{
187 struct folio *folio = page_folio(page);
188 u32 len;
189
190 ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
191 len = end + 1 - start;
192
193 if (page_ops & PAGE_SET_ORDERED)
194 btrfs_folio_clamp_set_ordered(fs_info, folio, start, len);
195 if (page_ops & PAGE_START_WRITEBACK) {
196 btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
197 btrfs_folio_clamp_set_writeback(fs_info, folio, start, len);
198 }
199 if (page_ops & PAGE_END_WRITEBACK)
200 btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);
201
202 if (page != locked_page && (page_ops & PAGE_UNLOCK))
203 btrfs_folio_end_writer_lock(fs_info, folio, start, len);
204}
205
206static void __process_pages_contig(struct address_space *mapping,
207 struct page *locked_page, u64 start, u64 end,
208 unsigned long page_ops)
209{
210 struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
211 pgoff_t start_index = start >> PAGE_SHIFT;
212 pgoff_t end_index = end >> PAGE_SHIFT;
213 pgoff_t index = start_index;
214 struct folio_batch fbatch;
215 int i;
216
217 folio_batch_init(&fbatch);
218 while (index <= end_index) {
219 int found_folios;
220
221 found_folios = filemap_get_folios_contig(mapping, &index,
222 end_index, &fbatch);
223 for (i = 0; i < found_folios; i++) {
224 struct folio *folio = fbatch.folios[i];
225
226 process_one_page(fs_info, &folio->page, locked_page,
227 page_ops, start, end);
228 }
229 folio_batch_release(&fbatch);
230 cond_resched();
231 }
232}
233
234static noinline void __unlock_for_delalloc(struct inode *inode,
235 struct page *locked_page,
236 u64 start, u64 end)
237{
238 unsigned long index = start >> PAGE_SHIFT;
239 unsigned long end_index = end >> PAGE_SHIFT;
240
241 ASSERT(locked_page);
242 if (index == locked_page->index && end_index == index)
243 return;
244
245 __process_pages_contig(inode->i_mapping, locked_page, start, end,
246 PAGE_UNLOCK);
247}
248
249static noinline int lock_delalloc_pages(struct inode *inode,
250 struct page *locked_page,
251 u64 start,
252 u64 end)
253{
254 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
255 struct address_space *mapping = inode->i_mapping;
256 pgoff_t start_index = start >> PAGE_SHIFT;
257 pgoff_t end_index = end >> PAGE_SHIFT;
258 pgoff_t index = start_index;
259 u64 processed_end = start;
260 struct folio_batch fbatch;
261
262 if (index == locked_page->index && index == end_index)
263 return 0;
264
265 folio_batch_init(&fbatch);
266 while (index <= end_index) {
267 unsigned int found_folios, i;
268
269 found_folios = filemap_get_folios_contig(mapping, &index,
270 end_index, &fbatch);
271 if (found_folios == 0)
272 goto out;
273
274 for (i = 0; i < found_folios; i++) {
275 struct folio *folio = fbatch.folios[i];
276 struct page *page = folio_page(folio, 0);
277 u32 len = end + 1 - start;
278
279 if (page == locked_page)
280 continue;
281
282 if (btrfs_folio_start_writer_lock(fs_info, folio, start,
283 len))
284 goto out;
285
286 if (!PageDirty(page) || page->mapping != mapping) {
287 btrfs_folio_end_writer_lock(fs_info, folio, start,
288 len);
289 goto out;
290 }
291
292 processed_end = page_offset(page) + PAGE_SIZE - 1;
293 }
294 folio_batch_release(&fbatch);
295 cond_resched();
296 }
297
298 return 0;
299out:
300 folio_batch_release(&fbatch);
301 if (processed_end > start)
302 __unlock_for_delalloc(inode, locked_page, start, processed_end);
303 return -EAGAIN;
304}
305
306/*
307 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
308 * more than @max_bytes.
309 *
310 * @start: The original start bytenr to search.
311 * Will store the extent range start bytenr.
312 * @end: The original end bytenr of the search range
313 * Will store the extent range end bytenr.
314 *
315 * Return true if we find a delalloc range which starts inside the original
316 * range, and @start/@end will store the delalloc range start/end.
317 *
318 * Return false if we can't find any delalloc range which starts inside the
319 * original range, and @start/@end will be the non-delalloc range start/end.
320 */
321EXPORT_FOR_TESTS
322noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
323 struct page *locked_page, u64 *start,
324 u64 *end)
325{
326 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
327 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
328 const u64 orig_start = *start;
329 const u64 orig_end = *end;
330 /* The sanity tests may not set a valid fs_info. */
331 u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
332 u64 delalloc_start;
333 u64 delalloc_end;
334 bool found;
335 struct extent_state *cached_state = NULL;
336 int ret;
337 int loops = 0;
338
339 /* Caller should pass a valid @end to indicate the search range end */
340 ASSERT(orig_end > orig_start);
341
342 /* The range should at least cover part of the page */
343 ASSERT(!(orig_start >= page_offset(locked_page) + PAGE_SIZE ||
344 orig_end <= page_offset(locked_page)));
345again:
346 /* step one, find a bunch of delalloc bytes starting at start */
347 delalloc_start = *start;
348 delalloc_end = 0;
349 found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
350 max_bytes, &cached_state);
351 if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
352 *start = delalloc_start;
353
354 /* @delalloc_end can be -1, never go beyond @orig_end */
355 *end = min(delalloc_end, orig_end);
356 free_extent_state(cached_state);
357 return false;
358 }
359
360 /*
361 * start comes from the offset of locked_page. We have to lock
362 * pages in order, so we can't process delalloc bytes before
363 * locked_page
364 */
365 if (delalloc_start < *start)
366 delalloc_start = *start;
367
368 /*
369 * make sure to limit the number of pages we try to lock down
370 */
371 if (delalloc_end + 1 - delalloc_start > max_bytes)
372 delalloc_end = delalloc_start + max_bytes - 1;
373
374 /* step two, lock all the pages after the page that has start */
375 ret = lock_delalloc_pages(inode, locked_page,
376 delalloc_start, delalloc_end);
377 ASSERT(!ret || ret == -EAGAIN);
378 if (ret == -EAGAIN) {
379 /* some of the pages are gone, lets avoid looping by
380 * shortening the size of the delalloc range we're searching
381 */
382 free_extent_state(cached_state);
383 cached_state = NULL;
384 if (!loops) {
385 max_bytes = PAGE_SIZE;
386 loops = 1;
387 goto again;
388 } else {
389 found = false;
390 goto out_failed;
391 }
392 }
393
394 /* step three, lock the state bits for the whole range */
395 lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
396
397 /* then test to make sure it is all still delalloc */
398 ret = test_range_bit(tree, delalloc_start, delalloc_end,
399 EXTENT_DELALLOC, cached_state);
400 if (!ret) {
401 unlock_extent(tree, delalloc_start, delalloc_end,
402 &cached_state);
403 __unlock_for_delalloc(inode, locked_page,
404 delalloc_start, delalloc_end);
405 cond_resched();
406 goto again;
407 }
408 free_extent_state(cached_state);
409 *start = delalloc_start;
410 *end = delalloc_end;
411out_failed:
412 return found;
413}
414
415void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
416 struct page *locked_page,
417 u32 clear_bits, unsigned long page_ops)
418{
419 clear_extent_bit(&inode->io_tree, start, end, clear_bits, NULL);
420
421 __process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
422 start, end, page_ops);
423}
424
425static bool btrfs_verify_page(struct page *page, u64 start)
426{
427 if (!fsverity_active(page->mapping->host) ||
428 PageUptodate(page) ||
429 start >= i_size_read(page->mapping->host))
430 return true;
431 return fsverity_verify_page(page);
432}
433
434static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
435{
436 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
437 struct folio *folio = page_folio(page);
438
439 ASSERT(page_offset(page) <= start &&
440 start + len <= page_offset(page) + PAGE_SIZE);
441
442 if (uptodate && btrfs_verify_page(page, start))
443 btrfs_folio_set_uptodate(fs_info, folio, start, len);
444 else
445 btrfs_folio_clear_uptodate(fs_info, folio, start, len);
446
447 if (!btrfs_is_subpage(fs_info, page->mapping))
448 unlock_page(page);
449 else
450 btrfs_subpage_end_reader(fs_info, folio, start, len);
451}
452
453/*
454 * After a write IO is done, we need to:
455 *
456 * - clear the uptodate bits on error
457 * - clear the writeback bits in the extent tree for the range
458 * - filio_end_writeback() if there is no more pending io for the folio
459 *
460 * Scheduling is not allowed, so the extent state tree is expected
461 * to have one and only one object corresponding to this IO.
462 */
463static void end_bbio_data_write(struct btrfs_bio *bbio)
464{
465 struct bio *bio = &bbio->bio;
466 int error = blk_status_to_errno(bio->bi_status);
467 struct folio_iter fi;
468
469 ASSERT(!bio_flagged(bio, BIO_CLONED));
470 bio_for_each_folio_all(fi, bio) {
471 struct folio *folio = fi.folio;
472 struct inode *inode = folio->mapping->host;
473 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
474 const u32 sectorsize = fs_info->sectorsize;
475 u64 start = folio_pos(folio) + fi.offset;
476 u32 len = fi.length;
477
478 /* Only order 0 (single page) folios are allowed for data. */
479 ASSERT(folio_order(folio) == 0);
480
481 /* Our read/write should always be sector aligned. */
482 if (!IS_ALIGNED(fi.offset, sectorsize))
483 btrfs_err(fs_info,
484 "partial page write in btrfs with offset %zu and length %zu",
485 fi.offset, fi.length);
486 else if (!IS_ALIGNED(fi.length, sectorsize))
487 btrfs_info(fs_info,
488 "incomplete page write with offset %zu and length %zu",
489 fi.offset, fi.length);
490
491 btrfs_finish_ordered_extent(bbio->ordered,
492 folio_page(folio, 0), start, len, !error);
493 if (error)
494 mapping_set_error(folio->mapping, error);
495 btrfs_folio_clear_writeback(fs_info, folio, start, len);
496 }
497
498 bio_put(bio);
499}
500
501/*
502 * Record previously processed extent range
503 *
504 * For endio_readpage_release_extent() to handle a full extent range, reducing
505 * the extent io operations.
506 */
507struct processed_extent {
508 struct btrfs_inode *inode;
509 /* Start of the range in @inode */
510 u64 start;
511 /* End of the range in @inode */
512 u64 end;
513 bool uptodate;
514};
515
516/*
517 * Try to release processed extent range
518 *
519 * May not release the extent range right now if the current range is
520 * contiguous to processed extent.
521 *
522 * Will release processed extent when any of @inode, @uptodate, the range is
523 * no longer contiguous to the processed range.
524 *
525 * Passing @inode == NULL will force processed extent to be released.
526 */
527static void endio_readpage_release_extent(struct processed_extent *processed,
528 struct btrfs_inode *inode, u64 start, u64 end,
529 bool uptodate)
530{
531 struct extent_state *cached = NULL;
532 struct extent_io_tree *tree;
533
534 /* The first extent, initialize @processed */
535 if (!processed->inode)
536 goto update;
537
538 /*
539 * Contiguous to processed extent, just uptodate the end.
540 *
541 * Several things to notice:
542 *
543 * - bio can be merged as long as on-disk bytenr is contiguous
544 * This means we can have page belonging to other inodes, thus need to
545 * check if the inode still matches.
546 * - bvec can contain range beyond current page for multi-page bvec
547 * Thus we need to do processed->end + 1 >= start check
548 */
549 if (processed->inode == inode && processed->uptodate == uptodate &&
550 processed->end + 1 >= start && end >= processed->end) {
551 processed->end = end;
552 return;
553 }
554
555 tree = &processed->inode->io_tree;
556 /*
557 * Now we don't have range contiguous to the processed range, release
558 * the processed range now.
559 */
560 unlock_extent(tree, processed->start, processed->end, &cached);
561
562update:
563 /* Update processed to current range */
564 processed->inode = inode;
565 processed->start = start;
566 processed->end = end;
567 processed->uptodate = uptodate;
568}
569
570static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
571{
572 struct folio *folio = page_folio(page);
573
574 ASSERT(folio_test_locked(folio));
575 if (!btrfs_is_subpage(fs_info, folio->mapping))
576 return;
577
578 ASSERT(folio_test_private(folio));
579 btrfs_subpage_start_reader(fs_info, folio, page_offset(page), PAGE_SIZE);
580}
581
582/*
583 * After a data read IO is done, we need to:
584 *
585 * - clear the uptodate bits on error
586 * - set the uptodate bits if things worked
587 * - set the folio up to date if all extents in the tree are uptodate
588 * - clear the lock bit in the extent tree
589 * - unlock the folio if there are no other extents locked for it
590 *
591 * Scheduling is not allowed, so the extent state tree is expected
592 * to have one and only one object corresponding to this IO.
593 */
594static void end_bbio_data_read(struct btrfs_bio *bbio)
595{
596 struct bio *bio = &bbio->bio;
597 struct processed_extent processed = { 0 };
598 struct folio_iter fi;
599 /*
600 * The offset to the beginning of a bio, since one bio can never be
601 * larger than UINT_MAX, u32 here is enough.
602 */
603 u32 bio_offset = 0;
604
605 ASSERT(!bio_flagged(bio, BIO_CLONED));
606 bio_for_each_folio_all(fi, &bbio->bio) {
607 bool uptodate = !bio->bi_status;
608 struct folio *folio = fi.folio;
609 struct inode *inode = folio->mapping->host;
610 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
611 const u32 sectorsize = fs_info->sectorsize;
612 u64 start;
613 u64 end;
614 u32 len;
615
616 /* For now only order 0 folios are supported for data. */
617 ASSERT(folio_order(folio) == 0);
618 btrfs_debug(fs_info,
619 "%s: bi_sector=%llu, err=%d, mirror=%u",
620 __func__, bio->bi_iter.bi_sector, bio->bi_status,
621 bbio->mirror_num);
622
623 /*
624 * We always issue full-sector reads, but if some block in a
625 * folio fails to read, blk_update_request() will advance
626 * bv_offset and adjust bv_len to compensate. Print a warning
627 * for unaligned offsets, and an error if they don't add up to
628 * a full sector.
629 */
630 if (!IS_ALIGNED(fi.offset, sectorsize))
631 btrfs_err(fs_info,
632 "partial page read in btrfs with offset %zu and length %zu",
633 fi.offset, fi.length);
634 else if (!IS_ALIGNED(fi.offset + fi.length, sectorsize))
635 btrfs_info(fs_info,
636 "incomplete page read with offset %zu and length %zu",
637 fi.offset, fi.length);
638
639 start = folio_pos(folio) + fi.offset;
640 end = start + fi.length - 1;
641 len = fi.length;
642
643 if (likely(uptodate)) {
644 loff_t i_size = i_size_read(inode);
645 pgoff_t end_index = i_size >> folio_shift(folio);
646
647 /*
648 * Zero out the remaining part if this range straddles
649 * i_size.
650 *
651 * Here we should only zero the range inside the folio,
652 * not touch anything else.
653 *
654 * NOTE: i_size is exclusive while end is inclusive.
655 */
656 if (folio_index(folio) == end_index && i_size <= end) {
657 u32 zero_start = max(offset_in_folio(folio, i_size),
658 offset_in_folio(folio, start));
659 u32 zero_len = offset_in_folio(folio, end) + 1 -
660 zero_start;
661
662 folio_zero_range(folio, zero_start, zero_len);
663 }
664 }
665
666 /* Update page status and unlock. */
667 end_page_read(folio_page(folio, 0), uptodate, start, len);
668 endio_readpage_release_extent(&processed, BTRFS_I(inode),
669 start, end, uptodate);
670
671 ASSERT(bio_offset + len > bio_offset);
672 bio_offset += len;
673
674 }
675 /* Release the last extent */
676 endio_readpage_release_extent(&processed, NULL, 0, 0, false);
677 bio_put(bio);
678}
679
680/*
681 * Populate every free slot in a provided array with pages.
682 *
683 * @nr_pages: number of pages to allocate
684 * @page_array: the array to fill with pages; any existing non-null entries in
685 * the array will be skipped
686 * @extra_gfp: the extra GFP flags for the allocation.
687 *
688 * Return: 0 if all pages were able to be allocated;
689 * -ENOMEM otherwise, the partially allocated pages would be freed and
690 * the array slots zeroed
691 */
692int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
693 gfp_t extra_gfp)
694{
695 unsigned int allocated;
696
697 for (allocated = 0; allocated < nr_pages;) {
698 unsigned int last = allocated;
699
700 allocated = alloc_pages_bulk_array(GFP_NOFS | extra_gfp,
701 nr_pages, page_array);
702
703 if (allocated == nr_pages)
704 return 0;
705
706 /*
707 * During this iteration, no page could be allocated, even
708 * though alloc_pages_bulk_array() falls back to alloc_page()
709 * if it could not bulk-allocate. So we must be out of memory.
710 */
711 if (allocated == last) {
712 for (int i = 0; i < allocated; i++) {
713 __free_page(page_array[i]);
714 page_array[i] = NULL;
715 }
716 return -ENOMEM;
717 }
718
719 memalloc_retry_wait(GFP_NOFS);
720 }
721 return 0;
722}
723
724/*
725 * Populate needed folios for the extent buffer.
726 *
727 * For now, the folios populated are always in order 0 (aka, single page).
728 */
729static int alloc_eb_folio_array(struct extent_buffer *eb, gfp_t extra_gfp)
730{
731 struct page *page_array[INLINE_EXTENT_BUFFER_PAGES] = { 0 };
732 int num_pages = num_extent_pages(eb);
733 int ret;
734
735 ret = btrfs_alloc_page_array(num_pages, page_array, extra_gfp);
736 if (ret < 0)
737 return ret;
738
739 for (int i = 0; i < num_pages; i++)
740 eb->folios[i] = page_folio(page_array[i]);
741 return 0;
742}
743
744static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
745 struct page *page, u64 disk_bytenr,
746 unsigned int pg_offset)
747{
748 struct bio *bio = &bio_ctrl->bbio->bio;
749 struct bio_vec *bvec = bio_last_bvec_all(bio);
750 const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
751
752 if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
753 /*
754 * For compression, all IO should have its logical bytenr set
755 * to the starting bytenr of the compressed extent.
756 */
757 return bio->bi_iter.bi_sector == sector;
758 }
759
760 /*
761 * The contig check requires the following conditions to be met:
762 *
763 * 1) The pages are belonging to the same inode
764 * This is implied by the call chain.
765 *
766 * 2) The range has adjacent logical bytenr
767 *
768 * 3) The range has adjacent file offset
769 * This is required for the usage of btrfs_bio->file_offset.
770 */
771 return bio_end_sector(bio) == sector &&
772 page_offset(bvec->bv_page) + bvec->bv_offset + bvec->bv_len ==
773 page_offset(page) + pg_offset;
774}
775
776static void alloc_new_bio(struct btrfs_inode *inode,
777 struct btrfs_bio_ctrl *bio_ctrl,
778 u64 disk_bytenr, u64 file_offset)
779{
780 struct btrfs_fs_info *fs_info = inode->root->fs_info;
781 struct btrfs_bio *bbio;
782
783 bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
784 bio_ctrl->end_io_func, NULL);
785 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
786 bbio->inode = inode;
787 bbio->file_offset = file_offset;
788 bio_ctrl->bbio = bbio;
789 bio_ctrl->len_to_oe_boundary = U32_MAX;
790
791 /* Limit data write bios to the ordered boundary. */
792 if (bio_ctrl->wbc) {
793 struct btrfs_ordered_extent *ordered;
794
795 ordered = btrfs_lookup_ordered_extent(inode, file_offset);
796 if (ordered) {
797 bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
798 ordered->file_offset +
799 ordered->disk_num_bytes - file_offset);
800 bbio->ordered = ordered;
801 }
802
803 /*
804 * Pick the last added device to support cgroup writeback. For
805 * multi-device file systems this means blk-cgroup policies have
806 * to always be set on the last added/replaced device.
807 * This is a bit odd but has been like that for a long time.
808 */
809 bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
810 wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
811 }
812}
813
814/*
815 * @disk_bytenr: logical bytenr where the write will be
816 * @page: page to add to the bio
817 * @size: portion of page that we want to write to
818 * @pg_offset: offset of the new bio or to check whether we are adding
819 * a contiguous page to the previous one
820 *
821 * The will either add the page into the existing @bio_ctrl->bbio, or allocate a
822 * new one in @bio_ctrl->bbio.
823 * The mirror number for this IO should already be initizlied in
824 * @bio_ctrl->mirror_num.
825 */
826static void submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl,
827 u64 disk_bytenr, struct page *page,
828 size_t size, unsigned long pg_offset)
829{
830 struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
831
832 ASSERT(pg_offset + size <= PAGE_SIZE);
833 ASSERT(bio_ctrl->end_io_func);
834
835 if (bio_ctrl->bbio &&
836 !btrfs_bio_is_contig(bio_ctrl, page, disk_bytenr, pg_offset))
837 submit_one_bio(bio_ctrl);
838
839 do {
840 u32 len = size;
841
842 /* Allocate new bio if needed */
843 if (!bio_ctrl->bbio) {
844 alloc_new_bio(inode, bio_ctrl, disk_bytenr,
845 page_offset(page) + pg_offset);
846 }
847
848 /* Cap to the current ordered extent boundary if there is one. */
849 if (len > bio_ctrl->len_to_oe_boundary) {
850 ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE);
851 ASSERT(is_data_inode(&inode->vfs_inode));
852 len = bio_ctrl->len_to_oe_boundary;
853 }
854
855 if (bio_add_page(&bio_ctrl->bbio->bio, page, len, pg_offset) != len) {
856 /* bio full: move on to a new one */
857 submit_one_bio(bio_ctrl);
858 continue;
859 }
860
861 if (bio_ctrl->wbc)
862 wbc_account_cgroup_owner(bio_ctrl->wbc, page, len);
863
864 size -= len;
865 pg_offset += len;
866 disk_bytenr += len;
867
868 /*
869 * len_to_oe_boundary defaults to U32_MAX, which isn't page or
870 * sector aligned. alloc_new_bio() then sets it to the end of
871 * our ordered extent for writes into zoned devices.
872 *
873 * When len_to_oe_boundary is tracking an ordered extent, we
874 * trust the ordered extent code to align things properly, and
875 * the check above to cap our write to the ordered extent
876 * boundary is correct.
877 *
878 * When len_to_oe_boundary is U32_MAX, the cap above would
879 * result in a 4095 byte IO for the last page right before
880 * we hit the bio limit of UINT_MAX. bio_add_page() has all
881 * the checks required to make sure we don't overflow the bio,
882 * and we should just ignore len_to_oe_boundary completely
883 * unless we're using it to track an ordered extent.
884 *
885 * It's pretty hard to make a bio sized U32_MAX, but it can
886 * happen when the page cache is able to feed us contiguous
887 * pages for large extents.
888 */
889 if (bio_ctrl->len_to_oe_boundary != U32_MAX)
890 bio_ctrl->len_to_oe_boundary -= len;
891
892 /* Ordered extent boundary: move on to a new bio. */
893 if (bio_ctrl->len_to_oe_boundary == 0)
894 submit_one_bio(bio_ctrl);
895 } while (size);
896}
897
898static int attach_extent_buffer_folio(struct extent_buffer *eb,
899 struct folio *folio,
900 struct btrfs_subpage *prealloc)
901{
902 struct btrfs_fs_info *fs_info = eb->fs_info;
903 int ret = 0;
904
905 /*
906 * If the page is mapped to btree inode, we should hold the private
907 * lock to prevent race.
908 * For cloned or dummy extent buffers, their pages are not mapped and
909 * will not race with any other ebs.
910 */
911 if (folio->mapping)
912 lockdep_assert_held(&folio->mapping->i_private_lock);
913
914 if (fs_info->nodesize >= PAGE_SIZE) {
915 if (!folio_test_private(folio))
916 folio_attach_private(folio, eb);
917 else
918 WARN_ON(folio_get_private(folio) != eb);
919 return 0;
920 }
921
922 /* Already mapped, just free prealloc */
923 if (folio_test_private(folio)) {
924 btrfs_free_subpage(prealloc);
925 return 0;
926 }
927
928 if (prealloc)
929 /* Has preallocated memory for subpage */
930 folio_attach_private(folio, prealloc);
931 else
932 /* Do new allocation to attach subpage */
933 ret = btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_METADATA);
934 return ret;
935}
936
937int set_page_extent_mapped(struct page *page)
938{
939 struct folio *folio = page_folio(page);
940 struct btrfs_fs_info *fs_info;
941
942 ASSERT(page->mapping);
943
944 if (folio_test_private(folio))
945 return 0;
946
947 fs_info = btrfs_sb(page->mapping->host->i_sb);
948
949 if (btrfs_is_subpage(fs_info, page->mapping))
950 return btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
951
952 folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE);
953 return 0;
954}
955
956void clear_page_extent_mapped(struct page *page)
957{
958 struct folio *folio = page_folio(page);
959 struct btrfs_fs_info *fs_info;
960
961 ASSERT(page->mapping);
962
963 if (!folio_test_private(folio))
964 return;
965
966 fs_info = btrfs_sb(page->mapping->host->i_sb);
967 if (btrfs_is_subpage(fs_info, page->mapping))
968 return btrfs_detach_subpage(fs_info, folio);
969
970 folio_detach_private(folio);
971}
972
973static struct extent_map *
974__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
975 u64 start, u64 len, struct extent_map **em_cached)
976{
977 struct extent_map *em;
978
979 if (em_cached && *em_cached) {
980 em = *em_cached;
981 if (extent_map_in_tree(em) && start >= em->start &&
982 start < extent_map_end(em)) {
983 refcount_inc(&em->refs);
984 return em;
985 }
986
987 free_extent_map(em);
988 *em_cached = NULL;
989 }
990
991 em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, start, len);
992 if (em_cached && !IS_ERR(em)) {
993 BUG_ON(*em_cached);
994 refcount_inc(&em->refs);
995 *em_cached = em;
996 }
997 return em;
998}
999/*
1000 * basic readpage implementation. Locked extent state structs are inserted
1001 * into the tree that are removed when the IO is done (by the end_io
1002 * handlers)
1003 * XXX JDM: This needs looking at to ensure proper page locking
1004 * return 0 on success, otherwise return error
1005 */
1006static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
1007 struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
1008{
1009 struct inode *inode = page->mapping->host;
1010 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1011 u64 start = page_offset(page);
1012 const u64 end = start + PAGE_SIZE - 1;
1013 u64 cur = start;
1014 u64 extent_offset;
1015 u64 last_byte = i_size_read(inode);
1016 u64 block_start;
1017 struct extent_map *em;
1018 int ret = 0;
1019 size_t pg_offset = 0;
1020 size_t iosize;
1021 size_t blocksize = inode->i_sb->s_blocksize;
1022 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1023
1024 ret = set_page_extent_mapped(page);
1025 if (ret < 0) {
1026 unlock_extent(tree, start, end, NULL);
1027 unlock_page(page);
1028 return ret;
1029 }
1030
1031 if (page->index == last_byte >> PAGE_SHIFT) {
1032 size_t zero_offset = offset_in_page(last_byte);
1033
1034 if (zero_offset) {
1035 iosize = PAGE_SIZE - zero_offset;
1036 memzero_page(page, zero_offset, iosize);
1037 }
1038 }
1039 bio_ctrl->end_io_func = end_bbio_data_read;
1040 begin_page_read(fs_info, page);
1041 while (cur <= end) {
1042 enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
1043 bool force_bio_submit = false;
1044 u64 disk_bytenr;
1045
1046 ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
1047 if (cur >= last_byte) {
1048 iosize = PAGE_SIZE - pg_offset;
1049 memzero_page(page, pg_offset, iosize);
1050 unlock_extent(tree, cur, cur + iosize - 1, NULL);
1051 end_page_read(page, true, cur, iosize);
1052 break;
1053 }
1054 em = __get_extent_map(inode, page, pg_offset, cur,
1055 end - cur + 1, em_cached);
1056 if (IS_ERR(em)) {
1057 unlock_extent(tree, cur, end, NULL);
1058 end_page_read(page, false, cur, end + 1 - cur);
1059 return PTR_ERR(em);
1060 }
1061 extent_offset = cur - em->start;
1062 BUG_ON(extent_map_end(em) <= cur);
1063 BUG_ON(end < cur);
1064
1065 compress_type = extent_map_compression(em);
1066
1067 iosize = min(extent_map_end(em) - cur, end - cur + 1);
1068 iosize = ALIGN(iosize, blocksize);
1069 if (compress_type != BTRFS_COMPRESS_NONE)
1070 disk_bytenr = em->block_start;
1071 else
1072 disk_bytenr = em->block_start + extent_offset;
1073 block_start = em->block_start;
1074 if (em->flags & EXTENT_FLAG_PREALLOC)
1075 block_start = EXTENT_MAP_HOLE;
1076
1077 /*
1078 * If we have a file range that points to a compressed extent
1079 * and it's followed by a consecutive file range that points
1080 * to the same compressed extent (possibly with a different
1081 * offset and/or length, so it either points to the whole extent
1082 * or only part of it), we must make sure we do not submit a
1083 * single bio to populate the pages for the 2 ranges because
1084 * this makes the compressed extent read zero out the pages
1085 * belonging to the 2nd range. Imagine the following scenario:
1086 *
1087 * File layout
1088 * [0 - 8K] [8K - 24K]
1089 * | |
1090 * | |
1091 * points to extent X, points to extent X,
1092 * offset 4K, length of 8K offset 0, length 16K
1093 *
1094 * [extent X, compressed length = 4K uncompressed length = 16K]
1095 *
1096 * If the bio to read the compressed extent covers both ranges,
1097 * it will decompress extent X into the pages belonging to the
1098 * first range and then it will stop, zeroing out the remaining
1099 * pages that belong to the other range that points to extent X.
1100 * So here we make sure we submit 2 bios, one for the first
1101 * range and another one for the third range. Both will target
1102 * the same physical extent from disk, but we can't currently
1103 * make the compressed bio endio callback populate the pages
1104 * for both ranges because each compressed bio is tightly
1105 * coupled with a single extent map, and each range can have
1106 * an extent map with a different offset value relative to the
1107 * uncompressed data of our extent and different lengths. This
1108 * is a corner case so we prioritize correctness over
1109 * non-optimal behavior (submitting 2 bios for the same extent).
1110 */
1111 if (compress_type != BTRFS_COMPRESS_NONE &&
1112 prev_em_start && *prev_em_start != (u64)-1 &&
1113 *prev_em_start != em->start)
1114 force_bio_submit = true;
1115
1116 if (prev_em_start)
1117 *prev_em_start = em->start;
1118
1119 free_extent_map(em);
1120 em = NULL;
1121
1122 /* we've found a hole, just zero and go on */
1123 if (block_start == EXTENT_MAP_HOLE) {
1124 memzero_page(page, pg_offset, iosize);
1125
1126 unlock_extent(tree, cur, cur + iosize - 1, NULL);
1127 end_page_read(page, true, cur, iosize);
1128 cur = cur + iosize;
1129 pg_offset += iosize;
1130 continue;
1131 }
1132 /* the get_extent function already copied into the page */
1133 if (block_start == EXTENT_MAP_INLINE) {
1134 unlock_extent(tree, cur, cur + iosize - 1, NULL);
1135 end_page_read(page, true, cur, iosize);
1136 cur = cur + iosize;
1137 pg_offset += iosize;
1138 continue;
1139 }
1140
1141 if (bio_ctrl->compress_type != compress_type) {
1142 submit_one_bio(bio_ctrl);
1143 bio_ctrl->compress_type = compress_type;
1144 }
1145
1146 if (force_bio_submit)
1147 submit_one_bio(bio_ctrl);
1148 submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
1149 pg_offset);
1150 cur = cur + iosize;
1151 pg_offset += iosize;
1152 }
1153
1154 return 0;
1155}
1156
1157int btrfs_read_folio(struct file *file, struct folio *folio)
1158{
1159 struct page *page = &folio->page;
1160 struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
1161 u64 start = page_offset(page);
1162 u64 end = start + PAGE_SIZE - 1;
1163 struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
1164 int ret;
1165
1166 btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
1167
1168 ret = btrfs_do_readpage(page, NULL, &bio_ctrl, NULL);
1169 /*
1170 * If btrfs_do_readpage() failed we will want to submit the assembled
1171 * bio to do the cleanup.
1172 */
1173 submit_one_bio(&bio_ctrl);
1174 return ret;
1175}
1176
1177static inline void contiguous_readpages(struct page *pages[], int nr_pages,
1178 u64 start, u64 end,
1179 struct extent_map **em_cached,
1180 struct btrfs_bio_ctrl *bio_ctrl,
1181 u64 *prev_em_start)
1182{
1183 struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
1184 int index;
1185
1186 btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
1187
1188 for (index = 0; index < nr_pages; index++) {
1189 btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
1190 prev_em_start);
1191 put_page(pages[index]);
1192 }
1193}
1194
1195/*
1196 * helper for __extent_writepage, doing all of the delayed allocation setup.
1197 *
1198 * This returns 1 if btrfs_run_delalloc_range function did all the work required
1199 * to write the page (copy into inline extent). In this case the IO has
1200 * been started and the page is already unlocked.
1201 *
1202 * This returns 0 if all went well (page still locked)
1203 * This returns < 0 if there were errors (page still locked)
1204 */
1205static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
1206 struct page *page, struct writeback_control *wbc)
1207{
1208 const u64 page_start = page_offset(page);
1209 const u64 page_end = page_start + PAGE_SIZE - 1;
1210 u64 delalloc_start = page_start;
1211 u64 delalloc_end = page_end;
1212 u64 delalloc_to_write = 0;
1213 int ret = 0;
1214
1215 while (delalloc_start < page_end) {
1216 delalloc_end = page_end;
1217 if (!find_lock_delalloc_range(&inode->vfs_inode, page,
1218 &delalloc_start, &delalloc_end)) {
1219 delalloc_start = delalloc_end + 1;
1220 continue;
1221 }
1222
1223 ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
1224 delalloc_end, wbc);
1225 if (ret < 0)
1226 return ret;
1227
1228 delalloc_start = delalloc_end + 1;
1229 }
1230
1231 /*
1232 * delalloc_end is already one less than the total length, so
1233 * we don't subtract one from PAGE_SIZE
1234 */
1235 delalloc_to_write +=
1236 DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
1237
1238 /*
1239 * If btrfs_run_dealloc_range() already started I/O and unlocked
1240 * the pages, we just need to account for them here.
1241 */
1242 if (ret == 1) {
1243 wbc->nr_to_write -= delalloc_to_write;
1244 return 1;
1245 }
1246
1247 if (wbc->nr_to_write < delalloc_to_write) {
1248 int thresh = 8192;
1249
1250 if (delalloc_to_write < thresh * 2)
1251 thresh = delalloc_to_write;
1252 wbc->nr_to_write = min_t(u64, delalloc_to_write,
1253 thresh);
1254 }
1255
1256 return 0;
1257}
1258
1259/*
1260 * Find the first byte we need to write.
1261 *
1262 * For subpage, one page can contain several sectors, and
1263 * __extent_writepage_io() will just grab all extent maps in the page
1264 * range and try to submit all non-inline/non-compressed extents.
1265 *
1266 * This is a big problem for subpage, we shouldn't re-submit already written
1267 * data at all.
1268 * This function will lookup subpage dirty bit to find which range we really
1269 * need to submit.
1270 *
1271 * Return the next dirty range in [@start, @end).
1272 * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
1273 */
1274static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
1275 struct page *page, u64 *start, u64 *end)
1276{
1277 struct folio *folio = page_folio(page);
1278 struct btrfs_subpage *subpage = folio_get_private(folio);
1279 struct btrfs_subpage_info *spi = fs_info->subpage_info;
1280 u64 orig_start = *start;
1281 /* Declare as unsigned long so we can use bitmap ops */
1282 unsigned long flags;
1283 int range_start_bit;
1284 int range_end_bit;
1285
1286 /*
1287 * For regular sector size == page size case, since one page only
1288 * contains one sector, we return the page offset directly.
1289 */
1290 if (!btrfs_is_subpage(fs_info, page->mapping)) {
1291 *start = page_offset(page);
1292 *end = page_offset(page) + PAGE_SIZE;
1293 return;
1294 }
1295
1296 range_start_bit = spi->dirty_offset +
1297 (offset_in_page(orig_start) >> fs_info->sectorsize_bits);
1298
1299 /* We should have the page locked, but just in case */
1300 spin_lock_irqsave(&subpage->lock, flags);
1301 bitmap_next_set_region(subpage->bitmaps, &range_start_bit, &range_end_bit,
1302 spi->dirty_offset + spi->bitmap_nr_bits);
1303 spin_unlock_irqrestore(&subpage->lock, flags);
1304
1305 range_start_bit -= spi->dirty_offset;
1306 range_end_bit -= spi->dirty_offset;
1307
1308 *start = page_offset(page) + range_start_bit * fs_info->sectorsize;
1309 *end = page_offset(page) + range_end_bit * fs_info->sectorsize;
1310}
1311
1312/*
1313 * helper for __extent_writepage. This calls the writepage start hooks,
1314 * and does the loop to map the page into extents and bios.
1315 *
1316 * We return 1 if the IO is started and the page is unlocked,
1317 * 0 if all went well (page still locked)
1318 * < 0 if there were errors (page still locked)
1319 */
1320static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
1321 struct page *page,
1322 struct btrfs_bio_ctrl *bio_ctrl,
1323 loff_t i_size,
1324 int *nr_ret)
1325{
1326 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1327 u64 cur = page_offset(page);
1328 u64 end = cur + PAGE_SIZE - 1;
1329 u64 extent_offset;
1330 u64 block_start;
1331 struct extent_map *em;
1332 int ret = 0;
1333 int nr = 0;
1334
1335 ret = btrfs_writepage_cow_fixup(page);
1336 if (ret) {
1337 /* Fixup worker will requeue */
1338 redirty_page_for_writepage(bio_ctrl->wbc, page);
1339 unlock_page(page);
1340 return 1;
1341 }
1342
1343 bio_ctrl->end_io_func = end_bbio_data_write;
1344 while (cur <= end) {
1345 u32 len = end - cur + 1;
1346 u64 disk_bytenr;
1347 u64 em_end;
1348 u64 dirty_range_start = cur;
1349 u64 dirty_range_end;
1350 u32 iosize;
1351
1352 if (cur >= i_size) {
1353 btrfs_mark_ordered_io_finished(inode, page, cur, len,
1354 true);
1355 /*
1356 * This range is beyond i_size, thus we don't need to
1357 * bother writing back.
1358 * But we still need to clear the dirty subpage bit, or
1359 * the next time the page gets dirtied, we will try to
1360 * writeback the sectors with subpage dirty bits,
1361 * causing writeback without ordered extent.
1362 */
1363 btrfs_folio_clear_dirty(fs_info, page_folio(page), cur, len);
1364 break;
1365 }
1366
1367 find_next_dirty_byte(fs_info, page, &dirty_range_start,
1368 &dirty_range_end);
1369 if (cur < dirty_range_start) {
1370 cur = dirty_range_start;
1371 continue;
1372 }
1373
1374 em = btrfs_get_extent(inode, NULL, 0, cur, len);
1375 if (IS_ERR(em)) {
1376 ret = PTR_ERR_OR_ZERO(em);
1377 goto out_error;
1378 }
1379
1380 extent_offset = cur - em->start;
1381 em_end = extent_map_end(em);
1382 ASSERT(cur <= em_end);
1383 ASSERT(cur < end);
1384 ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
1385 ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
1386
1387 block_start = em->block_start;
1388 disk_bytenr = em->block_start + extent_offset;
1389
1390 ASSERT(!extent_map_is_compressed(em));
1391 ASSERT(block_start != EXTENT_MAP_HOLE);
1392 ASSERT(block_start != EXTENT_MAP_INLINE);
1393
1394 /*
1395 * Note that em_end from extent_map_end() and dirty_range_end from
1396 * find_next_dirty_byte() are all exclusive
1397 */
1398 iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
1399 free_extent_map(em);
1400 em = NULL;
1401
1402 btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
1403 if (!PageWriteback(page)) {
1404 btrfs_err(inode->root->fs_info,
1405 "page %lu not writeback, cur %llu end %llu",
1406 page->index, cur, end);
1407 }
1408
1409 /*
1410 * Although the PageDirty bit is cleared before entering this
1411 * function, subpage dirty bit is not cleared.
1412 * So clear subpage dirty bit here so next time we won't submit
1413 * page for range already written to disk.
1414 */
1415 btrfs_folio_clear_dirty(fs_info, page_folio(page), cur, iosize);
1416
1417 submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
1418 cur - page_offset(page));
1419 cur += iosize;
1420 nr++;
1421 }
1422
1423 btrfs_folio_assert_not_dirty(fs_info, page_folio(page));
1424 *nr_ret = nr;
1425 return 0;
1426
1427out_error:
1428 /*
1429 * If we finish without problem, we should not only clear page dirty,
1430 * but also empty subpage dirty bits
1431 */
1432 *nr_ret = nr;
1433 return ret;
1434}
1435
1436/*
1437 * the writepage semantics are similar to regular writepage. extent
1438 * records are inserted to lock ranges in the tree, and as dirty areas
1439 * are found, they are marked writeback. Then the lock bits are removed
1440 * and the end_io handler clears the writeback ranges
1441 *
1442 * Return 0 if everything goes well.
1443 * Return <0 for error.
1444 */
1445static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl)
1446{
1447 struct folio *folio = page_folio(page);
1448 struct inode *inode = page->mapping->host;
1449 const u64 page_start = page_offset(page);
1450 int ret;
1451 int nr = 0;
1452 size_t pg_offset;
1453 loff_t i_size = i_size_read(inode);
1454 unsigned long end_index = i_size >> PAGE_SHIFT;
1455
1456 trace___extent_writepage(page, inode, bio_ctrl->wbc);
1457
1458 WARN_ON(!PageLocked(page));
1459
1460 pg_offset = offset_in_page(i_size);
1461 if (page->index > end_index ||
1462 (page->index == end_index && !pg_offset)) {
1463 folio_invalidate(folio, 0, folio_size(folio));
1464 folio_unlock(folio);
1465 return 0;
1466 }
1467
1468 if (page->index == end_index)
1469 memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
1470
1471 ret = set_page_extent_mapped(page);
1472 if (ret < 0)
1473 goto done;
1474
1475 ret = writepage_delalloc(BTRFS_I(inode), page, bio_ctrl->wbc);
1476 if (ret == 1)
1477 return 0;
1478 if (ret)
1479 goto done;
1480
1481 ret = __extent_writepage_io(BTRFS_I(inode), page, bio_ctrl, i_size, &nr);
1482 if (ret == 1)
1483 return 0;
1484
1485 bio_ctrl->wbc->nr_to_write--;
1486
1487done:
1488 if (nr == 0) {
1489 /* make sure the mapping tag for page dirty gets cleared */
1490 set_page_writeback(page);
1491 end_page_writeback(page);
1492 }
1493 if (ret) {
1494 btrfs_mark_ordered_io_finished(BTRFS_I(inode), page, page_start,
1495 PAGE_SIZE, !ret);
1496 mapping_set_error(page->mapping, ret);
1497 }
1498 unlock_page(page);
1499 ASSERT(ret <= 0);
1500 return ret;
1501}
1502
1503void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
1504{
1505 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
1506 TASK_UNINTERRUPTIBLE);
1507}
1508
1509/*
1510 * Lock extent buffer status and pages for writeback.
1511 *
1512 * Return %false if the extent buffer doesn't need to be submitted (e.g. the
1513 * extent buffer is not dirty)
1514 * Return %true is the extent buffer is submitted to bio.
1515 */
1516static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
1517 struct writeback_control *wbc)
1518{
1519 struct btrfs_fs_info *fs_info = eb->fs_info;
1520 bool ret = false;
1521
1522 btrfs_tree_lock(eb);
1523 while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
1524 btrfs_tree_unlock(eb);
1525 if (wbc->sync_mode != WB_SYNC_ALL)
1526 return false;
1527 wait_on_extent_buffer_writeback(eb);
1528 btrfs_tree_lock(eb);
1529 }
1530
1531 /*
1532 * We need to do this to prevent races in people who check if the eb is
1533 * under IO since we can end up having no IO bits set for a short period
1534 * of time.
1535 */
1536 spin_lock(&eb->refs_lock);
1537 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
1538 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1539 spin_unlock(&eb->refs_lock);
1540 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
1541 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
1542 -eb->len,
1543 fs_info->dirty_metadata_batch);
1544 ret = true;
1545 } else {
1546 spin_unlock(&eb->refs_lock);
1547 }
1548 btrfs_tree_unlock(eb);
1549 return ret;
1550}
1551
1552static void set_btree_ioerr(struct extent_buffer *eb)
1553{
1554 struct btrfs_fs_info *fs_info = eb->fs_info;
1555
1556 set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1557
1558 /*
1559 * A read may stumble upon this buffer later, make sure that it gets an
1560 * error and knows there was an error.
1561 */
1562 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
1563
1564 /*
1565 * We need to set the mapping with the io error as well because a write
1566 * error will flip the file system readonly, and then syncfs() will
1567 * return a 0 because we are readonly if we don't modify the err seq for
1568 * the superblock.
1569 */
1570 mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO);
1571
1572 /*
1573 * If writeback for a btree extent that doesn't belong to a log tree
1574 * failed, increment the counter transaction->eb_write_errors.
1575 * We do this because while the transaction is running and before it's
1576 * committing (when we call filemap_fdata[write|wait]_range against
1577 * the btree inode), we might have
1578 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
1579 * returns an error or an error happens during writeback, when we're
1580 * committing the transaction we wouldn't know about it, since the pages
1581 * can be no longer dirty nor marked anymore for writeback (if a
1582 * subsequent modification to the extent buffer didn't happen before the
1583 * transaction commit), which makes filemap_fdata[write|wait]_range not
1584 * able to find the pages tagged with SetPageError at transaction
1585 * commit time. So if this happens we must abort the transaction,
1586 * otherwise we commit a super block with btree roots that point to
1587 * btree nodes/leafs whose content on disk is invalid - either garbage
1588 * or the content of some node/leaf from a past generation that got
1589 * cowed or deleted and is no longer valid.
1590 *
1591 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
1592 * not be enough - we need to distinguish between log tree extents vs
1593 * non-log tree extents, and the next filemap_fdatawait_range() call
1594 * will catch and clear such errors in the mapping - and that call might
1595 * be from a log sync and not from a transaction commit. Also, checking
1596 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
1597 * not done and would not be reliable - the eb might have been released
1598 * from memory and reading it back again means that flag would not be
1599 * set (since it's a runtime flag, not persisted on disk).
1600 *
1601 * Using the flags below in the btree inode also makes us achieve the
1602 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started
1603 * writeback for all dirty pages and before filemap_fdatawait_range()
1604 * is called, the writeback for all dirty pages had already finished
1605 * with errors - because we were not using AS_EIO/AS_ENOSPC,
1606 * filemap_fdatawait_range() would return success, as it could not know
1607 * that writeback errors happened (the pages were no longer tagged for
1608 * writeback).
1609 */
1610 switch (eb->log_index) {
1611 case -1:
1612 set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
1613 break;
1614 case 0:
1615 set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
1616 break;
1617 case 1:
1618 set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
1619 break;
1620 default:
1621 BUG(); /* unexpected, logic error */
1622 }
1623}
1624
1625/*
1626 * The endio specific version which won't touch any unsafe spinlock in endio
1627 * context.
1628 */
1629static struct extent_buffer *find_extent_buffer_nolock(
1630 struct btrfs_fs_info *fs_info, u64 start)
1631{
1632 struct extent_buffer *eb;
1633
1634 rcu_read_lock();
1635 eb = radix_tree_lookup(&fs_info->buffer_radix,
1636 start >> fs_info->sectorsize_bits);
1637 if (eb && atomic_inc_not_zero(&eb->refs)) {
1638 rcu_read_unlock();
1639 return eb;
1640 }
1641 rcu_read_unlock();
1642 return NULL;
1643}
1644
1645static void end_bbio_meta_write(struct btrfs_bio *bbio)
1646{
1647 struct extent_buffer *eb = bbio->private;
1648 struct btrfs_fs_info *fs_info = eb->fs_info;
1649 bool uptodate = !bbio->bio.bi_status;
1650 struct folio_iter fi;
1651 u32 bio_offset = 0;
1652
1653 if (!uptodate)
1654 set_btree_ioerr(eb);
1655
1656 bio_for_each_folio_all(fi, &bbio->bio) {
1657 u64 start = eb->start + bio_offset;
1658 struct folio *folio = fi.folio;
1659 u32 len = fi.length;
1660
1661 btrfs_folio_clear_writeback(fs_info, folio, start, len);
1662 bio_offset += len;
1663 }
1664
1665 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1666 smp_mb__after_atomic();
1667 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
1668
1669 bio_put(&bbio->bio);
1670}
1671
1672static void prepare_eb_write(struct extent_buffer *eb)
1673{
1674 u32 nritems;
1675 unsigned long start;
1676 unsigned long end;
1677
1678 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1679
1680 /* Set btree blocks beyond nritems with 0 to avoid stale content */
1681 nritems = btrfs_header_nritems(eb);
1682 if (btrfs_header_level(eb) > 0) {
1683 end = btrfs_node_key_ptr_offset(eb, nritems);
1684 memzero_extent_buffer(eb, end, eb->len - end);
1685 } else {
1686 /*
1687 * Leaf:
1688 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
1689 */
1690 start = btrfs_item_nr_offset(eb, nritems);
1691 end = btrfs_item_nr_offset(eb, 0);
1692 if (nritems == 0)
1693 end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
1694 else
1695 end += btrfs_item_offset(eb, nritems - 1);
1696 memzero_extent_buffer(eb, start, end - start);
1697 }
1698}
1699
1700static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
1701 struct writeback_control *wbc)
1702{
1703 struct btrfs_fs_info *fs_info = eb->fs_info;
1704 struct btrfs_bio *bbio;
1705
1706 prepare_eb_write(eb);
1707
1708 bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
1709 REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
1710 eb->fs_info, end_bbio_meta_write, eb);
1711 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
1712 bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
1713 wbc_init_bio(wbc, &bbio->bio);
1714 bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
1715 bbio->file_offset = eb->start;
1716 if (fs_info->nodesize < PAGE_SIZE) {
1717 struct folio *folio = eb->folios[0];
1718 bool ret;
1719
1720 folio_lock(folio);
1721 btrfs_subpage_set_writeback(fs_info, folio, eb->start, eb->len);
1722 if (btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start,
1723 eb->len)) {
1724 folio_clear_dirty_for_io(folio);
1725 wbc->nr_to_write--;
1726 }
1727 ret = bio_add_folio(&bbio->bio, folio, eb->len,
1728 eb->start - folio_pos(folio));
1729 ASSERT(ret);
1730 wbc_account_cgroup_owner(wbc, folio_page(folio, 0), eb->len);
1731 folio_unlock(folio);
1732 } else {
1733 int num_folios = num_extent_folios(eb);
1734
1735 for (int i = 0; i < num_folios; i++) {
1736 struct folio *folio = eb->folios[i];
1737 bool ret;
1738
1739 folio_lock(folio);
1740 folio_clear_dirty_for_io(folio);
1741 folio_start_writeback(folio);
1742 ret = bio_add_folio(&bbio->bio, folio, folio_size(folio), 0);
1743 ASSERT(ret);
1744 wbc_account_cgroup_owner(wbc, folio_page(folio, 0),
1745 folio_size(folio));
1746 wbc->nr_to_write -= folio_nr_pages(folio);
1747 folio_unlock(folio);
1748 }
1749 }
1750 btrfs_submit_bio(bbio, 0);
1751}
1752
1753/*
1754 * Submit one subpage btree page.
1755 *
1756 * The main difference to submit_eb_page() is:
1757 * - Page locking
1758 * For subpage, we don't rely on page locking at all.
1759 *
1760 * - Flush write bio
1761 * We only flush bio if we may be unable to fit current extent buffers into
1762 * current bio.
1763 *
1764 * Return >=0 for the number of submitted extent buffers.
1765 * Return <0 for fatal error.
1766 */
1767static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
1768{
1769 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
1770 struct folio *folio = page_folio(page);
1771 int submitted = 0;
1772 u64 page_start = page_offset(page);
1773 int bit_start = 0;
1774 int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
1775
1776 /* Lock and write each dirty extent buffers in the range */
1777 while (bit_start < fs_info->subpage_info->bitmap_nr_bits) {
1778 struct btrfs_subpage *subpage = folio_get_private(folio);
1779 struct extent_buffer *eb;
1780 unsigned long flags;
1781 u64 start;
1782
1783 /*
1784 * Take private lock to ensure the subpage won't be detached
1785 * in the meantime.
1786 */
1787 spin_lock(&page->mapping->i_private_lock);
1788 if (!folio_test_private(folio)) {
1789 spin_unlock(&page->mapping->i_private_lock);
1790 break;
1791 }
1792 spin_lock_irqsave(&subpage->lock, flags);
1793 if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
1794 subpage->bitmaps)) {
1795 spin_unlock_irqrestore(&subpage->lock, flags);
1796 spin_unlock(&page->mapping->i_private_lock);
1797 bit_start++;
1798 continue;
1799 }
1800
1801 start = page_start + bit_start * fs_info->sectorsize;
1802 bit_start += sectors_per_node;
1803
1804 /*
1805 * Here we just want to grab the eb without touching extra
1806 * spin locks, so call find_extent_buffer_nolock().
1807 */
1808 eb = find_extent_buffer_nolock(fs_info, start);
1809 spin_unlock_irqrestore(&subpage->lock, flags);
1810 spin_unlock(&page->mapping->i_private_lock);
1811
1812 /*
		 * The eb has already reached 0 refs, thus find_extent_buffer_nolock()
		 * doesn't return it. We don't need to write back such an eb anyway.
1816 */
1817 if (!eb)
1818 continue;
1819
1820 if (lock_extent_buffer_for_io(eb, wbc)) {
1821 write_one_eb(eb, wbc);
1822 submitted++;
1823 }
1824 free_extent_buffer(eb);
1825 }
1826 return submitted;
1827}
1828
1829/*
1830 * Submit all page(s) of one extent buffer.
1831 *
1832 * @page: the page of one extent buffer
 * @ctx:        to determine if we need to submit this page; if the current
 *              page belongs to ctx->eb, we don't need to submit it
 *
 * The caller should pass each page in their bytenr order, and here we use
 * ctx->eb to determine if we have submitted pages of one extent buffer.
 *
 * If we have, we just skip until we hit a new page that doesn't belong to
 * the current ctx->eb.
1841 *
1842 * If not, we submit all the page(s) of the extent buffer.
1843 *
1844 * Return >0 if we have submitted the extent buffer successfully.
1845 * Return 0 if we don't need to submit the page, as it's already submitted by
1846 * previous call.
1847 * Return <0 for fatal error.
1848 */
1849static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
1850{
1851 struct writeback_control *wbc = ctx->wbc;
1852 struct address_space *mapping = page->mapping;
1853 struct folio *folio = page_folio(page);
1854 struct extent_buffer *eb;
1855 int ret;
1856
1857 if (!folio_test_private(folio))
1858 return 0;
1859
1860 if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
1861 return submit_eb_subpage(page, wbc);
1862
1863 spin_lock(&mapping->i_private_lock);
1864 if (!folio_test_private(folio)) {
1865 spin_unlock(&mapping->i_private_lock);
1866 return 0;
1867 }
1868
1869 eb = folio_get_private(folio);
1870
1871 /*
1872 * Shouldn't happen and normally this would be a BUG_ON but no point
1873 * crashing the machine for something we can survive anyway.
1874 */
1875 if (WARN_ON(!eb)) {
1876 spin_unlock(&mapping->i_private_lock);
1877 return 0;
1878 }
1879
1880 if (eb == ctx->eb) {
1881 spin_unlock(&mapping->i_private_lock);
1882 return 0;
1883 }
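	/*
	 * Try to grab a reference on the eb; if its refcount has already
	 * dropped to zero the eb is being freed and there is nothing to submit.
	 */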
1884 ret = atomic_inc_not_zero(&eb->refs);
1885 spin_unlock(&mapping->i_private_lock);
1886 if (!ret)
1887 return 0;
1888
1889 ctx->eb = eb;
1890
1891 ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx);
1892 if (ret) {
1893 if (ret == -EBUSY)
1894 ret = 0;
1895 free_extent_buffer(eb);
1896 return ret;
1897 }
1898
1899 if (!lock_extent_buffer_for_io(eb, wbc)) {
1900 free_extent_buffer(eb);
1901 return 0;
1902 }
1903 /* Implies write in zoned mode. */
1904 if (ctx->zoned_bg) {
1905 /* Mark the last eb in the block group. */
1906 btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
1907 ctx->zoned_bg->meta_write_pointer += eb->len;
1908 }
1909 write_one_eb(eb, wbc);
1910 free_extent_buffer(eb);
1911 return 1;
1912}
1913
1914int btree_write_cache_pages(struct address_space *mapping,
1915 struct writeback_control *wbc)
1916{
1917 struct btrfs_eb_write_context ctx = { .wbc = wbc };
1918 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
1919 int ret = 0;
1920 int done = 0;
1921 int nr_to_write_done = 0;
1922 struct folio_batch fbatch;
1923 unsigned int nr_folios;
1924 pgoff_t index;
1925 pgoff_t end; /* Inclusive */
1926 int scanned = 0;
1927 xa_mark_t tag;
1928
1929 folio_batch_init(&fbatch);
1930 if (wbc->range_cyclic) {
1931 index = mapping->writeback_index; /* Start from prev offset */
1932 end = -1;
1933 /*
		 * Starting from the beginning means we will not need to wrap
		 * around to the start of the range later, so mark it as scanned.
1936 */
1937 scanned = (index == 0);
1938 } else {
1939 index = wbc->range_start >> PAGE_SHIFT;
1940 end = wbc->range_end >> PAGE_SHIFT;
1941 scanned = 1;
1942 }
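	/*
	 * For data integrity (WB_SYNC_ALL) writeback use the TOWRITE tag so
	 * that pages dirtied after this sync started are not picked up and we
	 * don't livelock.
	 */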
1943 if (wbc->sync_mode == WB_SYNC_ALL)
1944 tag = PAGECACHE_TAG_TOWRITE;
1945 else
1946 tag = PAGECACHE_TAG_DIRTY;
1947 btrfs_zoned_meta_io_lock(fs_info);
1948retry:
1949 if (wbc->sync_mode == WB_SYNC_ALL)
1950 tag_pages_for_writeback(mapping, index, end);
1951 while (!done && !nr_to_write_done && (index <= end) &&
1952 (nr_folios = filemap_get_folios_tag(mapping, &index, end,
1953 tag, &fbatch))) {
1954 unsigned i;
1955
1956 for (i = 0; i < nr_folios; i++) {
1957 struct folio *folio = fbatch.folios[i];
1958
1959 ret = submit_eb_page(&folio->page, &ctx);
1960 if (ret == 0)
1961 continue;
1962 if (ret < 0) {
1963 done = 1;
1964 break;
1965 }
1966
1967 /*
			 * The filesystem may choose to bump up nr_to_write.
			 * We have to make sure to honor the new nr_to_write
			 * at any time.
1971 */
1972 nr_to_write_done = wbc->nr_to_write <= 0;
1973 }
1974 folio_batch_release(&fbatch);
1975 cond_resched();
1976 }
1977 if (!scanned && !done) {
1978 /*
1979 * We hit the last page and there is more work to be done: wrap
1980 * back to the start of the file
1981 */
1982 scanned = 1;
1983 index = 0;
1984 goto retry;
1985 }
1986 /*
1987 * If something went wrong, don't allow any metadata write bio to be
1988 * submitted.
1989 *
	 * This would prevent use-after-free if we had dirty pages not
	 * cleaned up, which can still happen with fuzzed images.
	 *
	 * - Bad extent tree
	 *   Allowing existing tree blocks to be allocated for other trees.
	 *
	 * - Log tree operations
	 *   Existing tree blocks get allocated to the log tree, which bumps
	 *   their generation, then they get cleaned in tree re-balance.
	 *   Such a tree block will not be written back, since it's clean,
	 *   thus no WRITTEN flag is set.
	 *   And after the log tree is written back, this tree block is not
	 *   tracked by any dirty extent_io_tree.
	 *
	 * - Offending tree block gets re-dirtied from its original owner
	 *   Since it has a bumped generation and no WRITTEN flag, it can be
	 *   reused without COWing. This tree block will not be tracked by
	 *   btrfs_transaction::dirty_pages.
	 *
	 * Now such a dirty tree block will not be cleaned by any dirty
	 * extent io tree, thus we don't want to submit such a wild eb
	 * if the fs already has errors.
	 *
	 * We can get ret > 0 from submit_eb_page() indicating how many ebs
	 * were submitted. Reset it to 0 to avoid false alerts for the caller.
2015 */
2016 if (ret > 0)
2017 ret = 0;
2018 if (!ret && BTRFS_FS_ERROR(fs_info))
2019 ret = -EROFS;
2020
2021 if (ctx.zoned_bg)
2022 btrfs_put_block_group(ctx.zoned_bg);
2023 btrfs_zoned_meta_io_unlock(fs_info);
2024 return ret;
2025}
2026
2027/*
2028 * Walk the list of dirty pages of the given address space and write all of them.
2029 *
2030 * @mapping: address space structure to write
2031 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2032 * @bio_ctrl: holds context for the write, namely the bio
2033 *
2034 * If a page is already under I/O, write_cache_pages() skips it, even
2035 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2036 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2037 * and msync() need to guarantee that all the data which was dirty at the time
2038 * the call was made get new I/O started against them. If wbc->sync_mode is
2039 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2040 * existing IO to complete.
2041 */
2042static int extent_write_cache_pages(struct address_space *mapping,
2043 struct btrfs_bio_ctrl *bio_ctrl)
2044{
2045 struct writeback_control *wbc = bio_ctrl->wbc;
2046 struct inode *inode = mapping->host;
2047 int ret = 0;
2048 int done = 0;
2049 int nr_to_write_done = 0;
2050 struct folio_batch fbatch;
2051 unsigned int nr_folios;
2052 pgoff_t index;
2053 pgoff_t end; /* Inclusive */
2054 pgoff_t done_index;
2055 int range_whole = 0;
2056 int scanned = 0;
2057 xa_mark_t tag;
2058
2059 /*
2060 * We have to hold onto the inode so that ordered extents can do their
2061 * work when the IO finishes. The alternative to this is failing to add
2062 * an ordered extent if the igrab() fails there and that is a huge pain
2063 * to deal with, so instead just hold onto the inode throughout the
2064 * writepages operation. If it fails here we are freeing up the inode
2065 * anyway and we'd rather not waste our time writing out stuff that is
2066 * going to be truncated anyway.
2067 */
2068 if (!igrab(inode))
2069 return 0;
2070
2071 folio_batch_init(&fbatch);
2072 if (wbc->range_cyclic) {
2073 index = mapping->writeback_index; /* Start from prev offset */
2074 end = -1;
2075 /*
		 * Starting from the beginning means we will not need to wrap
		 * around to the start of the range later, so mark it as scanned.
2078 */
2079 scanned = (index == 0);
2080 } else {
2081 index = wbc->range_start >> PAGE_SHIFT;
2082 end = wbc->range_end >> PAGE_SHIFT;
2083 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2084 range_whole = 1;
2085 scanned = 1;
2086 }
2087
2088 /*
2089 * We do the tagged writepage as long as the snapshot flush bit is set
	 * and we are the first one to do the filemap_flush() on this inode.
2091 *
2092 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
2093 * not race in and drop the bit.
2094 */
2095 if (range_whole && wbc->nr_to_write == LONG_MAX &&
2096 test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
2097 &BTRFS_I(inode)->runtime_flags))
2098 wbc->tagged_writepages = 1;
2099
2100 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2101 tag = PAGECACHE_TAG_TOWRITE;
2102 else
2103 tag = PAGECACHE_TAG_DIRTY;
2104retry:
2105 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2106 tag_pages_for_writeback(mapping, index, end);
2107 done_index = index;
2108 while (!done && !nr_to_write_done && (index <= end) &&
2109 (nr_folios = filemap_get_folios_tag(mapping, &index,
2110 end, tag, &fbatch))) {
2111 unsigned i;
2112
2113 for (i = 0; i < nr_folios; i++) {
2114 struct folio *folio = fbatch.folios[i];
2115
2116 done_index = folio_next_index(folio);
2117 /*
2118 * At this point we hold neither the i_pages lock nor
2119 * the page lock: the page may be truncated or
2120 * invalidated (changing page->mapping to NULL),
2121 * or even swizzled back from swapper_space to
2122 * tmpfs file mapping
2123 */
2124 if (!folio_trylock(folio)) {
2125 submit_write_bio(bio_ctrl, 0);
2126 folio_lock(folio);
2127 }
2128
2129 if (unlikely(folio->mapping != mapping)) {
2130 folio_unlock(folio);
2131 continue;
2132 }
2133
2134 if (!folio_test_dirty(folio)) {
2135 /* Someone wrote it for us. */
2136 folio_unlock(folio);
2137 continue;
2138 }
2139
2140 if (wbc->sync_mode != WB_SYNC_NONE) {
2141 if (folio_test_writeback(folio))
2142 submit_write_bio(bio_ctrl, 0);
2143 folio_wait_writeback(folio);
2144 }
2145
2146 if (folio_test_writeback(folio) ||
2147 !folio_clear_dirty_for_io(folio)) {
2148 folio_unlock(folio);
2149 continue;
2150 }
2151
2152 ret = __extent_writepage(&folio->page, bio_ctrl);
2153 if (ret < 0) {
2154 done = 1;
2155 break;
2156 }
2157
2158 /*
2159 * The filesystem may choose to bump up nr_to_write.
2160 * We have to make sure to honor the new nr_to_write
2161 * at any time.
2162 */
2163 nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
2164 wbc->nr_to_write <= 0);
2165 }
2166 folio_batch_release(&fbatch);
2167 cond_resched();
2168 }
2169 if (!scanned && !done) {
2170 /*
2171 * We hit the last page and there is more work to be done: wrap
2172 * back to the start of the file
2173 */
2174 scanned = 1;
2175 index = 0;
2176
2177 /*
2178 * If we're looping we could run into a page that is locked by a
2179 * writer and that writer could be waiting on writeback for a
2180 * page in our current bio, and thus deadlock, so flush the
2181 * write bio here.
2182 */
2183 submit_write_bio(bio_ctrl, 0);
2184 goto retry;
2185 }
2186
2187 if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
2188 mapping->writeback_index = done_index;
2189
2190 btrfs_add_delayed_iput(BTRFS_I(inode));
2191 return ret;
2192}
2193
2194/*
 * Submit the pages in the range to the bio, for call sites where the delalloc
 * range has already been run (i.e. the ordered extent has been inserted) and
 * all pages are still locked.
2198 */
2199void extent_write_locked_range(struct inode *inode, struct page *locked_page,
2200 u64 start, u64 end, struct writeback_control *wbc,
2201 bool pages_dirty)
2202{
2203 bool found_error = false;
2204 int ret = 0;
2205 struct address_space *mapping = inode->i_mapping;
2206 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2207 const u32 sectorsize = fs_info->sectorsize;
2208 loff_t i_size = i_size_read(inode);
2209 u64 cur = start;
2210 struct btrfs_bio_ctrl bio_ctrl = {
2211 .wbc = wbc,
2212 .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2213 };
2214
2215 if (wbc->no_cgroup_owner)
2216 bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT;
2217
2218 ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
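	/* Process the range one page at a time. */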
2219
2220 while (cur <= end) {
2221 u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
2222 u32 cur_len = cur_end + 1 - cur;
2223 struct page *page;
2224 int nr = 0;
2225
2226 page = find_get_page(mapping, cur >> PAGE_SHIFT);
2227 ASSERT(PageLocked(page));
2228 if (pages_dirty && page != locked_page) {
2229 ASSERT(PageDirty(page));
2230 clear_page_dirty_for_io(page);
2231 }
2232
2233 ret = __extent_writepage_io(BTRFS_I(inode), page, &bio_ctrl,
2234 i_size, &nr);
2235 if (ret == 1)
2236 goto next_page;
2237
2238 /* Make sure the mapping tag for page dirty gets cleared. */
2239 if (nr == 0) {
2240 set_page_writeback(page);
2241 end_page_writeback(page);
2242 }
2243 if (ret) {
2244 btrfs_mark_ordered_io_finished(BTRFS_I(inode), page,
2245 cur, cur_len, !ret);
2246 mapping_set_error(page->mapping, ret);
2247 }
2248 btrfs_folio_unlock_writer(fs_info, page_folio(page), cur, cur_len);
2249 if (ret < 0)
2250 found_error = true;
2251next_page:
2252 put_page(page);
2253 cur = cur_end + 1;
2254 }
2255
2256 submit_write_bio(&bio_ctrl, found_error ? ret : 0);
2257}
2258
2259int extent_writepages(struct address_space *mapping,
2260 struct writeback_control *wbc)
2261{
2262 struct inode *inode = mapping->host;
2263 int ret = 0;
2264 struct btrfs_bio_ctrl bio_ctrl = {
2265 .wbc = wbc,
2266 .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2267 };
2268
2269 /*
2270 * Allow only a single thread to do the reloc work in zoned mode to
2271 * protect the write pointer updates.
2272 */
2273 btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
2274 ret = extent_write_cache_pages(mapping, &bio_ctrl);
2275 submit_write_bio(&bio_ctrl, ret);
2276 btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
2277 return ret;
2278}
2279
2280void extent_readahead(struct readahead_control *rac)
2281{
2282 struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
2283 struct page *pagepool[16];
2284 struct extent_map *em_cached = NULL;
2285 u64 prev_em_start = (u64)-1;
2286 int nr;
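	/*
	 * Read the pages in batches; each batch covers a contiguous file
	 * range, which lets us build large bios and reuse the cached extent
	 * map across pages.
	 */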
2287
2288 while ((nr = readahead_page_batch(rac, pagepool))) {
2289 u64 contig_start = readahead_pos(rac);
2290 u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
2291
2292 contiguous_readpages(pagepool, nr, contig_start, contig_end,
2293 &em_cached, &bio_ctrl, &prev_em_start);
2294 }
2295
2296 if (em_cached)
2297 free_extent_map(em_cached);
2298 submit_one_bio(&bio_ctrl);
2299}
2300
2301/*
 * Basic invalidate_folio code. This waits on any locked or writeback ranges
 * corresponding to the folio, and then deletes any extent state records from
 * the tree.
2305 */
2306int extent_invalidate_folio(struct extent_io_tree *tree,
2307 struct folio *folio, size_t offset)
2308{
2309 struct extent_state *cached_state = NULL;
2310 u64 start = folio_pos(folio);
2311 u64 end = start + folio_size(folio) - 1;
2312 size_t blocksize = folio->mapping->host->i_sb->s_blocksize;
2313
2314 /* This function is only called for the btree inode */
2315 ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
2316
2317 start += ALIGN(offset, blocksize);
2318 if (start > end)
2319 return 0;
2320
2321 lock_extent(tree, start, end, &cached_state);
2322 folio_wait_writeback(folio);
2323
2324 /*
2325 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
2326 * so here we only need to unlock the extent range to free any
2327 * existing extent state.
2328 */
2329 unlock_extent(tree, start, end, &cached_state);
2330 return 0;
2331}
2332
2333/*
 * A helper for release_folio. This tests for areas of the page that
2335 * are locked or under IO and drops the related state bits if it is safe
2336 * to drop the page.
2337 */
2338static int try_release_extent_state(struct extent_io_tree *tree,
2339 struct page *page, gfp_t mask)
2340{
2341 u64 start = page_offset(page);
2342 u64 end = start + PAGE_SIZE - 1;
2343 int ret = 1;
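	/* If any part of the range is still locked, the page cannot be released. */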
2344
2345 if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
2346 ret = 0;
2347 } else {
2348 u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
2349 EXTENT_DELALLOC_NEW | EXTENT_CTLBITS |
2350 EXTENT_QGROUP_RESERVED);
2351
2352 /*
2353 * At this point we can safely clear everything except the
2354 * locked bit, the nodatasum bit and the delalloc new bit.
2355 * The delalloc new bit will be cleared by ordered extent
2356 * completion.
2357 */
2358 ret = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
2359
		/*
		 * If __clear_extent_bit() failed due to ENOMEM, we can't allow
		 * the release to continue.
		 */
2363 if (ret < 0)
2364 ret = 0;
2365 else
2366 ret = 1;
2367 }
2368 return ret;
2369}
2370
2371/*
 * A helper for release_folio. As long as there are no locked extents in the
 * range corresponding to the page, both state records and extent map records
 * are removed.
2375 */
2376int try_release_extent_mapping(struct page *page, gfp_t mask)
2377{
2378 struct extent_map *em;
2379 u64 start = page_offset(page);
2380 u64 end = start + PAGE_SIZE - 1;
2381 struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
2382 struct extent_io_tree *tree = &btrfs_inode->io_tree;
2383 struct extent_map_tree *map = &btrfs_inode->extent_tree;
2384
2385 if (gfpflags_allow_blocking(mask) &&
2386 page->mapping->host->i_size > SZ_16M) {
2387 u64 len;
2388 while (start <= end) {
2389 struct btrfs_fs_info *fs_info;
2390 u64 cur_gen;
2391
2392 len = end - start + 1;
2393 write_lock(&map->lock);
2394 em = lookup_extent_mapping(map, start, len);
2395 if (!em) {
2396 write_unlock(&map->lock);
2397 break;
2398 }
2399 if ((em->flags & EXTENT_FLAG_PINNED) ||
2400 em->start != start) {
2401 write_unlock(&map->lock);
2402 free_extent_map(em);
2403 break;
2404 }
2405 if (test_range_bit_exists(tree, em->start,
2406 extent_map_end(em) - 1,
2407 EXTENT_LOCKED))
2408 goto next;
2409 /*
2410 * If it's not in the list of modified extents, used
2411 * by a fast fsync, we can remove it. If it's being
2412 * logged we can safely remove it since fsync took an
2413 * extra reference on the em.
2414 */
2415 if (list_empty(&em->list) ||
2416 (em->flags & EXTENT_FLAG_LOGGING))
2417 goto remove_em;
2418 /*
2419 * If it's in the list of modified extents, remove it
			 * only if its generation is older than the current one,
2421 * in which case we don't need it for a fast fsync.
2422 * Otherwise don't remove it, we could be racing with an
2423 * ongoing fast fsync that could miss the new extent.
2424 */
2425 fs_info = btrfs_inode->root->fs_info;
2426 spin_lock(&fs_info->trans_lock);
2427 cur_gen = fs_info->generation;
2428 spin_unlock(&fs_info->trans_lock);
2429 if (em->generation >= cur_gen)
2430 goto next;
2431remove_em:
2432 /*
2433 * We only remove extent maps that are not in the list of
2434 * modified extents or that are in the list but with a
			 * generation lower than the current generation, so there
2436 * is no need to set the full fsync flag on the inode (it
2437 * hurts the fsync performance for workloads with a data
2438 * size that exceeds or is close to the system's memory).
2439 */
2440 remove_extent_mapping(map, em);
2441 /* once for the rb tree */
2442 free_extent_map(em);
2443next:
2444 start = extent_map_end(em);
2445 write_unlock(&map->lock);
2446
2447 /* once for us */
2448 free_extent_map(em);
2449
2450 cond_resched(); /* Allow large-extent preemption. */
2451 }
2452 }
2453 return try_release_extent_state(tree, page, mask);
2454}
2455
2456/*
 * Cache of the previous fiemap extent.
 *
 * Will be used for merging consecutive fiemap extents.
2460 */
2461struct fiemap_cache {
2462 u64 offset;
2463 u64 phys;
2464 u64 len;
2465 u32 flags;
2466 bool cached;
2467};
2468
2469/*
2470 * Helper to submit fiemap extent.
2471 *
 * Will try to merge the current fiemap extent specified by @offset, @phys,
 * @len and @flags with the cached one.
 * Only when the merge fails will the cached one be submitted as a
 * fiemap extent.
2476 *
2477 * Return value is the same as fiemap_fill_next_extent().
2478 */
2479static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
2480 struct fiemap_cache *cache,
2481 u64 offset, u64 phys, u64 len, u32 flags)
2482{
2483 u64 cache_end;
2484 int ret = 0;
2485
2486 /* Set at the end of extent_fiemap(). */
2487 ASSERT((flags & FIEMAP_EXTENT_LAST) == 0);
2488
2489 if (!cache->cached)
2490 goto assign;
2491
2492 /*
2493 * When iterating the extents of the inode, at extent_fiemap(), we may
2494 * find an extent that starts at an offset behind the end offset of the
2495 * previous extent we processed. This happens if fiemap is called
2496 * without FIEMAP_FLAG_SYNC and there are ordered extents completing
2497 * while we call btrfs_next_leaf() (through fiemap_next_leaf_item()).
2498 *
2499 * For example we are in leaf X processing its last item, which is the
2500 * file extent item for file range [512K, 1M[, and after
2501 * btrfs_next_leaf() releases the path, there's an ordered extent that
2502 * completes for the file range [768K, 2M[, and that results in trimming
2503 * the file extent item so that it now corresponds to the file range
2504 * [512K, 768K[ and a new file extent item is inserted for the file
2505 * range [768K, 2M[, which may end up as the last item of leaf X or as
2506 * the first item of the next leaf - in either case btrfs_next_leaf()
2507 * will leave us with a path pointing to the new extent item, for the
2508 * file range [768K, 2M[, since that's the first key that follows the
2509 * last one we processed. So in order not to report overlapping extents
2510 * to user space, we trim the length of the previously cached extent and
2511 * emit it.
2512 *
2513 * Upon calling btrfs_next_leaf() we may also find an extent with an
	 * offset smaller than or equal to cache->offset, and this happens
2515 * when we had a hole or prealloc extent with several delalloc ranges in
2516 * it, but after btrfs_next_leaf() released the path, delalloc was
2517 * flushed and the resulting ordered extents were completed, so we can
2518 * now have found a file extent item for an offset that is smaller than
	 * or equal to what we have in cache->offset. We deal with this as
2520 * described below.
2521 */
2522 cache_end = cache->offset + cache->len;
2523 if (cache_end > offset) {
2524 if (offset == cache->offset) {
2525 /*
			 * We cached a delalloc range (found in the io tree) for
2527 * a hole or prealloc extent and we have now found a
2528 * file extent item for the same offset. What we have
2529 * now is more recent and up to date, so discard what
2530 * we had in the cache and use what we have just found.
2531 */
2532 goto assign;
2533 } else if (offset > cache->offset) {
2534 /*
2535 * The extent range we previously found ends after the
2536 * offset of the file extent item we found and that
2537 * offset falls somewhere in the middle of that previous
2538 * extent range. So adjust the range we previously found
2539 * to end at the offset of the file extent item we have
2540 * just found, since this extent is more up to date.
2541 * Emit that adjusted range and cache the file extent
2542 * item we have just found. This corresponds to the case
2543 * where a previously found file extent item was split
2544 * due to an ordered extent completing.
2545 */
2546 cache->len = offset - cache->offset;
2547 goto emit;
2548 } else {
2549 const u64 range_end = offset + len;
2550
2551 /*
2552 * The offset of the file extent item we have just found
2553 * is behind the cached offset. This means we were
2554 * processing a hole or prealloc extent for which we
2555 * have found delalloc ranges (in the io tree), so what
2556 * we have in the cache is the last delalloc range we
2557 * found while the file extent item we found can be
2558 * either for a whole delalloc range we previously
			 * emitted or only a part of that range.
2560 *
2561 * We have two cases here:
2562 *
2563 * 1) The file extent item's range ends at or behind the
2564 * cached extent's end. In this case just ignore the
2565 * current file extent item because we don't want to
2566 * overlap with previous ranges that may have been
			 *    emitted already;
2568 *
2569 * 2) The file extent item starts behind the currently
2570 * cached extent but its end offset goes beyond the
2571 * end offset of the cached extent. We don't want to
2572 * overlap with a previous range that may have been
			 *    emitted already, so we emit the currently cached
2574 * extent and then partially store the current file
2575 * extent item's range in the cache, for the subrange
			 *    going from the cached extent's end to the end of the
2577 * file extent item.
2578 */
2579 if (range_end <= cache_end)
2580 return 0;
2581
2582 if (!(flags & (FIEMAP_EXTENT_ENCODED | FIEMAP_EXTENT_DELALLOC)))
2583 phys += cache_end - offset;
2584
2585 offset = cache_end;
2586 len = range_end - cache_end;
2587 goto emit;
2588 }
2589 }
2590
2591 /*
	 * Only merge fiemap extents if:
	 * 1) Their logical addresses are contiguous
	 *
	 * 2) Their physical addresses are contiguous
	 *    So truly compressed (physical size smaller than logical size)
	 *    extents won't get merged with each other
	 *
	 * 3) They share the same flags
2600 */
2601 if (cache->offset + cache->len == offset &&
2602 cache->phys + cache->len == phys &&
2603 cache->flags == flags) {
2604 cache->len += len;
2605 return 0;
2606 }
2607
2608emit:
2609 /* Not mergeable, need to submit cached one */
2610 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
2611 cache->len, cache->flags);
2612 cache->cached = false;
2613 if (ret)
2614 return ret;
2615assign:
2616 cache->cached = true;
2617 cache->offset = offset;
2618 cache->phys = phys;
2619 cache->len = len;
2620 cache->flags = flags;
2621
2622 return 0;
2623}
2624
2625/*
2626 * Emit last fiemap cache
2627 *
 * The last fiemap extent may still be cached in the following case:
2629 * 0 4k 8k
2630 * |<- Fiemap range ->|
2631 * |<------------ First extent ----------->|
2632 *
2633 * In this case, the first extent range will be cached but not emitted.
2634 * So we must emit it before ending extent_fiemap().
2635 */
2636static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
2637 struct fiemap_cache *cache)
2638{
2639 int ret;
2640
2641 if (!cache->cached)
2642 return 0;
2643
2644 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
2645 cache->len, cache->flags);
2646 cache->cached = false;
2647 if (ret > 0)
2648 ret = 0;
2649 return ret;
2650}
2651
2652static int fiemap_next_leaf_item(struct btrfs_inode *inode, struct btrfs_path *path)
2653{
2654 struct extent_buffer *clone;
2655 struct btrfs_key key;
2656 int slot;
2657 int ret;
2658
2659 path->slots[0]++;
2660 if (path->slots[0] < btrfs_header_nritems(path->nodes[0]))
2661 return 0;
2662
2663 ret = btrfs_next_leaf(inode->root, path);
2664 if (ret != 0)
2665 return ret;
2666
2667 /*
2668 * Don't bother with cloning if there are no more file extent items for
2669 * our inode.
2670 */
2671 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2672 if (key.objectid != btrfs_ino(inode) || key.type != BTRFS_EXTENT_DATA_KEY)
2673 return 1;
2674
2675 /* See the comment at fiemap_search_slot() about why we clone. */
2676 clone = btrfs_clone_extent_buffer(path->nodes[0]);
2677 if (!clone)
2678 return -ENOMEM;
2679
2680 slot = path->slots[0];
2681 btrfs_release_path(path);
2682 path->nodes[0] = clone;
2683 path->slots[0] = slot;
2684
2685 return 0;
2686}
2687
2688/*
2689 * Search for the first file extent item that starts at a given file offset or
2690 * the one that starts immediately before that offset.
2691 * Returns: 0 on success, < 0 on error, 1 if not found.
2692 */
2693static int fiemap_search_slot(struct btrfs_inode *inode, struct btrfs_path *path,
2694 u64 file_offset)
2695{
2696 const u64 ino = btrfs_ino(inode);
2697 struct btrfs_root *root = inode->root;
2698 struct extent_buffer *clone;
2699 struct btrfs_key key;
2700 int slot;
2701 int ret;
2702
2703 key.objectid = ino;
2704 key.type = BTRFS_EXTENT_DATA_KEY;
2705 key.offset = file_offset;
2706
2707 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2708 if (ret < 0)
2709 return ret;
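	/*
	 * When the exact key is not found, the search leaves us right after
	 * the slot where it would be; the previous item may be the file extent
	 * item covering @file_offset, so step back to it if it belongs to our
	 * inode.
	 */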
2710
2711 if (ret > 0 && path->slots[0] > 0) {
2712 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
2713 if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
2714 path->slots[0]--;
2715 }
2716
2717 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2718 ret = btrfs_next_leaf(root, path);
2719 if (ret != 0)
2720 return ret;
2721
2722 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2723 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
2724 return 1;
2725 }
2726
2727 /*
2728 * We clone the leaf and use it during fiemap. This is because while
2729 * using the leaf we do expensive things like checking if an extent is
2730 * shared, which can take a long time. In order to prevent blocking
2731 * other tasks for too long, we use a clone of the leaf. We have locked
2732 * the file range in the inode's io tree, so we know none of our file
2733 * extent items can change. This way we avoid blocking other tasks that
2734 * want to insert items for other inodes in the same leaf or b+tree
2735 * rebalance operations (triggered for example when someone is trying
2736 * to push items into this leaf when trying to insert an item in a
2737 * neighbour leaf).
2738 * We also need the private clone because holding a read lock on an
2739 * extent buffer of the subvolume's b+tree will make lockdep unhappy
2740 * when we call fiemap_fill_next_extent(), because that may cause a page
2741 * fault when filling the user space buffer with fiemap data.
2742 */
2743 clone = btrfs_clone_extent_buffer(path->nodes[0]);
2744 if (!clone)
2745 return -ENOMEM;
2746
2747 slot = path->slots[0];
2748 btrfs_release_path(path);
2749 path->nodes[0] = clone;
2750 path->slots[0] = slot;
2751
2752 return 0;
2753}
2754
2755/*
2756 * Process a range which is a hole or a prealloc extent in the inode's subvolume
2757 * btree. If @disk_bytenr is 0, we are dealing with a hole, otherwise a prealloc
2758 * extent. The end offset (@end) is inclusive.
2759 */
2760static int fiemap_process_hole(struct btrfs_inode *inode,
2761 struct fiemap_extent_info *fieinfo,
2762 struct fiemap_cache *cache,
2763 struct extent_state **delalloc_cached_state,
2764 struct btrfs_backref_share_check_ctx *backref_ctx,
2765 u64 disk_bytenr, u64 extent_offset,
2766 u64 extent_gen,
2767 u64 start, u64 end)
2768{
2769 const u64 i_size = i_size_read(&inode->vfs_inode);
2770 u64 cur_offset = start;
2771 u64 last_delalloc_end = 0;
2772 u32 prealloc_flags = FIEMAP_EXTENT_UNWRITTEN;
2773 bool checked_extent_shared = false;
2774 int ret;
2775
2776 /*
2777 * There can be no delalloc past i_size, so don't waste time looking for
2778 * it beyond i_size.
2779 */
2780 while (cur_offset < end && cur_offset < i_size) {
2781 struct extent_state *cached_state = NULL;
2782 u64 delalloc_start;
2783 u64 delalloc_end;
2784 u64 prealloc_start;
2785 u64 lockstart;
2786 u64 lockend;
2787 u64 prealloc_len = 0;
2788 bool delalloc;
2789
2790 lockstart = round_down(cur_offset, inode->root->fs_info->sectorsize);
2791 lockend = round_up(end, inode->root->fs_info->sectorsize);
2792
2793 /*
2794 * We are only locking for the delalloc range because that's the
2795 * only thing that can change here. With fiemap we have a lock
2796 * on the inode, so no buffered or direct writes can happen.
2797 *
2798 * However mmaps and normal page writeback will cause this to
2799 * change arbitrarily. We have to lock the extent lock here to
2800 * make sure that nobody messes with the tree while we're doing
2801 * btrfs_find_delalloc_in_range.
2802 */
2803 lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
2804 delalloc = btrfs_find_delalloc_in_range(inode, cur_offset, end,
2805 delalloc_cached_state,
2806 &delalloc_start,
2807 &delalloc_end);
2808 unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
2809 if (!delalloc)
2810 break;
2811
2812 /*
2813 * If this is a prealloc extent we have to report every section
2814 * of it that has no delalloc.
2815 */
2816 if (disk_bytenr != 0) {
2817 if (last_delalloc_end == 0) {
2818 prealloc_start = start;
2819 prealloc_len = delalloc_start - start;
2820 } else {
2821 prealloc_start = last_delalloc_end + 1;
2822 prealloc_len = delalloc_start - prealloc_start;
2823 }
2824 }
2825
2826 if (prealloc_len > 0) {
2827 if (!checked_extent_shared && fieinfo->fi_extents_max) {
2828 ret = btrfs_is_data_extent_shared(inode,
2829 disk_bytenr,
2830 extent_gen,
2831 backref_ctx);
2832 if (ret < 0)
2833 return ret;
2834 else if (ret > 0)
2835 prealloc_flags |= FIEMAP_EXTENT_SHARED;
2836
2837 checked_extent_shared = true;
2838 }
2839 ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
2840 disk_bytenr + extent_offset,
2841 prealloc_len, prealloc_flags);
2842 if (ret)
2843 return ret;
2844 extent_offset += prealloc_len;
2845 }
2846
2847 ret = emit_fiemap_extent(fieinfo, cache, delalloc_start, 0,
2848 delalloc_end + 1 - delalloc_start,
2849 FIEMAP_EXTENT_DELALLOC |
2850 FIEMAP_EXTENT_UNKNOWN);
2851 if (ret)
2852 return ret;
2853
2854 last_delalloc_end = delalloc_end;
2855 cur_offset = delalloc_end + 1;
2856 extent_offset += cur_offset - delalloc_start;
2857 cond_resched();
2858 }
2859
2860 /*
2861 * Either we found no delalloc for the whole prealloc extent or we have
2862 * a prealloc extent that spans i_size or starts at or after i_size.
2863 */
2864 if (disk_bytenr != 0 && last_delalloc_end < end) {
2865 u64 prealloc_start;
2866 u64 prealloc_len;
2867
2868 if (last_delalloc_end == 0) {
2869 prealloc_start = start;
2870 prealloc_len = end + 1 - start;
2871 } else {
2872 prealloc_start = last_delalloc_end + 1;
2873 prealloc_len = end + 1 - prealloc_start;
2874 }
2875
2876 if (!checked_extent_shared && fieinfo->fi_extents_max) {
2877 ret = btrfs_is_data_extent_shared(inode,
2878 disk_bytenr,
2879 extent_gen,
2880 backref_ctx);
2881 if (ret < 0)
2882 return ret;
2883 else if (ret > 0)
2884 prealloc_flags |= FIEMAP_EXTENT_SHARED;
2885 }
2886 ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
2887 disk_bytenr + extent_offset,
2888 prealloc_len, prealloc_flags);
2889 if (ret)
2890 return ret;
2891 }
2892
2893 return 0;
2894}
2895
2896static int fiemap_find_last_extent_offset(struct btrfs_inode *inode,
2897 struct btrfs_path *path,
2898 u64 *last_extent_end_ret)
2899{
2900 const u64 ino = btrfs_ino(inode);
2901 struct btrfs_root *root = inode->root;
2902 struct extent_buffer *leaf;
2903 struct btrfs_file_extent_item *ei;
2904 struct btrfs_key key;
2905 u64 disk_bytenr;
2906 int ret;
2907
2908 /*
2909 * Lookup the last file extent. We're not using i_size here because
2910 * there might be preallocation past i_size.
2911 */
2912 ret = btrfs_lookup_file_extent(NULL, root, path, ino, (u64)-1, 0);
2913 /* There can't be a file extent item at offset (u64)-1 */
2914 ASSERT(ret != 0);
2915 if (ret < 0)
2916 return ret;
2917
2918 /*
2919 * For a non-existing key, btrfs_search_slot() always leaves us at a
2920 * slot > 0, except if the btree is empty, which is impossible because
2921 * at least it has the inode item for this inode and all the items for
2922 * the root inode 256.
2923 */
2924 ASSERT(path->slots[0] > 0);
2925 path->slots[0]--;
2926 leaf = path->nodes[0];
2927 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2928 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
2929 /* No file extent items in the subvolume tree. */
2930 *last_extent_end_ret = 0;
2931 return 0;
2932 }
2933
2934 /*
	 * For an inline extent, the disk_bytenr is where the inline data starts,
2936 * so first check if we have an inline extent item before checking if we
2937 * have an implicit hole (disk_bytenr == 0).
2938 */
2939 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
2940 if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
2941 *last_extent_end_ret = btrfs_file_extent_end(path);
2942 return 0;
2943 }
2944
2945 /*
2946 * Find the last file extent item that is not a hole (when NO_HOLES is
2947 * not enabled). This should take at most 2 iterations in the worst
2948 * case: we have one hole file extent item at slot 0 of a leaf and
2949 * another hole file extent item as the last item in the previous leaf.
2950 * This is because we merge file extent items that represent holes.
2951 */
2952 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
2953 while (disk_bytenr == 0) {
2954 ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
2955 if (ret < 0) {
2956 return ret;
2957 } else if (ret > 0) {
2958 /* No file extent items that are not holes. */
2959 *last_extent_end_ret = 0;
2960 return 0;
2961 }
2962 leaf = path->nodes[0];
2963 ei = btrfs_item_ptr(leaf, path->slots[0],
2964 struct btrfs_file_extent_item);
2965 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
2966 }
2967
2968 *last_extent_end_ret = btrfs_file_extent_end(path);
2969 return 0;
2970}
2971
2972int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
2973 u64 start, u64 len)
2974{
2975 const u64 ino = btrfs_ino(inode);
2976 struct extent_state *delalloc_cached_state = NULL;
2977 struct btrfs_path *path;
2978 struct fiemap_cache cache = { 0 };
2979 struct btrfs_backref_share_check_ctx *backref_ctx;
2980 u64 last_extent_end;
2981 u64 prev_extent_end;
2982 u64 range_start;
2983 u64 range_end;
2984 const u64 sectorsize = inode->root->fs_info->sectorsize;
2985 bool stopped = false;
2986 int ret;
2987
2988 backref_ctx = btrfs_alloc_backref_share_check_ctx();
2989 path = btrfs_alloc_path();
2990 if (!backref_ctx || !path) {
2991 ret = -ENOMEM;
2992 goto out;
2993 }
2994
2995 range_start = round_down(start, sectorsize);
2996 range_end = round_up(start + len, sectorsize);
2997 prev_extent_end = range_start;
2998
2999 ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
3000 if (ret < 0)
3001 goto out;
3002 btrfs_release_path(path);
3003
3004 path->reada = READA_FORWARD;
3005 ret = fiemap_search_slot(inode, path, range_start);
3006 if (ret < 0) {
3007 goto out;
3008 } else if (ret > 0) {
3009 /*
3010 * No file extent item found, but we may have delalloc between
3011 * the current offset and i_size. So check for that.
3012 */
3013 ret = 0;
3014 goto check_eof_delalloc;
3015 }
3016
3017 while (prev_extent_end < range_end) {
3018 struct extent_buffer *leaf = path->nodes[0];
3019 struct btrfs_file_extent_item *ei;
3020 struct btrfs_key key;
3021 u64 extent_end;
3022 u64 extent_len;
3023 u64 extent_offset = 0;
3024 u64 extent_gen;
3025 u64 disk_bytenr = 0;
3026 u64 flags = 0;
3027 int extent_type;
3028 u8 compression;
3029
3030 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3031 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
3032 break;
3033
3034 extent_end = btrfs_file_extent_end(path);
3035
3036 /*
3037 * The first iteration can leave us at an extent item that ends
3038 * before our range's start. Move to the next item.
3039 */
3040 if (extent_end <= range_start)
3041 goto next_item;
3042
3043 backref_ctx->curr_leaf_bytenr = leaf->start;
3044
		/* We have an implicit hole (NO_HOLES feature enabled). */
3046 if (prev_extent_end < key.offset) {
3047 const u64 hole_end = min(key.offset, range_end) - 1;
3048
3049 ret = fiemap_process_hole(inode, fieinfo, &cache,
3050 &delalloc_cached_state,
3051 backref_ctx, 0, 0, 0,
3052 prev_extent_end, hole_end);
3053 if (ret < 0) {
3054 goto out;
3055 } else if (ret > 0) {
3056 /* fiemap_fill_next_extent() told us to stop. */
3057 stopped = true;
3058 break;
3059 }
3060
3061 /* We've reached the end of the fiemap range, stop. */
3062 if (key.offset >= range_end) {
3063 stopped = true;
3064 break;
3065 }
3066 }
3067
3068 extent_len = extent_end - key.offset;
3069 ei = btrfs_item_ptr(leaf, path->slots[0],
3070 struct btrfs_file_extent_item);
3071 compression = btrfs_file_extent_compression(leaf, ei);
3072 extent_type = btrfs_file_extent_type(leaf, ei);
3073 extent_gen = btrfs_file_extent_generation(leaf, ei);
3074
3075 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
3076 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
3077 if (compression == BTRFS_COMPRESS_NONE)
3078 extent_offset = btrfs_file_extent_offset(leaf, ei);
3079 }
3080
3081 if (compression != BTRFS_COMPRESS_NONE)
3082 flags |= FIEMAP_EXTENT_ENCODED;
3083
3084 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
3085 flags |= FIEMAP_EXTENT_DATA_INLINE;
3086 flags |= FIEMAP_EXTENT_NOT_ALIGNED;
3087 ret = emit_fiemap_extent(fieinfo, &cache, key.offset, 0,
3088 extent_len, flags);
3089 } else if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
3090 ret = fiemap_process_hole(inode, fieinfo, &cache,
3091 &delalloc_cached_state,
3092 backref_ctx,
3093 disk_bytenr, extent_offset,
3094 extent_gen, key.offset,
3095 extent_end - 1);
3096 } else if (disk_bytenr == 0) {
3097 /* We have an explicit hole. */
3098 ret = fiemap_process_hole(inode, fieinfo, &cache,
3099 &delalloc_cached_state,
3100 backref_ctx, 0, 0, 0,
3101 key.offset, extent_end - 1);
3102 } else {
3103 /* We have a regular extent. */
3104 if (fieinfo->fi_extents_max) {
3105 ret = btrfs_is_data_extent_shared(inode,
3106 disk_bytenr,
3107 extent_gen,
3108 backref_ctx);
3109 if (ret < 0)
3110 goto out;
3111 else if (ret > 0)
3112 flags |= FIEMAP_EXTENT_SHARED;
3113 }
3114
3115 ret = emit_fiemap_extent(fieinfo, &cache, key.offset,
3116 disk_bytenr + extent_offset,
3117 extent_len, flags);
3118 }
3119
3120 if (ret < 0) {
3121 goto out;
3122 } else if (ret > 0) {
3123 /* fiemap_fill_next_extent() told us to stop. */
3124 stopped = true;
3125 break;
3126 }
3127
3128 prev_extent_end = extent_end;
3129next_item:
3130 if (fatal_signal_pending(current)) {
3131 ret = -EINTR;
3132 goto out;
3133 }
3134
3135 ret = fiemap_next_leaf_item(inode, path);
3136 if (ret < 0) {
3137 goto out;
3138 } else if (ret > 0) {
3139 /* No more file extent items for this inode. */
3140 break;
3141 }
3142 cond_resched();
3143 }
3144
3145check_eof_delalloc:
3146 /*
3147 * Release (and free) the path before emitting any final entries to
3148 * fiemap_fill_next_extent() to keep lockdep happy. This is because
3149 * once we find no more file extent items exist, we may have a
3150 * non-cloned leaf, and fiemap_fill_next_extent() can trigger page
3151 * faults when copying data to the user space buffer.
3152 */
3153 btrfs_free_path(path);
3154 path = NULL;
3155
3156 if (!stopped && prev_extent_end < range_end) {
3157 ret = fiemap_process_hole(inode, fieinfo, &cache,
3158 &delalloc_cached_state, backref_ctx,
3159 0, 0, 0, prev_extent_end, range_end - 1);
3160 if (ret < 0)
3161 goto out;
3162 prev_extent_end = range_end;
3163 }
3164
3165 if (cache.cached && cache.offset + cache.len >= last_extent_end) {
3166 const u64 i_size = i_size_read(&inode->vfs_inode);
3167
3168 if (prev_extent_end < i_size) {
3169 struct extent_state *cached_state = NULL;
3170 u64 delalloc_start;
3171 u64 delalloc_end;
3172 u64 lockstart;
3173 u64 lockend;
3174 bool delalloc;
3175
3176 lockstart = round_down(prev_extent_end, sectorsize);
3177 lockend = round_up(i_size, sectorsize);
3178
3179 /*
3180 * See the comment in fiemap_process_hole as to why
3181 * we're doing the locking here.
3182 */
3183 lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3184 delalloc = btrfs_find_delalloc_in_range(inode,
3185 prev_extent_end,
3186 i_size - 1,
3187 &delalloc_cached_state,
3188 &delalloc_start,
3189 &delalloc_end);
3190 unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3191 if (!delalloc)
3192 cache.flags |= FIEMAP_EXTENT_LAST;
3193 } else {
3194 cache.flags |= FIEMAP_EXTENT_LAST;
3195 }
3196 }
3197
3198 ret = emit_last_fiemap_cache(fieinfo, &cache);
3199out:
3200 free_extent_state(delalloc_cached_state);
3201 btrfs_free_backref_share_ctx(backref_ctx);
3202 btrfs_free_path(path);
3203 return ret;
3204}
3205
3206static void __free_extent_buffer(struct extent_buffer *eb)
3207{
3208 kmem_cache_free(extent_buffer_cache, eb);
3209}
3210
3211static int extent_buffer_under_io(const struct extent_buffer *eb)
3212{
3213 return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
3214 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3215}
3216
3217static bool folio_range_has_eb(struct btrfs_fs_info *fs_info, struct folio *folio)
3218{
3219 struct btrfs_subpage *subpage;
3220
3221 lockdep_assert_held(&folio->mapping->i_private_lock);
3222
3223 if (folio_test_private(folio)) {
3224 subpage = folio_get_private(folio);
3225 if (atomic_read(&subpage->eb_refs))
3226 return true;
3227 /*
		 * Even if there are no eb refs here, we may still have
		 * end_page_read() calls relying on page::private.
3230 */
3231 if (atomic_read(&subpage->readers))
3232 return true;
3233 }
3234 return false;
3235}
3236
3237static void detach_extent_buffer_folio(struct extent_buffer *eb, struct folio *folio)
3238{
3239 struct btrfs_fs_info *fs_info = eb->fs_info;
3240 const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3241
3242 /*
3243 * For mapped eb, we're going to change the folio private, which should
3244 * be done under the i_private_lock.
3245 */
3246 if (mapped)
3247 spin_lock(&folio->mapping->i_private_lock);
3248
3249 if (!folio_test_private(folio)) {
3250 if (mapped)
3251 spin_unlock(&folio->mapping->i_private_lock);
3252 return;
3253 }
3254
3255 if (fs_info->nodesize >= PAGE_SIZE) {
3256 /*
3257 * We do this since we'll remove the pages after we've
3258 * removed the eb from the radix tree, so we could race
3259 * and have this page now attached to the new eb. So
3260 * only clear folio if it's still connected to
3261 * this eb.
3262 */
3263 if (folio_test_private(folio) && folio_get_private(folio) == eb) {
3264 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3265 BUG_ON(folio_test_dirty(folio));
3266 BUG_ON(folio_test_writeback(folio));
			/* We need to make sure we haven't been attached to a new eb. */
3268 folio_detach_private(folio);
3269 }
3270 if (mapped)
3271 spin_unlock(&folio->mapping->i_private_lock);
3272 return;
3273 }
3274
3275 /*
	 * For subpage, we can have a dummy eb with folio private attached. In
	 * this case, we can directly detach the private as such a folio is only
	 * attached to one dummy eb, no sharing.
3279 */
3280 if (!mapped) {
3281 btrfs_detach_subpage(fs_info, folio);
3282 return;
3283 }
3284
3285 btrfs_folio_dec_eb_refs(fs_info, folio);
3286
3287 /*
3288 * We can only detach the folio private if there are no other ebs in the
3289 * page range and no unfinished IO.
3290 */
3291 if (!folio_range_has_eb(fs_info, folio))
3292 btrfs_detach_subpage(fs_info, folio);
3293
3294 spin_unlock(&folio->mapping->i_private_lock);
3295}
3296
3297/* Release all pages attached to the extent buffer */
3298static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
3299{
3300 ASSERT(!extent_buffer_under_io(eb));
3301
3302 for (int i = 0; i < INLINE_EXTENT_BUFFER_PAGES; i++) {
3303 struct folio *folio = eb->folios[i];
3304
3305 if (!folio)
3306 continue;
3307
3308 detach_extent_buffer_folio(eb, folio);
3309
3310 /* One for when we allocated the folio. */
3311 folio_put(folio);
3312 }
3313}
3314
3315/*
3316 * Helper for releasing the extent buffer.
3317 */
3318static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
3319{
3320 btrfs_release_extent_buffer_pages(eb);
3321 btrfs_leak_debug_del_eb(eb);
3322 __free_extent_buffer(eb);
3323}
3324
3325static struct extent_buffer *
3326__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
3327 unsigned long len)
3328{
3329 struct extent_buffer *eb = NULL;
3330
3331 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
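	/* __GFP_NOFAIL means the allocation above cannot fail, so no NULL check. */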
3332 eb->start = start;
3333 eb->len = len;
3334 eb->fs_info = fs_info;
3335 init_rwsem(&eb->lock);
3336
3337 btrfs_leak_debug_add_eb(eb);
3338
3339 spin_lock_init(&eb->refs_lock);
3340 atomic_set(&eb->refs, 1);
3341
3342 ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
3343
3344 return eb;
3345}
3346
3347struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
3348{
3349 struct extent_buffer *new;
3350 int num_folios = num_extent_folios(src);
3351 int ret;
3352
3353 new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
3354 if (new == NULL)
3355 return NULL;
3356
3357 /*
3358 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
	 * btrfs_release_extent_buffer() has different behavior for
	 * UNMAPPED subpage extent buffers.
3361 */
3362 set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
3363
3364 ret = alloc_eb_folio_array(new, 0);
3365 if (ret) {
3366 btrfs_release_extent_buffer(new);
3367 return NULL;
3368 }
3369
3370 for (int i = 0; i < num_folios; i++) {
3371 struct folio *folio = new->folios[i];
3372 int ret;
3373
3374 ret = attach_extent_buffer_folio(new, folio, NULL);
3375 if (ret < 0) {
3376 btrfs_release_extent_buffer(new);
3377 return NULL;
3378 }
3379 WARN_ON(folio_test_dirty(folio));
3380 }
3381 copy_extent_buffer_full(new, src);
3382 set_extent_buffer_uptodate(new);
3383
3384 return new;
3385}
3386
3387struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
3388 u64 start, unsigned long len)
3389{
3390 struct extent_buffer *eb;
3391 int num_folios = 0;
3392 int ret;
3393
3394 eb = __alloc_extent_buffer(fs_info, start, len);
3395 if (!eb)
3396 return NULL;
3397
3398 ret = alloc_eb_folio_array(eb, 0);
3399 if (ret)
3400 goto err;
3401
3402 num_folios = num_extent_folios(eb);
3403 for (int i = 0; i < num_folios; i++) {
3404 ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL);
3405 if (ret < 0)
3406 goto err;
3407 }
3408
3409 set_extent_buffer_uptodate(eb);
3410 btrfs_set_header_nritems(eb, 0);
3411 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3412
3413 return eb;
3414err:
3415 for (int i = 0; i < num_folios; i++) {
3416 if (eb->folios[i]) {
3417 detach_extent_buffer_folio(eb, eb->folios[i]);
3418 __folio_put(eb->folios[i]);
3419 }
3420 }
3421 __free_extent_buffer(eb);
3422 return NULL;
3423}
3424
3425struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
3426 u64 start)
3427{
3428 return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
3429}
3430
3431static void check_buffer_tree_ref(struct extent_buffer *eb)
3432{
3433 int refs;
3434 /*
3435 * The TREE_REF bit is first set when the extent_buffer is added
3436 * to the radix tree. It is also reset, if unset, when a new reference
3437 * is created by find_extent_buffer.
3438 *
3439 * It is only cleared in two cases: freeing the last non-tree
3440 * reference to the extent_buffer when its STALE bit is set or
3441 * calling release_folio when the tree reference is the only reference.
3442 *
3443 * In both cases, care is taken to ensure that the extent_buffer's
3444 * pages are not under io. However, release_folio can be concurrently
3445 * called with creating new references, which is prone to race
3446 * conditions between the calls to check_buffer_tree_ref in those
3447 * codepaths and clearing TREE_REF in try_release_extent_buffer.
3448 *
3449 * The actual lifetime of the extent_buffer in the radix tree is
3450 * adequately protected by the refcount, but the TREE_REF bit and
3451 * its corresponding reference are not. To protect against this
3452 * class of races, we call check_buffer_tree_ref from the codepaths
3453 * which trigger io. Note that once io is initiated, TREE_REF can no
3454 * longer be cleared, so that is the moment at which any such race is
3455 * best fixed.
3456 */
3457 refs = atomic_read(&eb->refs);
3458 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3459 return;
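	/*
	 * Otherwise take refs_lock and set TREE_REF; the extra reference
	 * accounts for the tree's ownership of the buffer.
	 */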
3460
3461 spin_lock(&eb->refs_lock);
3462 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3463 atomic_inc(&eb->refs);
3464 spin_unlock(&eb->refs_lock);
3465}
3466
3467static void mark_extent_buffer_accessed(struct extent_buffer *eb)
3468{
	int num_folios = num_extent_folios(eb);
3470
3471 check_buffer_tree_ref(eb);
3472
3473 for (int i = 0; i < num_folios; i++)
3474 folio_mark_accessed(eb->folios[i]);
3475}
3476
3477struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
3478 u64 start)
3479{
3480 struct extent_buffer *eb;
3481
3482 eb = find_extent_buffer_nolock(fs_info, start);
3483 if (!eb)
3484 return NULL;
3485 /*
3486 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
3487 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
3488 * another task running free_extent_buffer() might have seen that flag
3489 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
3490 * writeback flags not set) and it's still in the tree (flag
3491 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
3492 * decrementing the extent buffer's reference count twice. So here we
3493 * could race and increment the eb's reference count, clear its stale
3494 * flag, mark it as dirty and drop our reference before the other task
3495 * finishes executing free_extent_buffer, which would later result in
3496 * an attempt to free an extent buffer that is dirty.
3497 */
3498 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
3499 spin_lock(&eb->refs_lock);
3500 spin_unlock(&eb->refs_lock);
3501 }
3502 mark_extent_buffer_accessed(eb);
3503 return eb;
3504}
3505
3506#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3507struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
3508 u64 start)
3509{
3510 struct extent_buffer *eb, *exists = NULL;
3511 int ret;
3512
3513 eb = find_extent_buffer(fs_info, start);
3514 if (eb)
3515 return eb;
3516 eb = alloc_dummy_extent_buffer(fs_info, start);
3517 if (!eb)
3518 return ERR_PTR(-ENOMEM);
3519 eb->fs_info = fs_info;
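	/*
	 * Insert the dummy eb into the buffer radix tree; if another task beat
	 * us to it, drop ours and use the existing one instead.
	 */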
3520again:
3521 ret = radix_tree_preload(GFP_NOFS);
3522 if (ret) {
3523 exists = ERR_PTR(ret);
3524 goto free_eb;
3525 }
3526 spin_lock(&fs_info->buffer_lock);
3527 ret = radix_tree_insert(&fs_info->buffer_radix,
3528 start >> fs_info->sectorsize_bits, eb);
3529 spin_unlock(&fs_info->buffer_lock);
3530 radix_tree_preload_end();
3531 if (ret == -EEXIST) {
3532 exists = find_extent_buffer(fs_info, start);
3533 if (exists)
3534 goto free_eb;
3535 else
3536 goto again;
3537 }
3538 check_buffer_tree_ref(eb);
3539 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3540
3541 return eb;
3542free_eb:
3543 btrfs_release_extent_buffer(eb);
3544 return exists;
3545}
3546#endif
3547
3548static struct extent_buffer *grab_extent_buffer(
3549 struct btrfs_fs_info *fs_info, struct page *page)
3550{
3551 struct folio *folio = page_folio(page);
3552 struct extent_buffer *exists;
3553
3554 /*
3555 * For subpage case, we completely rely on radix tree to ensure we
3556 * don't try to insert two ebs for the same bytenr. So here we always
3557 * return NULL and just continue.
3558 */
3559 if (fs_info->nodesize < PAGE_SIZE)
3560 return NULL;
3561
3562 /* Page not yet attached to an extent buffer */
3563 if (!folio_test_private(folio))
3564 return NULL;
3565
3566 /*
	 * We could have already allocated an eb for this page and attached one,
	 * so let's see if we can get a ref on the existing eb, and if we can we
3569 * know it's good and we can just return that one, else we know we can
3570 * just overwrite folio private.
3571 */
3572 exists = folio_get_private(folio);
3573 if (atomic_inc_not_zero(&exists->refs))
3574 return exists;
3575
3576 WARN_ON(PageDirty(page));
3577 folio_detach_private(folio);
3578 return NULL;
3579}
3580
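/*
 * Validate the start offset of a tree block: it must be sectorsize aligned,
 * must not cross a page boundary for subpage filesystems, and must be page
 * aligned when nodesize >= PAGE_SIZE. A block that is not nodesize aligned
 * only triggers a one-time warning.
 */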
3581static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
3582{
3583 if (!IS_ALIGNED(start, fs_info->sectorsize)) {
3584 btrfs_err(fs_info, "bad tree block start %llu", start);
3585 return -EINVAL;
3586 }
3587
3588 if (fs_info->nodesize < PAGE_SIZE &&
3589 offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
3590 btrfs_err(fs_info,
3591 "tree block crosses page boundary, start %llu nodesize %u",
3592 start, fs_info->nodesize);
3593 return -EINVAL;
3594 }
3595 if (fs_info->nodesize >= PAGE_SIZE &&
3596 !PAGE_ALIGNED(start)) {
3597 btrfs_err(fs_info,
3598 "tree block is not page aligned, start %llu nodesize %u",
3599 start, fs_info->nodesize);
3600 return -EINVAL;
3601 }
3602 if (!IS_ALIGNED(start, fs_info->nodesize) &&
3603 !test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) {
3604 btrfs_warn(fs_info,
3605"tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
3606 start, fs_info->nodesize);
3607 }
3608 return 0;
3609}
3610
3612/*
3613 * Return 0 if eb->folios[i] is attached to the btree inode successfully.
3614 * Return >0 if there is already another extent buffer for the range,
3615 * and @found_eb_ret will be updated.
3616 * Return -EAGAIN if the filemap has an existing folio but with a different
3617 * size than @eb.
3618 * The caller needs to free the existing folios and retry using the same order.
3619 */
3620static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
3621 struct extent_buffer **found_eb_ret)
3622{
3624 struct btrfs_fs_info *fs_info = eb->fs_info;
3625 struct address_space *mapping = fs_info->btree_inode->i_mapping;
3626 const unsigned long index = eb->start >> PAGE_SHIFT;
3627 struct folio *existing_folio;
3628 int ret;
3629
3630 ASSERT(found_eb_ret);
3631
3632 /* Caller should ensure the folio exists. */
3633 ASSERT(eb->folios[i]);
3634
3635retry:
3636 ret = filemap_add_folio(mapping, eb->folios[i], index + i,
3637 GFP_NOFS | __GFP_NOFAIL);
3638 if (!ret)
3639 return 0;
3640
3641 existing_folio = filemap_lock_folio(mapping, index + i);
3642 /* The page cache only exists for a very short time, just retry. */
3643 if (IS_ERR(existing_folio))
3644 goto retry;
3645
3646 /* For now, we should only have single-page folios for btree inode. */
3647 ASSERT(folio_nr_pages(existing_folio) == 1);
3648
3649 if (folio_size(existing_folio) != folio_size(eb->folios[0])) {
3650 folio_unlock(existing_folio);
3651 folio_put(existing_folio);
3652 return -EAGAIN;
3653 }
3654
3655 if (fs_info->nodesize < PAGE_SIZE) {
3656 /*
3657		 * We're going to reuse the existing page, so we can drop our
3658		 * allocated page and subpage structure now.
3659 */
3660 __free_page(folio_page(eb->folios[i], 0));
3661 eb->folios[i] = existing_folio;
3662 } else {
3663 struct extent_buffer *existing_eb;
3664
3665 existing_eb = grab_extent_buffer(fs_info,
3666 folio_page(existing_folio, 0));
3667 if (existing_eb) {
3668 /* The extent buffer still exists, we can use it directly. */
3669 *found_eb_ret = existing_eb;
3670 folio_unlock(existing_folio);
3671 folio_put(existing_folio);
3672 return 1;
3673 }
3674 /* The extent buffer no longer exists, we can reuse the folio. */
3675 __free_page(folio_page(eb->folios[i], 0));
3676 eb->folios[i] = existing_folio;
3677 }
3678 return 0;
3679}
3680
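/*
 * Allocate an extent buffer for the tree block at @start, attach its folios
 * to the btree inode's page cache and insert it into the buffer radix tree.
 * If another task raced with us and inserted a buffer for the same bytenr,
 * that existing buffer is returned instead.
 */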
3681struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
3682 u64 start, u64 owner_root, int level)
3683{
3684 unsigned long len = fs_info->nodesize;
3685 int num_folios;
3686 int attached = 0;
3687 struct extent_buffer *eb;
3688 struct extent_buffer *existing_eb = NULL;
3689 struct address_space *mapping = fs_info->btree_inode->i_mapping;
3690 struct btrfs_subpage *prealloc = NULL;
3691 u64 lockdep_owner = owner_root;
3692 bool page_contig = true;
3693 int uptodate = 1;
3694 int ret;
3695
3696 if (check_eb_alignment(fs_info, start))
3697 return ERR_PTR(-EINVAL);
3698
3699#if BITS_PER_LONG == 32
3700 if (start >= MAX_LFS_FILESIZE) {
3701 btrfs_err_rl(fs_info,
3702 "extent buffer %llu is beyond 32bit page cache limit", start);
3703 btrfs_err_32bit_limit(fs_info);
3704 return ERR_PTR(-EOVERFLOW);
3705 }
3706 if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
3707 btrfs_warn_32bit_limit(fs_info);
3708#endif
3709
3710 eb = find_extent_buffer(fs_info, start);
3711 if (eb)
3712 return eb;
3713
3714 eb = __alloc_extent_buffer(fs_info, start, len);
3715 if (!eb)
3716 return ERR_PTR(-ENOMEM);
3717
3718 /*
3719 * The reloc trees are just snapshots, so we need them to appear to be
3720 * just like any other fs tree WRT lockdep.
3721 */
3722 if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
3723 lockdep_owner = BTRFS_FS_TREE_OBJECTID;
3724
3725 btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
3726
3727 /*
3728	 * Preallocate folio private for the subpage case, so that we won't
3729	 * allocate memory while holding the i_private_lock or the page lock.
3730	 *
3731	 * The memory will be freed by attach_extent_buffer_page() or freed
3732	 * manually if we exit earlier.
3733 */
3734 if (fs_info->nodesize < PAGE_SIZE) {
3735 prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
3736 if (IS_ERR(prealloc)) {
3737 ret = PTR_ERR(prealloc);
3738 goto out;
3739 }
3740 }
3741
3742reallocate:
3743 /* Allocate all pages first. */
3744 ret = alloc_eb_folio_array(eb, __GFP_NOFAIL);
3745 if (ret < 0) {
3746 btrfs_free_subpage(prealloc);
3747 goto out;
3748 }
3749
3750 num_folios = num_extent_folios(eb);
3751 /* Attach all pages to the filemap. */
3752 for (int i = 0; i < num_folios; i++) {
3753 struct folio *folio;
3754
3755 ret = attach_eb_folio_to_filemap(eb, i, &existing_eb);
3756 if (ret > 0) {
3757 ASSERT(existing_eb);
3758 goto out;
3759 }
3760
3761 /*
3762 * TODO: Special handling for a corner case where the order of
3763 * folios mismatch between the new eb and filemap.
3764 *
3765 * This happens when:
3766 *
3767		 * - the new eb is using a higher order folio
3768		 *
3769		 * - the filemap is still using 0-order folios for the range
3770		 *   This can happen from a previous eb allocation, when we didn't
3771		 *   have a higher order folio available for the call.
3772 *
3773 * - the existing eb has already been freed
3774 *
3775 * In this case, we have to free the existing folios first, and
3776 * re-allocate using the same order.
3777 * Thankfully this is not going to happen yet, as we're still
3778 * using 0-order folios.
3779 */
3780 if (unlikely(ret == -EAGAIN)) {
3781 ASSERT(0);
3782 goto reallocate;
3783 }
3784 attached++;
3785
3786 /*
3787 * Only after attach_eb_folio_to_filemap(), eb->folios[] is
3788 * reliable, as we may choose to reuse the existing page cache
3789 * and free the allocated page.
3790 */
3791 folio = eb->folios[i];
3792 spin_lock(&mapping->i_private_lock);
3793 /* Should not fail, as we have preallocated the memory */
3794 ret = attach_extent_buffer_folio(eb, folio, prealloc);
3795 ASSERT(!ret);
3796 /*
3797		 * Inform that we have an extra eb under allocation, so that
3798		 * detach_extent_buffer_page() won't release the folio private
3799		 * when the eb hasn't yet been inserted into the radix tree.
3800		 *
3801		 * The ref will be decreased when the eb releases the page, in
3802		 * detach_extent_buffer_page(), so it needs no special handling
3803		 * in the error path.
3804 */
3805 btrfs_folio_inc_eb_refs(fs_info, folio);
3806 spin_unlock(&mapping->i_private_lock);
3807
3808 WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
3809
3810 /*
3811		 * Check if the current page is physically contiguous with the
3812		 * previous eb page.
3813		 * At this stage, either we allocated a large folio (thus @i would
3814		 * only be 0), or we fell back to per-page allocation.
3815 */
3816 if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0))
3817 page_contig = false;
3818
3819 if (!btrfs_folio_test_uptodate(fs_info, folio, eb->start, eb->len))
3820 uptodate = 0;
3821
3822 /*
3823 * We can't unlock the pages just yet since the extent buffer
3824		 * hasn't been properly inserted into the radix tree; this
3825		 * opens a race with btree_release_folio, which can free a page
3826		 * while we are still filling in all pages for the buffer and
3827		 * we could crash.
3828 */
3829 }
3830 if (uptodate)
3831 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3832 /* All pages are physically contiguous, can skip cross page handling. */
3833 if (page_contig)
3834 eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start);
3835again:
3836 ret = radix_tree_preload(GFP_NOFS);
3837 if (ret)
3838 goto out;
3839
3840 spin_lock(&fs_info->buffer_lock);
3841 ret = radix_tree_insert(&fs_info->buffer_radix,
3842 start >> fs_info->sectorsize_bits, eb);
3843 spin_unlock(&fs_info->buffer_lock);
3844 radix_tree_preload_end();
3845 if (ret == -EEXIST) {
3846 ret = 0;
3847 existing_eb = find_extent_buffer(fs_info, start);
3848 if (existing_eb)
3849 goto out;
3850 else
3851 goto again;
3852 }
3853 /* add one reference for the tree */
3854 check_buffer_tree_ref(eb);
3855 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3856
3857 /*
3858 * Now it's safe to unlock the pages because any calls to
3859 * btree_release_folio will correctly detect that a page belongs to a
3860 * live buffer and won't free them prematurely.
3861 */
3862 for (int i = 0; i < num_folios; i++)
3863 unlock_page(folio_page(eb->folios[i], 0));
3864 return eb;
3865
3866out:
3867 WARN_ON(!atomic_dec_and_test(&eb->refs));
3868
3869 /*
3870	 * Any attached folios need to be detached before we unlock them. This
3871	 * is because when we insert our new folios into the mapping, we are
3872	 * also attaching our eb to those folios. If we fail to insert a folio
3873	 * we'll look up the folio at that index and grab the attached eb. We
3874	 * do not want that lookup to grab this eb, as we're getting ready to
3875	 * free it. So we have to detach it first and then unlock it.
3876 *
3877 * We have to drop our reference and NULL it out here because in the
3878 * subpage case detaching does a btrfs_folio_dec_eb_refs() for our eb.
3879 * Below when we call btrfs_release_extent_buffer() we will call
3880 * detach_extent_buffer_folio() on our remaining pages in the !subpage
3881 * case. If we left eb->folios[i] populated in the subpage case we'd
3882 * double put our reference and be super sad.
3883 */
3884 for (int i = 0; i < attached; i++) {
3885 ASSERT(eb->folios[i]);
3886 detach_extent_buffer_folio(eb, eb->folios[i]);
3887 unlock_page(folio_page(eb->folios[i], 0));
3888 folio_put(eb->folios[i]);
3889 eb->folios[i] = NULL;
3890 }
3891 /*
3892	 * Now all pages of that extent buffer are unmapped, set the UNMAPPED
3893	 * flag, so it can be cleaned up without utilizing page->mapping.
3894 */
3895 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3896
3897 btrfs_release_extent_buffer(eb);
3898 if (ret < 0)
3899 return ERR_PTR(ret);
3900 ASSERT(existing_eb);
3901 return existing_eb;
3902}
3903
3904static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
3905{
3906 struct extent_buffer *eb =
3907 container_of(head, struct extent_buffer, rcu_head);
3908
3909 __free_extent_buffer(eb);
3910}
3911
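/*
 * Drop one reference from the extent buffer. If it was the last one, remove
 * the buffer from the radix tree, release its pages and free it via RCU.
 * Returns 1 if the buffer got freed, 0 otherwise. Always drops eb->refs_lock.
 */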
3912static int release_extent_buffer(struct extent_buffer *eb)
3913 __releases(&eb->refs_lock)
3914{
3915 lockdep_assert_held(&eb->refs_lock);
3916
3917 WARN_ON(atomic_read(&eb->refs) == 0);
3918 if (atomic_dec_and_test(&eb->refs)) {
3919 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
3920 struct btrfs_fs_info *fs_info = eb->fs_info;
3921
3922 spin_unlock(&eb->refs_lock);
3923
3924 spin_lock(&fs_info->buffer_lock);
3925 radix_tree_delete(&fs_info->buffer_radix,
3926 eb->start >> fs_info->sectorsize_bits);
3927 spin_unlock(&fs_info->buffer_lock);
3928 } else {
3929 spin_unlock(&eb->refs_lock);
3930 }
3931
3932 btrfs_leak_debug_del_eb(eb);
3933 /* Should be safe to release our pages at this point */
3934 btrfs_release_extent_buffer_pages(eb);
3935#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3936 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
3937 __free_extent_buffer(eb);
3938 return 1;
3939 }
3940#endif
3941 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
3942 return 1;
3943 }
3944 spin_unlock(&eb->refs_lock);
3945
3946 return 0;
3947}
3948
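/*
 * Drop one reference on @eb. Most drops are done locklessly with a cmpxchg;
 * once the reference count gets low enough we take refs_lock and let
 * release_extent_buffer() decide whether the buffer can be freed.
 */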
3949void free_extent_buffer(struct extent_buffer *eb)
3950{
3951 int refs;
3952 if (!eb)
3953 return;
3954
3955 refs = atomic_read(&eb->refs);
3956 while (1) {
3957 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
3958 || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
3959 refs == 1))
3960 break;
3961 if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
3962 return;
3963 }
3964
3965 spin_lock(&eb->refs_lock);
3966 if (atomic_read(&eb->refs) == 2 &&
3967 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
3968 !extent_buffer_under_io(eb) &&
3969 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3970 atomic_dec(&eb->refs);
3971
3972 /*
3973 * I know this is terrible, but it's temporary until we stop tracking
3974 * the uptodate bits and such for the extent buffers.
3975 */
3976 release_extent_buffer(eb);
3977}
3978
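/*
 * Like free_extent_buffer(), but also marks the buffer stale and drops the
 * tree reference if nobody else is using it, so the buffer gets freed
 * instead of lingering in the radix tree.
 */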
3979void free_extent_buffer_stale(struct extent_buffer *eb)
3980{
3981 if (!eb)
3982 return;
3983
3984 spin_lock(&eb->refs_lock);
3985 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
3986
3987 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
3988 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3989 atomic_dec(&eb->refs);
3990 release_extent_buffer(eb);
3991}
3992
3993static void btree_clear_folio_dirty(struct folio *folio)
3994{
3995 ASSERT(folio_test_dirty(folio));
3996 ASSERT(folio_test_locked(folio));
3997 folio_clear_dirty_for_io(folio);
3998 xa_lock_irq(&folio->mapping->i_pages);
3999 if (!folio_test_dirty(folio))
4000 __xa_clear_mark(&folio->mapping->i_pages,
4001 folio_index(folio), PAGECACHE_TAG_DIRTY);
4002 xa_unlock_irq(&folio->mapping->i_pages);
4003}
4004
4005static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
4006{
4007 struct btrfs_fs_info *fs_info = eb->fs_info;
4008 struct folio *folio = eb->folios[0];
4009 bool last;
4010
4011 /* btree_clear_folio_dirty() needs page locked. */
4012 folio_lock(folio);
4013 last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start, eb->len);
4014 if (last)
4015 btree_clear_folio_dirty(folio);
4016 folio_unlock(folio);
4017 WARN_ON(atomic_read(&eb->refs) == 0);
4018}
4019
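/*
 * Clear the dirty flag from an extent buffer and update the dirty metadata
 * byte accounting. On zoned filesystems the buffer is only flagged with
 * EXTENT_BUFFER_ZONED_ZEROOUT to preserve write ordering instead of being
 * un-dirtied here.
 */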
4020void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
4021 struct extent_buffer *eb)
4022{
4023 struct btrfs_fs_info *fs_info = eb->fs_info;
4024 int num_folios;
4025
4026 btrfs_assert_tree_write_locked(eb);
4027
4028 if (trans && btrfs_header_generation(eb) != trans->transid)
4029 return;
4030
4031 /*
4032 * Instead of clearing the dirty flag off of the buffer, mark it as
4033 * EXTENT_BUFFER_ZONED_ZEROOUT. This allows us to preserve
4034 * write-ordering in zoned mode, without the need to later re-dirty
4035 * the extent_buffer.
4036 *
4037 * The actual zeroout of the buffer will happen later in
4038 * btree_csum_one_bio.
4039 */
4040 if (btrfs_is_zoned(fs_info)) {
4041 set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags);
4042 return;
4043 }
4044
4045 if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
4046 return;
4047
4048 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
4049 fs_info->dirty_metadata_batch);
4050
4051 if (eb->fs_info->nodesize < PAGE_SIZE)
4052 return clear_subpage_extent_buffer_dirty(eb);
4053
4054 num_folios = num_extent_folios(eb);
4055 for (int i = 0; i < num_folios; i++) {
4056 struct folio *folio = eb->folios[i];
4057
4058 if (!folio_test_dirty(folio))
4059 continue;
4060 folio_lock(folio);
4061 btree_clear_folio_dirty(folio);
4062 folio_unlock(folio);
4063 }
4064 WARN_ON(atomic_read(&eb->refs) == 0);
4065}
4066
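/*
 * Mark the extent buffer dirty: take a tree reference if not already held,
 * set EXTENT_BUFFER_DIRTY, dirty all backing folios and account the bytes
 * in dirty_metadata_bytes.
 */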
4067void set_extent_buffer_dirty(struct extent_buffer *eb)
4068{
4069 int num_folios;
4070 bool was_dirty;
4071
4072 check_buffer_tree_ref(eb);
4073
4074 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4075
4076 num_folios = num_extent_folios(eb);
4077 WARN_ON(atomic_read(&eb->refs) == 0);
4078 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
4079
4080 if (!was_dirty) {
4081 bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
4082
4083 /*
4084		 * For the subpage case, we can have other extent buffers in the
4085		 * same page, and in clear_subpage_extent_buffer_dirty() we
4086		 * have to clear the page dirty flag without the subpage lock held.
4087		 * This can cause a race where our page gets its dirty flag cleared
4088		 * right after we just set it.
4089		 *
4090		 * Thankfully, clear_subpage_extent_buffer_dirty() already locks
4091		 * its page for other reasons, so we can use the page lock to
4092		 * prevent the above race.
4093 */
4094 if (subpage)
4095 lock_page(folio_page(eb->folios[0], 0));
4096 for (int i = 0; i < num_folios; i++)
4097 btrfs_folio_set_dirty(eb->fs_info, eb->folios[i],
4098 eb->start, eb->len);
4099 if (subpage)
4100 unlock_page(folio_page(eb->folios[0], 0));
4101 percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
4102 eb->len,
4103 eb->fs_info->dirty_metadata_batch);
4104 }
4105#ifdef CONFIG_BTRFS_DEBUG
4106 for (int i = 0; i < num_folios; i++)
4107 ASSERT(folio_test_dirty(eb->folios[i]));
4108#endif
4109}
4110
4111void clear_extent_buffer_uptodate(struct extent_buffer *eb)
4112{
4113 struct btrfs_fs_info *fs_info = eb->fs_info;
4114 int num_folios = num_extent_folios(eb);
4115
4116 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4117 for (int i = 0; i < num_folios; i++) {
4118 struct folio *folio = eb->folios[i];
4119
4120 if (!folio)
4121 continue;
4122
4123 /*
4124 * This is special handling for metadata subpage, as regular
4125 * btrfs_is_subpage() can not handle cloned/dummy metadata.
4126 */
4127 if (fs_info->nodesize >= PAGE_SIZE)
4128 folio_clear_uptodate(folio);
4129 else
4130 btrfs_subpage_clear_uptodate(fs_info, folio,
4131 eb->start, eb->len);
4132 }
4133}
4134
4135void set_extent_buffer_uptodate(struct extent_buffer *eb)
4136{
4137 struct btrfs_fs_info *fs_info = eb->fs_info;
4138 int num_folios = num_extent_folios(eb);
4139
4140 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4141 for (int i = 0; i < num_folios; i++) {
4142 struct folio *folio = eb->folios[i];
4143
4144 /*
4145 * This is special handling for metadata subpage, as regular
4146 * btrfs_is_subpage() can not handle cloned/dummy metadata.
4147 */
4148 if (fs_info->nodesize >= PAGE_SIZE)
4149 folio_mark_uptodate(folio);
4150 else
4151 btrfs_subpage_set_uptodate(fs_info, folio,
4152 eb->start, eb->len);
4153 }
4154}
4155
4156static void end_bbio_meta_read(struct btrfs_bio *bbio)
4157{
4158 struct extent_buffer *eb = bbio->private;
4159 struct btrfs_fs_info *fs_info = eb->fs_info;
4160 bool uptodate = !bbio->bio.bi_status;
4161 struct folio_iter fi;
4162 u32 bio_offset = 0;
4163
4164 eb->read_mirror = bbio->mirror_num;
4165
4166 if (uptodate &&
4167 btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0)
4168 uptodate = false;
4169
4170 if (uptodate) {
4171 set_extent_buffer_uptodate(eb);
4172 } else {
4173 clear_extent_buffer_uptodate(eb);
4174 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
4175 }
4176
4177 bio_for_each_folio_all(fi, &bbio->bio) {
4178 struct folio *folio = fi.folio;
4179 u64 start = eb->start + bio_offset;
4180 u32 len = fi.length;
4181
4182 if (uptodate)
4183 btrfs_folio_set_uptodate(fs_info, folio, start, len);
4184 else
4185 btrfs_folio_clear_uptodate(fs_info, folio, start, len);
4186
4187 bio_offset += len;
4188 }
4189
4190 clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
4191 smp_mb__after_atomic();
4192 wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
4193 free_extent_buffer(eb);
4194
4195 bio_put(&bbio->bio);
4196}
4197
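/*
 * Read an extent buffer from disk if it is not uptodate yet. Only one reader
 * at a time is allowed (EXTENT_BUFFER_READING); with WAIT_COMPLETE the call
 * blocks until the read finishes and returns -EIO if the buffer still isn't
 * uptodate.
 */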
4198int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
4199 struct btrfs_tree_parent_check *check)
4200{
4201 struct btrfs_bio *bbio;
4202 bool ret;
4203
4204 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4205 return 0;
4206
4207 /*
4208 * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
4209 * operation, which could potentially still be in flight. In this case
4210 * we simply want to return an error.
4211 */
4212 if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
4213 return -EIO;
4214
4215 /* Someone else is already reading the buffer, just wait for it. */
4216 if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
4217 goto done;
4218
4219 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
4220 eb->read_mirror = 0;
4221 check_buffer_tree_ref(eb);
4222 atomic_inc(&eb->refs);
4223
4224 bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
4225 REQ_OP_READ | REQ_META, eb->fs_info,
4226 end_bbio_meta_read, eb);
4227 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
4228 bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
4229 bbio->file_offset = eb->start;
4230 memcpy(&bbio->parent_check, check, sizeof(*check));
4231 if (eb->fs_info->nodesize < PAGE_SIZE) {
4232 ret = bio_add_folio(&bbio->bio, eb->folios[0], eb->len,
4233 eb->start - folio_pos(eb->folios[0]));
4234 ASSERT(ret);
4235 } else {
4236 int num_folios = num_extent_folios(eb);
4237
4238 for (int i = 0; i < num_folios; i++) {
4239 struct folio *folio = eb->folios[i];
4240
4241 ret = bio_add_folio(&bbio->bio, folio, folio_size(folio), 0);
4242 ASSERT(ret);
4243 }
4244 }
4245 btrfs_submit_bio(bbio, mirror_num);
4246
4247done:
4248 if (wait == WAIT_COMPLETE) {
4249 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
4250 if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4251 return -EIO;
4252 }
4253
4254 return 0;
4255}
4256
4257static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
4258 unsigned long len)
4259{
4260 btrfs_warn(eb->fs_info,
4261 "access to eb bytenr %llu len %lu out of range start %lu len %lu",
4262 eb->start, eb->len, start, len);
4263 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4264
4265 return true;
4266}
4267
4268/*
4269 * Check if the [start, start + len) range is valid before reading/writing
4270 * the eb.
4271 * NOTE: @start and @len are offset inside the eb, not logical address.
4272 *
4273 * Caller should not touch the dst/src memory if this function returns error.
4274 */
4275static inline int check_eb_range(const struct extent_buffer *eb,
4276 unsigned long start, unsigned long len)
4277{
4278 unsigned long offset;
4279
4280 /* start, start + len should not go beyond eb->len nor overflow */
4281 if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
4282 return report_eb_range(eb, start, len);
4283
4284 return false;
4285}
4286
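/*
 * Copy @len bytes starting at offset @start inside the extent buffer into
 * @dstv. An out-of-range request zeroes the destination instead of copying
 * garbage.
 */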
4287void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
4288 unsigned long start, unsigned long len)
4289{
4290 const int unit_size = folio_size(eb->folios[0]);
4291 size_t cur;
4292 size_t offset;
4293 char *dst = (char *)dstv;
4294 unsigned long i = get_eb_folio_index(eb, start);
4295
4296 if (check_eb_range(eb, start, len)) {
4297 /*
4298 * Invalid range hit, reset the memory, so callers won't get
4299 * some random garbage for their uninitialized memory.
4300 */
4301 memset(dstv, 0, len);
4302 return;
4303 }
4304
4305 if (eb->addr) {
4306 memcpy(dstv, eb->addr + start, len);
4307 return;
4308 }
4309
4310 offset = get_eb_offset_in_folio(eb, start);
4311
4312 while (len > 0) {
4313 char *kaddr;
4314
4315 cur = min(len, unit_size - offset);
4316 kaddr = folio_address(eb->folios[i]);
4317 memcpy(dst, kaddr + offset, cur);
4318
4319 dst += cur;
4320 len -= cur;
4321 offset = 0;
4322 i++;
4323 }
4324}
4325
4326int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
4327 void __user *dstv,
4328 unsigned long start, unsigned long len)
4329{
4330 const int unit_size = folio_size(eb->folios[0]);
4331 size_t cur;
4332 size_t offset;
4333 char __user *dst = (char __user *)dstv;
4334 unsigned long i = get_eb_folio_index(eb, start);
4335 int ret = 0;
4336
4337 WARN_ON(start > eb->len);
4338 WARN_ON(start + len > eb->start + eb->len);
4339
4340 if (eb->addr) {
4341 if (copy_to_user_nofault(dstv, eb->addr + start, len))
4342 ret = -EFAULT;
4343 return ret;
4344 }
4345
4346 offset = get_eb_offset_in_folio(eb, start);
4347
4348 while (len > 0) {
4349 char *kaddr;
4350
4351 cur = min(len, unit_size - offset);
4352 kaddr = folio_address(eb->folios[i]);
4353 if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
4354 ret = -EFAULT;
4355 break;
4356 }
4357
4358 dst += cur;
4359 len -= cur;
4360 offset = 0;
4361 i++;
4362 }
4363
4364 return ret;
4365}
4366
4367int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
4368 unsigned long start, unsigned long len)
4369{
4370 const int unit_size = folio_size(eb->folios[0]);
4371 size_t cur;
4372 size_t offset;
4373 char *kaddr;
4374 char *ptr = (char *)ptrv;
4375 unsigned long i = get_eb_folio_index(eb, start);
4376 int ret = 0;
4377
4378 if (check_eb_range(eb, start, len))
4379 return -EINVAL;
4380
4381 if (eb->addr)
4382 return memcmp(ptrv, eb->addr + start, len);
4383
4384 offset = get_eb_offset_in_folio(eb, start);
4385
4386 while (len > 0) {
4387 cur = min(len, unit_size - offset);
4388 kaddr = folio_address(eb->folios[i]);
4389 ret = memcmp(ptr, kaddr + offset, cur);
4390 if (ret)
4391 break;
4392
4393 ptr += cur;
4394 len -= cur;
4395 offset = 0;
4396 i++;
4397 }
4398 return ret;
4399}
4400
4401/*
4402 * Check that the extent buffer is uptodate.
4403 *
4404 * For the regular case, check if the folio backing index @i is uptodate.
4405 * For the subpage case, check if the range covered by the eb is uptodate.
4406 */
4407static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i)
4408{
4409 struct btrfs_fs_info *fs_info = eb->fs_info;
4410 struct folio *folio = eb->folios[i];
4411
4412 ASSERT(folio);
4413
4414 /*
4415 * If we are using the commit root we could potentially clear a page
4416 * Uptodate while we're using the extent buffer that we've previously
4417	 * looked up. We don't want to complain in this case, as the page was
4418	 * valid before; we just didn't write it out. Instead we want to catch
4419 * the case where we didn't actually read the block properly, which
4420 * would have !PageUptodate and !EXTENT_BUFFER_WRITE_ERR.
4421 */
4422 if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
4423 return;
4424
4425 if (fs_info->nodesize < PAGE_SIZE) {
4426 struct folio *folio = eb->folios[0];
4427
4428 ASSERT(i == 0);
4429 if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, folio,
4430 eb->start, eb->len)))
4431 btrfs_subpage_dump_bitmap(fs_info, folio, eb->start, eb->len);
4432 } else {
4433 WARN_ON(!folio_test_uptodate(folio));
4434 }
4435}
4436
4437static void __write_extent_buffer(const struct extent_buffer *eb,
4438 const void *srcv, unsigned long start,
4439 unsigned long len, bool use_memmove)
4440{
4441 const int unit_size = folio_size(eb->folios[0]);
4442 size_t cur;
4443 size_t offset;
4444 char *kaddr;
4445 char *src = (char *)srcv;
4446 unsigned long i = get_eb_folio_index(eb, start);
4447 /* For unmapped (dummy) ebs, no need to check their uptodate status. */
4448 const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
4449
4450 if (check_eb_range(eb, start, len))
4451 return;
4452
4453 if (eb->addr) {
4454 if (use_memmove)
4455 memmove(eb->addr + start, srcv, len);
4456 else
4457 memcpy(eb->addr + start, srcv, len);
4458 return;
4459 }
4460
4461 offset = get_eb_offset_in_folio(eb, start);
4462
4463 while (len > 0) {
4464 if (check_uptodate)
4465 assert_eb_folio_uptodate(eb, i);
4466
4467 cur = min(len, unit_size - offset);
4468 kaddr = folio_address(eb->folios[i]);
4469 if (use_memmove)
4470 memmove(kaddr + offset, src, cur);
4471 else
4472 memcpy(kaddr + offset, src, cur);
4473
4474 src += cur;
4475 len -= cur;
4476 offset = 0;
4477 i++;
4478 }
4479}
4480
4481void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
4482 unsigned long start, unsigned long len)
4483{
4484 return __write_extent_buffer(eb, srcv, start, len, false);
4485}
4486
4487static void memset_extent_buffer(const struct extent_buffer *eb, int c,
4488 unsigned long start, unsigned long len)
4489{
4490 const int unit_size = folio_size(eb->folios[0]);
4491 unsigned long cur = start;
4492
4493 if (eb->addr) {
4494 memset(eb->addr + start, c, len);
4495 return;
4496 }
4497
4498 while (cur < start + len) {
4499 unsigned long index = get_eb_folio_index(eb, cur);
4500 unsigned int offset = get_eb_offset_in_folio(eb, cur);
4501 unsigned int cur_len = min(start + len - cur, unit_size - offset);
4502
4503 assert_eb_folio_uptodate(eb, index);
4504 memset(folio_address(eb->folios[index]) + offset, c, cur_len);
4505
4506 cur += cur_len;
4507 }
4508}
4509
4510void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
4511 unsigned long len)
4512{
4513 if (check_eb_range(eb, start, len))
4514 return;
4515 return memset_extent_buffer(eb, 0, start, len);
4516}
4517
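/*
 * Copy the full contents of extent buffer @src into @dst. Both buffers must
 * have the same length.
 */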
4518void copy_extent_buffer_full(const struct extent_buffer *dst,
4519 const struct extent_buffer *src)
4520{
4521 const int unit_size = folio_size(src->folios[0]);
4522 unsigned long cur = 0;
4523
4524 ASSERT(dst->len == src->len);
4525
4526 while (cur < src->len) {
4527 unsigned long index = get_eb_folio_index(src, cur);
4528 unsigned long offset = get_eb_offset_in_folio(src, cur);
4529 unsigned long cur_len = min(src->len, unit_size - offset);
4530 void *addr = folio_address(src->folios[index]) + offset;
4531
4532 write_extent_buffer(dst, addr, cur, cur_len);
4533
4534 cur += cur_len;
4535 }
4536}
4537
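/*
 * Copy @len bytes from @src_offset in @src to @dst_offset in @dst, after
 * validating both ranges against the respective buffer lengths.
 */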
4538void copy_extent_buffer(const struct extent_buffer *dst,
4539 const struct extent_buffer *src,
4540 unsigned long dst_offset, unsigned long src_offset,
4541 unsigned long len)
4542{
4543 const int unit_size = folio_size(dst->folios[0]);
4544 u64 dst_len = dst->len;
4545 size_t cur;
4546 size_t offset;
4547 char *kaddr;
4548 unsigned long i = get_eb_folio_index(dst, dst_offset);
4549
4550 if (check_eb_range(dst, dst_offset, len) ||
4551 check_eb_range(src, src_offset, len))
4552 return;
4553
4554 WARN_ON(src->len != dst_len);
4555
4556 offset = get_eb_offset_in_folio(dst, dst_offset);
4557
4558 while (len > 0) {
4559 assert_eb_folio_uptodate(dst, i);
4560
4561 cur = min(len, (unsigned long)(unit_size - offset));
4562
4563 kaddr = folio_address(dst->folios[i]);
4564 read_extent_buffer(src, kaddr + offset, src_offset, cur);
4565
4566 src_offset += cur;
4567 len -= cur;
4568 offset = 0;
4569 i++;
4570 }
4571}
4572
4573/*
4574 * Calculate the folio and offset of the byte containing the given bit number.
4575 *
4576 * @eb: the extent buffer
4577 * @start: offset of the bitmap item in the extent buffer
4578 * @nr: bit number
4579 * @folio_index: return index of the folio in the extent buffer that contains
4580 * the given bit number
4581 * @folio_offset: return offset into the folio given by folio_index
4582 *
4583 * This helper hides the ugliness of finding the byte in an extent buffer which
4584 * contains a given bit.
4585 */
4586static inline void eb_bitmap_offset(const struct extent_buffer *eb,
4587 unsigned long start, unsigned long nr,
4588 unsigned long *folio_index,
4589 size_t *folio_offset)
4590{
4591 size_t byte_offset = BIT_BYTE(nr);
4592 size_t offset;
4593
4594 /*
4595 * The byte we want is the offset of the extent buffer + the offset of
4596 * the bitmap item in the extent buffer + the offset of the byte in the
4597 * bitmap item.
4598 */
4599 offset = start + offset_in_folio(eb->folios[0], eb->start) + byte_offset;
4600
4601 *folio_index = offset >> folio_shift(eb->folios[0]);
4602 *folio_offset = offset_in_folio(eb->folios[0], offset);
4603}
4604
4605/*
4606 * Determine whether a bit in a bitmap item is set.
4607 *
4608 * @eb: the extent buffer
4609 * @start: offset of the bitmap item in the extent buffer
4610 * @nr: bit number to test
4611 */
4612int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
4613 unsigned long nr)
4614{
4615 unsigned long i;
4616 size_t offset;
4617 u8 *kaddr;
4618
4619 eb_bitmap_offset(eb, start, nr, &i, &offset);
4620 assert_eb_folio_uptodate(eb, i);
4621 kaddr = folio_address(eb->folios[i]);
4622 return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
4623}
4624
4625static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr)
4626{
4627 unsigned long index = get_eb_folio_index(eb, bytenr);
4628
4629 if (check_eb_range(eb, bytenr, 1))
4630 return NULL;
4631 return folio_address(eb->folios[index]) + get_eb_offset_in_folio(eb, bytenr);
4632}
4633
4634/*
4635 * Set an area of a bitmap to 1.
4636 *
4637 * @eb: the extent buffer
4638 * @start: offset of the bitmap item in the extent buffer
4639 * @pos: bit number of the first bit
4640 * @len: number of bits to set
4641 */
4642void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
4643 unsigned long pos, unsigned long len)
4644{
4645 unsigned int first_byte = start + BIT_BYTE(pos);
4646 unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4647 const bool same_byte = (first_byte == last_byte);
4648 u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4649 u8 *kaddr;
4650
4651 if (same_byte)
4652 mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4653
4654 /* Handle the first byte. */
4655 kaddr = extent_buffer_get_byte(eb, first_byte);
4656 *kaddr |= mask;
4657 if (same_byte)
4658 return;
4659
4660 /* Handle the byte aligned part. */
4661 ASSERT(first_byte + 1 <= last_byte);
4662 memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1);
4663
4664 /* Handle the last byte. */
4665 kaddr = extent_buffer_get_byte(eb, last_byte);
4666 *kaddr |= BITMAP_LAST_BYTE_MASK(pos + len);
4667}
4668
4670/*
4671 * Clear an area of a bitmap.
4672 *
4673 * @eb: the extent buffer
4674 * @start: offset of the bitmap item in the extent buffer
4675 * @pos: bit number of the first bit
4676 * @len: number of bits to clear
4677 */
4678void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
4679 unsigned long start, unsigned long pos,
4680 unsigned long len)
4681{
4682 unsigned int first_byte = start + BIT_BYTE(pos);
4683 unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4684 const bool same_byte = (first_byte == last_byte);
4685 u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4686 u8 *kaddr;
4687
4688 if (same_byte)
4689 mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4690
4691 /* Handle the first byte. */
4692 kaddr = extent_buffer_get_byte(eb, first_byte);
4693 *kaddr &= ~mask;
4694 if (same_byte)
4695 return;
4696
4697 /* Handle the byte aligned part. */
4698 ASSERT(first_byte + 1 <= last_byte);
4699 memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1);
4700
4701 /* Handle the last byte. */
4702 kaddr = extent_buffer_get_byte(eb, last_byte);
4703 *kaddr &= ~BITMAP_LAST_BYTE_MASK(pos + len);
4704}
4705
4706static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4707{
4708 unsigned long distance = (src > dst) ? src - dst : dst - src;
4709 return distance < len;
4710}
4711
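/*
 * Copy @len bytes from @src_offset to @dst_offset within the same extent
 * buffer, using memmove semantics whenever the two ranges overlap.
 */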
4712void memcpy_extent_buffer(const struct extent_buffer *dst,
4713 unsigned long dst_offset, unsigned long src_offset,
4714 unsigned long len)
4715{
4716 const int unit_size = folio_size(dst->folios[0]);
4717 unsigned long cur_off = 0;
4718
4719 if (check_eb_range(dst, dst_offset, len) ||
4720 check_eb_range(dst, src_offset, len))
4721 return;
4722
4723 if (dst->addr) {
4724 const bool use_memmove = areas_overlap(src_offset, dst_offset, len);
4725
4726 if (use_memmove)
4727 memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4728 else
4729 memcpy(dst->addr + dst_offset, dst->addr + src_offset, len);
4730 return;
4731 }
4732
4733 while (cur_off < len) {
4734 unsigned long cur_src = cur_off + src_offset;
4735 unsigned long folio_index = get_eb_folio_index(dst, cur_src);
4736 unsigned long folio_off = get_eb_offset_in_folio(dst, cur_src);
4737 unsigned long cur_len = min(src_offset + len - cur_src,
4738 unit_size - folio_off);
4739 void *src_addr = folio_address(dst->folios[folio_index]) + folio_off;
4740 const bool use_memmove = areas_overlap(src_offset + cur_off,
4741 dst_offset + cur_off, cur_len);
4742
4743 __write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len,
4744 use_memmove);
4745 cur_off += cur_len;
4746 }
4747}
4748
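/*
 * Like memcpy_extent_buffer() but always safe for overlapping ranges: when
 * the destination is above the source the copy is done backwards.
 */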
4749void memmove_extent_buffer(const struct extent_buffer *dst,
4750 unsigned long dst_offset, unsigned long src_offset,
4751 unsigned long len)
4752{
4753 unsigned long dst_end = dst_offset + len - 1;
4754 unsigned long src_end = src_offset + len - 1;
4755
4756 if (check_eb_range(dst, dst_offset, len) ||
4757 check_eb_range(dst, src_offset, len))
4758 return;
4759
4760 if (dst_offset < src_offset) {
4761 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4762 return;
4763 }
4764
4765 if (dst->addr) {
4766 memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4767 return;
4768 }
4769
4770 while (len > 0) {
4771 unsigned long src_i;
4772 size_t cur;
4773 size_t dst_off_in_folio;
4774 size_t src_off_in_folio;
4775 void *src_addr;
4776 bool use_memmove;
4777
4778 src_i = get_eb_folio_index(dst, src_end);
4779
4780 dst_off_in_folio = get_eb_offset_in_folio(dst, dst_end);
4781 src_off_in_folio = get_eb_offset_in_folio(dst, src_end);
4782
4783 cur = min_t(unsigned long, len, src_off_in_folio + 1);
4784 cur = min(cur, dst_off_in_folio + 1);
4785
4786 src_addr = folio_address(dst->folios[src_i]) + src_off_in_folio -
4787 cur + 1;
4788 use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
4789 cur);
4790
4791 __write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
4792 use_memmove);
4793
4794 dst_end -= cur;
4795 src_end -= cur;
4796 len -= cur;
4797 }
4798}
4799
4800#define GANG_LOOKUP_SIZE 16
4801static struct extent_buffer *get_next_extent_buffer(
4802 struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
4803{
4804 struct extent_buffer *gang[GANG_LOOKUP_SIZE];
4805 struct extent_buffer *found = NULL;
4806 u64 page_start = page_offset(page);
4807 u64 cur = page_start;
4808
4809 ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
4810 lockdep_assert_held(&fs_info->buffer_lock);
4811
4812 while (cur < page_start + PAGE_SIZE) {
4813 int ret;
4814 int i;
4815
4816 ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
4817 (void **)gang, cur >> fs_info->sectorsize_bits,
4818 min_t(unsigned int, GANG_LOOKUP_SIZE,
4819 PAGE_SIZE / fs_info->nodesize));
4820 if (ret == 0)
4821 goto out;
4822 for (i = 0; i < ret; i++) {
4823 /* Already beyond page end */
4824 if (gang[i]->start >= page_start + PAGE_SIZE)
4825 goto out;
4826 /* Found one */
4827 if (gang[i]->start >= bytenr) {
4828 found = gang[i];
4829 goto out;
4830 }
4831 }
4832 cur = gang[ret - 1]->start + gang[ret - 1]->len;
4833 }
4834out:
4835 return found;
4836}
4837
4838static int try_release_subpage_extent_buffer(struct page *page)
4839{
4840 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
4841 u64 cur = page_offset(page);
4842 const u64 end = page_offset(page) + PAGE_SIZE;
4843 int ret;
4844
4845 while (cur < end) {
4846 struct extent_buffer *eb = NULL;
4847
4848 /*
4849		 * Unlike try_release_extent_buffer(), which uses folio private
4850		 * to grab the buffer, for the subpage case we rely on the radix
4851		 * tree, thus we need to ensure radix tree consistency.
4852		 *
4853		 * We also want an atomic snapshot of the radix tree, thus we go
4854		 * with the spinlock rather than RCU.
4855 */
4856 spin_lock(&fs_info->buffer_lock);
4857 eb = get_next_extent_buffer(fs_info, page, cur);
4858 if (!eb) {
4859 /* No more eb in the page range after or at cur */
4860 spin_unlock(&fs_info->buffer_lock);
4861 break;
4862 }
4863 cur = eb->start + eb->len;
4864
4865 /*
4866 * The same as try_release_extent_buffer(), to ensure the eb
4867 * won't disappear out from under us.
4868 */
4869 spin_lock(&eb->refs_lock);
4870 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4871 spin_unlock(&eb->refs_lock);
4872 spin_unlock(&fs_info->buffer_lock);
4873 break;
4874 }
4875 spin_unlock(&fs_info->buffer_lock);
4876
4877 /*
4878 * If tree ref isn't set then we know the ref on this eb is a
4879		 * real ref, so just return; this eb will likely be freed soon
4880 * anyway.
4881 */
4882 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4883 spin_unlock(&eb->refs_lock);
4884 break;
4885 }
4886
4887 /*
4888		 * Here we don't care about the return value; we will always
4889		 * check the folio private at the end, and
4890		 * release_extent_buffer() will release the refs_lock.
4891 */
4892 release_extent_buffer(eb);
4893 }
4894 /*
4895	 * Finally, check if we have cleared folio private; if we have released
4896	 * all ebs in the page, the folio private should be cleared by now.
4897 */
4898 spin_lock(&page->mapping->i_private_lock);
4899 if (!folio_test_private(page_folio(page)))
4900 ret = 1;
4901 else
4902 ret = 0;
4903 spin_unlock(&page->mapping->i_private_lock);
4904 return ret;
4905
4906}
4907
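/*
 * Attempt to release the extent buffer attached to @page so the page can be
 * freed. Returns 1 if the page no longer backs a live extent buffer, 0 if
 * the buffer is still referenced or under IO.
 */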
4908int try_release_extent_buffer(struct page *page)
4909{
4910 struct folio *folio = page_folio(page);
4911 struct extent_buffer *eb;
4912
4913 if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
4914 return try_release_subpage_extent_buffer(page);
4915
4916 /*
4917 * We need to make sure nobody is changing folio private, as we rely on
4918 * folio private as the pointer to extent buffer.
4919 */
4920 spin_lock(&page->mapping->i_private_lock);
4921 if (!folio_test_private(folio)) {
4922 spin_unlock(&page->mapping->i_private_lock);
4923 return 1;
4924 }
4925
4926 eb = folio_get_private(folio);
4927 BUG_ON(!eb);
4928
4929 /*
4930	 * This is a little awful but should be ok: we need to make sure that
4931 * the eb doesn't disappear out from under us while we're looking at
4932 * this page.
4933 */
4934 spin_lock(&eb->refs_lock);
4935 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4936 spin_unlock(&eb->refs_lock);
4937 spin_unlock(&page->mapping->i_private_lock);
4938 return 0;
4939 }
4940 spin_unlock(&page->mapping->i_private_lock);
4941
4942 /*
4943 * If tree ref isn't set then we know the ref on this eb is a real ref,
4944	 * so just return; this page will likely be freed soon anyway.
4945 */
4946 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4947 spin_unlock(&eb->refs_lock);
4948 return 0;
4949 }
4950
4951 return release_extent_buffer(eb);
4952}
4953
4954/*
4955 * Attempt to readahead a child block.
4956 *
4957 * @fs_info: the fs_info
4958 * @bytenr: bytenr to read
4959 * @owner_root: objectid of the root that owns this eb
4960 * @gen: generation for the uptodate check, can be 0
4961 * @level: level for the eb
4962 *
4963 * Attempt to readahead a tree block at @bytenr. If @gen is 0 then we do a
4964 * normal uptodate check of the eb, without checking the generation. If we have
4965 * to read the block we will not block on anything.
4966 */
4967void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
4968 u64 bytenr, u64 owner_root, u64 gen, int level)
4969{
4970 struct btrfs_tree_parent_check check = {
4971 .has_first_key = 0,
4972 .level = level,
4973 .transid = gen
4974 };
4975 struct extent_buffer *eb;
4976 int ret;
4977
4978 eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
4979 if (IS_ERR(eb))
4980 return;
4981
4982 if (btrfs_buffer_uptodate(eb, gen, 1)) {
4983 free_extent_buffer(eb);
4984 return;
4985 }
4986
4987 ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check);
4988 if (ret < 0)
4989 free_extent_buffer_stale(eb);
4990 else
4991 free_extent_buffer(eb);
4992}
4993
4994/*
4995 * Readahead a node's child block.
4996 *
4997 * @node: parent node we're reading from
4998 * @slot: slot in the parent node for the child we want to read
4999 *
5000 * A helper for btrfs_readahead_tree_block(); we simply read the bytenr
5001 * pointed to by the slot in the node provided.
5002 */
5003void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
5004{
5005 btrfs_readahead_tree_block(node->fs_info,
5006 btrfs_node_blockptr(node, slot),
5007 btrfs_header_owner(node),
5008 btrfs_node_ptr_generation(node, slot),
5009 btrfs_header_level(node) - 1);
5010}
1#include <linux/bitops.h>
2#include <linux/slab.h>
3#include <linux/bio.h>
4#include <linux/mm.h>
5#include <linux/pagemap.h>
6#include <linux/page-flags.h>
7#include <linux/spinlock.h>
8#include <linux/blkdev.h>
9#include <linux/swap.h>
10#include <linux/writeback.h>
11#include <linux/pagevec.h>
12#include <linux/prefetch.h>
13#include <linux/cleancache.h>
14#include "extent_io.h"
15#include "extent_map.h"
16#include "ctree.h"
17#include "btrfs_inode.h"
18#include "volumes.h"
19#include "check-integrity.h"
20#include "locking.h"
21#include "rcu-string.h"
22#include "backref.h"
23
24static struct kmem_cache *extent_state_cache;
25static struct kmem_cache *extent_buffer_cache;
26static struct bio_set *btrfs_bioset;
27
28#ifdef CONFIG_BTRFS_DEBUG
29static LIST_HEAD(buffers);
30static LIST_HEAD(states);
31
32static DEFINE_SPINLOCK(leak_lock);
33
34static inline
35void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
36{
37 unsigned long flags;
38
39 spin_lock_irqsave(&leak_lock, flags);
40 list_add(new, head);
41 spin_unlock_irqrestore(&leak_lock, flags);
42}
43
44static inline
45void btrfs_leak_debug_del(struct list_head *entry)
46{
47 unsigned long flags;
48
49 spin_lock_irqsave(&leak_lock, flags);
50 list_del(entry);
51 spin_unlock_irqrestore(&leak_lock, flags);
52}
53
54static inline
55void btrfs_leak_debug_check(void)
56{
57 struct extent_state *state;
58 struct extent_buffer *eb;
59
60 while (!list_empty(&states)) {
61 state = list_entry(states.next, struct extent_state, leak_list);
62 printk(KERN_ERR "BTRFS: state leak: start %llu end %llu "
63 "state %lu in tree %p refs %d\n",
64 state->start, state->end, state->state, state->tree,
65 atomic_read(&state->refs));
66 list_del(&state->leak_list);
67 kmem_cache_free(extent_state_cache, state);
68 }
69
70 while (!list_empty(&buffers)) {
71 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
72 printk(KERN_ERR "BTRFS: buffer leak start %llu len %lu "
73 "refs %d\n",
74 eb->start, eb->len, atomic_read(&eb->refs));
75 list_del(&eb->leak_list);
76 kmem_cache_free(extent_buffer_cache, eb);
77 }
78}
79
80#define btrfs_debug_check_extent_io_range(tree, start, end) \
81 __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
82static inline void __btrfs_debug_check_extent_io_range(const char *caller,
83 struct extent_io_tree *tree, u64 start, u64 end)
84{
85 struct inode *inode;
86 u64 isize;
87
88 if (!tree->mapping)
89 return;
90
91 inode = tree->mapping->host;
92 isize = i_size_read(inode);
93 if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
94 printk_ratelimited(KERN_DEBUG
95 "BTRFS: %s: ino %llu isize %llu odd range [%llu,%llu]\n",
96 caller, btrfs_ino(inode), isize, start, end);
97 }
98}
99#else
100#define btrfs_leak_debug_add(new, head) do {} while (0)
101#define btrfs_leak_debug_del(entry) do {} while (0)
102#define btrfs_leak_debug_check() do {} while (0)
103#define btrfs_debug_check_extent_io_range(c, s, e) do {} while (0)
104#endif
105
106#define BUFFER_LRU_MAX 64
107
108struct tree_entry {
109 u64 start;
110 u64 end;
111 struct rb_node rb_node;
112};
113
114struct extent_page_data {
115 struct bio *bio;
116 struct extent_io_tree *tree;
117 get_extent_t *get_extent;
118 unsigned long bio_flags;
119
120 /* tells writepage not to lock the state bits for this range
121 * it still does the unlocking
122 */
123 unsigned int extent_locked:1;
124
125 /* tells the submit_bio code to use a WRITE_SYNC */
126 unsigned int sync_io:1;
127};
128
129static noinline void flush_write_bio(void *data);
130static inline struct btrfs_fs_info *
131tree_fs_info(struct extent_io_tree *tree)
132{
133 if (!tree->mapping)
134 return NULL;
135 return btrfs_sb(tree->mapping->host->i_sb);
136}
137
138int __init extent_io_init(void)
139{
140 extent_state_cache = kmem_cache_create("btrfs_extent_state",
141 sizeof(struct extent_state), 0,
142 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
143 if (!extent_state_cache)
144 return -ENOMEM;
145
146 extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
147 sizeof(struct extent_buffer), 0,
148 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
149 if (!extent_buffer_cache)
150 goto free_state_cache;
151
152 btrfs_bioset = bioset_create(BIO_POOL_SIZE,
153 offsetof(struct btrfs_io_bio, bio));
154 if (!btrfs_bioset)
155 goto free_buffer_cache;
156
157 if (bioset_integrity_create(btrfs_bioset, BIO_POOL_SIZE))
158 goto free_bioset;
159
160 return 0;
161
162free_bioset:
163 bioset_free(btrfs_bioset);
164 btrfs_bioset = NULL;
165
166free_buffer_cache:
167 kmem_cache_destroy(extent_buffer_cache);
168 extent_buffer_cache = NULL;
169
170free_state_cache:
171 kmem_cache_destroy(extent_state_cache);
172 extent_state_cache = NULL;
173 return -ENOMEM;
174}
175
176void extent_io_exit(void)
177{
178 btrfs_leak_debug_check();
179
180 /*
181 * Make sure all delayed rcu free are flushed before we
182 * destroy caches.
183 */
184 rcu_barrier();
185 if (extent_state_cache)
186 kmem_cache_destroy(extent_state_cache);
187 if (extent_buffer_cache)
188 kmem_cache_destroy(extent_buffer_cache);
189 if (btrfs_bioset)
190 bioset_free(btrfs_bioset);
191}
192
193void extent_io_tree_init(struct extent_io_tree *tree,
194 struct address_space *mapping)
195{
196 tree->state = RB_ROOT;
197 tree->ops = NULL;
198 tree->dirty_bytes = 0;
199 spin_lock_init(&tree->lock);
200 tree->mapping = mapping;
201}
202
203static struct extent_state *alloc_extent_state(gfp_t mask)
204{
205 struct extent_state *state;
206
207 state = kmem_cache_alloc(extent_state_cache, mask);
208 if (!state)
209 return state;
210 state->state = 0;
211 state->private = 0;
212 state->tree = NULL;
213 btrfs_leak_debug_add(&state->leak_list, &states);
214 atomic_set(&state->refs, 1);
215 init_waitqueue_head(&state->wq);
216 trace_alloc_extent_state(state, mask, _RET_IP_);
217 return state;
218}
219
220void free_extent_state(struct extent_state *state)
221{
222 if (!state)
223 return;
224 if (atomic_dec_and_test(&state->refs)) {
225 WARN_ON(state->tree);
226 btrfs_leak_debug_del(&state->leak_list);
227 trace_free_extent_state(state, _RET_IP_);
228 kmem_cache_free(extent_state_cache, state);
229 }
230}
231
232static struct rb_node *tree_insert(struct rb_root *root,
233 struct rb_node *search_start,
234 u64 offset,
235 struct rb_node *node,
236 struct rb_node ***p_in,
237 struct rb_node **parent_in)
238{
239 struct rb_node **p;
240 struct rb_node *parent = NULL;
241 struct tree_entry *entry;
242
243 if (p_in && parent_in) {
244 p = *p_in;
245 parent = *parent_in;
246 goto do_insert;
247 }
248
249 p = search_start ? &search_start : &root->rb_node;
250 while (*p) {
251 parent = *p;
252 entry = rb_entry(parent, struct tree_entry, rb_node);
253
254 if (offset < entry->start)
255 p = &(*p)->rb_left;
256 else if (offset > entry->end)
257 p = &(*p)->rb_right;
258 else
259 return parent;
260 }
261
262do_insert:
263 rb_link_node(node, parent, p);
264 rb_insert_color(node, root);
265 return NULL;
266}
267
268static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
269 struct rb_node **prev_ret,
270 struct rb_node **next_ret,
271 struct rb_node ***p_ret,
272 struct rb_node **parent_ret)
273{
274 struct rb_root *root = &tree->state;
275 struct rb_node **n = &root->rb_node;
276 struct rb_node *prev = NULL;
277 struct rb_node *orig_prev = NULL;
278 struct tree_entry *entry;
279 struct tree_entry *prev_entry = NULL;
280
281 while (*n) {
282 prev = *n;
283 entry = rb_entry(prev, struct tree_entry, rb_node);
284 prev_entry = entry;
285
286 if (offset < entry->start)
287 n = &(*n)->rb_left;
288 else if (offset > entry->end)
289 n = &(*n)->rb_right;
290 else
291 return *n;
292 }
293
294 if (p_ret)
295 *p_ret = n;
296 if (parent_ret)
297 *parent_ret = prev;
298
299 if (prev_ret) {
300 orig_prev = prev;
301 while (prev && offset > prev_entry->end) {
302 prev = rb_next(prev);
303 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
304 }
305 *prev_ret = prev;
306 prev = orig_prev;
307 }
308
309 if (next_ret) {
310 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
311 while (prev && offset < prev_entry->start) {
312 prev = rb_prev(prev);
313 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
314 }
315 *next_ret = prev;
316 }
317 return NULL;
318}
319
320static inline struct rb_node *
321tree_search_for_insert(struct extent_io_tree *tree,
322 u64 offset,
323 struct rb_node ***p_ret,
324 struct rb_node **parent_ret)
325{
326 struct rb_node *prev = NULL;
327 struct rb_node *ret;
328
329 ret = __etree_search(tree, offset, &prev, NULL, p_ret, parent_ret);
330 if (!ret)
331 return prev;
332 return ret;
333}
334
335static inline struct rb_node *tree_search(struct extent_io_tree *tree,
336 u64 offset)
337{
338 return tree_search_for_insert(tree, offset, NULL, NULL);
339}
340
341static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
342 struct extent_state *other)
343{
344 if (tree->ops && tree->ops->merge_extent_hook)
345 tree->ops->merge_extent_hook(tree->mapping->host, new,
346 other);
347}
348
349/*
350 * utility function to look for merge candidates inside a given range.
351 * Any extents with matching state are merged together into a single
352 * extent in the tree. Extents with EXTENT_IO in their state field
353 * are not merged because the end_io handlers need to be able to do
354 * operations on them without sleeping (or doing allocations/splits).
355 *
356 * This should be called with the tree lock held.
357 */
358static void merge_state(struct extent_io_tree *tree,
359 struct extent_state *state)
360{
361 struct extent_state *other;
362 struct rb_node *other_node;
363
364 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
365 return;
366
367 other_node = rb_prev(&state->rb_node);
368 if (other_node) {
369 other = rb_entry(other_node, struct extent_state, rb_node);
370 if (other->end == state->start - 1 &&
371 other->state == state->state) {
372 merge_cb(tree, state, other);
373 state->start = other->start;
374 other->tree = NULL;
375 rb_erase(&other->rb_node, &tree->state);
376 free_extent_state(other);
377 }
378 }
379 other_node = rb_next(&state->rb_node);
380 if (other_node) {
381 other = rb_entry(other_node, struct extent_state, rb_node);
382 if (other->start == state->end + 1 &&
383 other->state == state->state) {
384 merge_cb(tree, state, other);
385 state->end = other->end;
386 other->tree = NULL;
387 rb_erase(&other->rb_node, &tree->state);
388 free_extent_state(other);
389 }
390 }
391}
392
393static void set_state_cb(struct extent_io_tree *tree,
394 struct extent_state *state, unsigned long *bits)
395{
396 if (tree->ops && tree->ops->set_bit_hook)
397 tree->ops->set_bit_hook(tree->mapping->host, state, bits);
398}
399
400static void clear_state_cb(struct extent_io_tree *tree,
401 struct extent_state *state, unsigned long *bits)
402{
403 if (tree->ops && tree->ops->clear_bit_hook)
404 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
405}
406
407static void set_state_bits(struct extent_io_tree *tree,
408 struct extent_state *state, unsigned long *bits);
409
410/*
411 * insert an extent_state struct into the tree. 'bits' are set on the
412 * struct before it is inserted.
413 *
414 * This may return -EEXIST if the extent is already there, in which case the
415 * state struct is freed.
416 *
417 * The tree lock is not taken internally. This is a utility function and
418 * probably isn't what you want to call (see set/clear_extent_bit).
419 */
420static int insert_state(struct extent_io_tree *tree,
421 struct extent_state *state, u64 start, u64 end,
422 struct rb_node ***p,
423 struct rb_node **parent,
424 unsigned long *bits)
425{
426 struct rb_node *node;
427
428 if (end < start)
429 WARN(1, KERN_ERR "BTRFS: end < start %llu %llu\n",
430 end, start);
431 state->start = start;
432 state->end = end;
433
434 set_state_bits(tree, state, bits);
435
436 node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
437 if (node) {
438 struct extent_state *found;
439 found = rb_entry(node, struct extent_state, rb_node);
440 printk(KERN_ERR "BTRFS: found node %llu %llu on insert of "
441 "%llu %llu\n",
442 found->start, found->end, start, end);
443 return -EEXIST;
444 }
445 state->tree = tree;
446 merge_state(tree, state);
447 return 0;
448}
449
450static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
451 u64 split)
452{
453 if (tree->ops && tree->ops->split_extent_hook)
454 tree->ops->split_extent_hook(tree->mapping->host, orig, split);
455}
456
457/*
458 * split a given extent state struct in two, inserting the preallocated
459 * struct 'prealloc' as the new lower half. 'split' indicates an
460 * offset inside 'orig' where it should be split.
461 *
462 * Before calling, the tree has 'orig' at [orig->start, orig->end].
463 * After calling, there are two extent state structs in the tree:
464 *
465 * prealloc: [orig->start, split - 1]
466 * orig:     [split, orig->end]
467 *
468 * The tree locks are not taken by this function. They need to be held
469 * by the caller.
470 */
471static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
472 struct extent_state *prealloc, u64 split)
473{
474 struct rb_node *node;
475
476 split_cb(tree, orig, split);
477
478 prealloc->start = orig->start;
479 prealloc->end = split - 1;
480 prealloc->state = orig->state;
481 orig->start = split;
482
483 node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
484 &prealloc->rb_node, NULL, NULL);
485 if (node) {
486 free_extent_state(prealloc);
487 return -EEXIST;
488 }
489 prealloc->tree = tree;
490 return 0;
491}
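
/*
 * Worked illustration for split_state() above (hypothetical offsets): with
 * 'orig' covering [0, 16383] and split == 8192, the call leaves 'prealloc'
 * at [0, 8191] and shrinks 'orig' to [8192, 16383]; both keep the same bits.
 */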
492
493static struct extent_state *next_state(struct extent_state *state)
494{
495 struct rb_node *next = rb_next(&state->rb_node);
496 if (next)
497 return rb_entry(next, struct extent_state, rb_node);
498 else
499 return NULL;
500}
501
502/*
503 * utility function to clear some bits in an extent state struct.
504 * it will optionally wake up anyone waiting on this state (wake == 1).
505 *
506 * If no bits are set on the state struct after clearing things, the
507 * struct is removed from the tree and freed.
508 */
509static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
510 struct extent_state *state,
511 unsigned long *bits, int wake)
512{
513 struct extent_state *next;
514 unsigned long bits_to_clear = *bits & ~EXTENT_CTLBITS;
515
516 if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
517 u64 range = state->end - state->start + 1;
518 WARN_ON(range > tree->dirty_bytes);
519 tree->dirty_bytes -= range;
520 }
521 clear_state_cb(tree, state, bits);
522 state->state &= ~bits_to_clear;
523 if (wake)
524 wake_up(&state->wq);
525 if (state->state == 0) {
526 next = next_state(state);
527 if (state->tree) {
528 rb_erase(&state->rb_node, &tree->state);
529 state->tree = NULL;
530 free_extent_state(state);
531 } else {
532 WARN_ON(1);
533 }
534 } else {
535 merge_state(tree, state);
536 next = next_state(state);
537 }
538 return next;
539}
540
541static struct extent_state *
542alloc_extent_state_atomic(struct extent_state *prealloc)
543{
544 if (!prealloc)
545 prealloc = alloc_extent_state(GFP_ATOMIC);
546
547 return prealloc;
548}
549
550static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
551{
552 btrfs_panic(tree_fs_info(tree), err, "Locking error: "
553 "Extent tree was modified by another "
554 "thread while locked.");
555}
556
557/*
558 * clear some bits on a range in the tree. This may require splitting
559 * or inserting elements in the tree, so the gfp mask is used to
560 * indicate which allocations or sleeping are allowed.
561 *
562 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
563 * the given range from the tree regardless of state (i.e. for truncate).
564 *
565 * the range [start, end] is inclusive.
566 *
567 * This takes the tree lock, and returns 0 on success and < 0 on error.
568 */
569int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
570 unsigned long bits, int wake, int delete,
571 struct extent_state **cached_state,
572 gfp_t mask)
573{
574 struct extent_state *state;
575 struct extent_state *cached;
576 struct extent_state *prealloc = NULL;
577 struct rb_node *node;
578 u64 last_end;
579 int err;
580 int clear = 0;
581
582 btrfs_debug_check_extent_io_range(tree, start, end);
583
584 if (bits & EXTENT_DELALLOC)
585 bits |= EXTENT_NORESERVE;
586
587 if (delete)
588 bits |= ~EXTENT_CTLBITS;
589 bits |= EXTENT_FIRST_DELALLOC;
590
591 if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
592 clear = 1;
593again:
594 if (!prealloc && (mask & __GFP_WAIT)) {
595 prealloc = alloc_extent_state(mask);
596 if (!prealloc)
597 return -ENOMEM;
598 }
599
600 spin_lock(&tree->lock);
601 if (cached_state) {
602 cached = *cached_state;
603
604 if (clear) {
605 *cached_state = NULL;
606 cached_state = NULL;
607 }
608
609 if (cached && cached->tree && cached->start <= start &&
610 cached->end > start) {
611 if (clear)
612 atomic_dec(&cached->refs);
613 state = cached;
614 goto hit_next;
615 }
616 if (clear)
617 free_extent_state(cached);
618 }
619 /*
620 * this search will find the extents that end after
621 * our range starts
622 */
623 node = tree_search(tree, start);
624 if (!node)
625 goto out;
626 state = rb_entry(node, struct extent_state, rb_node);
627hit_next:
628 if (state->start > end)
629 goto out;
630 WARN_ON(state->end < start);
631 last_end = state->end;
632
633 /* the state doesn't have the wanted bits, go ahead */
634 if (!(state->state & bits)) {
635 state = next_state(state);
636 goto next;
637 }
638
639 /*
640 * | ---- desired range ---- |
641 * | state | or
642 * | ------------- state -------------- |
643 *
644 * We need to split the extent we found, and may flip
645 * bits on second half.
646 *
647 * If the extent we found extends past our range, we
648 * just split and search again. It'll get split again
649 * the next time though.
650 *
651 * If the extent we found is inside our range, we clear
652 * the desired bit on it.
653 */
654
655 if (state->start < start) {
656 prealloc = alloc_extent_state_atomic(prealloc);
657 BUG_ON(!prealloc);
658 err = split_state(tree, state, prealloc, start);
659 if (err)
660 extent_io_tree_panic(tree, err);
661
662 prealloc = NULL;
663 if (err)
664 goto out;
665 if (state->end <= end) {
666 state = clear_state_bit(tree, state, &bits, wake);
667 goto next;
668 }
669 goto search_again;
670 }
671 /*
672 * | ---- desired range ---- |
673 * | state |
674 * We need to split the extent, and clear the bit
675 * on the first half
676 */
677 if (state->start <= end && state->end > end) {
678 prealloc = alloc_extent_state_atomic(prealloc);
679 BUG_ON(!prealloc);
680 err = split_state(tree, state, prealloc, end + 1);
681 if (err)
682 extent_io_tree_panic(tree, err);
683
684 if (wake)
685 wake_up(&state->wq);
686
687 clear_state_bit(tree, prealloc, &bits, wake);
688
689 prealloc = NULL;
690 goto out;
691 }
692
693 state = clear_state_bit(tree, state, &bits, wake);
694next:
695 if (last_end == (u64)-1)
696 goto out;
697 start = last_end + 1;
698 if (start <= end && state && !need_resched())
699 goto hit_next;
700 goto search_again;
701
702out:
703 spin_unlock(&tree->lock);
704 if (prealloc)
705 free_extent_state(prealloc);
706
707 return 0;
708
709search_again:
710 if (start > end)
711 goto out;
712 spin_unlock(&tree->lock);
713 if (mask & __GFP_WAIT)
714 cond_resched();
715 goto again;
716}
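
/*
 * Minimal usage sketch for clear_extent_bit(): the helper below is purely
 * illustrative and has no callers; the offsets and the preexisting state are
 * assumptions made up for the example.  Starting from a single [0, 16383]
 * state with EXTENT_DELALLOC set, clearing the middle 4K splits it twice and
 * leaves the bit set only on [0, 4095] and [8192, 16383]; the fully cleared
 * middle state is freed.
 */
static void __maybe_unused example_clear_middle(struct extent_io_tree *tree)
{
	clear_extent_bit(tree, 4096, 8191, EXTENT_DELALLOC, 0, 0, NULL,
			 GFP_NOFS);
}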
717
718static void wait_on_state(struct extent_io_tree *tree,
719 struct extent_state *state)
720 __releases(tree->lock)
721 __acquires(tree->lock)
722{
723 DEFINE_WAIT(wait);
724 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
725 spin_unlock(&tree->lock);
726 schedule();
727 spin_lock(&tree->lock);
728 finish_wait(&state->wq, &wait);
729}
730
731/*
732 * waits for one or more bits to clear on a range in the state tree.
733 * The range [start, end] is inclusive.
734 * The tree lock is taken by this function
735 */
736static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
737 unsigned long bits)
738{
739 struct extent_state *state;
740 struct rb_node *node;
741
742 btrfs_debug_check_extent_io_range(tree, start, end);
743
744 spin_lock(&tree->lock);
745again:
746 while (1) {
747 /*
748 * this search will find all the extents that end after
749 * our range starts
750 */
751 node = tree_search(tree, start);
752process_node:
753 if (!node)
754 break;
755
756 state = rb_entry(node, struct extent_state, rb_node);
757
758 if (state->start > end)
759 goto out;
760
761 if (state->state & bits) {
762 start = state->start;
763 atomic_inc(&state->refs);
764 wait_on_state(tree, state);
765 free_extent_state(state);
766 goto again;
767 }
768 start = state->end + 1;
769
770 if (start > end)
771 break;
772
773 if (!cond_resched_lock(&tree->lock)) {
774 node = rb_next(node);
775 goto process_node;
776 }
777 }
778out:
779 spin_unlock(&tree->lock);
780}
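
/*
 * Minimal usage sketch for wait_extent_bit(): an illustrative helper with no
 * callers.  It simply blocks until no byte in [start, end] carries
 * EXTENT_LOCKED any more; lock_extent_bits() below uses the same primitive
 * when __set_extent_bit() reports a collision.
 */
static void __maybe_unused example_wait_range_unlocked(struct extent_io_tree *tree,
							u64 start, u64 end)
{
	wait_extent_bit(tree, start, end, EXTENT_LOCKED);
}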
781
782static void set_state_bits(struct extent_io_tree *tree,
783 struct extent_state *state,
784 unsigned long *bits)
785{
786 unsigned long bits_to_set = *bits & ~EXTENT_CTLBITS;
787
788 set_state_cb(tree, state, bits);
789 if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
790 u64 range = state->end - state->start + 1;
791 tree->dirty_bytes += range;
792 }
793 state->state |= bits_to_set;
794}
795
796static void cache_state(struct extent_state *state,
797 struct extent_state **cached_ptr)
798{
799 if (cached_ptr && !(*cached_ptr)) {
800 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
801 *cached_ptr = state;
802 atomic_inc(&state->refs);
803 }
804 }
805}
806
807/*
808 * set some bits on a range in the tree. This may require allocations or
809 * sleeping, so the gfp mask is used to indicate what is allowed.
810 *
811 * If any of the exclusive bits are set, this will fail with -EEXIST if some
812 * part of the range already has the desired bits set. The start of the
813 * existing range is returned in failed_start in this case.
814 *
815 * [start, end] is inclusive. This takes the tree lock.
816 */
817
818static int __must_check
819__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
820 unsigned long bits, unsigned long exclusive_bits,
821 u64 *failed_start, struct extent_state **cached_state,
822 gfp_t mask)
823{
824 struct extent_state *state;
825 struct extent_state *prealloc = NULL;
826 struct rb_node *node;
827 struct rb_node **p;
828 struct rb_node *parent;
829 int err = 0;
830 u64 last_start;
831 u64 last_end;
832
833 btrfs_debug_check_extent_io_range(tree, start, end);
834
835 bits |= EXTENT_FIRST_DELALLOC;
836again:
837 if (!prealloc && (mask & __GFP_WAIT)) {
838 prealloc = alloc_extent_state(mask);
839 BUG_ON(!prealloc);
840 }
841
842 spin_lock(&tree->lock);
843 if (cached_state && *cached_state) {
844 state = *cached_state;
845 if (state->start <= start && state->end > start &&
846 state->tree) {
847 node = &state->rb_node;
848 goto hit_next;
849 }
850 }
851 /*
852 * this search will find all the extents that end after
853 * our range starts.
854 */
855 node = tree_search_for_insert(tree, start, &p, &parent);
856 if (!node) {
857 prealloc = alloc_extent_state_atomic(prealloc);
858 BUG_ON(!prealloc);
859 err = insert_state(tree, prealloc, start, end,
860 &p, &parent, &bits);
861 if (err)
862 extent_io_tree_panic(tree, err);
863
864 cache_state(prealloc, cached_state);
865 prealloc = NULL;
866 goto out;
867 }
868 state = rb_entry(node, struct extent_state, rb_node);
869hit_next:
870 last_start = state->start;
871 last_end = state->end;
872
873 /*
874 * | ---- desired range ---- |
875 * | state |
876 *
877 * Just lock what we found and keep going
878 */
879 if (state->start == start && state->end <= end) {
880 if (state->state & exclusive_bits) {
881 *failed_start = state->start;
882 err = -EEXIST;
883 goto out;
884 }
885
886 set_state_bits(tree, state, &bits);
887 cache_state(state, cached_state);
888 merge_state(tree, state);
889 if (last_end == (u64)-1)
890 goto out;
891 start = last_end + 1;
892 state = next_state(state);
893 if (start < end && state && state->start == start &&
894 !need_resched())
895 goto hit_next;
896 goto search_again;
897 }
898
899 /*
900 * | ---- desired range ---- |
901 * | state |
902 * or
903 * | ------------- state -------------- |
904 *
905 * We need to split the extent we found, and may flip bits on
906 * second half.
907 *
908 * If the extent we found extends past our
909 * range, we just split and search again. It'll get split
910 * again the next time though.
911 *
912 * If the extent we found is inside our range, we set the
913 * desired bit on it.
914 */
915 if (state->start < start) {
916 if (state->state & exclusive_bits) {
917 *failed_start = start;
918 err = -EEXIST;
919 goto out;
920 }
921
922 prealloc = alloc_extent_state_atomic(prealloc);
923 BUG_ON(!prealloc);
924 err = split_state(tree, state, prealloc, start);
925 if (err)
926 extent_io_tree_panic(tree, err);
927
928 prealloc = NULL;
929 if (err)
930 goto out;
931 if (state->end <= end) {
932 set_state_bits(tree, state, &bits);
933 cache_state(state, cached_state);
934 merge_state(tree, state);
935 if (last_end == (u64)-1)
936 goto out;
937 start = last_end + 1;
938 state = next_state(state);
939 if (start < end && state && state->start == start &&
940 !need_resched())
941 goto hit_next;
942 }
943 goto search_again;
944 }
945 /*
946 * | ---- desired range ---- |
947 * | state | or | state |
948 *
949 * There's a hole, we need to insert something in it and
950 * ignore the extent we found.
951 */
952 if (state->start > start) {
953 u64 this_end;
954 if (end < last_start)
955 this_end = end;
956 else
957 this_end = last_start - 1;
958
959 prealloc = alloc_extent_state_atomic(prealloc);
960 BUG_ON(!prealloc);
961
962 /*
963		 * Avoid freeing 'prealloc' if it can be merged with
964 * the later extent.
965 */
966 err = insert_state(tree, prealloc, start, this_end,
967 NULL, NULL, &bits);
968 if (err)
969 extent_io_tree_panic(tree, err);
970
971 cache_state(prealloc, cached_state);
972 prealloc = NULL;
973 start = this_end + 1;
974 goto search_again;
975 }
976 /*
977 * | ---- desired range ---- |
978 * | state |
979 * We need to split the extent, and set the bit
980 * on the first half
981 */
982 if (state->start <= end && state->end > end) {
983 if (state->state & exclusive_bits) {
984 *failed_start = start;
985 err = -EEXIST;
986 goto out;
987 }
988
989 prealloc = alloc_extent_state_atomic(prealloc);
990 BUG_ON(!prealloc);
991 err = split_state(tree, state, prealloc, end + 1);
992 if (err)
993 extent_io_tree_panic(tree, err);
994
995 set_state_bits(tree, prealloc, &bits);
996 cache_state(prealloc, cached_state);
997 merge_state(tree, prealloc);
998 prealloc = NULL;
999 goto out;
1000 }
1001
1002 goto search_again;
1003
1004out:
1005 spin_unlock(&tree->lock);
1006 if (prealloc)
1007 free_extent_state(prealloc);
1008
1009 return err;
1010
1011search_again:
1012 if (start > end)
1013 goto out;
1014 spin_unlock(&tree->lock);
1015 if (mask & __GFP_WAIT)
1016 cond_resched();
1017 goto again;
1018}
1019
1020int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1021 unsigned long bits, u64 * failed_start,
1022 struct extent_state **cached_state, gfp_t mask)
1023{
1024 return __set_extent_bit(tree, start, end, bits, 0, failed_start,
1025 cached_state, mask);
1026}
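
/*
 * Usage sketch for the exclusive-bits behavior documented above
 * __set_extent_bit(): an illustrative helper with no callers.  Passing
 * EXTENT_LOCKED as an exclusive bit makes the call fail with -EEXIST when any
 * byte of the range is already locked, and 'failed_start' reports where the
 * collision begins.  This mirrors what try_lock_extent() and
 * lock_extent_bits() do further down.
 */
static int __maybe_unused example_try_lock_range(struct extent_io_tree *tree,
						 u64 start, u64 end)
{
	u64 failed_start;
	int err;

	err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
			       &failed_start, NULL, GFP_NOFS);
	if (err == -EEXIST)
		return -EAGAIN;	/* someone holds part of the range from failed_start on */
	return err;
}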
1027
1028
1029/**
1030 * convert_extent_bit - convert all bits in a given range from one bit to
1031 * another
1032 * @tree: the io tree to search
1033 * @start: the start offset in bytes
1034 * @end: the end offset in bytes (inclusive)
1035 * @bits: the bits to set in this range
1036 * @clear_bits: the bits to clear in this range
1037 * @cached_state: state that we're going to cache
1038 * @mask: the allocation mask
1039 *
1040 * This will go through and set bits for the given range. If any states exist
1041 * already in this range they are set with the given bit and cleared of the
1042 * clear_bits. This is only meant to be used by things that are mergeable, i.e.
1043 * converting from, say, DELALLOC to DIRTY. This is not meant to be used with
1044 * boundary bits like EXTENT_LOCKED.
1045 */
1046int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1047 unsigned long bits, unsigned long clear_bits,
1048 struct extent_state **cached_state, gfp_t mask)
1049{
1050 struct extent_state *state;
1051 struct extent_state *prealloc = NULL;
1052 struct rb_node *node;
1053 struct rb_node **p;
1054 struct rb_node *parent;
1055 int err = 0;
1056 u64 last_start;
1057 u64 last_end;
1058
1059 btrfs_debug_check_extent_io_range(tree, start, end);
1060
1061again:
1062 if (!prealloc && (mask & __GFP_WAIT)) {
1063 prealloc = alloc_extent_state(mask);
1064 if (!prealloc)
1065 return -ENOMEM;
1066 }
1067
1068 spin_lock(&tree->lock);
1069 if (cached_state && *cached_state) {
1070 state = *cached_state;
1071 if (state->start <= start && state->end > start &&
1072 state->tree) {
1073 node = &state->rb_node;
1074 goto hit_next;
1075 }
1076 }
1077
1078 /*
1079 * this search will find all the extents that end after
1080 * our range starts.
1081 */
1082 node = tree_search_for_insert(tree, start, &p, &parent);
1083 if (!node) {
1084 prealloc = alloc_extent_state_atomic(prealloc);
1085 if (!prealloc) {
1086 err = -ENOMEM;
1087 goto out;
1088 }
1089 err = insert_state(tree, prealloc, start, end,
1090 &p, &parent, &bits);
1091 if (err)
1092 extent_io_tree_panic(tree, err);
1093 cache_state(prealloc, cached_state);
1094 prealloc = NULL;
1095 goto out;
1096 }
1097 state = rb_entry(node, struct extent_state, rb_node);
1098hit_next:
1099 last_start = state->start;
1100 last_end = state->end;
1101
1102 /*
1103 * | ---- desired range ---- |
1104 * | state |
1105 *
1106 * Just lock what we found and keep going
1107 */
1108 if (state->start == start && state->end <= end) {
1109 set_state_bits(tree, state, &bits);
1110 cache_state(state, cached_state);
1111 state = clear_state_bit(tree, state, &clear_bits, 0);
1112 if (last_end == (u64)-1)
1113 goto out;
1114 start = last_end + 1;
1115 if (start < end && state && state->start == start &&
1116 !need_resched())
1117 goto hit_next;
1118 goto search_again;
1119 }
1120
1121 /*
1122 * | ---- desired range ---- |
1123 * | state |
1124 * or
1125 * | ------------- state -------------- |
1126 *
1127 * We need to split the extent we found, and may flip bits on
1128 * second half.
1129 *
1130 * If the extent we found extends past our
1131 * range, we just split and search again. It'll get split
1132 * again the next time though.
1133 *
1134 * If the extent we found is inside our range, we set the
1135 * desired bit on it.
1136 */
1137 if (state->start < start) {
1138 prealloc = alloc_extent_state_atomic(prealloc);
1139 if (!prealloc) {
1140 err = -ENOMEM;
1141 goto out;
1142 }
1143 err = split_state(tree, state, prealloc, start);
1144 if (err)
1145 extent_io_tree_panic(tree, err);
1146 prealloc = NULL;
1147 if (err)
1148 goto out;
1149 if (state->end <= end) {
1150 set_state_bits(tree, state, &bits);
1151 cache_state(state, cached_state);
1152 state = clear_state_bit(tree, state, &clear_bits, 0);
1153 if (last_end == (u64)-1)
1154 goto out;
1155 start = last_end + 1;
1156 if (start < end && state && state->start == start &&
1157 !need_resched())
1158 goto hit_next;
1159 }
1160 goto search_again;
1161 }
1162 /*
1163 * | ---- desired range ---- |
1164 * | state | or | state |
1165 *
1166 * There's a hole, we need to insert something in it and
1167 * ignore the extent we found.
1168 */
1169 if (state->start > start) {
1170 u64 this_end;
1171 if (end < last_start)
1172 this_end = end;
1173 else
1174 this_end = last_start - 1;
1175
1176 prealloc = alloc_extent_state_atomic(prealloc);
1177 if (!prealloc) {
1178 err = -ENOMEM;
1179 goto out;
1180 }
1181
1182 /*
1183		 * Avoid freeing 'prealloc' if it can be merged with
1184 * the later extent.
1185 */
1186 err = insert_state(tree, prealloc, start, this_end,
1187 NULL, NULL, &bits);
1188 if (err)
1189 extent_io_tree_panic(tree, err);
1190 cache_state(prealloc, cached_state);
1191 prealloc = NULL;
1192 start = this_end + 1;
1193 goto search_again;
1194 }
1195 /*
1196 * | ---- desired range ---- |
1197 * | state |
1198 * We need to split the extent, and set the bit
1199 * on the first half
1200 */
1201 if (state->start <= end && state->end > end) {
1202 prealloc = alloc_extent_state_atomic(prealloc);
1203 if (!prealloc) {
1204 err = -ENOMEM;
1205 goto out;
1206 }
1207
1208 err = split_state(tree, state, prealloc, end + 1);
1209 if (err)
1210 extent_io_tree_panic(tree, err);
1211
1212 set_state_bits(tree, prealloc, &bits);
1213 cache_state(prealloc, cached_state);
1214 clear_state_bit(tree, prealloc, &clear_bits, 0);
1215 prealloc = NULL;
1216 goto out;
1217 }
1218
1219 goto search_again;
1220
1221out:
1222 spin_unlock(&tree->lock);
1223 if (prealloc)
1224 free_extent_state(prealloc);
1225
1226 return err;
1227
1228search_again:
1229 if (start > end)
1230 goto out;
1231 spin_unlock(&tree->lock);
1232 if (mask & __GFP_WAIT)
1233 cond_resched();
1234 goto again;
1235}
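
/*
 * Minimal usage sketch for convert_extent_bit(): an illustrative helper with
 * no callers, using the DELALLOC-to-DIRTY conversion mentioned in the comment
 * above.  The cached state, if any, is dropped again before returning.
 */
static int __maybe_unused example_convert_delalloc_to_dirty(struct extent_io_tree *tree,
							    u64 start, u64 end)
{
	struct extent_state *cached = NULL;
	int ret;

	ret = convert_extent_bit(tree, start, end, EXTENT_DIRTY,
				 EXTENT_DELALLOC, &cached, GFP_NOFS);
	free_extent_state(cached);
	return ret;
}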
1236
1237/* wrappers around set/clear extent bit */
1238int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1239 gfp_t mask)
1240{
1241 return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
1242 NULL, mask);
1243}
1244
1245int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1246 unsigned long bits, gfp_t mask)
1247{
1248 return set_extent_bit(tree, start, end, bits, NULL,
1249 NULL, mask);
1250}
1251
1252int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1253 unsigned long bits, gfp_t mask)
1254{
1255 return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
1256}
1257
1258int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
1259 struct extent_state **cached_state, gfp_t mask)
1260{
1261 return set_extent_bit(tree, start, end,
1262 EXTENT_DELALLOC | EXTENT_UPTODATE,
1263 NULL, cached_state, mask);
1264}
1265
1266int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
1267 struct extent_state **cached_state, gfp_t mask)
1268{
1269 return set_extent_bit(tree, start, end,
1270 EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
1271 NULL, cached_state, mask);
1272}
1273
1274int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1275 gfp_t mask)
1276{
1277 return clear_extent_bit(tree, start, end,
1278 EXTENT_DIRTY | EXTENT_DELALLOC |
1279 EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
1280}
1281
1282int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
1283 gfp_t mask)
1284{
1285 return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
1286 NULL, mask);
1287}
1288
1289int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1290 struct extent_state **cached_state, gfp_t mask)
1291{
1292 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
1293 cached_state, mask);
1294}
1295
1296int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1297 struct extent_state **cached_state, gfp_t mask)
1298{
1299 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
1300 cached_state, mask);
1301}
1302
1303/*
1304 * either insert or lock the state struct between start and end. If the range
1305 * is already locked, wait for it to be unlocked and retry (always GFP_NOFS).
1306 */
1307int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1308 unsigned long bits, struct extent_state **cached_state)
1309{
1310 int err;
1311 u64 failed_start;
1312 while (1) {
1313 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
1314 EXTENT_LOCKED, &failed_start,
1315 cached_state, GFP_NOFS);
1316 if (err == -EEXIST) {
1317 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1318 start = failed_start;
1319 } else
1320 break;
1321 WARN_ON(start > end);
1322 }
1323 return err;
1324}
1325
1326int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1327{
1328 return lock_extent_bits(tree, start, end, 0, NULL);
1329}
1330
1331int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1332{
1333 int err;
1334 u64 failed_start;
1335
1336 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1337 &failed_start, NULL, GFP_NOFS);
1338 if (err == -EEXIST) {
1339 if (failed_start > start)
1340 clear_extent_bit(tree, start, failed_start - 1,
1341 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
1342 return 0;
1343 }
1344 return 1;
1345}
1346
1347int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1348 struct extent_state **cached, gfp_t mask)
1349{
1350 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
1351 mask);
1352}
1353
1354int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1355{
1356 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
1357 GFP_NOFS);
1358}
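
/*
 * Usage sketch for the locking helpers above: an illustrative function with no
 * callers.  lock_extent() blocks until no other task holds EXTENT_LOCKED on
 * any byte of [start, end], the caller then works on that file range, and
 * unlock_extent() clears the bit and wakes up any waiters.
 */
static void __maybe_unused example_locked_section(struct extent_io_tree *tree,
						  u64 start, u64 end)
{
	lock_extent(tree, start, end);

	/* ... read or modify file data covered by [start, end] ... */

	unlock_extent(tree, start, end);
}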
1359
1360int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
1361{
1362 unsigned long index = start >> PAGE_CACHE_SHIFT;
1363 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1364 struct page *page;
1365
1366 while (index <= end_index) {
1367 page = find_get_page(inode->i_mapping, index);
1368 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1369 clear_page_dirty_for_io(page);
1370 page_cache_release(page);
1371 index++;
1372 }
1373 return 0;
1374}
1375
1376int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1377{
1378 unsigned long index = start >> PAGE_CACHE_SHIFT;
1379 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1380 struct page *page;
1381
1382 while (index <= end_index) {
1383 page = find_get_page(inode->i_mapping, index);
1384 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1385 account_page_redirty(page);
1386 __set_page_dirty_nobuffers(page);
1387 page_cache_release(page);
1388 index++;
1389 }
1390 return 0;
1391}
1392
1393/*
1394 * helper function to set every page in the given byte range writeback
1395 */
1396static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1397{
1398 unsigned long index = start >> PAGE_CACHE_SHIFT;
1399 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1400 struct page *page;
1401
1402 while (index <= end_index) {
1403 page = find_get_page(tree->mapping, index);
1404 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1405 set_page_writeback(page);
1406 page_cache_release(page);
1407 index++;
1408 }
1409 return 0;
1410}
1411
1412/* find the first state struct with 'bits' set after 'start', and
1413 * return it. tree->lock must be held. NULL will be returned if
1414 * nothing was found after 'start'.
1415 */
1416static struct extent_state *
1417find_first_extent_bit_state(struct extent_io_tree *tree,
1418 u64 start, unsigned long bits)
1419{
1420 struct rb_node *node;
1421 struct extent_state *state;
1422
1423 /*
1424 * this search will find all the extents that end after
1425 * our range starts.
1426 */
1427 node = tree_search(tree, start);
1428 if (!node)
1429 goto out;
1430
1431 while (1) {
1432 state = rb_entry(node, struct extent_state, rb_node);
1433 if (state->end >= start && (state->state & bits))
1434 return state;
1435
1436 node = rb_next(node);
1437 if (!node)
1438 break;
1439 }
1440out:
1441 return NULL;
1442}
1443
1444/*
1445 * find the first offset in the io tree with 'bits' set. zero is
1446 * returned if we find something, and *start_ret and *end_ret are
1447 * set to reflect the state struct that was found.
1448 *
1449 * If nothing was found, 1 is returned; if something was found, 0 is returned.
1450 */
1451int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1452 u64 *start_ret, u64 *end_ret, unsigned long bits,
1453 struct extent_state **cached_state)
1454{
1455 struct extent_state *state;
1456 struct rb_node *n;
1457 int ret = 1;
1458
1459 spin_lock(&tree->lock);
1460 if (cached_state && *cached_state) {
1461 state = *cached_state;
1462 if (state->end == start - 1 && state->tree) {
1463 n = rb_next(&state->rb_node);
1464 while (n) {
1465 state = rb_entry(n, struct extent_state,
1466 rb_node);
1467 if (state->state & bits)
1468 goto got_it;
1469 n = rb_next(n);
1470 }
1471 free_extent_state(*cached_state);
1472 *cached_state = NULL;
1473 goto out;
1474 }
1475 free_extent_state(*cached_state);
1476 *cached_state = NULL;
1477 }
1478
1479 state = find_first_extent_bit_state(tree, start, bits);
1480got_it:
1481 if (state) {
1482 cache_state(state, cached_state);
1483 *start_ret = state->start;
1484 *end_ret = state->end;
1485 ret = 0;
1486 }
1487out:
1488 spin_unlock(&tree->lock);
1489 return ret;
1490}
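
/*
 * Usage sketch for find_first_extent_bit(): an illustrative walker with no
 * callers that visits every range with EXTENT_DIRTY set.  The cached state is
 * only an optimization; the loop works the same if it is never populated.
 */
static void __maybe_unused example_walk_dirty_ranges(struct extent_io_tree *tree)
{
	struct extent_state *cached = NULL;
	u64 found_start;
	u64 found_end;
	u64 start = 0;

	while (!find_first_extent_bit(tree, start, &found_start, &found_end,
				      EXTENT_DIRTY, &cached)) {
		/* ... process the dirty range [found_start, found_end] ... */
		start = found_end + 1;
	}
	free_extent_state(cached);
}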
1491
1492/*
1493 * find a contiguous range of bytes in the file marked as delalloc, not
1494 * more than 'max_bytes'. 'start' and 'end' are used to return the range.
1495 *
1496 * Nonzero is returned if we find something, 0 if nothing was in the tree.
1497 */
1498static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1499 u64 *start, u64 *end, u64 max_bytes,
1500 struct extent_state **cached_state)
1501{
1502 struct rb_node *node;
1503 struct extent_state *state;
1504 u64 cur_start = *start;
1505 u64 found = 0;
1506 u64 total_bytes = 0;
1507
1508 spin_lock(&tree->lock);
1509
1510 /*
1511 * this search will find all the extents that end after
1512 * our range starts.
1513 */
1514 node = tree_search(tree, cur_start);
1515 if (!node) {
1516 if (!found)
1517 *end = (u64)-1;
1518 goto out;
1519 }
1520
1521 while (1) {
1522 state = rb_entry(node, struct extent_state, rb_node);
1523 if (found && (state->start != cur_start ||
1524 (state->state & EXTENT_BOUNDARY))) {
1525 goto out;
1526 }
1527 if (!(state->state & EXTENT_DELALLOC)) {
1528 if (!found)
1529 *end = state->end;
1530 goto out;
1531 }
1532 if (!found) {
1533 *start = state->start;
1534 *cached_state = state;
1535 atomic_inc(&state->refs);
1536 }
1537 found++;
1538 *end = state->end;
1539 cur_start = state->end + 1;
1540 node = rb_next(node);
1541 total_bytes += state->end - state->start + 1;
1542 if (total_bytes >= max_bytes)
1543 break;
1544 if (!node)
1545 break;
1546 }
1547out:
1548 spin_unlock(&tree->lock);
1549 return found;
1550}
1551
1552static noinline void __unlock_for_delalloc(struct inode *inode,
1553 struct page *locked_page,
1554 u64 start, u64 end)
1555{
1556 int ret;
1557 struct page *pages[16];
1558 unsigned long index = start >> PAGE_CACHE_SHIFT;
1559 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1560 unsigned long nr_pages = end_index - index + 1;
1561 int i;
1562
1563 if (index == locked_page->index && end_index == index)
1564 return;
1565
1566 while (nr_pages > 0) {
1567 ret = find_get_pages_contig(inode->i_mapping, index,
1568 min_t(unsigned long, nr_pages,
1569 ARRAY_SIZE(pages)), pages);
1570 for (i = 0; i < ret; i++) {
1571 if (pages[i] != locked_page)
1572 unlock_page(pages[i]);
1573 page_cache_release(pages[i]);
1574 }
1575 nr_pages -= ret;
1576 index += ret;
1577 cond_resched();
1578 }
1579}
1580
1581static noinline int lock_delalloc_pages(struct inode *inode,
1582 struct page *locked_page,
1583 u64 delalloc_start,
1584 u64 delalloc_end)
1585{
1586 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1587 unsigned long start_index = index;
1588 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1589 unsigned long pages_locked = 0;
1590 struct page *pages[16];
1591 unsigned long nrpages;
1592 int ret;
1593 int i;
1594
1595 /* the caller is responsible for locking the start index */
1596 if (index == locked_page->index && index == end_index)
1597 return 0;
1598
1599 /* skip the page at the start index */
1600 nrpages = end_index - index + 1;
1601 while (nrpages > 0) {
1602 ret = find_get_pages_contig(inode->i_mapping, index,
1603 min_t(unsigned long,
1604 nrpages, ARRAY_SIZE(pages)), pages);
1605 if (ret == 0) {
1606 ret = -EAGAIN;
1607 goto done;
1608 }
1609 /* now we have an array of pages, lock them all */
1610 for (i = 0; i < ret; i++) {
1611 /*
1612 * the caller is taking responsibility for
1613 * locked_page
1614 */
1615 if (pages[i] != locked_page) {
1616 lock_page(pages[i]);
1617 if (!PageDirty(pages[i]) ||
1618 pages[i]->mapping != inode->i_mapping) {
1619 ret = -EAGAIN;
1620 unlock_page(pages[i]);
1621 page_cache_release(pages[i]);
1622 goto done;
1623 }
1624 }
1625 page_cache_release(pages[i]);
1626 pages_locked++;
1627 }
1628 nrpages -= ret;
1629 index += ret;
1630 cond_resched();
1631 }
1632 ret = 0;
1633done:
1634 if (ret && pages_locked) {
1635 __unlock_for_delalloc(inode, locked_page,
1636 delalloc_start,
1637 ((u64)(start_index + pages_locked - 1)) <<
1638 PAGE_CACHE_SHIFT);
1639 }
1640 return ret;
1641}
1642
1643/*
1644 * find and lock a contiguous range of bytes in the file marked as delalloc,
1645 * not more than 'max_bytes'. 'start' and 'end' are used to return the range.
1646 *
1647 * Nonzero is returned if we find something, 0 if nothing was in the tree.
1648 */
1649STATIC u64 find_lock_delalloc_range(struct inode *inode,
1650 struct extent_io_tree *tree,
1651 struct page *locked_page, u64 *start,
1652 u64 *end, u64 max_bytes)
1653{
1654 u64 delalloc_start;
1655 u64 delalloc_end;
1656 u64 found;
1657 struct extent_state *cached_state = NULL;
1658 int ret;
1659 int loops = 0;
1660
1661again:
1662 /* step one, find a bunch of delalloc bytes starting at start */
1663 delalloc_start = *start;
1664 delalloc_end = 0;
1665 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1666 max_bytes, &cached_state);
1667 if (!found || delalloc_end <= *start) {
1668 *start = delalloc_start;
1669 *end = delalloc_end;
1670 free_extent_state(cached_state);
1671 return 0;
1672 }
1673
1674 /*
1675 * start comes from the offset of locked_page. We have to lock
1676 * pages in order, so we can't process delalloc bytes before
1677 * locked_page
1678 */
1679 if (delalloc_start < *start)
1680 delalloc_start = *start;
1681
1682 /*
1683 * make sure to limit the number of pages we try to lock down
1684 */
1685 if (delalloc_end + 1 - delalloc_start > max_bytes)
1686 delalloc_end = delalloc_start + max_bytes - 1;
1687
1688 /* step two, lock all the pages after the page that has start */
1689 ret = lock_delalloc_pages(inode, locked_page,
1690 delalloc_start, delalloc_end);
1691 if (ret == -EAGAIN) {
1692		/* some of the pages are gone, let's avoid looping by
1693 * shortening the size of the delalloc range we're searching
1694 */
1695 free_extent_state(cached_state);
1696 if (!loops) {
1697 max_bytes = PAGE_CACHE_SIZE;
1698 loops = 1;
1699 goto again;
1700 } else {
1701 found = 0;
1702 goto out_failed;
1703 }
1704 }
1705 BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
1706
1707 /* step three, lock the state bits for the whole range */
1708 lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);
1709
1710 /* then test to make sure it is all still delalloc */
1711 ret = test_range_bit(tree, delalloc_start, delalloc_end,
1712 EXTENT_DELALLOC, 1, cached_state);
1713 if (!ret) {
1714 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1715 &cached_state, GFP_NOFS);
1716 __unlock_for_delalloc(inode, locked_page,
1717 delalloc_start, delalloc_end);
1718 cond_resched();
1719 goto again;
1720 }
1721 free_extent_state(cached_state);
1722 *start = delalloc_start;
1723 *end = delalloc_end;
1724out_failed:
1725 return found;
1726}
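
/*
 * Usage sketch for find_lock_delalloc_range(): an illustrative helper with no
 * callers, modelled on how the writepage path consumes it.  'locked_page' must
 * already be locked by the caller.  On success the pages of the range (except
 * locked_page) and the extent state range come back locked; the 64MiB cap and
 * the cleanup shown here are assumptions for the example, the real callers do
 * the unlocking as part of running delalloc.
 */
static void __maybe_unused example_handle_delalloc(struct inode *inode,
						   struct page *locked_page)
{
	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
	u64 start = page_offset(locked_page);
	u64 end = 0;
	u64 found;

	found = find_lock_delalloc_range(inode, tree, locked_page,
					 &start, &end, 64 * 1024 * 1024);
	if (!found)
		return;

	/* ... kick off delalloc for [start, end] here ... */

	unlock_extent(tree, start, end);
	__unlock_for_delalloc(inode, locked_page, start, end);
}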
1727
1728int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1729 struct page *locked_page,
1730 unsigned long clear_bits,
1731 unsigned long page_ops)
1732{
1733 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1734 int ret;
1735 struct page *pages[16];
1736 unsigned long index = start >> PAGE_CACHE_SHIFT;
1737 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1738 unsigned long nr_pages = end_index - index + 1;
1739 int i;
1740
1741 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1742 if (page_ops == 0)
1743 return 0;
1744
1745 while (nr_pages > 0) {
1746 ret = find_get_pages_contig(inode->i_mapping, index,
1747 min_t(unsigned long,
1748 nr_pages, ARRAY_SIZE(pages)), pages);
1749 for (i = 0; i < ret; i++) {
1750
1751 if (page_ops & PAGE_SET_PRIVATE2)
1752 SetPagePrivate2(pages[i]);
1753
1754 if (pages[i] == locked_page) {
1755 page_cache_release(pages[i]);
1756 continue;
1757 }
1758 if (page_ops & PAGE_CLEAR_DIRTY)
1759 clear_page_dirty_for_io(pages[i]);
1760 if (page_ops & PAGE_SET_WRITEBACK)
1761 set_page_writeback(pages[i]);
1762 if (page_ops & PAGE_END_WRITEBACK)
1763 end_page_writeback(pages[i]);
1764 if (page_ops & PAGE_UNLOCK)
1765 unlock_page(pages[i]);
1766 page_cache_release(pages[i]);
1767 }
1768 nr_pages -= ret;
1769 index += ret;
1770 cond_resched();
1771 }
1772 return 0;
1773}
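
/*
 * Usage sketch for extent_clear_unlock_delalloc(): an illustrative error-path
 * helper with no callers.  After a failed delalloc submission the caller drops
 * its accounting bits and releases every page in the range; 'locked_page' is
 * skipped by the helper and stays locked for the caller.  The particular
 * bit/op combination is an assumption for the example, not a rule.
 */
static void __maybe_unused example_fail_delalloc_range(struct inode *inode,
						       struct page *locked_page,
						       u64 start, u64 end)
{
	extent_clear_unlock_delalloc(inode, start, end, locked_page,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				     PAGE_END_WRITEBACK);
}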
1774
1775/*
1776 * count the number of bytes in the tree that have the given bits
1777 * set. This can be fairly slow, except for EXTENT_DIRTY which is
1778 * cached. The total number of bytes found is returned.
1779 */
1780u64 count_range_bits(struct extent_io_tree *tree,
1781 u64 *start, u64 search_end, u64 max_bytes,
1782 unsigned long bits, int contig)
1783{
1784 struct rb_node *node;
1785 struct extent_state *state;
1786 u64 cur_start = *start;
1787 u64 total_bytes = 0;
1788 u64 last = 0;
1789 int found = 0;
1790
1791 if (WARN_ON(search_end <= cur_start))
1792 return 0;
1793
1794 spin_lock(&tree->lock);
1795 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1796 total_bytes = tree->dirty_bytes;
1797 goto out;
1798 }
1799 /*
1800 * this search will find all the extents that end after
1801 * our range starts.
1802 */
1803 node = tree_search(tree, cur_start);
1804 if (!node)
1805 goto out;
1806
1807 while (1) {
1808 state = rb_entry(node, struct extent_state, rb_node);
1809 if (state->start > search_end)
1810 break;
1811 if (contig && found && state->start > last + 1)
1812 break;
1813 if (state->end >= cur_start && (state->state & bits) == bits) {
1814 total_bytes += min(search_end, state->end) + 1 -
1815 max(cur_start, state->start);
1816 if (total_bytes >= max_bytes)
1817 break;
1818 if (!found) {
1819 *start = max(cur_start, state->start);
1820 found = 1;
1821 }
1822 last = state->end;
1823 } else if (contig && found) {
1824 break;
1825 }
1826 node = rb_next(node);
1827 if (!node)
1828 break;
1829 }
1830out:
1831 spin_unlock(&tree->lock);
1832 return total_bytes;
1833}
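
/*
 * Usage sketch for count_range_bits(): an illustrative helper with no callers
 * that exercises the cached fast path described above.  Asking for
 * EXTENT_DIRTY starting from offset 0 returns tree->dirty_bytes without
 * walking the rbtree.
 */
static u64 __maybe_unused example_total_dirty_bytes(struct extent_io_tree *tree)
{
	u64 start = 0;

	return count_range_bits(tree, &start, (u64)-1, (u64)-1,
				EXTENT_DIRTY, 0);
}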
1834
1835/*
1836 * set the private field for a given byte offset in the tree. If there isn't
1837 * an extent_state starting exactly at 'start', this returns -ENOENT.
1838 */
1839static int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1840{
1841 struct rb_node *node;
1842 struct extent_state *state;
1843 int ret = 0;
1844
1845 spin_lock(&tree->lock);
1846 /*
1847 * this search will find all the extents that end after
1848 * our range starts.
1849 */
1850 node = tree_search(tree, start);
1851 if (!node) {
1852 ret = -ENOENT;
1853 goto out;
1854 }
1855 state = rb_entry(node, struct extent_state, rb_node);
1856 if (state->start != start) {
1857 ret = -ENOENT;
1858 goto out;
1859 }
1860 state->private = private;
1861out:
1862 spin_unlock(&tree->lock);
1863 return ret;
1864}
1865
1866int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1867{
1868 struct rb_node *node;
1869 struct extent_state *state;
1870 int ret = 0;
1871
1872 spin_lock(&tree->lock);
1873 /*
1874 * this search will find all the extents that end after
1875 * our range starts.
1876 */
1877 node = tree_search(tree, start);
1878 if (!node) {
1879 ret = -ENOENT;
1880 goto out;
1881 }
1882 state = rb_entry(node, struct extent_state, rb_node);
1883 if (state->start != start) {
1884 ret = -ENOENT;
1885 goto out;
1886 }
1887 *private = state->private;
1888out:
1889 spin_unlock(&tree->lock);
1890 return ret;
1891}
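
/*
 * Usage sketch for set_state_private()/get_state_private(): an illustrative
 * helper with no callers.  This is how the IO failure code below stashes an
 * io_failure_record pointer on the extent_state that starts at a given offset;
 * the void pointer here is a stand-in for the example.
 */
static void __maybe_unused example_stash_private(struct extent_io_tree *tree,
						 u64 start, void *record)
{
	u64 stored = 0;

	/* fails with -ENOENT unless an extent_state starts exactly at 'start' */
	if (set_state_private(tree, start, (u64)(unsigned long)record))
		return;

	if (get_state_private(tree, start, &stored) == 0)
		WARN_ON((void *)(unsigned long)stored != record);
}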
1892
1893/*
1894 * searches a range in the state tree for a given mask.
1895 * If 'filled' == 1, this returns 1 only if every extent in the range
1896 * has the bits set. Otherwise, 1 is returned if any bit in the
1897 * range is found set.
1898 */
1899int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1900 unsigned long bits, int filled, struct extent_state *cached)
1901{
1902 struct extent_state *state = NULL;
1903 struct rb_node *node;
1904 int bitset = 0;
1905
1906 spin_lock(&tree->lock);
1907 if (cached && cached->tree && cached->start <= start &&
1908 cached->end > start)
1909 node = &cached->rb_node;
1910 else
1911 node = tree_search(tree, start);
1912 while (node && start <= end) {
1913 state = rb_entry(node, struct extent_state, rb_node);
1914
1915 if (filled && state->start > start) {
1916 bitset = 0;
1917 break;
1918 }
1919
1920 if (state->start > end)
1921 break;
1922
1923 if (state->state & bits) {
1924 bitset = 1;
1925 if (!filled)
1926 break;
1927 } else if (filled) {
1928 bitset = 0;
1929 break;
1930 }
1931
1932 if (state->end == (u64)-1)
1933 break;
1934
1935 start = state->end + 1;
1936 if (start > end)
1937 break;
1938 node = rb_next(node);
1939 if (!node) {
1940 if (filled)
1941 bitset = 0;
1942 break;
1943 }
1944 }
1945 spin_unlock(&tree->lock);
1946 return bitset;
1947}
1948
1949/*
1950 * helper function to set a given page up to date if all the
1951 * extents in the tree for that page are up to date
1952 */
1953static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
1954{
1955 u64 start = page_offset(page);
1956 u64 end = start + PAGE_CACHE_SIZE - 1;
1957 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1958 SetPageUptodate(page);
1959}
1960
1961/*
1962 * When IO fails, either with EIO or because csum verification fails, we
1963 * try other mirrors that might have a good copy of the data. This
1964 * io_failure_record is used to record state as we go through all the
1965 * mirrors. If another mirror has good data, the page is set up to date
1966 * and things continue. If a good mirror can't be found, the original
1967 * bio end_io callback is called to indicate things have failed.
1968 */
1969struct io_failure_record {
1970 struct page *page;
1971 u64 start;
1972 u64 len;
1973 u64 logical;
1974 unsigned long bio_flags;
1975 int this_mirror;
1976 int failed_mirror;
1977 int in_validation;
1978};
1979
1980static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
1981 int did_repair)
1982{
1983 int ret;
1984 int err = 0;
1985 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1986
1987 set_state_private(failure_tree, rec->start, 0);
1988 ret = clear_extent_bits(failure_tree, rec->start,
1989 rec->start + rec->len - 1,
1990 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1991 if (ret)
1992 err = ret;
1993
1994 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1995 rec->start + rec->len - 1,
1996 EXTENT_DAMAGED, GFP_NOFS);
1997 if (ret && !err)
1998 err = ret;
1999
2000 kfree(rec);
2001 return err;
2002}
2003
2004/*
2005 * This bypasses the standard btrfs submit functions deliberately, as
2006 * the standard behavior is to write all copies in a raid setup. Here we only
2007 * want to write the one bad copy, so we do the mapping ourselves and issue
2008 * submit_bio directly.
2009 * To avoid any synchronization issues, wait for the data after writing, which
2010 * actually prevents the read that triggered the error from finishing.
2011 * Currently, there can be no more than two copies of every data bit, so
2012 * exactly one rewrite is required.
2013 */
2014int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
2015 u64 length, u64 logical, struct page *page,
2016 int mirror_num)
2017{
2018 struct bio *bio;
2019 struct btrfs_device *dev;
2020 u64 map_length = 0;
2021 u64 sector;
2022 struct btrfs_bio *bbio = NULL;
2023 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
2024 int ret;
2025
2026 ASSERT(!(fs_info->sb->s_flags & MS_RDONLY));
2027 BUG_ON(!mirror_num);
2028
2029 /* we can't repair anything in raid56 yet */
2030 if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
2031 return 0;
2032
2033 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
2034 if (!bio)
2035 return -EIO;
2036 bio->bi_iter.bi_size = 0;
2037 map_length = length;
2038
2039 ret = btrfs_map_block(fs_info, WRITE, logical,
2040 &map_length, &bbio, mirror_num);
2041 if (ret) {
2042 bio_put(bio);
2043 return -EIO;
2044 }
2045 BUG_ON(mirror_num != bbio->mirror_num);
2046 sector = bbio->stripes[mirror_num-1].physical >> 9;
2047 bio->bi_iter.bi_sector = sector;
2048 dev = bbio->stripes[mirror_num-1].dev;
2049 kfree(bbio);
2050 if (!dev || !dev->bdev || !dev->writeable) {
2051 bio_put(bio);
2052 return -EIO;
2053 }
2054 bio->bi_bdev = dev->bdev;
2055 bio_add_page(bio, page, length, start - page_offset(page));
2056
2057 if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
2058 /* try to remap that extent elsewhere? */
2059 bio_put(bio);
2060 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
2061 return -EIO;
2062 }
2063
2064 printk_ratelimited_in_rcu(KERN_INFO
2065 "BTRFS: read error corrected: ino %lu off %llu "
2066 "(dev %s sector %llu)\n", page->mapping->host->i_ino,
2067 start, rcu_str_deref(dev->name), sector);
2068
2069 bio_put(bio);
2070 return 0;
2071}
2072
2073int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
2074 int mirror_num)
2075{
2076 u64 start = eb->start;
2077 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
2078 int ret = 0;
2079
2080 if (root->fs_info->sb->s_flags & MS_RDONLY)
2081 return -EROFS;
2082
2083 for (i = 0; i < num_pages; i++) {
2084 struct page *p = extent_buffer_page(eb, i);
2085 ret = repair_io_failure(root->fs_info, start, PAGE_CACHE_SIZE,
2086 start, p, mirror_num);
2087 if (ret)
2088 break;
2089 start += PAGE_CACHE_SIZE;
2090 }
2091
2092 return ret;
2093}
2094
2095/*
2096 * each time an IO finishes, we do a fast check in the IO failure tree
2097 * to see if we need to process or clean up an io_failure_record
2098 */
2099static int clean_io_failure(u64 start, struct page *page)
2100{
2101 u64 private;
2102 u64 private_failure;
2103 struct io_failure_record *failrec;
2104 struct inode *inode = page->mapping->host;
2105 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2106 struct extent_state *state;
2107 int num_copies;
2108 int did_repair = 0;
2109 int ret;
2110
2111 private = 0;
2112 ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
2113 (u64)-1, 1, EXTENT_DIRTY, 0);
2114 if (!ret)
2115 return 0;
2116
2117 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
2118 &private_failure);
2119 if (ret)
2120 return 0;
2121
2122 failrec = (struct io_failure_record *)(unsigned long) private_failure;
2123 BUG_ON(!failrec->this_mirror);
2124
2125 if (failrec->in_validation) {
2126 /* there was no real error, just free the record */
2127 pr_debug("clean_io_failure: freeing dummy error at %llu\n",
2128 failrec->start);
2129 did_repair = 1;
2130 goto out;
2131 }
2132 if (fs_info->sb->s_flags & MS_RDONLY)
2133 goto out;
2134
2135 spin_lock(&BTRFS_I(inode)->io_tree.lock);
2136 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
2137 failrec->start,
2138 EXTENT_LOCKED);
2139 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
2140
2141 if (state && state->start <= failrec->start &&
2142 state->end >= failrec->start + failrec->len - 1) {
2143 num_copies = btrfs_num_copies(fs_info, failrec->logical,
2144 failrec->len);
2145 if (num_copies > 1) {
2146 ret = repair_io_failure(fs_info, start, failrec->len,
2147 failrec->logical, page,
2148 failrec->failed_mirror);
2149 did_repair = !ret;
2150 }
2151 ret = 0;
2152 }
2153
2154out:
2155 if (!ret)
2156 ret = free_io_failure(inode, failrec, did_repair);
2157
2158 return ret;
2159}
2160
2161/*
2162 * this is a generic handler for readpage errors (default
2163 * readpage_io_failed_hook). If other copies exist, read those and write back
2164 * good data to the failed position. It does not try to remap the failed
2165 * extent elsewhere, hoping the device will be smart enough to do this as
2166 * needed.
2167 */
2168
2169static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
2170 struct page *page, u64 start, u64 end,
2171 int failed_mirror)
2172{
2173 struct io_failure_record *failrec = NULL;
2174 u64 private;
2175 struct extent_map *em;
2176 struct inode *inode = page->mapping->host;
2177 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2178 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2179 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2180 struct bio *bio;
2181 struct btrfs_io_bio *btrfs_failed_bio;
2182 struct btrfs_io_bio *btrfs_bio;
2183 int num_copies;
2184 int ret;
2185 int read_mode;
2186 u64 logical;
2187
2188 BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2189
2190 ret = get_state_private(failure_tree, start, &private);
2191 if (ret) {
2192 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2193 if (!failrec)
2194 return -ENOMEM;
2195 failrec->start = start;
2196 failrec->len = end - start + 1;
2197 failrec->this_mirror = 0;
2198 failrec->bio_flags = 0;
2199 failrec->in_validation = 0;
2200
2201 read_lock(&em_tree->lock);
2202 em = lookup_extent_mapping(em_tree, start, failrec->len);
2203 if (!em) {
2204 read_unlock(&em_tree->lock);
2205 kfree(failrec);
2206 return -EIO;
2207 }
2208
2209 if (em->start > start || em->start + em->len <= start) {
2210 free_extent_map(em);
2211 em = NULL;
2212 }
2213 read_unlock(&em_tree->lock);
2214
2215 if (!em) {
2216 kfree(failrec);
2217 return -EIO;
2218 }
2219 logical = start - em->start;
2220 logical = em->block_start + logical;
2221 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2222 logical = em->block_start;
2223 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2224 extent_set_compress_type(&failrec->bio_flags,
2225 em->compress_type);
2226 }
2227 pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
2228 "len=%llu\n", logical, start, failrec->len);
2229 failrec->logical = logical;
2230 free_extent_map(em);
2231
2232 /* set the bits in the private failure tree */
2233 ret = set_extent_bits(failure_tree, start, end,
2234 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2235 if (ret >= 0)
2236 ret = set_state_private(failure_tree, start,
2237 (u64)(unsigned long)failrec);
2238 /* set the bits in the inode's tree */
2239 if (ret >= 0)
2240 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2241 GFP_NOFS);
2242 if (ret < 0) {
2243 kfree(failrec);
2244 return ret;
2245 }
2246 } else {
2247 failrec = (struct io_failure_record *)(unsigned long)private;
2248 pr_debug("bio_readpage_error: (found) logical=%llu, "
2249 "start=%llu, len=%llu, validation=%d\n",
2250 failrec->logical, failrec->start, failrec->len,
2251 failrec->in_validation);
2252 /*
2253 * when data can be on disk more than twice, add to failrec here
2254 * (e.g. with a list for failed_mirror) to make
2255 * clean_io_failure() clean all those errors at once.
2256 */
2257 }
2258 num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
2259 failrec->logical, failrec->len);
2260 if (num_copies == 1) {
2261 /*
2262 * we only have a single copy of the data, so don't bother with
2263 * all the retry and error correction code that follows. no
2264 * matter what the error is, it is very likely to persist.
2265 */
2266 pr_debug("bio_readpage_error: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
2267 num_copies, failrec->this_mirror, failed_mirror);
2268 free_io_failure(inode, failrec, 0);
2269 return -EIO;
2270 }
2271
2272 /*
2273	 * there are two goals here:
2274 * a) deliver good data to the caller
2275 * b) correct the bad sectors on disk
2276 */
2277 if (failed_bio->bi_vcnt > 1) {
2278 /*
2279 * to fulfill b), we need to know the exact failing sectors, as
2280 * we don't want to rewrite any more than the failed ones. thus,
2281 * we need separate read requests for the failed bio
2282 *
2283 * if the following BUG_ON triggers, our validation request got
2284 * merged. we need separate requests for our algorithm to work.
2285 */
2286 BUG_ON(failrec->in_validation);
2287 failrec->in_validation = 1;
2288 failrec->this_mirror = failed_mirror;
2289 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2290 } else {
2291 /*
2292		 * we're ready to fulfill a) and b) at the same time. Get a good copy
2293		 * of the failed sector and, if we succeed, we have set up
2294 * everything for repair_io_failure to do the rest for us.
2295 */
2296 if (failrec->in_validation) {
2297 BUG_ON(failrec->this_mirror != failed_mirror);
2298 failrec->in_validation = 0;
2299 failrec->this_mirror = 0;
2300 }
2301 failrec->failed_mirror = failed_mirror;
2302 failrec->this_mirror++;
2303 if (failrec->this_mirror == failed_mirror)
2304 failrec->this_mirror++;
2305 read_mode = READ_SYNC;
2306 }
2307
2308 if (failrec->this_mirror > num_copies) {
2309 pr_debug("bio_readpage_error: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
2310 num_copies, failrec->this_mirror, failed_mirror);
2311 free_io_failure(inode, failrec, 0);
2312 return -EIO;
2313 }
2314
2315 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
2316 if (!bio) {
2317 free_io_failure(inode, failrec, 0);
2318 return -EIO;
2319 }
2320 bio->bi_end_io = failed_bio->bi_end_io;
2321 bio->bi_iter.bi_sector = failrec->logical >> 9;
2322 bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2323 bio->bi_iter.bi_size = 0;
2324
2325 btrfs_failed_bio = btrfs_io_bio(failed_bio);
2326 if (btrfs_failed_bio->csum) {
2327 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2328 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
2329
2330 btrfs_bio = btrfs_io_bio(bio);
2331 btrfs_bio->csum = btrfs_bio->csum_inline;
2332 phy_offset >>= inode->i_sb->s_blocksize_bits;
2333 phy_offset *= csum_size;
2334 memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + phy_offset,
2335 csum_size);
2336 }
2337
2338 bio_add_page(bio, page, failrec->len, start - page_offset(page));
2339
2340 pr_debug("bio_readpage_error: submitting new read[%#x] to "
2341 "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
2342 failrec->this_mirror, num_copies, failrec->in_validation);
2343
2344 ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2345 failrec->this_mirror,
2346 failrec->bio_flags, 0);
2347 return ret;
2348}
2349
2350/* lots and lots of room for performance fixes in the end_bio funcs */
2351
2352int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2353{
2354 int uptodate = (err == 0);
2355 struct extent_io_tree *tree;
2356 int ret;
2357
2358 tree = &BTRFS_I(page->mapping->host)->io_tree;
2359
2360 if (tree->ops && tree->ops->writepage_end_io_hook) {
2361 ret = tree->ops->writepage_end_io_hook(page, start,
2362 end, NULL, uptodate);
2363 if (ret)
2364 uptodate = 0;
2365 }
2366
2367 if (!uptodate) {
2368 ClearPageUptodate(page);
2369 SetPageError(page);
2370 }
2371 return 0;
2372}
2373
2374/*
2375 * after a writepage IO is done, we need to:
2376 * clear the uptodate bits on error
2377 * clear the writeback bits in the extent tree for this IO
2378 * end_page_writeback if the page has no more pending IO
2379 *
2380 * Scheduling is not allowed, so the extent state tree is expected
2381 * to have one and only one object corresponding to this IO.
2382 */
2383static void end_bio_extent_writepage(struct bio *bio, int err)
2384{
2385 struct bio_vec *bvec;
2386 u64 start;
2387 u64 end;
2388 int i;
2389
2390 bio_for_each_segment_all(bvec, bio, i) {
2391 struct page *page = bvec->bv_page;
2392
2393		/* We always issue full-page writes, but if some block
2394		 * in a page fails to write, blk_update_request() will
2395 * advance bv_offset and adjust bv_len to compensate.
2396 * Print a warning for nonzero offsets, and an error
2397 * if they don't add up to a full page. */
2398 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
2399 if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
2400 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
2401 "partial page write in btrfs with offset %u and length %u",
2402 bvec->bv_offset, bvec->bv_len);
2403 else
2404 btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
2405 "incomplete page write in btrfs with offset %u and "
2406 "length %u",
2407 bvec->bv_offset, bvec->bv_len);
2408 }
2409
2410 start = page_offset(page);
2411 end = start + bvec->bv_offset + bvec->bv_len - 1;
2412
2413 if (end_extent_writepage(page, err, start, end))
2414 continue;
2415
2416 end_page_writeback(page);
2417 }
2418
2419 bio_put(bio);
2420}
2421
2422static void
2423endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2424 int uptodate)
2425{
2426 struct extent_state *cached = NULL;
2427 u64 end = start + len - 1;
2428
2429 if (uptodate && tree->track_uptodate)
2430 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
2431 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
2432}
2433
2434/*
2435 * after a readpage IO is done, we need to:
2436 * clear the uptodate bits on error
2437 * set the uptodate bits if things worked
2438 * set the page up to date if all extents in the tree are uptodate
2439 * clear the lock bit in the extent tree
2440 * unlock the page if there are no other extents locked for it
2441 *
2442 * Scheduling is not allowed, so the extent state tree is expected
2443 * to have one and only one object corresponding to this IO.
2444 */
2445static void end_bio_extent_readpage(struct bio *bio, int err)
2446{
2447 struct bio_vec *bvec;
2448 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
2449 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
2450 struct extent_io_tree *tree;
2451 u64 offset = 0;
2452 u64 start;
2453 u64 end;
2454 u64 len;
2455 u64 extent_start = 0;
2456 u64 extent_len = 0;
2457 int mirror;
2458 int ret;
2459 int i;
2460
2461 if (err)
2462 uptodate = 0;
2463
2464 bio_for_each_segment_all(bvec, bio, i) {
2465 struct page *page = bvec->bv_page;
2466 struct inode *inode = page->mapping->host;
2467
2468 pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u\n",
2469 (u64)bio->bi_iter.bi_sector, err,
2470 io_bio->mirror_num);
2471 tree = &BTRFS_I(inode)->io_tree;
2472
2473 /* We always issue full-page reads, but if some block
2474 * in a page fails to read, blk_update_request() will
2475 * advance bv_offset and adjust bv_len to compensate.
2476 * Print a warning for nonzero offsets, and an error
2477 * if they don't add up to a full page. */
2478 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
2479 if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
2480 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
2481 "partial page read in btrfs with offset %u and length %u",
2482 bvec->bv_offset, bvec->bv_len);
2483 else
2484 btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
2485 "incomplete page read in btrfs with offset %u and "
2486 "length %u",
2487 bvec->bv_offset, bvec->bv_len);
2488 }
2489
2490 start = page_offset(page);
2491 end = start + bvec->bv_offset + bvec->bv_len - 1;
2492 len = bvec->bv_len;
2493
2494 mirror = io_bio->mirror_num;
2495 if (likely(uptodate && tree->ops &&
2496 tree->ops->readpage_end_io_hook)) {
2497 ret = tree->ops->readpage_end_io_hook(io_bio, offset,
2498 page, start, end,
2499 mirror);
2500 if (ret)
2501 uptodate = 0;
2502 else
2503 clean_io_failure(start, page);
2504 }
2505
2506 if (likely(uptodate))
2507 goto readpage_ok;
2508
2509 if (tree->ops && tree->ops->readpage_io_failed_hook) {
2510 ret = tree->ops->readpage_io_failed_hook(page, mirror);
2511 if (!ret && !err &&
2512 test_bit(BIO_UPTODATE, &bio->bi_flags))
2513 uptodate = 1;
2514 } else {
2515 /*
2516 * The generic bio_readpage_error handles errors the
2517 * following way: If possible, new read requests are
2518 * created and submitted and will end up in
2519 * end_bio_extent_readpage as well (if we're lucky, not
2520 * in the !uptodate case). In that case it returns 0 and
2521 * we just go on with the next page in our bio. If it
2522 * can't handle the error it will return -EIO and we
2523 * remain responsible for that page.
2524 */
2525 ret = bio_readpage_error(bio, offset, page, start, end,
2526 mirror);
2527 if (ret == 0) {
2528 uptodate =
2529 test_bit(BIO_UPTODATE, &bio->bi_flags);
2530 if (err)
2531 uptodate = 0;
2532 continue;
2533 }
2534 }
2535readpage_ok:
2536 if (likely(uptodate)) {
2537 loff_t i_size = i_size_read(inode);
2538 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2539 unsigned offset;
2540
2541 /* Zero out the end if this page straddles i_size */
2542 offset = i_size & (PAGE_CACHE_SIZE-1);
2543 if (page->index == end_index && offset)
2544 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2545 SetPageUptodate(page);
2546 } else {
2547 ClearPageUptodate(page);
2548 SetPageError(page);
2549 }
2550 unlock_page(page);
2551 offset += len;
2552
2553 if (unlikely(!uptodate)) {
2554 if (extent_len) {
2555 endio_readpage_release_extent(tree,
2556 extent_start,
2557 extent_len, 1);
2558 extent_start = 0;
2559 extent_len = 0;
2560 }
2561 endio_readpage_release_extent(tree, start,
2562 end - start + 1, 0);
2563 } else if (!extent_len) {
2564 extent_start = start;
2565 extent_len = end + 1 - start;
2566 } else if (extent_start + extent_len == start) {
2567 extent_len += end + 1 - start;
2568 } else {
2569 endio_readpage_release_extent(tree, extent_start,
2570 extent_len, uptodate);
2571 extent_start = start;
2572 extent_len = end + 1 - start;
2573 }
2574 }
2575
2576 if (extent_len)
2577 endio_readpage_release_extent(tree, extent_start, extent_len,
2578 uptodate);
2579 if (io_bio->end_io)
2580 io_bio->end_io(io_bio, err);
2581 bio_put(bio);
2582}
2583
2584/*
2585 * this allocates from the btrfs_bioset. We're returning a bio right now
2586 * but you can call btrfs_io_bio for the appropriate container_of magic
2587 */
2588struct bio *
2589btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2590 gfp_t gfp_flags)
2591{
2592 struct btrfs_io_bio *btrfs_bio;
2593 struct bio *bio;
2594
2595 bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset);
2596
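	/*
	 * Allocation came up empty while we are in memory reclaim
	 * (PF_MEMALLOC): retry with progressively fewer vecs rather than
	 * failing outright.
	 */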
2597 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
2598 while (!bio && (nr_vecs /= 2)) {
2599 bio = bio_alloc_bioset(gfp_flags,
2600 nr_vecs, btrfs_bioset);
2601 }
2602 }
2603
2604 if (bio) {
2605 bio->bi_bdev = bdev;
2606 bio->bi_iter.bi_sector = first_sector;
2607 btrfs_bio = btrfs_io_bio(bio);
2608 btrfs_bio->csum = NULL;
2609 btrfs_bio->csum_allocated = NULL;
2610 btrfs_bio->end_io = NULL;
2611 }
2612 return bio;
2613}
2614
2615struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
2616{
2617 return bio_clone_bioset(bio, gfp_mask, btrfs_bioset);
2618}
2619
2621/* this also allocates from the btrfs_bioset */
2622struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
2623{
2624 struct btrfs_io_bio *btrfs_bio;
2625 struct bio *bio;
2626
2627 bio = bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset);
2628 if (bio) {
2629 btrfs_bio = btrfs_io_bio(bio);
2630 btrfs_bio->csum = NULL;
2631 btrfs_bio->csum_allocated = NULL;
2632 btrfs_bio->end_io = NULL;
2633 }
2634 return bio;
2635}
2637
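/*
 * Submit a fully built bio: route it through the tree's submit_bio_hook
 * (which handles checksumming and device mapping) when one is provided,
 * otherwise hand it straight to the block layer.  The bio_get/bio_put pair
 * keeps the bio alive long enough to check BIO_EOPNOTSUPP after submission.
 */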
2638static int __must_check submit_one_bio(int rw, struct bio *bio,
2639 int mirror_num, unsigned long bio_flags)
2640{
2641 int ret = 0;
2642 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2643 struct page *page = bvec->bv_page;
2644 struct extent_io_tree *tree = bio->bi_private;
2645 u64 start;
2646
2647 start = page_offset(page) + bvec->bv_offset;
2648
2649 bio->bi_private = NULL;
2650
2651 bio_get(bio);
2652
2653 if (tree->ops && tree->ops->submit_bio_hook)
2654 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
2655 mirror_num, bio_flags, start);
2656 else
2657 btrfsic_submit_bio(rw, bio);
2658
2659 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2660 ret = -EOPNOTSUPP;
2661 bio_put(bio);
2662 return ret;
2663}
2664
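/*
 * Ask the merge_bio_hook whether @size bytes of @page may be added to @bio
 * without crossing a stripe or compressed extent boundary.  A non-zero
 * return tells the caller to submit the current bio and start a new one.
 */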
2665static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
2666 unsigned long offset, size_t size, struct bio *bio,
2667 unsigned long bio_flags)
2668{
2669 int ret = 0;
2670 if (tree->ops && tree->ops->merge_bio_hook)
2671 ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
2672 bio_flags);
2673 BUG_ON(ret < 0);
2674 return ret;
2676}
2677
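/*
 * Try to add one page (or part of it) to the bio being built up in
 * *bio_ret.  If the new range is not contiguous, the bio flags changed,
 * the merge hook refuses, or the bio is full, the old bio is submitted
 * first and a fresh one is allocated for this page.
 */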
2678static int submit_extent_page(int rw, struct extent_io_tree *tree,
2679 struct page *page, sector_t sector,
2680 size_t size, unsigned long offset,
2681 struct block_device *bdev,
2682 struct bio **bio_ret,
2683 unsigned long max_pages,
2684 bio_end_io_t end_io_func,
2685 int mirror_num,
2686 unsigned long prev_bio_flags,
2687 unsigned long bio_flags)
2688{
2689 int ret = 0;
2690 struct bio *bio;
2691 int nr;
2692 int contig = 0;
2693 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
2694 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
2695 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
2696
2697 if (bio_ret && *bio_ret) {
2698 bio = *bio_ret;
2699 if (old_compressed)
2700 contig = bio->bi_iter.bi_sector == sector;
2701 else
2702 contig = bio_end_sector(bio) == sector;
2703
2704 if (prev_bio_flags != bio_flags || !contig ||
2705 merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
2706 bio_add_page(bio, page, page_size, offset) < page_size) {
2707 ret = submit_one_bio(rw, bio, mirror_num,
2708 prev_bio_flags);
2709 if (ret < 0)
2710 return ret;
2711 bio = NULL;
2712 } else {
2713 return 0;
2714 }
2715 }
2716 if (this_compressed)
2717 nr = BIO_MAX_PAGES;
2718 else
2719 nr = bio_get_nr_vecs(bdev);
2720
2721 bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
2722 if (!bio)
2723 return -ENOMEM;
2724
2725 bio_add_page(bio, page, page_size, offset);
2726 bio->bi_end_io = end_io_func;
2727 bio->bi_private = tree;
2728
2729 if (bio_ret)
2730 *bio_ret = bio;
2731 else
2732 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
2733
2734 return ret;
2735}
2736
2737static void attach_extent_buffer_page(struct extent_buffer *eb,
2738 struct page *page)
2739{
2740 if (!PagePrivate(page)) {
2741 SetPagePrivate(page);
2742 page_cache_get(page);
2743 set_page_private(page, (unsigned long)eb);
2744 } else {
2745 WARN_ON(page->private != (unsigned long)eb);
2746 }
2747}
2748
2749void set_page_extent_mapped(struct page *page)
2750{
2751 if (!PagePrivate(page)) {
2752 SetPagePrivate(page);
2753 page_cache_get(page);
2754 set_page_private(page, EXTENT_PAGE_PRIVATE);
2755 }
2756}
2757
2758static struct extent_map *
2759__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
2760 u64 start, u64 len, get_extent_t *get_extent,
2761 struct extent_map **em_cached)
2762{
2763 struct extent_map *em;
2764
2765 if (em_cached && *em_cached) {
2766 em = *em_cached;
2767 if (extent_map_in_tree(em) && start >= em->start &&
2768 start < extent_map_end(em)) {
2769 atomic_inc(&em->refs);
2770 return em;
2771 }
2772
2773 free_extent_map(em);
2774 *em_cached = NULL;
2775 }
2776
2777 em = get_extent(inode, page, pg_offset, start, len, 0);
2778 if (em_cached && !IS_ERR_OR_NULL(em)) {
2779 BUG_ON(*em_cached);
2780 atomic_inc(&em->refs);
2781 *em_cached = em;
2782 }
2783 return em;
2784}

2785/*
2786 * Basic readpage implementation.  Locked extent state structs are inserted
2787 * into the tree and are removed when the IO is done (by the end_io
2788 * handlers).
2789 * XXX JDM: This needs looking at to ensure proper page locking
2790 */
2791static int __do_readpage(struct extent_io_tree *tree,
2792 struct page *page,
2793 get_extent_t *get_extent,
2794 struct extent_map **em_cached,
2795 struct bio **bio, int mirror_num,
2796 unsigned long *bio_flags, int rw)
2797{
2798 struct inode *inode = page->mapping->host;
2799 u64 start = page_offset(page);
2800 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2801 u64 end;
2802 u64 cur = start;
2803 u64 extent_offset;
2804 u64 last_byte = i_size_read(inode);
2805 u64 block_start;
2806 u64 cur_end;
2807 sector_t sector;
2808 struct extent_map *em;
2809 struct block_device *bdev;
2810 int ret;
2811 int nr = 0;
2812 int parent_locked = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
2813 size_t pg_offset = 0;
2814 size_t iosize;
2815 size_t disk_io_size;
2816 size_t blocksize = inode->i_sb->s_blocksize;
2817 unsigned long this_bio_flag = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
2818
2819 set_page_extent_mapped(page);
2820
2821 end = page_end;
2822 if (!PageUptodate(page)) {
2823 if (cleancache_get_page(page) == 0) {
2824 BUG_ON(blocksize != PAGE_SIZE);
2825 unlock_extent(tree, start, end);
2826 goto out;
2827 }
2828 }
2829
2830 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2831 char *userpage;
2832 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2833
2834 if (zero_offset) {
2835 iosize = PAGE_CACHE_SIZE - zero_offset;
2836 userpage = kmap_atomic(page);
2837 memset(userpage + zero_offset, 0, iosize);
2838 flush_dcache_page(page);
2839 kunmap_atomic(userpage);
2840 }
2841 }
2842 while (cur <= end) {
2843 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2844
2845 if (cur >= last_byte) {
2846 char *userpage;
2847 struct extent_state *cached = NULL;
2848
2849 iosize = PAGE_CACHE_SIZE - pg_offset;
2850 userpage = kmap_atomic(page);
2851 memset(userpage + pg_offset, 0, iosize);
2852 flush_dcache_page(page);
2853 kunmap_atomic(userpage);
2854 set_extent_uptodate(tree, cur, cur + iosize - 1,
2855 &cached, GFP_NOFS);
2856 if (!parent_locked)
2857 unlock_extent_cached(tree, cur,
2858 cur + iosize - 1,
2859 &cached, GFP_NOFS);
2860 break;
2861 }
2862 em = __get_extent_map(inode, page, pg_offset, cur,
2863 end - cur + 1, get_extent, em_cached);
2864 if (IS_ERR_OR_NULL(em)) {
2865 SetPageError(page);
2866 if (!parent_locked)
2867 unlock_extent(tree, cur, end);
2868 break;
2869 }
2870 extent_offset = cur - em->start;
2871 BUG_ON(extent_map_end(em) <= cur);
2872 BUG_ON(end < cur);
2873
2874 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2875 this_bio_flag |= EXTENT_BIO_COMPRESSED;
2876 extent_set_compress_type(&this_bio_flag,
2877 em->compress_type);
2878 }
2879
2880 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2881 cur_end = min(extent_map_end(em) - 1, end);
2882 iosize = ALIGN(iosize, blocksize);
2883 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2884 disk_io_size = em->block_len;
2885 sector = em->block_start >> 9;
2886 } else {
2887 sector = (em->block_start + extent_offset) >> 9;
2888 disk_io_size = iosize;
2889 }
2890 bdev = em->bdev;
2891 block_start = em->block_start;
2892 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2893 block_start = EXTENT_MAP_HOLE;
2894 free_extent_map(em);
2895 em = NULL;
2896
2897 /* we've found a hole, just zero and go on */
2898 if (block_start == EXTENT_MAP_HOLE) {
2899 char *userpage;
2900 struct extent_state *cached = NULL;
2901
2902 userpage = kmap_atomic(page);
2903 memset(userpage + pg_offset, 0, iosize);
2904 flush_dcache_page(page);
2905 kunmap_atomic(userpage);
2906
2907 set_extent_uptodate(tree, cur, cur + iosize - 1,
2908 &cached, GFP_NOFS);
2909 unlock_extent_cached(tree, cur, cur + iosize - 1,
2910 &cached, GFP_NOFS);
2911 cur = cur + iosize;
2912 pg_offset += iosize;
2913 continue;
2914 }
2915 /* the get_extent function already copied into the page */
2916 if (test_range_bit(tree, cur, cur_end,
2917 EXTENT_UPTODATE, 1, NULL)) {
2918 check_page_uptodate(tree, page);
2919 if (!parent_locked)
2920 unlock_extent(tree, cur, cur + iosize - 1);
2921 cur = cur + iosize;
2922 pg_offset += iosize;
2923 continue;
2924 }
2925 /* we have an inline extent but it didn't get marked
2926 * up-to-date. Error out.
2927 */
2928 if (block_start == EXTENT_MAP_INLINE) {
2929 SetPageError(page);
2930 if (!parent_locked)
2931 unlock_extent(tree, cur, cur + iosize - 1);
2932 cur = cur + iosize;
2933 pg_offset += iosize;
2934 continue;
2935 }
2936
2937 pnr -= page->index;
2938 ret = submit_extent_page(rw, tree, page,
2939 sector, disk_io_size, pg_offset,
2940 bdev, bio, pnr,
2941 end_bio_extent_readpage, mirror_num,
2942 *bio_flags,
2943 this_bio_flag);
2944 if (!ret) {
2945 nr++;
2946 *bio_flags = this_bio_flag;
2947 } else {
2948 SetPageError(page);
2949 if (!parent_locked)
2950 unlock_extent(tree, cur, cur + iosize - 1);
2951 }
2952 cur = cur + iosize;
2953 pg_offset += iosize;
2954 }
2955out:
2956 if (!nr) {
2957 if (!PageError(page))
2958 SetPageUptodate(page);
2959 unlock_page(page);
2960 }
2961 return 0;
2962}
2963
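/*
 * Read a run of pages that are contiguous in file offset.  The whole range
 * is locked in the io tree and any overlapping ordered extents are waited
 * on before the individual pages are handed to __do_readpage().
 */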
2964static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
2965 struct page *pages[], int nr_pages,
2966 u64 start, u64 end,
2967 get_extent_t *get_extent,
2968 struct extent_map **em_cached,
2969 struct bio **bio, int mirror_num,
2970 unsigned long *bio_flags, int rw)
2971{
2972 struct inode *inode;
2973 struct btrfs_ordered_extent *ordered;
2974 int index;
2975
2976 inode = pages[0]->mapping->host;
2977 while (1) {
2978 lock_extent(tree, start, end);
2979 ordered = btrfs_lookup_ordered_range(inode, start,
2980 end - start + 1);
2981 if (!ordered)
2982 break;
2983 unlock_extent(tree, start, end);
2984 btrfs_start_ordered_extent(inode, ordered, 1);
2985 btrfs_put_ordered_extent(ordered);
2986 }
2987
2988 for (index = 0; index < nr_pages; index++) {
2989 __do_readpage(tree, pages[index], get_extent, em_cached, bio,
2990 mirror_num, bio_flags, rw);
2991 page_cache_release(pages[index]);
2992 }
2993}
2994
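/*
 * Split the page array into runs that are contiguous in file offset and
 * feed each run to __do_contiguous_readpages().
 */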
2995static void __extent_readpages(struct extent_io_tree *tree,
2996 struct page *pages[],
2997 int nr_pages, get_extent_t *get_extent,
2998 struct extent_map **em_cached,
2999 struct bio **bio, int mirror_num,
3000 unsigned long *bio_flags, int rw)
3001{
3002 u64 start = 0;
3003 u64 end = 0;
3004 u64 page_start;
3005 int index;
3006 int first_index = 0;
3007
3008 for (index = 0; index < nr_pages; index++) {
3009 page_start = page_offset(pages[index]);
3010 if (!end) {
3011 start = page_start;
3012 end = start + PAGE_CACHE_SIZE - 1;
3013 first_index = index;
3014 } else if (end + 1 == page_start) {
3015 end += PAGE_CACHE_SIZE;
3016 } else {
3017 __do_contiguous_readpages(tree, &pages[first_index],
3018 index - first_index, start,
3019 end, get_extent, em_cached,
3020 bio, mirror_num, bio_flags,
3021 rw);
3022 start = page_start;
3023 end = start + PAGE_CACHE_SIZE - 1;
3024 first_index = index;
3025 }
3026 }
3027
3028 if (end)
3029 __do_contiguous_readpages(tree, &pages[first_index],
3030 index - first_index, start,
3031 end, get_extent, em_cached, bio,
3032 mirror_num, bio_flags, rw);
3033}
3034
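/*
 * Lock the extent range covered by one page, waiting out any ordered
 * extent that covers the page start, then read the page in via
 * __do_readpage().
 */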
3035static int __extent_read_full_page(struct extent_io_tree *tree,
3036 struct page *page,
3037 get_extent_t *get_extent,
3038 struct bio **bio, int mirror_num,
3039 unsigned long *bio_flags, int rw)
3040{
3041 struct inode *inode = page->mapping->host;
3042 struct btrfs_ordered_extent *ordered;
3043 u64 start = page_offset(page);
3044 u64 end = start + PAGE_CACHE_SIZE - 1;
3045 int ret;
3046
3047 while (1) {
3048 lock_extent(tree, start, end);
3049 ordered = btrfs_lookup_ordered_extent(inode, start);
3050 if (!ordered)
3051 break;
3052 unlock_extent(tree, start, end);
3053 btrfs_start_ordered_extent(inode, ordered, 1);
3054 btrfs_put_ordered_extent(ordered);
3055 }
3056
3057 ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
3058 bio_flags, rw);
3059 return ret;
3060}
3061
3062int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
3063 get_extent_t *get_extent, int mirror_num)
3064{
3065 struct bio *bio = NULL;
3066 unsigned long bio_flags = 0;
3067 int ret;
3068
3069 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
3070 &bio_flags, READ);
3071 if (bio)
3072 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
3073 return ret;
3074}
3075
3076int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
3077 get_extent_t *get_extent, int mirror_num)
3078{
3079 struct bio *bio = NULL;
3080 unsigned long bio_flags = EXTENT_BIO_PARENT_LOCKED;
3081 int ret;
3082
3083 ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
3084 &bio_flags, READ);
3085 if (bio)
3086 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
3087 return ret;
3088}
3089
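/*
 * Charge nr_written pages against wbc->nr_to_write and, for cyclic or
 * whole-file writeback, advance the mapping's writeback_index so the next
 * pass resumes after the pages already handled here.
 */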
3090static noinline void update_nr_written(struct page *page,
3091 struct writeback_control *wbc,
3092 unsigned long nr_written)
3093{
3094 wbc->nr_to_write -= nr_written;
3095 if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
3096 wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
3097 page->mapping->writeback_index = page->index + nr_written;
3098}
3099
3100/*
3101 * The writepage semantics are similar to regular writepage.  Extent
3102 * records are inserted to lock ranges in the tree, and as dirty areas
3103 * are found, they are marked for writeback.  Then the lock bits are
3104 * removed and the end_io handler clears the writeback ranges.
3105 */
3106static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3107 void *data)
3108{
3109 struct inode *inode = page->mapping->host;
3110 struct extent_page_data *epd = data;
3111 struct extent_io_tree *tree = epd->tree;
3112 u64 start = page_offset(page);
3113 u64 delalloc_start;
3114 u64 page_end = start + PAGE_CACHE_SIZE - 1;
3115 u64 end;
3116 u64 cur = start;
3117 u64 extent_offset;
3118 u64 last_byte = i_size_read(inode);
3119 u64 block_start;
3120 u64 iosize;
3121 sector_t sector;
3122 struct extent_state *cached_state = NULL;
3123 struct extent_map *em;
3124 struct block_device *bdev;
3125 int ret;
3126 int nr = 0;
3127 size_t pg_offset = 0;
3128 size_t blocksize;
3129 loff_t i_size = i_size_read(inode);
3130 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
3131 u64 nr_delalloc;
3132 u64 delalloc_end;
3133 int page_started;
3134 int compressed;
3135 int write_flags;
3136 unsigned long nr_written = 0;
3137 bool fill_delalloc = true;
3138
3139 if (wbc->sync_mode == WB_SYNC_ALL)
3140 write_flags = WRITE_SYNC;
3141 else
3142 write_flags = WRITE;
3143
3144 trace___extent_writepage(page, inode, wbc);
3145
3146 WARN_ON(!PageLocked(page));
3147
3148 ClearPageError(page);
3149
3150 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
3151 if (page->index > end_index ||
3152 (page->index == end_index && !pg_offset)) {
3153 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
3154 unlock_page(page);
3155 return 0;
3156 }
3157
3158 if (page->index == end_index) {
3159 char *userpage;
3160
3161 userpage = kmap_atomic(page);
3162 memset(userpage + pg_offset, 0,
3163 PAGE_CACHE_SIZE - pg_offset);
3164 kunmap_atomic(userpage);
3165 flush_dcache_page(page);
3166 }
3167 pg_offset = 0;
3168
3169 set_page_extent_mapped(page);
3170
3171 if (!tree->ops || !tree->ops->fill_delalloc)
3172 fill_delalloc = false;
3173
3174 delalloc_start = start;
3175 delalloc_end = 0;
3176 page_started = 0;
3177 if (!epd->extent_locked && fill_delalloc) {
3178 u64 delalloc_to_write = 0;
3179 /*
3180 * make sure the wbc mapping index is at least updated
3181 * to this page.
3182 */
3183 update_nr_written(page, wbc, 0);
3184
3185 while (delalloc_end < page_end) {
3186 nr_delalloc = find_lock_delalloc_range(inode, tree,
3187 page,
3188 &delalloc_start,
3189 &delalloc_end,
3190 128 * 1024 * 1024);
3191 if (nr_delalloc == 0) {
3192 delalloc_start = delalloc_end + 1;
3193 continue;
3194 }
3195 ret = tree->ops->fill_delalloc(inode, page,
3196 delalloc_start,
3197 delalloc_end,
3198 &page_started,
3199 &nr_written);
3200 /* File system has been set read-only */
3201 if (ret) {
3202 SetPageError(page);
3203 goto done;
3204 }
3205 /*
3206 * delalloc_end is already one less than the total
3207 * length, so we don't subtract one from
3208 * PAGE_CACHE_SIZE
3209 */
3210 delalloc_to_write += (delalloc_end - delalloc_start +
3211 PAGE_CACHE_SIZE) >>
3212 PAGE_CACHE_SHIFT;
3213 delalloc_start = delalloc_end + 1;
3214 }
3215 if (wbc->nr_to_write < delalloc_to_write) {
3216 int thresh = 8192;
3217
3218 if (delalloc_to_write < thresh * 2)
3219 thresh = delalloc_to_write;
3220 wbc->nr_to_write = min_t(u64, delalloc_to_write,
3221 thresh);
3222 }
3223
3224 /* did the fill delalloc function already unlock and start
3225 * the IO?
3226 */
3227 if (page_started) {
3228 ret = 0;
3229 /*
3230 * we've unlocked the page, so we can't update
3231 * the mapping's writeback index, just update
3232 * nr_to_write.
3233 */
3234 wbc->nr_to_write -= nr_written;
3235 goto done_unlocked;
3236 }
3237 }
3238 if (tree->ops && tree->ops->writepage_start_hook) {
3239 ret = tree->ops->writepage_start_hook(page, start,
3240 page_end);
3241 if (ret) {
3242 /* Fixup worker will requeue */
3243 if (ret == -EBUSY)
3244 wbc->pages_skipped++;
3245 else
3246 redirty_page_for_writepage(wbc, page);
3247 update_nr_written(page, wbc, nr_written);
3248 unlock_page(page);
3249 ret = 0;
3250 goto done_unlocked;
3251 }
3252 }
3253
3254 /*
3255 * we don't want to touch the inode after unlocking the page,
3256 * so we update the mapping writeback index now
3257 */
3258 update_nr_written(page, wbc, nr_written + 1);
3259
3260 end = page_end;
3261 if (last_byte <= start) {
3262 if (tree->ops && tree->ops->writepage_end_io_hook)
3263 tree->ops->writepage_end_io_hook(page, start,
3264 page_end, NULL, 1);
3265 goto done;
3266 }
3267
3268 blocksize = inode->i_sb->s_blocksize;
3269
3270 while (cur <= end) {
3271 if (cur >= last_byte) {
3272 if (tree->ops && tree->ops->writepage_end_io_hook)
3273 tree->ops->writepage_end_io_hook(page, cur,
3274 page_end, NULL, 1);
3275 break;
3276 }
3277 em = epd->get_extent(inode, page, pg_offset, cur,
3278 end - cur + 1, 1);
3279 if (IS_ERR_OR_NULL(em)) {
3280 SetPageError(page);
3281 break;
3282 }
3283
3284 extent_offset = cur - em->start;
3285 BUG_ON(extent_map_end(em) <= cur);
3286 BUG_ON(end < cur);
3287 iosize = min(extent_map_end(em) - cur, end - cur + 1);
3288 iosize = ALIGN(iosize, blocksize);
3289 sector = (em->block_start + extent_offset) >> 9;
3290 bdev = em->bdev;
3291 block_start = em->block_start;
3292 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3293 free_extent_map(em);
3294 em = NULL;
3295
3296 /*
3297 * compressed and inline extents are written through other
3298 * paths in the FS
3299 */
3300 if (compressed || block_start == EXTENT_MAP_HOLE ||
3301 block_start == EXTENT_MAP_INLINE) {
3302 /*
3303 * end_io notification does not happen here for
3304 * compressed extents
3305 */
3306 if (!compressed && tree->ops &&
3307 tree->ops->writepage_end_io_hook)
3308 tree->ops->writepage_end_io_hook(page, cur,
3309 cur + iosize - 1,
3310 NULL, 1);
3311 else if (compressed) {
3312 /* we don't want to end_page_writeback on
3313 * a compressed extent. this happens
3314 * elsewhere
3315 */
3316 nr++;
3317 }
3318
3319 cur += iosize;
3320 pg_offset += iosize;
3321 continue;
3322 }
3323 /* leave this out until we have a page_mkwrite call */
3324 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
3325 EXTENT_DIRTY, 0, NULL)) {
3326 cur = cur + iosize;
3327 pg_offset += iosize;
3328 continue;
3329 }
3330
3331 if (tree->ops && tree->ops->writepage_io_hook) {
3332 ret = tree->ops->writepage_io_hook(page, cur,
3333 cur + iosize - 1);
3334 } else {
3335 ret = 0;
3336 }
3337 if (ret) {
3338 SetPageError(page);
3339 } else {
3340 unsigned long max_nr = end_index + 1;
3341
3342 set_range_writeback(tree, cur, cur + iosize - 1);
3343 if (!PageWriteback(page)) {
3344 btrfs_err(BTRFS_I(inode)->root->fs_info,
3345 "page %lu not writeback, cur %llu end %llu",
3346 page->index, cur, end);
3347 }
3348
3349 ret = submit_extent_page(write_flags, tree, page,
3350 sector, iosize, pg_offset,
3351 bdev, &epd->bio, max_nr,
3352 end_bio_extent_writepage,
3353 0, 0, 0);
3354 if (ret)
3355 SetPageError(page);
3356 }
3357 cur = cur + iosize;
3358 pg_offset += iosize;
3359 nr++;
3360 }
3361done:
3362 if (nr == 0) {
3363 /* make sure the mapping tag for page dirty gets cleared */
3364 set_page_writeback(page);
3365 end_page_writeback(page);
3366 }
3367 unlock_page(page);
3368
3369done_unlocked:
3370
3371 /* drop our reference on any cached states */
3372 free_extent_state(cached_state);
3373 return 0;
3374}
3375
3376static int eb_wait(void *word)
3377{
3378 io_schedule();
3379 return 0;
3380}
3381
3382void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3383{
3384 wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
3385 TASK_UNINTERRUPTIBLE);
3386}
3387
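/*
 * Prepare an extent buffer for writeback: take the tree write lock
 * (flushing the pending bio first if we would otherwise block), wait out a
 * previous writeback when this is a synchronous run, and move the buffer
 * from DIRTY to WRITEBACK.  Returns 1 if the buffer must be written, with
 * all of its pages locked, or 0 if there is nothing to do.
 */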
3388static int lock_extent_buffer_for_io(struct extent_buffer *eb,
3389 struct btrfs_fs_info *fs_info,
3390 struct extent_page_data *epd)
3391{
3392 unsigned long i, num_pages;
3393 int flush = 0;
3394 int ret = 0;
3395
3396 if (!btrfs_try_tree_write_lock(eb)) {
3397 flush = 1;
3398 flush_write_bio(epd);
3399 btrfs_tree_lock(eb);
3400 }
3401
3402 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3403 btrfs_tree_unlock(eb);
3404 if (!epd->sync_io)
3405 return 0;
3406 if (!flush) {
3407 flush_write_bio(epd);
3408 flush = 1;
3409 }
3410 while (1) {
3411 wait_on_extent_buffer_writeback(eb);
3412 btrfs_tree_lock(eb);
3413 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3414 break;
3415 btrfs_tree_unlock(eb);
3416 }
3417 }
3418
3419 /*
3420 * We need to do this to prevent races with anyone who checks whether the
3421 * eb is under IO, since we can end up having no IO bits set for a short period
3422 * of time.
3423 */
3424 spin_lock(&eb->refs_lock);
3425 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3426 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3427 spin_unlock(&eb->refs_lock);
3428 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3429 __percpu_counter_add(&fs_info->dirty_metadata_bytes,
3430 -eb->len,
3431 fs_info->dirty_metadata_batch);
3432 ret = 1;
3433 } else {
3434 spin_unlock(&eb->refs_lock);
3435 }
3436
3437 btrfs_tree_unlock(eb);
3438
3439 if (!ret)
3440 return ret;
3441
3442 num_pages = num_extent_pages(eb->start, eb->len);
3443 for (i = 0; i < num_pages; i++) {
3444 struct page *p = extent_buffer_page(eb, i);
3445
3446 if (!trylock_page(p)) {
3447 if (!flush) {
3448 flush_write_bio(epd);
3449 flush = 1;
3450 }
3451 lock_page(p);
3452 }
3453 }
3454
3455 return ret;
3456}
3457
3458static void end_extent_buffer_writeback(struct extent_buffer *eb)
3459{
3460 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3461 smp_mb__after_clear_bit();
3462 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3463}
3464
3465static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
3466{
3467 struct bio_vec *bvec;
3468 struct extent_buffer *eb;
3469 int i, done;
3470
3471 bio_for_each_segment_all(bvec, bio, i) {
3472 struct page *page = bvec->bv_page;
3473
3474 eb = (struct extent_buffer *)page->private;
3475 BUG_ON(!eb);
3476 done = atomic_dec_and_test(&eb->io_pages);
3477
3478 if (err || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
3479 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3480 ClearPageUptodate(page);
3481 SetPageError(page);
3482 }
3483
3484 end_page_writeback(page);
3485
3486 if (!done)
3487 continue;
3488
3489 end_extent_buffer_writeback(eb);
3490 }
3491
3492 bio_put(bio);
3493}
3494
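/*
 * Submit every page of one extent buffer for write.  On a submission error
 * the remaining io_pages count is dropped so the writeback bit gets cleared
 * and the remaining locked pages are unlocked before returning -EIO.
 */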
3495static int write_one_eb(struct extent_buffer *eb,
3496 struct btrfs_fs_info *fs_info,
3497 struct writeback_control *wbc,
3498 struct extent_page_data *epd)
3499{
3500 struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3501 struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
3502 u64 offset = eb->start;
3503 unsigned long i, num_pages;
3504 unsigned long bio_flags = 0;
3505 int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META;
3506 int ret = 0;
3507
3508 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3509 num_pages = num_extent_pages(eb->start, eb->len);
3510 atomic_set(&eb->io_pages, num_pages);
3511 if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
3512 bio_flags = EXTENT_BIO_TREE_LOG;
3513
3514 for (i = 0; i < num_pages; i++) {
3515 struct page *p = extent_buffer_page(eb, i);
3516
3517 clear_page_dirty_for_io(p);
3518 set_page_writeback(p);
3519 ret = submit_extent_page(rw, tree, p, offset >> 9,
3520 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
3521 -1, end_bio_extent_buffer_writepage,
3522 0, epd->bio_flags, bio_flags);
3523 epd->bio_flags = bio_flags;
3524 if (ret) {
3525 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3526 SetPageError(p);
3527 if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3528 end_extent_buffer_writeback(eb);
3529 ret = -EIO;
3530 break;
3531 }
3532 offset += PAGE_CACHE_SIZE;
3533 update_nr_written(p, wbc, 1);
3534 unlock_page(p);
3535 }
3536
3537 if (unlikely(ret)) {
3538 for (; i < num_pages; i++) {
3539 struct page *p = extent_buffer_page(eb, i);
3540 unlock_page(p);
3541 }
3542 }
3543
3544 return ret;
3545}
3546
3547int btree_write_cache_pages(struct address_space *mapping,
3548 struct writeback_control *wbc)
3549{
3550 struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3551 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3552 struct extent_buffer *eb, *prev_eb = NULL;
3553 struct extent_page_data epd = {
3554 .bio = NULL,
3555 .tree = tree,
3556 .extent_locked = 0,
3557 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3558 .bio_flags = 0,
3559 };
3560 int ret = 0;
3561 int done = 0;
3562 int nr_to_write_done = 0;
3563 struct pagevec pvec;
3564 int nr_pages;
3565 pgoff_t index;
3566 pgoff_t end; /* Inclusive */
3567 int scanned = 0;
3568 int tag;
3569
3570 pagevec_init(&pvec, 0);
3571 if (wbc->range_cyclic) {
3572 index = mapping->writeback_index; /* Start from prev offset */
3573 end = -1;
3574 } else {
3575 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3576 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3577 scanned = 1;
3578 }
3579 if (wbc->sync_mode == WB_SYNC_ALL)
3580 tag = PAGECACHE_TAG_TOWRITE;
3581 else
3582 tag = PAGECACHE_TAG_DIRTY;
3583retry:
3584 if (wbc->sync_mode == WB_SYNC_ALL)
3585 tag_pages_for_writeback(mapping, index, end);
3586 while (!done && !nr_to_write_done && (index <= end) &&
3587 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3588 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3589 unsigned i;
3590
3591 scanned = 1;
3592 for (i = 0; i < nr_pages; i++) {
3593 struct page *page = pvec.pages[i];
3594
3595 if (!PagePrivate(page))
3596 continue;
3597
3598 if (!wbc->range_cyclic && page->index > end) {
3599 done = 1;
3600 break;
3601 }
3602
3603 spin_lock(&mapping->private_lock);
3604 if (!PagePrivate(page)) {
3605 spin_unlock(&mapping->private_lock);
3606 continue;
3607 }
3608
3609 eb = (struct extent_buffer *)page->private;
3610
3611 /*
3612 * Shouldn't happen and normally this would be a BUG_ON
3613 * but no sense in crashing the user's box for something
3614 * we can survive anyway.
3615 */
3616 if (WARN_ON(!eb)) {
3617 spin_unlock(&mapping->private_lock);
3618 continue;
3619 }
3620
3621 if (eb == prev_eb) {
3622 spin_unlock(&mapping->private_lock);
3623 continue;
3624 }
3625
3626 ret = atomic_inc_not_zero(&eb->refs);
3627 spin_unlock(&mapping->private_lock);
3628 if (!ret)
3629 continue;
3630
3631 prev_eb = eb;
3632 ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3633 if (!ret) {
3634 free_extent_buffer(eb);
3635 continue;
3636 }
3637
3638 ret = write_one_eb(eb, fs_info, wbc, &epd);
3639 if (ret) {
3640 done = 1;
3641 free_extent_buffer(eb);
3642 break;
3643 }
3644 free_extent_buffer(eb);
3645
3646 /*
3647 * the filesystem may choose to bump up nr_to_write.
3648 * We have to make sure to honor the new nr_to_write
3649 * at any time
3650 */
3651 nr_to_write_done = wbc->nr_to_write <= 0;
3652 }
3653 pagevec_release(&pvec);
3654 cond_resched();
3655 }
3656 if (!scanned && !done) {
3657 /*
3658 * We hit the last page and there is more work to be done: wrap
3659 * back to the start of the file
3660 */
3661 scanned = 1;
3662 index = 0;
3663 goto retry;
3664 }
3665 flush_write_bio(&epd);
3666 return ret;
3667}
3668
3669/**
3670 * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
3671 * @mapping: address space structure to write
3672 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3673 * @writepage: function called for each page
3674 * @data: data passed to the writepage function
 * @flush_fn: function used to flush any bio built up while walking the pages
3675 *
3676 * If a page is already under I/O, extent_write_cache_pages() skips it, even
3677 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
3678 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
3679 * and msync() need to guarantee that all the data which was dirty at the time
3680 * the call was made get new I/O started against them. If wbc->sync_mode is
3681 * WB_SYNC_ALL then we were called for data integrity and we must wait for
3682 * existing IO to complete.
3683 */
3684static int extent_write_cache_pages(struct extent_io_tree *tree,
3685 struct address_space *mapping,
3686 struct writeback_control *wbc,
3687 writepage_t writepage, void *data,
3688 void (*flush_fn)(void *))
3689{
3690 struct inode *inode = mapping->host;
3691 int ret = 0;
3692 int done = 0;
3693 int nr_to_write_done = 0;
3694 struct pagevec pvec;
3695 int nr_pages;
3696 pgoff_t index;
3697 pgoff_t end; /* Inclusive */
3698 int scanned = 0;
3699 int tag;
3700
3701 /*
3702 * We have to hold onto the inode so that ordered extents can do their
3703 * work when the IO finishes. The alternative to this is failing to add
3704 * an ordered extent if the igrab() fails there and that is a huge pain
3705 * to deal with, so instead just hold onto the inode throughout the
3706 * writepages operation. If it fails here we are freeing up the inode
3707 * anyway and we'd rather not waste our time writing out stuff that is
3708 * going to be truncated anyway.
3709 */
3710 if (!igrab(inode))
3711 return 0;
3712
3713 pagevec_init(&pvec, 0);
3714 if (wbc->range_cyclic) {
3715 index = mapping->writeback_index; /* Start from prev offset */
3716 end = -1;
3717 } else {
3718 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3719 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3720 scanned = 1;
3721 }
3722 if (wbc->sync_mode == WB_SYNC_ALL)
3723 tag = PAGECACHE_TAG_TOWRITE;
3724 else
3725 tag = PAGECACHE_TAG_DIRTY;
3726retry:
3727 if (wbc->sync_mode == WB_SYNC_ALL)
3728 tag_pages_for_writeback(mapping, index, end);
3729 while (!done && !nr_to_write_done && (index <= end) &&
3730 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3731 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3732 unsigned i;
3733
3734 scanned = 1;
3735 for (i = 0; i < nr_pages; i++) {
3736 struct page *page = pvec.pages[i];
3737
3738 /*
3739 * At this point we hold neither mapping->tree_lock nor
3740 * lock on the page itself: the page may be truncated or
3741 * invalidated (changing page->mapping to NULL), or even
3742 * swizzled back from swapper_space to tmpfs file
3743 * mapping
3744 */
3745 if (!trylock_page(page)) {
3746 flush_fn(data);
3747 lock_page(page);
3748 }
3749
3750 if (unlikely(page->mapping != mapping)) {
3751 unlock_page(page);
3752 continue;
3753 }
3754
3755 if (!wbc->range_cyclic && page->index > end) {
3756 done = 1;
3757 unlock_page(page);
3758 continue;
3759 }
3760
3761 if (wbc->sync_mode != WB_SYNC_NONE) {
3762 if (PageWriteback(page))
3763 flush_fn(data);
3764 wait_on_page_writeback(page);
3765 }
3766
3767 if (PageWriteback(page) ||
3768 !clear_page_dirty_for_io(page)) {
3769 unlock_page(page);
3770 continue;
3771 }
3772
3773 ret = (*writepage)(page, wbc, data);
3774
3775 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
3776 unlock_page(page);
3777 ret = 0;
3778 }
3779 if (ret)
3780 done = 1;
3781
3782 /*
3783 * the filesystem may choose to bump up nr_to_write.
3784 * We have to make sure to honor the new nr_to_write
3785 * at any time
3786 */
3787 nr_to_write_done = wbc->nr_to_write <= 0;
3788 }
3789 pagevec_release(&pvec);
3790 cond_resched();
3791 }
3792 if (!scanned && !done) {
3793 /*
3794 * We hit the last page and there is more work to be done: wrap
3795 * back to the start of the file
3796 */
3797 scanned = 1;
3798 index = 0;
3799 goto retry;
3800 }
3801 btrfs_add_delayed_iput(inode);
3802 return ret;
3803}
3804
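/*
 * Submit whatever bio has been accumulated in the extent_page_data, using
 * WRITE_SYNC when the caller asked for synchronous IO.
 */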
3805static void flush_epd_write_bio(struct extent_page_data *epd)
3806{
3807 if (epd->bio) {
3808 int rw = WRITE;
3809 int ret;
3810
3811 if (epd->sync_io)
3812 rw = WRITE_SYNC;
3813
3814 ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags);
3815 BUG_ON(ret < 0); /* -ENOMEM */
3816 epd->bio = NULL;
3817 }
3818}
3819
3820static noinline void flush_write_bio(void *data)
3821{
3822 struct extent_page_data *epd = data;
3823 flush_epd_write_bio(epd);
3824}
3825
3826int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
3827 get_extent_t *get_extent,
3828 struct writeback_control *wbc)
3829{
3830 int ret;
3831 struct extent_page_data epd = {
3832 .bio = NULL,
3833 .tree = tree,
3834 .get_extent = get_extent,
3835 .extent_locked = 0,
3836 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3837 .bio_flags = 0,
3838 };
3839
3840 ret = __extent_writepage(page, wbc, &epd);
3841
3842 flush_epd_write_bio(&epd);
3843 return ret;
3844}
3845
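/*
 * Write out a range of pages that the caller has already locked; setting
 * extent_locked makes __extent_writepage() skip the usual delalloc fill
 * step.  Pages that are no longer dirty are completed through the
 * writepage_end_io_hook instead of being resubmitted.
 */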
3846int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
3847 u64 start, u64 end, get_extent_t *get_extent,
3848 int mode)
3849{
3850 int ret = 0;
3851 struct address_space *mapping = inode->i_mapping;
3852 struct page *page;
3853 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
3854 PAGE_CACHE_SHIFT;
3855
3856 struct extent_page_data epd = {
3857 .bio = NULL,
3858 .tree = tree,
3859 .get_extent = get_extent,
3860 .extent_locked = 1,
3861 .sync_io = mode == WB_SYNC_ALL,
3862 .bio_flags = 0,
3863 };
3864 struct writeback_control wbc_writepages = {
3865 .sync_mode = mode,
3866 .nr_to_write = nr_pages * 2,
3867 .range_start = start,
3868 .range_end = end + 1,
3869 };
3870
3871 while (start <= end) {
3872 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
3873 if (clear_page_dirty_for_io(page))
3874 ret = __extent_writepage(page, &wbc_writepages, &epd);
3875 else {
3876 if (tree->ops && tree->ops->writepage_end_io_hook)
3877 tree->ops->writepage_end_io_hook(page, start,
3878 start + PAGE_CACHE_SIZE - 1,
3879 NULL, 1);
3880 unlock_page(page);
3881 }
3882 page_cache_release(page);
3883 start += PAGE_CACHE_SIZE;
3884 }
3885
3886 flush_epd_write_bio(&epd);
3887 return ret;
3888}
3889
3890int extent_writepages(struct extent_io_tree *tree,
3891 struct address_space *mapping,
3892 get_extent_t *get_extent,
3893 struct writeback_control *wbc)
3894{
3895 int ret = 0;
3896 struct extent_page_data epd = {
3897 .bio = NULL,
3898 .tree = tree,
3899 .get_extent = get_extent,
3900 .extent_locked = 0,
3901 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3902 .bio_flags = 0,
3903 };
3904
3905 ret = extent_write_cache_pages(tree, mapping, wbc,
3906 __extent_writepage, &epd,
3907 flush_write_bio);
3908 flush_epd_write_bio(&epd);
3909 return ret;
3910}
3911
3912int extent_readpages(struct extent_io_tree *tree,
3913 struct address_space *mapping,
3914 struct list_head *pages, unsigned nr_pages,
3915 get_extent_t get_extent)
3916{
3917 struct bio *bio = NULL;
3918 unsigned page_idx;
3919 unsigned long bio_flags = 0;
3920 struct page *pagepool[16];
3921 struct page *page;
3922 struct extent_map *em_cached = NULL;
3923 int nr = 0;
3924
3925 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
3926 page = list_entry(pages->prev, struct page, lru);
3927
3928 prefetchw(&page->flags);
3929 list_del(&page->lru);
3930 if (add_to_page_cache_lru(page, mapping,
3931 page->index, GFP_NOFS)) {
3932 page_cache_release(page);
3933 continue;
3934 }
3935
3936 pagepool[nr++] = page;
3937 if (nr < ARRAY_SIZE(pagepool))
3938 continue;
3939 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
3940 &bio, 0, &bio_flags, READ);
3941 nr = 0;
3942 }
3943 if (nr)
3944 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
3945 &bio, 0, &bio_flags, READ);
3946
3947 if (em_cached)
3948 free_extent_map(em_cached);
3949
3950 BUG_ON(!list_empty(pages));
3951 if (bio)
3952 return submit_one_bio(READ, bio, 0, bio_flags);
3953 return 0;
3954}
3955
3956/*
3957 * Basic invalidatepage code; this waits on any locked or writeback
3958 * ranges corresponding to the page, and then deletes any extent state
3959 * records from the tree.
3960 */
3961int extent_invalidatepage(struct extent_io_tree *tree,
3962 struct page *page, unsigned long offset)
3963{
3964 struct extent_state *cached_state = NULL;
3965 u64 start = page_offset(page);
3966 u64 end = start + PAGE_CACHE_SIZE - 1;
3967 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
3968
3969 start += ALIGN(offset, blocksize);
3970 if (start > end)
3971 return 0;
3972
3973 lock_extent_bits(tree, start, end, 0, &cached_state);
3974 wait_on_page_writeback(page);
3975 clear_extent_bit(tree, start, end,
3976 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3977 EXTENT_DO_ACCOUNTING,
3978 1, 1, &cached_state, GFP_NOFS);
3979 return 0;
3980}
3981
3982/*
3983 * A helper for releasepage; this tests for areas of the page that
3984 * are locked or under IO and drops the related state bits if it is safe
3985 * to drop the page.
3986 */
3987static int try_release_extent_state(struct extent_map_tree *map,
3988 struct extent_io_tree *tree,
3989 struct page *page, gfp_t mask)
3990{
3991 u64 start = page_offset(page);
3992 u64 end = start + PAGE_CACHE_SIZE - 1;
3993 int ret = 1;
3994
3995 if (test_range_bit(tree, start, end,
3996 EXTENT_IOBITS, 0, NULL))
3997 ret = 0;
3998 else {
3999 if ((mask & GFP_NOFS) == GFP_NOFS)
4000 mask = GFP_NOFS;
4001 /*
4002 * at this point we can safely clear everything except the
4003 * locked bit and the nodatasum bit
4004 */
4005 ret = clear_extent_bit(tree, start, end,
4006 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
4007 0, 0, NULL, mask);
4008
4009 /* if clear_extent_bit failed for ENOMEM reasons,
4010 * we can't allow the release to continue.
4011 */
4012 if (ret < 0)
4013 ret = 0;
4014 else
4015 ret = 1;
4016 }
4017 return ret;
4018}
4019
4020/*
4021 * a helper for releasepage. As long as there are no locked extents
4022 * in the range corresponding to the page, both state records and extent
4023 * map records are removed
4024 */
4025int try_release_extent_mapping(struct extent_map_tree *map,
4026 struct extent_io_tree *tree, struct page *page,
4027 gfp_t mask)
4028{
4029 struct extent_map *em;
4030 u64 start = page_offset(page);
4031 u64 end = start + PAGE_CACHE_SIZE - 1;
4032
4033 if ((mask & __GFP_WAIT) &&
4034 page->mapping->host->i_size > 16 * 1024 * 1024) {
4035 u64 len;
4036 while (start <= end) {
4037 len = end - start + 1;
4038 write_lock(&map->lock);
4039 em = lookup_extent_mapping(map, start, len);
4040 if (!em) {
4041 write_unlock(&map->lock);
4042 break;
4043 }
4044 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
4045 em->start != start) {
4046 write_unlock(&map->lock);
4047 free_extent_map(em);
4048 break;
4049 }
4050 if (!test_range_bit(tree, em->start,
4051 extent_map_end(em) - 1,
4052 EXTENT_LOCKED | EXTENT_WRITEBACK,
4053 0, NULL)) {
4054 remove_extent_mapping(map, em);
4055 /* once for the rb tree */
4056 free_extent_map(em);
4057 }
4058 start = extent_map_end(em);
4059 write_unlock(&map->lock);
4060
4061 /* once for us */
4062 free_extent_map(em);
4063 }
4064 }
4065 return try_release_extent_state(map, tree, page, mask);
4066}
4067
4068/*
4069 * helper function for fiemap, which doesn't want to see any holes.
4070 * This maps until we find something past 'last'
4071 */
4072static struct extent_map *get_extent_skip_holes(struct inode *inode,
4073 u64 offset,
4074 u64 last,
4075 get_extent_t *get_extent)
4076{
4077 u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
4078 struct extent_map *em;
4079 u64 len;
4080
4081 if (offset >= last)
4082 return NULL;
4083
4084 while (1) {
4085 len = last - offset;
4086 if (len == 0)
4087 break;
4088 len = ALIGN(len, sectorsize);
4089 em = get_extent(inode, NULL, 0, offset, len, 0);
4090 if (IS_ERR_OR_NULL(em))
4091 return em;
4092
4093 /* if this isn't a hole return it */
4094 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
4095 em->block_start != EXTENT_MAP_HOLE) {
4096 return em;
4097 }
4098
4099 /* this is a hole, advance to the next extent */
4100 offset = extent_map_end(em);
4101 free_extent_map(em);
4102 if (offset >= last)
4103 break;
4104 }
4105 return NULL;
4106}
4107
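/*
 * Backref walker callback used by fiemap below: bump the reference count
 * passed in via ctx and stop the iteration (return 1) as soon as a second
 * reference proves the extent is shared.
 */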
4108static noinline int count_ext_ref(u64 inum, u64 offset, u64 root_id, void *ctx)
4109{
4110 unsigned long cnt = *((unsigned long *)ctx);
4111
4112 cnt++;
4113 *((unsigned long *)ctx) = cnt;
4114
4115 /* Now we're sure that the extent is shared. */
4116 if (cnt > 1)
4117 return 1;
4118 return 0;
4119}
4120
4121int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4122 __u64 start, __u64 len, get_extent_t *get_extent)
4123{
4124 int ret = 0;
4125 u64 off = start;
4126 u64 max = start + len;
4127 u32 flags = 0;
4128 u32 found_type;
4129 u64 last;
4130 u64 last_for_get_extent = 0;
4131 u64 disko = 0;
4132 u64 isize = i_size_read(inode);
4133 struct btrfs_key found_key;
4134 struct extent_map *em = NULL;
4135 struct extent_state *cached_state = NULL;
4136 struct btrfs_path *path;
4137 int end = 0;
4138 u64 em_start = 0;
4139 u64 em_len = 0;
4140 u64 em_end = 0;
4141
4142 if (len == 0)
4143 return -EINVAL;
4144
4145 path = btrfs_alloc_path();
4146 if (!path)
4147 return -ENOMEM;
4148 path->leave_spinning = 1;
4149
4150 start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
4151 len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
4152
4153 /*
4154 * lookup the last file extent. We're not using i_size here
4155 * because there might be preallocation past i_size
4156 */
4157 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
4158 path, btrfs_ino(inode), -1, 0);
4159 if (ret < 0) {
4160 btrfs_free_path(path);
4161 return ret;
4162 }
4163 WARN_ON(!ret);
4164 path->slots[0]--;
4165 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
4166 found_type = btrfs_key_type(&found_key);
4167
4168 /* No extents, but there might be delalloc bits */
4169 if (found_key.objectid != btrfs_ino(inode) ||
4170 found_type != BTRFS_EXTENT_DATA_KEY) {
4171 /* have to trust i_size as the end */
4172 last = (u64)-1;
4173 last_for_get_extent = isize;
4174 } else {
4175 /*
4176 * remember the start of the last extent. There are a
4177 * bunch of different factors that go into the length of the
4178 * extent, so it's much less complex to remember where it started
4179 */
4180 last = found_key.offset;
4181 last_for_get_extent = last + 1;
4182 }
4183 btrfs_release_path(path);
4184
4185 /*
4186 * we might have some extents allocated but more delalloc past those
4187 * extents. so, we trust isize unless the start of the last extent is
4188 * beyond isize
4189 */
4190 if (last < isize) {
4191 last = (u64)-1;
4192 last_for_get_extent = isize;
4193 }
4194
4195 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 0,
4196 &cached_state);
4197
4198 em = get_extent_skip_holes(inode, start, last_for_get_extent,
4199 get_extent);
4200 if (!em)
4201 goto out;
4202 if (IS_ERR(em)) {
4203 ret = PTR_ERR(em);
4204 goto out;
4205 }
4206
4207 while (!end) {
4208 u64 offset_in_extent = 0;
4209
4210 /* break if the extent we found is outside the range */
4211 if (em->start >= max || extent_map_end(em) < off)
4212 break;
4213
4214 /*
4215 * get_extent may return an extent that starts before our
4216 * requested range. We have to make sure the ranges
4217 * we return to fiemap always move forward and don't
4218 * overlap, so adjust the offsets here
4219 */
4220 em_start = max(em->start, off);
4221
4222 /*
4223 * record the offset from the start of the extent
4224 * for adjusting the disk offset below. Only do this if the
4225 * extent isn't compressed since our in ram offset may be past
4226 * what we have actually allocated on disk.
4227 */
4228 if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4229 offset_in_extent = em_start - em->start;
4230 em_end = extent_map_end(em);
4231 em_len = em_end - em_start;
4232 disko = 0;
4233 flags = 0;
4234
4235 /*
4236 * bump off for our next call to get_extent
4237 */
4238 off = extent_map_end(em);
4239 if (off >= max)
4240 end = 1;
4241
4242 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
4243 end = 1;
4244 flags |= FIEMAP_EXTENT_LAST;
4245 } else if (em->block_start == EXTENT_MAP_INLINE) {
4246 flags |= (FIEMAP_EXTENT_DATA_INLINE |
4247 FIEMAP_EXTENT_NOT_ALIGNED);
4248 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
4249 flags |= (FIEMAP_EXTENT_DELALLOC |
4250 FIEMAP_EXTENT_UNKNOWN);
4251 } else {
4252 unsigned long ref_cnt = 0;
4253
4254 disko = em->block_start + offset_in_extent;
4255
4256 /*
4257 * As btrfs supports shared space, this information
4258 * can be exported to userspace tools via
4259 * flag FIEMAP_EXTENT_SHARED.
4260 */
4261 ret = iterate_inodes_from_logical(
4262 em->block_start,
4263 BTRFS_I(inode)->root->fs_info,
4264 path, count_ext_ref, &ref_cnt);
4265 if (ret < 0 && ret != -ENOENT)
4266 goto out_free;
4267
4268 if (ref_cnt > 1)
4269 flags |= FIEMAP_EXTENT_SHARED;
4270 }
4271 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4272 flags |= FIEMAP_EXTENT_ENCODED;
4273
4274 free_extent_map(em);
4275 em = NULL;
4276 if ((em_start >= last) || em_len == (u64)-1 ||
4277 (last == (u64)-1 && isize <= em_end)) {
4278 flags |= FIEMAP_EXTENT_LAST;
4279 end = 1;
4280 }
4281
4282 /* now scan forward to see if this is really the last extent. */
4283 em = get_extent_skip_holes(inode, off, last_for_get_extent,
4284 get_extent);
4285 if (IS_ERR(em)) {
4286 ret = PTR_ERR(em);
4287 goto out;
4288 }
4289 if (!em) {
4290 flags |= FIEMAP_EXTENT_LAST;
4291 end = 1;
4292 }
4293 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
4294 em_len, flags);
4295 if (ret)
4296 goto out_free;
4297 }
4298out_free:
4299 free_extent_map(em);
4300out:
4301 btrfs_free_path(path);
4302 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4303 &cached_state, GFP_NOFS);
4304 return ret;
4305}
4306
4307static void __free_extent_buffer(struct extent_buffer *eb)
4308{
4309 btrfs_leak_debug_del(&eb->leak_list);
4310 kmem_cache_free(extent_buffer_cache, eb);
4311}
4312
4313int extent_buffer_under_io(struct extent_buffer *eb)
4314{
4315 return (atomic_read(&eb->io_pages) ||
4316 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4317 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4318}
4319
4320/*
4321 * Helper for releasing extent buffer page.
4322 */
4323static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
4324 unsigned long start_idx)
4325{
4326 unsigned long index;
4327 unsigned long num_pages;
4328 struct page *page;
4329 int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4330
4331 BUG_ON(extent_buffer_under_io(eb));
4332
4333 num_pages = num_extent_pages(eb->start, eb->len);
4334 index = start_idx + num_pages;
4335 if (start_idx >= index)
4336 return;
4337
4338 do {
4339 index--;
4340 page = extent_buffer_page(eb, index);
4341 if (page && mapped) {
4342 spin_lock(&page->mapping->private_lock);
4343 /*
4344 * We do this since we'll remove the pages after we've
4345 * removed the eb from the radix tree, so we could race
4346 * and have this page now attached to the new eb. So
4347 * only clear page_private if it's still connected to
4348 * this eb.
4349 */
4350 if (PagePrivate(page) &&
4351 page->private == (unsigned long)eb) {
4352 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4353 BUG_ON(PageDirty(page));
4354 BUG_ON(PageWriteback(page));
4355 /*
4356 * We need to make sure we haven't been attached
4357 * to a new eb.
4358 */
4359 ClearPagePrivate(page);
4360 set_page_private(page, 0);
4361 /* One for the page private */
4362 page_cache_release(page);
4363 }
4364 spin_unlock(&page->mapping->private_lock);
4365
4366 }
4367 if (page) {
4368 /* One for when we allocated the page */
4369 page_cache_release(page);
4370 }
4371 } while (index != start_idx);
4372}
4373
4374/*
4375 * Helper for releasing the extent buffer.
4376 */
4377static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4378{
4379 btrfs_release_extent_buffer_page(eb, 0);
4380 __free_extent_buffer(eb);
4381}
4382
4383static struct extent_buffer *
4384__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
4385 unsigned long len, gfp_t mask)
4386{
4387 struct extent_buffer *eb = NULL;
4388
4389 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
4390 if (eb == NULL)
4391 return NULL;
4392 eb->start = start;
4393 eb->len = len;
4394 eb->fs_info = fs_info;
4395 eb->bflags = 0;
4396 rwlock_init(&eb->lock);
4397 atomic_set(&eb->write_locks, 0);
4398 atomic_set(&eb->read_locks, 0);
4399 atomic_set(&eb->blocking_readers, 0);
4400 atomic_set(&eb->blocking_writers, 0);
4401 atomic_set(&eb->spinning_readers, 0);
4402 atomic_set(&eb->spinning_writers, 0);
4403 eb->lock_nested = 0;
4404 init_waitqueue_head(&eb->write_lock_wq);
4405 init_waitqueue_head(&eb->read_lock_wq);
4406
4407 btrfs_leak_debug_add(&eb->leak_list, &buffers);
4408
4409 spin_lock_init(&eb->refs_lock);
4410 atomic_set(&eb->refs, 1);
4411 atomic_set(&eb->io_pages, 0);
4412
4413 /*
4414 * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
4415 */
4416 BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
4417 > MAX_INLINE_EXTENT_BUFFER_SIZE);
4418 BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
4419
4420 return eb;
4421}
4422
4423struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
4424{
4425 unsigned long i;
4426 struct page *p;
4427 struct extent_buffer *new;
4428 unsigned long num_pages = num_extent_pages(src->start, src->len);
4429
4430 new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_NOFS);
4431 if (new == NULL)
4432 return NULL;
4433
4434 for (i = 0; i < num_pages; i++) {
4435 p = alloc_page(GFP_NOFS);
4436 if (!p) {
4437 btrfs_release_extent_buffer(new);
4438 return NULL;
4439 }
4440 attach_extent_buffer_page(new, p);
4441 WARN_ON(PageDirty(p));
4442 SetPageUptodate(p);
4443 new->pages[i] = p;
4444 }
4445
4446 copy_extent_buffer(new, src, 0, 0, src->len);
4447 set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
4448 set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
4449
4450 return new;
4451}
4452
4453struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
4454{
4455 struct extent_buffer *eb;
4456 unsigned long num_pages = num_extent_pages(0, len);
4457 unsigned long i;
4458
4459 eb = __alloc_extent_buffer(NULL, start, len, GFP_NOFS);
4460 if (!eb)
4461 return NULL;
4462
4463 for (i = 0; i < num_pages; i++) {
4464 eb->pages[i] = alloc_page(GFP_NOFS);
4465 if (!eb->pages[i])
4466 goto err;
4467 }
4468 set_extent_buffer_uptodate(eb);
4469 btrfs_set_header_nritems(eb, 0);
4470 set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4471
4472 return eb;
4473err:
4474 for (; i > 0; i--)
4475 __free_page(eb->pages[i - 1]);
4476 __free_extent_buffer(eb);
4477 return NULL;
4478}
4479
4480static void check_buffer_tree_ref(struct extent_buffer *eb)
4481{
4482 int refs;
4483 /*
4484 * The ref bit is tricky.  We have to make sure it is set
4485 * if we have the buffer dirty, otherwise the code that
4486 * frees a buffer can end up dropping a dirty page.
4487 *
4488 * Once the ref bit is set, it won't go away while the
4489 * buffer is dirty or in writeback, and it also won't
4490 * go away while we have the reference count on the
4491 * eb bumped.
4492 *
4493 * We can't just set the ref bit without bumping the
4494 * ref on the eb, because free_extent_buffer might
4495 * see the ref bit and try to clear it.  If this happens,
4496 * free_extent_buffer might end up dropping our original
4497 * ref by mistake and freeing the page before we are able
4498 * to add one more ref.
4499 *
4500 * So bump the ref count first, then set the bit.  If
4501 * someone beat us to it, drop the ref we added.
4502 */
4503 refs = atomic_read(&eb->refs);
4504 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4505 return;
4506
4507 spin_lock(&eb->refs_lock);
4508 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4509 atomic_inc(&eb->refs);
4510 spin_unlock(&eb->refs_lock);
4511}
4512
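/*
 * Make sure the tree reference is held and tell the VM that the backing
 * pages were recently referenced, so reclaim treats them as actively used.
 */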
4513static void mark_extent_buffer_accessed(struct extent_buffer *eb)
4514{
4515 unsigned long num_pages, i;
4516
4517 check_buffer_tree_ref(eb);
4518
4519 num_pages = num_extent_pages(eb->start, eb->len);
4520 for (i = 0; i < num_pages; i++) {
4521 struct page *p = extent_buffer_page(eb, i);
4522 mark_page_accessed(p);
4523 }
4524}
4525
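/*
 * Look up the extent buffer for @start in the buffer radix tree under RCU.
 * If it is still alive, take a reference, mark it accessed and return it;
 * otherwise return NULL.
 */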
4526struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
4527 u64 start)
4528{
4529 struct extent_buffer *eb;
4530
4531 rcu_read_lock();
4532 eb = radix_tree_lookup(&fs_info->buffer_radix,
4533 start >> PAGE_CACHE_SHIFT);
4534 if (eb && atomic_inc_not_zero(&eb->refs)) {
4535 rcu_read_unlock();
4536 mark_extent_buffer_accessed(eb);
4537 return eb;
4538 }
4539 rcu_read_unlock();
4540
4541 return NULL;
4542}
4543
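/*
 * Return the extent buffer covering @start, allocating it and inserting it
 * into the buffer radix tree if it is not cached yet.  The backing pages
 * come from the btree inode's page cache and stay locked until the buffer
 * is visible in the radix tree, to avoid racing with releasepage.
 */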
4544struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
4545 u64 start, unsigned long len)
4546{
4547 unsigned long num_pages = num_extent_pages(start, len);
4548 unsigned long i;
4549 unsigned long index = start >> PAGE_CACHE_SHIFT;
4550 struct extent_buffer *eb;
4551 struct extent_buffer *exists = NULL;
4552 struct page *p;
4553 struct address_space *mapping = fs_info->btree_inode->i_mapping;
4554 int uptodate = 1;
4555 int ret;
4556
4557 eb = find_extent_buffer(fs_info, start);
4558 if (eb)
4559 return eb;
4560
4561 eb = __alloc_extent_buffer(fs_info, start, len, GFP_NOFS);
4562 if (!eb)
4563 return NULL;
4564
4565 for (i = 0; i < num_pages; i++, index++) {
4566 p = find_or_create_page(mapping, index, GFP_NOFS);
4567 if (!p)
4568 goto free_eb;
4569
4570 spin_lock(&mapping->private_lock);
4571 if (PagePrivate(p)) {
4572 /*
4573 * We could have already allocated an eb for this page and
4574 * attached one, so let's see if we can get a ref on the
4575 * existing eb.  If we can, we know it's good and we can
4576 * just return that one; otherwise we know it's safe to
4577 * overwrite page->private.
4578 */
4579 exists = (struct extent_buffer *)p->private;
4580 if (atomic_inc_not_zero(&exists->refs)) {
4581 spin_unlock(&mapping->private_lock);
4582 unlock_page(p);
4583 page_cache_release(p);
4584 mark_extent_buffer_accessed(exists);
4585 goto free_eb;
4586 }
4587
4588 /*
4589 * Do this so attach_extent_buffer_page() doesn't complain,
4590 * and drop the ref the old page->private holder had.
4591 */
4592 ClearPagePrivate(p);
4593 WARN_ON(PageDirty(p));
4594 page_cache_release(p);
4595 }
4596 attach_extent_buffer_page(eb, p);
4597 spin_unlock(&mapping->private_lock);
4598 WARN_ON(PageDirty(p));
4599 mark_page_accessed(p);
4600 eb->pages[i] = p;
4601 if (!PageUptodate(p))
4602 uptodate = 0;
4603
4604 /*
4605 * See below for how we avoid a nasty race with releasepage
4606 * and why the pages stay locked until later.
4607 */
4608 }
4609 if (uptodate)
4610 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4611again:
4612 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4613 if (ret)
4614 goto free_eb;
4615
4616 spin_lock(&fs_info->buffer_lock);
4617 ret = radix_tree_insert(&fs_info->buffer_radix,
4618 start >> PAGE_CACHE_SHIFT, eb);
4619 spin_unlock(&fs_info->buffer_lock);
4620 radix_tree_preload_end();
4621 if (ret == -EEXIST) {
4622 exists = find_extent_buffer(fs_info, start);
4623 if (exists)
4624 goto free_eb;
4625 else
4626 goto again;
4627 }
4628 /* add one reference for the tree */
4629 check_buffer_tree_ref(eb);
4630 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
4631
4632 /*
4633 * There is a race where releasepage may have tried to find
4634 * this extent buffer in the radix tree but failed.  It will
4635 * then tell the VM it is safe to reclaim the page and will
4636 * clear the page private bit.
4637 * We must make sure to set the page private bit properly
4638 * after the extent buffer is in the radix tree so that it
4639 * doesn't get lost.
4640 */
4641 SetPageChecked(eb->pages[0]);
4642 for (i = 1; i < num_pages; i++) {
4643 p = extent_buffer_page(eb, i);
4644 ClearPageChecked(p);
4645 unlock_page(p);
4646 }
4647 unlock_page(eb->pages[0]);
4648 return eb;
4649
4650free_eb:
4651 for (i = 0; i < num_pages; i++) {
4652 if (eb->pages[i])
4653 unlock_page(eb->pages[i]);
4654 }
4655
4656 WARN_ON(!atomic_dec_and_test(&eb->refs));
4657 btrfs_release_extent_buffer(eb);
4658 return exists;
4659}
4660
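/*
 * RCU callback that frees the extent_buffer structure once all RCU readers
 * are done with it.
 */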
4661static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4662{
4663 struct extent_buffer *eb =
4664 container_of(head, struct extent_buffer, rcu_head);
4665
4666 __free_extent_buffer(eb);
4667}
4668
4669/* Expects to have eb->refs_lock already held */
4670static int release_extent_buffer(struct extent_buffer *eb)
4671{
4672 WARN_ON(atomic_read(&eb->refs) == 0);
4673 if (atomic_dec_and_test(&eb->refs)) {
4674 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
4675 struct btrfs_fs_info *fs_info = eb->fs_info;
4676
4677 spin_unlock(&eb->refs_lock);
4678
4679 spin_lock(&fs_info->buffer_lock);
4680 radix_tree_delete(&fs_info->buffer_radix,
4681 eb->start >> PAGE_CACHE_SHIFT);
4682 spin_unlock(&fs_info->buffer_lock);
4683 } else {
4684 spin_unlock(&eb->refs_lock);
4685 }
4686
4687 /* Should be safe to release our pages at this point */
4688 btrfs_release_extent_buffer_page(eb, 0);
4689 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
4690 return 1;
4691 }
4692 spin_unlock(&eb->refs_lock);
4693
4694 return 0;
4695}
4696
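/*
 * Drop a reference on an extent buffer.  References are dropped locklessly
 * while more than three are held; once we get close to the last references,
 * eb->refs_lock is taken so that the extra reference held for dummy or
 * stale buffers can be dropped consistently before release_extent_buffer()
 * does the final accounting.
 */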
4697void free_extent_buffer(struct extent_buffer *eb)
4698{
4699 int refs;
4700 int old;
4701 if (!eb)
4702 return;
4703
4704 while (1) {
4705 refs = atomic_read(&eb->refs);
4706 if (refs <= 3)
4707 break;
4708 old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
4709 if (old == refs)
4710 return;
4711 }
4712
4713 spin_lock(&eb->refs_lock);
4714 if (atomic_read(&eb->refs) == 2 &&
4715 test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
4716 atomic_dec(&eb->refs);
4717
4718 if (atomic_read(&eb->refs) == 2 &&
4719 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
4720 !extent_buffer_under_io(eb) &&
4721 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4722 atomic_dec(&eb->refs);
4723
4724 /*
4725 * I know this is terrible, but it's temporary until we stop tracking
4726 * the uptodate bits and such for the extent buffers.
4727 */
4728 release_extent_buffer(eb);
4729}
4730
4731void free_extent_buffer_stale(struct extent_buffer *eb)
4732{
4733 if (!eb)
4734 return;
4735
4736 spin_lock(&eb->refs_lock);
4737 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
4738
4739 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
4740 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4741 atomic_dec(&eb->refs);
4742 release_extent_buffer(eb);
4743}
4744
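/*
 * Clear the dirty bit on every page backing the extent buffer and clear
 * the PAGECACHE_TAG_DIRTY tag in the mapping's radix tree, so writeback no
 * longer considers these pages.
 */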
4745void clear_extent_buffer_dirty(struct extent_buffer *eb)
4746{
4747 unsigned long i;
4748 unsigned long num_pages;
4749 struct page *page;
4750
4751 num_pages = num_extent_pages(eb->start, eb->len);
4752
4753 for (i = 0; i < num_pages; i++) {
4754 page = extent_buffer_page(eb, i);
4755 if (!PageDirty(page))
4756 continue;
4757
4758 lock_page(page);
4759 WARN_ON(!PagePrivate(page));
4760
4761 clear_page_dirty_for_io(page);
4762 spin_lock_irq(&page->mapping->tree_lock);
4763 if (!PageDirty(page)) {
4764 radix_tree_tag_clear(&page->mapping->page_tree,
4765 page_index(page),
4766 PAGECACHE_TAG_DIRTY);
4767 }
4768 spin_unlock_irq(&page->mapping->tree_lock);
4769 ClearPageError(page);
4770 unlock_page(page);
4771 }
4772 WARN_ON(atomic_read(&eb->refs) == 0);
4773}
4774
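/*
 * Mark the extent buffer and all of its backing pages dirty.  Returns
 * whether the buffer was already dirty.
 */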
4775int set_extent_buffer_dirty(struct extent_buffer *eb)
4776{
4777 unsigned long i;
4778 unsigned long num_pages;
4779 int was_dirty = 0;
4780
4781 check_buffer_tree_ref(eb);
4782
4783 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4784
4785 num_pages = num_extent_pages(eb->start, eb->len);
4786 WARN_ON(atomic_read(&eb->refs) == 0);
4787 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
4788
4789 for (i = 0; i < num_pages; i++)
4790 set_page_dirty(extent_buffer_page(eb, i));
4791 return was_dirty;
4792}
4793
4794int clear_extent_buffer_uptodate(struct extent_buffer *eb)
4795{
4796 unsigned long i;
4797 struct page *page;
4798 unsigned long num_pages;
4799
4800 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4801 num_pages = num_extent_pages(eb->start, eb->len);
4802 for (i = 0; i < num_pages; i++) {
4803 page = extent_buffer_page(eb, i);
4804 if (page)
4805 ClearPageUptodate(page);
4806 }
4807 return 0;
4808}
4809
4810int set_extent_buffer_uptodate(struct extent_buffer *eb)
4811{
4812 unsigned long i;
4813 struct page *page;
4814 unsigned long num_pages;
4815
4816 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4817 num_pages = num_extent_pages(eb->start, eb->len);
4818 for (i = 0; i < num_pages; i++) {
4819 page = extent_buffer_page(eb, i);
4820 SetPageUptodate(page);
4821 }
4822 return 0;
4823}
4824
4825int extent_buffer_uptodate(struct extent_buffer *eb)
4826{
4827 return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4828}
4829
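/*
 * Read the pages backing an extent buffer from disk.  With WAIT_NONE the
 * reads are merely started and the function backs off as soon as a page
 * cannot be trylocked; with WAIT_COMPLETE it waits for every page and
 * returns -EIO if any of them fails to become uptodate.
 */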
4830int read_extent_buffer_pages(struct extent_io_tree *tree,
4831 struct extent_buffer *eb, u64 start, int wait,
4832 get_extent_t *get_extent, int mirror_num)
4833{
4834 unsigned long i;
4835 unsigned long start_i;
4836 struct page *page;
4837 int err;
4838 int ret = 0;
4839 int locked_pages = 0;
4840 int all_uptodate = 1;
4841 unsigned long num_pages;
4842 unsigned long num_reads = 0;
4843 struct bio *bio = NULL;
4844 unsigned long bio_flags = 0;
4845
4846 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4847 return 0;
4848
4849 if (start) {
4850 WARN_ON(start < eb->start);
4851 start_i = (start >> PAGE_CACHE_SHIFT) -
4852 (eb->start >> PAGE_CACHE_SHIFT);
4853 } else {
4854 start_i = 0;
4855 }
4856
4857 num_pages = num_extent_pages(eb->start, eb->len);
4858 for (i = start_i; i < num_pages; i++) {
4859 page = extent_buffer_page(eb, i);
4860 if (wait == WAIT_NONE) {
4861 if (!trylock_page(page))
4862 goto unlock_exit;
4863 } else {
4864 lock_page(page);
4865 }
4866 locked_pages++;
4867 if (!PageUptodate(page)) {
4868 num_reads++;
4869 all_uptodate = 0;
4870 }
4871 }
4872 if (all_uptodate) {
4873 if (start_i == 0)
4874 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4875 goto unlock_exit;
4876 }
4877
4878 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
4879 eb->read_mirror = 0;
4880 atomic_set(&eb->io_pages, num_reads);
4881 for (i = start_i; i < num_pages; i++) {
4882 page = extent_buffer_page(eb, i);
4883 if (!PageUptodate(page)) {
4884 ClearPageError(page);
4885 err = __extent_read_full_page(tree, page,
4886 get_extent, &bio,
4887 mirror_num, &bio_flags,
4888 READ | REQ_META);
4889 if (err)
4890 ret = err;
4891 } else {
4892 unlock_page(page);
4893 }
4894 }
4895
4896 if (bio) {
4897 err = submit_one_bio(READ | REQ_META, bio, mirror_num,
4898 bio_flags);
4899 if (err)
4900 return err;
4901 }
4902
4903 if (ret || wait != WAIT_COMPLETE)
4904 return ret;
4905
4906 for (i = start_i; i < num_pages; i++) {
4907 page = extent_buffer_page(eb, i);
4908 wait_on_page_locked(page);
4909 if (!PageUptodate(page))
4910 ret = -EIO;
4911 }
4912
4913 return ret;
4914
4915unlock_exit:
4916 i = start_i;
4917 while (locked_pages > 0) {
4918 page = extent_buffer_page(eb, i);
4919 i++;
4920 unlock_page(page);
4921 locked_pages--;
4922 }
4923 return ret;
4924}
4925
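/*
 * Copy @len bytes starting at offset @start in the extent buffer into the
 * memory pointed to by @dstv, walking the backing pages one at a time.
 */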
4926void read_extent_buffer(struct extent_buffer *eb, void *dstv,
4927 unsigned long start,
4928 unsigned long len)
4929{
4930 size_t cur;
4931 size_t offset;
4932 struct page *page;
4933 char *kaddr;
4934 char *dst = (char *)dstv;
4935 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4936 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4937
4938 WARN_ON(start > eb->len);
4939 WARN_ON(start + len > eb->start + eb->len);
4940
4941 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
4942
4943 while (len > 0) {
4944 page = extent_buffer_page(eb, i);
4945
4946 cur = min(len, (PAGE_CACHE_SIZE - offset));
4947 kaddr = page_address(page);
4948 memcpy(dst, kaddr + offset, cur);
4949
4950 dst += cur;
4951 len -= cur;
4952 offset = 0;
4953 i++;
4954 }
4955}
4956
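/*
 * Map a contiguous range of an extent buffer directly.  The range must not
 * cross a page boundary, otherwise -EINVAL is returned; on success *map,
 * *map_start and *map_len describe the mapped area.
 */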
4957int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
4958 unsigned long min_len, char **map,
4959 unsigned long *map_start,
4960 unsigned long *map_len)
4961{
4962 size_t offset = start & (PAGE_CACHE_SIZE - 1);
4963 char *kaddr;
4964 struct page *p;
4965 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4966 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4967 unsigned long end_i = (start_offset + start + min_len - 1) >>
4968 PAGE_CACHE_SHIFT;
4969
4970 if (i != end_i)
4971 return -EINVAL;
4972
4973 if (i == 0) {
4974 offset = start_offset;
4975 *map_start = 0;
4976 } else {
4977 offset = 0;
4978 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
4979 }
4980
4981 if (start + min_len > eb->len) {
4982 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
4983 "wanted %lu %lu\n",
4984 eb->start, eb->len, start, min_len);
4985 return -EINVAL;
4986 }
4987
4988 p = extent_buffer_page(eb, i);
4989 kaddr = page_address(p);
4990 *map = kaddr + offset;
4991 *map_len = PAGE_CACHE_SIZE - offset;
4992 return 0;
4993}
4994
4995int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
4996 unsigned long start,
4997 unsigned long len)
4998{
4999 size_t cur;
5000 size_t offset;
5001 struct page *page;
5002 char *kaddr;
5003 char *ptr = (char *)ptrv;
5004 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5005 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5006 int ret = 0;
5007
5008 WARN_ON(start > eb->len);
5009 WARN_ON(start + len > eb->start + eb->len);
5010
5011 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
5012
5013 while (len > 0) {
5014 page = extent_buffer_page(eb, i);
5015
5016 cur = min(len, (PAGE_CACHE_SIZE - offset));
5017
5018 kaddr = page_address(page);
5019 ret = memcmp(ptr, kaddr + offset, cur);
5020 if (ret)
5021 break;
5022
5023 ptr += cur;
5024 len -= cur;
5025 offset = 0;
5026 i++;
5027 }
5028 return ret;
5029}
5030
5031void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
5032 unsigned long start, unsigned long len)
5033{
5034 size_t cur;
5035 size_t offset;
5036 struct page *page;
5037 char *kaddr;
5038 char *src = (char *)srcv;
5039 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5040 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5041
5042 WARN_ON(start > eb->len);
5043 WARN_ON(start + len > eb->start + eb->len);
5044
5045 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
5046
5047 while (len > 0) {
5048 page = extent_buffer_page(eb, i);
5049 WARN_ON(!PageUptodate(page));
5050
5051 cur = min(len, PAGE_CACHE_SIZE - offset);
5052 kaddr = page_address(page);
5053 memcpy(kaddr + offset, src, cur);
5054
5055 src += cur;
5056 len -= cur;
5057 offset = 0;
5058 i++;
5059 }
5060}
5061
5062void memset_extent_buffer(struct extent_buffer *eb, char c,
5063 unsigned long start, unsigned long len)
5064{
5065 size_t cur;
5066 size_t offset;
5067 struct page *page;
5068 char *kaddr;
5069 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5070 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5071
5072 WARN_ON(start > eb->len);
5073 WARN_ON(start + len > eb->start + eb->len);
5074
5075 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
5076
5077 while (len > 0) {
5078 page = extent_buffer_page(eb, i);
5079 WARN_ON(!PageUptodate(page));
5080
5081 cur = min(len, PAGE_CACHE_SIZE - offset);
5082 kaddr = page_address(page);
5083 memset(kaddr + offset, c, cur);
5084
5085 len -= cur;
5086 offset = 0;
5087 i++;
5088 }
5089}
5090
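/*
 * Copy @len bytes from @src starting at @src_offset into @dst starting at
 * @dst_offset.  The two buffers are expected to have the same length.
 */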
5091void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
5092 unsigned long dst_offset, unsigned long src_offset,
5093 unsigned long len)
5094{
5095 u64 dst_len = dst->len;
5096 size_t cur;
5097 size_t offset;
5098 struct page *page;
5099 char *kaddr;
5100 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5101 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
5102
5103 WARN_ON(src->len != dst_len);
5104
5105 offset = (start_offset + dst_offset) &
5106 (PAGE_CACHE_SIZE - 1);
5107
5108 while (len > 0) {
5109 page = extent_buffer_page(dst, i);
5110 WARN_ON(!PageUptodate(page));
5111
5112 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
5113
5114 kaddr = page_address(page);
5115 read_extent_buffer(src, kaddr + offset, src_offset, cur);
5116
5117 src_offset += cur;
5118 len -= cur;
5119 offset = 0;
5120 i++;
5121 }
5122}
5123
5124static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
5125{
5126 unsigned long distance = (src > dst) ? src - dst : dst - src;
5127 return distance < len;
5128}
5129
5130static void copy_pages(struct page *dst_page, struct page *src_page,
5131 unsigned long dst_off, unsigned long src_off,
5132 unsigned long len)
5133{
5134 char *dst_kaddr = page_address(dst_page);
5135 char *src_kaddr;
5136 int must_memmove = 0;
5137
5138 if (dst_page != src_page) {
5139 src_kaddr = page_address(src_page);
5140 } else {
5141 src_kaddr = dst_kaddr;
5142 if (areas_overlap(src_off, dst_off, len))
5143 must_memmove = 1;
5144 }
5145
5146 if (must_memmove)
5147 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
5148 else
5149 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
5150}
5151
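/*
 * Copy @len bytes within @dst, from @src_offset to @dst_offset, one
 * page-sized chunk at a time.  Both offsets are relative to the start of
 * the buffer.
 */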
5152void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5153 unsigned long src_offset, unsigned long len)
5154{
5155 size_t cur;
5156 size_t dst_off_in_page;
5157 size_t src_off_in_page;
5158 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5159 unsigned long dst_i;
5160 unsigned long src_i;
5161
5162 if (src_offset + len > dst->len) {
5163 printk(KERN_ERR "BTRFS: memcpy bogus src_offset %lu move "
5164 "len %lu dst len %lu\n", src_offset, len, dst->len);
5165 BUG_ON(1);
5166 }
5167 if (dst_offset + len > dst->len) {
5168 printk(KERN_ERR "BTRFS: memcpy bogus dst_offset %lu move "
5169 "len %lu dst len %lu\n", dst_offset, len, dst->len);
5170 BUG_ON(1);
5171 }
5172
5173 while (len > 0) {
5174 dst_off_in_page = (start_offset + dst_offset) &
5175 (PAGE_CACHE_SIZE - 1);
5176 src_off_in_page = (start_offset + src_offset) &
5177 (PAGE_CACHE_SIZE - 1);
5178
5179 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
5180 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
5181
5182 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
5183 src_off_in_page));
5184 cur = min_t(unsigned long, cur,
5185 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
5186
5187 copy_pages(extent_buffer_page(dst, dst_i),
5188 extent_buffer_page(dst, src_i),
5189 dst_off_in_page, src_off_in_page, cur);
5190
5191 src_offset += cur;
5192 dst_offset += cur;
5193 len -= cur;
5194 }
5195}
5196
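/*
 * Like memcpy_extent_buffer() but safe for overlapping ranges: when the
 * destination is above the source the copy is done backwards, page by
 * page, so no data is overwritten before it has been read.
 */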
5197void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5198 unsigned long src_offset, unsigned long len)
5199{
5200 size_t cur;
5201 size_t dst_off_in_page;
5202 size_t src_off_in_page;
5203 unsigned long dst_end = dst_offset + len - 1;
5204 unsigned long src_end = src_offset + len - 1;
5205 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5206 unsigned long dst_i;
5207 unsigned long src_i;
5208
5209 if (src_offset + len > dst->len) {
5210 printk(KERN_ERR "BTRFS: memmove bogus src_offset %lu move "
5211 "len %lu dst len %lu\n", src_offset, len, dst->len);
5212 BUG_ON(1);
5213 }
5214 if (dst_offset + len > dst->len) {
5215 printk(KERN_ERR "BTRFS: memmove bogus dst_offset %lu move "
5216 "len %lu dst len %lu\n", dst_offset, len, dst->len);
5217 BUG_ON(1);
5218 }
5219 if (dst_offset < src_offset) {
5220 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
5221 return;
5222 }
5223 while (len > 0) {
5224 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
5225 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
5226
5227 dst_off_in_page = (start_offset + dst_end) &
5228 (PAGE_CACHE_SIZE - 1);
5229 src_off_in_page = (start_offset + src_end) &
5230 (PAGE_CACHE_SIZE - 1);
5231
5232 cur = min_t(unsigned long, len, src_off_in_page + 1);
5233 cur = min(cur, dst_off_in_page + 1);
5234 copy_pages(extent_buffer_page(dst, dst_i),
5235 extent_buffer_page(dst, src_i),
5236 dst_off_in_page - cur + 1,
5237 src_off_in_page - cur + 1, cur);
5238
5239 dst_end -= cur;
5240 src_end -= cur;
5241 len -= cur;
5242 }
5243}
5244
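/*
 * Called when the VM wants to release a btree page.  Returns 1 (and frees
 * the extent buffer) only when the tree holds the sole remaining reference
 * and no IO is in flight; otherwise returns 0 and the page stays around.
 */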
5245int try_release_extent_buffer(struct page *page)
5246{
5247 struct extent_buffer *eb;
5248
5249 /*
5250 * We need to make sure nobody is attaching this page to an eb
5251 * right now.
5252 */
5253 spin_lock(&page->mapping->private_lock);
5254 if (!PagePrivate(page)) {
5255 spin_unlock(&page->mapping->private_lock);
5256 return 1;
5257 }
5258
5259 eb = (struct extent_buffer *)page->private;
5260 BUG_ON(!eb);
5261
5262 /*
5263 * This is a little awful but should be OK; we need to make sure that
5264 * the eb doesn't disappear out from under us while we're looking at
5265 * this page.
5266 */
5267 spin_lock(&eb->refs_lock);
5268 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
5269 spin_unlock(&eb->refs_lock);
5270 spin_unlock(&page->mapping->private_lock);
5271 return 0;
5272 }
5273 spin_unlock(&page->mapping->private_lock);
5274
5275 /*
5276 * If the tree ref isn't set then we know the ref on this eb is a real
5277 * ref, so just return; this page will likely be freed soon anyway.
5278 */
5279 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
5280 spin_unlock(&eb->refs_lock);
5281 return 0;
5282 }
5283
5284 return release_extent_buffer(eb);
5285}