1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/bitops.h>
4#include <linux/slab.h>
5#include <linux/bio.h>
6#include <linux/mm.h>
7#include <linux/pagemap.h>
8#include <linux/page-flags.h>
9#include <linux/sched/mm.h>
10#include <linux/spinlock.h>
11#include <linux/blkdev.h>
12#include <linux/swap.h>
13#include <linux/writeback.h>
14#include <linux/pagevec.h>
15#include <linux/prefetch.h>
16#include <linux/fsverity.h>
17#include "extent_io.h"
18#include "extent-io-tree.h"
19#include "extent_map.h"
20#include "ctree.h"
21#include "btrfs_inode.h"
22#include "bio.h"
23#include "locking.h"
24#include "backref.h"
25#include "disk-io.h"
26#include "subpage.h"
27#include "zoned.h"
28#include "block-group.h"
29#include "compression.h"
30#include "fs.h"
31#include "accessors.h"
32#include "file-item.h"
33#include "file.h"
34#include "dev-replace.h"
35#include "super.h"
36#include "transaction.h"
37
38static struct kmem_cache *extent_buffer_cache;
39
40#ifdef CONFIG_BTRFS_DEBUG
41static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
42{
43 struct btrfs_fs_info *fs_info = eb->fs_info;
44 unsigned long flags;
45
46 spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
47 list_add(&eb->leak_list, &fs_info->allocated_ebs);
48 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
49}
50
51static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
52{
53 struct btrfs_fs_info *fs_info = eb->fs_info;
54 unsigned long flags;
55
56 spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
57 list_del(&eb->leak_list);
58 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
59}
60
61void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
62{
63 struct extent_buffer *eb;
64 unsigned long flags;
65
66 /*
67 * If we didn't get into open_ctree our allocated_ebs will not be
68 * initialized, so just skip this.
69 */
70 if (!fs_info->allocated_ebs.next)
71 return;
72
73 WARN_ON(!list_empty(&fs_info->allocated_ebs));
74 spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
75 while (!list_empty(&fs_info->allocated_ebs)) {
76 eb = list_first_entry(&fs_info->allocated_ebs,
77 struct extent_buffer, leak_list);
78 pr_err(
79 "BTRFS: buffer leak start %llu len %u refs %d bflags %lu owner %llu\n",
80 eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
81 btrfs_header_owner(eb));
82 list_del(&eb->leak_list);
83 WARN_ON_ONCE(1);
84 kmem_cache_free(extent_buffer_cache, eb);
85 }
86 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
87}
88#else
89#define btrfs_leak_debug_add_eb(eb) do {} while (0)
90#define btrfs_leak_debug_del_eb(eb) do {} while (0)
91#endif
92
93/*
94 * Structure to record info about the bio being assembled, and other info like
95 * how many bytes are there before stripe/ordered extent boundary.
96 */
97struct btrfs_bio_ctrl {
98 struct btrfs_bio *bbio;
99 enum btrfs_compression_type compress_type;
100 u32 len_to_oe_boundary;
101 blk_opf_t opf;
102 btrfs_bio_end_io_t end_io_func;
103 struct writeback_control *wbc;
104
105 /*
106 * The sectors of the page which are going to be submitted by
107 * extent_writepage_io().
108 * This is to avoid touching ranges covered by compression/inline.
109 */
110 unsigned long submit_bitmap;
111};
112
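/*
 * A brief summary of the function below, inferred from its body: submit the
 * bio currently held in @bio_ctrl, if any.  Compressed reads are handed to
 * the compressed read path, everything else goes through btrfs_submit_bbio().
 * Ownership of the bio passes to the end_io handler and bio_ctrl->bbio is
 * reset to NULL.
 */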
113static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
114{
115 struct btrfs_bio *bbio = bio_ctrl->bbio;
116
117 if (!bbio)
118 return;
119
120 /* Caller should ensure the bio has at least some range added */
121 ASSERT(bbio->bio.bi_iter.bi_size);
122
123 if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
124 bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
125 btrfs_submit_compressed_read(bbio);
126 else
127 btrfs_submit_bbio(bbio, 0);
128
129 /* The bbio is owned by the end_io handler now */
130 bio_ctrl->bbio = NULL;
131}
132
133/*
134 * Submit or fail the current bio in the bio_ctrl structure.
135 */
136static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
137{
138 struct btrfs_bio *bbio = bio_ctrl->bbio;
139
140 if (!bbio)
141 return;
142
143 if (ret) {
144 ASSERT(ret < 0);
145 btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
146 /* The bio is owned by the end_io handler now */
147 bio_ctrl->bbio = NULL;
148 } else {
149 submit_one_bio(bio_ctrl);
150 }
151}
152
153int __init extent_buffer_init_cachep(void)
154{
155 extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
156 sizeof(struct extent_buffer), 0, 0,
157 NULL);
158 if (!extent_buffer_cache)
159 return -ENOMEM;
160
161 return 0;
162}
163
164void __cold extent_buffer_free_cachep(void)
165{
166 /*
167 * Make sure all delayed rcu free are flushed before we
168 * destroy caches.
169 */
170 rcu_barrier();
171 kmem_cache_destroy(extent_buffer_cache);
172}
173
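/*
 * Summary of the helper below: apply the requested @page_ops (ordered,
 * writeback start/end, unlock) to the part of @folio overlapping the
 * [@start, @end] range, using the subpage aware btrfs_folio_clamp_*()
 * helpers.  @locked_folio is skipped for PAGE_UNLOCK.
 */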
174static void process_one_folio(struct btrfs_fs_info *fs_info,
175 struct folio *folio, const struct folio *locked_folio,
176 unsigned long page_ops, u64 start, u64 end)
177{
178 u32 len;
179
180 ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
181 len = end + 1 - start;
182
183 if (page_ops & PAGE_SET_ORDERED)
184 btrfs_folio_clamp_set_ordered(fs_info, folio, start, len);
185 if (page_ops & PAGE_START_WRITEBACK) {
186 btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
187 btrfs_folio_clamp_set_writeback(fs_info, folio, start, len);
188 }
189 if (page_ops & PAGE_END_WRITEBACK)
190 btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);
191
192 if (folio != locked_folio && (page_ops & PAGE_UNLOCK))
193 btrfs_folio_end_lock(fs_info, folio, start, len);
194}
195
196static void __process_folios_contig(struct address_space *mapping,
197 const struct folio *locked_folio, u64 start,
198 u64 end, unsigned long page_ops)
199{
200 struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
201 pgoff_t start_index = start >> PAGE_SHIFT;
202 pgoff_t end_index = end >> PAGE_SHIFT;
203 pgoff_t index = start_index;
204 struct folio_batch fbatch;
205 int i;
206
207 folio_batch_init(&fbatch);
208 while (index <= end_index) {
209 int found_folios;
210
211 found_folios = filemap_get_folios_contig(mapping, &index,
212 end_index, &fbatch);
213 for (i = 0; i < found_folios; i++) {
214 struct folio *folio = fbatch.folios[i];
215
216 process_one_folio(fs_info, folio, locked_folio,
217 page_ops, start, end);
218 }
219 folio_batch_release(&fbatch);
220 cond_resched();
221 }
222}
223
224static noinline void __unlock_for_delalloc(const struct inode *inode,
225 const struct folio *locked_folio,
226 u64 start, u64 end)
227{
228 unsigned long index = start >> PAGE_SHIFT;
229 unsigned long end_index = end >> PAGE_SHIFT;
230
231 ASSERT(locked_folio);
232 if (index == locked_folio->index && end_index == index)
233 return;
234
235 __process_folios_contig(inode->i_mapping, locked_folio, start, end,
236 PAGE_UNLOCK);
237}
238
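/*
 * Summary of the helper below: lock every folio covering the [@start, @end]
 * delalloc range, except @locked_folio which the caller already holds.
 *
 * Returns 0 on success.  If a folio is missing or no longer dirty, the
 * already processed range is unlocked again and -EAGAIN is returned so the
 * caller can retry with a smaller range.
 */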
239static noinline int lock_delalloc_folios(struct inode *inode,
240 const struct folio *locked_folio,
241 u64 start, u64 end)
242{
243 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
244 struct address_space *mapping = inode->i_mapping;
245 pgoff_t start_index = start >> PAGE_SHIFT;
246 pgoff_t end_index = end >> PAGE_SHIFT;
247 pgoff_t index = start_index;
248 u64 processed_end = start;
249 struct folio_batch fbatch;
250
251 if (index == locked_folio->index && index == end_index)
252 return 0;
253
254 folio_batch_init(&fbatch);
255 while (index <= end_index) {
256 unsigned int found_folios, i;
257
258 found_folios = filemap_get_folios_contig(mapping, &index,
259 end_index, &fbatch);
260 if (found_folios == 0)
261 goto out;
262
263 for (i = 0; i < found_folios; i++) {
264 struct folio *folio = fbatch.folios[i];
265 u64 range_start;
266 u32 range_len;
267
268 if (folio == locked_folio)
269 continue;
270
271 folio_lock(folio);
272 if (!folio_test_dirty(folio) || folio->mapping != mapping) {
273 folio_unlock(folio);
274 goto out;
275 }
276 range_start = max_t(u64, folio_pos(folio), start);
277 range_len = min_t(u64, folio_pos(folio) + folio_size(folio),
278 end + 1) - range_start;
279 btrfs_folio_set_lock(fs_info, folio, range_start, range_len);
280
281 processed_end = range_start + range_len - 1;
282 }
283 folio_batch_release(&fbatch);
284 cond_resched();
285 }
286
287 return 0;
288out:
289 folio_batch_release(&fbatch);
290 if (processed_end > start)
291 __unlock_for_delalloc(inode, locked_folio, start,
292 processed_end);
293 return -EAGAIN;
294}
295
296/*
297 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
298 * more than @max_bytes.
299 *
300 * @start: The original start bytenr to search.
301 * Will store the extent range start bytenr.
302 * @end: The original end bytenr of the search range
303 * Will store the extent range end bytenr.
304 *
305 * Return true if we find a delalloc range which starts inside the original
306 * range, and @start/@end will store the delalloc range start/end.
307 *
308 * Return false if we can't find any delalloc range which starts inside the
309 * original range, and @start/@end will be the non-delalloc range start/end.
310 */
311EXPORT_FOR_TESTS
312noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
313 struct folio *locked_folio,
314 u64 *start, u64 *end)
315{
316 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
317 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
318 const u64 orig_start = *start;
319 const u64 orig_end = *end;
320 /* The sanity tests may not set a valid fs_info. */
321 u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
322 u64 delalloc_start;
323 u64 delalloc_end;
324 bool found;
325 struct extent_state *cached_state = NULL;
326 int ret;
327 int loops = 0;
328
329 /* Caller should pass a valid @end to indicate the search range end */
330 ASSERT(orig_end > orig_start);
331
332 /* The range should at least cover part of the folio */
333 ASSERT(!(orig_start >= folio_pos(locked_folio) + folio_size(locked_folio) ||
334 orig_end <= folio_pos(locked_folio)));
335again:
336 /* step one, find a bunch of delalloc bytes starting at start */
337 delalloc_start = *start;
338 delalloc_end = 0;
339 found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
340 max_bytes, &cached_state);
341 if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
342 *start = delalloc_start;
343
344 /* @delalloc_end can be -1, never go beyond @orig_end */
345 *end = min(delalloc_end, orig_end);
346 free_extent_state(cached_state);
347 return false;
348 }
349
350 /*
351 * start comes from the offset of locked_folio. We have to lock
352 * folios in order, so we can't process delalloc bytes before
353 * locked_folio
354 */
355 if (delalloc_start < *start)
356 delalloc_start = *start;
357
358 /*
359 * make sure to limit the number of folios we try to lock down
360 */
361 if (delalloc_end + 1 - delalloc_start > max_bytes)
362 delalloc_end = delalloc_start + max_bytes - 1;
363
364 /* step two, lock all the folios after the folio that has start */
365 ret = lock_delalloc_folios(inode, locked_folio, delalloc_start,
366 delalloc_end);
367 ASSERT(!ret || ret == -EAGAIN);
368 if (ret == -EAGAIN) {
369 /* some of the folios are gone, let's avoid looping by
370 * shortening the size of the delalloc range we're searching
371 */
372 free_extent_state(cached_state);
373 cached_state = NULL;
374 if (!loops) {
375 max_bytes = PAGE_SIZE;
376 loops = 1;
377 goto again;
378 } else {
379 found = false;
380 goto out_failed;
381 }
382 }
383
384 /* step three, lock the state bits for the whole range */
385 lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
386
387 /* then test to make sure it is all still delalloc */
388 ret = test_range_bit(tree, delalloc_start, delalloc_end,
389 EXTENT_DELALLOC, cached_state);
390
391 unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
392 if (!ret) {
393 __unlock_for_delalloc(inode, locked_folio, delalloc_start,
394 delalloc_end);
395 cond_resched();
396 goto again;
397 }
398 *start = delalloc_start;
399 *end = delalloc_end;
400out_failed:
401 return found;
402}
403
404void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
405 const struct folio *locked_folio,
406 struct extent_state **cached,
407 u32 clear_bits, unsigned long page_ops)
408{
409 clear_extent_bit(&inode->io_tree, start, end, clear_bits, cached);
410
411 __process_folios_contig(inode->vfs_inode.i_mapping, locked_folio, start,
412 end, page_ops);
413}
414
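/*
 * Summary of the helper below: return true if the range needs no fs-verity
 * verification (the inode has no fs-verity data, the range is already
 * uptodate, or it starts beyond i_size), or if fsverity_verify_folio()
 * succeeds.
 */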
415static bool btrfs_verify_folio(struct folio *folio, u64 start, u32 len)
416{
417 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
418
419 if (!fsverity_active(folio->mapping->host) ||
420 btrfs_folio_test_uptodate(fs_info, folio, start, len) ||
421 start >= i_size_read(folio->mapping->host))
422 return true;
423 return fsverity_verify_folio(folio);
424}
425
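/*
 * Summary of the helper below: finish a read of [@start, @start + @len) in
 * @folio.  Update the (subpage) uptodate status based on @uptodate and the
 * fs-verity check, then unlock the folio, or just end the subpage range lock
 * for subpage filesystems.
 */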
426static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 len)
427{
428 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
429
430 ASSERT(folio_pos(folio) <= start &&
431 start + len <= folio_pos(folio) + PAGE_SIZE);
432
433 if (uptodate && btrfs_verify_folio(folio, start, len))
434 btrfs_folio_set_uptodate(fs_info, folio, start, len);
435 else
436 btrfs_folio_clear_uptodate(fs_info, folio, start, len);
437
438 if (!btrfs_is_subpage(fs_info, folio->mapping))
439 folio_unlock(folio);
440 else
441 btrfs_folio_end_lock(fs_info, folio, start, len);
442}
443
444/*
445 * After a write IO is done, we need to:
446 *
447 * - clear the uptodate bits on error
448 * - clear the writeback bits in the extent tree for the range
449 * - folio_end_writeback() if there is no more pending io for the folio
450 *
451 * Scheduling is not allowed, so the extent state tree is expected
452 * to have one and only one object corresponding to this IO.
453 */
454static void end_bbio_data_write(struct btrfs_bio *bbio)
455{
456 struct btrfs_fs_info *fs_info = bbio->fs_info;
457 struct bio *bio = &bbio->bio;
458 int error = blk_status_to_errno(bio->bi_status);
459 struct folio_iter fi;
460 const u32 sectorsize = fs_info->sectorsize;
461
462 ASSERT(!bio_flagged(bio, BIO_CLONED));
463 bio_for_each_folio_all(fi, bio) {
464 struct folio *folio = fi.folio;
465 u64 start = folio_pos(folio) + fi.offset;
466 u32 len = fi.length;
467
468 /* Only order 0 (single page) folios are allowed for data. */
469 ASSERT(folio_order(folio) == 0);
470
471 /* Our read/write should always be sector aligned. */
472 if (!IS_ALIGNED(fi.offset, sectorsize))
473 btrfs_err(fs_info,
474 "partial page write in btrfs with offset %zu and length %zu",
475 fi.offset, fi.length);
476 else if (!IS_ALIGNED(fi.length, sectorsize))
477 btrfs_info(fs_info,
478 "incomplete page write with offset %zu and length %zu",
479 fi.offset, fi.length);
480
481 btrfs_finish_ordered_extent(bbio->ordered, folio, start, len,
482 !error);
483 if (error)
484 mapping_set_error(folio->mapping, error);
485 btrfs_folio_clear_writeback(fs_info, folio, start, len);
486 }
487
488 bio_put(bio);
489}
490
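/*
 * Summary of the helper below: for subpage filesystems, mark the whole folio
 * range as locked in the subpage bitmap before starting a read; regular
 * folios rely on the folio lock alone.
 */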
491static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio)
492{
493 ASSERT(folio_test_locked(folio));
494 if (!btrfs_is_subpage(fs_info, folio->mapping))
495 return;
496
497 ASSERT(folio_test_private(folio));
498 btrfs_folio_set_lock(fs_info, folio, folio_pos(folio), PAGE_SIZE);
499}
500
501/*
502 * After a data read IO is done, we need to:
503 *
504 * - clear the uptodate bits on error
505 * - set the uptodate bits if things worked
506 * - set the folio up to date if all extents in the tree are uptodate
507 * - clear the lock bit in the extent tree
508 * - unlock the folio if there are no other extents locked for it
509 *
510 * Scheduling is not allowed, so the extent state tree is expected
511 * to have one and only one object corresponding to this IO.
512 */
513static void end_bbio_data_read(struct btrfs_bio *bbio)
514{
515 struct btrfs_fs_info *fs_info = bbio->fs_info;
516 struct bio *bio = &bbio->bio;
517 struct folio_iter fi;
518 const u32 sectorsize = fs_info->sectorsize;
519
520 ASSERT(!bio_flagged(bio, BIO_CLONED));
521 bio_for_each_folio_all(fi, &bbio->bio) {
522 bool uptodate = !bio->bi_status;
523 struct folio *folio = fi.folio;
524 struct inode *inode = folio->mapping->host;
525 u64 start;
526 u64 end;
527 u32 len;
528
529 /* For now only order 0 folios are supported for data. */
530 ASSERT(folio_order(folio) == 0);
531 btrfs_debug(fs_info,
532 "%s: bi_sector=%llu, err=%d, mirror=%u",
533 __func__, bio->bi_iter.bi_sector, bio->bi_status,
534 bbio->mirror_num);
535
536 /*
537 * We always issue full-sector reads, but if some block in a
538 * folio fails to read, blk_update_request() will advance
539 * bv_offset and adjust bv_len to compensate. Print a warning
540 * for unaligned offsets, and an error if they don't add up to
541 * a full sector.
542 */
543 if (!IS_ALIGNED(fi.offset, sectorsize))
544 btrfs_err(fs_info,
545 "partial page read in btrfs with offset %zu and length %zu",
546 fi.offset, fi.length);
547 else if (!IS_ALIGNED(fi.offset + fi.length, sectorsize))
548 btrfs_info(fs_info,
549 "incomplete page read with offset %zu and length %zu",
550 fi.offset, fi.length);
551
552 start = folio_pos(folio) + fi.offset;
553 end = start + fi.length - 1;
554 len = fi.length;
555
556 if (likely(uptodate)) {
557 loff_t i_size = i_size_read(inode);
558 pgoff_t end_index = i_size >> folio_shift(folio);
559
560 /*
561 * Zero out the remaining part if this range straddles
562 * i_size.
563 *
564 * Here we should only zero the range inside the folio,
565 * not touch anything else.
566 *
567 * NOTE: i_size is exclusive while end is inclusive.
568 */
569 if (folio_index(folio) == end_index && i_size <= end) {
570 u32 zero_start = max(offset_in_folio(folio, i_size),
571 offset_in_folio(folio, start));
572 u32 zero_len = offset_in_folio(folio, end) + 1 -
573 zero_start;
574
575 folio_zero_range(folio, zero_start, zero_len);
576 }
577 }
578
579 /* Update page status and unlock. */
580 end_folio_read(folio, uptodate, start, len);
581 }
582 bio_put(bio);
583}
584
585/*
586 * Populate every free slot in a provided array with folios using GFP_NOFS.
587 *
588 * @nr_folios: number of folios to allocate
589 * @folio_array: the array to fill with folios; any existing non-NULL entries in
590 * the array will be skipped
591 *
592 * Return: 0 if all folios were able to be allocated;
593 * -ENOMEM otherwise, the partially allocated folios would be freed and
594 * the array slots zeroed
595 */
596int btrfs_alloc_folio_array(unsigned int nr_folios, struct folio **folio_array)
597{
598 for (int i = 0; i < nr_folios; i++) {
599 if (folio_array[i])
600 continue;
601 folio_array[i] = folio_alloc(GFP_NOFS, 0);
602 if (!folio_array[i])
603 goto error;
604 }
605 return 0;
606error:
607 for (int i = 0; i < nr_folios; i++) {
608 if (folio_array[i])
609 folio_put(folio_array[i]);
610 }
611 return -ENOMEM;
612}
613
614/*
615 * Populate every free slot in a provided array with pages, using GFP_NOFS.
616 *
617 * @nr_pages: number of pages to allocate
618 * @page_array: the array to fill with pages; any existing non-null entries in
619 * the array will be skipped
620 * @nofail: whether using __GFP_NOFAIL flag
621 *
622 * Return: 0 if all pages were able to be allocated;
623 * -ENOMEM otherwise, the partially allocated pages would be freed and
624 * the array slots zeroed
625 */
626int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
627 bool nofail)
628{
629 const gfp_t gfp = nofail ? (GFP_NOFS | __GFP_NOFAIL) : GFP_NOFS;
630 unsigned int allocated;
631
632 for (allocated = 0; allocated < nr_pages;) {
633 unsigned int last = allocated;
634
635 allocated = alloc_pages_bulk_array(gfp, nr_pages, page_array);
636 if (unlikely(allocated == last)) {
637 /* No progress, fail and do cleanup. */
638 for (int i = 0; i < allocated; i++) {
639 __free_page(page_array[i]);
640 page_array[i] = NULL;
641 }
642 return -ENOMEM;
643 }
644 }
645 return 0;
646}
647
648/*
649 * Populate needed folios for the extent buffer.
650 *
651 * For now, the folios populated are always in order 0 (aka, single page).
652 */
653static int alloc_eb_folio_array(struct extent_buffer *eb, bool nofail)
654{
655 struct page *page_array[INLINE_EXTENT_BUFFER_PAGES] = { 0 };
656 int num_pages = num_extent_pages(eb);
657 int ret;
658
659 ret = btrfs_alloc_page_array(num_pages, page_array, nofail);
660 if (ret < 0)
661 return ret;
662
663 for (int i = 0; i < num_pages; i++)
664 eb->folios[i] = page_folio(page_array[i]);
665 eb->folio_size = PAGE_SIZE;
666 eb->folio_shift = PAGE_SHIFT;
667 return 0;
668}
669
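/*
 * Summary of the helper below: return true if the new range (@disk_bytenr,
 * @folio + @pg_offset) can be appended to the bio currently held in
 * @bio_ctrl, i.e. it is contiguous both in logical (disk) bytenr and in file
 * offset.  For compressed reads only the starting bytenr of the compressed
 * extent is compared.
 */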
670static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
671 struct folio *folio, u64 disk_bytenr,
672 unsigned int pg_offset)
673{
674 struct bio *bio = &bio_ctrl->bbio->bio;
675 struct bio_vec *bvec = bio_last_bvec_all(bio);
676 const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
677 struct folio *bv_folio = page_folio(bvec->bv_page);
678
679 if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
680 /*
681 * For compression, all IO should have its logical bytenr set
682 * to the starting bytenr of the compressed extent.
683 */
684 return bio->bi_iter.bi_sector == sector;
685 }
686
687 /*
688 * The contig check requires the following conditions to be met:
689 *
690 * 1) The folios belong to the same inode
691 * This is implied by the call chain.
692 *
693 * 2) The range has adjacent logical bytenr
694 *
695 * 3) The range has adjacent file offset
696 * This is required for the usage of btrfs_bio->file_offset.
697 */
698 return bio_end_sector(bio) == sector &&
699 folio_pos(bv_folio) + bvec->bv_offset + bvec->bv_len ==
700 folio_pos(folio) + pg_offset;
701}
702
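/*
 * Summary of the helper below: allocate a new bbio in @bio_ctrl for a range
 * starting at @disk_bytenr / @file_offset.  For data writes (bio_ctrl->wbc
 * set), cap len_to_oe_boundary at the covering ordered extent boundary and
 * associate the bio with the writeback control and the latest device for
 * cgroup writeback.
 */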
703static void alloc_new_bio(struct btrfs_inode *inode,
704 struct btrfs_bio_ctrl *bio_ctrl,
705 u64 disk_bytenr, u64 file_offset)
706{
707 struct btrfs_fs_info *fs_info = inode->root->fs_info;
708 struct btrfs_bio *bbio;
709
710 bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
711 bio_ctrl->end_io_func, NULL);
712 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
713 bbio->inode = inode;
714 bbio->file_offset = file_offset;
715 bio_ctrl->bbio = bbio;
716 bio_ctrl->len_to_oe_boundary = U32_MAX;
717
718 /* Limit data write bios to the ordered boundary. */
719 if (bio_ctrl->wbc) {
720 struct btrfs_ordered_extent *ordered;
721
722 ordered = btrfs_lookup_ordered_extent(inode, file_offset);
723 if (ordered) {
724 bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
725 ordered->file_offset +
726 ordered->disk_num_bytes - file_offset);
727 bbio->ordered = ordered;
728 }
729
730 /*
731 * Pick the last added device to support cgroup writeback. For
732 * multi-device file systems this means blk-cgroup policies have
733 * to always be set on the last added/replaced device.
734 * This is a bit odd but has been like that for a long time.
735 */
736 bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
737 wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
738 }
739}
740
741/*
742 * @disk_bytenr: logical bytenr where the write will be
743 * @folio: folio to add to the bio
744 * @size: portion of the folio that we want to write to
745 * @pg_offset: offset of the new bio or to check whether we are adding
746 * a contiguous folio to the previous one
747 *
748 * This will either add the folio into the existing @bio_ctrl->bbio, or allocate a
749 * new one in @bio_ctrl->bbio.
750 * The mirror number for this IO should already be initialized in
751 * @bio_ctrl->mirror_num.
752 */
753static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl,
754 u64 disk_bytenr, struct folio *folio,
755 size_t size, unsigned long pg_offset)
756{
757 struct btrfs_inode *inode = folio_to_inode(folio);
758
759 ASSERT(pg_offset + size <= PAGE_SIZE);
760 ASSERT(bio_ctrl->end_io_func);
761
762 if (bio_ctrl->bbio &&
763 !btrfs_bio_is_contig(bio_ctrl, folio, disk_bytenr, pg_offset))
764 submit_one_bio(bio_ctrl);
765
766 do {
767 u32 len = size;
768
769 /* Allocate new bio if needed */
770 if (!bio_ctrl->bbio) {
771 alloc_new_bio(inode, bio_ctrl, disk_bytenr,
772 folio_pos(folio) + pg_offset);
773 }
774
775 /* Cap to the current ordered extent boundary if there is one. */
776 if (len > bio_ctrl->len_to_oe_boundary) {
777 ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE);
778 ASSERT(is_data_inode(inode));
779 len = bio_ctrl->len_to_oe_boundary;
780 }
781
782 if (!bio_add_folio(&bio_ctrl->bbio->bio, folio, len, pg_offset)) {
783 /* bio full: move on to a new one */
784 submit_one_bio(bio_ctrl);
785 continue;
786 }
787
788 if (bio_ctrl->wbc)
789 wbc_account_cgroup_owner(bio_ctrl->wbc, folio,
790 len);
791
792 size -= len;
793 pg_offset += len;
794 disk_bytenr += len;
795
796 /*
797 * len_to_oe_boundary defaults to U32_MAX, which isn't folio or
798 * sector aligned. alloc_new_bio() then sets it to the end of
799 * our ordered extent for writes into zoned devices.
800 *
801 * When len_to_oe_boundary is tracking an ordered extent, we
802 * trust the ordered extent code to align things properly, and
803 * the check above to cap our write to the ordered extent
804 * boundary is correct.
805 *
806 * When len_to_oe_boundary is U32_MAX, the cap above would
807 * result in a 4095 byte IO for the last folio right before
808 * we hit the bio limit of UINT_MAX. bio_add_folio() has all
809 * the checks required to make sure we don't overflow the bio,
810 * and we should just ignore len_to_oe_boundary completely
811 * unless we're using it to track an ordered extent.
812 *
813 * It's pretty hard to make a bio sized U32_MAX, but it can
814 * happen when the page cache is able to feed us contiguous
815 * folios for large extents.
816 */
817 if (bio_ctrl->len_to_oe_boundary != U32_MAX)
818 bio_ctrl->len_to_oe_boundary -= len;
819
820 /* Ordered extent boundary: move on to a new bio. */
821 if (bio_ctrl->len_to_oe_boundary == 0)
822 submit_one_bio(bio_ctrl);
823 } while (size);
824}
825
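/*
 * Summary of the helper below: attach @eb (or, for subpage filesystems, the
 * preallocated subpage structure @prealloc) as the private data of @folio.
 * Returns 0 on success, or a negative errno if a subpage structure had to be
 * allocated and the allocation failed.
 */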
826static int attach_extent_buffer_folio(struct extent_buffer *eb,
827 struct folio *folio,
828 struct btrfs_subpage *prealloc)
829{
830 struct btrfs_fs_info *fs_info = eb->fs_info;
831 int ret = 0;
832
833 /*
834 * If the page is mapped to btree inode, we should hold the private
835 * lock to prevent race.
836 * For cloned or dummy extent buffers, their pages are not mapped and
837 * will not race with any other ebs.
838 */
839 if (folio->mapping)
840 lockdep_assert_held(&folio->mapping->i_private_lock);
841
842 if (fs_info->nodesize >= PAGE_SIZE) {
843 if (!folio_test_private(folio))
844 folio_attach_private(folio, eb);
845 else
846 WARN_ON(folio_get_private(folio) != eb);
847 return 0;
848 }
849
850 /* Already mapped, just free prealloc */
851 if (folio_test_private(folio)) {
852 btrfs_free_subpage(prealloc);
853 return 0;
854 }
855
856 if (prealloc)
857 /* Has preallocated memory for subpage */
858 folio_attach_private(folio, prealloc);
859 else
860 /* Do new allocation to attach subpage */
861 ret = btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_METADATA);
862 return ret;
863}
864
865int set_page_extent_mapped(struct page *page)
866{
867 return set_folio_extent_mapped(page_folio(page));
868}
869
870int set_folio_extent_mapped(struct folio *folio)
871{
872 struct btrfs_fs_info *fs_info;
873
874 ASSERT(folio->mapping);
875
876 if (folio_test_private(folio))
877 return 0;
878
879 fs_info = folio_to_fs_info(folio);
880
881 if (btrfs_is_subpage(fs_info, folio->mapping))
882 return btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
883
884 folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE);
885 return 0;
886}
887
888void clear_folio_extent_mapped(struct folio *folio)
889{
890 struct btrfs_fs_info *fs_info;
891
892 ASSERT(folio->mapping);
893
894 if (!folio_test_private(folio))
895 return;
896
897 fs_info = folio_to_fs_info(folio);
898 if (btrfs_is_subpage(fs_info, folio->mapping))
899 return btrfs_detach_subpage(fs_info, folio);
900
901 folio_detach_private(folio);
902}
903
904static struct extent_map *get_extent_map(struct btrfs_inode *inode,
905 struct folio *folio, u64 start,
906 u64 len, struct extent_map **em_cached)
907{
908 struct extent_map *em;
909
910 ASSERT(em_cached);
911
912 if (*em_cached) {
913 em = *em_cached;
914 if (extent_map_in_tree(em) && start >= em->start &&
915 start < extent_map_end(em)) {
916 refcount_inc(&em->refs);
917 return em;
918 }
919
920 free_extent_map(em);
921 *em_cached = NULL;
922 }
923
924 em = btrfs_get_extent(inode, folio, start, len);
925 if (!IS_ERR(em)) {
926 BUG_ON(*em_cached);
927 refcount_inc(&em->refs);
928 *em_cached = em;
929 }
930
931 return em;
932}
933/*
934 * basic readpage implementation. Locked extent state structs are inserted
935 * into the tree that are removed when the IO is done (by the end_io
936 * handlers)
937 * XXX JDM: This needs looking at to ensure proper page locking
938 * return 0 on success, otherwise return error
939 */
940static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
941 struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
942{
943 struct inode *inode = folio->mapping->host;
944 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
945 u64 start = folio_pos(folio);
946 const u64 end = start + PAGE_SIZE - 1;
947 u64 cur = start;
948 u64 extent_offset;
949 u64 last_byte = i_size_read(inode);
950 u64 block_start;
951 struct extent_map *em;
952 int ret = 0;
953 size_t pg_offset = 0;
954 size_t iosize;
955 size_t blocksize = fs_info->sectorsize;
956
957 ret = set_folio_extent_mapped(folio);
958 if (ret < 0) {
959 folio_unlock(folio);
960 return ret;
961 }
962
963 if (folio->index == last_byte >> folio_shift(folio)) {
964 size_t zero_offset = offset_in_folio(folio, last_byte);
965
966 if (zero_offset) {
967 iosize = folio_size(folio) - zero_offset;
968 folio_zero_range(folio, zero_offset, iosize);
969 }
970 }
971 bio_ctrl->end_io_func = end_bbio_data_read;
972 begin_folio_read(fs_info, folio);
973 while (cur <= end) {
974 enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
975 bool force_bio_submit = false;
976 u64 disk_bytenr;
977
978 ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
979 if (cur >= last_byte) {
980 iosize = folio_size(folio) - pg_offset;
981 folio_zero_range(folio, pg_offset, iosize);
982 end_folio_read(folio, true, cur, iosize);
983 break;
984 }
985 em = get_extent_map(BTRFS_I(inode), folio, cur, end - cur + 1, em_cached);
986 if (IS_ERR(em)) {
987 end_folio_read(folio, false, cur, end + 1 - cur);
988 return PTR_ERR(em);
989 }
990 extent_offset = cur - em->start;
991 BUG_ON(extent_map_end(em) <= cur);
992 BUG_ON(end < cur);
993
994 compress_type = extent_map_compression(em);
995
996 iosize = min(extent_map_end(em) - cur, end - cur + 1);
997 iosize = ALIGN(iosize, blocksize);
998 if (compress_type != BTRFS_COMPRESS_NONE)
999 disk_bytenr = em->disk_bytenr;
1000 else
1001 disk_bytenr = extent_map_block_start(em) + extent_offset;
1002 block_start = extent_map_block_start(em);
1003 if (em->flags & EXTENT_FLAG_PREALLOC)
1004 block_start = EXTENT_MAP_HOLE;
1005
1006 /*
1007 * If we have a file range that points to a compressed extent
1008 * and it's followed by a consecutive file range that points
1009 * to the same compressed extent (possibly with a different
1010 * offset and/or length, so it either points to the whole extent
1011 * or only part of it), we must make sure we do not submit a
1012 * single bio to populate the folios for the 2 ranges because
1013 * this makes the compressed extent read zero out the folios
1014 * belonging to the 2nd range. Imagine the following scenario:
1015 *
1016 * File layout
1017 * [0 - 8K] [8K - 24K]
1018 * | |
1019 * | |
1020 * points to extent X, points to extent X,
1021 * offset 4K, length of 8K offset 0, length 16K
1022 *
1023 * [extent X, compressed length = 4K uncompressed length = 16K]
1024 *
1025 * If the bio to read the compressed extent covers both ranges,
1026 * it will decompress extent X into the folios belonging to the
1027 * first range and then it will stop, zeroing out the remaining
1028 * folios that belong to the other range that points to extent X.
1029 * So here we make sure we submit 2 bios, one for the first
1030 * range and another one for the second range. Both will target
1031 * the same physical extent from disk, but we can't currently
1032 * make the compressed bio endio callback populate the folios
1033 * for both ranges because each compressed bio is tightly
1034 * coupled with a single extent map, and each range can have
1035 * an extent map with a different offset value relative to the
1036 * uncompressed data of our extent and different lengths. This
1037 * is a corner case so we prioritize correctness and accept the
1038 * non-optimal behavior (submitting 2 bios for the same extent).
1039 */
1040 if (compress_type != BTRFS_COMPRESS_NONE &&
1041 prev_em_start && *prev_em_start != (u64)-1 &&
1042 *prev_em_start != em->start)
1043 force_bio_submit = true;
1044
1045 if (prev_em_start)
1046 *prev_em_start = em->start;
1047
1048 free_extent_map(em);
1049 em = NULL;
1050
1051 /* we've found a hole, just zero and go on */
1052 if (block_start == EXTENT_MAP_HOLE) {
1053 folio_zero_range(folio, pg_offset, iosize);
1054
1055 end_folio_read(folio, true, cur, iosize);
1056 cur = cur + iosize;
1057 pg_offset += iosize;
1058 continue;
1059 }
1060 /* the get_extent function already copied into the folio */
1061 if (block_start == EXTENT_MAP_INLINE) {
1062 end_folio_read(folio, true, cur, iosize);
1063 cur = cur + iosize;
1064 pg_offset += iosize;
1065 continue;
1066 }
1067
1068 if (bio_ctrl->compress_type != compress_type) {
1069 submit_one_bio(bio_ctrl);
1070 bio_ctrl->compress_type = compress_type;
1071 }
1072
1073 if (force_bio_submit)
1074 submit_one_bio(bio_ctrl);
1075 submit_extent_folio(bio_ctrl, disk_bytenr, folio, iosize,
1076 pg_offset);
1077 cur = cur + iosize;
1078 pg_offset += iosize;
1079 }
1080
1081 return 0;
1082}
1083
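/*
 * Summary of the function below: read a single folio.  Lock the range and
 * flush any ordered extents covering it, run btrfs_do_readpage(), unlock the
 * extent range and submit whatever bio was assembled.
 */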
1084int btrfs_read_folio(struct file *file, struct folio *folio)
1085{
1086 struct btrfs_inode *inode = folio_to_inode(folio);
1087 const u64 start = folio_pos(folio);
1088 const u64 end = start + folio_size(folio) - 1;
1089 struct extent_state *cached_state = NULL;
1090 struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
1091 struct extent_map *em_cached = NULL;
1092 int ret;
1093
1094 btrfs_lock_and_flush_ordered_range(inode, start, end, &cached_state);
1095 ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, NULL);
1096 unlock_extent(&inode->io_tree, start, end, &cached_state);
1097
1098 free_extent_map(em_cached);
1099
1100 /*
1101 * If btrfs_do_readpage() failed we will want to submit the assembled
1102 * bio to do the cleanup.
1103 */
1104 submit_one_bio(&bio_ctrl);
1105 return ret;
1106}
1107
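/*
 * Summary of the helper below: mark the sectors of @folio covered by
 * [@start, @start + @len) in the caller provided @delalloc_bitmap (one bit
 * per sector).
 */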
1108static void set_delalloc_bitmap(struct folio *folio, unsigned long *delalloc_bitmap,
1109 u64 start, u32 len)
1110{
1111 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
1112 const u64 folio_start = folio_pos(folio);
1113 unsigned int start_bit;
1114 unsigned int nbits;
1115
1116 ASSERT(start >= folio_start && start + len <= folio_start + PAGE_SIZE);
1117 start_bit = (start - folio_start) >> fs_info->sectorsize_bits;
1118 nbits = len >> fs_info->sectorsize_bits;
1119 ASSERT(bitmap_test_range_all_zero(delalloc_bitmap, start_bit, nbits));
1120 bitmap_set(delalloc_bitmap, start_bit, nbits);
1121}
1122
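/*
 * Summary of the helper below: find the next run of set bits in
 * @delalloc_bitmap at or after @start and return its range in @found_start /
 * @found_len.  Returns false if there is no delalloc sector left in the
 * folio.
 */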
1123static bool find_next_delalloc_bitmap(struct folio *folio,
1124 unsigned long *delalloc_bitmap, u64 start,
1125 u64 *found_start, u32 *found_len)
1126{
1127 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
1128 const u64 folio_start = folio_pos(folio);
1129 const unsigned int bitmap_size = fs_info->sectors_per_page;
1130 unsigned int start_bit;
1131 unsigned int first_zero;
1132 unsigned int first_set;
1133
1134 ASSERT(start >= folio_start && start < folio_start + PAGE_SIZE);
1135
1136 start_bit = (start - folio_start) >> fs_info->sectorsize_bits;
1137 first_set = find_next_bit(delalloc_bitmap, bitmap_size, start_bit);
1138 if (first_set >= bitmap_size)
1139 return false;
1140
1141 *found_start = folio_start + (first_set << fs_info->sectorsize_bits);
1142 first_zero = find_next_zero_bit(delalloc_bitmap, bitmap_size, first_set);
1143 *found_len = (first_zero - first_set) << fs_info->sectorsize_bits;
1144 return true;
1145}
1146
1147/*
1148 * Do all of the delayed allocation setup.
1149 *
1150 * Return >0 if all the dirty blocks are submitted async (compression) or inlined.
1151 * The @folio should no longer be touched (treat it as already unlocked).
1152 *
1153 * Return 0 if there is still dirty block that needs to be submitted through
1154 * extent_writepage_io().
1155 * bio_ctrl->submit_bitmap will indicate which blocks of the folio should be
1156 * submitted, and @folio is still kept locked.
1157 *
1158 * Return <0 if there is any error hit.
1159 * Any allocated ordered extent range covering this folio will be marked
1160 * finished (IOERR), and @folio is still kept locked.
1161 */
1162static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
1163 struct folio *folio,
1164 struct btrfs_bio_ctrl *bio_ctrl)
1165{
1166 struct btrfs_fs_info *fs_info = inode_to_fs_info(&inode->vfs_inode);
1167 struct writeback_control *wbc = bio_ctrl->wbc;
1168 const bool is_subpage = btrfs_is_subpage(fs_info, folio->mapping);
1169 const u64 page_start = folio_pos(folio);
1170 const u64 page_end = page_start + folio_size(folio) - 1;
1171 unsigned long delalloc_bitmap = 0;
1172 /*
1173 * Save the last found delalloc end. As the delalloc end can go beyond
1174 * page boundary, thus we cannot rely on subpage bitmap to locate the
1175 * last delalloc end.
1176 */
1177 u64 last_delalloc_end = 0;
1178 /*
1179 * The range end (exclusive) of the last successfully finished delalloc
1180 * range.
1181 * Any range covered by ordered extent must either be manually marked
1182 * finished (error handling), or has IO submitted (and finish the
1183 * ordered extent normally).
1184 *
1185 * This records the end of ordered extent cleanup if we hit an error.
1186 */
1187 u64 last_finished_delalloc_end = page_start;
1188 u64 delalloc_start = page_start;
1189 u64 delalloc_end = page_end;
1190 u64 delalloc_to_write = 0;
1191 int ret = 0;
1192 int bit;
1193
1194 /* Save the dirty bitmap as our submission bitmap will be a subset of it. */
1195 if (btrfs_is_subpage(fs_info, inode->vfs_inode.i_mapping)) {
1196 ASSERT(fs_info->sectors_per_page > 1);
1197 btrfs_get_subpage_dirty_bitmap(fs_info, folio, &bio_ctrl->submit_bitmap);
1198 } else {
1199 bio_ctrl->submit_bitmap = 1;
1200 }
1201
1202 for_each_set_bit(bit, &bio_ctrl->submit_bitmap, fs_info->sectors_per_page) {
1203 u64 start = page_start + (bit << fs_info->sectorsize_bits);
1204
1205 btrfs_folio_set_lock(fs_info, folio, start, fs_info->sectorsize);
1206 }
1207
1208 /* Lock all (subpage) delalloc ranges inside the folio first. */
1209 while (delalloc_start < page_end) {
1210 delalloc_end = page_end;
1211 if (!find_lock_delalloc_range(&inode->vfs_inode, folio,
1212 &delalloc_start, &delalloc_end)) {
1213 delalloc_start = delalloc_end + 1;
1214 continue;
1215 }
1216 set_delalloc_bitmap(folio, &delalloc_bitmap, delalloc_start,
1217 min(delalloc_end, page_end) + 1 - delalloc_start);
1218 last_delalloc_end = delalloc_end;
1219 delalloc_start = delalloc_end + 1;
1220 }
1221 delalloc_start = page_start;
1222
1223 if (!last_delalloc_end)
1224 goto out;
1225
1226 /* Run the delalloc ranges for the above locked ranges. */
1227 while (delalloc_start < page_end) {
1228 u64 found_start;
1229 u32 found_len;
1230 bool found;
1231
1232 if (!is_subpage) {
1233 /*
1234 * For non-subpage case, the found delalloc range must
1235 * cover this folio and there must be only one locked
1236 * delalloc range.
1237 */
1238 found_start = page_start;
1239 found_len = last_delalloc_end + 1 - found_start;
1240 found = true;
1241 } else {
1242 found = find_next_delalloc_bitmap(folio, &delalloc_bitmap,
1243 delalloc_start, &found_start, &found_len);
1244 }
1245 if (!found)
1246 break;
1247 /*
1248 * If the subpage range covers the last sector, the delalloc range
1249 * may end beyond the folio boundary; use the saved delalloc_end
1250 * instead.
1251 */
1252 if (found_start + found_len >= page_end)
1253 found_len = last_delalloc_end + 1 - found_start;
1254
1255 if (ret >= 0) {
1256 /*
1257 * Some delalloc range may be created by previous folios.
1258 * Thus we still need to clean up this range during error
1259 * handling.
1260 */
1261 last_finished_delalloc_end = found_start;
1262 /* No errors hit so far, run the current delalloc range. */
1263 ret = btrfs_run_delalloc_range(inode, folio,
1264 found_start,
1265 found_start + found_len - 1,
1266 wbc);
1267 if (ret >= 0)
1268 last_finished_delalloc_end = found_start + found_len;
1269 } else {
1270 /*
1271 * We've hit an error during previous delalloc range,
1272 * have to cleanup the remaining locked ranges.
1273 */
1274 unlock_extent(&inode->io_tree, found_start,
1275 found_start + found_len - 1, NULL);
1276 __unlock_for_delalloc(&inode->vfs_inode, folio,
1277 found_start,
1278 found_start + found_len - 1);
1279 }
1280
1281 /*
1282 * We have some ranges that are going to be submitted asynchronously
1283 * (compression or inline). These ranges have their own control
1284 * over when to unlock the pages. We should not touch them
1285 * anymore, so clear the range from the submission bitmap.
1286 */
1287 if (ret > 0) {
1288 unsigned int start_bit = (found_start - page_start) >>
1289 fs_info->sectorsize_bits;
1290 unsigned int end_bit = (min(page_end + 1, found_start + found_len) -
1291 page_start) >> fs_info->sectorsize_bits;
1292 bitmap_clear(&bio_ctrl->submit_bitmap, start_bit, end_bit - start_bit);
1293 }
1294 /*
1295 * Above btrfs_run_delalloc_range() may have unlocked the folio,
1296 * thus for the last range, we cannot touch the folio anymore.
1297 */
1298 if (found_start + found_len >= last_delalloc_end + 1)
1299 break;
1300
1301 delalloc_start = found_start + found_len;
1302 }
1303 /*
1304 * It's possible we had some ordered extents created before we hit
1305 * an error, cleanup non-async successfully created delalloc ranges.
1306 */
1307 if (unlikely(ret < 0)) {
1308 unsigned int bitmap_size = min(
1309 (last_finished_delalloc_end - page_start) >>
1310 fs_info->sectorsize_bits,
1311 fs_info->sectors_per_page);
1312
1313 for_each_set_bit(bit, &bio_ctrl->submit_bitmap, bitmap_size)
1314 btrfs_mark_ordered_io_finished(inode, folio,
1315 page_start + (bit << fs_info->sectorsize_bits),
1316 fs_info->sectorsize, false);
1317 return ret;
1318 }
1319out:
1320 if (last_delalloc_end)
1321 delalloc_end = last_delalloc_end;
1322 else
1323 delalloc_end = page_end;
1324 /*
1325 * delalloc_end is already one less than the total length, so
1326 * we don't subtract one from PAGE_SIZE
1327 */
1328 delalloc_to_write +=
1329 DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
1330
1331 /*
1332 * If all ranges are submitted asynchronously, we just need to account
1333 * for them here.
1334 */
1335 if (bitmap_empty(&bio_ctrl->submit_bitmap, fs_info->sectors_per_page)) {
1336 wbc->nr_to_write -= delalloc_to_write;
1337 return 1;
1338 }
1339
1340 if (wbc->nr_to_write < delalloc_to_write) {
1341 int thresh = 8192;
1342
1343 if (delalloc_to_write < thresh * 2)
1344 thresh = delalloc_to_write;
1345 wbc->nr_to_write = min_t(u64, delalloc_to_write,
1346 thresh);
1347 }
1348
1349 return 0;
1350}
1351
1352/*
1353 * Return 0 if we have submitted or queued the sector for submission.
1354 * Return <0 for critical errors.
1355 *
1356 * Caller should make sure filepos < i_size and handle filepos >= i_size case.
1357 */
1358static int submit_one_sector(struct btrfs_inode *inode,
1359 struct folio *folio,
1360 u64 filepos, struct btrfs_bio_ctrl *bio_ctrl,
1361 loff_t i_size)
1362{
1363 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1364 struct extent_map *em;
1365 u64 block_start;
1366 u64 disk_bytenr;
1367 u64 extent_offset;
1368 u64 em_end;
1369 const u32 sectorsize = fs_info->sectorsize;
1370
1371 ASSERT(IS_ALIGNED(filepos, sectorsize));
1372
1373 /* @filepos >= i_size case should be handled by the caller. */
1374 ASSERT(filepos < i_size);
1375
1376 em = btrfs_get_extent(inode, NULL, filepos, sectorsize);
1377 if (IS_ERR(em))
1378 return PTR_ERR_OR_ZERO(em);
1379
1380 extent_offset = filepos - em->start;
1381 em_end = extent_map_end(em);
1382 ASSERT(filepos <= em_end);
1383 ASSERT(IS_ALIGNED(em->start, sectorsize));
1384 ASSERT(IS_ALIGNED(em->len, sectorsize));
1385
1386 block_start = extent_map_block_start(em);
1387 disk_bytenr = extent_map_block_start(em) + extent_offset;
1388
1389 ASSERT(!extent_map_is_compressed(em));
1390 ASSERT(block_start != EXTENT_MAP_HOLE);
1391 ASSERT(block_start != EXTENT_MAP_INLINE);
1392
1393 free_extent_map(em);
1394 em = NULL;
1395
1396 /*
1397 * Although the PageDirty bit is cleared before entering this
1398 * function, subpage dirty bit is not cleared.
1399 * So clear subpage dirty bit here so next time we won't submit
1400 * a folio for a range already written to disk.
1401 */
1402 btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize);
1403 btrfs_folio_set_writeback(fs_info, folio, filepos, sectorsize);
1404 /*
1405 * Above call should set the whole folio with writeback flag, even
1406 * just for a single subpage sector.
1407 * As long as the folio is properly locked and the range is correct,
1408 * we should always get the folio with writeback flag.
1409 */
1410 ASSERT(folio_test_writeback(folio));
1411
1412 submit_extent_folio(bio_ctrl, disk_bytenr, folio,
1413 sectorsize, filepos - folio_pos(folio));
1414 return 0;
1415}
1416
1417/*
1418 * Helper for extent_writepage(). This calls the writepage start hooks,
1419 * and does the loop to map the page into extents and bios.
1420 *
1421 * We return 1 if the IO is started and the page is unlocked,
1422 * 0 if all went well (page still locked)
1423 * < 0 if there were errors (page still locked)
1424 */
1425static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode,
1426 struct folio *folio,
1427 u64 start, u32 len,
1428 struct btrfs_bio_ctrl *bio_ctrl,
1429 loff_t i_size)
1430{
1431 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1432 unsigned long range_bitmap = 0;
1433 bool submitted_io = false;
1434 bool error = false;
1435 const u64 folio_start = folio_pos(folio);
1436 u64 cur;
1437 int bit;
1438 int ret = 0;
1439
1440 ASSERT(start >= folio_start &&
1441 start + len <= folio_start + folio_size(folio));
1442
1443 ret = btrfs_writepage_cow_fixup(folio);
1444 if (ret) {
1445 /* Fixup worker will requeue */
1446 folio_redirty_for_writepage(bio_ctrl->wbc, folio);
1447 folio_unlock(folio);
1448 return 1;
1449 }
1450
1451 for (cur = start; cur < start + len; cur += fs_info->sectorsize)
1452 set_bit((cur - folio_start) >> fs_info->sectorsize_bits, &range_bitmap);
1453 bitmap_and(&bio_ctrl->submit_bitmap, &bio_ctrl->submit_bitmap, &range_bitmap,
1454 fs_info->sectors_per_page);
1455
1456 bio_ctrl->end_io_func = end_bbio_data_write;
1457
1458 for_each_set_bit(bit, &bio_ctrl->submit_bitmap, fs_info->sectors_per_page) {
1459 cur = folio_pos(folio) + (bit << fs_info->sectorsize_bits);
1460
1461 if (cur >= i_size) {
1462 btrfs_mark_ordered_io_finished(inode, folio, cur,
1463 start + len - cur, true);
1464 /*
1465 * This range is beyond i_size, thus we don't need to
1466 * bother writing back.
1467 * But we still need to clear the dirty subpage bit, or
1468 * the next time the folio gets dirtied, we will try to
1469 * writeback the sectors with subpage dirty bits,
1470 * causing writeback without ordered extent.
1471 */
1472 btrfs_folio_clear_dirty(fs_info, folio, cur,
1473 start + len - cur);
1474 break;
1475 }
1476 ret = submit_one_sector(inode, folio, cur, bio_ctrl, i_size);
1477 if (unlikely(ret < 0)) {
1478 /*
1479 * bio_ctrl may contain a bio crossing several folios.
1480 * Submit it immediately so that the bio has a chance
1481 * to finish normally, other than marked as error.
1482 */
1483 submit_one_bio(bio_ctrl);
1484 /*
1485 * Failed to grab the extent map which should be very rare.
1486 * Since there is no bio submitted to finish the ordered
1487 * extent, we have to manually finish this sector.
1488 */
1489 btrfs_mark_ordered_io_finished(inode, folio, cur,
1490 fs_info->sectorsize, false);
1491 error = true;
1492 continue;
1493 }
1494 submitted_io = true;
1495 }
1496
1497 /*
1498 * If we didn't submit any sector (>= i_size), the folio dirty flag gets
1499 * cleared but PAGECACHE_TAG_DIRTY is not cleared (only cleared
1500 * by folio_start_writeback() if the folio is not dirty).
1501 *
1502 * Here we set writeback and clear for the range. If the full folio
1503 * is no longer dirty then we clear the PAGECACHE_TAG_DIRTY tag.
1504 *
1505 * If we hit any error, the corresponding sector will still be dirty
1506 * thus no need to clear PAGECACHE_TAG_DIRTY.
1507 */
1508 if (!submitted_io && !error) {
1509 btrfs_folio_set_writeback(fs_info, folio, start, len);
1510 btrfs_folio_clear_writeback(fs_info, folio, start, len);
1511 }
1512 return ret;
1513}
1514
1515/*
1516 * the writepage semantics are similar to regular writepage. extent
1517 * records are inserted to lock ranges in the tree, and as dirty areas
1518 * are found, they are marked writeback. Then the lock bits are removed
1519 * and the end_io handler clears the writeback ranges
1520 *
1521 * Return 0 if everything goes well.
1522 * Return <0 for error.
1523 */
1524static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl)
1525{
1526 struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
1527 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1528 int ret;
1529 size_t pg_offset;
1530 loff_t i_size = i_size_read(&inode->vfs_inode);
1531 unsigned long end_index = i_size >> PAGE_SHIFT;
1532
1533 trace_extent_writepage(folio, &inode->vfs_inode, bio_ctrl->wbc);
1534
1535 WARN_ON(!folio_test_locked(folio));
1536
1537 pg_offset = offset_in_folio(folio, i_size);
1538 if (folio->index > end_index ||
1539 (folio->index == end_index && !pg_offset)) {
1540 folio_invalidate(folio, 0, folio_size(folio));
1541 folio_unlock(folio);
1542 return 0;
1543 }
1544
1545 if (folio->index == end_index)
1546 folio_zero_range(folio, pg_offset, folio_size(folio) - pg_offset);
1547
1548 /*
1549 * Default to unlock the whole folio.
1550 * The proper bitmap can only be initialized by writepage_delalloc().
1551 */
1552 bio_ctrl->submit_bitmap = (unsigned long)-1;
1553 ret = set_folio_extent_mapped(folio);
1554 if (ret < 0)
1555 goto done;
1556
1557 ret = writepage_delalloc(inode, folio, bio_ctrl);
1558 if (ret == 1)
1559 return 0;
1560 if (ret)
1561 goto done;
1562
1563 ret = extent_writepage_io(inode, folio, folio_pos(folio),
1564 PAGE_SIZE, bio_ctrl, i_size);
1565 if (ret == 1)
1566 return 0;
1567
1568 bio_ctrl->wbc->nr_to_write--;
1569
1570done:
1571 if (ret < 0)
1572 mapping_set_error(folio->mapping, ret);
1573 /*
1574 * Only unlock ranges that are submitted. As there can be some async
1575 * submitted ranges inside the folio.
1576 */
1577 btrfs_folio_end_lock_bitmap(fs_info, folio, bio_ctrl->submit_bitmap);
1578 ASSERT(ret <= 0);
1579 return ret;
1580}
1581
1582void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
1583{
1584 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
1585 TASK_UNINTERRUPTIBLE);
1586}
1587
1588/*
1589 * Lock extent buffer status and pages for writeback.
1590 *
1591 * Return %false if the extent buffer doesn't need to be submitted (e.g. the
1592 * extent buffer is not dirty).
1593 * Return %true if the extent buffer is submitted to bio.
1594 */
1595static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
1596 struct writeback_control *wbc)
1597{
1598 struct btrfs_fs_info *fs_info = eb->fs_info;
1599 bool ret = false;
1600
1601 btrfs_tree_lock(eb);
1602 while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
1603 btrfs_tree_unlock(eb);
1604 if (wbc->sync_mode != WB_SYNC_ALL)
1605 return false;
1606 wait_on_extent_buffer_writeback(eb);
1607 btrfs_tree_lock(eb);
1608 }
1609
1610 /*
1611 * We need to do this to prevent races with anyone checking if the eb is
1612 * under IO since we can end up having no IO bits set for a short period
1613 * of time.
1614 */
1615 spin_lock(&eb->refs_lock);
1616 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
1617 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1618 spin_unlock(&eb->refs_lock);
1619 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
1620 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
1621 -eb->len,
1622 fs_info->dirty_metadata_batch);
1623 ret = true;
1624 } else {
1625 spin_unlock(&eb->refs_lock);
1626 }
1627 btrfs_tree_unlock(eb);
1628 return ret;
1629}
1630
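/*
 * Summary of the function below: record a metadata write error.  Mark the
 * extent buffer with a write error, clear its uptodate bit, set the mapping
 * error on the btree inode, and flag the error in fs_info depending on
 * whether the buffer belongs to a log tree.
 */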
1631static void set_btree_ioerr(struct extent_buffer *eb)
1632{
1633 struct btrfs_fs_info *fs_info = eb->fs_info;
1634
1635 set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1636
1637 /*
1638 * A read may stumble upon this buffer later, make sure that it gets an
1639 * error and knows there was an error.
1640 */
1641 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
1642
1643 /*
1644 * We need to set the mapping with the io error as well because a write
1645 * error will flip the file system readonly, and then syncfs() will
1646 * return a 0 because we are readonly if we don't modify the err seq for
1647 * the superblock.
1648 */
1649 mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO);
1650
1651 /*
1652 * If writeback for a btree extent that doesn't belong to a log tree
1653 * failed, increment the counter transaction->eb_write_errors.
1654 * We do this because while the transaction is running and before it's
1655 * committing (when we call filemap_fdata[write|wait]_range against
1656 * the btree inode), we might have
1657 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
1658 * returns an error or an error happens during writeback, when we're
1659 * committing the transaction we wouldn't know about it, since the pages
1660 * may no longer be dirty nor marked for writeback (if a
1661 * subsequent modification to the extent buffer didn't happen before the
1662 * transaction commit), which makes filemap_fdata[write|wait]_range not
1663 * able to find the pages which contain errors at transaction
1664 * commit time. So if this happens we must abort the transaction,
1665 * otherwise we commit a super block with btree roots that point to
1666 * btree nodes/leafs whose content on disk is invalid - either garbage
1667 * or the content of some node/leaf from a past generation that got
1668 * cowed or deleted and is no longer valid.
1669 *
1670 * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
1671 * not be enough - we need to distinguish between log tree extents vs
1672 * non-log tree extents, and the next filemap_fdatawait_range() call
1673 * will catch and clear such errors in the mapping - and that call might
1674 * be from a log sync and not from a transaction commit. Also, checking
1675 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
1676 * not done and would not be reliable - the eb might have been released
1677 * from memory and reading it back again means that flag would not be
1678 * set (since it's a runtime flag, not persisted on disk).
1679 *
1680 * Using the flags below in the btree inode also makes us achieve the
1681 * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started
1682 * writeback for all dirty pages and before filemap_fdatawait_range()
1683 * is called, the writeback for all dirty pages had already finished
1684 * with errors - because we were not using AS_EIO/AS_ENOSPC,
1685 * filemap_fdatawait_range() would return success, as it could not know
1686 * that writeback errors happened (the pages were no longer tagged for
1687 * writeback).
1688 */
1689 switch (eb->log_index) {
1690 case -1:
1691 set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
1692 break;
1693 case 0:
1694 set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
1695 break;
1696 case 1:
1697 set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
1698 break;
1699 default:
1700 BUG(); /* unexpected, logic error */
1701 }
1702}
1703
1704/*
1705 * The endio specific version which won't touch any unsafe spinlock in endio
1706 * context.
1707 */
1708static struct extent_buffer *find_extent_buffer_nolock(
1709 const struct btrfs_fs_info *fs_info, u64 start)
1710{
1711 struct extent_buffer *eb;
1712
1713 rcu_read_lock();
1714 eb = radix_tree_lookup(&fs_info->buffer_radix,
1715 start >> fs_info->sectorsize_bits);
1716 if (eb && atomic_inc_not_zero(&eb->refs)) {
1717 rcu_read_unlock();
1718 return eb;
1719 }
1720 rcu_read_unlock();
1721 return NULL;
1722}
1723
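/*
 * Summary of the end_io handler below: record any metadata write error,
 * clear the (subpage) writeback bits of every folio in the bio, then clear
 * EXTENT_BUFFER_WRITEBACK and wake up waiters.
 */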
1724static void end_bbio_meta_write(struct btrfs_bio *bbio)
1725{
1726 struct extent_buffer *eb = bbio->private;
1727 struct btrfs_fs_info *fs_info = eb->fs_info;
1728 bool uptodate = !bbio->bio.bi_status;
1729 struct folio_iter fi;
1730 u32 bio_offset = 0;
1731
1732 if (!uptodate)
1733 set_btree_ioerr(eb);
1734
1735 bio_for_each_folio_all(fi, &bbio->bio) {
1736 u64 start = eb->start + bio_offset;
1737 struct folio *folio = fi.folio;
1738 u32 len = fi.length;
1739
1740 btrfs_folio_clear_writeback(fs_info, folio, start, len);
1741 bio_offset += len;
1742 }
1743
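	/*
	 * Clear the writeback bit before waking any waiters; the barrier
	 * ensures the bit clear is visible to tasks about to sleep on it.
	 */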
1744 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1745 smp_mb__after_atomic();
1746 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
1747
1748 bio_put(&bbio->bio);
1749}
1750
1751static void prepare_eb_write(struct extent_buffer *eb)
1752{
1753 u32 nritems;
1754 unsigned long start;
1755 unsigned long end;
1756
1757 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1758
1759 /* Set btree blocks beyond nritems with 0 to avoid stale content */
1760 nritems = btrfs_header_nritems(eb);
1761 if (btrfs_header_level(eb) > 0) {
1762 end = btrfs_node_key_ptr_offset(eb, nritems);
1763 memzero_extent_buffer(eb, end, eb->len - end);
1764 } else {
1765 /*
1766 * Leaf:
1767 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
1768 */
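		/*
		 * Zero the unused gap between the end of the item headers and
		 * the start of the item data, which grows backwards from the
		 * end of the leaf.
		 */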
1769 start = btrfs_item_nr_offset(eb, nritems);
1770 end = btrfs_item_nr_offset(eb, 0);
1771 if (nritems == 0)
1772 end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
1773 else
1774 end += btrfs_item_offset(eb, nritems - 1);
1775 memzero_extent_buffer(eb, start, end - start);
1776 }
1777}
1778
1779static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
1780 struct writeback_control *wbc)
1781{
1782 struct btrfs_fs_info *fs_info = eb->fs_info;
1783 struct btrfs_bio *bbio;
1784
1785 prepare_eb_write(eb);
1786
1787 bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
1788 REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
1789 eb->fs_info, end_bbio_meta_write, eb);
1790 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
1791 bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
1792 wbc_init_bio(wbc, &bbio->bio);
1793 bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
1794 bbio->file_offset = eb->start;
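	/*
	 * For subpage metadata (nodesize < PAGE_SIZE) the whole eb lives in
	 * the first folio, so only eb->folios[0] is added to the bio and the
	 * dirty/writeback state is tracked via the subpage bitmaps.
	 */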
1795 if (fs_info->nodesize < PAGE_SIZE) {
1796 struct folio *folio = eb->folios[0];
1797 bool ret;
1798
1799 folio_lock(folio);
1800 btrfs_subpage_set_writeback(fs_info, folio, eb->start, eb->len);
1801 if (btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start,
1802 eb->len)) {
1803 folio_clear_dirty_for_io(folio);
1804 wbc->nr_to_write--;
1805 }
1806 ret = bio_add_folio(&bbio->bio, folio, eb->len,
1807 eb->start - folio_pos(folio));
1808 ASSERT(ret);
1809 wbc_account_cgroup_owner(wbc, folio, eb->len);
1810 folio_unlock(folio);
1811 } else {
1812 int num_folios = num_extent_folios(eb);
1813
1814 for (int i = 0; i < num_folios; i++) {
1815 struct folio *folio = eb->folios[i];
1816 bool ret;
1817
1818 folio_lock(folio);
1819 folio_clear_dirty_for_io(folio);
1820 folio_start_writeback(folio);
1821 ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
1822 ASSERT(ret);
1823 wbc_account_cgroup_owner(wbc, folio, eb->folio_size);
1824 wbc->nr_to_write -= folio_nr_pages(folio);
1825 folio_unlock(folio);
1826 }
1827 }
1828 btrfs_submit_bbio(bbio, 0);
1829}
1830
1831/*
1832 * Submit one subpage btree page.
1833 *
1834 * The main difference to submit_eb_page() is:
1835 * - Page locking
1836 * For subpage, we don't rely on page locking at all.
1837 *
1838 * - Flush write bio
1839 * We only flush bio if we may be unable to fit current extent buffers into
1840 * current bio.
1841 *
1842 * Return >=0 for the number of submitted extent buffers.
1843 * Return <0 for fatal error.
1844 */
1845static int submit_eb_subpage(struct folio *folio, struct writeback_control *wbc)
1846{
1847 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
1848 int submitted = 0;
1849 u64 folio_start = folio_pos(folio);
1850 int bit_start = 0;
1851 int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
1852
	/* Lock and write each dirty extent buffer in the range */
1854 while (bit_start < fs_info->sectors_per_page) {
1855 struct btrfs_subpage *subpage = folio_get_private(folio);
1856 struct extent_buffer *eb;
1857 unsigned long flags;
1858 u64 start;
1859
1860 /*
1861 * Take private lock to ensure the subpage won't be detached
1862 * in the meantime.
1863 */
1864 spin_lock(&folio->mapping->i_private_lock);
1865 if (!folio_test_private(folio)) {
1866 spin_unlock(&folio->mapping->i_private_lock);
1867 break;
1868 }
1869 spin_lock_irqsave(&subpage->lock, flags);
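		/*
		 * The subpage bitmaps are packed into a single array; the dirty
		 * bitmap starts at bit btrfs_bitmap_nr_dirty * sectors_per_page,
		 * hence the offset in the test below.
		 */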
1870 if (!test_bit(bit_start + btrfs_bitmap_nr_dirty * fs_info->sectors_per_page,
1871 subpage->bitmaps)) {
1872 spin_unlock_irqrestore(&subpage->lock, flags);
1873 spin_unlock(&folio->mapping->i_private_lock);
1874 bit_start++;
1875 continue;
1876 }
1877
1878 start = folio_start + bit_start * fs_info->sectorsize;
1879 bit_start += sectors_per_node;
1880
1881 /*
1882 * Here we just want to grab the eb without touching extra
1883 * spin locks, so call find_extent_buffer_nolock().
1884 */
1885 eb = find_extent_buffer_nolock(fs_info, start);
1886 spin_unlock_irqrestore(&subpage->lock, flags);
1887 spin_unlock(&folio->mapping->i_private_lock);
1888
1889 /*
1890 * The eb has already reached 0 refs thus find_extent_buffer()
1891 * doesn't return it. We don't need to write back such eb
1892 * anyway.
1893 */
1894 if (!eb)
1895 continue;
1896
1897 if (lock_extent_buffer_for_io(eb, wbc)) {
1898 write_one_eb(eb, wbc);
1899 submitted++;
1900 }
1901 free_extent_buffer(eb);
1902 }
1903 return submitted;
1904}
1905
1906/*
1907 * Submit all page(s) of one extent buffer.
1908 *
 * @folio:      the folio of one extent buffer
 * @ctx:        to determine if we need to submit this folio, if the current
 *              folio belongs to this eb, we don't need to submit it
 *
 * The caller should pass each folio in bytenr order, and here we use
 * @ctx->eb to determine if we have already submitted the folios of one
 * extent buffer.
 *
 * If we have, we just skip until we hit a new folio that doesn't belong to
 * current @ctx->eb.
1918 *
1919 * If not, we submit all the page(s) of the extent buffer.
1920 *
1921 * Return >0 if we have submitted the extent buffer successfully.
1922 * Return 0 if we don't need to submit the page, as it's already submitted by
1923 * previous call.
1924 * Return <0 for fatal error.
1925 */
1926static int submit_eb_page(struct folio *folio, struct btrfs_eb_write_context *ctx)
1927{
1928 struct writeback_control *wbc = ctx->wbc;
1929 struct address_space *mapping = folio->mapping;
1930 struct extent_buffer *eb;
1931 int ret;
1932
1933 if (!folio_test_private(folio))
1934 return 0;
1935
1936 if (folio_to_fs_info(folio)->nodesize < PAGE_SIZE)
1937 return submit_eb_subpage(folio, wbc);
1938
1939 spin_lock(&mapping->i_private_lock);
1940 if (!folio_test_private(folio)) {
1941 spin_unlock(&mapping->i_private_lock);
1942 return 0;
1943 }
1944
1945 eb = folio_get_private(folio);
1946
1947 /*
1948 * Shouldn't happen and normally this would be a BUG_ON but no point
1949 * crashing the machine for something we can survive anyway.
1950 */
1951 if (WARN_ON(!eb)) {
1952 spin_unlock(&mapping->i_private_lock);
1953 return 0;
1954 }
1955
1956 if (eb == ctx->eb) {
1957 spin_unlock(&mapping->i_private_lock);
1958 return 0;
1959 }
1960 ret = atomic_inc_not_zero(&eb->refs);
1961 spin_unlock(&mapping->i_private_lock);
1962 if (!ret)
1963 return 0;
1964
1965 ctx->eb = eb;
1966
1967 ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx);
1968 if (ret) {
1969 if (ret == -EBUSY)
1970 ret = 0;
1971 free_extent_buffer(eb);
1972 return ret;
1973 }
1974
1975 if (!lock_extent_buffer_for_io(eb, wbc)) {
1976 free_extent_buffer(eb);
1977 return 0;
1978 }
1979 /* Implies write in zoned mode. */
1980 if (ctx->zoned_bg) {
1981 /* Mark the last eb in the block group. */
1982 btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
1983 ctx->zoned_bg->meta_write_pointer += eb->len;
1984 }
1985 write_one_eb(eb, wbc);
1986 free_extent_buffer(eb);
1987 return 1;
1988}
1989
1990int btree_write_cache_pages(struct address_space *mapping,
1991 struct writeback_control *wbc)
1992{
1993 struct btrfs_eb_write_context ctx = { .wbc = wbc };
1994 struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
1995 int ret = 0;
1996 int done = 0;
1997 int nr_to_write_done = 0;
1998 struct folio_batch fbatch;
1999 unsigned int nr_folios;
2000 pgoff_t index;
2001 pgoff_t end; /* Inclusive */
2002 int scanned = 0;
2003 xa_mark_t tag;
2004
2005 folio_batch_init(&fbatch);
2006 if (wbc->range_cyclic) {
2007 index = mapping->writeback_index; /* Start from prev offset */
2008 end = -1;
2009 /*
		 * Starting from the beginning does not need to cycle over the
		 * range, so mark it as scanned.
2012 */
2013 scanned = (index == 0);
2014 } else {
2015 index = wbc->range_start >> PAGE_SHIFT;
2016 end = wbc->range_end >> PAGE_SHIFT;
2017 scanned = 1;
2018 }
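	/*
	 * For data integrity writeback (WB_SYNC_ALL), work against the TOWRITE
	 * tag that tag_pages_for_writeback() below sets on the currently dirty
	 * pages, so pages dirtied during the walk cannot livelock us.
	 */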
2019 if (wbc->sync_mode == WB_SYNC_ALL)
2020 tag = PAGECACHE_TAG_TOWRITE;
2021 else
2022 tag = PAGECACHE_TAG_DIRTY;
2023 btrfs_zoned_meta_io_lock(fs_info);
2024retry:
2025 if (wbc->sync_mode == WB_SYNC_ALL)
2026 tag_pages_for_writeback(mapping, index, end);
2027 while (!done && !nr_to_write_done && (index <= end) &&
2028 (nr_folios = filemap_get_folios_tag(mapping, &index, end,
2029 tag, &fbatch))) {
2030 unsigned i;
2031
2032 for (i = 0; i < nr_folios; i++) {
2033 struct folio *folio = fbatch.folios[i];
2034
2035 ret = submit_eb_page(folio, &ctx);
2036 if (ret == 0)
2037 continue;
2038 if (ret < 0) {
2039 done = 1;
2040 break;
2041 }
2042
2043 /*
			 * The filesystem may choose to bump up nr_to_write.
			 * We have to make sure to honor the new nr_to_write
			 * at any time.
2047 */
2048 nr_to_write_done = wbc->nr_to_write <= 0;
2049 }
2050 folio_batch_release(&fbatch);
2051 cond_resched();
2052 }
2053 if (!scanned && !done) {
2054 /*
2055 * We hit the last page and there is more work to be done: wrap
2056 * back to the start of the file
2057 */
2058 scanned = 1;
2059 index = 0;
2060 goto retry;
2061 }
2062 /*
2063 * If something went wrong, don't allow any metadata write bio to be
2064 * submitted.
2065 *
2066 * This would prevent use-after-free if we had dirty pages not
2067 * cleaned up, which can still happen by fuzzed images.
2068 *
2069 * - Bad extent tree
2070 * Allowing existing tree block to be allocated for other trees.
2071 *
2072 * - Log tree operations
	 *   Existing tree blocks get allocated to the log tree, which bumps their
	 *   generation, then they get cleaned in tree re-balance.
2075 * Such tree block will not be written back, since it's clean,
2076 * thus no WRITTEN flag set.
2077 * And after log writes back, this tree block is not traced by
2078 * any dirty extent_io_tree.
2079 *
2080 * - Offending tree block gets re-dirtied from its original owner
2081 * Since it has bumped generation, no WRITTEN flag, it can be
2082 * reused without COWing. This tree block will not be traced
2083 * by btrfs_transaction::dirty_pages.
2084 *
2085 * Now such dirty tree block will not be cleaned by any dirty
2086 * extent io tree. Thus we don't want to submit such wild eb
2087 * if the fs already has error.
2088 *
	 * We can get ret > 0 from submit_eb_page() indicating how many ebs
2090 * were submitted. Reset it to 0 to avoid false alerts for the caller.
2091 */
2092 if (ret > 0)
2093 ret = 0;
2094 if (!ret && BTRFS_FS_ERROR(fs_info))
2095 ret = -EROFS;
2096
2097 if (ctx.zoned_bg)
2098 btrfs_put_block_group(ctx.zoned_bg);
2099 btrfs_zoned_meta_io_unlock(fs_info);
2100 return ret;
2101}
2102
2103/*
2104 * Walk the list of dirty pages of the given address space and write all of them.
2105 *
2106 * @mapping: address space structure to write
2107 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2108 * @bio_ctrl: holds context for the write, namely the bio
2109 *
2110 * If a page is already under I/O, write_cache_pages() skips it, even
2111 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2112 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2113 * and msync() need to guarantee that all the data which was dirty at the time
2114 * the call was made get new I/O started against them. If wbc->sync_mode is
2115 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2116 * existing IO to complete.
2117 */
2118static int extent_write_cache_pages(struct address_space *mapping,
2119 struct btrfs_bio_ctrl *bio_ctrl)
2120{
2121 struct writeback_control *wbc = bio_ctrl->wbc;
2122 struct inode *inode = mapping->host;
2123 int ret = 0;
2124 int done = 0;
2125 int nr_to_write_done = 0;
2126 struct folio_batch fbatch;
2127 unsigned int nr_folios;
2128 pgoff_t index;
2129 pgoff_t end; /* Inclusive */
2130 pgoff_t done_index;
2131 int range_whole = 0;
2132 int scanned = 0;
2133 xa_mark_t tag;
2134
2135 /*
2136 * We have to hold onto the inode so that ordered extents can do their
2137 * work when the IO finishes. The alternative to this is failing to add
2138 * an ordered extent if the igrab() fails there and that is a huge pain
2139 * to deal with, so instead just hold onto the inode throughout the
2140 * writepages operation. If it fails here we are freeing up the inode
2141 * anyway and we'd rather not waste our time writing out stuff that is
2142 * going to be truncated anyway.
2143 */
2144 if (!igrab(inode))
2145 return 0;
2146
2147 folio_batch_init(&fbatch);
2148 if (wbc->range_cyclic) {
2149 index = mapping->writeback_index; /* Start from prev offset */
2150 end = -1;
2151 /*
		 * Starting from the beginning does not need to cycle over the
		 * range, so mark it as scanned.
2154 */
2155 scanned = (index == 0);
2156 } else {
2157 index = wbc->range_start >> PAGE_SHIFT;
2158 end = wbc->range_end >> PAGE_SHIFT;
2159 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2160 range_whole = 1;
2161 scanned = 1;
2162 }
2163
2164 /*
2165 * We do the tagged writepage as long as the snapshot flush bit is set
	 * and we are the first one to do the filemap_flush() on this inode.
2167 *
2168 * The nr_to_write == LONG_MAX is needed to make sure other flushers do
2169 * not race in and drop the bit.
2170 */
2171 if (range_whole && wbc->nr_to_write == LONG_MAX &&
2172 test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
2173 &BTRFS_I(inode)->runtime_flags))
2174 wbc->tagged_writepages = 1;
2175
2176 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2177 tag = PAGECACHE_TAG_TOWRITE;
2178 else
2179 tag = PAGECACHE_TAG_DIRTY;
2180retry:
2181 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2182 tag_pages_for_writeback(mapping, index, end);
2183 done_index = index;
2184 while (!done && !nr_to_write_done && (index <= end) &&
2185 (nr_folios = filemap_get_folios_tag(mapping, &index,
2186 end, tag, &fbatch))) {
2187 unsigned i;
2188
2189 for (i = 0; i < nr_folios; i++) {
2190 struct folio *folio = fbatch.folios[i];
2191
2192 done_index = folio_next_index(folio);
2193 /*
2194 * At this point we hold neither the i_pages lock nor
2195 * the page lock: the page may be truncated or
2196 * invalidated (changing page->mapping to NULL),
2197 * or even swizzled back from swapper_space to
2198 * tmpfs file mapping
2199 */
2200 if (!folio_trylock(folio)) {
2201 submit_write_bio(bio_ctrl, 0);
2202 folio_lock(folio);
2203 }
2204
2205 if (unlikely(folio->mapping != mapping)) {
2206 folio_unlock(folio);
2207 continue;
2208 }
2209
2210 if (!folio_test_dirty(folio)) {
2211 /* Someone wrote it for us. */
2212 folio_unlock(folio);
2213 continue;
2214 }
2215
2216 /*
2217 * For subpage case, compression can lead to mixed
2218 * writeback and dirty flags, e.g:
2219 * 0 32K 64K 96K 128K
2220 * | |//////||/////| |//|
2221 *
			 * In the above case, [32K, 96K) is asynchronously submitted
			 * for compression, and [124K, 128K) needs to be written back.
			 *
			 * If we didn't wait for writeback on the page at 64K, [124K, 128K)
2226 * won't be submitted as the page still has writeback flag
2227 * and will be skipped in the next check.
2228 *
2229 * This mixed writeback and dirty case is only possible for
2230 * subpage case.
2231 *
2232 * TODO: Remove this check after migrating compression to
2233 * regular submission.
2234 */
2235 if (wbc->sync_mode != WB_SYNC_NONE ||
2236 btrfs_is_subpage(inode_to_fs_info(inode), mapping)) {
2237 if (folio_test_writeback(folio))
2238 submit_write_bio(bio_ctrl, 0);
2239 folio_wait_writeback(folio);
2240 }
2241
2242 if (folio_test_writeback(folio) ||
2243 !folio_clear_dirty_for_io(folio)) {
2244 folio_unlock(folio);
2245 continue;
2246 }
2247
2248 ret = extent_writepage(folio, bio_ctrl);
2249 if (ret < 0) {
2250 done = 1;
2251 break;
2252 }
2253
2254 /*
2255 * The filesystem may choose to bump up nr_to_write.
2256 * We have to make sure to honor the new nr_to_write
2257 * at any time.
2258 */
2259 nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
2260 wbc->nr_to_write <= 0);
2261 }
2262 folio_batch_release(&fbatch);
2263 cond_resched();
2264 }
2265 if (!scanned && !done) {
2266 /*
2267 * We hit the last page and there is more work to be done: wrap
2268 * back to the start of the file
2269 */
2270 scanned = 1;
2271 index = 0;
2272
2273 /*
2274 * If we're looping we could run into a page that is locked by a
2275 * writer and that writer could be waiting on writeback for a
2276 * page in our current bio, and thus deadlock, so flush the
2277 * write bio here.
2278 */
2279 submit_write_bio(bio_ctrl, 0);
2280 goto retry;
2281 }
2282
2283 if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
2284 mapping->writeback_index = done_index;
2285
2286 btrfs_add_delayed_iput(BTRFS_I(inode));
2287 return ret;
2288}
2289
2290/*
 * Submit the pages in the range to bio for call sites whose delalloc range has
 * already been run (aka, ordered extent inserted) and all pages are still
2293 * locked.
2294 */
2295void extent_write_locked_range(struct inode *inode, const struct folio *locked_folio,
2296 u64 start, u64 end, struct writeback_control *wbc,
2297 bool pages_dirty)
2298{
2299 bool found_error = false;
2300 int ret = 0;
2301 struct address_space *mapping = inode->i_mapping;
2302 struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2303 const u32 sectorsize = fs_info->sectorsize;
2304 loff_t i_size = i_size_read(inode);
2305 u64 cur = start;
2306 struct btrfs_bio_ctrl bio_ctrl = {
2307 .wbc = wbc,
2308 .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2309 };
2310
2311 if (wbc->no_cgroup_owner)
2312 bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT;
2313
2314 ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
2315
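	/*
	 * Process the range one page at a time: cur_end is clamped to both the
	 * end of the page containing @cur and to @end.
	 */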
2316 while (cur <= end) {
2317 u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
2318 u32 cur_len = cur_end + 1 - cur;
2319 struct folio *folio;
2320
2321 folio = filemap_get_folio(mapping, cur >> PAGE_SHIFT);
2322
2323 /*
2324 * This shouldn't happen, the pages are pinned and locked, this
2325 * code is just in case, but shouldn't actually be run.
2326 */
2327 if (IS_ERR(folio)) {
2328 btrfs_mark_ordered_io_finished(BTRFS_I(inode), NULL,
2329 cur, cur_len, false);
2330 mapping_set_error(mapping, PTR_ERR(folio));
2331 cur = cur_end + 1;
2332 continue;
2333 }
2334
2335 ASSERT(folio_test_locked(folio));
2336 if (pages_dirty && folio != locked_folio)
2337 ASSERT(folio_test_dirty(folio));
2338
2339 /*
2340 * Set the submission bitmap to submit all sectors.
2341 * extent_writepage_io() will do the truncation correctly.
2342 */
2343 bio_ctrl.submit_bitmap = (unsigned long)-1;
2344 ret = extent_writepage_io(BTRFS_I(inode), folio, cur, cur_len,
2345 &bio_ctrl, i_size);
2346 if (ret == 1)
2347 goto next_page;
2348
2349 if (ret)
2350 mapping_set_error(mapping, ret);
2351 btrfs_folio_end_lock(fs_info, folio, cur, cur_len);
2352 if (ret < 0)
2353 found_error = true;
2354next_page:
2355 folio_put(folio);
2356 cur = cur_end + 1;
2357 }
2358
2359 submit_write_bio(&bio_ctrl, found_error ? ret : 0);
2360}
2361
2362int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
2363{
2364 struct inode *inode = mapping->host;
2365 int ret = 0;
2366 struct btrfs_bio_ctrl bio_ctrl = {
2367 .wbc = wbc,
2368 .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2369 };
2370
2371 /*
2372 * Allow only a single thread to do the reloc work in zoned mode to
2373 * protect the write pointer updates.
2374 */
2375 btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
2376 ret = extent_write_cache_pages(mapping, &bio_ctrl);
2377 submit_write_bio(&bio_ctrl, ret);
2378 btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
2379 return ret;
2380}
2381
2382void btrfs_readahead(struct readahead_control *rac)
2383{
2384 struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
2385 struct folio *folio;
2386 struct btrfs_inode *inode = BTRFS_I(rac->mapping->host);
2387 const u64 start = readahead_pos(rac);
2388 const u64 end = start + readahead_length(rac) - 1;
2389 struct extent_state *cached_state = NULL;
2390 struct extent_map *em_cached = NULL;
2391 u64 prev_em_start = (u64)-1;
2392
2393 btrfs_lock_and_flush_ordered_range(inode, start, end, &cached_state);
2394
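	/*
	 * Cache the last looked up extent map in @em_cached so consecutive
	 * folios backed by the same extent don't require repeated lookups.
	 */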
2395 while ((folio = readahead_folio(rac)) != NULL)
2396 btrfs_do_readpage(folio, &em_cached, &bio_ctrl, &prev_em_start);
2397
2398 unlock_extent(&inode->io_tree, start, end, &cached_state);
2399
2400 if (em_cached)
2401 free_extent_map(em_cached);
2402 submit_one_bio(&bio_ctrl);
2403}
2404
2405/*
2406 * basic invalidate_folio code, this waits on any locked or writeback
2407 * ranges corresponding to the folio, and then deletes any extent state
2408 * records from the tree
2409 */
2410int extent_invalidate_folio(struct extent_io_tree *tree,
2411 struct folio *folio, size_t offset)
2412{
2413 struct extent_state *cached_state = NULL;
2414 u64 start = folio_pos(folio);
2415 u64 end = start + folio_size(folio) - 1;
2416 size_t blocksize = folio_to_fs_info(folio)->sectorsize;
2417
2418 /* This function is only called for the btree inode */
2419 ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
2420
2421 start += ALIGN(offset, blocksize);
2422 if (start > end)
2423 return 0;
2424
2425 lock_extent(tree, start, end, &cached_state);
2426 folio_wait_writeback(folio);
2427
2428 /*
2429 * Currently for btree io tree, only EXTENT_LOCKED is utilized,
2430 * so here we only need to unlock the extent range to free any
2431 * existing extent state.
2432 */
2433 unlock_extent(tree, start, end, &cached_state);
2434 return 0;
2435}
2436
2437/*
2438 * a helper for release_folio, this tests for areas of the page that
2439 * are locked or under IO and drops the related state bits if it is safe
2440 * to drop the page.
2441 */
2442static bool try_release_extent_state(struct extent_io_tree *tree,
2443 struct folio *folio)
2444{
2445 u64 start = folio_pos(folio);
2446 u64 end = start + PAGE_SIZE - 1;
2447 bool ret;
2448
2449 if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
2450 ret = false;
2451 } else {
2452 u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
2453 EXTENT_DELALLOC_NEW | EXTENT_CTLBITS |
2454 EXTENT_QGROUP_RESERVED);
2455 int ret2;
2456
2457 /*
2458 * At this point we can safely clear everything except the
2459 * locked bit, the nodatasum bit and the delalloc new bit.
2460 * The delalloc new bit will be cleared by ordered extent
2461 * completion.
2462 */
2463 ret2 = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
2464
		/*
		 * If __clear_extent_bit() failed due to ENOMEM, we can't allow
		 * the release to continue.
		 */
2468 if (ret2 < 0)
2469 ret = false;
2470 else
2471 ret = true;
2472 }
2473 return ret;
2474}
2475
2476/*
2477 * a helper for release_folio. As long as there are no locked extents
2478 * in the range corresponding to the page, both state records and extent
2479 * map records are removed
2480 */
2481bool try_release_extent_mapping(struct folio *folio, gfp_t mask)
2482{
2483 u64 start = folio_pos(folio);
2484 u64 end = start + PAGE_SIZE - 1;
2485 struct btrfs_inode *inode = folio_to_inode(folio);
2486 struct extent_io_tree *io_tree = &inode->io_tree;
2487
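	/*
	 * Walk all extent maps in the folio's range, dropping the ones that are
	 * safe to remove, then let try_release_extent_state() decide whether
	 * the folio itself can be released.
	 */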
2488 while (start <= end) {
2489 const u64 cur_gen = btrfs_get_fs_generation(inode->root->fs_info);
2490 const u64 len = end - start + 1;
2491 struct extent_map_tree *extent_tree = &inode->extent_tree;
2492 struct extent_map *em;
2493
2494 write_lock(&extent_tree->lock);
2495 em = lookup_extent_mapping(extent_tree, start, len);
2496 if (!em) {
2497 write_unlock(&extent_tree->lock);
2498 break;
2499 }
2500 if ((em->flags & EXTENT_FLAG_PINNED) || em->start != start) {
2501 write_unlock(&extent_tree->lock);
2502 free_extent_map(em);
2503 break;
2504 }
2505 if (test_range_bit_exists(io_tree, em->start,
2506 extent_map_end(em) - 1, EXTENT_LOCKED))
2507 goto next;
2508 /*
2509 * If it's not in the list of modified extents, used by a fast
2510 * fsync, we can remove it. If it's being logged we can safely
2511 * remove it since fsync took an extra reference on the em.
2512 */
2513 if (list_empty(&em->list) || (em->flags & EXTENT_FLAG_LOGGING))
2514 goto remove_em;
2515 /*
2516 * If it's in the list of modified extents, remove it only if
		 * its generation is older than the current one, in which case
2518 * we don't need it for a fast fsync. Otherwise don't remove it,
2519 * we could be racing with an ongoing fast fsync that could miss
2520 * the new extent.
2521 */
2522 if (em->generation >= cur_gen)
2523 goto next;
2524remove_em:
2525 /*
2526 * We only remove extent maps that are not in the list of
2527 * modified extents or that are in the list but with a
		 * generation lower than the current generation, so there is no
2529 * need to set the full fsync flag on the inode (it hurts the
2530 * fsync performance for workloads with a data size that exceeds
2531 * or is close to the system's memory).
2532 */
2533 remove_extent_mapping(inode, em);
2534 /* Once for the inode's extent map tree. */
2535 free_extent_map(em);
2536next:
2537 start = extent_map_end(em);
2538 write_unlock(&extent_tree->lock);
2539
2540 /* Once for us, for the lookup_extent_mapping() reference. */
2541 free_extent_map(em);
2542
2543 if (need_resched()) {
2544 /*
2545 * If we need to resched but we can't block just exit
2546 * and leave any remaining extent maps.
2547 */
2548 if (!gfpflags_allow_blocking(mask))
2549 break;
2550
2551 cond_resched();
2552 }
2553 }
2554 return try_release_extent_state(io_tree, folio);
2555}
2556
2557static void __free_extent_buffer(struct extent_buffer *eb)
2558{
2559 kmem_cache_free(extent_buffer_cache, eb);
2560}
2561
2562static int extent_buffer_under_io(const struct extent_buffer *eb)
2563{
2564 return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
2565 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
2566}
2567
2568static bool folio_range_has_eb(struct folio *folio)
2569{
2570 struct btrfs_subpage *subpage;
2571
2572 lockdep_assert_held(&folio->mapping->i_private_lock);
2573
2574 if (folio_test_private(folio)) {
2575 subpage = folio_get_private(folio);
2576 if (atomic_read(&subpage->eb_refs))
2577 return true;
2578 }
2579 return false;
2580}
2581
2582static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct folio *folio)
2583{
2584 struct btrfs_fs_info *fs_info = eb->fs_info;
2585 const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
2586
2587 /*
2588 * For mapped eb, we're going to change the folio private, which should
2589 * be done under the i_private_lock.
2590 */
2591 if (mapped)
2592 spin_lock(&folio->mapping->i_private_lock);
2593
2594 if (!folio_test_private(folio)) {
2595 if (mapped)
2596 spin_unlock(&folio->mapping->i_private_lock);
2597 return;
2598 }
2599
2600 if (fs_info->nodesize >= PAGE_SIZE) {
2601 /*
2602 * We do this since we'll remove the pages after we've
2603 * removed the eb from the radix tree, so we could race
2604 * and have this page now attached to the new eb. So
2605 * only clear folio if it's still connected to
2606 * this eb.
2607 */
2608 if (folio_test_private(folio) && folio_get_private(folio) == eb) {
2609 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
2610 BUG_ON(folio_test_dirty(folio));
2611 BUG_ON(folio_test_writeback(folio));
			/* We need to make sure we haven't been attached to a new eb. */
2613 folio_detach_private(folio);
2614 }
2615 if (mapped)
2616 spin_unlock(&folio->mapping->i_private_lock);
2617 return;
2618 }
2619
2620 /*
2621 * For subpage, we can have dummy eb with folio private attached. In
2622 * this case, we can directly detach the private as such folio is only
2623 * attached to one dummy eb, no sharing.
2624 */
2625 if (!mapped) {
2626 btrfs_detach_subpage(fs_info, folio);
2627 return;
2628 }
2629
2630 btrfs_folio_dec_eb_refs(fs_info, folio);
2631
2632 /*
2633 * We can only detach the folio private if there are no other ebs in the
2634 * page range and no unfinished IO.
2635 */
2636 if (!folio_range_has_eb(folio))
2637 btrfs_detach_subpage(fs_info, folio);
2638
2639 spin_unlock(&folio->mapping->i_private_lock);
2640}
2641
2642/* Release all pages attached to the extent buffer */
2643static void btrfs_release_extent_buffer_pages(const struct extent_buffer *eb)
2644{
2645 ASSERT(!extent_buffer_under_io(eb));
2646
2647 for (int i = 0; i < INLINE_EXTENT_BUFFER_PAGES; i++) {
2648 struct folio *folio = eb->folios[i];
2649
2650 if (!folio)
2651 continue;
2652
2653 detach_extent_buffer_folio(eb, folio);
2654
2655 /* One for when we allocated the folio. */
2656 folio_put(folio);
2657 }
2658}
2659
2660/*
2661 * Helper for releasing the extent buffer.
2662 */
2663static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
2664{
2665 btrfs_release_extent_buffer_pages(eb);
2666 btrfs_leak_debug_del_eb(eb);
2667 __free_extent_buffer(eb);
2668}
2669
2670static struct extent_buffer *
2671__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
2672 unsigned long len)
2673{
2674 struct extent_buffer *eb = NULL;
2675
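	/* The allocation uses __GFP_NOFAIL, so it can never return NULL. */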
2676 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
2677 eb->start = start;
2678 eb->len = len;
2679 eb->fs_info = fs_info;
2680 init_rwsem(&eb->lock);
2681
2682 btrfs_leak_debug_add_eb(eb);
2683
2684 spin_lock_init(&eb->refs_lock);
2685 atomic_set(&eb->refs, 1);
2686
2687 ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
2688
2689 return eb;
2690}
2691
2692struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
2693{
2694 struct extent_buffer *new;
2695 int num_folios = num_extent_folios(src);
2696 int ret;
2697
2698 new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
2699 if (new == NULL)
2700 return NULL;
2701
2702 /*
2703 * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
	 * btrfs_release_extent_buffer() has different behavior for
	 * UNMAPPED subpage extent buffers.
2706 */
2707 set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
2708
2709 ret = alloc_eb_folio_array(new, false);
2710 if (ret) {
2711 btrfs_release_extent_buffer(new);
2712 return NULL;
2713 }
2714
2715 for (int i = 0; i < num_folios; i++) {
2716 struct folio *folio = new->folios[i];
2717
2718 ret = attach_extent_buffer_folio(new, folio, NULL);
2719 if (ret < 0) {
2720 btrfs_release_extent_buffer(new);
2721 return NULL;
2722 }
2723 WARN_ON(folio_test_dirty(folio));
2724 }
2725 copy_extent_buffer_full(new, src);
2726 set_extent_buffer_uptodate(new);
2727
2728 return new;
2729}
2730
2731struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
2732 u64 start, unsigned long len)
2733{
2734 struct extent_buffer *eb;
2735 int num_folios = 0;
2736 int ret;
2737
2738 eb = __alloc_extent_buffer(fs_info, start, len);
2739 if (!eb)
2740 return NULL;
2741
2742 ret = alloc_eb_folio_array(eb, false);
2743 if (ret)
2744 goto err;
2745
2746 num_folios = num_extent_folios(eb);
2747 for (int i = 0; i < num_folios; i++) {
2748 ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL);
2749 if (ret < 0)
2750 goto err;
2751 }
2752
2753 set_extent_buffer_uptodate(eb);
2754 btrfs_set_header_nritems(eb, 0);
2755 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
2756
2757 return eb;
2758err:
2759 for (int i = 0; i < num_folios; i++) {
2760 if (eb->folios[i]) {
2761 detach_extent_buffer_folio(eb, eb->folios[i]);
2762 folio_put(eb->folios[i]);
2763 }
2764 }
2765 __free_extent_buffer(eb);
2766 return NULL;
2767}
2768
2769struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
2770 u64 start)
2771{
2772 return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
2773}
2774
2775static void check_buffer_tree_ref(struct extent_buffer *eb)
2776{
2777 int refs;
2778 /*
2779 * The TREE_REF bit is first set when the extent_buffer is added
	 * to the radix tree. It is also set again, if it had been cleared, when
	 * a new reference is created by find_extent_buffer.
2782 *
2783 * It is only cleared in two cases: freeing the last non-tree
2784 * reference to the extent_buffer when its STALE bit is set or
2785 * calling release_folio when the tree reference is the only reference.
2786 *
2787 * In both cases, care is taken to ensure that the extent_buffer's
2788 * pages are not under io. However, release_folio can be concurrently
2789 * called with creating new references, which is prone to race
2790 * conditions between the calls to check_buffer_tree_ref in those
2791 * codepaths and clearing TREE_REF in try_release_extent_buffer.
2792 *
2793 * The actual lifetime of the extent_buffer in the radix tree is
2794 * adequately protected by the refcount, but the TREE_REF bit and
2795 * its corresponding reference are not. To protect against this
2796 * class of races, we call check_buffer_tree_ref from the codepaths
2797 * which trigger io. Note that once io is initiated, TREE_REF can no
2798 * longer be cleared, so that is the moment at which any such race is
2799 * best fixed.
2800 */
2801 refs = atomic_read(&eb->refs);
2802 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
2803 return;
2804
2805 spin_lock(&eb->refs_lock);
2806 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
2807 atomic_inc(&eb->refs);
2808 spin_unlock(&eb->refs_lock);
2809}
2810
2811static void mark_extent_buffer_accessed(struct extent_buffer *eb)
2812{
	int num_folios = num_extent_folios(eb);
2814
2815 check_buffer_tree_ref(eb);
2816
2817 for (int i = 0; i < num_folios; i++)
2818 folio_mark_accessed(eb->folios[i]);
2819}
2820
2821struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
2822 u64 start)
2823{
2824 struct extent_buffer *eb;
2825
2826 eb = find_extent_buffer_nolock(fs_info, start);
2827 if (!eb)
2828 return NULL;
2829 /*
2830 * Lock our eb's refs_lock to avoid races with free_extent_buffer().
2831 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
2832 * another task running free_extent_buffer() might have seen that flag
2833 * set, eb->refs == 2, that the buffer isn't under IO (dirty and
2834 * writeback flags not set) and it's still in the tree (flag
2835 * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
2836 * decrementing the extent buffer's reference count twice. So here we
2837 * could race and increment the eb's reference count, clear its stale
2838 * flag, mark it as dirty and drop our reference before the other task
2839 * finishes executing free_extent_buffer, which would later result in
2840 * an attempt to free an extent buffer that is dirty.
2841 */
2842 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
2843 spin_lock(&eb->refs_lock);
2844 spin_unlock(&eb->refs_lock);
2845 }
2846 mark_extent_buffer_accessed(eb);
2847 return eb;
2848}
2849
2850#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
2851struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
2852 u64 start)
2853{
2854 struct extent_buffer *eb, *exists = NULL;
2855 int ret;
2856
2857 eb = find_extent_buffer(fs_info, start);
2858 if (eb)
2859 return eb;
2860 eb = alloc_dummy_extent_buffer(fs_info, start);
2861 if (!eb)
2862 return ERR_PTR(-ENOMEM);
2863 eb->fs_info = fs_info;
2864again:
2865 ret = radix_tree_preload(GFP_NOFS);
2866 if (ret) {
2867 exists = ERR_PTR(ret);
2868 goto free_eb;
2869 }
2870 spin_lock(&fs_info->buffer_lock);
2871 ret = radix_tree_insert(&fs_info->buffer_radix,
2872 start >> fs_info->sectorsize_bits, eb);
2873 spin_unlock(&fs_info->buffer_lock);
2874 radix_tree_preload_end();
2875 if (ret == -EEXIST) {
2876 exists = find_extent_buffer(fs_info, start);
2877 if (exists)
2878 goto free_eb;
2879 else
2880 goto again;
2881 }
2882 check_buffer_tree_ref(eb);
2883 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
2884
2885 return eb;
2886free_eb:
2887 btrfs_release_extent_buffer(eb);
2888 return exists;
2889}
2890#endif
2891
2892static struct extent_buffer *grab_extent_buffer(
2893 struct btrfs_fs_info *fs_info, struct page *page)
2894{
2895 struct folio *folio = page_folio(page);
2896 struct extent_buffer *exists;
2897
2898 lockdep_assert_held(&page->mapping->i_private_lock);
2899
2900 /*
2901 * For subpage case, we completely rely on radix tree to ensure we
2902 * don't try to insert two ebs for the same bytenr. So here we always
2903 * return NULL and just continue.
2904 */
2905 if (fs_info->nodesize < PAGE_SIZE)
2906 return NULL;
2907
2908 /* Page not yet attached to an extent buffer */
2909 if (!folio_test_private(folio))
2910 return NULL;
2911
2912 /*
	 * We could have already allocated an eb for this page and attached it,
	 * so let's see if we can get a ref on the existing eb. If we can, we
	 * know it's good and we can just return that one; otherwise we know we
	 * can just overwrite the folio private.
2917 */
2918 exists = folio_get_private(folio);
2919 if (atomic_inc_not_zero(&exists->refs))
2920 return exists;
2921
2922 WARN_ON(PageDirty(page));
2923 folio_detach_private(folio);
2924 return NULL;
2925}
2926
2927static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
2928{
2929 if (!IS_ALIGNED(start, fs_info->sectorsize)) {
2930 btrfs_err(fs_info, "bad tree block start %llu", start);
2931 return -EINVAL;
2932 }
2933
2934 if (fs_info->nodesize < PAGE_SIZE &&
2935 offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
2936 btrfs_err(fs_info,
2937 "tree block crosses page boundary, start %llu nodesize %u",
2938 start, fs_info->nodesize);
2939 return -EINVAL;
2940 }
2941 if (fs_info->nodesize >= PAGE_SIZE &&
2942 !PAGE_ALIGNED(start)) {
2943 btrfs_err(fs_info,
2944 "tree block is not page aligned, start %llu nodesize %u",
2945 start, fs_info->nodesize);
2946 return -EINVAL;
2947 }
2948 if (!IS_ALIGNED(start, fs_info->nodesize) &&
2949 !test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) {
2950 btrfs_warn(fs_info,
2951"tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
2952 start, fs_info->nodesize);
2953 }
2954 return 0;
2955}
2956
2958/*
2959 * Return 0 if eb->folios[i] is attached to btree inode successfully.
2960 * Return >0 if there is already another extent buffer for the range,
2961 * and @found_eb_ret would be updated.
2962 * Return -EAGAIN if the filemap has an existing folio but with different size
2963 * than @eb.
2964 * The caller needs to free the existing folios and retry using the same order.
2965 */
2966static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
2967 struct btrfs_subpage *prealloc,
2968 struct extent_buffer **found_eb_ret)
2969{
2970
2971 struct btrfs_fs_info *fs_info = eb->fs_info;
2972 struct address_space *mapping = fs_info->btree_inode->i_mapping;
2973 const unsigned long index = eb->start >> PAGE_SHIFT;
2974 struct folio *existing_folio = NULL;
2975 int ret;
2976
2977 ASSERT(found_eb_ret);
2978
2979 /* Caller should ensure the folio exists. */
2980 ASSERT(eb->folios[i]);
2981
2982retry:
2983 ret = filemap_add_folio(mapping, eb->folios[i], index + i,
2984 GFP_NOFS | __GFP_NOFAIL);
2985 if (!ret)
2986 goto finish;
2987
2988 existing_folio = filemap_lock_folio(mapping, index + i);
2989 /* The page cache only exists for a very short time, just retry. */
2990 if (IS_ERR(existing_folio)) {
2991 existing_folio = NULL;
2992 goto retry;
2993 }
2994
2995 /* For now, we should only have single-page folios for btree inode. */
2996 ASSERT(folio_nr_pages(existing_folio) == 1);
2997
2998 if (folio_size(existing_folio) != eb->folio_size) {
2999 folio_unlock(existing_folio);
3000 folio_put(existing_folio);
3001 return -EAGAIN;
3002 }
3003
3004finish:
3005 spin_lock(&mapping->i_private_lock);
3006 if (existing_folio && fs_info->nodesize < PAGE_SIZE) {
3007 /* We're going to reuse the existing page, can drop our folio now. */
3008 __free_page(folio_page(eb->folios[i], 0));
3009 eb->folios[i] = existing_folio;
3010 } else if (existing_folio) {
3011 struct extent_buffer *existing_eb;
3012
3013 existing_eb = grab_extent_buffer(fs_info,
3014 folio_page(existing_folio, 0));
3015 if (existing_eb) {
3016 /* The extent buffer still exists, we can use it directly. */
3017 *found_eb_ret = existing_eb;
3018 spin_unlock(&mapping->i_private_lock);
3019 folio_unlock(existing_folio);
3020 folio_put(existing_folio);
3021 return 1;
3022 }
3023 /* The extent buffer no longer exists, we can reuse the folio. */
3024 __free_page(folio_page(eb->folios[i], 0));
3025 eb->folios[i] = existing_folio;
3026 }
3027 eb->folio_size = folio_size(eb->folios[i]);
3028 eb->folio_shift = folio_shift(eb->folios[i]);
3029 /* Should not fail, as we have preallocated the memory. */
3030 ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc);
3031 ASSERT(!ret);
3032 /*
	 * To inform that we have an extra eb under allocation, so that
	 * detach_extent_buffer_folio() won't release the folio private when the
	 * eb hasn't been inserted into the radix tree yet.
	 *
	 * The ref will be decreased when the eb releases the folio, in
	 * detach_extent_buffer_folio(). Thus it needs no special handling in
	 * the error path.
3040 */
3041 btrfs_folio_inc_eb_refs(fs_info, eb->folios[i]);
3042 spin_unlock(&mapping->i_private_lock);
3043 return 0;
3044}
3045
3046struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
3047 u64 start, u64 owner_root, int level)
3048{
3049 unsigned long len = fs_info->nodesize;
3050 int num_folios;
3051 int attached = 0;
3052 struct extent_buffer *eb;
3053 struct extent_buffer *existing_eb = NULL;
3054 struct btrfs_subpage *prealloc = NULL;
3055 u64 lockdep_owner = owner_root;
3056 bool page_contig = true;
3057 int uptodate = 1;
3058 int ret;
3059
3060 if (check_eb_alignment(fs_info, start))
3061 return ERR_PTR(-EINVAL);
3062
3063#if BITS_PER_LONG == 32
3064 if (start >= MAX_LFS_FILESIZE) {
3065 btrfs_err_rl(fs_info,
3066 "extent buffer %llu is beyond 32bit page cache limit", start);
3067 btrfs_err_32bit_limit(fs_info);
3068 return ERR_PTR(-EOVERFLOW);
3069 }
3070 if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
3071 btrfs_warn_32bit_limit(fs_info);
3072#endif
3073
3074 eb = find_extent_buffer(fs_info, start);
3075 if (eb)
3076 return eb;
3077
3078 eb = __alloc_extent_buffer(fs_info, start, len);
3079 if (!eb)
3080 return ERR_PTR(-ENOMEM);
3081
3082 /*
3083 * The reloc trees are just snapshots, so we need them to appear to be
3084 * just like any other fs tree WRT lockdep.
3085 */
3086 if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
3087 lockdep_owner = BTRFS_FS_TREE_OBJECTID;
3088
3089 btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
3090
3091 /*
	 * Preallocate folio private for subpage case, so that we won't
	 * allocate memory with i_private_lock nor the page lock held.
	 *
	 * The memory will be freed by attach_extent_buffer_folio() or freed
	 * manually if we exit earlier.
3097 */
3098 if (fs_info->nodesize < PAGE_SIZE) {
3099 prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
3100 if (IS_ERR(prealloc)) {
3101 ret = PTR_ERR(prealloc);
3102 goto out;
3103 }
3104 }
3105
3106reallocate:
3107 /* Allocate all pages first. */
3108 ret = alloc_eb_folio_array(eb, true);
3109 if (ret < 0) {
3110 btrfs_free_subpage(prealloc);
3111 goto out;
3112 }
3113
3114 num_folios = num_extent_folios(eb);
3115 /* Attach all pages to the filemap. */
3116 for (int i = 0; i < num_folios; i++) {
3117 struct folio *folio;
3118
3119 ret = attach_eb_folio_to_filemap(eb, i, prealloc, &existing_eb);
3120 if (ret > 0) {
3121 ASSERT(existing_eb);
3122 goto out;
3123 }
3124
3125 /*
3126 * TODO: Special handling for a corner case where the order of
3127 * folios mismatch between the new eb and filemap.
3128 *
3129 * This happens when:
3130 *
3131 * - the new eb is using higher order folio
3132 *
3133 * - the filemap is still using 0-order folios for the range
3134 * This can happen at the previous eb allocation, and we don't
3135 * have higher order folio for the call.
3136 *
3137 * - the existing eb has already been freed
3138 *
3139 * In this case, we have to free the existing folios first, and
3140 * re-allocate using the same order.
3141 * Thankfully this is not going to happen yet, as we're still
3142 * using 0-order folios.
3143 */
3144 if (unlikely(ret == -EAGAIN)) {
3145 ASSERT(0);
3146 goto reallocate;
3147 }
3148 attached++;
3149
3150 /*
3151 * Only after attach_eb_folio_to_filemap(), eb->folios[] is
3152 * reliable, as we may choose to reuse the existing page cache
3153 * and free the allocated page.
3154 */
3155 folio = eb->folios[i];
3156 WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
3157
3158 /*
3159 * Check if the current page is physically contiguous with previous eb
3160 * page.
3161 * At this stage, either we allocated a large folio, thus @i
3162 * would only be 0, or we fall back to per-page allocation.
3163 */
3164 if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0))
3165 page_contig = false;
3166
3167 if (!btrfs_folio_test_uptodate(fs_info, folio, eb->start, eb->len))
3168 uptodate = 0;
3169
3170 /*
		 * hasn't been properly inserted into the radix tree; this
		 * opens a race with btree_release_folio() which can free a page
		 * while we are still filling in all pages for the buffer and
		 * we could crash.
3175 * we could crash.
3176 */
3177 }
3178 if (uptodate)
3179 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3180 /* All pages are physically contiguous, can skip cross page handling. */
3181 if (page_contig)
3182 eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start);
3183again:
3184 ret = radix_tree_preload(GFP_NOFS);
3185 if (ret)
3186 goto out;
3187
3188 spin_lock(&fs_info->buffer_lock);
3189 ret = radix_tree_insert(&fs_info->buffer_radix,
3190 start >> fs_info->sectorsize_bits, eb);
3191 spin_unlock(&fs_info->buffer_lock);
3192 radix_tree_preload_end();
3193 if (ret == -EEXIST) {
3194 ret = 0;
3195 existing_eb = find_extent_buffer(fs_info, start);
3196 if (existing_eb)
3197 goto out;
3198 else
3199 goto again;
3200 }
3201 /* add one reference for the tree */
3202 check_buffer_tree_ref(eb);
3203 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3204
3205 /*
3206 * Now it's safe to unlock the pages because any calls to
3207 * btree_release_folio will correctly detect that a page belongs to a
3208 * live buffer and won't free them prematurely.
3209 */
3210 for (int i = 0; i < num_folios; i++)
3211 unlock_page(folio_page(eb->folios[i], 0));
3212 return eb;
3213
3214out:
3215 WARN_ON(!atomic_dec_and_test(&eb->refs));
3216
3217 /*
	 * Any attached folios need to be detached before we unlock them. This
	 * is because when we insert our new folios into the mapping we attach
	 * our eb to each folio. If we then fail to insert a folio, we'll look
	 * up the folio at that index and grab its eb. We do not want that
	 * lookup to grab this eb, as we're getting ready to free it. So we
	 * have to detach it first and then unlock it.
3224 *
3225 * We have to drop our reference and NULL it out here because in the
3226 * subpage case detaching does a btrfs_folio_dec_eb_refs() for our eb.
3227 * Below when we call btrfs_release_extent_buffer() we will call
3228 * detach_extent_buffer_folio() on our remaining pages in the !subpage
3229 * case. If we left eb->folios[i] populated in the subpage case we'd
3230 * double put our reference and be super sad.
3231 */
3232 for (int i = 0; i < attached; i++) {
3233 ASSERT(eb->folios[i]);
3234 detach_extent_buffer_folio(eb, eb->folios[i]);
3235 unlock_page(folio_page(eb->folios[i], 0));
3236 folio_put(eb->folios[i]);
3237 eb->folios[i] = NULL;
3238 }
3239 /*
	 * Now all pages of that extent buffer are unmapped, set the UNMAPPED flag,
3241 * so it can be cleaned up without utilizing page->mapping.
3242 */
3243 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3244
3245 btrfs_release_extent_buffer(eb);
3246 if (ret < 0)
3247 return ERR_PTR(ret);
3248 ASSERT(existing_eb);
3249 return existing_eb;
3250}
3251
3252static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
3253{
3254 struct extent_buffer *eb =
3255 container_of(head, struct extent_buffer, rcu_head);
3256
3257 __free_extent_buffer(eb);
3258}
3259
3260static int release_extent_buffer(struct extent_buffer *eb)
3261 __releases(&eb->refs_lock)
3262{
3263 lockdep_assert_held(&eb->refs_lock);
3264
3265 WARN_ON(atomic_read(&eb->refs) == 0);
3266 if (atomic_dec_and_test(&eb->refs)) {
3267 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
3268 struct btrfs_fs_info *fs_info = eb->fs_info;
3269
3270 spin_unlock(&eb->refs_lock);
3271
3272 spin_lock(&fs_info->buffer_lock);
3273 radix_tree_delete(&fs_info->buffer_radix,
3274 eb->start >> fs_info->sectorsize_bits);
3275 spin_unlock(&fs_info->buffer_lock);
3276 } else {
3277 spin_unlock(&eb->refs_lock);
3278 }
3279
3280 btrfs_leak_debug_del_eb(eb);
3281 /* Should be safe to release our pages at this point */
3282 btrfs_release_extent_buffer_pages(eb);
3283#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3284 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
3285 __free_extent_buffer(eb);
3286 return 1;
3287 }
3288#endif
3289 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
3290 return 1;
3291 }
3292 spin_unlock(&eb->refs_lock);
3293
3294 return 0;
3295}
3296
3297void free_extent_buffer(struct extent_buffer *eb)
3298{
3299 int refs;
3300 if (!eb)
3301 return;
3302
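	/*
	 * Lockless fast path: as long as the refcount is high enough that this
	 * put cannot be the one requiring the locked handling below (stale
	 * detection or final release), just decrement it with a cmpxchg loop.
	 */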
3303 refs = atomic_read(&eb->refs);
3304 while (1) {
3305 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
3306 || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
3307 refs == 1))
3308 break;
3309 if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
3310 return;
3311 }
3312
3313 spin_lock(&eb->refs_lock);
3314 if (atomic_read(&eb->refs) == 2 &&
3315 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
3316 !extent_buffer_under_io(eb) &&
3317 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3318 atomic_dec(&eb->refs);
3319
3320 /*
3321 * I know this is terrible, but it's temporary until we stop tracking
3322 * the uptodate bits and such for the extent buffers.
3323 */
3324 release_extent_buffer(eb);
3325}
3326
3327void free_extent_buffer_stale(struct extent_buffer *eb)
3328{
3329 if (!eb)
3330 return;
3331
3332 spin_lock(&eb->refs_lock);
3333 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
3334
3335 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
3336 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3337 atomic_dec(&eb->refs);
3338 release_extent_buffer(eb);
3339}
3340
3341static void btree_clear_folio_dirty(struct folio *folio)
3342{
3343 ASSERT(folio_test_dirty(folio));
3344 ASSERT(folio_test_locked(folio));
3345 folio_clear_dirty_for_io(folio);
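	/* Also clear the dirty tag in the xarray, unless the folio got re-dirtied. */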
3346 xa_lock_irq(&folio->mapping->i_pages);
3347 if (!folio_test_dirty(folio))
3348 __xa_clear_mark(&folio->mapping->i_pages,
3349 folio_index(folio), PAGECACHE_TAG_DIRTY);
3350 xa_unlock_irq(&folio->mapping->i_pages);
3351}
3352
3353static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
3354{
3355 struct btrfs_fs_info *fs_info = eb->fs_info;
3356 struct folio *folio = eb->folios[0];
3357 bool last;
3358
3359 /* btree_clear_folio_dirty() needs page locked. */
3360 folio_lock(folio);
3361 last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start, eb->len);
3362 if (last)
3363 btree_clear_folio_dirty(folio);
3364 folio_unlock(folio);
3365 WARN_ON(atomic_read(&eb->refs) == 0);
3366}
3367
3368void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
3369 struct extent_buffer *eb)
3370{
3371 struct btrfs_fs_info *fs_info = eb->fs_info;
3372 int num_folios;
3373
3374 btrfs_assert_tree_write_locked(eb);
3375
3376 if (trans && btrfs_header_generation(eb) != trans->transid)
3377 return;
3378
3379 /*
3380 * Instead of clearing the dirty flag off of the buffer, mark it as
3381 * EXTENT_BUFFER_ZONED_ZEROOUT. This allows us to preserve
3382 * write-ordering in zoned mode, without the need to later re-dirty
3383 * the extent_buffer.
3384 *
3385 * The actual zeroout of the buffer will happen later in
3386 * btree_csum_one_bio.
3387 */
3388 if (btrfs_is_zoned(fs_info) && test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3389 set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags);
3390 return;
3391 }
3392
3393 if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
3394 return;
3395
3396 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
3397 fs_info->dirty_metadata_batch);
3398
3399 if (eb->fs_info->nodesize < PAGE_SIZE)
3400 return clear_subpage_extent_buffer_dirty(eb);
3401
3402 num_folios = num_extent_folios(eb);
3403 for (int i = 0; i < num_folios; i++) {
3404 struct folio *folio = eb->folios[i];
3405
3406 if (!folio_test_dirty(folio))
3407 continue;
3408 folio_lock(folio);
3409 btree_clear_folio_dirty(folio);
3410 folio_unlock(folio);
3411 }
3412 WARN_ON(atomic_read(&eb->refs) == 0);
3413}
3414
3415void set_extent_buffer_dirty(struct extent_buffer *eb)
3416{
3417 int num_folios;
3418 bool was_dirty;
3419
3420 check_buffer_tree_ref(eb);
3421
3422 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3423
3424 num_folios = num_extent_folios(eb);
3425 WARN_ON(atomic_read(&eb->refs) == 0);
3426 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
3427 WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));
3428
3429 if (!was_dirty) {
3430 bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
3431
3432 /*
3433 * For subpage case, we can have other extent buffers in the
3434 * same page, and in clear_subpage_extent_buffer_dirty() we
3435 * have to clear page dirty without subpage lock held.
		 * This can cause a race where our page's dirty bit gets
		 * cleared right after we set it.
		 *
		 * Thankfully, clear_subpage_extent_buffer_dirty() locks its page
		 * for other reasons, so we can use the page lock to prevent the
		 * above race.
3442 */
3443 if (subpage)
3444 lock_page(folio_page(eb->folios[0], 0));
3445 for (int i = 0; i < num_folios; i++)
3446 btrfs_folio_set_dirty(eb->fs_info, eb->folios[i],
3447 eb->start, eb->len);
3448 if (subpage)
3449 unlock_page(folio_page(eb->folios[0], 0));
3450 percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
3451 eb->len,
3452 eb->fs_info->dirty_metadata_batch);
3453 }
3454#ifdef CONFIG_BTRFS_DEBUG
3455 for (int i = 0; i < num_folios; i++)
3456 ASSERT(folio_test_dirty(eb->folios[i]));
3457#endif
3458}
3459
3460void clear_extent_buffer_uptodate(struct extent_buffer *eb)
3461{
3462 struct btrfs_fs_info *fs_info = eb->fs_info;
3463 int num_folios = num_extent_folios(eb);
3464
3465 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3466 for (int i = 0; i < num_folios; i++) {
3467 struct folio *folio = eb->folios[i];
3468
3469 if (!folio)
3470 continue;
3471
3472 /*
3473 * This is special handling for metadata subpage, as regular
3474 * btrfs_is_subpage() can not handle cloned/dummy metadata.
3475 */
3476 if (fs_info->nodesize >= PAGE_SIZE)
3477 folio_clear_uptodate(folio);
3478 else
3479 btrfs_subpage_clear_uptodate(fs_info, folio,
3480 eb->start, eb->len);
3481 }
3482}
3483
3484void set_extent_buffer_uptodate(struct extent_buffer *eb)
3485{
3486 struct btrfs_fs_info *fs_info = eb->fs_info;
3487 int num_folios = num_extent_folios(eb);
3488
3489 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3490 for (int i = 0; i < num_folios; i++) {
3491 struct folio *folio = eb->folios[i];
3492
3493 /*
3494 * This is special handling for metadata subpage, as regular
3495 * btrfs_is_subpage() can not handle cloned/dummy metadata.
3496 */
3497 if (fs_info->nodesize >= PAGE_SIZE)
3498 folio_mark_uptodate(folio);
3499 else
3500 btrfs_subpage_set_uptodate(fs_info, folio,
3501 eb->start, eb->len);
3502 }
3503}
3504
3505static void clear_extent_buffer_reading(struct extent_buffer *eb)
3506{
3507 clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
3508 smp_mb__after_atomic();
3509 wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
3510}
3511
3512static void end_bbio_meta_read(struct btrfs_bio *bbio)
3513{
3514 struct extent_buffer *eb = bbio->private;
3515 struct btrfs_fs_info *fs_info = eb->fs_info;
3516 bool uptodate = !bbio->bio.bi_status;
3517 struct folio_iter fi;
3518 u32 bio_offset = 0;
3519
3520 /*
3521 * If the extent buffer is marked UPTODATE before the read operation
3522 * completes, other calls to read_extent_buffer_pages() will return
3523 * early without waiting for the read to finish, causing data races.
3524 */
3525 WARN_ON(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags));
3526
3527 eb->read_mirror = bbio->mirror_num;
3528
3529 if (uptodate &&
3530 btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0)
3531 uptodate = false;
3532
3533 if (uptodate) {
3534 set_extent_buffer_uptodate(eb);
3535 } else {
3536 clear_extent_buffer_uptodate(eb);
3537 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3538 }
3539
3540 bio_for_each_folio_all(fi, &bbio->bio) {
3541 struct folio *folio = fi.folio;
3542 u64 start = eb->start + bio_offset;
3543 u32 len = fi.length;
3544
3545 if (uptodate)
3546 btrfs_folio_set_uptodate(fs_info, folio, start, len);
3547 else
3548 btrfs_folio_clear_uptodate(fs_info, folio, start, len);
3549
3550 bio_offset += len;
3551 }
3552
3553 clear_extent_buffer_reading(eb);
3554 free_extent_buffer(eb);
3555
3556 bio_put(&bbio->bio);
3557}
3558
3559int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
3560 const struct btrfs_tree_parent_check *check)
3561{
3562 struct btrfs_bio *bbio;
3563 bool ret;
3564
3565 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3566 return 0;
3567
3568 /*
3569 * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
3570 * operation, which could potentially still be in flight. In this case
3571 * we simply want to return an error.
3572 */
3573 if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
3574 return -EIO;
3575
3576 /* Someone else is already reading the buffer, just wait for it. */
3577 if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
3578 goto done;
3579
3580 /*
3581 * Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above
3582 * test_and_set_bit(EXTENT_BUFFER_READING), someone else could have
3583 * started and finished reading the same eb. In this case, UPTODATE
3584 * will now be set, and we shouldn't read it in again.
3585 */
3586 if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) {
3587 clear_extent_buffer_reading(eb);
3588 return 0;
3589 }
3590
3591 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3592 eb->read_mirror = 0;
3593 check_buffer_tree_ref(eb);
3594 atomic_inc(&eb->refs);
3595
3596 bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
3597 REQ_OP_READ | REQ_META, eb->fs_info,
3598 end_bbio_meta_read, eb);
3599 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
3600 bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
3601 bbio->file_offset = eb->start;
3602 memcpy(&bbio->parent_check, check, sizeof(*check));
3603 if (eb->fs_info->nodesize < PAGE_SIZE) {
3604 ret = bio_add_folio(&bbio->bio, eb->folios[0], eb->len,
3605 eb->start - folio_pos(eb->folios[0]));
3606 ASSERT(ret);
3607 } else {
3608 int num_folios = num_extent_folios(eb);
3609
3610 for (int i = 0; i < num_folios; i++) {
3611 struct folio *folio = eb->folios[i];
3612
3613 ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
3614 ASSERT(ret);
3615 }
3616 }
3617 btrfs_submit_bbio(bbio, mirror_num);
3618
3619done:
3620 if (wait == WAIT_COMPLETE) {
3621 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
3622 if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3623 return -EIO;
3624 }
3625
3626 return 0;
3627}
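
/*
 * A minimal usage sketch for read_extent_buffer_pages() (illustrative only;
 * a real caller fills the check from the parent block it descended from):
 *
 *	struct btrfs_tree_parent_check check = {
 *		.owner_root = btrfs_header_owner(parent),
 *		.transid = btrfs_node_ptr_generation(parent, slot),
 *		.level = btrfs_header_level(parent) - 1,
 *	};
 *
 *	if (read_extent_buffer_pages(eb, WAIT_COMPLETE, 0, &check) < 0)
 *		... the eb contents are not valid, handle the error ...
 *
 * With WAIT_NONE the call only submits the read and returns immediately,
 * which is what btrfs_readahead_tree_block() below relies on.
 */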
3628
3629static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
3630 unsigned long len)
3631{
3632 btrfs_warn(eb->fs_info,
3633 "access to eb bytenr %llu len %u out of range start %lu len %lu",
3634 eb->start, eb->len, start, len);
3635 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
3636
3637 return true;
3638}
3639
3640/*
3641 * Check if the [start, start + len) range is valid before reading/writing
3642 * the eb.
3643 * NOTE: @start and @len are offsets inside the eb, not logical addresses.
3644 *
3645 * Caller should not touch the dst/src memory if this function returns an error.
3646 */
3647static inline int check_eb_range(const struct extent_buffer *eb,
3648 unsigned long start, unsigned long len)
3649{
3650 unsigned long offset;
3651
3652 /* start, start + len should not go beyond eb->len nor overflow */
3653 if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
3654 return report_eb_range(eb, start, len);
3655
3656 return false;
3657}
3658
3659void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
3660 unsigned long start, unsigned long len)
3661{
3662 const int unit_size = eb->folio_size;
3663 size_t cur;
3664 size_t offset;
3665 char *dst = (char *)dstv;
3666 unsigned long i = get_eb_folio_index(eb, start);
3667
3668 if (check_eb_range(eb, start, len)) {
3669 /*
3670 * Invalid range hit, zero the destination so callers won't get
3671 * random garbage from their uninitialized memory.
3672 */
3673 memset(dstv, 0, len);
3674 return;
3675 }
3676
3677 if (eb->addr) {
3678 memcpy(dstv, eb->addr + start, len);
3679 return;
3680 }
3681
3682 offset = get_eb_offset_in_folio(eb, start);
3683
3684 while (len > 0) {
3685 char *kaddr;
3686
3687 cur = min(len, unit_size - offset);
3688 kaddr = folio_address(eb->folios[i]);
3689 memcpy(dst, kaddr + offset, cur);
3690
3691 dst += cur;
3692 len -= cur;
3693 offset = 0;
3694 i++;
3695 }
3696}
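
/*
 * A small usage sketch (illustrative, not tied to a specific caller): copying
 * a fixed-size structure out of an eb into stack memory, with @off being an
 * offset inside the eb (0..eb->len), not a logical address:
 *
 *	struct btrfs_disk_key disk_key;
 *
 *	read_extent_buffer(eb, &disk_key, off, sizeof(disk_key));
 *
 * Helpers such as btrfs_node_key() wrap exactly this pattern.
 */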
3697
3698int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
3699 void __user *dstv,
3700 unsigned long start, unsigned long len)
3701{
3702 const int unit_size = eb->folio_size;
3703 size_t cur;
3704 size_t offset;
3705 char __user *dst = (char __user *)dstv;
3706 unsigned long i = get_eb_folio_index(eb, start);
3707 int ret = 0;
3708
3709 WARN_ON(start > eb->len);
3710 WARN_ON(start + len > eb->len);
3711
3712 if (eb->addr) {
3713 if (copy_to_user_nofault(dstv, eb->addr + start, len))
3714 ret = -EFAULT;
3715 return ret;
3716 }
3717
3718 offset = get_eb_offset_in_folio(eb, start);
3719
3720 while (len > 0) {
3721 char *kaddr;
3722
3723 cur = min(len, unit_size - offset);
3724 kaddr = folio_address(eb->folios[i]);
3725 if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
3726 ret = -EFAULT;
3727 break;
3728 }
3729
3730 dst += cur;
3731 len -= cur;
3732 offset = 0;
3733 i++;
3734 }
3735
3736 return ret;
3737}
3738
3739int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
3740 unsigned long start, unsigned long len)
3741{
3742 const int unit_size = eb->folio_size;
3743 size_t cur;
3744 size_t offset;
3745 char *kaddr;
3746 char *ptr = (char *)ptrv;
3747 unsigned long i = get_eb_folio_index(eb, start);
3748 int ret = 0;
3749
3750 if (check_eb_range(eb, start, len))
3751 return -EINVAL;
3752
3753 if (eb->addr)
3754 return memcmp(ptrv, eb->addr + start, len);
3755
3756 offset = get_eb_offset_in_folio(eb, start);
3757
3758 while (len > 0) {
3759 cur = min(len, unit_size - offset);
3760 kaddr = folio_address(eb->folios[i]);
3761 ret = memcmp(ptr, kaddr + offset, cur);
3762 if (ret)
3763 break;
3764
3765 ptr += cur;
3766 len -= cur;
3767 offset = 0;
3768 i++;
3769 }
3770 return ret;
3771}
3772
3773/*
3774 * Check that the extent buffer is uptodate.
3775 *
3776 * For the regular case (nodesize >= PAGE_SIZE), check if the folio is uptodate.
3777 * For the subpage case, check the range covered by the eb in the subpage bitmap.
3778 */
3779static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i)
3780{
3781 struct btrfs_fs_info *fs_info = eb->fs_info;
3782 struct folio *folio = eb->folios[i];
3783
3784 ASSERT(folio);
3785
3786 /*
3787 * If we are using the commit root we could potentially clear a page
3788 * Uptodate while we're using the extent buffer that we've previously
3789 * looked up. We don't want to complain in this case, as the page was
3790 * valid before, we just didn't write it out. Instead we want to catch
3791 * the case where we didn't actually read the block properly, which
3792 * would have !PageUptodate and !EXTENT_BUFFER_WRITE_ERR.
3793 */
3794 if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
3795 return;
3796
3797 if (fs_info->nodesize < PAGE_SIZE) {
3798 folio = eb->folios[0];
3799 ASSERT(i == 0);
3800 if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, folio,
3801 eb->start, eb->len)))
3802 btrfs_subpage_dump_bitmap(fs_info, folio, eb->start, eb->len);
3803 } else {
3804 WARN_ON(!folio_test_uptodate(folio));
3805 }
3806}
3807
3808static void __write_extent_buffer(const struct extent_buffer *eb,
3809 const void *srcv, unsigned long start,
3810 unsigned long len, bool use_memmove)
3811{
3812 const int unit_size = eb->folio_size;
3813 size_t cur;
3814 size_t offset;
3815 char *kaddr;
3816 const char *src = (const char *)srcv;
3817 unsigned long i = get_eb_folio_index(eb, start);
3818 /* For unmapped (dummy) ebs, no need to check their uptodate status. */
3819 const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3820
3821 if (check_eb_range(eb, start, len))
3822 return;
3823
3824 if (eb->addr) {
3825 if (use_memmove)
3826 memmove(eb->addr + start, srcv, len);
3827 else
3828 memcpy(eb->addr + start, srcv, len);
3829 return;
3830 }
3831
3832 offset = get_eb_offset_in_folio(eb, start);
3833
3834 while (len > 0) {
3835 if (check_uptodate)
3836 assert_eb_folio_uptodate(eb, i);
3837
3838 cur = min(len, unit_size - offset);
3839 kaddr = folio_address(eb->folios[i]);
3840 if (use_memmove)
3841 memmove(kaddr + offset, src, cur);
3842 else
3843 memcpy(kaddr + offset, src, cur);
3844
3845 src += cur;
3846 len -= cur;
3847 offset = 0;
3848 i++;
3849 }
3850}
3851
3852void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
3853 unsigned long start, unsigned long len)
3854{
3855 return __write_extent_buffer(eb, srcv, start, len, false);
3856}
3857
3858static void memset_extent_buffer(const struct extent_buffer *eb, int c,
3859 unsigned long start, unsigned long len)
3860{
3861 const int unit_size = eb->folio_size;
3862 unsigned long cur = start;
3863
3864 if (eb->addr) {
3865 memset(eb->addr + start, c, len);
3866 return;
3867 }
3868
3869 while (cur < start + len) {
3870 unsigned long index = get_eb_folio_index(eb, cur);
3871 unsigned int offset = get_eb_offset_in_folio(eb, cur);
3872 unsigned int cur_len = min(start + len - cur, unit_size - offset);
3873
3874 assert_eb_folio_uptodate(eb, index);
3875 memset(folio_address(eb->folios[index]) + offset, c, cur_len);
3876
3877 cur += cur_len;
3878 }
3879}
3880
3881void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
3882 unsigned long len)
3883{
3884 if (check_eb_range(eb, start, len))
3885 return;
3886 return memset_extent_buffer(eb, 0, start, len);
3887}
3888
3889void copy_extent_buffer_full(const struct extent_buffer *dst,
3890 const struct extent_buffer *src)
3891{
3892 const int unit_size = src->folio_size;
3893 unsigned long cur = 0;
3894
3895 ASSERT(dst->len == src->len);
3896
3897 while (cur < src->len) {
3898 unsigned long index = get_eb_folio_index(src, cur);
3899 unsigned long offset = get_eb_offset_in_folio(src, cur);
3900 unsigned long cur_len = min(src->len, unit_size - offset);
3901 void *addr = folio_address(src->folios[index]) + offset;
3902
3903 write_extent_buffer(dst, addr, cur, cur_len);
3904
3905 cur += cur_len;
3906 }
3907}
3908
3909void copy_extent_buffer(const struct extent_buffer *dst,
3910 const struct extent_buffer *src,
3911 unsigned long dst_offset, unsigned long src_offset,
3912 unsigned long len)
3913{
3914 const int unit_size = dst->folio_size;
3915 u64 dst_len = dst->len;
3916 size_t cur;
3917 size_t offset;
3918 char *kaddr;
3919 unsigned long i = get_eb_folio_index(dst, dst_offset);
3920
3921 if (check_eb_range(dst, dst_offset, len) ||
3922 check_eb_range(src, src_offset, len))
3923 return;
3924
3925 WARN_ON(src->len != dst_len);
3926
3927 offset = get_eb_offset_in_folio(dst, dst_offset);
3928
3929 while (len > 0) {
3930 assert_eb_folio_uptodate(dst, i);
3931
3932 cur = min(len, (unsigned long)(unit_size - offset));
3933
3934 kaddr = folio_address(dst->folios[i]);
3935 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3936
3937 src_offset += cur;
3938 len -= cur;
3939 offset = 0;
3940 i++;
3941 }
3942}
3943
3944/*
3945 * Calculate the folio and offset of the byte containing the given bit number.
3946 *
3947 * @eb: the extent buffer
3948 * @start: offset of the bitmap item in the extent buffer
3949 * @nr: bit number
3950 * @folio_index: return index of the folio in the extent buffer that contains
3951 * the given bit number
3952 * @folio_offset: return offset into the folio given by folio_index
3953 *
3954 * This helper hides the ugliness of finding the byte in an extent buffer which
3955 * contains a given bit.
3956 */
3957static inline void eb_bitmap_offset(const struct extent_buffer *eb,
3958 unsigned long start, unsigned long nr,
3959 unsigned long *folio_index,
3960 size_t *folio_offset)
3961{
3962 size_t byte_offset = BIT_BYTE(nr);
3963 size_t offset;
3964
3965 /*
3966 * The byte we want is the offset of the extent buffer + the offset of
3967 * the bitmap item in the extent buffer + the offset of the byte in the
3968 * bitmap item.
3969 */
3970 offset = start + offset_in_eb_folio(eb, eb->start) + byte_offset;
3971
3972 *folio_index = offset >> eb->folio_shift;
3973 *folio_offset = offset_in_eb_folio(eb, offset);
3974}
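
/*
 * A worked example of the arithmetic above, assuming a 4K folio and an eb
 * starting at a folio boundary (offset_in_eb_folio(eb, eb->start) == 0):
 * for a bitmap item at @start == 100 and bit @nr == 13, byte_offset is
 * 13 / 8 == 1, so offset == 101, *folio_index == 0 and *folio_offset == 101.
 * The bit inside that byte is nr & 7 == 5, which is the shift used by
 * extent_buffer_test_bit() below.
 */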
3975
3976/*
3977 * Determine whether a bit in a bitmap item is set.
3978 *
3979 * @eb: the extent buffer
3980 * @start: offset of the bitmap item in the extent buffer
3981 * @nr: bit number to test
3982 */
3983int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
3984 unsigned long nr)
3985{
3986 unsigned long i;
3987 size_t offset;
3988 u8 *kaddr;
3989
3990 eb_bitmap_offset(eb, start, nr, &i, &offset);
3991 assert_eb_folio_uptodate(eb, i);
3992 kaddr = folio_address(eb->folios[i]);
3993 return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
3994}
3995
3996static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr)
3997{
3998 unsigned long index = get_eb_folio_index(eb, bytenr);
3999
4000 if (check_eb_range(eb, bytenr, 1))
4001 return NULL;
4002 return folio_address(eb->folios[index]) + get_eb_offset_in_folio(eb, bytenr);
4003}
4004
4005/*
4006 * Set an area of a bitmap to 1.
4007 *
4008 * @eb: the extent buffer
4009 * @start: offset of the bitmap item in the extent buffer
4010 * @pos: bit number of the first bit
4011 * @len: number of bits to set
4012 */
4013void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
4014 unsigned long pos, unsigned long len)
4015{
4016 unsigned int first_byte = start + BIT_BYTE(pos);
4017 unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4018 const bool same_byte = (first_byte == last_byte);
4019 u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4020 u8 *kaddr;
4021
4022 if (same_byte)
4023 mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4024
4025 /* Handle the first byte. */
4026 kaddr = extent_buffer_get_byte(eb, first_byte);
4027 *kaddr |= mask;
4028 if (same_byte)
4029 return;
4030
4031 /* Handle the byte aligned part. */
4032 ASSERT(first_byte + 1 <= last_byte);
4033 memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1);
4034
4035 /* Handle the last byte. */
4036 kaddr = extent_buffer_get_byte(eb, last_byte);
4037 *kaddr |= BITMAP_LAST_BYTE_MASK(pos + len);
4038}
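
/*
 * A worked example, assuming the usual little-endian BITMAP_*_BYTE_MASK()
 * helpers: setting @pos == 3, @len == 12 covers bits 3..14.  The first byte
 * is OR-ed with BITMAP_FIRST_BYTE_MASK(3) == 0xf8 (bits 3-7), there are no
 * fully covered middle bytes here (otherwise memset_extent_buffer() fills
 * them with 0xff), and the last byte is OR-ed with
 * BITMAP_LAST_BYTE_MASK(15) == 0x7f (bits 0-6 of the following byte).
 */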
4039
4041/*
4042 * Clear an area of a bitmap.
4043 *
4044 * @eb: the extent buffer
4045 * @start: offset of the bitmap item in the extent buffer
4046 * @pos: bit number of the first bit
4047 * @len: number of bits to clear
4048 */
4049void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
4050 unsigned long start, unsigned long pos,
4051 unsigned long len)
4052{
4053 unsigned int first_byte = start + BIT_BYTE(pos);
4054 unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4055 const bool same_byte = (first_byte == last_byte);
4056 u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4057 u8 *kaddr;
4058
4059 if (same_byte)
4060 mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4061
4062 /* Handle the first byte. */
4063 kaddr = extent_buffer_get_byte(eb, first_byte);
4064 *kaddr &= ~mask;
4065 if (same_byte)
4066 return;
4067
4068 /* Handle the byte aligned part. */
4069 ASSERT(first_byte + 1 <= last_byte);
4070 memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1);
4071
4072 /* Handle the last byte. */
4073 kaddr = extent_buffer_get_byte(eb, last_byte);
4074 *kaddr &= ~BITMAP_LAST_BYTE_MASK(pos + len);
4075}
4076
4077static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4078{
4079 unsigned long distance = (src > dst) ? src - dst : dst - src;
4080 return distance < len;
4081}
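
/*
 * For example, with src == 100, dst == 110 and len == 16 the distance is 10,
 * which is smaller than len, so the ranges overlap and the copy below must
 * use memmove() semantics; with len == 8 they would not overlap and a plain
 * memcpy() is safe.
 */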
4082
4083void memcpy_extent_buffer(const struct extent_buffer *dst,
4084 unsigned long dst_offset, unsigned long src_offset,
4085 unsigned long len)
4086{
4087 const int unit_size = dst->folio_size;
4088 unsigned long cur_off = 0;
4089
4090 if (check_eb_range(dst, dst_offset, len) ||
4091 check_eb_range(dst, src_offset, len))
4092 return;
4093
4094 if (dst->addr) {
4095 const bool use_memmove = areas_overlap(src_offset, dst_offset, len);
4096
4097 if (use_memmove)
4098 memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4099 else
4100 memcpy(dst->addr + dst_offset, dst->addr + src_offset, len);
4101 return;
4102 }
4103
4104 while (cur_off < len) {
4105 unsigned long cur_src = cur_off + src_offset;
4106 unsigned long folio_index = get_eb_folio_index(dst, cur_src);
4107 unsigned long folio_off = get_eb_offset_in_folio(dst, cur_src);
4108 unsigned long cur_len = min(src_offset + len - cur_src,
4109 unit_size - folio_off);
4110 void *src_addr = folio_address(dst->folios[folio_index]) + folio_off;
4111 const bool use_memmove = areas_overlap(src_offset + cur_off,
4112 dst_offset + cur_off, cur_len);
4113
4114 __write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len,
4115 use_memmove);
4116 cur_off += cur_len;
4117 }
4118}
4119
4120void memmove_extent_buffer(const struct extent_buffer *dst,
4121 unsigned long dst_offset, unsigned long src_offset,
4122 unsigned long len)
4123{
4124 unsigned long dst_end = dst_offset + len - 1;
4125 unsigned long src_end = src_offset + len - 1;
4126
4127 if (check_eb_range(dst, dst_offset, len) ||
4128 check_eb_range(dst, src_offset, len))
4129 return;
4130
4131 if (dst_offset < src_offset) {
4132 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4133 return;
4134 }
4135
4136 if (dst->addr) {
4137 memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4138 return;
4139 }
4140
4141 while (len > 0) {
4142 unsigned long src_i;
4143 size_t cur;
4144 size_t dst_off_in_folio;
4145 size_t src_off_in_folio;
4146 void *src_addr;
4147 bool use_memmove;
4148
4149 src_i = get_eb_folio_index(dst, src_end);
4150
4151 dst_off_in_folio = get_eb_offset_in_folio(dst, dst_end);
4152 src_off_in_folio = get_eb_offset_in_folio(dst, src_end);
4153
4154 cur = min_t(unsigned long, len, src_off_in_folio + 1);
4155 cur = min(cur, dst_off_in_folio + 1);
4156
4157 src_addr = folio_address(dst->folios[src_i]) + src_off_in_folio -
4158 cur + 1;
4159 use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
4160 cur);
4161
4162 __write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
4163 use_memmove);
4164
4165 dst_end -= cur;
4166 src_end -= cur;
4167 len -= cur;
4168 }
4169}
4170
4171#define GANG_LOOKUP_SIZE 16
4172static struct extent_buffer *get_next_extent_buffer(
4173 const struct btrfs_fs_info *fs_info, struct folio *folio, u64 bytenr)
4174{
4175 struct extent_buffer *gang[GANG_LOOKUP_SIZE];
4176 struct extent_buffer *found = NULL;
4177 u64 folio_start = folio_pos(folio);
4178 u64 cur = folio_start;
4179
4180 ASSERT(in_range(bytenr, folio_start, PAGE_SIZE));
4181 lockdep_assert_held(&fs_info->buffer_lock);
4182
4183 while (cur < folio_start + PAGE_SIZE) {
4184 int ret;
4185 int i;
4186
4187 ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
4188 (void **)gang, cur >> fs_info->sectorsize_bits,
4189 min_t(unsigned int, GANG_LOOKUP_SIZE,
4190 PAGE_SIZE / fs_info->nodesize));
4191 if (ret == 0)
4192 goto out;
4193 for (i = 0; i < ret; i++) {
4194 /* Already beyond page end */
4195 if (gang[i]->start >= folio_start + PAGE_SIZE)
4196 goto out;
4197 /* Found one */
4198 if (gang[i]->start >= bytenr) {
4199 found = gang[i];
4200 goto out;
4201 }
4202 }
4203 cur = gang[ret - 1]->start + gang[ret - 1]->len;
4204 }
4205out:
4206 return found;
4207}
4208
4209static int try_release_subpage_extent_buffer(struct folio *folio)
4210{
4211 struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
4212 u64 cur = folio_pos(folio);
4213 const u64 end = cur + PAGE_SIZE;
4214 int ret;
4215
4216 while (cur < end) {
4217 struct extent_buffer *eb = NULL;
4218
4219 /*
4220 * Unlike try_release_extent_buffer() which uses folio private
4221 * to grab the buffer, for the subpage case we rely on the radix
4222 * tree, thus we need to ensure radix tree consistency.
4223 *
4224 * We also want an atomic snapshot of the radix tree, so go
4225 * with the spinlock rather than RCU.
4226 */
4227 spin_lock(&fs_info->buffer_lock);
4228 eb = get_next_extent_buffer(fs_info, folio, cur);
4229 if (!eb) {
4230 /* No more eb in the page range after or at cur */
4231 spin_unlock(&fs_info->buffer_lock);
4232 break;
4233 }
4234 cur = eb->start + eb->len;
4235
4236 /*
4237 * The same as try_release_extent_buffer(), to ensure the eb
4238 * won't disappear out from under us.
4239 */
4240 spin_lock(&eb->refs_lock);
4241 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4242 spin_unlock(&eb->refs_lock);
4243 spin_unlock(&fs_info->buffer_lock);
4244 break;
4245 }
4246 spin_unlock(&fs_info->buffer_lock);
4247
4248 /*
4249 * If the tree ref isn't set then we know the ref on this eb is a
4250 * real ref, so just stop here; this eb will likely be freed soon
4251 * anyway.
4252 */
4253 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4254 spin_unlock(&eb->refs_lock);
4255 break;
4256 }
4257
4258 /*
4259 * Here we don't care about the return value, as we will always
4260 * check folio private at the end anyway. And
4261 * release_extent_buffer() will release the refs_lock.
4262 */
4263 release_extent_buffer(eb);
4264 }
4265 /*
4266 * Finally check whether we have cleared folio private: if we have
4267 * released all ebs in the page, folio private should be cleared by now.
4268 */
4269 spin_lock(&folio->mapping->i_private_lock);
4270 if (!folio_test_private(folio))
4271 ret = 1;
4272 else
4273 ret = 0;
4274 spin_unlock(&folio->mapping->i_private_lock);
4275 return ret;
4277}
4278
4279int try_release_extent_buffer(struct folio *folio)
4280{
4281 struct extent_buffer *eb;
4282
4283 if (folio_to_fs_info(folio)->nodesize < PAGE_SIZE)
4284 return try_release_subpage_extent_buffer(folio);
4285
4286 /*
4287 * We need to make sure nobody is changing folio private, as we rely on
4288 * folio private as the pointer to the extent buffer.
4289 */
4290 spin_lock(&folio->mapping->i_private_lock);
4291 if (!folio_test_private(folio)) {
4292 spin_unlock(&folio->mapping->i_private_lock);
4293 return 1;
4294 }
4295
4296 eb = folio_get_private(folio);
4297 BUG_ON(!eb);
4298
4299 /*
4300 * This is a little awful but should be OK; we need to make sure that
4301 * the eb doesn't disappear out from under us while we're looking at
4302 * this folio.
4303 */
4304 spin_lock(&eb->refs_lock);
4305 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4306 spin_unlock(&eb->refs_lock);
4307 spin_unlock(&folio->mapping->i_private_lock);
4308 return 0;
4309 }
4310 spin_unlock(&folio->mapping->i_private_lock);
4311
4312 /*
4313 * If tree ref isn't set then we know the ref on this eb is a real ref,
4314 * so just return, this page will likely be freed soon anyway.
4315 */
4316 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4317 spin_unlock(&eb->refs_lock);
4318 return 0;
4319 }
4320
4321 return release_extent_buffer(eb);
4322}
4323
4324/*
4325 * Attempt to readahead a child block.
4326 *
4327 * @fs_info: the fs_info
4328 * @bytenr: bytenr to read
4329 * @owner_root: objectid of the root that owns this eb
4330 * @gen: generation for the uptodate check, can be 0
4331 * @level: level for the eb
4332 *
4333 * Attempt to readahead a tree block at @bytenr. If @gen is 0 then we do a
4334 * normal uptodate check of the eb, without checking the generation. If we have
4335 * to read the block we will not block on anything.
4336 */
4337void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
4338 u64 bytenr, u64 owner_root, u64 gen, int level)
4339{
4340 struct btrfs_tree_parent_check check = {
4341 .level = level,
4342 .transid = gen
4343 };
4344 struct extent_buffer *eb;
4345 int ret;
4346
4347 eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
4348 if (IS_ERR(eb))
4349 return;
4350
4351 if (btrfs_buffer_uptodate(eb, gen, 1)) {
4352 free_extent_buffer(eb);
4353 return;
4354 }
4355
4356 ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check);
4357 if (ret < 0)
4358 free_extent_buffer_stale(eb);
4359 else
4360 free_extent_buffer(eb);
4361}
4362
4363/*
4364 * Readahead a node's child block.
4365 *
4366 * @node: parent node we're reading from
4367 * @slot: slot in the parent node for the child we want to read
4368 *
4369 * A helper for btrfs_readahead_tree_block(); we simply read the bytenr pointed
4370 * to by the slot in the node provided.
4371 */
4372void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
4373{
4374 btrfs_readahead_tree_block(node->fs_info,
4375 btrfs_node_blockptr(node, slot),
4376 btrfs_header_owner(node),
4377 btrfs_node_ptr_generation(node, slot),
4378 btrfs_header_level(node) - 1);
4379}
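
/*
 * A sketched use of the readahead helpers above (illustrative only; real
 * callers usually readahead a window around the slot of interest rather than
 * the whole node): when about to descend into several children of @node,
 * kick off the reads first so later read_tree_block() calls are more likely
 * to find already-uptodate buffers:
 *
 *	for (slot = 0; slot < btrfs_header_nritems(node); slot++)
 *		btrfs_readahead_node_child(node, slot);
 */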
1#include <linux/bitops.h>
2#include <linux/slab.h>
3#include <linux/bio.h>
4#include <linux/mm.h>
5#include <linux/pagemap.h>
6#include <linux/page-flags.h>
7#include <linux/spinlock.h>
8#include <linux/blkdev.h>
9#include <linux/swap.h>
10#include <linux/writeback.h>
11#include <linux/pagevec.h>
12#include <linux/prefetch.h>
13#include <linux/cleancache.h>
14#include "extent_io.h"
15#include "extent_map.h"
16#include "ctree.h"
17#include "btrfs_inode.h"
18#include "volumes.h"
19#include "check-integrity.h"
20#include "locking.h"
21#include "rcu-string.h"
22#include "backref.h"
23
24static struct kmem_cache *extent_state_cache;
25static struct kmem_cache *extent_buffer_cache;
26static struct bio_set *btrfs_bioset;
27
28#ifdef CONFIG_BTRFS_DEBUG
29static LIST_HEAD(buffers);
30static LIST_HEAD(states);
31
32static DEFINE_SPINLOCK(leak_lock);
33
34static inline
35void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
36{
37 unsigned long flags;
38
39 spin_lock_irqsave(&leak_lock, flags);
40 list_add(new, head);
41 spin_unlock_irqrestore(&leak_lock, flags);
42}
43
44static inline
45void btrfs_leak_debug_del(struct list_head *entry)
46{
47 unsigned long flags;
48
49 spin_lock_irqsave(&leak_lock, flags);
50 list_del(entry);
51 spin_unlock_irqrestore(&leak_lock, flags);
52}
53
54static inline
55void btrfs_leak_debug_check(void)
56{
57 struct extent_state *state;
58 struct extent_buffer *eb;
59
60 while (!list_empty(&states)) {
61 state = list_entry(states.next, struct extent_state, leak_list);
62 printk(KERN_ERR "BTRFS: state leak: start %llu end %llu "
63 "state %lu in tree %p refs %d\n",
64 state->start, state->end, state->state, state->tree,
65 atomic_read(&state->refs));
66 list_del(&state->leak_list);
67 kmem_cache_free(extent_state_cache, state);
68 }
69
70 while (!list_empty(&buffers)) {
71 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
72 printk(KERN_ERR "BTRFS: buffer leak start %llu len %lu "
73 "refs %d\n",
74 eb->start, eb->len, atomic_read(&eb->refs));
75 list_del(&eb->leak_list);
76 kmem_cache_free(extent_buffer_cache, eb);
77 }
78}
79
80#define btrfs_debug_check_extent_io_range(tree, start, end) \
81 __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
82static inline void __btrfs_debug_check_extent_io_range(const char *caller,
83 struct extent_io_tree *tree, u64 start, u64 end)
84{
85 struct inode *inode;
86 u64 isize;
87
88 if (!tree->mapping)
89 return;
90
91 inode = tree->mapping->host;
92 isize = i_size_read(inode);
93 if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
94 printk_ratelimited(KERN_DEBUG
95 "BTRFS: %s: ino %llu isize %llu odd range [%llu,%llu]\n",
96 caller, btrfs_ino(inode), isize, start, end);
97 }
98}
99#else
100#define btrfs_leak_debug_add(new, head) do {} while (0)
101#define btrfs_leak_debug_del(entry) do {} while (0)
102#define btrfs_leak_debug_check() do {} while (0)
103#define btrfs_debug_check_extent_io_range(c, s, e) do {} while (0)
104#endif
105
106#define BUFFER_LRU_MAX 64
107
108struct tree_entry {
109 u64 start;
110 u64 end;
111 struct rb_node rb_node;
112};
113
114struct extent_page_data {
115 struct bio *bio;
116 struct extent_io_tree *tree;
117 get_extent_t *get_extent;
118 unsigned long bio_flags;
119
120 /* tells writepage not to lock the state bits for this range
121 * it still does the unlocking
122 */
123 unsigned int extent_locked:1;
124
125 /* tells the submit_bio code to use a WRITE_SYNC */
126 unsigned int sync_io:1;
127};
128
129static noinline void flush_write_bio(void *data);
130static inline struct btrfs_fs_info *
131tree_fs_info(struct extent_io_tree *tree)
132{
133 if (!tree->mapping)
134 return NULL;
135 return btrfs_sb(tree->mapping->host->i_sb);
136}
137
138int __init extent_io_init(void)
139{
140 extent_state_cache = kmem_cache_create("btrfs_extent_state",
141 sizeof(struct extent_state), 0,
142 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
143 if (!extent_state_cache)
144 return -ENOMEM;
145
146 extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
147 sizeof(struct extent_buffer), 0,
148 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
149 if (!extent_buffer_cache)
150 goto free_state_cache;
151
152 btrfs_bioset = bioset_create(BIO_POOL_SIZE,
153 offsetof(struct btrfs_io_bio, bio));
154 if (!btrfs_bioset)
155 goto free_buffer_cache;
156
157 if (bioset_integrity_create(btrfs_bioset, BIO_POOL_SIZE))
158 goto free_bioset;
159
160 return 0;
161
162free_bioset:
163 bioset_free(btrfs_bioset);
164 btrfs_bioset = NULL;
165
166free_buffer_cache:
167 kmem_cache_destroy(extent_buffer_cache);
168 extent_buffer_cache = NULL;
169
170free_state_cache:
171 kmem_cache_destroy(extent_state_cache);
172 extent_state_cache = NULL;
173 return -ENOMEM;
174}
175
176void extent_io_exit(void)
177{
178 btrfs_leak_debug_check();
179
180 /*
181 * Make sure all delayed rcu free are flushed before we
182 * destroy caches.
183 */
184 rcu_barrier();
185 if (extent_state_cache)
186 kmem_cache_destroy(extent_state_cache);
187 if (extent_buffer_cache)
188 kmem_cache_destroy(extent_buffer_cache);
189 if (btrfs_bioset)
190 bioset_free(btrfs_bioset);
191}
192
193void extent_io_tree_init(struct extent_io_tree *tree,
194 struct address_space *mapping)
195{
196 tree->state = RB_ROOT;
197 tree->ops = NULL;
198 tree->dirty_bytes = 0;
199 spin_lock_init(&tree->lock);
200 tree->mapping = mapping;
201}
202
203static struct extent_state *alloc_extent_state(gfp_t mask)
204{
205 struct extent_state *state;
206
207 state = kmem_cache_alloc(extent_state_cache, mask);
208 if (!state)
209 return state;
210 state->state = 0;
211 state->private = 0;
212 state->tree = NULL;
213 btrfs_leak_debug_add(&state->leak_list, &states);
214 atomic_set(&state->refs, 1);
215 init_waitqueue_head(&state->wq);
216 trace_alloc_extent_state(state, mask, _RET_IP_);
217 return state;
218}
219
220void free_extent_state(struct extent_state *state)
221{
222 if (!state)
223 return;
224 if (atomic_dec_and_test(&state->refs)) {
225 WARN_ON(state->tree);
226 btrfs_leak_debug_del(&state->leak_list);
227 trace_free_extent_state(state, _RET_IP_);
228 kmem_cache_free(extent_state_cache, state);
229 }
230}
231
232static struct rb_node *tree_insert(struct rb_root *root,
233 struct rb_node *search_start,
234 u64 offset,
235 struct rb_node *node,
236 struct rb_node ***p_in,
237 struct rb_node **parent_in)
238{
239 struct rb_node **p;
240 struct rb_node *parent = NULL;
241 struct tree_entry *entry;
242
243 if (p_in && parent_in) {
244 p = *p_in;
245 parent = *parent_in;
246 goto do_insert;
247 }
248
249 p = search_start ? &search_start : &root->rb_node;
250 while (*p) {
251 parent = *p;
252 entry = rb_entry(parent, struct tree_entry, rb_node);
253
254 if (offset < entry->start)
255 p = &(*p)->rb_left;
256 else if (offset > entry->end)
257 p = &(*p)->rb_right;
258 else
259 return parent;
260 }
261
262do_insert:
263 rb_link_node(node, parent, p);
264 rb_insert_color(node, root);
265 return NULL;
266}
267
268static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
269 struct rb_node **prev_ret,
270 struct rb_node **next_ret,
271 struct rb_node ***p_ret,
272 struct rb_node **parent_ret)
273{
274 struct rb_root *root = &tree->state;
275 struct rb_node **n = &root->rb_node;
276 struct rb_node *prev = NULL;
277 struct rb_node *orig_prev = NULL;
278 struct tree_entry *entry;
279 struct tree_entry *prev_entry = NULL;
280
281 while (*n) {
282 prev = *n;
283 entry = rb_entry(prev, struct tree_entry, rb_node);
284 prev_entry = entry;
285
286 if (offset < entry->start)
287 n = &(*n)->rb_left;
288 else if (offset > entry->end)
289 n = &(*n)->rb_right;
290 else
291 return *n;
292 }
293
294 if (p_ret)
295 *p_ret = n;
296 if (parent_ret)
297 *parent_ret = prev;
298
299 if (prev_ret) {
300 orig_prev = prev;
301 while (prev && offset > prev_entry->end) {
302 prev = rb_next(prev);
303 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
304 }
305 *prev_ret = prev;
306 prev = orig_prev;
307 }
308
309 if (next_ret) {
310 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
311 while (prev && offset < prev_entry->start) {
312 prev = rb_prev(prev);
313 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
314 }
315 *next_ret = prev;
316 }
317 return NULL;
318}
319
320static inline struct rb_node *
321tree_search_for_insert(struct extent_io_tree *tree,
322 u64 offset,
323 struct rb_node ***p_ret,
324 struct rb_node **parent_ret)
325{
326 struct rb_node *prev = NULL;
327 struct rb_node *ret;
328
329 ret = __etree_search(tree, offset, &prev, NULL, p_ret, parent_ret);
330 if (!ret)
331 return prev;
332 return ret;
333}
334
335static inline struct rb_node *tree_search(struct extent_io_tree *tree,
336 u64 offset)
337{
338 return tree_search_for_insert(tree, offset, NULL, NULL);
339}
340
341static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
342 struct extent_state *other)
343{
344 if (tree->ops && tree->ops->merge_extent_hook)
345 tree->ops->merge_extent_hook(tree->mapping->host, new,
346 other);
347}
348
349/*
350 * utility function to look for merge candidates inside a given range.
351 * Any extents with matching state are merged together into a single
352 * extent in the tree. Extents with EXTENT_IO in their state field
353 * are not merged because the end_io handlers need to be able to do
354 * operations on them without sleeping (or doing allocations/splits).
355 *
356 * This should be called with the tree lock held.
357 */
358static void merge_state(struct extent_io_tree *tree,
359 struct extent_state *state)
360{
361 struct extent_state *other;
362 struct rb_node *other_node;
363
364 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
365 return;
366
367 other_node = rb_prev(&state->rb_node);
368 if (other_node) {
369 other = rb_entry(other_node, struct extent_state, rb_node);
370 if (other->end == state->start - 1 &&
371 other->state == state->state) {
372 merge_cb(tree, state, other);
373 state->start = other->start;
374 other->tree = NULL;
375 rb_erase(&other->rb_node, &tree->state);
376 free_extent_state(other);
377 }
378 }
379 other_node = rb_next(&state->rb_node);
380 if (other_node) {
381 other = rb_entry(other_node, struct extent_state, rb_node);
382 if (other->start == state->end + 1 &&
383 other->state == state->state) {
384 merge_cb(tree, state, other);
385 state->end = other->end;
386 other->tree = NULL;
387 rb_erase(&other->rb_node, &tree->state);
388 free_extent_state(other);
389 }
390 }
391}
392
393static void set_state_cb(struct extent_io_tree *tree,
394 struct extent_state *state, unsigned long *bits)
395{
396 if (tree->ops && tree->ops->set_bit_hook)
397 tree->ops->set_bit_hook(tree->mapping->host, state, bits);
398}
399
400static void clear_state_cb(struct extent_io_tree *tree,
401 struct extent_state *state, unsigned long *bits)
402{
403 if (tree->ops && tree->ops->clear_bit_hook)
404 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
405}
406
407static void set_state_bits(struct extent_io_tree *tree,
408 struct extent_state *state, unsigned long *bits);
409
410/*
411 * insert an extent_state struct into the tree. 'bits' are set on the
412 * struct before it is inserted.
413 *
414 * This may return -EEXIST if the extent is already there, in which case the
415 * state struct is freed.
416 *
417 * The tree lock is not taken internally. This is a utility function and
418 * probably isn't what you want to call (see set/clear_extent_bit).
419 */
420static int insert_state(struct extent_io_tree *tree,
421 struct extent_state *state, u64 start, u64 end,
422 struct rb_node ***p,
423 struct rb_node **parent,
424 unsigned long *bits)
425{
426 struct rb_node *node;
427
428 if (end < start)
429 WARN(1, KERN_ERR "BTRFS: end < start %llu %llu\n",
430 end, start);
431 state->start = start;
432 state->end = end;
433
434 set_state_bits(tree, state, bits);
435
436 node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
437 if (node) {
438 struct extent_state *found;
439 found = rb_entry(node, struct extent_state, rb_node);
440 printk(KERN_ERR "BTRFS: found node %llu %llu on insert of "
441 "%llu %llu\n",
442 found->start, found->end, start, end);
443 return -EEXIST;
444 }
445 state->tree = tree;
446 merge_state(tree, state);
447 return 0;
448}
449
450static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
451 u64 split)
452{
453 if (tree->ops && tree->ops->split_extent_hook)
454 tree->ops->split_extent_hook(tree->mapping->host, orig, split);
455}
456
457/*
458 * split a given extent state struct in two, inserting the preallocated
459 * struct 'prealloc' as the newly created second half. 'split' indicates an
460 * offset inside 'orig' where it should be split.
461 *
462 * Before calling,
463 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
464 * are two extent state structs in the tree:
465 * prealloc: [orig->start, split - 1]
466 * orig: [ split, orig->end ]
467 *
468 * The tree locks are not taken by this function. They need to be held
469 * by the caller.
470 */
471static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
472 struct extent_state *prealloc, u64 split)
473{
474 struct rb_node *node;
475
476 split_cb(tree, orig, split);
477
478 prealloc->start = orig->start;
479 prealloc->end = split - 1;
480 prealloc->state = orig->state;
481 orig->start = split;
482
483 node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
484 &prealloc->rb_node, NULL, NULL);
485 if (node) {
486 free_extent_state(prealloc);
487 return -EEXIST;
488 }
489 prealloc->tree = tree;
490 return 0;
491}
492
493static struct extent_state *next_state(struct extent_state *state)
494{
495 struct rb_node *next = rb_next(&state->rb_node);
496 if (next)
497 return rb_entry(next, struct extent_state, rb_node);
498 else
499 return NULL;
500}
501
502/*
503 * utility function to clear some bits in an extent state struct.
504 * it will optionally wake up any one waiting on this state (wake == 1).
505 *
506 * If no bits are set on the state struct after clearing things, the
507 * struct is freed and removed from the tree
508 */
509static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
510 struct extent_state *state,
511 unsigned long *bits, int wake)
512{
513 struct extent_state *next;
514 unsigned long bits_to_clear = *bits & ~EXTENT_CTLBITS;
515
516 if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
517 u64 range = state->end - state->start + 1;
518 WARN_ON(range > tree->dirty_bytes);
519 tree->dirty_bytes -= range;
520 }
521 clear_state_cb(tree, state, bits);
522 state->state &= ~bits_to_clear;
523 if (wake)
524 wake_up(&state->wq);
525 if (state->state == 0) {
526 next = next_state(state);
527 if (state->tree) {
528 rb_erase(&state->rb_node, &tree->state);
529 state->tree = NULL;
530 free_extent_state(state);
531 } else {
532 WARN_ON(1);
533 }
534 } else {
535 merge_state(tree, state);
536 next = next_state(state);
537 }
538 return next;
539}
540
541static struct extent_state *
542alloc_extent_state_atomic(struct extent_state *prealloc)
543{
544 if (!prealloc)
545 prealloc = alloc_extent_state(GFP_ATOMIC);
546
547 return prealloc;
548}
549
550static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
551{
552 btrfs_panic(tree_fs_info(tree), err, "Locking error: "
553 "Extent tree was modified by another "
554 "thread while locked.");
555}
556
557/*
558 * clear some bits on a range in the tree. This may require splitting
559 * or inserting elements in the tree, so the gfp mask is used to
560 * indicate which allocations or sleeping are allowed.
561 *
562 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
563 * the given range from the tree regardless of state (ie for truncate).
564 *
565 * the range [start, end] is inclusive.
566 *
567 * This takes the tree lock, and returns 0 on success and < 0 on error.
568 */
569int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
570 unsigned long bits, int wake, int delete,
571 struct extent_state **cached_state,
572 gfp_t mask)
573{
574 struct extent_state *state;
575 struct extent_state *cached;
576 struct extent_state *prealloc = NULL;
577 struct rb_node *node;
578 u64 last_end;
579 int err;
580 int clear = 0;
581
582 btrfs_debug_check_extent_io_range(tree, start, end);
583
584 if (bits & EXTENT_DELALLOC)
585 bits |= EXTENT_NORESERVE;
586
587 if (delete)
588 bits |= ~EXTENT_CTLBITS;
589 bits |= EXTENT_FIRST_DELALLOC;
590
591 if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
592 clear = 1;
593again:
594 if (!prealloc && (mask & __GFP_WAIT)) {
595 prealloc = alloc_extent_state(mask);
596 if (!prealloc)
597 return -ENOMEM;
598 }
599
600 spin_lock(&tree->lock);
601 if (cached_state) {
602 cached = *cached_state;
603
604 if (clear) {
605 *cached_state = NULL;
606 cached_state = NULL;
607 }
608
609 if (cached && cached->tree && cached->start <= start &&
610 cached->end > start) {
611 if (clear)
612 atomic_dec(&cached->refs);
613 state = cached;
614 goto hit_next;
615 }
616 if (clear)
617 free_extent_state(cached);
618 }
619 /*
620 * this search will find the extents that end after
621 * our range starts
622 */
623 node = tree_search(tree, start);
624 if (!node)
625 goto out;
626 state = rb_entry(node, struct extent_state, rb_node);
627hit_next:
628 if (state->start > end)
629 goto out;
630 WARN_ON(state->end < start);
631 last_end = state->end;
632
633 /* the state doesn't have the wanted bits, go ahead */
634 if (!(state->state & bits)) {
635 state = next_state(state);
636 goto next;
637 }
638
639 /*
640 * | ---- desired range ---- |
641 * | state | or
642 * | ------------- state -------------- |
643 *
644 * We need to split the extent we found, and may flip
645 * bits on second half.
646 *
647 * If the extent we found extends past our range, we
648 * just split and search again. It'll get split again
649 * the next time though.
650 *
651 * If the extent we found is inside our range, we clear
652 * the desired bit on it.
653 */
654
655 if (state->start < start) {
656 prealloc = alloc_extent_state_atomic(prealloc);
657 BUG_ON(!prealloc);
658 err = split_state(tree, state, prealloc, start);
659 if (err)
660 extent_io_tree_panic(tree, err);
661
662 prealloc = NULL;
663 if (err)
664 goto out;
665 if (state->end <= end) {
666 state = clear_state_bit(tree, state, &bits, wake);
667 goto next;
668 }
669 goto search_again;
670 }
671 /*
672 * | ---- desired range ---- |
673 * | state |
674 * We need to split the extent, and clear the bit
675 * on the first half
676 */
677 if (state->start <= end && state->end > end) {
678 prealloc = alloc_extent_state_atomic(prealloc);
679 BUG_ON(!prealloc);
680 err = split_state(tree, state, prealloc, end + 1);
681 if (err)
682 extent_io_tree_panic(tree, err);
683
684 if (wake)
685 wake_up(&state->wq);
686
687 clear_state_bit(tree, prealloc, &bits, wake);
688
689 prealloc = NULL;
690 goto out;
691 }
692
693 state = clear_state_bit(tree, state, &bits, wake);
694next:
695 if (last_end == (u64)-1)
696 goto out;
697 start = last_end + 1;
698 if (start <= end && state && !need_resched())
699 goto hit_next;
700 goto search_again;
701
702out:
703 spin_unlock(&tree->lock);
704 if (prealloc)
705 free_extent_state(prealloc);
706
707 return 0;
708
709search_again:
710 if (start > end)
711 goto out;
712 spin_unlock(&tree->lock);
713 if (mask & __GFP_WAIT)
714 cond_resched();
715 goto again;
716}
717
718static void wait_on_state(struct extent_io_tree *tree,
719 struct extent_state *state)
720 __releases(tree->lock)
721 __acquires(tree->lock)
722{
723 DEFINE_WAIT(wait);
724 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
725 spin_unlock(&tree->lock);
726 schedule();
727 spin_lock(&tree->lock);
728 finish_wait(&state->wq, &wait);
729}
730
731/*
732 * waits for one or more bits to clear on a range in the state tree.
733 * The range [start, end] is inclusive.
734 * The tree lock is taken by this function
735 */
736static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
737 unsigned long bits)
738{
739 struct extent_state *state;
740 struct rb_node *node;
741
742 btrfs_debug_check_extent_io_range(tree, start, end);
743
744 spin_lock(&tree->lock);
745again:
746 while (1) {
747 /*
748 * this search will find all the extents that end after
749 * our range starts
750 */
751 node = tree_search(tree, start);
752process_node:
753 if (!node)
754 break;
755
756 state = rb_entry(node, struct extent_state, rb_node);
757
758 if (state->start > end)
759 goto out;
760
761 if (state->state & bits) {
762 start = state->start;
763 atomic_inc(&state->refs);
764 wait_on_state(tree, state);
765 free_extent_state(state);
766 goto again;
767 }
768 start = state->end + 1;
769
770 if (start > end)
771 break;
772
773 if (!cond_resched_lock(&tree->lock)) {
774 node = rb_next(node);
775 goto process_node;
776 }
777 }
778out:
779 spin_unlock(&tree->lock);
780}
781
782static void set_state_bits(struct extent_io_tree *tree,
783 struct extent_state *state,
784 unsigned long *bits)
785{
786 unsigned long bits_to_set = *bits & ~EXTENT_CTLBITS;
787
788 set_state_cb(tree, state, bits);
789 if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
790 u64 range = state->end - state->start + 1;
791 tree->dirty_bytes += range;
792 }
793 state->state |= bits_to_set;
794}
795
796static void cache_state(struct extent_state *state,
797 struct extent_state **cached_ptr)
798{
799 if (cached_ptr && !(*cached_ptr)) {
800 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
801 *cached_ptr = state;
802 atomic_inc(&state->refs);
803 }
804 }
805}
806
807/*
808 * set some bits on a range in the tree. This may require allocations or
809 * sleeping, so the gfp mask is used to indicate what is allowed.
810 *
811 * If any of the exclusive bits are set, this will fail with -EEXIST if some
812 * part of the range already has the desired bits set. The start of the
813 * existing range is returned in failed_start in this case.
814 *
815 * [start, end] is inclusive This takes the tree lock.
816 */
817
818static int __must_check
819__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
820 unsigned long bits, unsigned long exclusive_bits,
821 u64 *failed_start, struct extent_state **cached_state,
822 gfp_t mask)
823{
824 struct extent_state *state;
825 struct extent_state *prealloc = NULL;
826 struct rb_node *node;
827 struct rb_node **p;
828 struct rb_node *parent;
829 int err = 0;
830 u64 last_start;
831 u64 last_end;
832
833 btrfs_debug_check_extent_io_range(tree, start, end);
834
835 bits |= EXTENT_FIRST_DELALLOC;
836again:
837 if (!prealloc && (mask & __GFP_WAIT)) {
838 prealloc = alloc_extent_state(mask);
839 BUG_ON(!prealloc);
840 }
841
842 spin_lock(&tree->lock);
843 if (cached_state && *cached_state) {
844 state = *cached_state;
845 if (state->start <= start && state->end > start &&
846 state->tree) {
847 node = &state->rb_node;
848 goto hit_next;
849 }
850 }
851 /*
852 * this search will find all the extents that end after
853 * our range starts.
854 */
855 node = tree_search_for_insert(tree, start, &p, &parent);
856 if (!node) {
857 prealloc = alloc_extent_state_atomic(prealloc);
858 BUG_ON(!prealloc);
859 err = insert_state(tree, prealloc, start, end,
860 &p, &parent, &bits);
861 if (err)
862 extent_io_tree_panic(tree, err);
863
864 cache_state(prealloc, cached_state);
865 prealloc = NULL;
866 goto out;
867 }
868 state = rb_entry(node, struct extent_state, rb_node);
869hit_next:
870 last_start = state->start;
871 last_end = state->end;
872
873 /*
874 * | ---- desired range ---- |
875 * | state |
876 *
877 * Just lock what we found and keep going
878 */
879 if (state->start == start && state->end <= end) {
880 if (state->state & exclusive_bits) {
881 *failed_start = state->start;
882 err = -EEXIST;
883 goto out;
884 }
885
886 set_state_bits(tree, state, &bits);
887 cache_state(state, cached_state);
888 merge_state(tree, state);
889 if (last_end == (u64)-1)
890 goto out;
891 start = last_end + 1;
892 state = next_state(state);
893 if (start < end && state && state->start == start &&
894 !need_resched())
895 goto hit_next;
896 goto search_again;
897 }
898
899 /*
900 * | ---- desired range ---- |
901 * | state |
902 * or
903 * | ------------- state -------------- |
904 *
905 * We need to split the extent we found, and may flip bits on
906 * second half.
907 *
908 * If the extent we found extends past our
909 * range, we just split and search again. It'll get split
910 * again the next time though.
911 *
912 * If the extent we found is inside our range, we set the
913 * desired bit on it.
914 */
915 if (state->start < start) {
916 if (state->state & exclusive_bits) {
917 *failed_start = start;
918 err = -EEXIST;
919 goto out;
920 }
921
922 prealloc = alloc_extent_state_atomic(prealloc);
923 BUG_ON(!prealloc);
924 err = split_state(tree, state, prealloc, start);
925 if (err)
926 extent_io_tree_panic(tree, err);
927
928 prealloc = NULL;
929 if (err)
930 goto out;
931 if (state->end <= end) {
932 set_state_bits(tree, state, &bits);
933 cache_state(state, cached_state);
934 merge_state(tree, state);
935 if (last_end == (u64)-1)
936 goto out;
937 start = last_end + 1;
938 state = next_state(state);
939 if (start < end && state && state->start == start &&
940 !need_resched())
941 goto hit_next;
942 }
943 goto search_again;
944 }
945 /*
946 * | ---- desired range ---- |
947 * | state | or | state |
948 *
949 * There's a hole, we need to insert something in it and
950 * ignore the extent we found.
951 */
952 if (state->start > start) {
953 u64 this_end;
954 if (end < last_start)
955 this_end = end;
956 else
957 this_end = last_start - 1;
958
959 prealloc = alloc_extent_state_atomic(prealloc);
960 BUG_ON(!prealloc);
961
962 /*
963 * Avoid to free 'prealloc' if it can be merged with
964 * the later extent.
965 */
966 err = insert_state(tree, prealloc, start, this_end,
967 NULL, NULL, &bits);
968 if (err)
969 extent_io_tree_panic(tree, err);
970
971 cache_state(prealloc, cached_state);
972 prealloc = NULL;
973 start = this_end + 1;
974 goto search_again;
975 }
976 /*
977 * | ---- desired range ---- |
978 * | state |
979 * We need to split the extent, and set the bit
980 * on the first half
981 */
982 if (state->start <= end && state->end > end) {
983 if (state->state & exclusive_bits) {
984 *failed_start = start;
985 err = -EEXIST;
986 goto out;
987 }
988
989 prealloc = alloc_extent_state_atomic(prealloc);
990 BUG_ON(!prealloc);
991 err = split_state(tree, state, prealloc, end + 1);
992 if (err)
993 extent_io_tree_panic(tree, err);
994
995 set_state_bits(tree, prealloc, &bits);
996 cache_state(prealloc, cached_state);
997 merge_state(tree, prealloc);
998 prealloc = NULL;
999 goto out;
1000 }
1001
1002 goto search_again;
1003
1004out:
1005 spin_unlock(&tree->lock);
1006 if (prealloc)
1007 free_extent_state(prealloc);
1008
1009 return err;
1010
1011search_again:
1012 if (start > end)
1013 goto out;
1014 spin_unlock(&tree->lock);
1015 if (mask & __GFP_WAIT)
1016 cond_resched();
1017 goto again;
1018}
1019
1020int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1021 unsigned long bits, u64 * failed_start,
1022 struct extent_state **cached_state, gfp_t mask)
1023{
1024 return __set_extent_bit(tree, start, end, bits, 0, failed_start,
1025 cached_state, mask);
1026}
1027
1028
1029/**
1030 * convert_extent_bit - convert all bits in a given range from one bit to
1031 * another
1032 * @tree: the io tree to search
1033 * @start: the start offset in bytes
1034 * @end: the end offset in bytes (inclusive)
1035 * @bits: the bits to set in this range
1036 * @clear_bits: the bits to clear in this range
1037 * @cached_state: state that we're going to cache
1038 * @mask: the allocation mask
1039 *
1040 * This will go through and set bits for the given range. If any states exist
1041 * already in this range they are set with the given bit and cleared of the
1042 * clear_bits. This is only meant to be used by things that are mergeable, ie
1043 * converting from say DELALLOC to DIRTY. This is not meant to be used with
1044 * boundary bits like LOCK.
1045 */
1046int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1047 unsigned long bits, unsigned long clear_bits,
1048 struct extent_state **cached_state, gfp_t mask)
1049{
1050 struct extent_state *state;
1051 struct extent_state *prealloc = NULL;
1052 struct rb_node *node;
1053 struct rb_node **p;
1054 struct rb_node *parent;
1055 int err = 0;
1056 u64 last_start;
1057 u64 last_end;
1058
1059 btrfs_debug_check_extent_io_range(tree, start, end);
1060
1061again:
1062 if (!prealloc && (mask & __GFP_WAIT)) {
1063 prealloc = alloc_extent_state(mask);
1064 if (!prealloc)
1065 return -ENOMEM;
1066 }
1067
1068 spin_lock(&tree->lock);
1069 if (cached_state && *cached_state) {
1070 state = *cached_state;
1071 if (state->start <= start && state->end > start &&
1072 state->tree) {
1073 node = &state->rb_node;
1074 goto hit_next;
1075 }
1076 }
1077
1078 /*
1079 * this search will find all the extents that end after
1080 * our range starts.
1081 */
1082 node = tree_search_for_insert(tree, start, &p, &parent);
1083 if (!node) {
1084 prealloc = alloc_extent_state_atomic(prealloc);
1085 if (!prealloc) {
1086 err = -ENOMEM;
1087 goto out;
1088 }
1089 err = insert_state(tree, prealloc, start, end,
1090 &p, &parent, &bits);
1091 if (err)
1092 extent_io_tree_panic(tree, err);
1093 cache_state(prealloc, cached_state);
1094 prealloc = NULL;
1095 goto out;
1096 }
1097 state = rb_entry(node, struct extent_state, rb_node);
1098hit_next:
1099 last_start = state->start;
1100 last_end = state->end;
1101
1102 /*
1103 * | ---- desired range ---- |
1104 * | state |
1105 *
1106 * Just lock what we found and keep going
1107 */
1108 if (state->start == start && state->end <= end) {
1109 set_state_bits(tree, state, &bits);
1110 cache_state(state, cached_state);
1111 state = clear_state_bit(tree, state, &clear_bits, 0);
1112 if (last_end == (u64)-1)
1113 goto out;
1114 start = last_end + 1;
1115 if (start < end && state && state->start == start &&
1116 !need_resched())
1117 goto hit_next;
1118 goto search_again;
1119 }
1120
1121 /*
1122 * | ---- desired range ---- |
1123 * | state |
1124 * or
1125 * | ------------- state -------------- |
1126 *
1127 * We need to split the extent we found, and may flip bits on
1128 * second half.
1129 *
1130 * If the extent we found extends past our
1131 * range, we just split and search again. It'll get split
1132 * again the next time though.
1133 *
1134 * If the extent we found is inside our range, we set the
1135 * desired bit on it.
1136 */
1137 if (state->start < start) {
1138 prealloc = alloc_extent_state_atomic(prealloc);
1139 if (!prealloc) {
1140 err = -ENOMEM;
1141 goto out;
1142 }
1143 err = split_state(tree, state, prealloc, start);
1144 if (err)
1145 extent_io_tree_panic(tree, err);
1146 prealloc = NULL;
1147 if (err)
1148 goto out;
1149 if (state->end <= end) {
1150 set_state_bits(tree, state, &bits);
1151 cache_state(state, cached_state);
1152 state = clear_state_bit(tree, state, &clear_bits, 0);
1153 if (last_end == (u64)-1)
1154 goto out;
1155 start = last_end + 1;
1156 if (start < end && state && state->start == start &&
1157 !need_resched())
1158 goto hit_next;
1159 }
1160 goto search_again;
1161 }
1162 /*
1163 * | ---- desired range ---- |
1164 * | state | or | state |
1165 *
1166 * There's a hole, we need to insert something in it and
1167 * ignore the extent we found.
1168 */
1169 if (state->start > start) {
1170 u64 this_end;
1171 if (end < last_start)
1172 this_end = end;
1173 else
1174 this_end = last_start - 1;
1175
1176 prealloc = alloc_extent_state_atomic(prealloc);
1177 if (!prealloc) {
1178 err = -ENOMEM;
1179 goto out;
1180 }
1181
		/*
		 * Avoid freeing 'prealloc' if it can be merged with the
		 * later extent.
		 */
1186 err = insert_state(tree, prealloc, start, this_end,
1187 NULL, NULL, &bits);
1188 if (err)
1189 extent_io_tree_panic(tree, err);
1190 cache_state(prealloc, cached_state);
1191 prealloc = NULL;
1192 start = this_end + 1;
1193 goto search_again;
1194 }
1195 /*
1196 * | ---- desired range ---- |
1197 * | state |
1198 * We need to split the extent, and set the bit
1199 * on the first half
1200 */
1201 if (state->start <= end && state->end > end) {
1202 prealloc = alloc_extent_state_atomic(prealloc);
1203 if (!prealloc) {
1204 err = -ENOMEM;
1205 goto out;
1206 }
1207
1208 err = split_state(tree, state, prealloc, end + 1);
1209 if (err)
1210 extent_io_tree_panic(tree, err);
1211
1212 set_state_bits(tree, prealloc, &bits);
1213 cache_state(prealloc, cached_state);
1214 clear_state_bit(tree, prealloc, &clear_bits, 0);
1215 prealloc = NULL;
1216 goto out;
1217 }
1218
1219 goto search_again;
1220
1221out:
1222 spin_unlock(&tree->lock);
1223 if (prealloc)
1224 free_extent_state(prealloc);
1225
1226 return err;
1227
1228search_again:
1229 if (start > end)
1230 goto out;
1231 spin_unlock(&tree->lock);
1232 if (mask & __GFP_WAIT)
1233 cond_resched();
1234 goto again;
1235}
1236
1237/* wrappers around set/clear extent bit */
1238int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1239 gfp_t mask)
1240{
1241 return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
1242 NULL, mask);
1243}
1244
1245int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1246 unsigned long bits, gfp_t mask)
1247{
1248 return set_extent_bit(tree, start, end, bits, NULL,
1249 NULL, mask);
1250}
1251
1252int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1253 unsigned long bits, gfp_t mask)
1254{
1255 return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
1256}
1257
1258int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
1259 struct extent_state **cached_state, gfp_t mask)
1260{
1261 return set_extent_bit(tree, start, end,
1262 EXTENT_DELALLOC | EXTENT_UPTODATE,
1263 NULL, cached_state, mask);
1264}
1265
1266int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
1267 struct extent_state **cached_state, gfp_t mask)
1268{
1269 return set_extent_bit(tree, start, end,
1270 EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
1271 NULL, cached_state, mask);
1272}
1273
1274int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1275 gfp_t mask)
1276{
1277 return clear_extent_bit(tree, start, end,
1278 EXTENT_DIRTY | EXTENT_DELALLOC |
1279 EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
1280}
1281
1282int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
1283 gfp_t mask)
1284{
1285 return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
1286 NULL, mask);
1287}
1288
1289int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1290 struct extent_state **cached_state, gfp_t mask)
1291{
1292 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
1293 cached_state, mask);
1294}
1295
1296int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1297 struct extent_state **cached_state, gfp_t mask)
1298{
1299 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
1300 cached_state, mask);
1301}
1302
/*
 * Either insert or lock the extent state struct covering the range from
 * start to end.  If part of the range is already locked, wait for that
 * lock to be released and retry until the whole range is locked.
 */
1307int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1308 unsigned long bits, struct extent_state **cached_state)
1309{
1310 int err;
1311 u64 failed_start;
1312 while (1) {
1313 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
1314 EXTENT_LOCKED, &failed_start,
1315 cached_state, GFP_NOFS);
1316 if (err == -EEXIST) {
1317 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1318 start = failed_start;
1319 } else
1320 break;
1321 WARN_ON(start > end);
1322 }
1323 return err;
1324}
1325
1326int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1327{
1328 return lock_extent_bits(tree, start, end, 0, NULL);
1329}
1330
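/*
 * Try to lock the range without waiting.  Returns 1 if the whole range was
 * locked, or 0 if some part of it was already locked; in that case any
 * portion that was locked here is unlocked again before returning.
 */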
1331int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1332{
1333 int err;
1334 u64 failed_start;
1335
1336 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1337 &failed_start, NULL, GFP_NOFS);
1338 if (err == -EEXIST) {
1339 if (failed_start > start)
1340 clear_extent_bit(tree, start, failed_start - 1,
1341 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
1342 return 0;
1343 }
1344 return 1;
1345}
1346
1347int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1348 struct extent_state **cached, gfp_t mask)
1349{
1350 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
1351 mask);
1352}
1353
1354int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1355{
1356 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
1357 GFP_NOFS);
1358}
1359
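/*
 * Clear the dirty bit, in preparation for IO, on every page backing the byte
 * range [start, end].  The pages are expected to be present in the page
 * cache.
 */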
1360int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
1361{
1362 unsigned long index = start >> PAGE_CACHE_SHIFT;
1363 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1364 struct page *page;
1365
1366 while (index <= end_index) {
1367 page = find_get_page(inode->i_mapping, index);
1368 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1369 clear_page_dirty_for_io(page);
1370 page_cache_release(page);
1371 index++;
1372 }
1373 return 0;
1374}
1375
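/*
 * Re-dirty every page backing the byte range [start, end], accounting for
 * the redirty so writeback statistics stay correct.
 */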
1376int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1377{
1378 unsigned long index = start >> PAGE_CACHE_SHIFT;
1379 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1380 struct page *page;
1381
1382 while (index <= end_index) {
1383 page = find_get_page(inode->i_mapping, index);
1384 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1385 account_page_redirty(page);
1386 __set_page_dirty_nobuffers(page);
1387 page_cache_release(page);
1388 index++;
1389 }
1390 return 0;
1391}
1392
1393/*
1394 * helper function to set both pages and extents in the tree writeback
1395 */
1396static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1397{
1398 unsigned long index = start >> PAGE_CACHE_SHIFT;
1399 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1400 struct page *page;
1401
1402 while (index <= end_index) {
1403 page = find_get_page(tree->mapping, index);
1404 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1405 set_page_writeback(page);
1406 page_cache_release(page);
1407 index++;
1408 }
1409 return 0;
1410}
1411
/*
 * Find the first state struct with 'bits' set after 'start' and return it.
 * The tree->lock must be held.  NULL is returned if nothing was found after
 * 'start'.
 */
1416static struct extent_state *
1417find_first_extent_bit_state(struct extent_io_tree *tree,
1418 u64 start, unsigned long bits)
1419{
1420 struct rb_node *node;
1421 struct extent_state *state;
1422
1423 /*
1424 * this search will find all the extents that end after
1425 * our range starts.
1426 */
1427 node = tree_search(tree, start);
1428 if (!node)
1429 goto out;
1430
1431 while (1) {
1432 state = rb_entry(node, struct extent_state, rb_node);
1433 if (state->end >= start && (state->state & bits))
1434 return state;
1435
1436 node = rb_next(node);
1437 if (!node)
1438 break;
1439 }
1440out:
1441 return NULL;
1442}
1443
/*
 * Find the first offset in the io tree with 'bits' set.  If something is
 * found, 0 is returned and *start_ret and *end_ret are set to the state
 * struct that was found; if nothing was found, 1 is returned.
 */
1451int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1452 u64 *start_ret, u64 *end_ret, unsigned long bits,
1453 struct extent_state **cached_state)
1454{
1455 struct extent_state *state;
1456 struct rb_node *n;
1457 int ret = 1;
1458
1459 spin_lock(&tree->lock);
1460 if (cached_state && *cached_state) {
1461 state = *cached_state;
1462 if (state->end == start - 1 && state->tree) {
1463 n = rb_next(&state->rb_node);
1464 while (n) {
1465 state = rb_entry(n, struct extent_state,
1466 rb_node);
1467 if (state->state & bits)
1468 goto got_it;
1469 n = rb_next(n);
1470 }
1471 free_extent_state(*cached_state);
1472 *cached_state = NULL;
1473 goto out;
1474 }
1475 free_extent_state(*cached_state);
1476 *cached_state = NULL;
1477 }
1478
1479 state = find_first_extent_bit_state(tree, start, bits);
1480got_it:
1481 if (state) {
1482 cache_state(state, cached_state);
1483 *start_ret = state->start;
1484 *end_ret = state->end;
1485 ret = 0;
1486 }
1487out:
1488 spin_unlock(&tree->lock);
1489 return ret;
1490}
1491
/*
 * Find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  *start and *end are used to return the range.
 *
 * A non-zero value is returned if we find something, 0 if nothing was in
 * the tree.
 */
1498static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1499 u64 *start, u64 *end, u64 max_bytes,
1500 struct extent_state **cached_state)
1501{
1502 struct rb_node *node;
1503 struct extent_state *state;
1504 u64 cur_start = *start;
1505 u64 found = 0;
1506 u64 total_bytes = 0;
1507
1508 spin_lock(&tree->lock);
1509
1510 /*
1511 * this search will find all the extents that end after
1512 * our range starts.
1513 */
1514 node = tree_search(tree, cur_start);
1515 if (!node) {
1516 if (!found)
1517 *end = (u64)-1;
1518 goto out;
1519 }
1520
1521 while (1) {
1522 state = rb_entry(node, struct extent_state, rb_node);
1523 if (found && (state->start != cur_start ||
1524 (state->state & EXTENT_BOUNDARY))) {
1525 goto out;
1526 }
1527 if (!(state->state & EXTENT_DELALLOC)) {
1528 if (!found)
1529 *end = state->end;
1530 goto out;
1531 }
1532 if (!found) {
1533 *start = state->start;
1534 *cached_state = state;
1535 atomic_inc(&state->refs);
1536 }
1537 found++;
1538 *end = state->end;
1539 cur_start = state->end + 1;
1540 node = rb_next(node);
1541 total_bytes += state->end - state->start + 1;
1542 if (total_bytes >= max_bytes)
1543 break;
1544 if (!node)
1545 break;
1546 }
1547out:
1548 spin_unlock(&tree->lock);
1549 return found;
1550}
1551
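/*
 * Unlock every page covering the byte range [start, end] except
 * 'locked_page', which the caller remains responsible for.
 */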
1552static noinline void __unlock_for_delalloc(struct inode *inode,
1553 struct page *locked_page,
1554 u64 start, u64 end)
1555{
1556 int ret;
1557 struct page *pages[16];
1558 unsigned long index = start >> PAGE_CACHE_SHIFT;
1559 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1560 unsigned long nr_pages = end_index - index + 1;
1561 int i;
1562
1563 if (index == locked_page->index && end_index == index)
1564 return;
1565
1566 while (nr_pages > 0) {
1567 ret = find_get_pages_contig(inode->i_mapping, index,
1568 min_t(unsigned long, nr_pages,
1569 ARRAY_SIZE(pages)), pages);
1570 for (i = 0; i < ret; i++) {
1571 if (pages[i] != locked_page)
1572 unlock_page(pages[i]);
1573 page_cache_release(pages[i]);
1574 }
1575 nr_pages -= ret;
1576 index += ret;
1577 cond_resched();
1578 }
1579}
1580
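/*
 * Lock every page covering [delalloc_start, delalloc_end] except
 * 'locked_page', which the caller has already locked.  Returns 0 on success,
 * or -EAGAIN (after unlocking everything locked so far) if a page has gone
 * away, is no longer dirty, or no longer belongs to this mapping.
 */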
1581static noinline int lock_delalloc_pages(struct inode *inode,
1582 struct page *locked_page,
1583 u64 delalloc_start,
1584 u64 delalloc_end)
1585{
1586 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1587 unsigned long start_index = index;
1588 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1589 unsigned long pages_locked = 0;
1590 struct page *pages[16];
1591 unsigned long nrpages;
1592 int ret;
1593 int i;
1594
1595 /* the caller is responsible for locking the start index */
1596 if (index == locked_page->index && index == end_index)
1597 return 0;
1598
1599 /* skip the page at the start index */
1600 nrpages = end_index - index + 1;
1601 while (nrpages > 0) {
1602 ret = find_get_pages_contig(inode->i_mapping, index,
1603 min_t(unsigned long,
1604 nrpages, ARRAY_SIZE(pages)), pages);
1605 if (ret == 0) {
1606 ret = -EAGAIN;
1607 goto done;
1608 }
1609 /* now we have an array of pages, lock them all */
1610 for (i = 0; i < ret; i++) {
1611 /*
1612 * the caller is taking responsibility for
1613 * locked_page
1614 */
1615 if (pages[i] != locked_page) {
1616 lock_page(pages[i]);
1617 if (!PageDirty(pages[i]) ||
1618 pages[i]->mapping != inode->i_mapping) {
1619 ret = -EAGAIN;
1620 unlock_page(pages[i]);
1621 page_cache_release(pages[i]);
1622 goto done;
1623 }
1624 }
1625 page_cache_release(pages[i]);
1626 pages_locked++;
1627 }
1628 nrpages -= ret;
1629 index += ret;
1630 cond_resched();
1631 }
1632 ret = 0;
1633done:
1634 if (ret && pages_locked) {
1635 __unlock_for_delalloc(inode, locked_page,
1636 delalloc_start,
1637 ((u64)(start_index + pages_locked - 1)) <<
1638 PAGE_CACHE_SHIFT);
1639 }
1640 return ret;
1641}
1642
/*
 * Find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes', and lock both the pages and the extent state
 * for that range.  *start and *end are used to return the range.
 *
 * A non-zero value is returned if we find and lock something, 0 if
 * nothing was in the tree.
 */
1649STATIC u64 find_lock_delalloc_range(struct inode *inode,
1650 struct extent_io_tree *tree,
1651 struct page *locked_page, u64 *start,
1652 u64 *end, u64 max_bytes)
1653{
1654 u64 delalloc_start;
1655 u64 delalloc_end;
1656 u64 found;
1657 struct extent_state *cached_state = NULL;
1658 int ret;
1659 int loops = 0;
1660
1661again:
1662 /* step one, find a bunch of delalloc bytes starting at start */
1663 delalloc_start = *start;
1664 delalloc_end = 0;
1665 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1666 max_bytes, &cached_state);
1667 if (!found || delalloc_end <= *start) {
1668 *start = delalloc_start;
1669 *end = delalloc_end;
1670 free_extent_state(cached_state);
1671 return 0;
1672 }
1673
1674 /*
1675 * start comes from the offset of locked_page. We have to lock
1676 * pages in order, so we can't process delalloc bytes before
1677 * locked_page
1678 */
1679 if (delalloc_start < *start)
1680 delalloc_start = *start;
1681
1682 /*
1683 * make sure to limit the number of pages we try to lock down
1684 */
1685 if (delalloc_end + 1 - delalloc_start > max_bytes)
1686 delalloc_end = delalloc_start + max_bytes - 1;
1687
1688 /* step two, lock all the pages after the page that has start */
1689 ret = lock_delalloc_pages(inode, locked_page,
1690 delalloc_start, delalloc_end);
1691 if (ret == -EAGAIN) {
		/*
		 * Some of the pages are gone, let's avoid looping by
		 * shortening the size of the delalloc range we're searching.
		 */
1695 free_extent_state(cached_state);
1696 if (!loops) {
1697 max_bytes = PAGE_CACHE_SIZE;
1698 loops = 1;
1699 goto again;
1700 } else {
1701 found = 0;
1702 goto out_failed;
1703 }
1704 }
1705 BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
1706
1707 /* step three, lock the state bits for the whole range */
1708 lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);
1709
1710 /* then test to make sure it is all still delalloc */
1711 ret = test_range_bit(tree, delalloc_start, delalloc_end,
1712 EXTENT_DELALLOC, 1, cached_state);
1713 if (!ret) {
1714 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1715 &cached_state, GFP_NOFS);
1716 __unlock_for_delalloc(inode, locked_page,
1717 delalloc_start, delalloc_end);
1718 cond_resched();
1719 goto again;
1720 }
1721 free_extent_state(cached_state);
1722 *start = delalloc_start;
1723 *end = delalloc_end;
1724out_failed:
1725 return found;
1726}
1727
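/*
 * Clear the given bits from the io tree for the byte range, then apply the
 * requested page_ops to every page in the range.  Apart from
 * PAGE_SET_PRIVATE2, the page operations are skipped for 'locked_page'.
 */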
1728int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1729 struct page *locked_page,
1730 unsigned long clear_bits,
1731 unsigned long page_ops)
1732{
1733 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1734 int ret;
1735 struct page *pages[16];
1736 unsigned long index = start >> PAGE_CACHE_SHIFT;
1737 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1738 unsigned long nr_pages = end_index - index + 1;
1739 int i;
1740
1741 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1742 if (page_ops == 0)
1743 return 0;
1744
1745 while (nr_pages > 0) {
1746 ret = find_get_pages_contig(inode->i_mapping, index,
1747 min_t(unsigned long,
1748 nr_pages, ARRAY_SIZE(pages)), pages);
1749 for (i = 0; i < ret; i++) {
1750
1751 if (page_ops & PAGE_SET_PRIVATE2)
1752 SetPagePrivate2(pages[i]);
1753
1754 if (pages[i] == locked_page) {
1755 page_cache_release(pages[i]);
1756 continue;
1757 }
1758 if (page_ops & PAGE_CLEAR_DIRTY)
1759 clear_page_dirty_for_io(pages[i]);
1760 if (page_ops & PAGE_SET_WRITEBACK)
1761 set_page_writeback(pages[i]);
1762 if (page_ops & PAGE_END_WRITEBACK)
1763 end_page_writeback(pages[i]);
1764 if (page_ops & PAGE_UNLOCK)
1765 unlock_page(pages[i]);
1766 page_cache_release(pages[i]);
1767 }
1768 nr_pages -= ret;
1769 index += ret;
1770 cond_resched();
1771 }
1772 return 0;
1773}
1774
1775/*
1776 * count the number of bytes in the tree that have a given bit(s)
1777 * set. This can be fairly slow, except for EXTENT_DIRTY which is
1778 * cached. The total number found is returned.
1779 */
1780u64 count_range_bits(struct extent_io_tree *tree,
1781 u64 *start, u64 search_end, u64 max_bytes,
1782 unsigned long bits, int contig)
1783{
1784 struct rb_node *node;
1785 struct extent_state *state;
1786 u64 cur_start = *start;
1787 u64 total_bytes = 0;
1788 u64 last = 0;
1789 int found = 0;
1790
1791 if (WARN_ON(search_end <= cur_start))
1792 return 0;
1793
1794 spin_lock(&tree->lock);
1795 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1796 total_bytes = tree->dirty_bytes;
1797 goto out;
1798 }
1799 /*
1800 * this search will find all the extents that end after
1801 * our range starts.
1802 */
1803 node = tree_search(tree, cur_start);
1804 if (!node)
1805 goto out;
1806
1807 while (1) {
1808 state = rb_entry(node, struct extent_state, rb_node);
1809 if (state->start > search_end)
1810 break;
1811 if (contig && found && state->start > last + 1)
1812 break;
1813 if (state->end >= cur_start && (state->state & bits) == bits) {
1814 total_bytes += min(search_end, state->end) + 1 -
1815 max(cur_start, state->start);
1816 if (total_bytes >= max_bytes)
1817 break;
1818 if (!found) {
1819 *start = max(cur_start, state->start);
1820 found = 1;
1821 }
1822 last = state->end;
1823 } else if (contig && found) {
1824 break;
1825 }
1826 node = rb_next(node);
1827 if (!node)
1828 break;
1829 }
1830out:
1831 spin_unlock(&tree->lock);
1832 return total_bytes;
1833}
1834
/*
 * Set the private field for a given byte offset in the tree.  If there isn't
 * an extent_state starting at that offset, -ENOENT is returned and nothing
 * is changed.
 */
1839static int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1840{
1841 struct rb_node *node;
1842 struct extent_state *state;
1843 int ret = 0;
1844
1845 spin_lock(&tree->lock);
1846 /*
1847 * this search will find all the extents that end after
1848 * our range starts.
1849 */
1850 node = tree_search(tree, start);
1851 if (!node) {
1852 ret = -ENOENT;
1853 goto out;
1854 }
1855 state = rb_entry(node, struct extent_state, rb_node);
1856 if (state->start != start) {
1857 ret = -ENOENT;
1858 goto out;
1859 }
1860 state->private = private;
1861out:
1862 spin_unlock(&tree->lock);
1863 return ret;
1864}
1865
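/*
 * Read back the private field for a given byte offset in the tree.  Returns
 * -ENOENT if there is no extent_state starting exactly at 'start'.
 */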
1866int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1867{
1868 struct rb_node *node;
1869 struct extent_state *state;
1870 int ret = 0;
1871
1872 spin_lock(&tree->lock);
1873 /*
1874 * this search will find all the extents that end after
1875 * our range starts.
1876 */
1877 node = tree_search(tree, start);
1878 if (!node) {
1879 ret = -ENOENT;
1880 goto out;
1881 }
1882 state = rb_entry(node, struct extent_state, rb_node);
1883 if (state->start != start) {
1884 ret = -ENOENT;
1885 goto out;
1886 }
1887 *private = state->private;
1888out:
1889 spin_unlock(&tree->lock);
1890 return ret;
1891}
1892
/*
 * Search a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if the whole range is covered by
 * extents that have all the bits set.  Otherwise, 1 is returned if any bit
 * in the range is found set.
 */
1899int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1900 unsigned long bits, int filled, struct extent_state *cached)
1901{
1902 struct extent_state *state = NULL;
1903 struct rb_node *node;
1904 int bitset = 0;
1905
1906 spin_lock(&tree->lock);
1907 if (cached && cached->tree && cached->start <= start &&
1908 cached->end > start)
1909 node = &cached->rb_node;
1910 else
1911 node = tree_search(tree, start);
1912 while (node && start <= end) {
1913 state = rb_entry(node, struct extent_state, rb_node);
1914
1915 if (filled && state->start > start) {
1916 bitset = 0;
1917 break;
1918 }
1919
1920 if (state->start > end)
1921 break;
1922
1923 if (state->state & bits) {
1924 bitset = 1;
1925 if (!filled)
1926 break;
1927 } else if (filled) {
1928 bitset = 0;
1929 break;
1930 }
1931
1932 if (state->end == (u64)-1)
1933 break;
1934
1935 start = state->end + 1;
1936 if (start > end)
1937 break;
1938 node = rb_next(node);
1939 if (!node) {
1940 if (filled)
1941 bitset = 0;
1942 break;
1943 }
1944 }
1945 spin_unlock(&tree->lock);
1946 return bitset;
1947}
1948
1949/*
1950 * helper function to set a given page up to date if all the
1951 * extents in the tree for that page are up to date
1952 */
1953static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
1954{
1955 u64 start = page_offset(page);
1956 u64 end = start + PAGE_CACHE_SIZE - 1;
1957 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1958 SetPageUptodate(page);
1959}
1960
/*
 * When IO fails, either with EIO or because csum verification fails, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
1969struct io_failure_record {
1970 struct page *page;
1971 u64 start;
1972 u64 len;
1973 u64 logical;
1974 unsigned long bio_flags;
1975 int this_mirror;
1976 int failed_mirror;
1977 int in_validation;
1978};
1979
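/*
 * Release an io_failure_record: clear its bits in both the failure tree and
 * the inode's io tree, then free the record itself.
 */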
1980static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
1981 int did_repair)
1982{
1983 int ret;
1984 int err = 0;
1985 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1986
1987 set_state_private(failure_tree, rec->start, 0);
1988 ret = clear_extent_bits(failure_tree, rec->start,
1989 rec->start + rec->len - 1,
1990 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1991 if (ret)
1992 err = ret;
1993
1994 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1995 rec->start + rec->len - 1,
1996 EXTENT_DAMAGED, GFP_NOFS);
1997 if (ret && !err)
1998 err = ret;
1999
2000 kfree(rec);
2001 return err;
2002}
2003
2004/*
2005 * this bypasses the standard btrfs submit functions deliberately, as
2006 * the standard behavior is to write all copies in a raid setup. here we only
2007 * want to write the one bad copy. so we do the mapping for ourselves and issue
2008 * submit_bio directly.
2009 * to avoid any synchronization issues, wait for the data after writing, which
2010 * actually prevents the read that triggered the error from finishing.
2011 * currently, there can be no more than two copies of every data bit. thus,
2012 * exactly one rewrite is required.
2013 */
2014int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
2015 u64 length, u64 logical, struct page *page,
2016 int mirror_num)
2017{
2018 struct bio *bio;
2019 struct btrfs_device *dev;
2020 u64 map_length = 0;
2021 u64 sector;
2022 struct btrfs_bio *bbio = NULL;
2023 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
2024 int ret;
2025
2026 ASSERT(!(fs_info->sb->s_flags & MS_RDONLY));
2027 BUG_ON(!mirror_num);
2028
2029 /* we can't repair anything in raid56 yet */
2030 if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
2031 return 0;
2032
2033 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
2034 if (!bio)
2035 return -EIO;
2036 bio->bi_iter.bi_size = 0;
2037 map_length = length;
2038
2039 ret = btrfs_map_block(fs_info, WRITE, logical,
2040 &map_length, &bbio, mirror_num);
2041 if (ret) {
2042 bio_put(bio);
2043 return -EIO;
2044 }
2045 BUG_ON(mirror_num != bbio->mirror_num);
2046 sector = bbio->stripes[mirror_num-1].physical >> 9;
2047 bio->bi_iter.bi_sector = sector;
2048 dev = bbio->stripes[mirror_num-1].dev;
2049 kfree(bbio);
2050 if (!dev || !dev->bdev || !dev->writeable) {
2051 bio_put(bio);
2052 return -EIO;
2053 }
2054 bio->bi_bdev = dev->bdev;
2055 bio_add_page(bio, page, length, start - page_offset(page));
2056
2057 if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
2058 /* try to remap that extent elsewhere? */
2059 bio_put(bio);
2060 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
2061 return -EIO;
2062 }
2063
2064 printk_ratelimited_in_rcu(KERN_INFO
2065 "BTRFS: read error corrected: ino %lu off %llu "
2066 "(dev %s sector %llu)\n", page->mapping->host->i_ino,
2067 start, rcu_str_deref(dev->name), sector);
2068
2069 bio_put(bio);
2070 return 0;
2071}
2072
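/*
 * Rewrite a damaged copy of an extent buffer, one page at a time, using the
 * good data we hold in memory.  Returns -EROFS on a read-only filesystem.
 */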
2073int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
2074 int mirror_num)
2075{
2076 u64 start = eb->start;
2077 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
2078 int ret = 0;
2079
2080 if (root->fs_info->sb->s_flags & MS_RDONLY)
2081 return -EROFS;
2082
2083 for (i = 0; i < num_pages; i++) {
2084 struct page *p = extent_buffer_page(eb, i);
2085 ret = repair_io_failure(root->fs_info, start, PAGE_CACHE_SIZE,
2086 start, p, mirror_num);
2087 if (ret)
2088 break;
2089 start += PAGE_CACHE_SIZE;
2090 }
2091
2092 return ret;
2093}
2094
2095/*
2096 * each time an IO finishes, we do a fast check in the IO failure tree
2097 * to see if we need to process or clean up an io_failure_record
2098 */
2099static int clean_io_failure(u64 start, struct page *page)
2100{
2101 u64 private;
2102 u64 private_failure;
2103 struct io_failure_record *failrec;
2104 struct inode *inode = page->mapping->host;
2105 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2106 struct extent_state *state;
2107 int num_copies;
2108 int did_repair = 0;
2109 int ret;
2110
2111 private = 0;
2112 ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
2113 (u64)-1, 1, EXTENT_DIRTY, 0);
2114 if (!ret)
2115 return 0;
2116
2117 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
2118 &private_failure);
2119 if (ret)
2120 return 0;
2121
2122 failrec = (struct io_failure_record *)(unsigned long) private_failure;
2123 BUG_ON(!failrec->this_mirror);
2124
2125 if (failrec->in_validation) {
2126 /* there was no real error, just free the record */
2127 pr_debug("clean_io_failure: freeing dummy error at %llu\n",
2128 failrec->start);
2129 did_repair = 1;
2130 goto out;
2131 }
2132 if (fs_info->sb->s_flags & MS_RDONLY)
2133 goto out;
2134
2135 spin_lock(&BTRFS_I(inode)->io_tree.lock);
2136 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
2137 failrec->start,
2138 EXTENT_LOCKED);
2139 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
2140
2141 if (state && state->start <= failrec->start &&
2142 state->end >= failrec->start + failrec->len - 1) {
2143 num_copies = btrfs_num_copies(fs_info, failrec->logical,
2144 failrec->len);
2145 if (num_copies > 1) {
2146 ret = repair_io_failure(fs_info, start, failrec->len,
2147 failrec->logical, page,
2148 failrec->failed_mirror);
2149 did_repair = !ret;
2150 }
2151 ret = 0;
2152 }
2153
2154out:
2155 if (!ret)
2156 ret = free_io_failure(inode, failrec, did_repair);
2157
2158 return ret;
2159}
2160
/*
 * This is a generic handler for readpage errors (default
 * readpage_io_failed_hook).  If other copies exist, read those and write back
 * good data to the failed position.  It does not try to remap the failed
 * extent elsewhere, hoping the device will be smart enough to do this as
 * needed.
 */
2169static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
2170 struct page *page, u64 start, u64 end,
2171 int failed_mirror)
2172{
2173 struct io_failure_record *failrec = NULL;
2174 u64 private;
2175 struct extent_map *em;
2176 struct inode *inode = page->mapping->host;
2177 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2178 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2179 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2180 struct bio *bio;
2181 struct btrfs_io_bio *btrfs_failed_bio;
2182 struct btrfs_io_bio *btrfs_bio;
2183 int num_copies;
2184 int ret;
2185 int read_mode;
2186 u64 logical;
2187
2188 BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2189
2190 ret = get_state_private(failure_tree, start, &private);
2191 if (ret) {
2192 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2193 if (!failrec)
2194 return -ENOMEM;
2195 failrec->start = start;
2196 failrec->len = end - start + 1;
2197 failrec->this_mirror = 0;
2198 failrec->bio_flags = 0;
2199 failrec->in_validation = 0;
2200
2201 read_lock(&em_tree->lock);
2202 em = lookup_extent_mapping(em_tree, start, failrec->len);
2203 if (!em) {
2204 read_unlock(&em_tree->lock);
2205 kfree(failrec);
2206 return -EIO;
2207 }
2208
2209 if (em->start > start || em->start + em->len <= start) {
2210 free_extent_map(em);
2211 em = NULL;
2212 }
2213 read_unlock(&em_tree->lock);
2214
2215 if (!em) {
2216 kfree(failrec);
2217 return -EIO;
2218 }
2219 logical = start - em->start;
2220 logical = em->block_start + logical;
2221 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2222 logical = em->block_start;
2223 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2224 extent_set_compress_type(&failrec->bio_flags,
2225 em->compress_type);
2226 }
2227 pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
2228 "len=%llu\n", logical, start, failrec->len);
2229 failrec->logical = logical;
2230 free_extent_map(em);
2231
2232 /* set the bits in the private failure tree */
2233 ret = set_extent_bits(failure_tree, start, end,
2234 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2235 if (ret >= 0)
2236 ret = set_state_private(failure_tree, start,
2237 (u64)(unsigned long)failrec);
2238 /* set the bits in the inode's tree */
2239 if (ret >= 0)
2240 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2241 GFP_NOFS);
2242 if (ret < 0) {
2243 kfree(failrec);
2244 return ret;
2245 }
2246 } else {
2247 failrec = (struct io_failure_record *)(unsigned long)private;
2248 pr_debug("bio_readpage_error: (found) logical=%llu, "
2249 "start=%llu, len=%llu, validation=%d\n",
2250 failrec->logical, failrec->start, failrec->len,
2251 failrec->in_validation);
2252 /*
2253 * when data can be on disk more than twice, add to failrec here
2254 * (e.g. with a list for failed_mirror) to make
2255 * clean_io_failure() clean all those errors at once.
2256 */
2257 }
2258 num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
2259 failrec->logical, failrec->len);
2260 if (num_copies == 1) {
2261 /*
2262 * we only have a single copy of the data, so don't bother with
2263 * all the retry and error correction code that follows. no
2264 * matter what the error is, it is very likely to persist.
2265 */
2266 pr_debug("bio_readpage_error: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
2267 num_copies, failrec->this_mirror, failed_mirror);
2268 free_io_failure(inode, failrec, 0);
2269 return -EIO;
2270 }
2271
2272 /*
2273 * there are two premises:
2274 * a) deliver good data to the caller
2275 * b) correct the bad sectors on disk
2276 */
2277 if (failed_bio->bi_vcnt > 1) {
2278 /*
2279 * to fulfill b), we need to know the exact failing sectors, as
2280 * we don't want to rewrite any more than the failed ones. thus,
2281 * we need separate read requests for the failed bio
2282 *
2283 * if the following BUG_ON triggers, our validation request got
2284 * merged. we need separate requests for our algorithm to work.
2285 */
2286 BUG_ON(failrec->in_validation);
2287 failrec->in_validation = 1;
2288 failrec->this_mirror = failed_mirror;
2289 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2290 } else {
2291 /*
2292 * we're ready to fulfill a) and b) alongside. get a good copy
2293 * of the failed sector and if we succeed, we have setup
2294 * everything for repair_io_failure to do the rest for us.
2295 */
2296 if (failrec->in_validation) {
2297 BUG_ON(failrec->this_mirror != failed_mirror);
2298 failrec->in_validation = 0;
2299 failrec->this_mirror = 0;
2300 }
2301 failrec->failed_mirror = failed_mirror;
2302 failrec->this_mirror++;
2303 if (failrec->this_mirror == failed_mirror)
2304 failrec->this_mirror++;
2305 read_mode = READ_SYNC;
2306 }
2307
2308 if (failrec->this_mirror > num_copies) {
2309 pr_debug("bio_readpage_error: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
2310 num_copies, failrec->this_mirror, failed_mirror);
2311 free_io_failure(inode, failrec, 0);
2312 return -EIO;
2313 }
2314
2315 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
2316 if (!bio) {
2317 free_io_failure(inode, failrec, 0);
2318 return -EIO;
2319 }
2320 bio->bi_end_io = failed_bio->bi_end_io;
2321 bio->bi_iter.bi_sector = failrec->logical >> 9;
2322 bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2323 bio->bi_iter.bi_size = 0;
2324
2325 btrfs_failed_bio = btrfs_io_bio(failed_bio);
2326 if (btrfs_failed_bio->csum) {
2327 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2328 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
2329
2330 btrfs_bio = btrfs_io_bio(bio);
2331 btrfs_bio->csum = btrfs_bio->csum_inline;
2332 phy_offset >>= inode->i_sb->s_blocksize_bits;
2333 phy_offset *= csum_size;
2334 memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + phy_offset,
2335 csum_size);
2336 }
2337
2338 bio_add_page(bio, page, failrec->len, start - page_offset(page));
2339
2340 pr_debug("bio_readpage_error: submitting new read[%#x] to "
2341 "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
2342 failrec->this_mirror, num_copies, failrec->in_validation);
2343
2344 ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2345 failrec->this_mirror,
2346 failrec->bio_flags, 0);
2347 return ret;
2348}
2349
2350/* lots and lots of room for performance fixes in the end_bio funcs */
2351
2352int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2353{
2354 int uptodate = (err == 0);
2355 struct extent_io_tree *tree;
2356 int ret;
2357
2358 tree = &BTRFS_I(page->mapping->host)->io_tree;
2359
2360 if (tree->ops && tree->ops->writepage_end_io_hook) {
2361 ret = tree->ops->writepage_end_io_hook(page, start,
2362 end, NULL, uptodate);
2363 if (ret)
2364 uptodate = 0;
2365 }
2366
2367 if (!uptodate) {
2368 ClearPageUptodate(page);
2369 SetPageError(page);
2370 }
2371 return 0;
2372}
2373
2374/*
2375 * after a writepage IO is done, we need to:
2376 * clear the uptodate bits on error
2377 * clear the writeback bits in the extent tree for this IO
2378 * end_page_writeback if the page has no more pending IO
2379 *
2380 * Scheduling is not allowed, so the extent state tree is expected
2381 * to have one and only one object corresponding to this IO.
2382 */
2383static void end_bio_extent_writepage(struct bio *bio, int err)
2384{
2385 struct bio_vec *bvec;
2386 u64 start;
2387 u64 end;
2388 int i;
2389
2390 bio_for_each_segment_all(bvec, bio, i) {
2391 struct page *page = bvec->bv_page;
2392
2393 /* We always issue full-page reads, but if some block
2394 * in a page fails to read, blk_update_request() will
2395 * advance bv_offset and adjust bv_len to compensate.
2396 * Print a warning for nonzero offsets, and an error
2397 * if they don't add up to a full page. */
2398 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
2399 if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
2400 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
2401 "partial page write in btrfs with offset %u and length %u",
2402 bvec->bv_offset, bvec->bv_len);
2403 else
2404 btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
2405 "incomplete page write in btrfs with offset %u and "
2406 "length %u",
2407 bvec->bv_offset, bvec->bv_len);
2408 }
2409
2410 start = page_offset(page);
2411 end = start + bvec->bv_offset + bvec->bv_len - 1;
2412
2413 if (end_extent_writepage(page, err, start, end))
2414 continue;
2415
2416 end_page_writeback(page);
2417 }
2418
2419 bio_put(bio);
2420}
2421
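/*
 * After a read of 'len' bytes starting at 'start' completes, mark the range
 * uptodate in the io tree (when the read succeeded and the tree tracks the
 * uptodate bit) and unlock it so waiters can proceed.
 */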
2422static void
2423endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2424 int uptodate)
2425{
2426 struct extent_state *cached = NULL;
2427 u64 end = start + len - 1;
2428
2429 if (uptodate && tree->track_uptodate)
2430 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
2431 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
2432}
2433
2434/*
2435 * after a readpage IO is done, we need to:
2436 * clear the uptodate bits on error
2437 * set the uptodate bits if things worked
2438 * set the page up to date if all extents in the tree are uptodate
2439 * clear the lock bit in the extent tree
2440 * unlock the page if there are no other extents locked for it
2441 *
2442 * Scheduling is not allowed, so the extent state tree is expected
2443 * to have one and only one object corresponding to this IO.
2444 */
2445static void end_bio_extent_readpage(struct bio *bio, int err)
2446{
2447 struct bio_vec *bvec;
2448 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
2449 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
2450 struct extent_io_tree *tree;
2451 u64 offset = 0;
2452 u64 start;
2453 u64 end;
2454 u64 len;
2455 u64 extent_start = 0;
2456 u64 extent_len = 0;
2457 int mirror;
2458 int ret;
2459 int i;
2460
2461 if (err)
2462 uptodate = 0;
2463
2464 bio_for_each_segment_all(bvec, bio, i) {
2465 struct page *page = bvec->bv_page;
2466 struct inode *inode = page->mapping->host;
2467
2468 pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
2469 "mirror=%lu\n", (u64)bio->bi_iter.bi_sector, err,
2470 io_bio->mirror_num);
2471 tree = &BTRFS_I(inode)->io_tree;
2472
2473 /* We always issue full-page reads, but if some block
2474 * in a page fails to read, blk_update_request() will
2475 * advance bv_offset and adjust bv_len to compensate.
2476 * Print a warning for nonzero offsets, and an error
2477 * if they don't add up to a full page. */
2478 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
2479 if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
2480 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
2481 "partial page read in btrfs with offset %u and length %u",
2482 bvec->bv_offset, bvec->bv_len);
2483 else
2484 btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
2485 "incomplete page read in btrfs with offset %u and "
2486 "length %u",
2487 bvec->bv_offset, bvec->bv_len);
2488 }
2489
2490 start = page_offset(page);
2491 end = start + bvec->bv_offset + bvec->bv_len - 1;
2492 len = bvec->bv_len;
2493
2494 mirror = io_bio->mirror_num;
2495 if (likely(uptodate && tree->ops &&
2496 tree->ops->readpage_end_io_hook)) {
2497 ret = tree->ops->readpage_end_io_hook(io_bio, offset,
2498 page, start, end,
2499 mirror);
2500 if (ret)
2501 uptodate = 0;
2502 else
2503 clean_io_failure(start, page);
2504 }
2505
2506 if (likely(uptodate))
2507 goto readpage_ok;
2508
2509 if (tree->ops && tree->ops->readpage_io_failed_hook) {
2510 ret = tree->ops->readpage_io_failed_hook(page, mirror);
2511 if (!ret && !err &&
2512 test_bit(BIO_UPTODATE, &bio->bi_flags))
2513 uptodate = 1;
2514 } else {
2515 /*
2516 * The generic bio_readpage_error handles errors the
2517 * following way: If possible, new read requests are
2518 * created and submitted and will end up in
2519 * end_bio_extent_readpage as well (if we're lucky, not
2520 * in the !uptodate case). In that case it returns 0 and
2521 * we just go on with the next page in our bio. If it
2522 * can't handle the error it will return -EIO and we
2523 * remain responsible for that page.
2524 */
2525 ret = bio_readpage_error(bio, offset, page, start, end,
2526 mirror);
2527 if (ret == 0) {
2528 uptodate =
2529 test_bit(BIO_UPTODATE, &bio->bi_flags);
2530 if (err)
2531 uptodate = 0;
2532 continue;
2533 }
2534 }
2535readpage_ok:
2536 if (likely(uptodate)) {
2537 loff_t i_size = i_size_read(inode);
2538 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2539 unsigned offset;
2540
2541 /* Zero out the end if this page straddles i_size */
2542 offset = i_size & (PAGE_CACHE_SIZE-1);
2543 if (page->index == end_index && offset)
2544 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2545 SetPageUptodate(page);
2546 } else {
2547 ClearPageUptodate(page);
2548 SetPageError(page);
2549 }
2550 unlock_page(page);
2551 offset += len;
2552
2553 if (unlikely(!uptodate)) {
2554 if (extent_len) {
2555 endio_readpage_release_extent(tree,
2556 extent_start,
2557 extent_len, 1);
2558 extent_start = 0;
2559 extent_len = 0;
2560 }
2561 endio_readpage_release_extent(tree, start,
2562 end - start + 1, 0);
2563 } else if (!extent_len) {
2564 extent_start = start;
2565 extent_len = end + 1 - start;
2566 } else if (extent_start + extent_len == start) {
2567 extent_len += end + 1 - start;
2568 } else {
2569 endio_readpage_release_extent(tree, extent_start,
2570 extent_len, uptodate);
2571 extent_start = start;
2572 extent_len = end + 1 - start;
2573 }
2574 }
2575
2576 if (extent_len)
2577 endio_readpage_release_extent(tree, extent_start, extent_len,
2578 uptodate);
2579 if (io_bio->end_io)
2580 io_bio->end_io(io_bio, err);
2581 bio_put(bio);
2582}
2583
2584/*
2585 * this allocates from the btrfs_bioset. We're returning a bio right now
2586 * but you can call btrfs_io_bio for the appropriate container_of magic
2587 */
2588struct bio *
2589btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2590 gfp_t gfp_flags)
2591{
2592 struct btrfs_io_bio *btrfs_bio;
2593 struct bio *bio;
2594
2595 bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset);
2596
2597 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
2598 while (!bio && (nr_vecs /= 2)) {
2599 bio = bio_alloc_bioset(gfp_flags,
2600 nr_vecs, btrfs_bioset);
2601 }
2602 }
2603
2604 if (bio) {
2605 bio->bi_bdev = bdev;
2606 bio->bi_iter.bi_sector = first_sector;
2607 btrfs_bio = btrfs_io_bio(bio);
2608 btrfs_bio->csum = NULL;
2609 btrfs_bio->csum_allocated = NULL;
2610 btrfs_bio->end_io = NULL;
2611 }
2612 return bio;
2613}
2614
2615struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
2616{
2617 return bio_clone_bioset(bio, gfp_mask, btrfs_bioset);
2618}
2619
2620
2621/* this also allocates from the btrfs_bioset */
2622struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
2623{
2624 struct btrfs_io_bio *btrfs_bio;
2625 struct bio *bio;
2626
2627 bio = bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset);
2628 if (bio) {
2629 btrfs_bio = btrfs_io_bio(bio);
2630 btrfs_bio->csum = NULL;
2631 btrfs_bio->csum_allocated = NULL;
2632 btrfs_bio->end_io = NULL;
2633 }
2634 return bio;
2635}
2636
2637
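/*
 * Submit a fully built bio, either through the tree's submit_bio_hook when
 * one is registered or directly via btrfsic_submit_bio().  Returns an error
 * if the bio could not be submitted.
 */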
2638static int __must_check submit_one_bio(int rw, struct bio *bio,
2639 int mirror_num, unsigned long bio_flags)
2640{
2641 int ret = 0;
2642 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2643 struct page *page = bvec->bv_page;
2644 struct extent_io_tree *tree = bio->bi_private;
2645 u64 start;
2646
2647 start = page_offset(page) + bvec->bv_offset;
2648
2649 bio->bi_private = NULL;
2650
2651 bio_get(bio);
2652
2653 if (tree->ops && tree->ops->submit_bio_hook)
2654 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
2655 mirror_num, bio_flags, start);
2656 else
2657 btrfsic_submit_bio(rw, bio);
2658
2659 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2660 ret = -EOPNOTSUPP;
2661 bio_put(bio);
2662 return ret;
2663}
2664
2665static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
2666 unsigned long offset, size_t size, struct bio *bio,
2667 unsigned long bio_flags)
2668{
2669 int ret = 0;
2670 if (tree->ops && tree->ops->merge_bio_hook)
2671 ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
2672 bio_flags);
2673 BUG_ON(ret < 0);
2674 return ret;
}
2677
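/*
 * Add a page range to the bio cached in *bio_ret.  If the page is not
 * contiguous with the current bio, the bio flags differ, the merge hook
 * refuses the page, or the bio is full, the current bio is submitted first
 * and a new one is started for this page.
 */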
2678static int submit_extent_page(int rw, struct extent_io_tree *tree,
2679 struct page *page, sector_t sector,
2680 size_t size, unsigned long offset,
2681 struct block_device *bdev,
2682 struct bio **bio_ret,
2683 unsigned long max_pages,
2684 bio_end_io_t end_io_func,
2685 int mirror_num,
2686 unsigned long prev_bio_flags,
2687 unsigned long bio_flags)
2688{
2689 int ret = 0;
2690 struct bio *bio;
2691 int nr;
2692 int contig = 0;
2693 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
2694 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
2695 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
2696
2697 if (bio_ret && *bio_ret) {
2698 bio = *bio_ret;
2699 if (old_compressed)
2700 contig = bio->bi_iter.bi_sector == sector;
2701 else
2702 contig = bio_end_sector(bio) == sector;
2703
2704 if (prev_bio_flags != bio_flags || !contig ||
2705 merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
2706 bio_add_page(bio, page, page_size, offset) < page_size) {
2707 ret = submit_one_bio(rw, bio, mirror_num,
2708 prev_bio_flags);
2709 if (ret < 0)
2710 return ret;
2711 bio = NULL;
2712 } else {
2713 return 0;
2714 }
2715 }
2716 if (this_compressed)
2717 nr = BIO_MAX_PAGES;
2718 else
2719 nr = bio_get_nr_vecs(bdev);
2720
2721 bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
2722 if (!bio)
2723 return -ENOMEM;
2724
2725 bio_add_page(bio, page, page_size, offset);
2726 bio->bi_end_io = end_io_func;
2727 bio->bi_private = tree;
2728
2729 if (bio_ret)
2730 *bio_ret = bio;
2731 else
2732 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
2733
2734 return ret;
2735}
2736
2737static void attach_extent_buffer_page(struct extent_buffer *eb,
2738 struct page *page)
2739{
2740 if (!PagePrivate(page)) {
2741 SetPagePrivate(page);
2742 page_cache_get(page);
2743 set_page_private(page, (unsigned long)eb);
2744 } else {
2745 WARN_ON(page->private != (unsigned long)eb);
2746 }
2747}
2748
2749void set_page_extent_mapped(struct page *page)
2750{
2751 if (!PagePrivate(page)) {
2752 SetPagePrivate(page);
2753 page_cache_get(page);
2754 set_page_private(page, EXTENT_PAGE_PRIVATE);
2755 }
2756}
2757
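/*
 * Return the extent map covering 'start', reusing *em_cached when it still
 * covers that offset; otherwise drop the cached map, look up a fresh one via
 * get_extent() and cache that instead.
 */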
2758static struct extent_map *
2759__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
2760 u64 start, u64 len, get_extent_t *get_extent,
2761 struct extent_map **em_cached)
2762{
2763 struct extent_map *em;
2764
2765 if (em_cached && *em_cached) {
2766 em = *em_cached;
2767 if (extent_map_in_tree(em) && start >= em->start &&
2768 start < extent_map_end(em)) {
2769 atomic_inc(&em->refs);
2770 return em;
2771 }
2772
2773 free_extent_map(em);
2774 *em_cached = NULL;
2775 }
2776
2777 em = get_extent(inode, page, pg_offset, start, len, 0);
2778 if (em_cached && !IS_ERR_OR_NULL(em)) {
2779 BUG_ON(*em_cached);
2780 atomic_inc(&em->refs);
2781 *em_cached = em;
2782 }
2783 return em;
}

/*
2786 * basic readpage implementation. Locked extent state structs are inserted
2787 * into the tree that are removed when the IO is done (by the end_io
2788 * handlers)
2789 * XXX JDM: This needs looking at to ensure proper page locking
2790 */
2791static int __do_readpage(struct extent_io_tree *tree,
2792 struct page *page,
2793 get_extent_t *get_extent,
2794 struct extent_map **em_cached,
2795 struct bio **bio, int mirror_num,
2796 unsigned long *bio_flags, int rw)
2797{
2798 struct inode *inode = page->mapping->host;
2799 u64 start = page_offset(page);
2800 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2801 u64 end;
2802 u64 cur = start;
2803 u64 extent_offset;
2804 u64 last_byte = i_size_read(inode);
2805 u64 block_start;
2806 u64 cur_end;
2807 sector_t sector;
2808 struct extent_map *em;
2809 struct block_device *bdev;
2810 int ret;
2811 int nr = 0;
2812 int parent_locked = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
2813 size_t pg_offset = 0;
2814 size_t iosize;
2815 size_t disk_io_size;
2816 size_t blocksize = inode->i_sb->s_blocksize;
2817 unsigned long this_bio_flag = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
2818
2819 set_page_extent_mapped(page);
2820
2821 end = page_end;
2822 if (!PageUptodate(page)) {
2823 if (cleancache_get_page(page) == 0) {
2824 BUG_ON(blocksize != PAGE_SIZE);
2825 unlock_extent(tree, start, end);
2826 goto out;
2827 }
2828 }
2829
2830 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2831 char *userpage;
2832 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2833
2834 if (zero_offset) {
2835 iosize = PAGE_CACHE_SIZE - zero_offset;
2836 userpage = kmap_atomic(page);
2837 memset(userpage + zero_offset, 0, iosize);
2838 flush_dcache_page(page);
2839 kunmap_atomic(userpage);
2840 }
2841 }
2842 while (cur <= end) {
2843 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2844
2845 if (cur >= last_byte) {
2846 char *userpage;
2847 struct extent_state *cached = NULL;
2848
2849 iosize = PAGE_CACHE_SIZE - pg_offset;
2850 userpage = kmap_atomic(page);
2851 memset(userpage + pg_offset, 0, iosize);
2852 flush_dcache_page(page);
2853 kunmap_atomic(userpage);
2854 set_extent_uptodate(tree, cur, cur + iosize - 1,
2855 &cached, GFP_NOFS);
2856 if (!parent_locked)
2857 unlock_extent_cached(tree, cur,
2858 cur + iosize - 1,
2859 &cached, GFP_NOFS);
2860 break;
2861 }
2862 em = __get_extent_map(inode, page, pg_offset, cur,
2863 end - cur + 1, get_extent, em_cached);
2864 if (IS_ERR_OR_NULL(em)) {
2865 SetPageError(page);
2866 if (!parent_locked)
2867 unlock_extent(tree, cur, end);
2868 break;
2869 }
2870 extent_offset = cur - em->start;
2871 BUG_ON(extent_map_end(em) <= cur);
2872 BUG_ON(end < cur);
2873
2874 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2875 this_bio_flag |= EXTENT_BIO_COMPRESSED;
2876 extent_set_compress_type(&this_bio_flag,
2877 em->compress_type);
2878 }
2879
2880 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2881 cur_end = min(extent_map_end(em) - 1, end);
2882 iosize = ALIGN(iosize, blocksize);
2883 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2884 disk_io_size = em->block_len;
2885 sector = em->block_start >> 9;
2886 } else {
2887 sector = (em->block_start + extent_offset) >> 9;
2888 disk_io_size = iosize;
2889 }
2890 bdev = em->bdev;
2891 block_start = em->block_start;
2892 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2893 block_start = EXTENT_MAP_HOLE;
2894 free_extent_map(em);
2895 em = NULL;
2896
2897 /* we've found a hole, just zero and go on */
2898 if (block_start == EXTENT_MAP_HOLE) {
2899 char *userpage;
2900 struct extent_state *cached = NULL;
2901
2902 userpage = kmap_atomic(page);
2903 memset(userpage + pg_offset, 0, iosize);
2904 flush_dcache_page(page);
2905 kunmap_atomic(userpage);
2906
2907 set_extent_uptodate(tree, cur, cur + iosize - 1,
2908 &cached, GFP_NOFS);
2909 unlock_extent_cached(tree, cur, cur + iosize - 1,
2910 &cached, GFP_NOFS);
2911 cur = cur + iosize;
2912 pg_offset += iosize;
2913 continue;
2914 }
2915 /* the get_extent function already copied into the page */
2916 if (test_range_bit(tree, cur, cur_end,
2917 EXTENT_UPTODATE, 1, NULL)) {
2918 check_page_uptodate(tree, page);
2919 if (!parent_locked)
2920 unlock_extent(tree, cur, cur + iosize - 1);
2921 cur = cur + iosize;
2922 pg_offset += iosize;
2923 continue;
2924 }
		/*
		 * We have an inline extent but it didn't get marked up to
		 * date.  Error out.
		 */
2928 if (block_start == EXTENT_MAP_INLINE) {
2929 SetPageError(page);
2930 if (!parent_locked)
2931 unlock_extent(tree, cur, cur + iosize - 1);
2932 cur = cur + iosize;
2933 pg_offset += iosize;
2934 continue;
2935 }
2936
2937 pnr -= page->index;
2938 ret = submit_extent_page(rw, tree, page,
2939 sector, disk_io_size, pg_offset,
2940 bdev, bio, pnr,
2941 end_bio_extent_readpage, mirror_num,
2942 *bio_flags,
2943 this_bio_flag);
2944 if (!ret) {
2945 nr++;
2946 *bio_flags = this_bio_flag;
2947 } else {
2948 SetPageError(page);
2949 if (!parent_locked)
2950 unlock_extent(tree, cur, cur + iosize - 1);
2951 }
2952 cur = cur + iosize;
2953 pg_offset += iosize;
2954 }
2955out:
2956 if (!nr) {
2957 if (!PageError(page))
2958 SetPageUptodate(page);
2959 unlock_page(page);
2960 }
2961 return 0;
2962}
2963
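/*
 * Read a run of pages that are contiguous in the file.  The extent range is
 * locked once any overlapping ordered extents have completed, and each page
 * is then handed to __do_readpage().
 */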
2964static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
2965 struct page *pages[], int nr_pages,
2966 u64 start, u64 end,
2967 get_extent_t *get_extent,
2968 struct extent_map **em_cached,
2969 struct bio **bio, int mirror_num,
2970 unsigned long *bio_flags, int rw)
2971{
2972 struct inode *inode;
2973 struct btrfs_ordered_extent *ordered;
2974 int index;
2975
2976 inode = pages[0]->mapping->host;
2977 while (1) {
2978 lock_extent(tree, start, end);
2979 ordered = btrfs_lookup_ordered_range(inode, start,
2980 end - start + 1);
2981 if (!ordered)
2982 break;
2983 unlock_extent(tree, start, end);
2984 btrfs_start_ordered_extent(inode, ordered, 1);
2985 btrfs_put_ordered_extent(ordered);
2986 }
2987
2988 for (index = 0; index < nr_pages; index++) {
2989 __do_readpage(tree, pages[index], get_extent, em_cached, bio,
2990 mirror_num, bio_flags, rw);
2991 page_cache_release(pages[index]);
2992 }
2993}
2994
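/*
 * Split the page array into runs that are contiguous in file offset space
 * and read each run with __do_contiguous_readpages().
 */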
2995static void __extent_readpages(struct extent_io_tree *tree,
2996 struct page *pages[],
2997 int nr_pages, get_extent_t *get_extent,
2998 struct extent_map **em_cached,
2999 struct bio **bio, int mirror_num,
3000 unsigned long *bio_flags, int rw)
3001{
3002 u64 start = 0;
3003 u64 end = 0;
3004 u64 page_start;
3005 int index;
3006 int first_index = 0;
3007
3008 for (index = 0; index < nr_pages; index++) {
3009 page_start = page_offset(pages[index]);
3010 if (!end) {
3011 start = page_start;
3012 end = start + PAGE_CACHE_SIZE - 1;
3013 first_index = index;
3014 } else if (end + 1 == page_start) {
3015 end += PAGE_CACHE_SIZE;
3016 } else {
3017 __do_contiguous_readpages(tree, &pages[first_index],
3018 index - first_index, start,
3019 end, get_extent, em_cached,
3020 bio, mirror_num, bio_flags,
3021 rw);
3022 start = page_start;
3023 end = start + PAGE_CACHE_SIZE - 1;
3024 first_index = index;
3025 }
3026 }
3027
3028 if (end)
3029 __do_contiguous_readpages(tree, &pages[first_index],
3030 index - first_index, start,
3031 end, get_extent, em_cached, bio,
3032 mirror_num, bio_flags, rw);
3033}
3034
3035static int __extent_read_full_page(struct extent_io_tree *tree,
3036 struct page *page,
3037 get_extent_t *get_extent,
3038 struct bio **bio, int mirror_num,
3039 unsigned long *bio_flags, int rw)
3040{
3041 struct inode *inode = page->mapping->host;
3042 struct btrfs_ordered_extent *ordered;
3043 u64 start = page_offset(page);
3044 u64 end = start + PAGE_CACHE_SIZE - 1;
3045 int ret;
3046
3047 while (1) {
3048 lock_extent(tree, start, end);
3049 ordered = btrfs_lookup_ordered_extent(inode, start);
3050 if (!ordered)
3051 break;
3052 unlock_extent(tree, start, end);
3053 btrfs_start_ordered_extent(inode, ordered, 1);
3054 btrfs_put_ordered_extent(ordered);
3055 }
3056
3057 ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
3058 bio_flags, rw);
3059 return ret;
3060}
3061
3062int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
3063 get_extent_t *get_extent, int mirror_num)
3064{
3065 struct bio *bio = NULL;
3066 unsigned long bio_flags = 0;
3067 int ret;
3068
3069 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
3070 &bio_flags, READ);
3071 if (bio)
3072 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
3073 return ret;
3074}
3075
3076int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
3077 get_extent_t *get_extent, int mirror_num)
3078{
3079 struct bio *bio = NULL;
3080 unsigned long bio_flags = EXTENT_BIO_PARENT_LOCKED;
3081 int ret;
3082
3083 ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
3084 &bio_flags, READ);
3085 if (bio)
3086 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
3087 return ret;
3088}
3089
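/*
 * Account for pages just queued for writeback: reduce wbc->nr_to_write and,
 * for sweeping (cyclic or whole-range) writeback, advance the mapping's
 * writeback_index past them.
 */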
3090static noinline void update_nr_written(struct page *page,
3091 struct writeback_control *wbc,
3092 unsigned long nr_written)
3093{
3094 wbc->nr_to_write -= nr_written;
3095 if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
3096 wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
3097 page->mapping->writeback_index = page->index + nr_written;
3098}
3099
/*
 * The writepage semantics are similar to regular writepage.  Extent
 * records are inserted to lock ranges in the tree, and as dirty areas
 * are found, they are marked writeback.  Then the lock bits are removed
 * and the end_io handler clears the writeback ranges.
 */
3106static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3107 void *data)
3108{
3109 struct inode *inode = page->mapping->host;
3110 struct extent_page_data *epd = data;
3111 struct extent_io_tree *tree = epd->tree;
3112 u64 start = page_offset(page);
3113 u64 delalloc_start;
3114 u64 page_end = start + PAGE_CACHE_SIZE - 1;
3115 u64 end;
3116 u64 cur = start;
3117 u64 extent_offset;
3118 u64 last_byte = i_size_read(inode);
3119 u64 block_start;
3120 u64 iosize;
3121 sector_t sector;
3122 struct extent_state *cached_state = NULL;
3123 struct extent_map *em;
3124 struct block_device *bdev;
3125 int ret;
3126 int nr = 0;
3127 size_t pg_offset = 0;
3128 size_t blocksize;
3129 loff_t i_size = i_size_read(inode);
3130 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
3131 u64 nr_delalloc;
3132 u64 delalloc_end;
3133 int page_started;
3134 int compressed;
3135 int write_flags;
3136 unsigned long nr_written = 0;
3137 bool fill_delalloc = true;
3138
3139 if (wbc->sync_mode == WB_SYNC_ALL)
3140 write_flags = WRITE_SYNC;
3141 else
3142 write_flags = WRITE;
3143
3144 trace___extent_writepage(page, inode, wbc);
3145
3146 WARN_ON(!PageLocked(page));
3147
3148 ClearPageError(page);
3149
3150 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
3151 if (page->index > end_index ||
3152 (page->index == end_index && !pg_offset)) {
3153 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
3154 unlock_page(page);
3155 return 0;
3156 }
3157
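	/*
	 * This page straddles i_size, so zero everything beyond the end of
	 * the file to keep stale page cache contents from reaching disk.
	 */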
3158 if (page->index == end_index) {
3159 char *userpage;
3160
3161 userpage = kmap_atomic(page);
3162 memset(userpage + pg_offset, 0,
3163 PAGE_CACHE_SIZE - pg_offset);
3164 kunmap_atomic(userpage);
3165 flush_dcache_page(page);
3166 }
3167 pg_offset = 0;
3168
3169 set_page_extent_mapped(page);
3170
3171 if (!tree->ops || !tree->ops->fill_delalloc)
3172 fill_delalloc = false;
3173
3174 delalloc_start = start;
3175 delalloc_end = 0;
3176 page_started = 0;
3177 if (!epd->extent_locked && fill_delalloc) {
3178 u64 delalloc_to_write = 0;
3179 /*
3180 * make sure the wbc mapping index is at least updated
3181 * to this page.
3182 */
3183 update_nr_written(page, wbc, 0);
3184
3185 while (delalloc_end < page_end) {
3186 nr_delalloc = find_lock_delalloc_range(inode, tree,
3187 page,
3188 &delalloc_start,
3189 &delalloc_end,
3190 128 * 1024 * 1024);
3191 if (nr_delalloc == 0) {
3192 delalloc_start = delalloc_end + 1;
3193 continue;
3194 }
3195 ret = tree->ops->fill_delalloc(inode, page,
3196 delalloc_start,
3197 delalloc_end,
3198 &page_started,
3199 &nr_written);
3200 /* File system has been set read-only */
3201 if (ret) {
3202 SetPageError(page);
3203 goto done;
3204 }
3205 /*
3206 * delalloc_end is already one less than the total
3207 * length, so we don't subtract one from
3208 * PAGE_CACHE_SIZE
3209 */
3210 delalloc_to_write += (delalloc_end - delalloc_start +
3211 PAGE_CACHE_SIZE) >>
3212 PAGE_CACHE_SHIFT;
3213 delalloc_start = delalloc_end + 1;
3214 }
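		/*
		 * If the delalloc we just started covers more pages than the
		 * caller asked us to write, bump nr_to_write (within a
		 * threshold) so the whole range can go out in this pass.
		 */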
3215 if (wbc->nr_to_write < delalloc_to_write) {
3216 int thresh = 8192;
3217
3218 if (delalloc_to_write < thresh * 2)
3219 thresh = delalloc_to_write;
3220 wbc->nr_to_write = min_t(u64, delalloc_to_write,
3221 thresh);
3222 }
3223
3224 /* did the fill delalloc function already unlock and start
3225 * the IO?
3226 */
3227 if (page_started) {
3228 ret = 0;
3229 /*
3230 * we've unlocked the page, so we can't update
3231 * the mapping's writeback index, just update
3232 * nr_to_write.
3233 */
3234 wbc->nr_to_write -= nr_written;
3235 goto done_unlocked;
3236 }
3237 }
3238 if (tree->ops && tree->ops->writepage_start_hook) {
3239 ret = tree->ops->writepage_start_hook(page, start,
3240 page_end);
3241 if (ret) {
3242 /* Fixup worker will requeue */
3243 if (ret == -EBUSY)
3244 wbc->pages_skipped++;
3245 else
3246 redirty_page_for_writepage(wbc, page);
3247 update_nr_written(page, wbc, nr_written);
3248 unlock_page(page);
3249 ret = 0;
3250 goto done_unlocked;
3251 }
3252 }
3253
3254 /*
3255 * we don't want to touch the inode after unlocking the page,
3256 * so we update the mapping writeback index now
3257 */
3258 update_nr_written(page, wbc, nr_written + 1);
3259
3260 end = page_end;
3261 if (last_byte <= start) {
3262 if (tree->ops && tree->ops->writepage_end_io_hook)
3263 tree->ops->writepage_end_io_hook(page, start,
3264 page_end, NULL, 1);
3265 goto done;
3266 }
3267
3268 blocksize = inode->i_sb->s_blocksize;
3269
3270 while (cur <= end) {
3271 if (cur >= last_byte) {
3272 if (tree->ops && tree->ops->writepage_end_io_hook)
3273 tree->ops->writepage_end_io_hook(page, cur,
3274 page_end, NULL, 1);
3275 break;
3276 }
3277 em = epd->get_extent(inode, page, pg_offset, cur,
3278 end - cur + 1, 1);
3279 if (IS_ERR_OR_NULL(em)) {
3280 SetPageError(page);
3281 break;
3282 }
3283
3284 extent_offset = cur - em->start;
3285 BUG_ON(extent_map_end(em) <= cur);
3286 BUG_ON(end < cur);
3287 iosize = min(extent_map_end(em) - cur, end - cur + 1);
3288 iosize = ALIGN(iosize, blocksize);
3289 sector = (em->block_start + extent_offset) >> 9;
3290 bdev = em->bdev;
3291 block_start = em->block_start;
3292 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3293 free_extent_map(em);
3294 em = NULL;
3295
3296 /*
3297 * compressed and inline extents are written through other
3298 * paths in the FS
3299 */
3300 if (compressed || block_start == EXTENT_MAP_HOLE ||
3301 block_start == EXTENT_MAP_INLINE) {
3302 /*
3303 * end_io notification does not happen here for
3304 * compressed extents
3305 */
3306 if (!compressed && tree->ops &&
3307 tree->ops->writepage_end_io_hook)
3308 tree->ops->writepage_end_io_hook(page, cur,
3309 cur + iosize - 1,
3310 NULL, 1);
3311 else if (compressed) {
3312 /* we don't want to end_page_writeback on
3313 * a compressed extent. this happens
3314 * elsewhere
3315 */
3316 nr++;
3317 }
3318
3319 cur += iosize;
3320 pg_offset += iosize;
3321 continue;
3322 }
3323 /* leave this out until we have a page_mkwrite call */
3324 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
3325 EXTENT_DIRTY, 0, NULL)) {
3326 cur = cur + iosize;
3327 pg_offset += iosize;
3328 continue;
3329 }
3330
3331 if (tree->ops && tree->ops->writepage_io_hook) {
3332 ret = tree->ops->writepage_io_hook(page, cur,
3333 cur + iosize - 1);
3334 } else {
3335 ret = 0;
3336 }
3337 if (ret) {
3338 SetPageError(page);
3339 } else {
3340 unsigned long max_nr = end_index + 1;
3341
3342 set_range_writeback(tree, cur, cur + iosize - 1);
3343 if (!PageWriteback(page)) {
3344 btrfs_err(BTRFS_I(inode)->root->fs_info,
3345 "page %lu not writeback, cur %llu end %llu",
3346 page->index, cur, end);
3347 }
3348
3349 ret = submit_extent_page(write_flags, tree, page,
3350 sector, iosize, pg_offset,
3351 bdev, &epd->bio, max_nr,
3352 end_bio_extent_writepage,
3353 0, 0, 0);
3354 if (ret)
3355 SetPageError(page);
3356 }
3357 cur = cur + iosize;
3358 pg_offset += iosize;
3359 nr++;
3360 }
3361done:
3362 if (nr == 0) {
3363 /* make sure the mapping tag for page dirty gets cleared */
3364 set_page_writeback(page);
3365 end_page_writeback(page);
3366 }
3367 unlock_page(page);
3368
3369done_unlocked:
3370
3371 /* drop our reference on any cached states */
3372 free_extent_state(cached_state);
3373 return 0;
3374}
3375
3376static int eb_wait(void *word)
3377{
3378 io_schedule();
3379 return 0;
3380}
3381
3382void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3383{
3384 wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
3385 TASK_UNINTERRUPTIBLE);
3386}
3387
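/*
 * Prepare an extent buffer for writeback: take the tree write lock
 * (flushing the pending bio first if the trylock fails), wait out any
 * writeback already in flight when doing synchronous IO, and convert the
 * DIRTY bit into WRITEBACK under refs_lock while subtracting the buffer
 * from dirty_metadata_bytes.  Returns 1 with every page locked when the
 * buffer needs to be written, 0 when there is nothing to do.
 */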
3388static int lock_extent_buffer_for_io(struct extent_buffer *eb,
3389 struct btrfs_fs_info *fs_info,
3390 struct extent_page_data *epd)
3391{
3392 unsigned long i, num_pages;
3393 int flush = 0;
3394 int ret = 0;
3395
3396 if (!btrfs_try_tree_write_lock(eb)) {
3397 flush = 1;
3398 flush_write_bio(epd);
3399 btrfs_tree_lock(eb);
3400 }
3401
3402 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3403 btrfs_tree_unlock(eb);
3404 if (!epd->sync_io)
3405 return 0;
3406 if (!flush) {
3407 flush_write_bio(epd);
3408 flush = 1;
3409 }
3410 while (1) {
3411 wait_on_extent_buffer_writeback(eb);
3412 btrfs_tree_lock(eb);
3413 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3414 break;
3415 btrfs_tree_unlock(eb);
3416 }
3417 }
3418
3419 /*
 3420 * We need to do this to prevent races with anyone who checks whether the eb is
3421 * under IO since we can end up having no IO bits set for a short period
3422 * of time.
3423 */
3424 spin_lock(&eb->refs_lock);
3425 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3426 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3427 spin_unlock(&eb->refs_lock);
3428 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3429 __percpu_counter_add(&fs_info->dirty_metadata_bytes,
3430 -eb->len,
3431 fs_info->dirty_metadata_batch);
3432 ret = 1;
3433 } else {
3434 spin_unlock(&eb->refs_lock);
3435 }
3436
3437 btrfs_tree_unlock(eb);
3438
3439 if (!ret)
3440 return ret;
3441
3442 num_pages = num_extent_pages(eb->start, eb->len);
3443 for (i = 0; i < num_pages; i++) {
3444 struct page *p = extent_buffer_page(eb, i);
3445
3446 if (!trylock_page(p)) {
3447 if (!flush) {
3448 flush_write_bio(epd);
3449 flush = 1;
3450 }
3451 lock_page(p);
3452 }
3453 }
3454
3455 return ret;
3456}
3457
3458static void end_extent_buffer_writeback(struct extent_buffer *eb)
3459{
3460 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3461 smp_mb__after_clear_bit();
3462 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3463}
3464
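/*
 * Write completion for extent buffer pages.  Each segment drops one count
 * from eb->io_pages; an error marks the buffer IOERR and the page !uptodate,
 * and the last page to finish clears EXTENT_BUFFER_WRITEBACK and wakes any
 * waiters.
 */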
3465static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
3466{
3467 struct bio_vec *bvec;
3468 struct extent_buffer *eb;
3469 int i, done;
3470
3471 bio_for_each_segment_all(bvec, bio, i) {
3472 struct page *page = bvec->bv_page;
3473
3474 eb = (struct extent_buffer *)page->private;
3475 BUG_ON(!eb);
3476 done = atomic_dec_and_test(&eb->io_pages);
3477
3478 if (err || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
3479 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3480 ClearPageUptodate(page);
3481 SetPageError(page);
3482 }
3483
3484 end_page_writeback(page);
3485
3486 if (!done)
3487 continue;
3488
3489 end_extent_buffer_writeback(eb);
3490 }
3491
3492 bio_put(bio);
3493}
3494
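/*
 * Submit every page of one extent buffer for write.  Buffers owned by the
 * tree log carry the EXTENT_BIO_TREE_LOG bio flag; if a page cannot be
 * added to a bio the buffer is flagged IOERR, the outstanding io_pages
 * count is settled and the remaining locked pages are unlocked before
 * returning -EIO.
 */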
3495static int write_one_eb(struct extent_buffer *eb,
3496 struct btrfs_fs_info *fs_info,
3497 struct writeback_control *wbc,
3498 struct extent_page_data *epd)
3499{
3500 struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3501 struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
3502 u64 offset = eb->start;
3503 unsigned long i, num_pages;
3504 unsigned long bio_flags = 0;
3505 int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META;
3506 int ret = 0;
3507
3508 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3509 num_pages = num_extent_pages(eb->start, eb->len);
3510 atomic_set(&eb->io_pages, num_pages);
3511 if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
3512 bio_flags = EXTENT_BIO_TREE_LOG;
3513
3514 for (i = 0; i < num_pages; i++) {
3515 struct page *p = extent_buffer_page(eb, i);
3516
3517 clear_page_dirty_for_io(p);
3518 set_page_writeback(p);
3519 ret = submit_extent_page(rw, tree, p, offset >> 9,
3520 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
3521 -1, end_bio_extent_buffer_writepage,
3522 0, epd->bio_flags, bio_flags);
3523 epd->bio_flags = bio_flags;
3524 if (ret) {
3525 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3526 SetPageError(p);
3527 if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3528 end_extent_buffer_writeback(eb);
3529 ret = -EIO;
3530 break;
3531 }
3532 offset += PAGE_CACHE_SIZE;
3533 update_nr_written(p, wbc, 1);
3534 unlock_page(p);
3535 }
3536
3537 if (unlikely(ret)) {
3538 for (; i < num_pages; i++) {
3539 struct page *p = extent_buffer_page(eb, i);
3540 unlock_page(p);
3541 }
3542 }
3543
3544 return ret;
3545}
3546
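/*
 * Write back dirty extent buffer pages of the btree inode.  Dirty pages are
 * found by tag lookup, the extent buffer behind each page is pinned with
 * atomic_inc_not_zero() under private_lock, and buffers that are still
 * dirty are locked and sent to write_one_eb().  For WB_SYNC_ALL the range
 * is tagged TOWRITE up front so pages dirtied while we work cannot keep
 * the sync from ever finishing.
 */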
3547int btree_write_cache_pages(struct address_space *mapping,
3548 struct writeback_control *wbc)
3549{
3550 struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3551 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3552 struct extent_buffer *eb, *prev_eb = NULL;
3553 struct extent_page_data epd = {
3554 .bio = NULL,
3555 .tree = tree,
3556 .extent_locked = 0,
3557 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3558 .bio_flags = 0,
3559 };
3560 int ret = 0;
3561 int done = 0;
3562 int nr_to_write_done = 0;
3563 struct pagevec pvec;
3564 int nr_pages;
3565 pgoff_t index;
3566 pgoff_t end; /* Inclusive */
3567 int scanned = 0;
3568 int tag;
3569
3570 pagevec_init(&pvec, 0);
3571 if (wbc->range_cyclic) {
3572 index = mapping->writeback_index; /* Start from prev offset */
3573 end = -1;
3574 } else {
3575 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3576 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3577 scanned = 1;
3578 }
3579 if (wbc->sync_mode == WB_SYNC_ALL)
3580 tag = PAGECACHE_TAG_TOWRITE;
3581 else
3582 tag = PAGECACHE_TAG_DIRTY;
3583retry:
3584 if (wbc->sync_mode == WB_SYNC_ALL)
3585 tag_pages_for_writeback(mapping, index, end);
3586 while (!done && !nr_to_write_done && (index <= end) &&
3587 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3588 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3589 unsigned i;
3590
3591 scanned = 1;
3592 for (i = 0; i < nr_pages; i++) {
3593 struct page *page = pvec.pages[i];
3594
3595 if (!PagePrivate(page))
3596 continue;
3597
3598 if (!wbc->range_cyclic && page->index > end) {
3599 done = 1;
3600 break;
3601 }
3602
3603 spin_lock(&mapping->private_lock);
3604 if (!PagePrivate(page)) {
3605 spin_unlock(&mapping->private_lock);
3606 continue;
3607 }
3608
3609 eb = (struct extent_buffer *)page->private;
3610
3611 /*
3612 * Shouldn't happen and normally this would be a BUG_ON
 3613 * but no sense in crashing the user's box for something
3614 * we can survive anyway.
3615 */
3616 if (WARN_ON(!eb)) {
3617 spin_unlock(&mapping->private_lock);
3618 continue;
3619 }
3620
3621 if (eb == prev_eb) {
3622 spin_unlock(&mapping->private_lock);
3623 continue;
3624 }
3625
3626 ret = atomic_inc_not_zero(&eb->refs);
3627 spin_unlock(&mapping->private_lock);
3628 if (!ret)
3629 continue;
3630
3631 prev_eb = eb;
3632 ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3633 if (!ret) {
3634 free_extent_buffer(eb);
3635 continue;
3636 }
3637
3638 ret = write_one_eb(eb, fs_info, wbc, &epd);
3639 if (ret) {
3640 done = 1;
3641 free_extent_buffer(eb);
3642 break;
3643 }
3644 free_extent_buffer(eb);
3645
3646 /*
3647 * the filesystem may choose to bump up nr_to_write.
3648 * We have to make sure to honor the new nr_to_write
3649 * at any time
3650 */
3651 nr_to_write_done = wbc->nr_to_write <= 0;
3652 }
3653 pagevec_release(&pvec);
3654 cond_resched();
3655 }
3656 if (!scanned && !done) {
3657 /*
3658 * We hit the last page and there is more work to be done: wrap
3659 * back to the start of the file
3660 */
3661 scanned = 1;
3662 index = 0;
3663 goto retry;
3664 }
3665 flush_write_bio(&epd);
3666 return ret;
3667}
3668
3669/**
3670 * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
3671 * @mapping: address space structure to write
3672 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3673 * @writepage: function called for each page
3674 * @data: data passed to writepage function
3675 *
3676 * If a page is already under I/O, write_cache_pages() skips it, even
3677 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
3678 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
3679 * and msync() need to guarantee that all the data which was dirty at the time
3680 * the call was made get new I/O started against them. If wbc->sync_mode is
3681 * WB_SYNC_ALL then we were called for data integrity and we must wait for
3682 * existing IO to complete.
3683 */
3684static int extent_write_cache_pages(struct extent_io_tree *tree,
3685 struct address_space *mapping,
3686 struct writeback_control *wbc,
3687 writepage_t writepage, void *data,
3688 void (*flush_fn)(void *))
3689{
3690 struct inode *inode = mapping->host;
3691 int ret = 0;
3692 int done = 0;
3693 int nr_to_write_done = 0;
3694 struct pagevec pvec;
3695 int nr_pages;
3696 pgoff_t index;
3697 pgoff_t end; /* Inclusive */
3698 int scanned = 0;
3699 int tag;
3700
3701 /*
3702 * We have to hold onto the inode so that ordered extents can do their
3703 * work when the IO finishes. The alternative to this is failing to add
3704 * an ordered extent if the igrab() fails there and that is a huge pain
3705 * to deal with, so instead just hold onto the inode throughout the
3706 * writepages operation. If it fails here we are freeing up the inode
3707 * anyway and we'd rather not waste our time writing out stuff that is
3708 * going to be truncated anyway.
3709 */
3710 if (!igrab(inode))
3711 return 0;
3712
3713 pagevec_init(&pvec, 0);
3714 if (wbc->range_cyclic) {
3715 index = mapping->writeback_index; /* Start from prev offset */
3716 end = -1;
3717 } else {
3718 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3719 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3720 scanned = 1;
3721 }
3722 if (wbc->sync_mode == WB_SYNC_ALL)
3723 tag = PAGECACHE_TAG_TOWRITE;
3724 else
3725 tag = PAGECACHE_TAG_DIRTY;
3726retry:
3727 if (wbc->sync_mode == WB_SYNC_ALL)
3728 tag_pages_for_writeback(mapping, index, end);
3729 while (!done && !nr_to_write_done && (index <= end) &&
3730 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3731 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3732 unsigned i;
3733
3734 scanned = 1;
3735 for (i = 0; i < nr_pages; i++) {
3736 struct page *page = pvec.pages[i];
3737
3738 /*
3739 * At this point we hold neither mapping->tree_lock nor
3740 * lock on the page itself: the page may be truncated or
3741 * invalidated (changing page->mapping to NULL), or even
3742 * swizzled back from swapper_space to tmpfs file
3743 * mapping
3744 */
3745 if (!trylock_page(page)) {
3746 flush_fn(data);
3747 lock_page(page);
3748 }
3749
3750 if (unlikely(page->mapping != mapping)) {
3751 unlock_page(page);
3752 continue;
3753 }
3754
3755 if (!wbc->range_cyclic && page->index > end) {
3756 done = 1;
3757 unlock_page(page);
3758 continue;
3759 }
3760
3761 if (wbc->sync_mode != WB_SYNC_NONE) {
3762 if (PageWriteback(page))
3763 flush_fn(data);
3764 wait_on_page_writeback(page);
3765 }
3766
3767 if (PageWriteback(page) ||
3768 !clear_page_dirty_for_io(page)) {
3769 unlock_page(page);
3770 continue;
3771 }
3772
3773 ret = (*writepage)(page, wbc, data);
3774
3775 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
3776 unlock_page(page);
3777 ret = 0;
3778 }
3779 if (ret)
3780 done = 1;
3781
3782 /*
3783 * the filesystem may choose to bump up nr_to_write.
3784 * We have to make sure to honor the new nr_to_write
3785 * at any time
3786 */
3787 nr_to_write_done = wbc->nr_to_write <= 0;
3788 }
3789 pagevec_release(&pvec);
3790 cond_resched();
3791 }
3792 if (!scanned && !done) {
3793 /*
3794 * We hit the last page and there is more work to be done: wrap
3795 * back to the start of the file
3796 */
3797 scanned = 1;
3798 index = 0;
3799 goto retry;
3800 }
3801 btrfs_add_delayed_iput(inode);
3802 return ret;
3803}
3804
3805static void flush_epd_write_bio(struct extent_page_data *epd)
3806{
3807 if (epd->bio) {
3808 int rw = WRITE;
3809 int ret;
3810
3811 if (epd->sync_io)
3812 rw = WRITE_SYNC;
3813
3814 ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags);
3815 BUG_ON(ret < 0); /* -ENOMEM */
3816 epd->bio = NULL;
3817 }
3818}
3819
3820static noinline void flush_write_bio(void *data)
3821{
3822 struct extent_page_data *epd = data;
3823 flush_epd_write_bio(epd);
3824}
3825
3826int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
3827 get_extent_t *get_extent,
3828 struct writeback_control *wbc)
3829{
3830 int ret;
3831 struct extent_page_data epd = {
3832 .bio = NULL,
3833 .tree = tree,
3834 .get_extent = get_extent,
3835 .extent_locked = 0,
3836 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3837 .bio_flags = 0,
3838 };
3839
3840 ret = __extent_writepage(page, wbc, &epd);
3841
3842 flush_epd_write_bio(&epd);
3843 return ret;
3844}
3845
3846int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
3847 u64 start, u64 end, get_extent_t *get_extent,
3848 int mode)
3849{
3850 int ret = 0;
3851 struct address_space *mapping = inode->i_mapping;
3852 struct page *page;
3853 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
3854 PAGE_CACHE_SHIFT;
3855
3856 struct extent_page_data epd = {
3857 .bio = NULL,
3858 .tree = tree,
3859 .get_extent = get_extent,
3860 .extent_locked = 1,
3861 .sync_io = mode == WB_SYNC_ALL,
3862 .bio_flags = 0,
3863 };
3864 struct writeback_control wbc_writepages = {
3865 .sync_mode = mode,
3866 .nr_to_write = nr_pages * 2,
3867 .range_start = start,
3868 .range_end = end + 1,
3869 };
3870
3871 while (start <= end) {
3872 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
3873 if (clear_page_dirty_for_io(page))
3874 ret = __extent_writepage(page, &wbc_writepages, &epd);
3875 else {
3876 if (tree->ops && tree->ops->writepage_end_io_hook)
3877 tree->ops->writepage_end_io_hook(page, start,
3878 start + PAGE_CACHE_SIZE - 1,
3879 NULL, 1);
3880 unlock_page(page);
3881 }
3882 page_cache_release(page);
3883 start += PAGE_CACHE_SIZE;
3884 }
3885
3886 flush_epd_write_bio(&epd);
3887 return ret;
3888}
3889
3890int extent_writepages(struct extent_io_tree *tree,
3891 struct address_space *mapping,
3892 get_extent_t *get_extent,
3893 struct writeback_control *wbc)
3894{
3895 int ret = 0;
3896 struct extent_page_data epd = {
3897 .bio = NULL,
3898 .tree = tree,
3899 .get_extent = get_extent,
3900 .extent_locked = 0,
3901 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3902 .bio_flags = 0,
3903 };
3904
3905 ret = extent_write_cache_pages(tree, mapping, wbc,
3906 __extent_writepage, &epd,
3907 flush_write_bio);
3908 flush_epd_write_bio(&epd);
3909 return ret;
3910}
3911
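/*
 * Batched read entry point: pages from the readahead list are inserted into
 * the page cache in groups of up to 16 and handed to __extent_readpages(),
 * reusing one cached extent map (em_cached) across the whole batch.  This
 * is what a filesystem would typically wire up as its ->readpages
 * address_space operation.
 */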
3912int extent_readpages(struct extent_io_tree *tree,
3913 struct address_space *mapping,
3914 struct list_head *pages, unsigned nr_pages,
3915 get_extent_t get_extent)
3916{
3917 struct bio *bio = NULL;
3918 unsigned page_idx;
3919 unsigned long bio_flags = 0;
3920 struct page *pagepool[16];
3921 struct page *page;
3922 struct extent_map *em_cached = NULL;
3923 int nr = 0;
3924
3925 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
3926 page = list_entry(pages->prev, struct page, lru);
3927
3928 prefetchw(&page->flags);
3929 list_del(&page->lru);
3930 if (add_to_page_cache_lru(page, mapping,
3931 page->index, GFP_NOFS)) {
3932 page_cache_release(page);
3933 continue;
3934 }
3935
3936 pagepool[nr++] = page;
3937 if (nr < ARRAY_SIZE(pagepool))
3938 continue;
3939 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
3940 &bio, 0, &bio_flags, READ);
3941 nr = 0;
3942 }
3943 if (nr)
3944 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
3945 &bio, 0, &bio_flags, READ);
3946
3947 if (em_cached)
3948 free_extent_map(em_cached);
3949
3950 BUG_ON(!list_empty(pages));
3951 if (bio)
3952 return submit_one_bio(READ, bio, 0, bio_flags);
3953 return 0;
3954}
3955
3956/*
 3957 * basic invalidatepage code: this waits on any locked or writeback
3958 * ranges corresponding to the page, and then deletes any extent state
3959 * records from the tree
3960 */
3961int extent_invalidatepage(struct extent_io_tree *tree,
3962 struct page *page, unsigned long offset)
3963{
3964 struct extent_state *cached_state = NULL;
3965 u64 start = page_offset(page);
3966 u64 end = start + PAGE_CACHE_SIZE - 1;
3967 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
3968
3969 start += ALIGN(offset, blocksize);
3970 if (start > end)
3971 return 0;
3972
3973 lock_extent_bits(tree, start, end, 0, &cached_state);
3974 wait_on_page_writeback(page);
3975 clear_extent_bit(tree, start, end,
3976 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3977 EXTENT_DO_ACCOUNTING,
3978 1, 1, &cached_state, GFP_NOFS);
3979 return 0;
3980}
3981
3982/*
3983 * a helper for releasepage, this tests for areas of the page that
3984 * are locked or under IO and drops the related state bits if it is safe
3985 * to drop the page.
3986 */
3987static int try_release_extent_state(struct extent_map_tree *map,
3988 struct extent_io_tree *tree,
3989 struct page *page, gfp_t mask)
3990{
3991 u64 start = page_offset(page);
3992 u64 end = start + PAGE_CACHE_SIZE - 1;
3993 int ret = 1;
3994
3995 if (test_range_bit(tree, start, end,
3996 EXTENT_IOBITS, 0, NULL))
3997 ret = 0;
3998 else {
3999 if ((mask & GFP_NOFS) == GFP_NOFS)
4000 mask = GFP_NOFS;
4001 /*
4002 * at this point we can safely clear everything except the
4003 * locked bit and the nodatasum bit
4004 */
4005 ret = clear_extent_bit(tree, start, end,
4006 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
4007 0, 0, NULL, mask);
4008
4009 /* if clear_extent_bit failed for enomem reasons,
4010 * we can't allow the release to continue.
4011 */
4012 if (ret < 0)
4013 ret = 0;
4014 else
4015 ret = 1;
4016 }
4017 return ret;
4018}
4019
4020/*
4021 * a helper for releasepage. As long as there are no locked extents
4022 * in the range corresponding to the page, both state records and extent
4023 * map records are removed
4024 */
4025int try_release_extent_mapping(struct extent_map_tree *map,
4026 struct extent_io_tree *tree, struct page *page,
4027 gfp_t mask)
4028{
4029 struct extent_map *em;
4030 u64 start = page_offset(page);
4031 u64 end = start + PAGE_CACHE_SIZE - 1;
4032
4033 if ((mask & __GFP_WAIT) &&
4034 page->mapping->host->i_size > 16 * 1024 * 1024) {
4035 u64 len;
4036 while (start <= end) {
4037 len = end - start + 1;
4038 write_lock(&map->lock);
4039 em = lookup_extent_mapping(map, start, len);
4040 if (!em) {
4041 write_unlock(&map->lock);
4042 break;
4043 }
4044 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
4045 em->start != start) {
4046 write_unlock(&map->lock);
4047 free_extent_map(em);
4048 break;
4049 }
4050 if (!test_range_bit(tree, em->start,
4051 extent_map_end(em) - 1,
4052 EXTENT_LOCKED | EXTENT_WRITEBACK,
4053 0, NULL)) {
4054 remove_extent_mapping(map, em);
4055 /* once for the rb tree */
4056 free_extent_map(em);
4057 }
4058 start = extent_map_end(em);
4059 write_unlock(&map->lock);
4060
4061 /* once for us */
4062 free_extent_map(em);
4063 }
4064 }
4065 return try_release_extent_state(map, tree, page, mask);
4066}
4067
4068/*
4069 * helper function for fiemap, which doesn't want to see any holes.
4070 * This maps until we find something past 'last'
4071 */
4072static struct extent_map *get_extent_skip_holes(struct inode *inode,
4073 u64 offset,
4074 u64 last,
4075 get_extent_t *get_extent)
4076{
4077 u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
4078 struct extent_map *em;
4079 u64 len;
4080
4081 if (offset >= last)
4082 return NULL;
4083
4084 while (1) {
4085 len = last - offset;
4086 if (len == 0)
4087 break;
4088 len = ALIGN(len, sectorsize);
4089 em = get_extent(inode, NULL, 0, offset, len, 0);
4090 if (IS_ERR_OR_NULL(em))
4091 return em;
4092
4093 /* if this isn't a hole return it */
4094 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
4095 em->block_start != EXTENT_MAP_HOLE) {
4096 return em;
4097 }
4098
4099 /* this is a hole, advance to the next extent */
4100 offset = extent_map_end(em);
4101 free_extent_map(em);
4102 if (offset >= last)
4103 break;
4104 }
4105 return NULL;
4106}
4107
4108static noinline int count_ext_ref(u64 inum, u64 offset, u64 root_id, void *ctx)
4109{
4110 unsigned long cnt = *((unsigned long *)ctx);
4111
4112 cnt++;
4113 *((unsigned long *)ctx) = cnt;
4114
4115 /* Now we're sure that the extent is shared. */
4116 if (cnt > 1)
4117 return 1;
4118 return 0;
4119}
4120
4121int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4122 __u64 start, __u64 len, get_extent_t *get_extent)
4123{
4124 int ret = 0;
4125 u64 off = start;
4126 u64 max = start + len;
4127 u32 flags = 0;
4128 u32 found_type;
4129 u64 last;
4130 u64 last_for_get_extent = 0;
4131 u64 disko = 0;
4132 u64 isize = i_size_read(inode);
4133 struct btrfs_key found_key;
4134 struct extent_map *em = NULL;
4135 struct extent_state *cached_state = NULL;
4136 struct btrfs_path *path;
4137 int end = 0;
4138 u64 em_start = 0;
4139 u64 em_len = 0;
4140 u64 em_end = 0;
4141
4142 if (len == 0)
4143 return -EINVAL;
4144
4145 path = btrfs_alloc_path();
4146 if (!path)
4147 return -ENOMEM;
4148 path->leave_spinning = 1;
4149
4150 start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
4151 len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
4152
4153 /*
4154 * lookup the last file extent. We're not using i_size here
4155 * because there might be preallocation past i_size
4156 */
4157 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
4158 path, btrfs_ino(inode), -1, 0);
4159 if (ret < 0) {
4160 btrfs_free_path(path);
4161 return ret;
4162 }
4163 WARN_ON(!ret);
4164 path->slots[0]--;
4165 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
4166 found_type = btrfs_key_type(&found_key);
4167
4168 /* No extents, but there might be delalloc bits */
4169 if (found_key.objectid != btrfs_ino(inode) ||
4170 found_type != BTRFS_EXTENT_DATA_KEY) {
4171 /* have to trust i_size as the end */
4172 last = (u64)-1;
4173 last_for_get_extent = isize;
4174 } else {
4175 /*
4176 * remember the start of the last extent. There are a
4177 * bunch of different factors that go into the length of the
 4178 * extent, so it's much less complex to remember where it started
4179 */
4180 last = found_key.offset;
4181 last_for_get_extent = last + 1;
4182 }
4183 btrfs_release_path(path);
4184
4185 /*
4186 * we might have some extents allocated but more delalloc past those
4187 * extents. so, we trust isize unless the start of the last extent is
4188 * beyond isize
4189 */
4190 if (last < isize) {
4191 last = (u64)-1;
4192 last_for_get_extent = isize;
4193 }
4194
4195 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 0,
4196 &cached_state);
4197
4198 em = get_extent_skip_holes(inode, start, last_for_get_extent,
4199 get_extent);
4200 if (!em)
4201 goto out;
4202 if (IS_ERR(em)) {
4203 ret = PTR_ERR(em);
4204 goto out;
4205 }
4206
4207 while (!end) {
4208 u64 offset_in_extent = 0;
4209
4210 /* break if the extent we found is outside the range */
4211 if (em->start >= max || extent_map_end(em) < off)
4212 break;
4213
4214 /*
4215 * get_extent may return an extent that starts before our
4216 * requested range. We have to make sure the ranges
4217 * we return to fiemap always move forward and don't
4218 * overlap, so adjust the offsets here
4219 */
4220 em_start = max(em->start, off);
4221
4222 /*
4223 * record the offset from the start of the extent
4224 * for adjusting the disk offset below. Only do this if the
4225 * extent isn't compressed since our in ram offset may be past
4226 * what we have actually allocated on disk.
4227 */
4228 if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4229 offset_in_extent = em_start - em->start;
4230 em_end = extent_map_end(em);
4231 em_len = em_end - em_start;
4232 disko = 0;
4233 flags = 0;
4234
4235 /*
4236 * bump off for our next call to get_extent
4237 */
4238 off = extent_map_end(em);
4239 if (off >= max)
4240 end = 1;
4241
4242 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
4243 end = 1;
4244 flags |= FIEMAP_EXTENT_LAST;
4245 } else if (em->block_start == EXTENT_MAP_INLINE) {
4246 flags |= (FIEMAP_EXTENT_DATA_INLINE |
4247 FIEMAP_EXTENT_NOT_ALIGNED);
4248 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
4249 flags |= (FIEMAP_EXTENT_DELALLOC |
4250 FIEMAP_EXTENT_UNKNOWN);
4251 } else {
4252 unsigned long ref_cnt = 0;
4253
4254 disko = em->block_start + offset_in_extent;
4255
4256 /*
4257 * As btrfs supports shared space, this information
4258 * can be exported to userspace tools via
4259 * flag FIEMAP_EXTENT_SHARED.
4260 */
4261 ret = iterate_inodes_from_logical(
4262 em->block_start,
4263 BTRFS_I(inode)->root->fs_info,
4264 path, count_ext_ref, &ref_cnt);
4265 if (ret < 0 && ret != -ENOENT)
4266 goto out_free;
4267
4268 if (ref_cnt > 1)
4269 flags |= FIEMAP_EXTENT_SHARED;
4270 }
4271 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4272 flags |= FIEMAP_EXTENT_ENCODED;
4273
4274 free_extent_map(em);
4275 em = NULL;
4276 if ((em_start >= last) || em_len == (u64)-1 ||
4277 (last == (u64)-1 && isize <= em_end)) {
4278 flags |= FIEMAP_EXTENT_LAST;
4279 end = 1;
4280 }
4281
4282 /* now scan forward to see if this is really the last extent. */
4283 em = get_extent_skip_holes(inode, off, last_for_get_extent,
4284 get_extent);
4285 if (IS_ERR(em)) {
4286 ret = PTR_ERR(em);
4287 goto out;
4288 }
4289 if (!em) {
4290 flags |= FIEMAP_EXTENT_LAST;
4291 end = 1;
4292 }
4293 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
4294 em_len, flags);
4295 if (ret)
4296 goto out_free;
4297 }
4298out_free:
4299 free_extent_map(em);
4300out:
4301 btrfs_free_path(path);
4302 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4303 &cached_state, GFP_NOFS);
4304 return ret;
4305}
4306
4307static void __free_extent_buffer(struct extent_buffer *eb)
4308{
4309 btrfs_leak_debug_del(&eb->leak_list);
4310 kmem_cache_free(extent_buffer_cache, eb);
4311}
4312
4313int extent_buffer_under_io(struct extent_buffer *eb)
4314{
4315 return (atomic_read(&eb->io_pages) ||
4316 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4317 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4318}
4319
4320/*
4321 * Helper for releasing extent buffer page.
4322 */
4323static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
4324 unsigned long start_idx)
4325{
4326 unsigned long index;
4327 unsigned long num_pages;
4328 struct page *page;
4329 int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4330
4331 BUG_ON(extent_buffer_under_io(eb));
4332
4333 num_pages = num_extent_pages(eb->start, eb->len);
4334 index = start_idx + num_pages;
4335 if (start_idx >= index)
4336 return;
4337
4338 do {
4339 index--;
4340 page = extent_buffer_page(eb, index);
4341 if (page && mapped) {
4342 spin_lock(&page->mapping->private_lock);
4343 /*
4344 * We do this since we'll remove the pages after we've
4345 * removed the eb from the radix tree, so we could race
4346 * and have this page now attached to the new eb. So
4347 * only clear page_private if it's still connected to
4348 * this eb.
4349 */
4350 if (PagePrivate(page) &&
4351 page->private == (unsigned long)eb) {
4352 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4353 BUG_ON(PageDirty(page));
4354 BUG_ON(PageWriteback(page));
4355 /*
 4356 * We need to make sure we haven't been attached
4357 * to a new eb.
4358 */
4359 ClearPagePrivate(page);
4360 set_page_private(page, 0);
4361 /* One for the page private */
4362 page_cache_release(page);
4363 }
4364 spin_unlock(&page->mapping->private_lock);
4365
4366 }
4367 if (page) {
 4368 /* One for when we allocated the page */
4369 page_cache_release(page);
4370 }
4371 } while (index != start_idx);
4372}
4373
4374/*
4375 * Helper for releasing the extent buffer.
4376 */
4377static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4378{
4379 btrfs_release_extent_buffer_page(eb, 0);
4380 __free_extent_buffer(eb);
4381}
4382
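/*
 * Allocate a bare extent_buffer: record start/len/fs_info, initialize the
 * locking primitives and the reference count (refs starts at 1 for the
 * caller), and add the buffer to the leak-debug list.  No pages are
 * attached here; that is up to the caller.
 */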
4383static struct extent_buffer *
4384__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
4385 unsigned long len, gfp_t mask)
4386{
4387 struct extent_buffer *eb = NULL;
4388
4389 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
4390 if (eb == NULL)
4391 return NULL;
4392 eb->start = start;
4393 eb->len = len;
4394 eb->fs_info = fs_info;
4395 eb->bflags = 0;
4396 rwlock_init(&eb->lock);
4397 atomic_set(&eb->write_locks, 0);
4398 atomic_set(&eb->read_locks, 0);
4399 atomic_set(&eb->blocking_readers, 0);
4400 atomic_set(&eb->blocking_writers, 0);
4401 atomic_set(&eb->spinning_readers, 0);
4402 atomic_set(&eb->spinning_writers, 0);
4403 eb->lock_nested = 0;
4404 init_waitqueue_head(&eb->write_lock_wq);
4405 init_waitqueue_head(&eb->read_lock_wq);
4406
4407 btrfs_leak_debug_add(&eb->leak_list, &buffers);
4408
4409 spin_lock_init(&eb->refs_lock);
4410 atomic_set(&eb->refs, 1);
4411 atomic_set(&eb->io_pages, 0);
4412
4413 /*
4414 * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
4415 */
4416 BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
4417 > MAX_INLINE_EXTENT_BUFFER_SIZE);
4418 BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
4419
4420 return eb;
4421}
4422
4423struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
4424{
4425 unsigned long i;
4426 struct page *p;
4427 struct extent_buffer *new;
4428 unsigned long num_pages = num_extent_pages(src->start, src->len);
4429
4430 new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_NOFS);
4431 if (new == NULL)
4432 return NULL;
4433
4434 for (i = 0; i < num_pages; i++) {
4435 p = alloc_page(GFP_NOFS);
4436 if (!p) {
4437 btrfs_release_extent_buffer(new);
4438 return NULL;
4439 }
4440 attach_extent_buffer_page(new, p);
4441 WARN_ON(PageDirty(p));
4442 SetPageUptodate(p);
4443 new->pages[i] = p;
4444 }
4445
4446 copy_extent_buffer(new, src, 0, 0, src->len);
4447 set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
4448 set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
4449
4450 return new;
4451}
4452
4453struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
4454{
4455 struct extent_buffer *eb;
4456 unsigned long num_pages = num_extent_pages(0, len);
4457 unsigned long i;
4458
4459 eb = __alloc_extent_buffer(NULL, start, len, GFP_NOFS);
4460 if (!eb)
4461 return NULL;
4462
4463 for (i = 0; i < num_pages; i++) {
4464 eb->pages[i] = alloc_page(GFP_NOFS);
4465 if (!eb->pages[i])
4466 goto err;
4467 }
4468 set_extent_buffer_uptodate(eb);
4469 btrfs_set_header_nritems(eb, 0);
4470 set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4471
4472 return eb;
4473err:
4474 for (; i > 0; i--)
4475 __free_page(eb->pages[i - 1]);
4476 __free_extent_buffer(eb);
4477 return NULL;
4478}
4479
4480static void check_buffer_tree_ref(struct extent_buffer *eb)
4481{
4482 int refs;
4483 /* the ref bit is tricky. We have to make sure it is set
4484 * if we have the buffer dirty. Otherwise the
4485 * code to free a buffer can end up dropping a dirty
4486 * page
4487 *
4488 * Once the ref bit is set, it won't go away while the
4489 * buffer is dirty or in writeback, and it also won't
4490 * go away while we have the reference count on the
4491 * eb bumped.
4492 *
4493 * We can't just set the ref bit without bumping the
4494 * ref on the eb because free_extent_buffer might
4495 * see the ref bit and try to clear it. If this happens
4496 * free_extent_buffer might end up dropping our original
4497 * ref by mistake and freeing the page before we are able
4498 * to add one more ref.
4499 *
4500 * So bump the ref count first, then set the bit. If someone
4501 * beat us to it, drop the ref we added.
4502 */
4503 refs = atomic_read(&eb->refs);
4504 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4505 return;
4506
4507 spin_lock(&eb->refs_lock);
4508 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4509 atomic_inc(&eb->refs);
4510 spin_unlock(&eb->refs_lock);
4511}
4512
4513static void mark_extent_buffer_accessed(struct extent_buffer *eb)
4514{
4515 unsigned long num_pages, i;
4516
4517 check_buffer_tree_ref(eb);
4518
4519 num_pages = num_extent_pages(eb->start, eb->len);
4520 for (i = 0; i < num_pages; i++) {
4521 struct page *p = extent_buffer_page(eb, i);
4522 mark_page_accessed(p);
4523 }
4524}
4525
4526struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
4527 u64 start)
4528{
4529 struct extent_buffer *eb;
4530
4531 rcu_read_lock();
4532 eb = radix_tree_lookup(&fs_info->buffer_radix,
4533 start >> PAGE_CACHE_SHIFT);
4534 if (eb && atomic_inc_not_zero(&eb->refs)) {
4535 rcu_read_unlock();
4536 mark_extent_buffer_accessed(eb);
4537 return eb;
4538 }
4539 rcu_read_unlock();
4540
4541 return NULL;
4542}
4543
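/*
 * Find or create the extent buffer covering [start, start + len) in the
 * btree inode's page cache.  An existing buffer is returned with an extra
 * reference; otherwise the pages are created one by one (taking over
 * page->private from any buffer that is being torn down), the new eb is
 * inserted into the buffer radix tree, and an -EEXIST from the insert
 * simply retries the lookup.
 */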
4544struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
4545 u64 start, unsigned long len)
4546{
4547 unsigned long num_pages = num_extent_pages(start, len);
4548 unsigned long i;
4549 unsigned long index = start >> PAGE_CACHE_SHIFT;
4550 struct extent_buffer *eb;
4551 struct extent_buffer *exists = NULL;
4552 struct page *p;
4553 struct address_space *mapping = fs_info->btree_inode->i_mapping;
4554 int uptodate = 1;
4555 int ret;
4556
4557 eb = find_extent_buffer(fs_info, start);
4558 if (eb)
4559 return eb;
4560
4561 eb = __alloc_extent_buffer(fs_info, start, len, GFP_NOFS);
4562 if (!eb)
4563 return NULL;
4564
4565 for (i = 0; i < num_pages; i++, index++) {
4566 p = find_or_create_page(mapping, index, GFP_NOFS);
4567 if (!p)
4568 goto free_eb;
4569
4570 spin_lock(&mapping->private_lock);
4571 if (PagePrivate(p)) {
4572 /*
4573 * We could have already allocated an eb for this page
4574 * and attached one so lets see if we can get a ref on
4575 * the existing eb, and if we can we know it's good and
4576 * we can just return that one, else we know we can just
4577 * overwrite page->private.
4578 */
4579 exists = (struct extent_buffer *)p->private;
4580 if (atomic_inc_not_zero(&exists->refs)) {
4581 spin_unlock(&mapping->private_lock);
4582 unlock_page(p);
4583 page_cache_release(p);
4584 mark_extent_buffer_accessed(exists);
4585 goto free_eb;
4586 }
4587
4588 /*
4589 * Do this so attach doesn't complain and we need to
4590 * drop the ref the old guy had.
4591 */
4592 ClearPagePrivate(p);
4593 WARN_ON(PageDirty(p));
4594 page_cache_release(p);
4595 }
4596 attach_extent_buffer_page(eb, p);
4597 spin_unlock(&mapping->private_lock);
4598 WARN_ON(PageDirty(p));
4599 mark_page_accessed(p);
4600 eb->pages[i] = p;
4601 if (!PageUptodate(p))
4602 uptodate = 0;
4603
4604 /*
4605 * see below about how we avoid a nasty race with release page
4606 * and why we unlock later
4607 */
4608 }
4609 if (uptodate)
4610 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4611again:
4612 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4613 if (ret)
4614 goto free_eb;
4615
4616 spin_lock(&fs_info->buffer_lock);
4617 ret = radix_tree_insert(&fs_info->buffer_radix,
4618 start >> PAGE_CACHE_SHIFT, eb);
4619 spin_unlock(&fs_info->buffer_lock);
4620 radix_tree_preload_end();
4621 if (ret == -EEXIST) {
4622 exists = find_extent_buffer(fs_info, start);
4623 if (exists)
4624 goto free_eb;
4625 else
4626 goto again;
4627 }
4628 /* add one reference for the tree */
4629 check_buffer_tree_ref(eb);
4630 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
4631
4632 /*
4633 * there is a race where release page may have
4634 * tried to find this extent buffer in the radix
4635 * but failed. It will tell the VM it is safe to
 4636 * reclaim the page, and it will clear the page private bit.
4637 * We must make sure to set the page private bit properly
4638 * after the extent buffer is in the radix tree so
4639 * it doesn't get lost
4640 */
4641 SetPageChecked(eb->pages[0]);
4642 for (i = 1; i < num_pages; i++) {
4643 p = extent_buffer_page(eb, i);
4644 ClearPageChecked(p);
4645 unlock_page(p);
4646 }
4647 unlock_page(eb->pages[0]);
4648 return eb;
4649
4650free_eb:
4651 for (i = 0; i < num_pages; i++) {
4652 if (eb->pages[i])
4653 unlock_page(eb->pages[i]);
4654 }
4655
4656 WARN_ON(!atomic_dec_and_test(&eb->refs));
4657 btrfs_release_extent_buffer(eb);
4658 return exists;
4659}
4660
4661static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4662{
4663 struct extent_buffer *eb =
4664 container_of(head, struct extent_buffer, rcu_head);
4665
4666 __free_extent_buffer(eb);
4667}
4668
4669/* Expects to have eb->eb_lock already held */
4670static int release_extent_buffer(struct extent_buffer *eb)
4671{
4672 WARN_ON(atomic_read(&eb->refs) == 0);
4673 if (atomic_dec_and_test(&eb->refs)) {
4674 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
4675 struct btrfs_fs_info *fs_info = eb->fs_info;
4676
4677 spin_unlock(&eb->refs_lock);
4678
4679 spin_lock(&fs_info->buffer_lock);
4680 radix_tree_delete(&fs_info->buffer_radix,
4681 eb->start >> PAGE_CACHE_SHIFT);
4682 spin_unlock(&fs_info->buffer_lock);
4683 } else {
4684 spin_unlock(&eb->refs_lock);
4685 }
4686
4687 /* Should be safe to release our pages at this point */
4688 btrfs_release_extent_buffer_page(eb, 0);
4689 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
4690 return 1;
4691 }
4692 spin_unlock(&eb->refs_lock);
4693
4694 return 0;
4695}
4696
4697void free_extent_buffer(struct extent_buffer *eb)
4698{
4699 int refs;
4700 int old;
4701 if (!eb)
4702 return;
4703
4704 while (1) {
4705 refs = atomic_read(&eb->refs);
4706 if (refs <= 3)
4707 break;
4708 old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
4709 if (old == refs)
4710 return;
4711 }
4712
4713 spin_lock(&eb->refs_lock);
4714 if (atomic_read(&eb->refs) == 2 &&
4715 test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
4716 atomic_dec(&eb->refs);
4717
4718 if (atomic_read(&eb->refs) == 2 &&
4719 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
4720 !extent_buffer_under_io(eb) &&
4721 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4722 atomic_dec(&eb->refs);
4723
4724 /*
4725 * I know this is terrible, but it's temporary until we stop tracking
4726 * the uptodate bits and such for the extent buffers.
4727 */
4728 release_extent_buffer(eb);
4729}
4730
4731void free_extent_buffer_stale(struct extent_buffer *eb)
4732{
4733 if (!eb)
4734 return;
4735
4736 spin_lock(&eb->refs_lock);
4737 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
4738
4739 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
4740 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4741 atomic_dec(&eb->refs);
4742 release_extent_buffer(eb);
4743}
4744
4745void clear_extent_buffer_dirty(struct extent_buffer *eb)
4746{
4747 unsigned long i;
4748 unsigned long num_pages;
4749 struct page *page;
4750
4751 num_pages = num_extent_pages(eb->start, eb->len);
4752
4753 for (i = 0; i < num_pages; i++) {
4754 page = extent_buffer_page(eb, i);
4755 if (!PageDirty(page))
4756 continue;
4757
4758 lock_page(page);
4759 WARN_ON(!PagePrivate(page));
4760
4761 clear_page_dirty_for_io(page);
4762 spin_lock_irq(&page->mapping->tree_lock);
4763 if (!PageDirty(page)) {
4764 radix_tree_tag_clear(&page->mapping->page_tree,
4765 page_index(page),
4766 PAGECACHE_TAG_DIRTY);
4767 }
4768 spin_unlock_irq(&page->mapping->tree_lock);
4769 ClearPageError(page);
4770 unlock_page(page);
4771 }
4772 WARN_ON(atomic_read(&eb->refs) == 0);
4773}
4774
4775int set_extent_buffer_dirty(struct extent_buffer *eb)
4776{
4777 unsigned long i;
4778 unsigned long num_pages;
4779 int was_dirty = 0;
4780
4781 check_buffer_tree_ref(eb);
4782
4783 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4784
4785 num_pages = num_extent_pages(eb->start, eb->len);
4786 WARN_ON(atomic_read(&eb->refs) == 0);
4787 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
4788
4789 for (i = 0; i < num_pages; i++)
4790 set_page_dirty(extent_buffer_page(eb, i));
4791 return was_dirty;
4792}
4793
4794int clear_extent_buffer_uptodate(struct extent_buffer *eb)
4795{
4796 unsigned long i;
4797 struct page *page;
4798 unsigned long num_pages;
4799
4800 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4801 num_pages = num_extent_pages(eb->start, eb->len);
4802 for (i = 0; i < num_pages; i++) {
4803 page = extent_buffer_page(eb, i);
4804 if (page)
4805 ClearPageUptodate(page);
4806 }
4807 return 0;
4808}
4809
4810int set_extent_buffer_uptodate(struct extent_buffer *eb)
4811{
4812 unsigned long i;
4813 struct page *page;
4814 unsigned long num_pages;
4815
4816 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4817 num_pages = num_extent_pages(eb->start, eb->len);
4818 for (i = 0; i < num_pages; i++) {
4819 page = extent_buffer_page(eb, i);
4820 SetPageUptodate(page);
4821 }
4822 return 0;
4823}
4824
4825int extent_buffer_uptodate(struct extent_buffer *eb)
4826{
4827 return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4828}
4829
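/*
 * Read the pages backing an extent buffer.  WAIT_NONE only trylocks and
 * backs off on contention; WAIT_COMPLETE waits for every page to become
 * uptodate before returning.  Pages that are already uptodate are skipped,
 * so io_pages only counts the reads actually issued.
 */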
4830int read_extent_buffer_pages(struct extent_io_tree *tree,
4831 struct extent_buffer *eb, u64 start, int wait,
4832 get_extent_t *get_extent, int mirror_num)
4833{
4834 unsigned long i;
4835 unsigned long start_i;
4836 struct page *page;
4837 int err;
4838 int ret = 0;
4839 int locked_pages = 0;
4840 int all_uptodate = 1;
4841 unsigned long num_pages;
4842 unsigned long num_reads = 0;
4843 struct bio *bio = NULL;
4844 unsigned long bio_flags = 0;
4845
4846 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4847 return 0;
4848
4849 if (start) {
4850 WARN_ON(start < eb->start);
4851 start_i = (start >> PAGE_CACHE_SHIFT) -
4852 (eb->start >> PAGE_CACHE_SHIFT);
4853 } else {
4854 start_i = 0;
4855 }
4856
4857 num_pages = num_extent_pages(eb->start, eb->len);
4858 for (i = start_i; i < num_pages; i++) {
4859 page = extent_buffer_page(eb, i);
4860 if (wait == WAIT_NONE) {
4861 if (!trylock_page(page))
4862 goto unlock_exit;
4863 } else {
4864 lock_page(page);
4865 }
4866 locked_pages++;
4867 if (!PageUptodate(page)) {
4868 num_reads++;
4869 all_uptodate = 0;
4870 }
4871 }
4872 if (all_uptodate) {
4873 if (start_i == 0)
4874 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4875 goto unlock_exit;
4876 }
4877
4878 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
4879 eb->read_mirror = 0;
4880 atomic_set(&eb->io_pages, num_reads);
4881 for (i = start_i; i < num_pages; i++) {
4882 page = extent_buffer_page(eb, i);
4883 if (!PageUptodate(page)) {
4884 ClearPageError(page);
4885 err = __extent_read_full_page(tree, page,
4886 get_extent, &bio,
4887 mirror_num, &bio_flags,
4888 READ | REQ_META);
4889 if (err)
4890 ret = err;
4891 } else {
4892 unlock_page(page);
4893 }
4894 }
4895
4896 if (bio) {
4897 err = submit_one_bio(READ | REQ_META, bio, mirror_num,
4898 bio_flags);
4899 if (err)
4900 return err;
4901 }
4902
4903 if (ret || wait != WAIT_COMPLETE)
4904 return ret;
4905
4906 for (i = start_i; i < num_pages; i++) {
4907 page = extent_buffer_page(eb, i);
4908 wait_on_page_locked(page);
4909 if (!PageUptodate(page))
4910 ret = -EIO;
4911 }
4912
4913 return ret;
4914
4915unlock_exit:
4916 i = start_i;
4917 while (locked_pages > 0) {
4918 page = extent_buffer_page(eb, i);
4919 i++;
4920 unlock_page(page);
4921 locked_pages--;
4922 }
4923 return ret;
4924}
4925
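/*
 * Copy len bytes starting at offset start of an extent buffer into dstv.
 * Since a buffer does not have to begin on a page boundary, start_offset
 * (its offset within the first page) is folded in before converting to a
 * page index.  Illustrative arithmetic, assuming 4K pages and a buffer
 * that begins 0x800 bytes into its first page: a read at start 0x900 lands
 * at offset (0x800 + 0x900) & 0xfff = 0x100 within page index
 * (0x800 + 0x900) >> 12 = 1 of the buffer.
 */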
4926void read_extent_buffer(struct extent_buffer *eb, void *dstv,
4927 unsigned long start,
4928 unsigned long len)
4929{
4930 size_t cur;
4931 size_t offset;
4932 struct page *page;
4933 char *kaddr;
4934 char *dst = (char *)dstv;
4935 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4936 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4937
4938 WARN_ON(start > eb->len);
4939 WARN_ON(start + len > eb->start + eb->len);
4940
4941 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
4942
4943 while (len > 0) {
4944 page = extent_buffer_page(eb, i);
4945
4946 cur = min(len, (PAGE_CACHE_SIZE - offset));
4947 kaddr = page_address(page);
4948 memcpy(dst, kaddr + offset, cur);
4949
4950 dst += cur;
4951 len -= cur;
4952 offset = 0;
4953 i++;
4954 }
4955}
4956
4957int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
4958 unsigned long min_len, char **map,
4959 unsigned long *map_start,
4960 unsigned long *map_len)
4961{
4962 size_t offset = start & (PAGE_CACHE_SIZE - 1);
4963 char *kaddr;
4964 struct page *p;
4965 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4966 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4967 unsigned long end_i = (start_offset + start + min_len - 1) >>
4968 PAGE_CACHE_SHIFT;
4969
4970 if (i != end_i)
4971 return -EINVAL;
4972
4973 if (i == 0) {
4974 offset = start_offset;
4975 *map_start = 0;
4976 } else {
4977 offset = 0;
4978 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
4979 }
4980
4981 if (start + min_len > eb->len) {
4982 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
4983 "wanted %lu %lu\n",
4984 eb->start, eb->len, start, min_len);
4985 return -EINVAL;
4986 }
4987
4988 p = extent_buffer_page(eb, i);
4989 kaddr = page_address(p);
4990 *map = kaddr + offset;
4991 *map_len = PAGE_CACHE_SIZE - offset;
4992 return 0;
4993}
4994
4995int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
4996 unsigned long start,
4997 unsigned long len)
4998{
4999 size_t cur;
5000 size_t offset;
5001 struct page *page;
5002 char *kaddr;
5003 char *ptr = (char *)ptrv;
5004 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5005 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5006 int ret = 0;
5007
5008 WARN_ON(start > eb->len);
5009 WARN_ON(start + len > eb->start + eb->len);
5010
5011 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
5012
5013 while (len > 0) {
5014 page = extent_buffer_page(eb, i);
5015
5016 cur = min(len, (PAGE_CACHE_SIZE - offset));
5017
5018 kaddr = page_address(page);
5019 ret = memcmp(ptr, kaddr + offset, cur);
5020 if (ret)
5021 break;
5022
5023 ptr += cur;
5024 len -= cur;
5025 offset = 0;
5026 i++;
5027 }
5028 return ret;
5029}
5030
5031void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
5032 unsigned long start, unsigned long len)
5033{
5034 size_t cur;
5035 size_t offset;
5036 struct page *page;
5037 char *kaddr;
5038 char *src = (char *)srcv;
5039 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5040 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5041
5042 WARN_ON(start > eb->len);
5043 WARN_ON(start + len > eb->start + eb->len);
5044
5045 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
5046
5047 while (len > 0) {
5048 page = extent_buffer_page(eb, i);
5049 WARN_ON(!PageUptodate(page));
5050
5051 cur = min(len, PAGE_CACHE_SIZE - offset);
5052 kaddr = page_address(page);
5053 memcpy(kaddr + offset, src, cur);
5054
5055 src += cur;
5056 len -= cur;
5057 offset = 0;
5058 i++;
5059 }
5060}
5061
5062void memset_extent_buffer(struct extent_buffer *eb, char c,
5063 unsigned long start, unsigned long len)
5064{
5065 size_t cur;
5066 size_t offset;
5067 struct page *page;
5068 char *kaddr;
5069 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5070 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5071
5072 WARN_ON(start > eb->len);
5073 WARN_ON(start + len > eb->start + eb->len);
5074
5075 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
5076
5077 while (len > 0) {
5078 page = extent_buffer_page(eb, i);
5079 WARN_ON(!PageUptodate(page));
5080
5081 cur = min(len, PAGE_CACHE_SIZE - offset);
5082 kaddr = page_address(page);
5083 memset(kaddr + offset, c, cur);
5084
5085 len -= cur;
5086 offset = 0;
5087 i++;
5088 }
5089}
5090
5091void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
5092 unsigned long dst_offset, unsigned long src_offset,
5093 unsigned long len)
5094{
5095 u64 dst_len = dst->len;
5096 size_t cur;
5097 size_t offset;
5098 struct page *page;
5099 char *kaddr;
5100 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5101 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
5102
5103 WARN_ON(src->len != dst_len);
5104
5105 offset = (start_offset + dst_offset) &
5106 (PAGE_CACHE_SIZE - 1);
5107
5108 while (len > 0) {
5109 page = extent_buffer_page(dst, i);
5110 WARN_ON(!PageUptodate(page));
5111
5112 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
5113
5114 kaddr = page_address(page);
5115 read_extent_buffer(src, kaddr + offset, src_offset, cur);
5116
5117 src_offset += cur;
5118 len -= cur;
5119 offset = 0;
5120 i++;
5121 }
5122}
5123
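/*
 * Helpers for the extent buffer copy/move routines below: areas_overlap()
 * detects overlapping ranges, and copy_pages() only falls back to
 * memmove() when source and destination live in the same page and really
 * overlap.
 */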
5124static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
5125{
5126 unsigned long distance = (src > dst) ? src - dst : dst - src;
5127 return distance < len;
5128}
5129
5130static void copy_pages(struct page *dst_page, struct page *src_page,
5131 unsigned long dst_off, unsigned long src_off,
5132 unsigned long len)
5133{
5134 char *dst_kaddr = page_address(dst_page);
5135 char *src_kaddr;
5136 int must_memmove = 0;
5137
5138 if (dst_page != src_page) {
5139 src_kaddr = page_address(src_page);
5140 } else {
5141 src_kaddr = dst_kaddr;
5142 if (areas_overlap(src_off, dst_off, len))
5143 must_memmove = 1;
5144 }
5145
5146 if (must_memmove)
5147 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
5148 else
5149 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
5150}
5151
5152void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5153 unsigned long src_offset, unsigned long len)
5154{
5155 size_t cur;
5156 size_t dst_off_in_page;
5157 size_t src_off_in_page;
5158 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5159 unsigned long dst_i;
5160 unsigned long src_i;
5161
5162 if (src_offset + len > dst->len) {
 5163 printk(KERN_ERR "BTRFS: memcpy bogus src_offset %lu move "
5164 "len %lu dst len %lu\n", src_offset, len, dst->len);
5165 BUG_ON(1);
5166 }
5167 if (dst_offset + len > dst->len) {
 5168 printk(KERN_ERR "BTRFS: memcpy bogus dst_offset %lu move "
5169 "len %lu dst len %lu\n", dst_offset, len, dst->len);
5170 BUG_ON(1);
5171 }
5172
5173 while (len > 0) {
5174 dst_off_in_page = (start_offset + dst_offset) &
5175 (PAGE_CACHE_SIZE - 1);
5176 src_off_in_page = (start_offset + src_offset) &
5177 (PAGE_CACHE_SIZE - 1);
5178
5179 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
5180 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
5181
5182 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
5183 src_off_in_page));
5184 cur = min_t(unsigned long, cur,
5185 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
5186
5187 copy_pages(extent_buffer_page(dst, dst_i),
5188 extent_buffer_page(dst, src_i),
5189 dst_off_in_page, src_off_in_page, cur);
5190
5191 src_offset += cur;
5192 dst_offset += cur;
5193 len -= cur;
5194 }
5195}
5196
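/*
 * Overlap-safe move within one extent buffer.  When dst_offset is below
 * src_offset a front-to-back copy is fine and memcpy_extent_buffer() is
 * used; otherwise the copy walks backwards from the end of both ranges so
 * source bytes are copied before they can be overwritten.
 */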
5197void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5198 unsigned long src_offset, unsigned long len)
5199{
5200 size_t cur;
5201 size_t dst_off_in_page;
5202 size_t src_off_in_page;
5203 unsigned long dst_end = dst_offset + len - 1;
5204 unsigned long src_end = src_offset + len - 1;
5205 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5206 unsigned long dst_i;
5207 unsigned long src_i;
5208
5209 if (src_offset + len > dst->len) {
5210 printk(KERN_ERR "BTRFS: memmove bogus src_offset %lu move "
 5211 "len %lu dst len %lu\n", src_offset, len, dst->len);
5212 BUG_ON(1);
5213 }
5214 if (dst_offset + len > dst->len) {
5215 printk(KERN_ERR "BTRFS: memmove bogus dst_offset %lu move "
 5216 "len %lu dst len %lu\n", dst_offset, len, dst->len);
5217 BUG_ON(1);
5218 }
5219 if (dst_offset < src_offset) {
5220 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
5221 return;
5222 }
5223 while (len > 0) {
5224 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
5225 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
5226
5227 dst_off_in_page = (start_offset + dst_end) &
5228 (PAGE_CACHE_SIZE - 1);
5229 src_off_in_page = (start_offset + src_end) &
5230 (PAGE_CACHE_SIZE - 1);
5231
5232 cur = min_t(unsigned long, len, src_off_in_page + 1);
5233 cur = min(cur, dst_off_in_page + 1);
5234 copy_pages(extent_buffer_page(dst, dst_i),
5235 extent_buffer_page(dst, src_i),
5236 dst_off_in_page - cur + 1,
5237 src_off_in_page - cur + 1, cur);
5238
5239 dst_end -= cur;
5240 src_end -= cur;
5241 len -= cur;
5242 }
5243}
5244
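/*
 * Called when the page cache wants to drop a btree page (typically via the
 * releasepage path).  Under mapping->private_lock the attached buffer is
 * looked up; the page can only be released when the eb holds a single
 * (tree) reference and is neither dirty nor under IO, in which case the
 * tree ref is dropped and release_extent_buffer() tears it down.
 */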
5245int try_release_extent_buffer(struct page *page)
5246{
5247 struct extent_buffer *eb;
5248
5249 /*
 5250 * We need to make sure nobody is attaching this page to an eb right
5251 * now.
5252 */
5253 spin_lock(&page->mapping->private_lock);
5254 if (!PagePrivate(page)) {
5255 spin_unlock(&page->mapping->private_lock);
5256 return 1;
5257 }
5258
5259 eb = (struct extent_buffer *)page->private;
5260 BUG_ON(!eb);
5261
5262 /*
5263 * This is a little awful but should be ok, we need to make sure that
5264 * the eb doesn't disappear out from under us while we're looking at
5265 * this page.
5266 */
5267 spin_lock(&eb->refs_lock);
5268 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
5269 spin_unlock(&eb->refs_lock);
5270 spin_unlock(&page->mapping->private_lock);
5271 return 0;
5272 }
5273 spin_unlock(&page->mapping->private_lock);
5274
5275 /*
5276 * If tree ref isn't set then we know the ref on this eb is a real ref,
 5277 * so just return; this page will likely be freed soon anyway.
5278 */
5279 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
5280 spin_unlock(&eb->refs_lock);
5281 return 0;
5282 }
5283
5284 return release_extent_buffer(eb);
5285}