// SPDX-License-Identifier: GPL-2.0

#include <linux/slab.h>
#include "messages.h"
#include "ctree.h"
#include "subpage.h"
#include "btrfs_inode.h"

/*
 * Subpage (sectorsize < PAGE_SIZE) support overview:
 *
 * Limitations:
 *
 * - Only 64K page size is supported for now
 *   This makes metadata handling easier, as a 64K page ensures that every
 *   supported nodesize fits inside one page, so we don't need to handle
 *   cases where a tree block crosses several pages.
 *
 * - Only metadata read-write for now
 *   The data read-write part is in development.
 *
 * - Metadata can't cross 64K page boundary
 *   btrfs-progs and the kernel have enforced this for a while, thus only
 *   ancient filesystems could have such a problem.  For such a case, do a
 *   graceful rejection.
 *
 * Special behavior:
 *
 * - Metadata
 *   Metadata read is fully supported.
 *   That means reading one tree block will only trigger the read for the
 *   needed range; other unrelated ranges in the same page will not be
 *   touched.
 *
 *   Metadata write support is partial.
 *   The writeback is still for the full page, but we will only submit the
 *   dirty extent buffers in the page.
 *
 *   This means, if we have a metadata page like this:
 *
 *   Page offset
 *     0         16K         32K         48K        64K
 *     |/////////|           |///////////|
 *        \- Tree block A        \- Tree block B
 *
 *   Even if we just want to writeback tree block A, we will also writeback
 *   tree block B if it's also dirty.
 *
 *   This may cause extra metadata writeback, which results in more COW.
 *
 * Implementation:
 *
 * - Common
 *   Both metadata and data will use a new structure, btrfs_subpage, to
 *   record the status of each sector inside a page.  This provides the
 *   extra granularity needed.
 *
 * - Metadata
 *   Since we have multiple tree blocks inside one page, we can't rely on
 *   page locking anymore, or we will have greatly reduced concurrency or
 *   even deadlocks (hold one tree lock while trying to lock another tree
 *   lock in the same page).
 *
 *   Thus for metadata locking, subpage support relies on io_tree locking
 *   only.  This means a slightly higher tree locking latency.
 */

bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping)
{
	if (fs_info->sectorsize >= PAGE_SIZE)
		return false;

	/*
	 * Only data pages (either through DIO or compression) can have no
	 * mapping. And if mapping->host is a data inode, it's subpage, as we
	 * have already ruled out the sectorsize >= PAGE_SIZE case.
	 */
	if (!mapping || !mapping->host || is_data_inode(mapping->host))
		return true;

	/*
	 * Now the only remaining case is metadata, which goes the subpage
	 * routine only if nodesize < PAGE_SIZE.
	 */
	if (fs_info->nodesize < PAGE_SIZE)
		return true;
	return false;
}
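
/*
 * Illustrative example (not part of the original file; @data_inode is a
 * hypothetical variable): on a 64K page machine with a 4K sectorsize
 * filesystem, every data mapping is subpage:
 *
 *	btrfs_is_subpage(fs_info, data_inode->i_mapping)  == true
 *
 * while metadata only goes the subpage routine when nodesize < PAGE_SIZE
 * (e.g. 16K nodesize on 64K pages).  With sectorsize == PAGE_SIZE (e.g. 4K
 * sectors on 4K pages) this always returns false.
 */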

void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize)
{
	unsigned int cur = 0;
	unsigned int nr_bits;

	ASSERT(IS_ALIGNED(PAGE_SIZE, sectorsize));

	nr_bits = PAGE_SIZE / sectorsize;
	subpage_info->bitmap_nr_bits = nr_bits;

	subpage_info->uptodate_offset = cur;
	cur += nr_bits;

	subpage_info->dirty_offset = cur;
	cur += nr_bits;

	subpage_info->writeback_offset = cur;
	cur += nr_bits;

	subpage_info->ordered_offset = cur;
	cur += nr_bits;

	subpage_info->checked_offset = cur;
	cur += nr_bits;

	subpage_info->locked_offset = cur;
	cur += nr_bits;

	subpage_info->total_nr_bits = cur;
}
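
/*
 * Worked example (illustrative): with 64K PAGE_SIZE and 4K sectorsize,
 * nr_bits = 64K / 4K = 16, so the per-folio bitmap is laid out as:
 *
 *	uptodate_offset  = 0
 *	dirty_offset     = 16
 *	writeback_offset = 32
 *	ordered_offset   = 48
 *	checked_offset   = 64
 *	locked_offset    = 80
 *	total_nr_bits    = 96
 *
 * i.e. 96 bits in total, which fits in the two longs allocated by
 * btrfs_alloc_subpage() on 64-bit systems (BITS_TO_LONGS(96) == 2).
 */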

int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct folio *folio, enum btrfs_subpage_type type)
{
	struct btrfs_subpage *subpage;

	/*
	 * We have cases like a dummy extent buffer page, which is not mapped
	 * and doesn't need to be locked.
	 */
	if (folio->mapping)
		ASSERT(folio_test_locked(folio));

	/* Either not subpage, or the folio already has private attached. */
	if (!btrfs_is_subpage(fs_info, folio->mapping) || folio_test_private(folio))
		return 0;

	subpage = btrfs_alloc_subpage(fs_info, type);
	if (IS_ERR(subpage))
		return PTR_ERR(subpage);

	folio_attach_private(folio, subpage);
	return 0;
}

void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	/* Either not subpage, or the folio has no private attached. */
	if (!btrfs_is_subpage(fs_info, folio->mapping) || !folio_test_private(folio))
		return;

	subpage = folio_detach_private(folio);
	ASSERT(subpage);
	btrfs_free_subpage(subpage);
}
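
/*
 * Usage sketch (illustrative, hypothetical caller context): a data folio
 * that was just added to the page cache would be set up with:
 *
 *	ret = btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
 *	if (ret < 0)
 *		return ret;
 *
 * and torn down with btrfs_detach_subpage(fs_info, folio) from the
 * release/invalidate paths.  Both are no-ops for non-subpage filesystems.
 */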

struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
					  enum btrfs_subpage_type type)
{
	struct btrfs_subpage *ret;
	unsigned int real_size;

	ASSERT(fs_info->sectorsize < PAGE_SIZE);

	real_size = struct_size(ret, bitmaps,
			BITS_TO_LONGS(fs_info->subpage_info->total_nr_bits));
	ret = kzalloc(real_size, GFP_NOFS);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&ret->lock);
	if (type == BTRFS_SUBPAGE_METADATA) {
		atomic_set(&ret->eb_refs, 0);
	} else {
		atomic_set(&ret->readers, 0);
		atomic_set(&ret->writers, 0);
	}
	return ret;
}

void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
	kfree(subpage);
}

/*
 * Increase the eb_refs of the current subpage.
 *
 * This is important for eb allocation, to prevent a race with the freeing
 * of the last eb in the same page.
 * With eb_refs increased before the eb is inserted into the radix tree,
 * detach_extent_buffer_page() won't detach the folio private while we're
 * still allocating the extent buffer.
 */
void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->i_private_lock);

	subpage = folio_get_private(folio);
	atomic_inc(&subpage->eb_refs);
}

void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->i_private_lock);

	subpage = folio_get_private(folio);
	ASSERT(atomic_read(&subpage->eb_refs));
	atomic_dec(&subpage->eb_refs);
}
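
/*
 * Illustrative pairing (hypothetical call sites): an extent buffer
 * allocation path would bump the count under the mapping's private lock:
 *
 *	spin_lock(&folio->mapping->i_private_lock);
 *	btrfs_folio_inc_eb_refs(fs_info, folio);
 *	spin_unlock(&folio->mapping->i_private_lock);
 *
 * with a matching btrfs_folio_dec_eb_refs() when the eb is detached, so a
 * nonzero eb_refs keeps the folio private alive.
 */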

static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	/* For subpage support, the folio must be a single page. */
	ASSERT(folio_order(folio) == 0);

	/* Basic checks */
	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));
	/*
	 * The range check only works for mapped pages, we can still have
	 * unmapped pages like dummy extent buffer pages.
	 */
	if (folio->mapping)
		ASSERT(folio_pos(folio) <= start &&
		       start + len <= folio_pos(folio) + PAGE_SIZE);
}

static bool bitmap_test_range_all_set(unsigned long *addr, unsigned int start,
				      unsigned int nbits)
{
	unsigned int found_zero;

	found_zero = find_next_zero_bit(addr, start + nbits, start);
	if (found_zero == start + nbits)
		return true;
	return false;
}

static bool bitmap_test_range_all_zero(unsigned long *addr, unsigned int start,
				       unsigned int nbits)
{
	unsigned int found_set;

	found_set = find_next_bit(addr, start + nbits, start);
	if (found_set == start + nbits)
		return true;
	return false;
}

#define subpage_calc_start_bit(fs_info, folio, name, start, len)	\
({									\
	unsigned int start_bit;						\
									\
	btrfs_subpage_assert(fs_info, folio, start, len);		\
	start_bit = offset_in_page(start) >> fs_info->sectorsize_bits;	\
	start_bit += fs_info->subpage_info->name##_offset;		\
	start_bit;							\
})
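
/*
 * Worked example (illustrative): with 4K sectorsize and the 64K-page layout
 * from btrfs_init_subpage_info() above, a range starting at file offset
 * folio_pos(folio) + 8K maps to bit 2 within one bitmap, so:
 *
 *	subpage_calc_start_bit(fs_info, folio, dirty, start, len)
 *
 * returns dirty_offset + 2 (= 18 with that layout).
 */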

void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	const int nbits = len >> fs_info->sectorsize_bits;
	unsigned long flags;

	btrfs_subpage_assert(fs_info, folio, start, len);

	spin_lock_irqsave(&subpage->lock, flags);
	/*
	 * Even though it's just for reading the page, no one should have
	 * locked the subpage range.
	 */
	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
	bitmap_set(subpage->bitmaps, start_bit, nbits);
	atomic_add(nbits, &subpage->readers);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
			      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	const int nbits = len >> fs_info->sectorsize_bits;
	unsigned long flags;
	bool is_data;
	bool last;

	btrfs_subpage_assert(fs_info, folio, start, len);
	is_data = is_data_inode(folio->mapping->host);

	spin_lock_irqsave(&subpage->lock, flags);

	/* The range should have already been locked. */
	ASSERT(bitmap_test_range_all_set(subpage->bitmaps, start_bit, nbits));
	ASSERT(atomic_read(&subpage->readers) >= nbits);

	bitmap_clear(subpage->bitmaps, start_bit, nbits);
	last = atomic_sub_and_test(nbits, &subpage->readers);

	/*
	 * For data we need to unlock the folio if the last read has finished.
	 *
	 * Please don't replace @last with an atomic_sub_and_test() call
	 * inside the if () condition, as we want the atomic_sub_and_test()
	 * to always be executed.
	 */
	if (is_data && last)
		folio_unlock(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
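
/*
 * Usage sketch (illustrative, hypothetical read path): before submitting a
 * read bio for one sector of a data folio, the caller takes reader
 * references on that range:
 *
 *	btrfs_subpage_start_reader(fs_info, folio, cur, sectorsize);
 *
 * and the read completion calls btrfs_subpage_end_reader() with the same
 * range; the folio is unlocked once the last reader of a data folio ends.
 */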

static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
{
	u64 orig_start = *start;
	u32 orig_len = *len;

	*start = max_t(u64, folio_pos(folio), orig_start);
	/*
	 * For certain call sites like btrfs_drop_pages(), we may have pages
	 * beyond the target range. In that case, just set @len to 0, subpage
	 * helpers can handle @len == 0 without any problem.
	 */
	if (folio_pos(folio) >= orig_start + orig_len)
		*len = 0;
	else
		*len = min_t(u64, folio_pos(folio) + PAGE_SIZE,
			     orig_start + orig_len) - *start;
}
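
/*
 * Worked example (illustrative): for a folio at file offset 64K (64K page
 * size) and a caller range of start=60K, len=12K, clamping yields start=64K,
 * len=8K; a folio wholly past the caller range gets len=0.
 */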

static void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
				       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	const int nbits = (len >> fs_info->sectorsize_bits);
	unsigned long flags;
	int ret;

	btrfs_subpage_assert(fs_info, folio, start, len);

	spin_lock_irqsave(&subpage->lock, flags);
	ASSERT(atomic_read(&subpage->readers) == 0);
	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
	bitmap_set(subpage->bitmaps, start_bit, nbits);
	ret = atomic_add_return(nbits, &subpage->writers);
	ASSERT(ret == nbits);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
					      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	const int nbits = (len >> fs_info->sectorsize_bits);
	unsigned long flags;
	bool last;

	btrfs_subpage_assert(fs_info, folio, start, len);

	spin_lock_irqsave(&subpage->lock, flags);
	/*
	 * We have call sites passing @locked_page into
	 * extent_clear_unlock_delalloc() for the compression path.
	 *
	 * Such @locked_page is locked by plain lock_page(), thus its
	 * subpage::writers is 0. Handle it in a special way.
	 */
	if (atomic_read(&subpage->writers) == 0) {
		spin_unlock_irqrestore(&subpage->lock, flags);
		return true;
	}

	ASSERT(atomic_read(&subpage->writers) >= nbits);
	/* The target range should have been locked. */
	ASSERT(bitmap_test_range_all_set(subpage->bitmaps, start_bit, nbits));
	bitmap_clear(subpage->bitmaps, start_bit, nbits);
	last = atomic_sub_and_test(nbits, &subpage->writers);
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

/*
 * Lock a folio for delalloc page writeback.
 *
 * Return -EAGAIN if the folio is not properly initialized.
 * Return 0 with the folio locked, and the writer counter updated.
 *
 * Even with 0 returned, the folio still needs an extra check to make sure
 * it's really the correct folio, as the caller is using
 * filemap_get_folios_contig(), which can race with folio invalidation.
 */
int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_lock(folio);
		return 0;
	}
	folio_lock(folio);
	if (!folio_test_private(folio) || !folio_get_private(folio)) {
		folio_unlock(folio);
		return -EAGAIN;
	}
	btrfs_subpage_clamp_range(folio, &start, &len);
	btrfs_subpage_start_writer(fs_info, folio, start, len);
	return 0;
}

void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_unlock(folio);
		return;
	}
	btrfs_subpage_clamp_range(folio, &start, &len);
	if (btrfs_subpage_end_and_test_writer(fs_info, folio, start, len))
		folio_unlock(folio);
}
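
/*
 * Usage sketch (illustrative, hypothetical writeback loop): delalloc
 * writeback locks each folio of the target range with:
 *
 *	ret = btrfs_folio_start_writer_lock(fs_info, folio, start, len);
 *	if (ret == -EAGAIN)
 *		goto retry;
 *
 * (retrying because the folio was invalidated under us), and releases it
 * with btrfs_folio_end_writer_lock(), which only unlocks the folio once the
 * last writer of the range is done.
 */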

#define subpage_test_bitmap_all_set(fs_info, subpage, name)		\
	bitmap_test_range_all_set(subpage->bitmaps,			\
			fs_info->subpage_info->name##_offset,		\
			fs_info->subpage_info->bitmap_nr_bits)

#define subpage_test_bitmap_all_zero(fs_info, subpage, name)		\
	bitmap_test_range_all_zero(subpage->bitmaps,			\
			fs_info->subpage_info->name##_offset,		\
			fs_info->subpage_info->bitmap_nr_bits)

void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
			     struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	spin_unlock_irqrestore(&subpage->lock, flags);
	folio_mark_dirty(folio);
}

/*
 * Extra clear_and_test function for the subpage dirty bitmap.
 *
 * Return true if we cleared the last bits in the dirty bitmap.
 * Return false otherwise.
 *
 * NOTE: Callers should manually clear the folio dirty flag for the true
 * case, as we have extra handling for tree blocks.
 */
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;
	bool last = false;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
		last = true;
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	bool last;

	last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, start, len);
	if (last)
		folio_clear_dirty_for_io(folio);
}

void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (!folio_test_writeback(folio))
		folio_start_writeback(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
				   struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
		ASSERT(folio_test_writeback(folio));
		folio_end_writeback(folio);
	}
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_set_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
		folio_clear_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
		folio_set_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Unlike set/clear, which are dependent on each page status, for test all
 * bits are tested in the same way.
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
			       struct folio *folio, u64 start, u32 len)	\
{									\
	struct btrfs_subpage *subpage = folio_get_private(folio);	\
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,	\
						name, start, len);	\
	unsigned long flags;						\
	bool ret;							\
									\
	spin_lock_irqsave(&subpage->lock, flags);			\
	ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit,	\
				len >> fs_info->sectorsize_bits);	\
	spin_unlock_irqrestore(&subpage->lock, flags);			\
	return ret;							\
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);

/*
 * Note that, in selftests (extent-io-tests), we can have empty fs_info passed
 * in. We only test sectorsize == PAGE_SIZE cases so far, thus we can fall
 * back to regular sectorsize branch.
 */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, folio_set_func,			\
				 folio_clear_func, folio_test_func)	\
void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info,	\
			    struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_set_##name(fs_info, folio, start, len);		\
}									\
void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info,	\
			      struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clear_##name(fs_info, folio, start, len);		\
}									\
bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info,	\
			     struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping))			\
		return folio_test_func(folio);				\
	return btrfs_subpage_test_##name(fs_info, folio, start, len);	\
}									\
void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
				  struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	btrfs_subpage_set_##name(fs_info, folio, start, len);		\
}									\
void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
				    struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	btrfs_subpage_clear_##name(fs_info, folio, start, len);		\
}									\
bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
				   struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping))			\
		return folio_test_func(folio);				\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	return btrfs_subpage_test_##name(fs_info, folio, start, len);	\
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, folio_mark_uptodate, folio_clear_uptodate,
			 folio_test_uptodate);
IMPLEMENT_BTRFS_PAGE_OPS(dirty, folio_mark_dirty, folio_clear_dirty_for_io,
			 folio_test_dirty);
IMPLEMENT_BTRFS_PAGE_OPS(writeback, folio_start_writeback, folio_end_writeback,
			 folio_test_writeback);
IMPLEMENT_BTRFS_PAGE_OPS(ordered, folio_set_ordered, folio_clear_ordered,
			 folio_test_ordered);
IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
			 folio_test_checked);
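
/*
 * The macro above generates the btrfs_folio_{set,clear,test}_* and
 * btrfs_folio_clamp_* helpers. Illustrative use (hypothetical caller):
 *
 *	btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
 *
 * behaves like folio_clear_dirty_for_io() on regular filesystems, but only
 * clears the clamped sector range on subpage filesystems.
 */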

/*
 * Make sure not only the page dirty bit is cleared, but also the subpage
 * dirty bit is cleared.
 */
void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	ASSERT(!folio_test_dirty(folio));
	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(subpage_test_bitmap_all_zero(fs_info, subpage, dirty));
}

/*
 * Handle the different ways a locked folio can reach us:
 *
 * - Folio locked by plain lock_page()/folio_lock()
 *   It should not have any subpage::writers count.
 *   Can be unlocked by folio_unlock().
 *   This is the most common locked folio for __extent_writepage() called
 *   inside extent_write_cache_pages().
 *   Rarer cases include the @locked_page from extent_write_locked_range().
 *
 * - Folio locked by lock_delalloc_pages()
 *   There is only one caller, all pages except @locked_page for
 *   extent_write_locked_range().
 *   In this case, we have to call the subpage helper to handle it.
 */
void btrfs_folio_unlock_writer(struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;

	ASSERT(folio_test_locked(folio));
	/* For the non-subpage case, we just unlock the folio. */
	if (!btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_unlock(folio);
		return;
	}

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	subpage = folio_get_private(folio);

	/*
	 * For the subpage case, there are two types of locked folio: with or
	 * without a writers count.
	 *
	 * Since we own the folio lock, no one else could touch
	 * subpage::writers, and we are safe to do several atomic operations
	 * without spinlock.
	 */
	if (atomic_read(&subpage->writers) == 0) {
		/* No writers, locked by plain lock_page(). */
		folio_unlock(folio);
		return;
	}

	/* Have writers, use the proper subpage helper to end it. */
	btrfs_folio_end_writer_lock(fs_info, folio, start, len);
}

#define GET_SUBPAGE_BITMAP(subpage, subpage_info, name, dst)		\
	bitmap_cut(dst, subpage->bitmaps, 0,				\
		   subpage_info->name##_offset, subpage_info->bitmap_nr_bits)

void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
				      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage_info *subpage_info = fs_info->subpage_info;
	struct btrfs_subpage *subpage;
	unsigned long uptodate_bitmap;
	unsigned long dirty_bitmap;
	unsigned long writeback_bitmap;
	unsigned long ordered_bitmap;
	unsigned long checked_bitmap;
	unsigned long locked_bitmap;
	unsigned long flags;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(subpage_info);
	subpage = folio_get_private(folio);

	spin_lock_irqsave(&subpage->lock, flags);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, uptodate, &uptodate_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, dirty, &dirty_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, writeback, &writeback_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, ordered, &ordered_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, checked, &checked_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, locked, &locked_bitmap);
	spin_unlock_irqrestore(&subpage->lock, flags);

	dump_page(folio_page(folio, 0), "btrfs subpage dump");
	btrfs_warn(fs_info,
"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl locked=%*pbl",
		   start, len, folio_pos(folio),
		   subpage_info->bitmap_nr_bits, &uptodate_bitmap,
		   subpage_info->bitmap_nr_bits, &dirty_bitmap,
		   subpage_info->bitmap_nr_bits, &writeback_bitmap,
		   subpage_info->bitmap_nr_bits, &ordered_bitmap,
		   subpage_info->bitmap_nr_bits, &checked_bitmap,
		   subpage_info->bitmap_nr_bits, &locked_bitmap);
}
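
/*
 * Illustrative use (hypothetical debugging site): when subpage state looks
 * inconsistent, e.g. an ASSERT() on the dirty bitmap is about to trip, one
 * can dump every bitmap of the folio first:
 *
 *	btrfs_subpage_dump_bitmap(fs_info, folio, start, len);
 *
 * which warns with all six bitmaps in %*pbl (bit-list) format.
 */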