// SPDX-License-Identifier: GPL-2.0

#include <linux/slab.h>
#include "messages.h"
#include "ctree.h"
#include "subpage.h"
#include "btrfs_inode.h"

/*
 * Subpage (sectorsize < PAGE_SIZE) support overview:
 *
 * Limitations:
 *
 * - Only 64K page size is supported for now
 *   This is to make metadata handling easier, as a 64K page ensures that
 *   any nodesize fits inside one page, so we don't need to handle cases
 *   where a tree block crosses several pages.
 *
 * - Only metadata read-write for now
 *   The data read-write part is in development.
 *
 * - Metadata can't cross the 64K page boundary
 *   btrfs-progs and the kernel have ensured this for a while, thus only
 *   ancient filesystems could have such a problem. For such a case, do a
 *   graceful rejection.
 *
 * Special behavior:
 *
 * - Metadata
 *   Metadata read is fully supported.
 *   That is, reading one tree block will only trigger the read for the
 *   needed range; other unrelated ranges in the same page will not be
 *   touched.
 *
 *   Metadata write support is partial.
 *   The writeback is still for the full page, but we will only submit
 *   the dirty extent buffers in the page.
 *
 *   This means, if we have a metadata page like this:
 *
 *   Page offset
 *     0         16K         32K         48K        64K
 *     |/////////|           |///////////|
 *        \- Tree block A       \- Tree block B
 *
 *   Even if we just want to writeback tree block A, we will also writeback
 *   tree block B if it's also dirty.
 *
 *   This may cause extra metadata writeback, which results in more COW.
 *
 * Implementation:
 *
 * - Common
 *   Both metadata and data will use a new structure, btrfs_subpage, to
 *   record the status of each sector inside a page. This provides the extra
 *   granularity needed.
 *
 * - Metadata
 *   Since we have multiple tree blocks inside one page, we can't rely on page
 *   locking anymore, or we will have greatly reduced concurrency or even
 *   deadlocks (holding one tree lock while trying to lock another tree block
 *   in the same page).
 *
 *   Thus for metadata locking, subpage support relies on io_tree locking only.
 *   This means a slightly higher tree locking latency.
 */

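/*
 * Layout of btrfs_subpage::bitmaps, as used throughout this file: each
 * named state (uptodate, dirty, writeback, ordered, checked, locked) gets
 * one sub-bitmap of fs_info->sectors_per_page bits, packed back to back
 * at bit offset sectors_per_page * btrfs_bitmap_nr_<name>.
 *
 * Illustrative sketch only, assuming a 4K sectorsize on a 64K page
 * (sectors_per_page = 16) and the enum order listed above; the
 * authoritative order is enum btrfs_bitmap_nr_* in subpage.h:
 *
 *   bit:  0        16       32        48       64       80       96
 *         |uptodate|dirty   |writeback|ordered |checked |locked  |
 */
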
#if PAGE_SIZE > SZ_4K
bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping)
{
	if (fs_info->sectorsize >= PAGE_SIZE)
		return false;

	/*
	 * Only data pages (either through DIO or compression) can have no
	 * mapping. And if mapping->host is a data inode, it's subpage, as
	 * we have already ruled out the sectorsize >= PAGE_SIZE case.
	 */
	if (!mapping || !mapping->host || is_data_inode(BTRFS_I(mapping->host)))
		return true;

	/*
	 * Now the only remaining case is metadata, for which we only go the
	 * subpage routine if nodesize < PAGE_SIZE.
	 */
	if (fs_info->nodesize < PAGE_SIZE)
		return true;
	return false;
}
#endif

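/*
 * Summary of the decision above, for a filesystem where
 * sectorsize < PAGE_SIZE (anything else returns false immediately):
 *
 *   no mapping or no host (DIO/compressed data)  -> subpage
 *   mapping->host is a data inode                -> subpage
 *   metadata, nodesize <  PAGE_SIZE              -> subpage
 *   metadata, nodesize >= PAGE_SIZE              -> not subpage
 */
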
int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct folio *folio, enum btrfs_subpage_type type)
{
	struct btrfs_subpage *subpage;

	/*
	 * We have cases like a dummy extent buffer page, which is not mapped
	 * and doesn't need to be locked.
	 */
	if (folio->mapping)
		ASSERT(folio_test_locked(folio));

	/* Either not subpage, or the folio already has private attached. */
	if (!btrfs_is_subpage(fs_info, folio->mapping) || folio_test_private(folio))
		return 0;

	subpage = btrfs_alloc_subpage(fs_info, type);
	if (IS_ERR(subpage))
		return PTR_ERR(subpage);

	folio_attach_private(folio, subpage);
	return 0;
}

void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	/* Either not subpage, or the folio doesn't have private attached. */
	if (!btrfs_is_subpage(fs_info, folio->mapping) || !folio_test_private(folio))
		return;

	subpage = folio_detach_private(folio);
	ASSERT(subpage);
	btrfs_free_subpage(subpage);
}

struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
					  enum btrfs_subpage_type type)
{
	struct btrfs_subpage *ret;
	unsigned int real_size;

	ASSERT(fs_info->sectorsize < PAGE_SIZE);

	real_size = struct_size(ret, bitmaps,
			BITS_TO_LONGS(btrfs_bitmap_nr_max * fs_info->sectors_per_page));
	ret = kzalloc(real_size, GFP_NOFS);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&ret->lock);
	if (type == BTRFS_SUBPAGE_METADATA)
		atomic_set(&ret->eb_refs, 0);
	else
		atomic_set(&ret->nr_locked, 0);
	return ret;
}

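/*
 * Size math for the allocation above, illustrative only, assuming a 4K
 * sectorsize on a 64K page (sectors_per_page = 16) and six sub-bitmaps
 * (btrfs_bitmap_nr_max = 6):
 *
 *   bits      = 6 * 16 = 96
 *   longs     = BITS_TO_LONGS(96) = 2 on a 64-bit machine
 *   real_size = sizeof(struct btrfs_subpage) + 2 * sizeof(unsigned long)
 */
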
void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
	kfree(subpage);
}

/*
 * Increase the eb_refs of the current subpage.
 *
 * This is important for eb allocation, to prevent races with the freeing
 * of the last eb in the same page.
 * With the eb_refs increased before the eb is inserted into the radix tree,
 * detach_extent_buffer_page() won't detach the folio private while we're
 * still allocating the extent buffer.
 */
void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->i_private_lock);

	subpage = folio_get_private(folio);
	atomic_inc(&subpage->eb_refs);
}

void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->i_private_lock);

	subpage = folio_get_private(folio);
	ASSERT(atomic_read(&subpage->eb_refs));
	atomic_dec(&subpage->eb_refs);
}

static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	/* For subpage support, the folio must be a single page. */
	ASSERT(folio_order(folio) == 0);

	/* Basic checks */
	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));
	/*
	 * The range check only works for mapped pages; we can still have
	 * unmapped pages like dummy extent buffer pages.
	 */
	if (folio->mapping)
		ASSERT(folio_pos(folio) <= start &&
		       start + len <= folio_pos(folio) + PAGE_SIZE);
}

#define subpage_calc_start_bit(fs_info, folio, name, start, len)	\
({									\
	unsigned int __start_bit;					\
									\
	btrfs_subpage_assert(fs_info, folio, start, len);		\
	__start_bit = offset_in_page(start) >> fs_info->sectorsize_bits; \
	__start_bit += fs_info->sectors_per_page * btrfs_bitmap_nr_##name; \
	__start_bit;							\
})

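/*
 * Worked example for subpage_calc_start_bit(), illustrative only,
 * assuming a 4K sectorsize on a 64K page (sectors_per_page = 16) and
 * btrfs_bitmap_nr_dirty == 1:
 *
 *   start relative to the folio = 8K
 *   offset_in_page(start) >> sectorsize_bits = 8K >> 12 = 2
 *   __start_bit = 2 + 16 * 1 = 18
 *
 * I.e. the third sector's bit inside the dirty sub-bitmap.
 */
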
static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
{
	u64 orig_start = *start;
	u32 orig_len = *len;

	*start = max_t(u64, folio_pos(folio), orig_start);
	/*
	 * For certain call sites like btrfs_drop_pages(), we may have pages
	 * beyond the target range. In that case, just set @len to 0; subpage
	 * helpers can handle @len == 0 without any problem.
	 */
	if (folio_pos(folio) >= orig_start + orig_len)
		*len = 0;
	else
		*len = min_t(u64, folio_pos(folio) + PAGE_SIZE,
			     orig_start + orig_len) - *start;
}

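/*
 * Clamp example, illustrative only: for a folio covering [64K, 128K)
 * and a caller range of start = 60K, len = 16K (i.e. [60K, 76K)):
 *
 *   *start = max(64K, 60K)        = 64K
 *   *len   = min(128K, 76K) - 64K = 12K
 *
 * A folio entirely past the range ends up with *len == 0, which all
 * subpage helpers accept as a no-op.
 */
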
static bool btrfs_subpage_end_and_test_lock(const struct btrfs_fs_info *fs_info,
					    struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	const int nbits = (len >> fs_info->sectorsize_bits);
	unsigned long flags;
	unsigned int cleared = 0;
	int bit = start_bit;
	bool last;

	btrfs_subpage_assert(fs_info, folio, start, len);

	spin_lock_irqsave(&subpage->lock, flags);
	/*
	 * We have call sites passing @locked_page into
	 * extent_clear_unlock_delalloc() for the compression path.
	 *
	 * This @locked_page is locked by plain lock_page(), thus its
	 * subpage::locked is 0. Handle them in a special way.
	 */
	if (atomic_read(&subpage->nr_locked) == 0) {
		spin_unlock_irqrestore(&subpage->lock, flags);
		return true;
	}

	for_each_set_bit_from(bit, subpage->bitmaps, start_bit + nbits) {
		clear_bit(bit, subpage->bitmaps);
		cleared++;
	}
	ASSERT(atomic_read(&subpage->nr_locked) >= cleared);
	last = atomic_sub_and_test(cleared, &subpage->nr_locked);
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

/*
 * Handle different locked folios:
 *
 * - Non-subpage folio
 *   Just unlock it.
 *
 * - Folio locked but without any subpage range locked
 *   This happens either before writepage_delalloc(), or when the delalloc
 *   range has already been handled by a previous folio.
 *   We can simply unlock it.
 *
 * - Folio locked with subpage range locked
 *   We go through the locked sectors inside the range, clear their locked
 *   bitmap bits, decrease the locked sector count, and unlock the folio if
 *   that was the last locked range.
 */
void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
			  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);

	ASSERT(folio_test_locked(folio));

	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_unlock(folio);
		return;
	}

	/*
	 * For the subpage case, there are two types of locked folios: with
	 * or without a locked sector count.
	 *
	 * Since we own the folio lock, no one else could touch subpage::locked
	 * and we are safe to do several atomic operations without spinlock.
	 */
	if (atomic_read(&subpage->nr_locked) == 0) {
		/* No subpage lock, locked by plain lock_page(). */
		folio_unlock(folio);
		return;
	}

	btrfs_subpage_clamp_range(folio, &start, &len);
	if (btrfs_subpage_end_and_test_lock(fs_info, folio, start, len))
		folio_unlock(folio);
}

void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, unsigned long bitmap)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = fs_info->sectors_per_page * btrfs_bitmap_nr_locked;
	unsigned long flags;
	bool last = false;
	int cleared = 0;
	int bit;

	if (!btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_unlock(folio);
		return;
	}

	if (atomic_read(&subpage->nr_locked) == 0) {
		/* No subpage lock, locked by plain lock_page(). */
		folio_unlock(folio);
		return;
	}

	spin_lock_irqsave(&subpage->lock, flags);
	for_each_set_bit(bit, &bitmap, fs_info->sectors_per_page) {
		if (test_and_clear_bit(bit + start_bit, subpage->bitmaps))
			cleared++;
	}
	ASSERT(atomic_read(&subpage->nr_locked) >= cleared);
	last = atomic_sub_and_test(cleared, &subpage->nr_locked);
	spin_unlock_irqrestore(&subpage->lock, flags);
	if (last)
		folio_unlock(folio);
}

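/*
 * Example for btrfs_folio_end_lock_bitmap(), illustrative only, with
 * sectors_per_page = 4: passing bitmap = 0b0011 clears the locked bits
 * of the first two sectors, decreases nr_locked by the number of bits
 * actually cleared, and unlocks the folio only when nr_locked drops to
 * zero.
 */
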
#define subpage_test_bitmap_all_set(fs_info, subpage, name)		\
	bitmap_test_range_all_set(subpage->bitmaps,			\
			fs_info->sectors_per_page * btrfs_bitmap_nr_##name, \
			fs_info->sectors_per_page)

#define subpage_test_bitmap_all_zero(fs_info, subpage, name)		\
	bitmap_test_range_all_zero(subpage->bitmaps,			\
			fs_info->sectors_per_page * btrfs_bitmap_nr_##name, \
			fs_info->sectors_per_page)

void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
			     struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	spin_unlock_irqrestore(&subpage->lock, flags);
	folio_mark_dirty(folio);
}

/*
 * Extra clear_and_test function for the subpage dirty bitmap.
 *
 * Return true if we cleared the last dirty bits in the bitmap.
 * Return false otherwise.
 *
 * NOTE: Callers should manually clear the folio dirty flag for the true
 * case, as we have extra handling for tree blocks.
 */
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;
	bool last = false;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
		last = true;
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	bool last;

	last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, start, len);
	if (last)
		folio_clear_dirty_for_io(folio);
}

void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (!folio_test_writeback(folio))
		folio_start_writeback(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
				   struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
		ASSERT(folio_test_writeback(folio));
		folio_end_writeback(folio);
	}
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_set_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
		folio_clear_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
		folio_set_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Unlike set/clear, which depends on each page status, all test
 * operations work in the same way.
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
			       struct folio *folio, u64 start, u32 len)	\
{									\
	struct btrfs_subpage *subpage = folio_get_private(folio);	\
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,	\
						name, start, len);	\
	unsigned long flags;						\
	bool ret;							\
									\
	spin_lock_irqsave(&subpage->lock, flags);			\
	ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit,	\
				len >> fs_info->sectorsize_bits);	\
	spin_unlock_irqrestore(&subpage->lock, flags);			\
	return ret;							\
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);

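/*
 * Usage sketch for the generated helpers, illustrative only: check
 * whether a single sector is already uptodate before issuing a read.
 *
 *   if (btrfs_subpage_test_uptodate(fs_info, folio, cur,
 *                                   fs_info->sectorsize))
 *           ... sector already uptodate, no read needed ...
 */
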
/*
 * Note that in selftests (extent-io-tests), we can have a NULL fs_info
 * passed in. We only test sectorsize == PAGE_SIZE cases so far, thus we
 * can fall back to the regular sectorsize branch.
 */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, folio_set_func,			\
				 folio_clear_func, folio_test_func)	\
void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info,	\
			    struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_set_##name(fs_info, folio, start, len);		\
}									\
void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info,	\
			      struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clear_##name(fs_info, folio, start, len);		\
}									\
bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info,	\
			     struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping))			\
		return folio_test_func(folio);				\
	return btrfs_subpage_test_##name(fs_info, folio, start, len);	\
}									\
void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
				  struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	btrfs_subpage_set_##name(fs_info, folio, start, len);		\
}									\
void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
				    struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	btrfs_subpage_clear_##name(fs_info, folio, start, len);		\
}									\
bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
				   struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping))			\
		return folio_test_func(folio);				\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	return btrfs_subpage_test_##name(fs_info, folio, start, len);	\
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, folio_mark_uptodate, folio_clear_uptodate,
			 folio_test_uptodate);
IMPLEMENT_BTRFS_PAGE_OPS(dirty, folio_mark_dirty, folio_clear_dirty_for_io,
			 folio_test_dirty);
IMPLEMENT_BTRFS_PAGE_OPS(writeback, folio_start_writeback, folio_end_writeback,
			 folio_test_writeback);
IMPLEMENT_BTRFS_PAGE_OPS(ordered, folio_set_ordered, folio_clear_ordered,
			 folio_test_ordered);
IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
			 folio_test_checked);

/*
 * Make sure that not only the folio dirty bit is cleared, but also the
 * subpage dirty bits.
 */
void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;
	unsigned int start_bit;
	unsigned int nbits;
	unsigned long flags;

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	if (!btrfs_is_subpage(fs_info, folio->mapping)) {
		ASSERT(!folio_test_dirty(folio));
		return;
	}

	start_bit = subpage_calc_start_bit(fs_info, folio, dirty, start, len);
	nbits = len >> fs_info->sectorsize_bits;
	subpage = folio_get_private(folio);
	ASSERT(subpage);
	spin_lock_irqsave(&subpage->lock, flags);
	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * This is for a folio already locked by plain lock_page()/folio_lock(),
 * which has no subpage awareness.
 *
 * It populates the involved subpage ranges so that subpage helpers can
 * properly unlock them.
 */
void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
			  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;
	unsigned long flags;
	unsigned int start_bit;
	unsigned int nbits;
	int ret;

	ASSERT(folio_test_locked(folio));
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping))
		return;

	subpage = folio_get_private(folio);
	start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	nbits = len >> fs_info->sectorsize_bits;
	spin_lock_irqsave(&subpage->lock, flags);
	/* The target range should not yet be locked. */
	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
	bitmap_set(subpage->bitmaps, start_bit, nbits);
	ret = atomic_add_return(nbits, &subpage->nr_locked);
	ASSERT(ret <= fs_info->sectors_per_page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

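/*
 * Example, illustrative only, with a 4K sectorsize on a 64K page: after
 * btrfs_folio_set_lock(fs_info, folio, folio_pos(folio), 8K) on a freshly
 * locked folio, the locked sub-bitmap has bits 0-1 set and nr_locked == 2,
 * so btrfs_folio_end_lock() on the same range clears both bits and drops
 * the folio lock.
 */
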
#define GET_SUBPAGE_BITMAP(subpage, fs_info, name, dst)			\
{									\
	const int sectors_per_page = fs_info->sectors_per_page;		\
									\
	ASSERT(sectors_per_page < BITS_PER_LONG);			\
	*dst = bitmap_read(subpage->bitmaps,				\
			   sectors_per_page * btrfs_bitmap_nr_##name,	\
			   sectors_per_page);				\
}

void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
				      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;
	const u32 sectors_per_page = fs_info->sectors_per_page;
	unsigned long uptodate_bitmap;
	unsigned long dirty_bitmap;
	unsigned long writeback_bitmap;
	unsigned long ordered_bitmap;
	unsigned long checked_bitmap;
	unsigned long locked_bitmap;
	unsigned long flags;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(sectors_per_page > 1);
	subpage = folio_get_private(folio);

	spin_lock_irqsave(&subpage->lock, flags);
	GET_SUBPAGE_BITMAP(subpage, fs_info, uptodate, &uptodate_bitmap);
	GET_SUBPAGE_BITMAP(subpage, fs_info, dirty, &dirty_bitmap);
	GET_SUBPAGE_BITMAP(subpage, fs_info, writeback, &writeback_bitmap);
	GET_SUBPAGE_BITMAP(subpage, fs_info, ordered, &ordered_bitmap);
	GET_SUBPAGE_BITMAP(subpage, fs_info, checked, &checked_bitmap);
	GET_SUBPAGE_BITMAP(subpage, fs_info, locked, &locked_bitmap);
	spin_unlock_irqrestore(&subpage->lock, flags);

	dump_page(folio_page(folio, 0), "btrfs subpage dump");
	btrfs_warn(fs_info,
"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl locked=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
		   start, len, folio_pos(folio),
		   sectors_per_page, &uptodate_bitmap,
		   sectors_per_page, &dirty_bitmap,
		   sectors_per_page, &locked_bitmap,
		   sectors_per_page, &writeback_bitmap,
		   sectors_per_page, &ordered_bitmap,
		   sectors_per_page, &checked_bitmap);
}

void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
				    struct folio *folio,
				    unsigned long *ret_bitmap)
{
	struct btrfs_subpage *subpage;
	unsigned long flags;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(fs_info->sectors_per_page > 1);
	subpage = folio_get_private(folio);

	spin_lock_irqsave(&subpage->lock, flags);
	GET_SUBPAGE_BITMAP(subpage, fs_info, dirty, ret_bitmap);
	spin_unlock_irqrestore(&subpage->lock, flags);
}