v6.13.7 (fs/btrfs/subpage.c)
// SPDX-License-Identifier: GPL-2.0

#include <linux/slab.h>
#include "messages.h"
#include "ctree.h"
#include "subpage.h"
#include "btrfs_inode.h"

/*
 * Subpage (sectorsize < PAGE_SIZE) support overview:
 *
 * Limitations:
 *
 * - Only 64K page size is supported for now
 *   This is to make metadata handling easier, as a 64K page ensures
 *   every nodesize fits inside one page, so we don't need to handle
 *   cases where a tree block crosses several pages.
 *
 * - Only metadata read-write for now
 *   The data read-write part is in development.
 *
 * - Metadata can't cross the 64K page boundary
 *   btrfs-progs and the kernel have enforced this for a while, thus only
 *   ancient filesystems could have such a problem.  For such a case, do a
 *   graceful rejection.
 *
 * Special behavior:
 *
 * - Metadata
 *   Metadata read is fully supported.
 *   Meaning reading one tree block will only trigger the read for the
 *   needed range; other unrelated ranges in the same page will not be touched.
 *
 *   Metadata write support is partial.
 *   The writeback is still for the full page, but we will only submit
 *   the dirty extent buffers in the page.
 *
 *   This means, if we have a metadata page like this:
 *
 *   Page offset
 *   0         16K         32K         48K        64K
 *   |/////////|           |///////////|
 *        \- Tree block A        \- Tree block B
 *
 *   Even if we just want to write back tree block A, we will also write back
 *   tree block B if it's also dirty.
 *
 *   This may cause extra metadata writeback, which results in more COW.
 *
 * Implementation:
 *
 * - Common
 *   Both metadata and data use a new structure, btrfs_subpage, to record
 *   the status of each sector inside a page.  This provides the extra
 *   granularity needed.
 *
 * - Metadata
 *   Since we have multiple tree blocks inside one page, we can't rely on page
 *   locking anymore, or we would have greatly reduced concurrency or even
 *   deadlocks (holding one tree lock while trying to lock another tree block
 *   in the same page).
 *
 *   Thus for metadata locking, subpage support relies on io_tree locking only.
 *   This means a slightly higher tree locking latency.
 */

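/*
 * Worked example of the bitmap layout (a sketch, assuming 4K sectorsize on a
 * 64K page, i.e. sectors_per_page == 16; the group order follows the
 * btrfs_bitmap_nr_* enum in subpage.h):
 *
 *   bits [ 0 .. 15]   uptodate
 *   bits [16 .. 31]   dirty
 *   bits [32 .. 47]   writeback
 *   bits [48 .. 63]   ordered
 *   bits [64 .. 79]   checked
 *   bits [80 .. 95]   locked
 *
 * One bit per sector per group, so the whole array is
 * btrfs_bitmap_nr_max * sectors_per_page bits long.
 */
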
#if PAGE_SIZE > SZ_4K
bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping)
{
	if (fs_info->sectorsize >= PAGE_SIZE)
		return false;

	/*
	 * Only data pages (either through DIO or compression) can have no
	 * mapping. And if page->mapping->host is a data inode, it's subpage.
	 * As we have ruled out the sectorsize >= PAGE_SIZE case already.
	 */
	if (!mapping || !mapping->host || is_data_inode(BTRFS_I(mapping->host)))
		return true;

	/*
	 * Now the only remaining case is metadata, for which we only go the
	 * subpage routine if nodesize < PAGE_SIZE.
	 */
	if (fs_info->nodesize < PAGE_SIZE)
		return true;
	return false;
}
#endif
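
/*
 * For example (a sketch): on a 64K-page system with the usual 4K sectorsize,
 * data is always subpage, while metadata is subpage only when
 * nodesize < 64K.  On a 4K-page system the first check rules everything out.
 */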

int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct folio *folio, enum btrfs_subpage_type type)
{
	struct btrfs_subpage *subpage;

	/*
	 * We have cases like a dummy extent buffer page, which is not mapped
	 * and doesn't need to be locked.
	 */
	if (folio->mapping)
		ASSERT(folio_test_locked(folio));

	/* Either not subpage, or the folio already has private attached. */
	if (!btrfs_is_subpage(fs_info, folio->mapping) || folio_test_private(folio))
		return 0;

	subpage = btrfs_alloc_subpage(fs_info, type);
	if (IS_ERR(subpage))
		return PTR_ERR(subpage);

	folio_attach_private(folio, subpage);
	return 0;
}

void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	/* Either not subpage, or the folio has no private attached. */
	if (!btrfs_is_subpage(fs_info, folio->mapping) || !folio_test_private(folio))
		return;

	subpage = folio_detach_private(folio);
	ASSERT(subpage);
	btrfs_free_subpage(subpage);
}

struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
					  enum btrfs_subpage_type type)
{
	struct btrfs_subpage *ret;
	unsigned int real_size;

	ASSERT(fs_info->sectorsize < PAGE_SIZE);

	real_size = struct_size(ret, bitmaps,
			BITS_TO_LONGS(btrfs_bitmap_nr_max * fs_info->sectors_per_page));
	ret = kzalloc(real_size, GFP_NOFS);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&ret->lock);
	if (type == BTRFS_SUBPAGE_METADATA)
		atomic_set(&ret->eb_refs, 0);
	else
		atomic_set(&ret->nr_locked, 0);
	return ret;
}

void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
	kfree(subpage);
}

/*
 * Increase the eb_refs of current subpage.
 *
 * This is important for eb allocation, to prevent a race with the last eb
 * freeing of the same page.
 * With the eb_refs increased before the eb is inserted into the radix tree,
 * detach_extent_buffer_page() won't detach the folio private while we're
 * still allocating the extent buffer.
 */
void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->i_private_lock);

	subpage = folio_get_private(folio);
	atomic_inc(&subpage->eb_refs);
}

void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->i_private_lock);

	subpage = folio_get_private(folio);
	ASSERT(atomic_read(&subpage->eb_refs));
	atomic_dec(&subpage->eb_refs);
}

static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	/* For subpage support, the folio must be a single page. */
	ASSERT(folio_order(folio) == 0);

	/* Basic checks */
	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));
	/*
	 * The range check only works for mapped pages; we can still have
	 * unmapped pages like dummy extent buffer pages.
	 */
	if (folio->mapping)
		ASSERT(folio_pos(folio) <= start &&
		       start + len <= folio_pos(folio) + PAGE_SIZE);
}

#define subpage_calc_start_bit(fs_info, folio, name, start, len)	\
({									\
	unsigned int __start_bit;					\
									\
	btrfs_subpage_assert(fs_info, folio, start, len);		\
	__start_bit = offset_in_page(start) >> fs_info->sectorsize_bits; \
	__start_bit += fs_info->sectors_per_page * btrfs_bitmap_nr_##name; \
	__start_bit;							\
})
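
/*
 * For example (a sketch, assuming 4K sectorsize on a 64K page): for the
 * dirty group and a range starting at page offset 8K,
 *
 *   __start_bit = (8K >> 12) + 16 * btrfs_bitmap_nr_dirty = 2 + 16 * 1 = 18
 *
 * i.e. the third bit inside the dirty group.
 */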

static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
{
	u64 orig_start = *start;
	u32 orig_len = *len;

	*start = max_t(u64, folio_pos(folio), orig_start);
	/*
	 * For certain call sites like btrfs_drop_pages(), we may have pages
	 * beyond the target range.  In that case, just set @len to 0; the
	 * subpage helpers can handle @len == 0 without any problem.
	 */
	if (folio_pos(folio) >= orig_start + orig_len)
		*len = 0;
	else
		*len = min_t(u64, folio_pos(folio) + PAGE_SIZE,
			     orig_start + orig_len) - *start;
}

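/*
 * For example (a sketch): for a folio covering 64K..128K and a range with
 * start == 60K, len == 16K, the range is clamped to start == 64K,
 * len == 12K; a folio entirely past the range ends up with len == 0.
 */
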
static bool btrfs_subpage_end_and_test_lock(const struct btrfs_fs_info *fs_info,
					    struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	const int nbits = (len >> fs_info->sectorsize_bits);
	unsigned long flags;
	unsigned int cleared = 0;
	int bit = start_bit;
	bool last;

	btrfs_subpage_assert(fs_info, folio, start, len);

	spin_lock_irqsave(&subpage->lock, flags);
	/*
	 * We have call sites passing @locked_page into
	 * extent_clear_unlock_delalloc() for the compression path.
	 *
	 * This @locked_page is locked by plain lock_page(), thus its
	 * subpage::locked is 0.  Handle it in a special way.
	 */
	if (atomic_read(&subpage->nr_locked) == 0) {
		spin_unlock_irqrestore(&subpage->lock, flags);
		return true;
	}

	for_each_set_bit_from(bit, subpage->bitmaps, start_bit + nbits) {
		clear_bit(bit, subpage->bitmaps);
		cleared++;
	}
	ASSERT(atomic_read(&subpage->nr_locked) >= cleared);
	last = atomic_sub_and_test(cleared, &subpage->nr_locked);
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

/*
 * Handle different locked folios:
 *
 * - Non-subpage folio
 *   Just unlock it.
 *
 * - folio locked but without any subpage range locked
 *   This happens either before writepage_delalloc() or when the delalloc
 *   range has already been handled by a previous folio.
 *   We can simply unlock it.
 *
 * - folio locked with subpage range locked
 *   We go through the locked sectors inside the range, clear their bits in
 *   the locked bitmap, decrease the locked-sector count, and unlock the
 *   folio if that was the last locked range.
 */
void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
			  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);

	ASSERT(folio_test_locked(folio));

	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_unlock(folio);
		return;
	}

	/*
	 * For the subpage case, there are two types of locked folios: with or
	 * without a locked-sector count.
	 *
	 * Since we own the folio lock, no one else could touch subpage::locked
	 * and we are safe to do several atomic operations without a spinlock.
	 */
	if (atomic_read(&subpage->nr_locked) == 0) {
		/* No subpage lock, locked by plain lock_page(). */
		folio_unlock(folio);
		return;
	}

	btrfs_subpage_clamp_range(folio, &start, &len);
	if (btrfs_subpage_end_and_test_lock(fs_info, folio, start, len))
		folio_unlock(folio);
}

void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, unsigned long bitmap)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = fs_info->sectors_per_page * btrfs_bitmap_nr_locked;
	unsigned long flags;
	bool last = false;
	int cleared = 0;
	int bit;

	if (!btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_unlock(folio);
		return;
	}

	if (atomic_read(&subpage->nr_locked) == 0) {
		/* No subpage lock, locked by plain lock_page(). */
		folio_unlock(folio);
		return;
	}

	spin_lock_irqsave(&subpage->lock, flags);
	for_each_set_bit(bit, &bitmap, fs_info->sectors_per_page) {
		if (test_and_clear_bit(bit + start_bit, subpage->bitmaps))
			cleared++;
	}
	ASSERT(atomic_read(&subpage->nr_locked) >= cleared);
	last = atomic_sub_and_test(cleared, &subpage->nr_locked);
	spin_unlock_irqrestore(&subpage->lock, flags);
	if (last)
		folio_unlock(folio);
}

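/*
 * For example (a sketch): @bitmap is relative to the start of the locked
 * group, so bitmap == 0b0011 asks to unlock the first two sectors of the
 * folio, and only the bits that were actually set are subtracted from
 * subpage::nr_locked.
 */
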
#define subpage_test_bitmap_all_set(fs_info, subpage, name)		\
	bitmap_test_range_all_set(subpage->bitmaps,			\
			fs_info->sectors_per_page * btrfs_bitmap_nr_##name, \
			fs_info->sectors_per_page)

#define subpage_test_bitmap_all_zero(fs_info, subpage, name)		\
	bitmap_test_range_all_zero(subpage->bitmaps,			\
			fs_info->sectors_per_page * btrfs_bitmap_nr_##name, \
			fs_info->sectors_per_page)

void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
			     struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	spin_unlock_irqrestore(&subpage->lock, flags);
	folio_mark_dirty(folio);
}

/*
 * Extra clear_and_test function for the subpage dirty bitmap.
 *
 * Return true if we cleared the last bits in the dirty bitmap.
 * Return false otherwise.
 *
 * NOTE: Callers should manually clear the page dirty flag for the true case,
 * as we have extra handling for tree blocks.
 */
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;
	bool last = false;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
		last = true;
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	bool last;

	last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, start, len);
	if (last)
		folio_clear_dirty_for_io(folio);
}

void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (!folio_test_writeback(folio))
		folio_start_writeback(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
				   struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
		ASSERT(folio_test_writeback(folio));
		folio_end_writeback(folio);
	}
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_set_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
		folio_clear_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
		folio_set_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Unlike set/clear, which depends on each page status, for test all bits
 * are tested in the same way.
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
			       struct folio *folio, u64 start, u32 len)	\
{									\
	struct btrfs_subpage *subpage = folio_get_private(folio);	\
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,	\
						name, start, len);	\
	unsigned long flags;						\
	bool ret;							\
									\
	spin_lock_irqsave(&subpage->lock, flags);			\
	ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit,	\
				len >> fs_info->sectorsize_bits);	\
	spin_unlock_irqrestore(&subpage->lock, flags);			\
	return ret;							\
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);

/*
 * Note that in selftests (extent-io-tests), we can have a NULL fs_info passed
 * in.  We only test sectorsize == PAGE_SIZE cases so far, thus we can fall
 * back to the regular sectorsize branch.
 */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, folio_set_func,			\
				 folio_clear_func, folio_test_func)	\
void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info,	\
			    struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_set_##name(fs_info, folio, start, len);		\
}									\
void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info,	\
			      struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clear_##name(fs_info, folio, start, len);		\
}									\
bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info,	\
			     struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping))			\
		return folio_test_func(folio);				\
	return btrfs_subpage_test_##name(fs_info, folio, start, len);	\
}									\
void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
				  struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	btrfs_subpage_set_##name(fs_info, folio, start, len);		\
}									\
void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
				    struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	btrfs_subpage_clear_##name(fs_info, folio, start, len);		\
}									\
bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info,	\
				   struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping))			\
		return folio_test_func(folio);				\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	return btrfs_subpage_test_##name(fs_info, folio, start, len);	\
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, folio_mark_uptodate, folio_clear_uptodate,
			 folio_test_uptodate);
IMPLEMENT_BTRFS_PAGE_OPS(dirty, folio_mark_dirty, folio_clear_dirty_for_io,
			 folio_test_dirty);
IMPLEMENT_BTRFS_PAGE_OPS(writeback, folio_start_writeback, folio_end_writeback,
			 folio_test_writeback);
IMPLEMENT_BTRFS_PAGE_OPS(ordered, folio_set_ordered, folio_clear_ordered,
			 folio_test_ordered);
IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
			 folio_test_checked);

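/*
 * For example (a sketch), the dirty instantiation above generates
 * btrfs_folio_set_dirty(), btrfs_folio_clear_dirty(), btrfs_folio_test_dirty()
 * and the clamped variants, so callers can simply do:
 *
 *	btrfs_folio_set_dirty(fs_info, folio, start, len);
 *
 * and get the plain folio flag on regular filesystems or the per-sector
 * bitmap update on subpage filesystems.
 */
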
/*
 * Make sure not only the page dirty bit is cleared, but also the subpage
 * dirty bit is cleared.
 */
void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;
	unsigned int start_bit;
	unsigned int nbits;
	unsigned long flags;

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	if (!btrfs_is_subpage(fs_info, folio->mapping)) {
		ASSERT(!folio_test_dirty(folio));
		return;
	}

	start_bit = subpage_calc_start_bit(fs_info, folio, dirty, start, len);
	nbits = len >> fs_info->sectorsize_bits;
	subpage = folio_get_private(folio);
	ASSERT(subpage);
	spin_lock_irqsave(&subpage->lock, flags);
	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * This is for folios already locked by plain lock_page()/folio_lock(), which
 * doesn't have any subpage awareness.
 *
 * This populates the involved subpage ranges so that subpage helpers can
 * properly unlock them.
 */
void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
			  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;
	unsigned long flags;
	unsigned int start_bit;
	unsigned int nbits;
	int ret;

	ASSERT(folio_test_locked(folio));
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping))
		return;

	subpage = folio_get_private(folio);
	start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	nbits = len >> fs_info->sectorsize_bits;
	spin_lock_irqsave(&subpage->lock, flags);
	/* The target range should not yet be locked. */
	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
	bitmap_set(subpage->bitmaps, start_bit, nbits);
	ret = atomic_add_return(nbits, &subpage->nr_locked);
	ASSERT(ret <= fs_info->sectors_per_page);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

#define GET_SUBPAGE_BITMAP(subpage, fs_info, name, dst)			\
{									\
	const int sectors_per_page = fs_info->sectors_per_page;		\
									\
	ASSERT(sectors_per_page < BITS_PER_LONG);			\
	*dst = bitmap_read(subpage->bitmaps,				\
			   sectors_per_page * btrfs_bitmap_nr_##name,	\
			   sectors_per_page);				\
}

void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
				      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;
	const u32 sectors_per_page = fs_info->sectors_per_page;
	unsigned long uptodate_bitmap;
	unsigned long dirty_bitmap;
	unsigned long writeback_bitmap;
	unsigned long ordered_bitmap;
	unsigned long checked_bitmap;
	unsigned long locked_bitmap;
	unsigned long flags;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(sectors_per_page > 1);
	subpage = folio_get_private(folio);

	spin_lock_irqsave(&subpage->lock, flags);
	GET_SUBPAGE_BITMAP(subpage, fs_info, uptodate, &uptodate_bitmap);
	GET_SUBPAGE_BITMAP(subpage, fs_info, dirty, &dirty_bitmap);
	GET_SUBPAGE_BITMAP(subpage, fs_info, writeback, &writeback_bitmap);
	GET_SUBPAGE_BITMAP(subpage, fs_info, ordered, &ordered_bitmap);
	GET_SUBPAGE_BITMAP(subpage, fs_info, checked, &checked_bitmap);
	GET_SUBPAGE_BITMAP(subpage, fs_info, locked, &locked_bitmap);
	spin_unlock_irqrestore(&subpage->lock, flags);

	dump_page(folio_page(folio, 0), "btrfs subpage dump");
	btrfs_warn(fs_info,
"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl locked=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
		    start, len, folio_pos(folio),
		    sectors_per_page, &uptodate_bitmap,
		    sectors_per_page, &dirty_bitmap,
		    sectors_per_page, &locked_bitmap,
		    sectors_per_page, &writeback_bitmap,
		    sectors_per_page, &ordered_bitmap,
		    sectors_per_page, &checked_bitmap);
}

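/*
 * Example output (hypothetical, 16 sectors per page): "dirty=2-5" would mean
 * sectors 2-5 (page offsets 8K-24K with 4K sectors) are dirty while the rest
 * of the folio is clean.
 */
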
void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
				    struct folio *folio,
				    unsigned long *ret_bitmap)
{
	struct btrfs_subpage *subpage;
	unsigned long flags;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(fs_info->sectors_per_page > 1);
	subpage = folio_get_private(folio);

	spin_lock_irqsave(&subpage->lock, flags);
	GET_SUBPAGE_BITMAP(subpage, fs_info, dirty, ret_bitmap);
	spin_unlock_irqrestore(&subpage->lock, flags);
}
v6.9.4 (fs/btrfs/subpage.c)
// SPDX-License-Identifier: GPL-2.0

#include <linux/slab.h>
#include "messages.h"
#include "ctree.h"
#include "subpage.h"
#include "btrfs_inode.h"

/*
 * Subpage (sectorsize < PAGE_SIZE) support overview:
 *
 * Limitations:
 *
 * - Only 64K page size is supported for now
 *   This is to make metadata handling easier, as a 64K page ensures
 *   every nodesize fits inside one page, so we don't need to handle
 *   cases where a tree block crosses several pages.
 *
 * - Only metadata read-write for now
 *   The data read-write part is in development.
 *
 * - Metadata can't cross the 64K page boundary
 *   btrfs-progs and the kernel have enforced this for a while, thus only
 *   ancient filesystems could have such a problem.  For such a case, do a
 *   graceful rejection.
 *
 * Special behavior:
 *
 * - Metadata
 *   Metadata read is fully supported.
 *   Meaning reading one tree block will only trigger the read for the
 *   needed range; other unrelated ranges in the same page will not be touched.
 *
 *   Metadata write support is partial.
 *   The writeback is still for the full page, but we will only submit
 *   the dirty extent buffers in the page.
 *
 *   This means, if we have a metadata page like this:
 *
 *   Page offset
 *   0         16K         32K         48K        64K
 *   |/////////|           |///////////|
 *        \- Tree block A        \- Tree block B
 *
 *   Even if we just want to write back tree block A, we will also write back
 *   tree block B if it's also dirty.
 *
 *   This may cause extra metadata writeback, which results in more COW.
 *
 * Implementation:
 *
 * - Common
 *   Both metadata and data use a new structure, btrfs_subpage, to record
 *   the status of each sector inside a page.  This provides the extra
 *   granularity needed.
 *
 * - Metadata
 *   Since we have multiple tree blocks inside one page, we can't rely on page
 *   locking anymore, or we would have greatly reduced concurrency or even
 *   deadlocks (holding one tree lock while trying to lock another tree block
 *   in the same page).
 *
 *   Thus for metadata locking, subpage support relies on io_tree locking only.
 *   This means a slightly higher tree locking latency.
 */

bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping)
{
	if (fs_info->sectorsize >= PAGE_SIZE)
		return false;

	/*
	 * Only data pages (either through DIO or compression) can have no
	 * mapping. And if page->mapping->host is a data inode, it's subpage.
	 * As we have ruled out the sectorsize >= PAGE_SIZE case already.
	 */
	if (!mapping || !mapping->host || is_data_inode(mapping->host))
		return true;

	/*
	 * Now the only remaining case is metadata, for which we only go the
	 * subpage routine if nodesize < PAGE_SIZE.
	 */
	if (fs_info->nodesize < PAGE_SIZE)
		return true;
	return false;
}

void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize)
{
	unsigned int cur = 0;
	unsigned int nr_bits;

	ASSERT(IS_ALIGNED(PAGE_SIZE, sectorsize));

	nr_bits = PAGE_SIZE / sectorsize;
	subpage_info->bitmap_nr_bits = nr_bits;

	subpage_info->uptodate_offset = cur;
	cur += nr_bits;

	subpage_info->dirty_offset = cur;
	cur += nr_bits;

	subpage_info->writeback_offset = cur;
	cur += nr_bits;

	subpage_info->ordered_offset = cur;
	cur += nr_bits;

	subpage_info->checked_offset = cur;
	cur += nr_bits;

	subpage_info->locked_offset = cur;
	cur += nr_bits;

	subpage_info->total_nr_bits = cur;
}

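/*
 * Worked example (a sketch, assuming 4K sectorsize on a 64K page):
 * nr_bits = 16, so the offsets come out as uptodate=0, dirty=16,
 * writeback=32, ordered=48, checked=64, locked=80, total_nr_bits=96.
 */
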
int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
			 struct folio *folio, enum btrfs_subpage_type type)
{
	struct btrfs_subpage *subpage;

	/*
	 * We have cases like a dummy extent buffer page, which is not mapped
	 * and doesn't need to be locked.
	 */
	if (folio->mapping)
		ASSERT(folio_test_locked(folio));

	/* Either not subpage, or the folio already has private attached. */
	if (!btrfs_is_subpage(fs_info, folio->mapping) || folio_test_private(folio))
		return 0;

	subpage = btrfs_alloc_subpage(fs_info, type);
	if (IS_ERR(subpage))
		return PTR_ERR(subpage);

	folio_attach_private(folio, subpage);
	return 0;
}

void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	/* Either not subpage, or the folio has no private attached. */
	if (!btrfs_is_subpage(fs_info, folio->mapping) || !folio_test_private(folio))
		return;

	subpage = folio_detach_private(folio);
	ASSERT(subpage);
	btrfs_free_subpage(subpage);
}

struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
					  enum btrfs_subpage_type type)
{
	struct btrfs_subpage *ret;
	unsigned int real_size;

	ASSERT(fs_info->sectorsize < PAGE_SIZE);

	real_size = struct_size(ret, bitmaps,
			BITS_TO_LONGS(fs_info->subpage_info->total_nr_bits));
	ret = kzalloc(real_size, GFP_NOFS);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&ret->lock);
	if (type == BTRFS_SUBPAGE_METADATA) {
		atomic_set(&ret->eb_refs, 0);
	} else {
		atomic_set(&ret->readers, 0);
		atomic_set(&ret->writers, 0);
	}
	return ret;
}

void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
	kfree(subpage);
}

/*
 * Increase the eb_refs of current subpage.
 *
 * This is important for eb allocation, to prevent a race with the last eb
 * freeing of the same page.
 * With the eb_refs increased before the eb is inserted into the radix tree,
 * detach_extent_buffer_page() won't detach the folio private while we're
 * still allocating the extent buffer.
 */
void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->i_private_lock);

	subpage = folio_get_private(folio);
	atomic_inc(&subpage->eb_refs);
}

void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio->mapping);
	lockdep_assert_held(&folio->mapping->i_private_lock);

	subpage = folio_get_private(folio);
	ASSERT(atomic_read(&subpage->eb_refs));
	atomic_dec(&subpage->eb_refs);
}

static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	/* For subpage support, the folio must be a single page. */
	ASSERT(folio_order(folio) == 0);

	/* Basic checks */
	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));
	/*
	 * The range check only works for mapped pages; we can still have
	 * unmapped pages like dummy extent buffer pages.
	 */
	if (folio->mapping)
		ASSERT(folio_pos(folio) <= start &&
		       start + len <= folio_pos(folio) + PAGE_SIZE);
}

#define subpage_calc_start_bit(fs_info, folio, name, start, len)	\
({									\
	unsigned int start_bit;						\
									\
	btrfs_subpage_assert(fs_info, folio, start, len);		\
	start_bit = offset_in_page(start) >> fs_info->sectorsize_bits;	\
	start_bit += fs_info->subpage_info->name##_offset;		\
	start_bit;							\
})

void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	const int nbits = len >> fs_info->sectorsize_bits;
	unsigned long flags;

	btrfs_subpage_assert(fs_info, folio, start, len);

	spin_lock_irqsave(&subpage->lock, flags);
	/*
	 * Even though it's just for reading the page, no one should have
	 * locked the subpage range.
	 */
	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
	bitmap_set(subpage->bitmaps, start_bit, nbits);
	atomic_add(nbits, &subpage->readers);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
			      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	const int nbits = len >> fs_info->sectorsize_bits;
	unsigned long flags;
	bool is_data;
	bool last;

	btrfs_subpage_assert(fs_info, folio, start, len);
	is_data = is_data_inode(folio->mapping->host);

	spin_lock_irqsave(&subpage->lock, flags);

	/* The range should have already been locked. */
	ASSERT(bitmap_test_range_all_set(subpage->bitmaps, start_bit, nbits));
	ASSERT(atomic_read(&subpage->readers) >= nbits);

	bitmap_clear(subpage->bitmaps, start_bit, nbits);
	last = atomic_sub_and_test(nbits, &subpage->readers);

	/*
	 * For data we need to unlock the page if the last read has finished.
	 *
	 * Please don't replace @last with an atomic_sub_and_test() call inside
	 * the if () condition, as we always want the atomic_sub_and_test() to
	 * be executed.
	 */
	if (is_data && last)
		folio_unlock(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

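/*
 * For example (a sketch): a 16K read on a 64K page with 4K sectors sets four
 * bits in the locked group and adds 4 to subpage::readers; the folio itself
 * is only unlocked once the last reader of a data folio finishes.
 */
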
static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
{
	u64 orig_start = *start;
	u32 orig_len = *len;

	*start = max_t(u64, folio_pos(folio), orig_start);
	/*
	 * For certain call sites like btrfs_drop_pages(), we may have pages
	 * beyond the target range.  In that case, just set @len to 0; the
	 * subpage helpers can handle @len == 0 without any problem.
	 */
	if (folio_pos(folio) >= orig_start + orig_len)
		*len = 0;
	else
		*len = min_t(u64, folio_pos(folio) + PAGE_SIZE,
			     orig_start + orig_len) - *start;
}

static void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
				       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	const int nbits = (len >> fs_info->sectorsize_bits);
	unsigned long flags;
	int ret;

	btrfs_subpage_assert(fs_info, folio, start, len);

	spin_lock_irqsave(&subpage->lock, flags);
	ASSERT(atomic_read(&subpage->readers) == 0);
	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
	bitmap_set(subpage->bitmaps, start_bit, nbits);
	ret = atomic_add_return(nbits, &subpage->writers);
	ASSERT(ret == nbits);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
					      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
	const int nbits = (len >> fs_info->sectorsize_bits);
	unsigned long flags;
	bool last;

	btrfs_subpage_assert(fs_info, folio, start, len);

	spin_lock_irqsave(&subpage->lock, flags);
	/*
	 * We have call sites passing @locked_page into
	 * extent_clear_unlock_delalloc() for the compression path.
	 *
	 * This @locked_page is locked by plain lock_page(), thus its
	 * subpage::writers is 0.  Handle it in a special way.
	 */
	if (atomic_read(&subpage->writers) == 0) {
		spin_unlock_irqrestore(&subpage->lock, flags);
		return true;
	}

	ASSERT(atomic_read(&subpage->writers) >= nbits);
	/* The target range should have been locked. */
	ASSERT(bitmap_test_range_all_set(subpage->bitmaps, start_bit, nbits));
	bitmap_clear(subpage->bitmaps, start_bit, nbits);
	last = atomic_sub_and_test(nbits, &subpage->writers);
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

/*
 * Lock a folio for delalloc page writeback.
 *
 * Return -EAGAIN if the page is not properly initialized.
 * Return 0 with the page locked and the writer counter updated.
 *
 * Even with 0 returned, the page still needs an extra check to make sure
 * it's really the correct page, as the caller is using
 * filemap_get_folios_contig(), which can race with page invalidation.
 */
int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_lock(folio);
		return 0;
	}
	folio_lock(folio);
	if (!folio_test_private(folio) || !folio_get_private(folio)) {
		folio_unlock(folio);
		return -EAGAIN;
	}
	btrfs_subpage_clamp_range(folio, &start, &len);
	btrfs_subpage_start_writer(fs_info, folio, start, len);
	return 0;
}

void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_unlock(folio);
		return;
	}
	btrfs_subpage_clamp_range(folio, &start, &len);
	if (btrfs_subpage_end_and_test_writer(fs_info, folio, start, len))
		folio_unlock(folio);
}

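/*
 * Typical usage (a sketch): the delalloc writeback path takes the range with
 * btrfs_folio_start_writer_lock(fs_info, folio, start, len) and drops it with
 * btrfs_folio_end_writer_lock(fs_info, folio, start, len); the folio lock is
 * only released once the last subpage writer for it is gone.
 */
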
#define subpage_test_bitmap_all_set(fs_info, subpage, name)		\
	bitmap_test_range_all_set(subpage->bitmaps,			\
			fs_info->subpage_info->name##_offset,		\
			fs_info->subpage_info->bitmap_nr_bits)

#define subpage_test_bitmap_all_zero(fs_info, subpage, name)		\
	bitmap_test_range_all_zero(subpage->bitmaps,			\
			fs_info->subpage_info->name##_offset,		\
			fs_info->subpage_info->bitmap_nr_bits)

void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
				struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							uptodate, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_uptodate(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info,
			     struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	spin_unlock_irqrestore(&subpage->lock, flags);
	folio_mark_dirty(folio);
}

/*
 * Extra clear_and_test function for the subpage dirty bitmap.
 *
 * Return true if we cleared the last bits in the dirty bitmap.
 * Return false otherwise.
 *
 * NOTE: Callers should manually clear the page dirty flag for the true case,
 * as we have extra handling for tree blocks.
 */
bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							dirty, start, len);
	unsigned long flags;
	bool last = false;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
		last = true;
	spin_unlock_irqrestore(&subpage->lock, flags);
	return last;
}

void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	bool last;

	last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, start, len);
	if (last)
		folio_clear_dirty_for_io(folio);
}

void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (!folio_test_writeback(folio))
		folio_start_writeback(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info,
				   struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							writeback, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
		ASSERT(folio_test_writeback(folio));
		folio_end_writeback(folio);
	}
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_set_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							ordered, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
		folio_clear_ordered(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
		folio_set_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
							checked, start, len);
	unsigned long flags;

	spin_lock_irqsave(&subpage->lock, flags);
	bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
	folio_clear_checked(folio);
	spin_unlock_irqrestore(&subpage->lock, flags);
}

/*
 * Unlike set/clear, which depends on each page status, for test all bits
 * are tested in the same way.
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
			       struct folio *folio, u64 start, u32 len)	\
{									\
	struct btrfs_subpage *subpage = folio_get_private(folio);	\
	unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,	\
						name, start, len);	\
	unsigned long flags;						\
	bool ret;							\
									\
	spin_lock_irqsave(&subpage->lock, flags);			\
	ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit,	\
				len >> fs_info->sectorsize_bits);	\
	spin_unlock_irqrestore(&subpage->lock, flags);			\
	return ret;							\
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);

/*
 * Note that in selftests (extent-io-tests), we can have a NULL fs_info passed
 * in.  We only test sectorsize == PAGE_SIZE cases so far, thus we can fall
 * back to the regular sectorsize branch.
 */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, folio_set_func,			\
				 folio_clear_func, folio_test_func)	\
void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info,	\
			    struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_set_##name(fs_info, folio, start, len);		\
}									\
void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info,	\
			      struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clear_##name(fs_info, folio, start, len);		\
}									\
bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info,	\
			     struct folio *folio, u64 start, u32 len)	\
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping))			\
		return folio_test_func(folio);				\
	return btrfs_subpage_test_##name(fs_info, folio, start, len);	\
}									\
void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info,	\
				  struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_set_func(folio);					\
		return;							\
	}								\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	btrfs_subpage_set_##name(fs_info, folio, start, len);		\
}									\
void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
				    struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping)) {		\
		folio_clear_func(folio);				\
		return;							\
	}								\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	btrfs_subpage_clear_##name(fs_info, folio, start, len);		\
}									\
bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info,	\
				   struct folio *folio, u64 start, u32 len) \
{									\
	if (unlikely(!fs_info) ||					\
	    !btrfs_is_subpage(fs_info, folio->mapping))			\
		return folio_test_func(folio);				\
	btrfs_subpage_clamp_range(folio, &start, &len);			\
	return btrfs_subpage_test_##name(fs_info, folio, start, len);	\
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, folio_mark_uptodate, folio_clear_uptodate,
			 folio_test_uptodate);
IMPLEMENT_BTRFS_PAGE_OPS(dirty, folio_mark_dirty, folio_clear_dirty_for_io,
			 folio_test_dirty);
IMPLEMENT_BTRFS_PAGE_OPS(writeback, folio_start_writeback, folio_end_writeback,
			 folio_test_writeback);
IMPLEMENT_BTRFS_PAGE_OPS(ordered, folio_set_ordered, folio_clear_ordered,
			 folio_test_ordered);
IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
			 folio_test_checked);

/*
 * Make sure not only the page dirty bit is cleared, but also the subpage
 * dirty bit is cleared.
 */
void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio)
{
	struct btrfs_subpage *subpage = folio_get_private(folio);

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	ASSERT(!folio_test_dirty(folio));
	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(subpage_test_bitmap_all_zero(fs_info, subpage, dirty));
}

/*
 * Handle different locked pages with different page sizes:
 *
 * - Page locked by plain lock_page()
 *   It should not have any subpage::writers count.
 *   It can be unlocked by unlock_page().
 *   This is the most common locked page for __extent_writepage() called
 *   inside extent_write_cache_pages().
 *   Rarer cases include the @locked_page from extent_write_locked_range().
 *
 * - Page locked by lock_delalloc_pages()
 *   There is only one caller, and it covers all pages except @locked_page
 *   for extent_write_locked_range().
 *   In this case, we have to call the subpage helper to handle it.
 */
void btrfs_folio_unlock_writer(struct btrfs_fs_info *fs_info,
			       struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage *subpage;

	ASSERT(folio_test_locked(folio));
	/* For the non-subpage case, we just unlock the page. */
	if (!btrfs_is_subpage(fs_info, folio->mapping)) {
		folio_unlock(folio);
		return;
	}

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	subpage = folio_get_private(folio);

	/*
	 * For the subpage case, there are two types of locked pages: with or
	 * without a writers count.
	 *
	 * Since we own the page lock, no one else could touch subpage::writers
	 * and we are safe to do several atomic operations without a spinlock.
	 */
	if (atomic_read(&subpage->writers) == 0) {
		/* No writers, locked by plain lock_page(). */
		folio_unlock(folio);
		return;
	}

	/* Have writers, use the proper subpage helper to end it. */
	btrfs_folio_end_writer_lock(fs_info, folio, start, len);
}

#define GET_SUBPAGE_BITMAP(subpage, subpage_info, name, dst)		\
	bitmap_cut(dst, subpage->bitmaps, 0,				\
		   subpage_info->name##_offset, subpage_info->bitmap_nr_bits)

void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
				      struct folio *folio, u64 start, u32 len)
{
	struct btrfs_subpage_info *subpage_info = fs_info->subpage_info;
	struct btrfs_subpage *subpage;
	unsigned long uptodate_bitmap;
	unsigned long dirty_bitmap;
	unsigned long writeback_bitmap;
	unsigned long ordered_bitmap;
	unsigned long checked_bitmap;
	unsigned long locked_bitmap;
	unsigned long flags;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	ASSERT(subpage_info);
	subpage = folio_get_private(folio);

	spin_lock_irqsave(&subpage->lock, flags);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, uptodate, &uptodate_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, dirty, &dirty_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, writeback, &writeback_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, ordered, &ordered_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, checked, &checked_bitmap);
	GET_SUBPAGE_BITMAP(subpage, subpage_info, locked, &locked_bitmap);
	spin_unlock_irqrestore(&subpage->lock, flags);

	dump_page(folio_page(folio, 0), "btrfs subpage dump");
	btrfs_warn(fs_info,
"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl locked=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
		    start, len, folio_pos(folio),
		    subpage_info->bitmap_nr_bits, &uptodate_bitmap,
		    subpage_info->bitmap_nr_bits, &dirty_bitmap,
		    subpage_info->bitmap_nr_bits, &locked_bitmap,
		    subpage_info->bitmap_nr_bits, &writeback_bitmap,
		    subpage_info->bitmap_nr_bits, &ordered_bitmap,
		    subpage_info->bitmap_nr_bits, &checked_bitmap);
}