// SPDX-License-Identifier: GPL-2.0+
/*
 * Buffer/page management specific to NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi and Seiji Kihara.
 */

#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/page-flags.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "page.h"
#include "mdt.h"


#define NILFS_BUFFER_INHERENT_BITS \
	(BIT(BH_Uptodate) | BIT(BH_Mapped) | BIT(BH_NILFS_Node) | \
	 BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked))
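
/*
 * NILFS_BUFFER_INHERENT_BITS selects the buffer head state flags that
 * describe a block's contents rather than its transient I/O state; these
 * are the bits preserved when a buffer is duplicated between the original
 * and shadow page caches (see nilfs_copy_buffer() and nilfs_copy_folio()
 * below).
 */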

static struct buffer_head *__nilfs_get_folio_block(struct folio *folio,
		unsigned long block, pgoff_t index, int blkbits,
		unsigned long b_state)
{
	unsigned long first_block;
	struct buffer_head *bh = folio_buffers(folio);

	if (!bh)
		bh = create_empty_buffers(folio, 1 << blkbits, b_state);

	first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
	bh = get_nth_bh(bh, block - first_block);

	wait_on_buffer(bh);
	return bh;
}

struct buffer_head *nilfs_grab_buffer(struct inode *inode,
				      struct address_space *mapping,
				      unsigned long blkoff,
				      unsigned long b_state)
{
	int blkbits = inode->i_blkbits;
	pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits);
	struct folio *folio;
	struct buffer_head *bh;

	folio = filemap_grab_folio(mapping, index);
	if (IS_ERR(folio))
		return NULL;

	bh = __nilfs_get_folio_block(folio, blkoff, index, blkbits, b_state);
	if (unlikely(!bh)) {
		folio_unlock(folio);
		folio_put(folio);
		return NULL;
	}
	bh->b_bdev = inode->i_sb->s_bdev;
	return bh;
}
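
/*
 * Example (illustrative sketch, not part of the build): on success,
 * nilfs_grab_buffer() returns with the folio locked and with references
 * held on both the folio and the buffer head, so a caller is expected to
 * release all three when done:
 *
 *	struct buffer_head *bh;
 *
 *	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
 *	if (unlikely(!bh))
 *		return -ENOMEM;
 *	... map or read the block through bh ...
 *	folio_unlock(bh->b_folio);
 *	folio_put(bh->b_folio);
 *	brelse(bh);
 */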

/**
 * nilfs_forget_buffer - discard dirty state
 * @bh: buffer head of the buffer to be discarded
 */
void nilfs_forget_buffer(struct buffer_head *bh)
{
	struct folio *folio = bh->b_folio;
	const unsigned long clear_bits =
		(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
		 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
		 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected) |
		 BIT(BH_Delay));

	lock_buffer(bh);
	set_mask_bits(&bh->b_state, clear_bits, 0);
	if (nilfs_folio_buffers_clean(folio))
		__nilfs_clear_folio_dirty(folio);

	bh->b_blocknr = -1;
	folio_clear_uptodate(folio);
	folio_clear_mappedtodisk(folio);
	unlock_buffer(bh);
	brelse(bh);
}

/**
 * nilfs_copy_buffer - copy buffer data and flags
 * @dbh: destination buffer
 * @sbh: source buffer
 */
void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
{
	void *saddr, *daddr;
	unsigned long bits;
	struct folio *sfolio = sbh->b_folio, *dfolio = dbh->b_folio;
	struct buffer_head *bh;

	saddr = kmap_local_folio(sfolio, bh_offset(sbh));
	daddr = kmap_local_folio(dfolio, bh_offset(dbh));
	memcpy(daddr, saddr, sbh->b_size);
	kunmap_local(daddr);
	kunmap_local(saddr);

	dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
	dbh->b_blocknr = sbh->b_blocknr;
	dbh->b_bdev = sbh->b_bdev;

	bh = dbh;
	bits = sbh->b_state & (BIT(BH_Uptodate) | BIT(BH_Mapped));
	while ((bh = bh->b_this_page) != dbh) {
		lock_buffer(bh);
		bits &= bh->b_state;
		unlock_buffer(bh);
	}
	if (bits & BIT(BH_Uptodate))
		folio_mark_uptodate(dfolio);
	else
		folio_clear_uptodate(dfolio);
	if (bits & BIT(BH_Mapped))
		folio_set_mappedtodisk(dfolio);
	else
		folio_clear_mappedtodisk(dfolio);
}
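
/*
 * Note that the loop above derives the destination folio's flags from all
 * of its buffers: the folio is marked uptodate (or mapped-to-disk) only if
 * every buffer in the folio, including the one just copied, has the
 * corresponding bit set.  kunmap_local() is called in the reverse order of
 * the mappings because local kmaps are stack-ordered.
 */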

/**
 * nilfs_folio_buffers_clean - Check if a folio has dirty buffers or not.
 * @folio: Folio to be checked.
 *
 * nilfs_folio_buffers_clean() returns false if the folio has dirty buffers.
 * Otherwise, it returns true.
 */
bool nilfs_folio_buffers_clean(struct folio *folio)
{
	struct buffer_head *bh, *head;

	bh = head = folio_buffers(folio);
	do {
		if (buffer_dirty(bh))
			return false;
		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}

void nilfs_folio_bug(struct folio *folio)
{
	struct buffer_head *bh, *head;
	struct address_space *m;
	unsigned long ino;

	if (unlikely(!folio)) {
		printk(KERN_CRIT "NILFS_FOLIO_BUG(NULL)\n");
		return;
	}

	m = folio->mapping;
	ino = m ? m->host->i_ino : 0;

	printk(KERN_CRIT "NILFS_FOLIO_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
	       "mapping=%p ino=%lu\n",
	       folio, folio_ref_count(folio),
	       (unsigned long long)folio->index, folio->flags, m, ino);

	head = folio_buffers(folio);
	if (head) {
		int i = 0;

		bh = head;
		do {
			printk(KERN_CRIT
			       " BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n",
			       i++, bh, atomic_read(&bh->b_count),
			       (unsigned long long)bh->b_blocknr, bh->b_state);
			bh = bh->b_this_page;
		} while (bh != head);
	}
}

/**
 * nilfs_copy_folio - copy the folio with buffers
 * @dst: destination folio
 * @src: source folio
 * @copy_dirty: flag whether to copy dirty states on the folio's buffer heads.
 *
 * This function is for both data folios and btnode folios.  The dirty flag
 * must be handled by the caller.  The folio must not be under I/O.
 * Both src and dst folios must be locked.
 */
static void nilfs_copy_folio(struct folio *dst, struct folio *src,
			     bool copy_dirty)
{
	struct buffer_head *dbh, *dbufs, *sbh;
	unsigned long mask = NILFS_BUFFER_INHERENT_BITS;

	BUG_ON(folio_test_writeback(dst));

	sbh = folio_buffers(src);
	dbh = folio_buffers(dst);
	if (!dbh)
		dbh = create_empty_buffers(dst, sbh->b_size, 0);

	if (copy_dirty)
		mask |= BIT(BH_Dirty);

	dbufs = dbh;
	do {
		lock_buffer(sbh);
		lock_buffer(dbh);
		dbh->b_state = sbh->b_state & mask;
		dbh->b_blocknr = sbh->b_blocknr;
		dbh->b_bdev = sbh->b_bdev;
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);

	folio_copy(dst, src);

	if (folio_test_uptodate(src) && !folio_test_uptodate(dst))
		folio_mark_uptodate(dst);
	else if (!folio_test_uptodate(src) && folio_test_uptodate(dst))
		folio_clear_uptodate(dst);
	if (folio_test_mappedtodisk(src) && !folio_test_mappedtodisk(dst))
		folio_set_mappedtodisk(dst);
	else if (!folio_test_mappedtodisk(src) && folio_test_mappedtodisk(dst))
		folio_clear_mappedtodisk(dst);

	do {
		unlock_buffer(sbh);
		unlock_buffer(dbh);
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);
}
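
/*
 * Both buffer rings stay locked across folio_copy() above, which keeps the
 * buffer state copied into dst stable while the folio contents are being
 * duplicated; the second loop walks the rings again in the same order to
 * drop the locks.
 */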

int nilfs_copy_dirty_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct folio_batch fbatch;
	unsigned int i;
	pgoff_t index = 0;
	int err = 0;

	folio_batch_init(&fbatch);
repeat:
	if (!filemap_get_folios_tag(smap, &index, (pgoff_t)-1,
				    PAGECACHE_TAG_DIRTY, &fbatch))
		return 0;

	for (i = 0; i < folio_batch_count(&fbatch); i++) {
		struct folio *folio = fbatch.folios[i], *dfolio;

		folio_lock(folio);
		if (unlikely(!folio_test_dirty(folio)))
			NILFS_FOLIO_BUG(folio, "inconsistent dirty state");

		dfolio = filemap_grab_folio(dmap, folio->index);
		if (IS_ERR(dfolio)) {
			/* No empty page is added to the page cache */
			folio_unlock(folio);
			err = PTR_ERR(dfolio);
			break;
		}
		if (unlikely(!folio_buffers(folio)))
			NILFS_FOLIO_BUG(folio,
					"found empty page in dat page cache");

		nilfs_copy_folio(dfolio, folio, true);
		filemap_dirty_folio(folio_mapping(dfolio), dfolio);

		folio_unlock(dfolio);
		folio_put(dfolio);
		folio_unlock(folio);
	}
	folio_batch_release(&fbatch);
	cond_resched();

	if (likely(!err))
		goto repeat;
	return err;
}

/**
 * nilfs_copy_back_pages - copy back pages to original cache from shadow cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * No pages must be added to the cache during this process.
 * This must be ensured by the caller.
 */
void nilfs_copy_back_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct folio_batch fbatch;
	unsigned int i, n;
	pgoff_t start = 0;

	folio_batch_init(&fbatch);
repeat:
	n = filemap_get_folios(smap, &start, ~0UL, &fbatch);
	if (!n)
		return;

	for (i = 0; i < folio_batch_count(&fbatch); i++) {
		struct folio *folio = fbatch.folios[i], *dfolio;
		pgoff_t index = folio->index;

		folio_lock(folio);
		dfolio = filemap_lock_folio(dmap, index);
		if (!IS_ERR(dfolio)) {
			/* overwrite existing folio in the destination cache */
			WARN_ON(folio_test_dirty(dfolio));
			nilfs_copy_folio(dfolio, folio, false);
			folio_unlock(dfolio);
			folio_put(dfolio);
			/* Do we not need to remove folio from smap here? */
		} else {
			struct folio *f;

			/* move the folio to the destination cache */
			xa_lock_irq(&smap->i_pages);
			f = __xa_erase(&smap->i_pages, index);
			WARN_ON(folio != f);
			smap->nrpages--;
			xa_unlock_irq(&smap->i_pages);

			xa_lock_irq(&dmap->i_pages);
			f = __xa_store(&dmap->i_pages, index, folio, GFP_NOFS);
			if (unlikely(f)) {
				/* Probably -ENOMEM */
				folio->mapping = NULL;
				folio_put(folio);
			} else {
				folio->mapping = dmap;
				dmap->nrpages++;
				if (folio_test_dirty(folio))
					__xa_set_mark(&dmap->i_pages, index,
						      PAGECACHE_TAG_DIRTY);
			}
			xa_unlock_irq(&dmap->i_pages);
		}
		folio_unlock(folio);
	}
	folio_batch_release(&fbatch);
	cond_resched();

	goto repeat;
}
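
/*
 * When the destination cache has no folio at the given index, the folio is
 * moved rather than copied: it is erased from the source xarray and stored
 * into the destination xarray under the respective i_pages locks, and the
 * dirty tag is re-established by hand because this bypasses the normal
 * page cache insertion paths.
 */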

/**
 * nilfs_clear_dirty_pages - discard dirty pages in address space
 * @mapping: address space with dirty pages for discarding
 */
void nilfs_clear_dirty_pages(struct address_space *mapping)
{
	struct folio_batch fbatch;
	unsigned int i;
	pgoff_t index = 0;

	folio_batch_init(&fbatch);

	while (filemap_get_folios_tag(mapping, &index, (pgoff_t)-1,
				      PAGECACHE_TAG_DIRTY, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);

			/*
			 * This folio may have been removed from the address
			 * space by truncation or invalidation when the lock
			 * was acquired.  Skip processing in that case.
			 */
			if (likely(folio->mapping == mapping))
				nilfs_clear_folio_dirty(folio);

			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

/**
 * nilfs_clear_folio_dirty - discard dirty folio
 * @folio: dirty folio that will be discarded
 *
 * nilfs_clear_folio_dirty() clears working states including dirty state for
 * the folio and its buffers.  If the folio has buffers, clear only if it is
 * confirmed that none of the buffer heads are busy (none have valid
 * references and none are locked).
 */
void nilfs_clear_folio_dirty(struct folio *folio)
{
	struct buffer_head *bh, *head;

	BUG_ON(!folio_test_locked(folio));

	head = folio_buffers(folio);
	if (head) {
		const unsigned long clear_bits =
			(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
			 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
			 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected) |
			 BIT(BH_Delay));
		bool busy, invalidated = false;

recheck_buffers:
		busy = false;
		bh = head;
		do {
			if (atomic_read(&bh->b_count) | buffer_locked(bh)) {
				busy = true;
				break;
			}
		} while (bh = bh->b_this_page, bh != head);

		if (busy) {
			if (invalidated)
				return;
			invalidate_bh_lrus();
			invalidated = true;
			goto recheck_buffers;
		}

		bh = head;
		do {
			lock_buffer(bh);
			set_mask_bits(&bh->b_state, clear_bits, 0);
			unlock_buffer(bh);
		} while (bh = bh->b_this_page, bh != head);
	}

	folio_clear_uptodate(folio);
	folio_clear_mappedtodisk(folio);
	folio_clear_checked(folio);
	__nilfs_clear_folio_dirty(folio);
}

unsigned int nilfs_page_count_clean_buffers(struct folio *folio,
					    unsigned int from, unsigned int to)
{
	unsigned int block_start, block_end;
	struct buffer_head *bh, *head;
	unsigned int nc = 0;

	for (bh = head = folio_buffers(folio), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + bh->b_size;
		if (block_end > from && block_start < to && !buffer_dirty(bh))
			nc++;
	}
	return nc;
}
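
/*
 * Worked example (illustrative): with 1 KiB buffers in a 4 KiB folio,
 * nilfs_page_count_clean_buffers(folio, 512, 2048) examines the byte range
 * [512, 2048), which overlaps only the first and second buffers, and
 * returns how many of those two are not dirty.
 */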

/*
 * NILFS2 needs __nilfs_clear_folio_dirty() in the following two cases:
 *
 * 1) For B-tree node pages and data pages of DAT file, NILFS2 clears dirty
 *    flag of pages when it copies back pages from shadow cache to the
 *    original cache.
 *
 * 2) Some B-tree operations like insertion or deletion may dispose buffers
 *    in dirty state, and this needs to cancel the dirty state of their
 *    pages.
 */
void __nilfs_clear_folio_dirty(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;

	if (mapping) {
		xa_lock_irq(&mapping->i_pages);
		if (folio_test_dirty(folio)) {
			__xa_clear_mark(&mapping->i_pages, folio->index,
					PAGECACHE_TAG_DIRTY);
			xa_unlock_irq(&mapping->i_pages);
			folio_clear_dirty_for_io(folio);
			return;
		}
		xa_unlock_irq(&mapping->i_pages);
		return;
	}
	folio_clear_dirty(folio);
}

/**
 * nilfs_find_uncommitted_extent - find extent of uncommitted data
 * @inode: inode
 * @start_blk: start block offset (in)
 * @blkoff: start offset of the found extent (out)
 *
 * This function searches an extent of buffers marked "delayed" which
 * starts from a block offset equal to or larger than @start_blk.  If
 * such an extent was found, this will store the start offset in
 * @blkoff and return its length in blocks.  Otherwise, zero is
 * returned.
 */
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
					    sector_t start_blk,
					    sector_t *blkoff)
{
	unsigned int i, nr_folios;
	pgoff_t index;
	unsigned long length = 0;
	struct folio_batch fbatch;
	struct folio *folio;

	if (inode->i_mapping->nrpages == 0)
		return 0;

	index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);

	folio_batch_init(&fbatch);

repeat:
	nr_folios = filemap_get_folios_contig(inode->i_mapping, &index,
					      ULONG_MAX, &fbatch);
	if (nr_folios == 0)
		return length;

	i = 0;
	do {
		folio = fbatch.folios[i];

		folio_lock(folio);
		if (folio_buffers(folio)) {
			struct buffer_head *bh, *head;
			sector_t b;

			b = folio->index << (PAGE_SHIFT - inode->i_blkbits);
			bh = head = folio_buffers(folio);
			do {
				if (b < start_blk)
					continue;
				if (buffer_delay(bh)) {
					if (length == 0)
						*blkoff = b;
					length++;
				} else if (length > 0) {
					goto out_locked;
				}
			} while (++b, bh = bh->b_this_page, bh != head);
		} else {
			if (length > 0)
				goto out_locked;
		}
		folio_unlock(folio);

	} while (++i < nr_folios);

	folio_batch_release(&fbatch);
	cond_resched();
	goto repeat;

out_locked:
	folio_unlock(folio);
	folio_batch_release(&fbatch);
	return length;
}
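
/*
 * Example (illustrative sketch, not part of the build): finding the first
 * extent of delayed-allocated blocks in an inode:
 *
 *	sector_t blkoff;
 *	unsigned long nblocks;
 *
 *	nblocks = nilfs_find_uncommitted_extent(inode, 0, &blkoff);
 *	if (nblocks)
 *		pr_debug("uncommitted extent: start=%llu len=%lu\n",
 *			 (unsigned long long)blkoff, nblocks);
 *
 * Within NILFS this helper backs the FIEMAP path, which reports regions
 * that are dirty in memory but have not yet been assigned disk blocks.
 */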