fs/nilfs2/page.c (v6.8)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Buffer/page management specific to NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi and Seiji Kihara.
 */

#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/page-flags.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "page.h"
#include "mdt.h"


#define NILFS_BUFFER_INHERENT_BITS					\
	(BIT(BH_Uptodate) | BIT(BH_Mapped) | BIT(BH_NILFS_Node) |	\
	 BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked))

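/*
 * Illustrative note (not from the original file): the "inherent" bits
 * describe what a buffer contains rather than its transient I/O state,
 * so they are the only bits carried over when buffer state is
 * duplicated, as nilfs_copy_buffer() and nilfs_copy_folio() below do:
 *
 *	dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
 *
 * Transient bits such as BH_Dirty, BH_Lock or BH_Async_Write are
 * deliberately excluded from the mask.
 */
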
static struct buffer_head *__nilfs_get_folio_block(struct folio *folio,
		unsigned long block, pgoff_t index, int blkbits,
		unsigned long b_state)

{
	unsigned long first_block;
	struct buffer_head *bh = folio_buffers(folio);

	if (!bh)
		bh = create_empty_buffers(folio, 1 << blkbits, b_state);

	first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
	bh = get_nth_bh(bh, block - first_block);

	touch_buffer(bh);
	wait_on_buffer(bh);
	return bh;
}

struct buffer_head *nilfs_grab_buffer(struct inode *inode,
				      struct address_space *mapping,
				      unsigned long blkoff,
				      unsigned long b_state)
{
	int blkbits = inode->i_blkbits;
	pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits);
	struct folio *folio;
	struct buffer_head *bh;

	folio = filemap_grab_folio(mapping, index);
	if (IS_ERR(folio))
		return NULL;

	bh = __nilfs_get_folio_block(folio, blkoff, index, blkbits, b_state);
	if (unlikely(!bh)) {
		folio_unlock(folio);
		folio_put(folio);
		return NULL;
	}
	return bh;
}

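/*
 * Caller-side sketch (illustrative; error handling condensed).  On
 * success, the buffer's folio is returned locked and with an extra
 * reference, so a typical caller releases all three when done:
 *
 *	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
 *	if (unlikely(!bh))
 *		return -ENOMEM;
 *	... read or initialize the block ...
 *	folio_unlock(bh->b_folio);
 *	folio_put(bh->b_folio);
 *	brelse(bh);
 */
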
/**
 * nilfs_forget_buffer - discard dirty state
 * @bh: buffer head of the buffer to be discarded
 */
void nilfs_forget_buffer(struct buffer_head *bh)
{
	struct folio *folio = bh->b_folio;
	const unsigned long clear_bits =
		(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
		 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
		 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));

	lock_buffer(bh);
	set_mask_bits(&bh->b_state, clear_bits, 0);
	if (nilfs_folio_buffers_clean(folio))
		__nilfs_clear_folio_dirty(folio);

	bh->b_blocknr = -1;
	folio_clear_uptodate(folio);
	folio_clear_mappedtodisk(folio);
	unlock_buffer(bh);
	brelse(bh);
}

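/*
 * Illustrative note: nilfs_forget_buffer() strips every bit that marks
 * the buffer as holding live data and drops the caller's reference via
 * brelse(), so after
 *
 *	nilfs_forget_buffer(bh);
 *
 * the block is no longer eligible for writeback and @bh must not be
 * touched again by the caller.
 */
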
/**
 * nilfs_copy_buffer -- copy buffer data and flags
 * @dbh: destination buffer
 * @sbh: source buffer
 */
void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
{
	void *kaddr0, *kaddr1;
	unsigned long bits;
	struct page *spage = sbh->b_page, *dpage = dbh->b_page;
	struct buffer_head *bh;

	kaddr0 = kmap_atomic(spage);
	kaddr1 = kmap_atomic(dpage);
	memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
	kunmap_atomic(kaddr1);
	kunmap_atomic(kaddr0);

	dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
	dbh->b_blocknr = sbh->b_blocknr;
	dbh->b_bdev = sbh->b_bdev;

	bh = dbh;
	bits = sbh->b_state & (BIT(BH_Uptodate) | BIT(BH_Mapped));
	while ((bh = bh->b_this_page) != dbh) {
		lock_buffer(bh);
		bits &= bh->b_state;
		unlock_buffer(bh);
	}
	if (bits & BIT(BH_Uptodate))
		SetPageUptodate(dpage);
	else
		ClearPageUptodate(dpage);
	if (bits & BIT(BH_Mapped))
		SetPageMappedToDisk(dpage);
	else
		ClearPageMappedToDisk(dpage);
}

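/*
 * Illustrative note on the loop above: the page-wide flags are derived
 * from the conjunction of the state of every buffer on the destination
 * page, i.e. PageUptodate(dpage) only if all buffers carry BH_Uptodate,
 * and PageMappedToDisk(dpage) only if all buffers carry BH_Mapped.
 */
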
/**
 * nilfs_folio_buffers_clean - Check if a folio has dirty buffers or not.
 * @folio: Folio to be checked.
 *
 * nilfs_folio_buffers_clean() returns false if the folio has dirty buffers.
 * Otherwise, it returns true.
 */
bool nilfs_folio_buffers_clean(struct folio *folio)
{
	struct buffer_head *bh, *head;

	bh = head = folio_buffers(folio);
	do {
		if (buffer_dirty(bh))
			return false;
		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}

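/*
 * Usage sketch (taken from nilfs_forget_buffer() above): once the last
 * dirty buffer on a folio has been cleaned, the folio-level dirty state
 * can be cancelled as well:
 *
 *	if (nilfs_folio_buffers_clean(folio))
 *		__nilfs_clear_folio_dirty(folio);
 */
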
void nilfs_folio_bug(struct folio *folio)
{
	struct buffer_head *bh, *head;
	struct address_space *m;
	unsigned long ino;

	if (unlikely(!folio)) {
		printk(KERN_CRIT "NILFS_FOLIO_BUG(NULL)\n");
		return;
	}

	m = folio->mapping;
	ino = m ? m->host->i_ino : 0;

	printk(KERN_CRIT "NILFS_FOLIO_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
	       "mapping=%p ino=%lu\n",
	       folio, folio_ref_count(folio),
	       (unsigned long long)folio->index, folio->flags, m, ino);

	head = folio_buffers(folio);
	if (head) {
		int i = 0;

		bh = head;
		do {
			printk(KERN_CRIT
			       " BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n",
			       i++, bh, atomic_read(&bh->b_count),
			       (unsigned long long)bh->b_blocknr, bh->b_state);
			bh = bh->b_this_page;
		} while (bh != head);
	}
}

/**
 * nilfs_copy_folio -- copy the folio with buffers
 * @dst: destination folio
 * @src: source folio
 * @copy_dirty: flag whether to copy dirty states on the folio's buffer heads.
 *
 * This function is for both data folios and btnode folios.  The dirty flag
 * should be handled by the caller.  The folio must not be under I/O.
 * Both src and dst folios must be locked.
 */
static void nilfs_copy_folio(struct folio *dst, struct folio *src,
		bool copy_dirty)
{
	struct buffer_head *dbh, *dbufs, *sbh;
	unsigned long mask = NILFS_BUFFER_INHERENT_BITS;

	BUG_ON(folio_test_writeback(dst));

	sbh = folio_buffers(src);
	dbh = folio_buffers(dst);
	if (!dbh)
		dbh = create_empty_buffers(dst, sbh->b_size, 0);

	if (copy_dirty)
		mask |= BIT(BH_Dirty);

	dbufs = dbh;
	do {
		lock_buffer(sbh);
		lock_buffer(dbh);
		dbh->b_state = sbh->b_state & mask;
		dbh->b_blocknr = sbh->b_blocknr;
		dbh->b_bdev = sbh->b_bdev;
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);

	folio_copy(dst, src);

	if (folio_test_uptodate(src) && !folio_test_uptodate(dst))
		folio_mark_uptodate(dst);
	else if (!folio_test_uptodate(src) && folio_test_uptodate(dst))
		folio_clear_uptodate(dst);
	if (folio_test_mappedtodisk(src) && !folio_test_mappedtodisk(dst))
		folio_set_mappedtodisk(dst);
	else if (!folio_test_mappedtodisk(src) && folio_test_mappedtodisk(dst))
		folio_clear_mappedtodisk(dst);

	do {
		unlock_buffer(sbh);
		unlock_buffer(dbh);
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);
}

int nilfs_copy_dirty_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct folio_batch fbatch;
	unsigned int i;
	pgoff_t index = 0;
	int err = 0;

	folio_batch_init(&fbatch);
repeat:
	if (!filemap_get_folios_tag(smap, &index, (pgoff_t)-1,
				PAGECACHE_TAG_DIRTY, &fbatch))
		return 0;

	for (i = 0; i < folio_batch_count(&fbatch); i++) {
		struct folio *folio = fbatch.folios[i], *dfolio;

		folio_lock(folio);
		if (unlikely(!folio_test_dirty(folio)))
			NILFS_FOLIO_BUG(folio, "inconsistent dirty state");

		dfolio = filemap_grab_folio(dmap, folio->index);
		if (unlikely(IS_ERR(dfolio))) {
			/* No empty page is added to the page cache */
			folio_unlock(folio);
			err = PTR_ERR(dfolio);
			break;
		}
		if (unlikely(!folio_buffers(folio)))
			NILFS_FOLIO_BUG(folio,
				       "found empty page in dat page cache");

		nilfs_copy_folio(dfolio, folio, true);
		filemap_dirty_folio(folio_mapping(dfolio), dfolio);

		folio_unlock(dfolio);
		folio_put(dfolio);
		folio_unlock(folio);
	}
	folio_batch_release(&fbatch);
	cond_resched();

	if (likely(!err))
		goto repeat;
	return err;
}

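/*
 * Caller sketch (illustrative; assumes the shadow-map code in mdt.c):
 * a metadata file saves its dirty pages into a shadow page cache so the
 * cache can be restored if log construction fails, roughly:
 *
 *	err = nilfs_copy_dirty_pages(shadow_mapping, inode->i_mapping);
 *
 * On -ENOMEM the call can simply be retried: folios copied so far stay
 * dirty in @smap, and the function restarts from the dirty-tag lookup.
 */
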
/**
 * nilfs_copy_back_pages -- copy back pages to original cache from shadow cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * No pages must be added to the cache during this process.
 * This must be ensured by the caller.
 */
void nilfs_copy_back_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct folio_batch fbatch;
	unsigned int i, n;
	pgoff_t start = 0;

	folio_batch_init(&fbatch);
repeat:
	n = filemap_get_folios(smap, &start, ~0UL, &fbatch);
	if (!n)
		return;

	for (i = 0; i < folio_batch_count(&fbatch); i++) {
		struct folio *folio = fbatch.folios[i], *dfolio;
		pgoff_t index = folio->index;

		folio_lock(folio);
		dfolio = filemap_lock_folio(dmap, index);
		if (!IS_ERR(dfolio)) {
			/* overwrite existing folio in the destination cache */
			WARN_ON(folio_test_dirty(dfolio));
			nilfs_copy_folio(dfolio, folio, false);
			folio_unlock(dfolio);
			folio_put(dfolio);
			/* Do we not need to remove folio from smap here? */
		} else {
			struct folio *f;

			/* move the folio to the destination cache */
			xa_lock_irq(&smap->i_pages);
			f = __xa_erase(&smap->i_pages, index);
			WARN_ON(folio != f);
			smap->nrpages--;
			xa_unlock_irq(&smap->i_pages);

			xa_lock_irq(&dmap->i_pages);
			f = __xa_store(&dmap->i_pages, index, folio, GFP_NOFS);
			if (unlikely(f)) {
				/* Probably -ENOMEM */
				folio->mapping = NULL;
				folio_put(folio);
			} else {
				folio->mapping = dmap;
				dmap->nrpages++;
				if (folio_test_dirty(folio))
					__xa_set_mark(&dmap->i_pages, index,
							PAGECACHE_TAG_DIRTY);
			}
			xa_unlock_irq(&dmap->i_pages);
		}
		folio_unlock(folio);
	}
	folio_batch_release(&fbatch);
	cond_resched();

	goto repeat;
}

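/*
 * Illustrative note on the move branch above: __xa_store() returns the
 * previous entry at @index (NULL when the slot was empty) or an
 * xa_err()-encoded pointer if allocation failed.  Since the caller
 * guarantees that no pages are added to either cache meanwhile, any
 * non-NULL return is treated as failure and the folio is dropped.
 */
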
/**
 * nilfs_clear_dirty_pages - discard dirty pages in address space
 * @mapping: address space with dirty pages for discarding
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
{
	struct folio_batch fbatch;
	unsigned int i;
	pgoff_t index = 0;

	folio_batch_init(&fbatch);

	while (filemap_get_folios_tag(mapping, &index, (pgoff_t)-1,
				PAGECACHE_TAG_DIRTY, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);

			/*
			 * This folio may have been removed from the address
			 * space by truncation or invalidation when the lock
			 * was acquired.  Skip processing in that case.
			 */
			if (likely(folio->mapping == mapping))
				nilfs_clear_folio_dirty(folio, silent);

			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

/**
 * nilfs_clear_folio_dirty - discard dirty folio
 * @folio: dirty folio that will be discarded
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_folio_dirty(struct folio *folio, bool silent)
{
	struct inode *inode = folio->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *head;

	BUG_ON(!folio_test_locked(folio));

	if (!silent)
		nilfs_warn(sb, "discard dirty page: offset=%lld, ino=%lu",
			   folio_pos(folio), inode->i_ino);

	folio_clear_uptodate(folio);
	folio_clear_mappedtodisk(folio);

	head = folio_buffers(folio);
	if (head) {
		const unsigned long clear_bits =
			(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
			 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
			 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));

		bh = head;
		do {
			lock_buffer(bh);
			if (!silent)
				nilfs_warn(sb,
					   "discard dirty block: blocknr=%llu, size=%zu",
					   (u64)bh->b_blocknr, bh->b_size);

			set_mask_bits(&bh->b_state, clear_bits, 0);
			unlock_buffer(bh);
		} while (bh = bh->b_this_page, bh != head);
	}

	__nilfs_clear_folio_dirty(folio);
}

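/*
 * Usage sketch (see nilfs_clear_dirty_pages() above): the folio must be
 * locked and still attached to its mapping when this runs:
 *
 *	folio_lock(folio);
 *	if (folio->mapping == mapping)
 *		nilfs_clear_folio_dirty(folio, true);
 *	folio_unlock(folio);
 */
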
unsigned int nilfs_page_count_clean_buffers(struct page *page,
					    unsigned int from, unsigned int to)
{
	unsigned int block_start, block_end;
	struct buffer_head *bh, *head;
	unsigned int nc = 0;

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + bh->b_size;
		if (block_end > from && block_start < to && !buffer_dirty(bh))
			nc++;
	}
	return nc;
}

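/*
 * Caller sketch (illustrative; condensed from the write_end path in
 * NILFS): the clean-buffer count is taken before block_write_end()
 * dirties the copied range, so it equals the number of blocks that the
 * write makes newly dirty:
 *
 *	nr_dirty = nilfs_page_count_clean_buffers(page, start,
 *						  start + copied);
 *	copied = generic_write_end(file, mapping, pos, len, copied,
 *				   page, fsdata);
 *	if (nr_dirty)
 *		nilfs_set_file_dirty(inode, nr_dirty);
 */
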
/*
 * NILFS2 needs __nilfs_clear_folio_dirty() in the following two cases:
 *
 * 1) For B-tree node pages and data pages of the DAT file, NILFS2 clears
 *    the dirty flag of pages when it copies back pages from the shadow
 *    cache to the original cache.
 *
 * 2) Some B-tree operations like insertion or deletion may dispose buffers
 *    in dirty state, and this needs to cancel the dirty state of their pages.
 */
void __nilfs_clear_folio_dirty(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;

	if (mapping) {
		xa_lock_irq(&mapping->i_pages);
		if (folio_test_dirty(folio)) {
			__xa_clear_mark(&mapping->i_pages, folio->index,
					     PAGECACHE_TAG_DIRTY);
			xa_unlock_irq(&mapping->i_pages);
			folio_clear_dirty_for_io(folio);
			return;
		}
		xa_unlock_irq(&mapping->i_pages);
		return;
	}
	folio_clear_dirty(folio);
}

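/*
 * Illustrative note: the PAGECACHE_TAG_DIRTY mark is cleared under the
 * i_pages lock before folio_clear_dirty_for_io() is called, so tag
 * lookups such as the ones in nilfs_copy_dirty_pages() and
 * nilfs_clear_dirty_pages() above never see a folio whose dirty state
 * is in the middle of being cancelled.
 */
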
481/**
482 * nilfs_find_uncommitted_extent - find extent of uncommitted data
483 * @inode: inode
484 * @start_blk: start block offset (in)
485 * @blkoff: start offset of the found extent (out)
486 *
487 * This function searches an extent of buffers marked "delayed" which
488 * starts from a block offset equal to or larger than @start_blk.  If
489 * such an extent was found, this will store the start offset in
490 * @blkoff and return its length in blocks.  Otherwise, zero is
491 * returned.
492 */
493unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
494					    sector_t start_blk,
495					    sector_t *blkoff)
496{
497	unsigned int i, nr_folios;
498	pgoff_t index;
 
499	unsigned long length = 0;
500	struct folio_batch fbatch;
501	struct folio *folio;
 
502
503	if (inode->i_mapping->nrpages == 0)
504		return 0;
505
506	index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);
 
507
508	folio_batch_init(&fbatch);
509
510repeat:
511	nr_folios = filemap_get_folios_contig(inode->i_mapping, &index, ULONG_MAX,
512			&fbatch);
513	if (nr_folios == 0)
514		return length;
515
 
 
 
 
516	i = 0;
517	do {
518		folio = fbatch.folios[i];
519
520		folio_lock(folio);
521		if (folio_buffers(folio)) {
522			struct buffer_head *bh, *head;
523			sector_t b;
524
525			b = folio->index << (PAGE_SHIFT - inode->i_blkbits);
526			bh = head = folio_buffers(folio);
527			do {
528				if (b < start_blk)
529					continue;
530				if (buffer_delay(bh)) {
531					if (length == 0)
532						*blkoff = b;
533					length++;
534				} else if (length > 0) {
535					goto out_locked;
536				}
537			} while (++b, bh = bh->b_this_page, bh != head);
538		} else {
539			if (length > 0)
540				goto out_locked;
 
 
541		}
542		folio_unlock(folio);
543
544	} while (++i < nr_folios);
545
546	folio_batch_release(&fbatch);
 
547	cond_resched();
548	goto repeat;
549
550out_locked:
551	folio_unlock(folio);
552	folio_batch_release(&fbatch);
 
553	return length;
554}
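
/*
 * Caller sketch (illustrative; fiemap is the in-tree user): walking the
 * delayed-allocation extents of a file one call at a time:
 *
 *	sector_t blkoff;
 *	unsigned long n;
 *
 *	n = nilfs_find_uncommitted_extent(inode, start_blk, &blkoff);
 *	if (n > 0) {
 *		// blocks [blkoff, blkoff + n) are dirty in the page
 *		// cache but not yet assigned disk block numbers
 *	}
 */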
fs/nilfs2/page.c (v5.4)
// SPDX-License-Identifier: GPL-2.0+
/*
 * page.c - buffer/page management specific to NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi and Seiji Kihara.
 */

#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/page-flags.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "page.h"
#include "mdt.h"


#define NILFS_BUFFER_INHERENT_BITS					\
	(BIT(BH_Uptodate) | BIT(BH_Mapped) | BIT(BH_NILFS_Node) |	\
	 BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked))

static struct buffer_head *
__nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
		       int blkbits, unsigned long b_state)

{
	unsigned long first_block;
	struct buffer_head *bh;

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << blkbits, b_state);

	first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
	bh = nilfs_page_get_nth_block(page, block - first_block);

	touch_buffer(bh);
	wait_on_buffer(bh);
	return bh;
}

struct buffer_head *nilfs_grab_buffer(struct inode *inode,
				      struct address_space *mapping,
				      unsigned long blkoff,
				      unsigned long b_state)
{
	int blkbits = inode->i_blkbits;
	pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits);
	struct page *page;
	struct buffer_head *bh;

	page = grab_cache_page(mapping, index);
	if (unlikely(!page))
		return NULL;

	bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);
	if (unlikely(!bh)) {
		unlock_page(page);
		put_page(page);
		return NULL;
	}
	return bh;
}

/**
 * nilfs_forget_buffer - discard dirty state
 * @bh: buffer head of the buffer to be discarded
 */
void nilfs_forget_buffer(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	const unsigned long clear_bits =
		(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
		 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
		 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));

	lock_buffer(bh);
	set_mask_bits(&bh->b_state, clear_bits, 0);
	if (nilfs_page_buffers_clean(page))
		__nilfs_clear_page_dirty(page);

	bh->b_blocknr = -1;
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	unlock_buffer(bh);
	brelse(bh);
}

/**
 * nilfs_copy_buffer -- copy buffer data and flags
 * @dbh: destination buffer
 * @sbh: source buffer
 */
void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
{
	void *kaddr0, *kaddr1;
	unsigned long bits;
	struct page *spage = sbh->b_page, *dpage = dbh->b_page;
	struct buffer_head *bh;

	kaddr0 = kmap_atomic(spage);
	kaddr1 = kmap_atomic(dpage);
	memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
	kunmap_atomic(kaddr1);
	kunmap_atomic(kaddr0);

	dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
	dbh->b_blocknr = sbh->b_blocknr;
	dbh->b_bdev = sbh->b_bdev;

	bh = dbh;
	bits = sbh->b_state & (BIT(BH_Uptodate) | BIT(BH_Mapped));
	while ((bh = bh->b_this_page) != dbh) {
		lock_buffer(bh);
		bits &= bh->b_state;
		unlock_buffer(bh);
	}
	if (bits & BIT(BH_Uptodate))
		SetPageUptodate(dpage);
	else
		ClearPageUptodate(dpage);
	if (bits & BIT(BH_Mapped))
		SetPageMappedToDisk(dpage);
	else
		ClearPageMappedToDisk(dpage);
}

/**
 * nilfs_page_buffers_clean - check if a page has dirty buffers or not.
 * @page: page to be checked
 *
 * nilfs_page_buffers_clean() returns zero if the page has dirty buffers.
 * Otherwise, it returns a non-zero value.
 */
int nilfs_page_buffers_clean(struct page *page)
{
	struct buffer_head *bh, *head;

	bh = head = page_buffers(page);
	do {
		if (buffer_dirty(bh))
			return 0;
		bh = bh->b_this_page;
	} while (bh != head);
	return 1;
}

void nilfs_page_bug(struct page *page)
{
	struct address_space *m;
	unsigned long ino;

	if (unlikely(!page)) {
		printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n");
		return;
	}

	m = page->mapping;
	ino = m ? m->host->i_ino : 0;

	printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
	       "mapping=%p ino=%lu\n",
	       page, page_ref_count(page),
	       (unsigned long long)page->index, page->flags, m, ino);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int i = 0;

		bh = head = page_buffers(page);
		do {
			printk(KERN_CRIT
			       " BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n",
			       i++, bh, atomic_read(&bh->b_count),
			       (unsigned long long)bh->b_blocknr, bh->b_state);
			bh = bh->b_this_page;
		} while (bh != head);
	}
}

/**
 * nilfs_copy_page -- copy the page with buffers
 * @dst: destination page
 * @src: source page
 * @copy_dirty: flag whether to copy dirty states on the page's buffer heads.
 *
 * This function is for both data pages and btnode pages.  The dirty flag
 * should be handled by the caller.  The page must not be under I/O.
 * Both src and dst pages must be locked.
 */
static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
{
	struct buffer_head *dbh, *dbufs, *sbh, *sbufs;
	unsigned long mask = NILFS_BUFFER_INHERENT_BITS;

	BUG_ON(PageWriteback(dst));

	sbh = sbufs = page_buffers(src);
	if (!page_has_buffers(dst))
		create_empty_buffers(dst, sbh->b_size, 0);

	if (copy_dirty)
		mask |= BIT(BH_Dirty);

	dbh = dbufs = page_buffers(dst);
	do {
		lock_buffer(sbh);
		lock_buffer(dbh);
		dbh->b_state = sbh->b_state & mask;
		dbh->b_blocknr = sbh->b_blocknr;
		dbh->b_bdev = sbh->b_bdev;
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);

	copy_highpage(dst, src);

	if (PageUptodate(src) && !PageUptodate(dst))
		SetPageUptodate(dst);
	else if (!PageUptodate(src) && PageUptodate(dst))
		ClearPageUptodate(dst);
	if (PageMappedToDisk(src) && !PageMappedToDisk(dst))
		SetPageMappedToDisk(dst);
	else if (!PageMappedToDisk(src) && PageMappedToDisk(dst))
		ClearPageMappedToDisk(dst);

	do {
		unlock_buffer(sbh);
		unlock_buffer(dbh);
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);
}

int nilfs_copy_dirty_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct pagevec pvec;
	unsigned int i;
	pgoff_t index = 0;
	int err = 0;

	pagevec_init(&pvec);
repeat:
	if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY))
		return 0;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i], *dpage;

		lock_page(page);
		if (unlikely(!PageDirty(page)))
			NILFS_PAGE_BUG(page, "inconsistent dirty state");

		dpage = grab_cache_page(dmap, page->index);
		if (unlikely(!dpage)) {
			/* No empty page is added to the page cache */
			err = -ENOMEM;
			unlock_page(page);
			break;
		}
		if (unlikely(!page_has_buffers(page)))
			NILFS_PAGE_BUG(page,
				       "found empty page in dat page cache");

		nilfs_copy_page(dpage, page, 1);
		__set_page_dirty_nobuffers(dpage);

		unlock_page(dpage);
		put_page(dpage);
		unlock_page(page);
	}
	pagevec_release(&pvec);
	cond_resched();

	if (likely(!err))
		goto repeat;
	return err;
}

/**
 * nilfs_copy_back_pages -- copy back pages to original cache from shadow cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * No pages must be added to the cache during this process.
 * This must be ensured by the caller.
 */
void nilfs_copy_back_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct pagevec pvec;
	unsigned int i, n;
	pgoff_t index = 0;

	pagevec_init(&pvec);
repeat:
	n = pagevec_lookup(&pvec, smap, &index);
	if (!n)
		return;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i], *dpage;
		pgoff_t offset = page->index;

		lock_page(page);
		dpage = find_lock_page(dmap, offset);
		if (dpage) {
			/* overwrite existing page in the destination cache */
			WARN_ON(PageDirty(dpage));
			nilfs_copy_page(dpage, page, 0);
			unlock_page(dpage);
			put_page(dpage);
			/* Do we not need to remove page from smap here? */
		} else {
			struct page *p;

			/* move the page to the destination cache */
			xa_lock_irq(&smap->i_pages);
			p = __xa_erase(&smap->i_pages, offset);
			WARN_ON(page != p);
			smap->nrpages--;
			xa_unlock_irq(&smap->i_pages);

			xa_lock_irq(&dmap->i_pages);
			p = __xa_store(&dmap->i_pages, offset, page, GFP_NOFS);
			if (unlikely(p)) {
				/* Probably -ENOMEM */
				page->mapping = NULL;
				put_page(page);
			} else {
				page->mapping = dmap;
				dmap->nrpages++;
				if (PageDirty(page))
					__xa_set_mark(&dmap->i_pages, offset,
							PAGECACHE_TAG_DIRTY);
			}
			xa_unlock_irq(&dmap->i_pages);
		}
		unlock_page(page);
	}
	pagevec_release(&pvec);
	cond_resched();

	goto repeat;
}

/**
 * nilfs_clear_dirty_pages - discard dirty pages in address space
 * @mapping: address space with dirty pages for discarding
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
{
	struct pagevec pvec;
	unsigned int i;
	pgoff_t index = 0;

	pagevec_init(&pvec);

	while (pagevec_lookup_tag(&pvec, mapping, &index,
					PAGECACHE_TAG_DIRTY)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			nilfs_clear_dirty_page(page, silent);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

/**
 * nilfs_clear_dirty_page - discard dirty page
 * @page: dirty page that will be discarded
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_page(struct page *page, bool silent)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;

	BUG_ON(!PageLocked(page));

	if (!silent)
		nilfs_msg(sb, KERN_WARNING,
			  "discard dirty page: offset=%lld, ino=%lu",
			  page_offset(page), inode->i_ino);

	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		const unsigned long clear_bits =
			(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
			 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
			 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));

		bh = head = page_buffers(page);
		do {
			lock_buffer(bh);
			if (!silent)
				nilfs_msg(sb, KERN_WARNING,
					  "discard dirty block: blocknr=%llu, size=%zu",
					  (u64)bh->b_blocknr, bh->b_size);

			set_mask_bits(&bh->b_state, clear_bits, 0);
			unlock_buffer(bh);
		} while (bh = bh->b_this_page, bh != head);
	}

	__nilfs_clear_page_dirty(page);
}

unsigned int nilfs_page_count_clean_buffers(struct page *page,
					    unsigned int from, unsigned int to)
{
	unsigned int block_start, block_end;
	struct buffer_head *bh, *head;
	unsigned int nc = 0;

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + bh->b_size;
		if (block_end > from && block_start < to && !buffer_dirty(bh))
			nc++;
	}
	return nc;
}

void nilfs_mapping_init(struct address_space *mapping, struct inode *inode)
{
	mapping->host = inode;
	mapping->flags = 0;
	mapping_set_gfp_mask(mapping, GFP_NOFS);
	mapping->private_data = NULL;
	mapping->a_ops = &empty_aops;
}

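/*
 * Usage sketch (illustrative; assumes the shadow-map setup in mdt.c):
 * freshly allocated address spaces for a metadata file's shadow caches
 * are initialized with GFP_NOFS and no-op address space operations:
 *
 *	nilfs_mapping_init(&shadow->frozen_data, inode);
 *	nilfs_mapping_init(&shadow->frozen_btnodes, inode);
 */
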
/*
 * NILFS2 needs clear_page_dirty() in the following two cases:
 *
 * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears
 *    page dirty flags when it copies back pages from the shadow cache
 *    (gcdat->{i_mapping,i_btnode_cache}) to its original cache
 *    (dat->{i_mapping,i_btnode_cache}).
 *
 * 2) Some B-tree operations like insertion or deletion may dispose buffers
 *    in dirty state, and this needs to cancel the dirty state of their pages.
 */
int __nilfs_clear_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (mapping) {
		xa_lock_irq(&mapping->i_pages);
		if (test_bit(PG_dirty, &page->flags)) {
			__xa_clear_mark(&mapping->i_pages, page_index(page),
					     PAGECACHE_TAG_DIRTY);
			xa_unlock_irq(&mapping->i_pages);
			return clear_page_dirty_for_io(page);
		}
		xa_unlock_irq(&mapping->i_pages);
		return 0;
	}
	return TestClearPageDirty(page);
}

/**
 * nilfs_find_uncommitted_extent - find extent of uncommitted data
 * @inode: inode
 * @start_blk: start block offset (in)
 * @blkoff: start offset of the found extent (out)
 *
 * This function searches an extent of buffers marked "delayed" which
 * starts from a block offset equal to or larger than @start_blk.  If
 * such an extent was found, this will store the start offset in
 * @blkoff and return its length in blocks.  Otherwise, zero is
 * returned.
 */
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
					    sector_t start_blk,
					    sector_t *blkoff)
{
	unsigned int i;
	pgoff_t index;
	unsigned int nblocks_in_page;
	unsigned long length = 0;
	sector_t b;
	struct pagevec pvec;
	struct page *page;

	if (inode->i_mapping->nrpages == 0)
		return 0;

	index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);
	nblocks_in_page = 1U << (PAGE_SHIFT - inode->i_blkbits);

	pagevec_init(&pvec);

repeat:
	pvec.nr = find_get_pages_contig(inode->i_mapping, index, PAGEVEC_SIZE,
					pvec.pages);
	if (pvec.nr == 0)
		return length;

	if (length > 0 && pvec.pages[0]->index > index)
		goto out;

	b = pvec.pages[0]->index << (PAGE_SHIFT - inode->i_blkbits);
	i = 0;
	do {
		page = pvec.pages[i];

		lock_page(page);
		if (page_has_buffers(page)) {
			struct buffer_head *bh, *head;

			bh = head = page_buffers(page);
			do {
				if (b < start_blk)
					continue;
				if (buffer_delay(bh)) {
					if (length == 0)
						*blkoff = b;
					length++;
				} else if (length > 0) {
					goto out_locked;
				}
			} while (++b, bh = bh->b_this_page, bh != head);
		} else {
			if (length > 0)
				goto out_locked;

			b += nblocks_in_page;
		}
		unlock_page(page);

	} while (++i < pagevec_count(&pvec));

	index = page->index + 1;
	pagevec_release(&pvec);
	cond_resched();
	goto repeat;

out_locked:
	unlock_page(page);
out:
	pagevec_release(&pvec);
	return length;
}