fs/nilfs2/mdt.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0+
  2/*
  3 * Meta data file for NILFS
  4 *
  5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  6 *
  7 * Written by Ryusuke Konishi.
  8 */
  9
 10#include <linux/buffer_head.h>
 11#include <linux/mpage.h>
 12#include <linux/mm.h>
 13#include <linux/writeback.h>
 14#include <linux/backing-dev.h>
 15#include <linux/swap.h>
 16#include <linux/slab.h>
 17#include "nilfs.h"
 18#include "btnode.h"
 19#include "segment.h"
 20#include "page.h"
 21#include "mdt.h"
 22#include "alloc.h"		/* nilfs_palloc_destroy_cache() */
 23
 24#include <trace/events/nilfs2.h>
 25
 26#define NILFS_MDT_MAX_RA_BLOCKS		(16 - 1)
 27
 28
 29static int
 30nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
 31			   struct buffer_head *bh,
 32			   void (*init_block)(struct inode *,
 33					      struct buffer_head *, void *))
 34{
 35	struct nilfs_inode_info *ii = NILFS_I(inode);
 36	struct folio *folio = bh->b_folio;
 37	void *from;
 38	int ret;
 39
 40	/* The caller excludes read accesses using the page lock. */
 41
 42	/* set_buffer_new(bh); */
 43	bh->b_blocknr = 0;
 44
 45	ret = nilfs_bmap_insert(ii->i_bmap, block, (unsigned long)bh);
 46	if (unlikely(ret))
 47		return ret;
 48
 49	set_buffer_mapped(bh);
 50
 51	/* Initialize block (block size > PAGE_SIZE not yet supported) */
 52	from = kmap_local_folio(folio, offset_in_folio(folio, bh->b_data));
 53	memset(from, 0, bh->b_size);
 54	if (init_block)
 55		init_block(inode, bh, from);
 56	kunmap_local(from);
 57
 58	flush_dcache_folio(folio);
 59
 60	set_buffer_uptodate(bh);
 61	mark_buffer_dirty(bh);
 62	nilfs_mdt_mark_dirty(inode);
 63
 64	trace_nilfs2_mdt_insert_new_block(inode, inode->i_ino, block);
 65
 66	return 0;
 67}
 68
 69static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
 70				  struct buffer_head **out_bh,
 71				  void (*init_block)(struct inode *,
 72						     struct buffer_head *,
 73						     void *))
 74{
 75	struct super_block *sb = inode->i_sb;
 76	struct nilfs_transaction_info ti;
 77	struct buffer_head *bh;
 78	int err;
 79
 80	nilfs_transaction_begin(sb, &ti, 0);
 81
 82	err = -ENOMEM;
 83	bh = nilfs_grab_buffer(inode, inode->i_mapping, block, 0);
 84	if (unlikely(!bh))
 85		goto failed_unlock;
 86
 87	err = -EEXIST;
 88	if (buffer_uptodate(bh))
 89		goto failed_bh;
 90
 91	wait_on_buffer(bh);
 92	if (buffer_uptodate(bh))
 93		goto failed_bh;
 94
 
 95	err = nilfs_mdt_insert_new_block(inode, block, bh, init_block);
 96	if (likely(!err)) {
 97		get_bh(bh);
 98		*out_bh = bh;
 99	}
100
101 failed_bh:
102	folio_unlock(bh->b_folio);
103	folio_put(bh->b_folio);
104	brelse(bh);
105
106 failed_unlock:
107	if (likely(!err))
108		err = nilfs_transaction_commit(sb);
109	else
110		nilfs_transaction_abort(sb);
111
112	return err;
113}
114
115static int
116nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff, blk_opf_t opf,
117		       struct buffer_head **out_bh)
118{
119	struct buffer_head *bh;
120	__u64 blknum = 0;
121	int ret = -ENOMEM;
122
123	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
124	if (unlikely(!bh))
125		goto failed;
126
127	ret = -EEXIST; /* internal code */
128	if (buffer_uptodate(bh))
129		goto out;
130
131	if (opf & REQ_RAHEAD) {
132		if (!trylock_buffer(bh)) {
133			ret = -EBUSY;
134			goto failed_bh;
135		}
136	} else /* opf == REQ_OP_READ */
137		lock_buffer(bh);
138
139	if (buffer_uptodate(bh)) {
140		unlock_buffer(bh);
141		goto out;
142	}
143
144	ret = nilfs_bmap_lookup(NILFS_I(inode)->i_bmap, blkoff, &blknum);
145	if (unlikely(ret)) {
146		unlock_buffer(bh);
147		goto failed_bh;
148	}
149	map_bh(bh, inode->i_sb, (sector_t)blknum);
150
151	bh->b_end_io = end_buffer_read_sync;
152	get_bh(bh);
153	submit_bh(opf, bh);
154	ret = 0;
155
156	trace_nilfs2_mdt_submit_block(inode, inode->i_ino, blkoff,
157				      opf & REQ_OP_MASK);
158 out:
159	get_bh(bh);
160	*out_bh = bh;
161
162 failed_bh:
163	folio_unlock(bh->b_folio);
164	folio_put(bh->b_folio);
165	brelse(bh);
166 failed:
167	return ret;
168}
169
170static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
171				int readahead, struct buffer_head **out_bh)
172{
173	struct buffer_head *first_bh, *bh;
174	unsigned long blkoff;
175	int i, nr_ra_blocks = NILFS_MDT_MAX_RA_BLOCKS;
176	int err;
177
178	err = nilfs_mdt_submit_block(inode, block, REQ_OP_READ, &first_bh);
179	if (err == -EEXIST) /* internal code */
180		goto out;
181
182	if (unlikely(err))
183		goto failed;
184
185	if (readahead) {
186		blkoff = block + 1;
187		for (i = 0; i < nr_ra_blocks; i++, blkoff++) {
188			err = nilfs_mdt_submit_block(inode, blkoff,
189						REQ_OP_READ | REQ_RAHEAD, &bh);
190			if (likely(!err || err == -EEXIST))
191				brelse(bh);
192			else if (err != -EBUSY)
193				break;
194				/* abort readahead if bmap lookup failed */
195			if (!buffer_locked(first_bh))
196				goto out_no_wait;
197		}
198	}
199
200	wait_on_buffer(first_bh);
201
202 out_no_wait:
203	err = -EIO;
204	if (!buffer_uptodate(first_bh)) {
205		nilfs_err(inode->i_sb,
206			  "I/O error reading meta-data file (ino=%lu, block-offset=%lu)",
207			  inode->i_ino, block);
208		goto failed_bh;
209	}
210 out:
211	*out_bh = first_bh;
212	return 0;
213
214 failed_bh:
215	brelse(first_bh);
216 failed:
217	return err;
218}
219
220/**
221 * nilfs_mdt_get_block - read or create a buffer on meta data file.
222 * @inode: inode of the meta data file
223 * @blkoff: block offset
224 * @create: create flag
225 * @init_block: initializer used for newly allocated block
226 * @out_bh: output of a pointer to the buffer_head
227 *
228 * nilfs_mdt_get_block() looks up the specified buffer and tries to create
229 * a new buffer if @create is not zero.  On success, the returned buffer is
 230 * assured to be either existing or formatted using a buffer lock.
231 * @out_bh is substituted only when zero is returned.
232 *
233 * Return Value: On success, it returns 0. On error, the following negative
234 * error code is returned.
235 *
236 * %-ENOMEM - Insufficient memory available.
237 *
238 * %-EIO - I/O error
239 *
240 * %-ENOENT - the specified block does not exist (hole block)
241 *
242 * %-EROFS - Read only filesystem (for create mode)
243 */
244int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create,
245			void (*init_block)(struct inode *,
246					   struct buffer_head *, void *),
247			struct buffer_head **out_bh)
248{
249	int ret;
250
 251	/* Should be rewritten by merging with nilfs_mdt_read_block() */
252 retry:
253	ret = nilfs_mdt_read_block(inode, blkoff, !create, out_bh);
254	if (!create || ret != -ENOENT)
255		return ret;
256
257	ret = nilfs_mdt_create_block(inode, blkoff, out_bh, init_block);
258	if (unlikely(ret == -EEXIST)) {
259		/* create = 0; */  /* limit read-create loop retries */
260		goto retry;
261	}
262	return ret;
263}
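
A minimal editorial sketch of a caller, not part of mdt.c: it assumes a hypothetical metadata inode and shows the read-or-create pattern that nilfs_mdt_get_block() is documented for above. The helper names example_init_block() and example_touch_entry() are invented for illustration.

/* Editorial sketch (not in mdt.c): read or create one metadata block. */
static void example_init_block(struct inode *inode, struct buffer_head *bh,
			       void *kaddr)
{
	/* The new block is already zeroed; format any on-disk header here. */
}

static int example_touch_entry(struct inode *mdt_inode, unsigned long blkoff)
{
	struct buffer_head *bh;
	int err;

	/* create != 0: allocate and format the block if the offset is a hole. */
	err = nilfs_mdt_get_block(mdt_inode, blkoff, 1, example_init_block, &bh);
	if (err < 0)
		return err;		/* -ENOMEM, -EIO, or -EROFS */

	/* ... modify the buffer contents under appropriate locking ... */
	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(mdt_inode);
	brelse(bh);			/* drop the reference taken for *out_bh */
	return 0;
}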
264
265/**
266 * nilfs_mdt_find_block - find and get a buffer on meta data file.
267 * @inode: inode of the meta data file
268 * @start: start block offset (inclusive)
269 * @end: end block offset (inclusive)
270 * @blkoff: block offset
271 * @out_bh: place to store a pointer to buffer_head struct
272 *
273 * nilfs_mdt_find_block() looks up an existing block in range of
274 * [@start, @end] and stores pointer to a buffer head of the block to
275 * @out_bh, and block offset to @blkoff, respectively.  @out_bh and
276 * @blkoff are substituted only when zero is returned.
277 *
278 * Return Value: On success, it returns 0. On error, the following negative
279 * error code is returned.
280 *
281 * %-ENOMEM - Insufficient memory available.
282 *
283 * %-EIO - I/O error
284 *
285 * %-ENOENT - no block was found in the range
286 */
287int nilfs_mdt_find_block(struct inode *inode, unsigned long start,
288			 unsigned long end, unsigned long *blkoff,
289			 struct buffer_head **out_bh)
290{
291	__u64 next;
292	int ret;
293
294	if (unlikely(start > end))
295		return -ENOENT;
296
297	ret = nilfs_mdt_read_block(inode, start, true, out_bh);
298	if (!ret) {
299		*blkoff = start;
300		goto out;
301	}
302	if (unlikely(ret != -ENOENT || start == ULONG_MAX))
303		goto out;
304
305	ret = nilfs_bmap_seek_key(NILFS_I(inode)->i_bmap, start + 1, &next);
306	if (!ret) {
307		if (next <= end) {
308			ret = nilfs_mdt_read_block(inode, next, true, out_bh);
309			if (!ret)
310				*blkoff = next;
311		} else {
312			ret = -ENOENT;
313		}
314	}
315out:
316	return ret;
317}
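
A short editorial sketch, not part of mdt.c, of how nilfs_mdt_find_block() can locate the first populated block in a range; the caller example_first_block() is hypothetical.

/* Editorial sketch (not in mdt.c): find the first existing block in a range. */
static int example_first_block(struct inode *mdt_inode, unsigned long start,
			       unsigned long end)
{
	struct buffer_head *bh;
	unsigned long blkoff;
	int ret;

	ret = nilfs_mdt_find_block(mdt_inode, start, end, &blkoff, &bh);
	if (ret == -ENOENT)
		return 0;	/* every offset in [start, end] is a hole */
	if (ret < 0)
		return ret;	/* -ENOMEM or -EIO */

	/* On success, blkoff holds the offset of the block behind bh. */
	brelse(bh);
	return 0;
}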
318
319/**
320 * nilfs_mdt_delete_block - make a hole on the meta data file.
321 * @inode: inode of the meta data file
322 * @block: block offset
323 *
324 * Return Value: On success, zero is returned.
 325 * On error, one of the following negative error codes is returned.
326 *
327 * %-ENOMEM - Insufficient memory available.
328 *
329 * %-EIO - I/O error
330 */
331int nilfs_mdt_delete_block(struct inode *inode, unsigned long block)
332{
333	struct nilfs_inode_info *ii = NILFS_I(inode);
334	int err;
335
336	err = nilfs_bmap_delete(ii->i_bmap, block);
337	if (!err || err == -ENOENT) {
338		nilfs_mdt_mark_dirty(inode);
339		nilfs_mdt_forget_block(inode, block);
340	}
341	return err;
342}
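
As a hedged illustration of the semantics above (not part of mdt.c): once nilfs_mdt_delete_block() succeeds, the offset becomes a hole, so a non-creating nilfs_mdt_get_block() lookup reports -ENOENT. The function example_punch_hole() is invented for this sketch.

/* Editorial sketch (not in mdt.c): punch a hole, then observe it. */
static int example_punch_hole(struct inode *mdt_inode, unsigned long blkoff)
{
	struct buffer_head *bh;
	int err;

	err = nilfs_mdt_delete_block(mdt_inode, blkoff);
	if (err && err != -ENOENT)
		return err;

	/* With create == 0, the lookup now fails with -ENOENT (hole block). */
	err = nilfs_mdt_get_block(mdt_inode, blkoff, 0, NULL, &bh);
	if (!err)
		brelse(bh);	/* would only happen if the block still exists */
	return 0;
}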
343
344/**
345 * nilfs_mdt_forget_block - discard dirty state and try to remove the page
346 * @inode: inode of the meta data file
347 * @block: block offset
348 *
349 * nilfs_mdt_forget_block() clears a dirty flag of the specified buffer, and
350 * tries to release the page including the buffer from a page cache.
351 *
352 * Return Value: On success, 0 is returned. On error, one of the following
 353 * negative error codes is returned.
354 *
355 * %-EBUSY - page has an active buffer.
356 *
357 * %-ENOENT - page cache has no page addressed by the offset.
358 */
359int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
360{
361	pgoff_t index = block >> (PAGE_SHIFT - inode->i_blkbits);
362	struct folio *folio;
363	struct buffer_head *bh;
364	int ret = 0;
365	int still_dirty;
366
367	folio = filemap_lock_folio(inode->i_mapping, index);
368	if (IS_ERR(folio))
369		return -ENOENT;
370
371	folio_wait_writeback(folio);
372
373	bh = folio_buffers(folio);
374	if (bh) {
375		unsigned long first_block = index <<
376				(PAGE_SHIFT - inode->i_blkbits);
377		bh = get_nth_bh(bh, block - first_block);
378		nilfs_forget_buffer(bh);
379	}
380	still_dirty = folio_test_dirty(folio);
381	folio_unlock(folio);
382	folio_put(folio);
383
384	if (still_dirty ||
385	    invalidate_inode_pages2_range(inode->i_mapping, index, index) != 0)
386		ret = -EBUSY;
387	return ret;
388}
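
A worked example of the index arithmetic used above, under an assumed geometry (4 KiB pages, 1 KiB metadata blocks); the numbers are illustrative only.

/*
 * Editorial note (not in mdt.c): with PAGE_SHIFT == 12 and i_blkbits == 10,
 * forgetting block 10 works out as follows:
 *
 *   index       = 10 >> (12 - 10) = 2    page/folio index in the cache
 *   first_block = 2 << (12 - 10)  = 8    first block backed by that folio
 *   nth buffer  = 10 - 8          = 2    buffer handed to nilfs_forget_buffer()
 */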
389
390int nilfs_mdt_fetch_dirty(struct inode *inode)
391{
392	struct nilfs_inode_info *ii = NILFS_I(inode);
393
394	if (nilfs_bmap_test_and_clear_dirty(ii->i_bmap)) {
395		set_bit(NILFS_I_DIRTY, &ii->i_state);
396		return 1;
397	}
398	return test_bit(NILFS_I_DIRTY, &ii->i_state);
399}
400
401static int nilfs_mdt_write_folio(struct folio *folio,
402		struct writeback_control *wbc)
403{
404	struct inode *inode = folio->mapping->host;
405	struct super_block *sb;
406	int err = 0;
407
408	if (inode && sb_rdonly(inode->i_sb)) {
409		/*
410		 * It means that filesystem was remounted in read-only
411		 * mode because of error or metadata corruption. But we
412		 * have dirty folios that try to be flushed in background.
413		 * So, here we simply discard this dirty folio.
414		 */
415		nilfs_clear_folio_dirty(folio);
416		folio_unlock(folio);
417		return -EROFS;
418	}
419
420	folio_redirty_for_writepage(wbc, folio);
421	folio_unlock(folio);
422
423	if (!inode)
424		return 0;
425
426	sb = inode->i_sb;
427
428	if (wbc->sync_mode == WB_SYNC_ALL)
429		err = nilfs_construct_segment(sb);
430	else if (wbc->for_reclaim)
431		nilfs_flush_segment(sb, inode->i_ino);
432
433	return err;
434}
435
436static int nilfs_mdt_writeback(struct address_space *mapping,
437		struct writeback_control *wbc)
438{
439	struct folio *folio = NULL;
440	int error;
441
442	while ((folio = writeback_iter(mapping, wbc, folio, &error)))
443		error = nilfs_mdt_write_folio(folio, wbc);
444
445	return error;
446}
447
448static const struct address_space_operations def_mdt_aops = {
449	.dirty_folio		= block_dirty_folio,
450	.invalidate_folio	= block_invalidate_folio,
451	.writepages		= nilfs_mdt_writeback,
452	.migrate_folio		= buffer_migrate_folio_norefs,
453};
454
455static const struct inode_operations def_mdt_iops;
456static const struct file_operations def_mdt_fops;
457
458
459int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz)
460{
461	struct nilfs_mdt_info *mi;
462
463	mi = kzalloc(max(sizeof(*mi), objsz), GFP_NOFS);
464	if (!mi)
465		return -ENOMEM;
466
467	init_rwsem(&mi->mi_sem);
468	inode->i_private = mi;
469
470	inode->i_mode = S_IFREG;
471	mapping_set_gfp_mask(inode->i_mapping, gfp_mask);
472
473	inode->i_op = &def_mdt_iops;
474	inode->i_fop = &def_mdt_fops;
475	inode->i_mapping->a_ops = &def_mdt_aops;
476
477	return 0;
478}
479
480/**
481 * nilfs_mdt_clear - do cleanup for the metadata file
482 * @inode: inode of the metadata file
483 */
484void nilfs_mdt_clear(struct inode *inode)
485{
486	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
487	struct nilfs_shadow_map *shadow = mdi->mi_shadow;
488
489	if (mdi->mi_palloc_cache)
490		nilfs_palloc_destroy_cache(inode);
491
492	if (shadow) {
493		struct inode *s_inode = shadow->inode;
494
495		shadow->inode = NULL;
496		iput(s_inode);
497		mdi->mi_shadow = NULL;
498	}
499}
500
501/**
502 * nilfs_mdt_destroy - release resources used by the metadata file
503 * @inode: inode of the metadata file
504 */
505void nilfs_mdt_destroy(struct inode *inode)
506{
507	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
508
509	kfree(mdi->mi_bgl); /* kfree(NULL) is safe */
510	kfree(mdi);
511}
512
513void nilfs_mdt_set_entry_size(struct inode *inode, unsigned int entry_size,
514			      unsigned int header_size)
515{
516	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
517
518	mi->mi_entry_size = entry_size;
519	mi->mi_entries_per_block = i_blocksize(inode) / entry_size;
520	mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size);
521}
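
A brief editorial sketch, not part of mdt.c, of the usual setup sequence for a metadata file: nilfs_mdt_init() installs the mdt operations, then nilfs_mdt_set_entry_size() derives the per-block layout. The caller example_mdt_setup(), the 64-byte entry size, the 16-byte header, and the use of GFP_NOFS are assumptions made for illustration; real callers pass the mask defined for each metadata file.

/* Editorial sketch (not in mdt.c): typical metadata file initialization. */
static int example_mdt_setup(struct inode *inode)
{
	int err;

	/* Allocate nilfs_mdt_info and install the mdt aops/iops/fops. */
	err = nilfs_mdt_init(inode, GFP_NOFS, sizeof(struct nilfs_mdt_info));
	if (err)
		return err;

	/*
	 * With a 4096-byte block size, 64-byte entries and a 16-byte header:
	 *   mi_entries_per_block  = 4096 / 64            = 64
	 *   mi_first_entry_offset = DIV_ROUND_UP(16, 64) = 1
	 */
	nilfs_mdt_set_entry_size(inode, 64, 16);
	return 0;
}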
522
523/**
524 * nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file
525 * @inode: inode of the metadata file
526 * @shadow: shadow mapping
527 */
528int nilfs_mdt_setup_shadow_map(struct inode *inode,
529			       struct nilfs_shadow_map *shadow)
530{
531	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
532	struct inode *s_inode;
533
534	INIT_LIST_HEAD(&shadow->frozen_buffers);
535
536	s_inode = nilfs_iget_for_shadow(inode);
537	if (IS_ERR(s_inode))
538		return PTR_ERR(s_inode);
539
540	shadow->inode = s_inode;
541	mi->mi_shadow = shadow;
542	return 0;
543}
544
545/**
546 * nilfs_mdt_save_to_shadow_map - copy bmap and dirty pages to shadow map
547 * @inode: inode of the metadata file
548 */
549int nilfs_mdt_save_to_shadow_map(struct inode *inode)
550{
551	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
552	struct nilfs_inode_info *ii = NILFS_I(inode);
553	struct nilfs_shadow_map *shadow = mi->mi_shadow;
554	struct inode *s_inode = shadow->inode;
555	int ret;
556
557	ret = nilfs_copy_dirty_pages(s_inode->i_mapping, inode->i_mapping);
558	if (ret)
559		goto out;
560
561	ret = nilfs_copy_dirty_pages(NILFS_I(s_inode)->i_assoc_inode->i_mapping,
562				     ii->i_assoc_inode->i_mapping);
563	if (ret)
564		goto out;
565
566	nilfs_bmap_save(ii->i_bmap, &shadow->bmap_store);
567 out:
568	return ret;
569}
570
571int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
572{
573	struct nilfs_shadow_map *shadow = NILFS_MDT(inode)->mi_shadow;
574	struct buffer_head *bh_frozen;
575	struct folio *folio;
576	int blkbits = inode->i_blkbits;
577
578	folio = filemap_grab_folio(shadow->inode->i_mapping,
579			bh->b_folio->index);
580	if (IS_ERR(folio))
581		return PTR_ERR(folio);
582
583	bh_frozen = folio_buffers(folio);
584	if (!bh_frozen)
585		bh_frozen = create_empty_buffers(folio, 1 << blkbits, 0);
586
587	bh_frozen = get_nth_bh(bh_frozen,
588			       offset_in_folio(folio, bh->b_data) >> blkbits);
589
590	if (!buffer_uptodate(bh_frozen))
591		nilfs_copy_buffer(bh_frozen, bh);
592	if (list_empty(&bh_frozen->b_assoc_buffers)) {
593		list_add_tail(&bh_frozen->b_assoc_buffers,
594			      &shadow->frozen_buffers);
595		set_buffer_nilfs_redirected(bh);
596	} else {
597		brelse(bh_frozen); /* already frozen */
598	}
599
600	folio_unlock(folio);
601	folio_put(folio);
602	return 0;
603}
604
605struct buffer_head *
606nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
607{
608	struct nilfs_shadow_map *shadow = NILFS_MDT(inode)->mi_shadow;
609	struct buffer_head *bh_frozen = NULL;
610	struct folio *folio;
611	int n;
612
613	folio = filemap_lock_folio(shadow->inode->i_mapping,
614			bh->b_folio->index);
615	if (!IS_ERR(folio)) {
616		bh_frozen = folio_buffers(folio);
617		if (bh_frozen) {
618			n = offset_in_folio(folio, bh->b_data) >>
619				inode->i_blkbits;
620			bh_frozen = get_nth_bh(bh_frozen, n);
621		}
622		folio_unlock(folio);
623		folio_put(folio);
624	}
625	return bh_frozen;
626}
627
628static void nilfs_release_frozen_buffers(struct nilfs_shadow_map *shadow)
629{
630	struct list_head *head = &shadow->frozen_buffers;
631	struct buffer_head *bh;
632
633	while (!list_empty(head)) {
634		bh = list_first_entry(head, struct buffer_head,
635				      b_assoc_buffers);
636		list_del_init(&bh->b_assoc_buffers);
637		brelse(bh); /* drop ref-count to make it releasable */
638	}
639}
640
641/**
642 * nilfs_mdt_restore_from_shadow_map - restore dirty pages and bmap state
643 * @inode: inode of the metadata file
644 */
645void nilfs_mdt_restore_from_shadow_map(struct inode *inode)
646{
647	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
648	struct nilfs_inode_info *ii = NILFS_I(inode);
649	struct nilfs_shadow_map *shadow = mi->mi_shadow;
650
651	down_write(&mi->mi_sem);
652
653	if (mi->mi_palloc_cache)
654		nilfs_palloc_clear_cache(inode);
655
656	nilfs_clear_dirty_pages(inode->i_mapping);
657	nilfs_copy_back_pages(inode->i_mapping, shadow->inode->i_mapping);
658
659	nilfs_clear_dirty_pages(ii->i_assoc_inode->i_mapping);
660	nilfs_copy_back_pages(ii->i_assoc_inode->i_mapping,
661			      NILFS_I(shadow->inode)->i_assoc_inode->i_mapping);
662
663	nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store);
664
665	up_write(&mi->mi_sem);
666}
667
668/**
669 * nilfs_mdt_clear_shadow_map - truncate pages in shadow map caches
670 * @inode: inode of the metadata file
671 */
672void nilfs_mdt_clear_shadow_map(struct inode *inode)
673{
674	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
675	struct nilfs_shadow_map *shadow = mi->mi_shadow;
676	struct inode *shadow_btnc_inode = NILFS_I(shadow->inode)->i_assoc_inode;
677
678	down_write(&mi->mi_sem);
679	nilfs_release_frozen_buffers(shadow);
680	truncate_inode_pages(shadow->inode->i_mapping, 0);
681	truncate_inode_pages(shadow_btnc_inode->i_mapping, 0);
682	up_write(&mi->mi_sem);
683}
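
To round off the v6.13.7 listing, a hedged editorial outline (not part of mdt.c) of how the shadow-map helpers above fit together: set up once, snapshot before a risky update, roll back on failure, and drop the snapshot afterwards. The functions example_shadow_cycle() and example_risky_update() are hypothetical.

/* Editorial sketch (not in mdt.c): the shadow map lifecycle in outline. */

/* Hypothetical update that may fail and need to be rolled back. */
static int example_risky_update(struct inode *inode);

static int example_shadow_cycle(struct inode *mdt_inode,
				struct nilfs_shadow_map *shadow)
{
	int err;

	/* Bind a shadow inode to the metadata file (done once at setup). */
	err = nilfs_mdt_setup_shadow_map(mdt_inode, shadow);
	if (err)
		return err;

	/* Snapshot dirty pages and the bmap before the risky operation. */
	err = nilfs_mdt_save_to_shadow_map(mdt_inode);
	if (err)
		return err;

	if (example_risky_update(mdt_inode) < 0)
		/* Roll back: copy the saved pages and bmap state back. */
		nilfs_mdt_restore_from_shadow_map(mdt_inode);

	/* Release frozen buffers and truncate the shadow caches either way. */
	nilfs_mdt_clear_shadow_map(mdt_inode);
	return 0;
}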
fs/nilfs2/mdt.c (v3.15)
 
  1/*
  2 * mdt.c - meta data file for NILFS
  3 *
  4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  5 *
  6 * This program is free software; you can redistribute it and/or modify
  7 * it under the terms of the GNU General Public License as published by
  8 * the Free Software Foundation; either version 2 of the License, or
  9 * (at your option) any later version.
 10 *
 11 * This program is distributed in the hope that it will be useful,
 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 14 * GNU General Public License for more details.
 15 *
 16 * You should have received a copy of the GNU General Public License
 17 * along with this program; if not, write to the Free Software
 18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 19 *
 20 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 21 */
 22
 23#include <linux/buffer_head.h>
 24#include <linux/mpage.h>
 25#include <linux/mm.h>
 26#include <linux/writeback.h>
 27#include <linux/backing-dev.h>
 28#include <linux/swap.h>
 29#include <linux/slab.h>
 30#include "nilfs.h"
 31#include "btnode.h"
 32#include "segment.h"
 33#include "page.h"
 34#include "mdt.h"
 35
 36
 37#define NILFS_MDT_MAX_RA_BLOCKS		(16 - 1)
 38
 39
 40static int
 41nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
 42			   struct buffer_head *bh,
 43			   void (*init_block)(struct inode *,
 44					      struct buffer_head *, void *))
 45{
 46	struct nilfs_inode_info *ii = NILFS_I(inode);
 47	void *kaddr;
 48	int ret;
 49
 50	/* Caller exclude read accesses using page lock */
 51
 52	/* set_buffer_new(bh); */
 53	bh->b_blocknr = 0;
 54
 55	ret = nilfs_bmap_insert(ii->i_bmap, block, (unsigned long)bh);
 56	if (unlikely(ret))
 57		return ret;
 58
 59	set_buffer_mapped(bh);
 60
 61	kaddr = kmap_atomic(bh->b_page);
 62	memset(kaddr + bh_offset(bh), 0, 1 << inode->i_blkbits);
 63	if (init_block)
 64		init_block(inode, bh, kaddr);
 65	flush_dcache_page(bh->b_page);
 66	kunmap_atomic(kaddr);
 67
 68	set_buffer_uptodate(bh);
 69	mark_buffer_dirty(bh);
 70	nilfs_mdt_mark_dirty(inode);
 71	return 0;
 72}
 73
 74static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
 75				  struct buffer_head **out_bh,
 76				  void (*init_block)(struct inode *,
 77						     struct buffer_head *,
 78						     void *))
 79{
 80	struct super_block *sb = inode->i_sb;
 81	struct nilfs_transaction_info ti;
 82	struct buffer_head *bh;
 83	int err;
 84
 85	nilfs_transaction_begin(sb, &ti, 0);
 86
 87	err = -ENOMEM;
 88	bh = nilfs_grab_buffer(inode, inode->i_mapping, block, 0);
 89	if (unlikely(!bh))
 90		goto failed_unlock;
 91
 92	err = -EEXIST;
 93	if (buffer_uptodate(bh))
 94		goto failed_bh;
 95
 96	wait_on_buffer(bh);
 97	if (buffer_uptodate(bh))
 98		goto failed_bh;
 99
100	bh->b_bdev = sb->s_bdev;
101	err = nilfs_mdt_insert_new_block(inode, block, bh, init_block);
102	if (likely(!err)) {
103		get_bh(bh);
104		*out_bh = bh;
105	}
106
107 failed_bh:
108	unlock_page(bh->b_page);
109	page_cache_release(bh->b_page);
110	brelse(bh);
111
112 failed_unlock:
113	if (likely(!err))
114		err = nilfs_transaction_commit(sb);
115	else
116		nilfs_transaction_abort(sb);
117
118	return err;
119}
120
121static int
122nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
123		       int mode, struct buffer_head **out_bh)
124{
125	struct buffer_head *bh;
126	__u64 blknum = 0;
127	int ret = -ENOMEM;
128
129	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
130	if (unlikely(!bh))
131		goto failed;
132
133	ret = -EEXIST; /* internal code */
134	if (buffer_uptodate(bh))
135		goto out;
136
137	if (mode == READA) {
138		if (!trylock_buffer(bh)) {
139			ret = -EBUSY;
140			goto failed_bh;
141		}
142	} else /* mode == READ */
143		lock_buffer(bh);
144
145	if (buffer_uptodate(bh)) {
146		unlock_buffer(bh);
147		goto out;
148	}
149
150	ret = nilfs_bmap_lookup(NILFS_I(inode)->i_bmap, blkoff, &blknum);
151	if (unlikely(ret)) {
152		unlock_buffer(bh);
153		goto failed_bh;
154	}
155	map_bh(bh, inode->i_sb, (sector_t)blknum);
156
157	bh->b_end_io = end_buffer_read_sync;
158	get_bh(bh);
159	submit_bh(mode, bh);
160	ret = 0;
161 out:
162	get_bh(bh);
163	*out_bh = bh;
164
165 failed_bh:
166	unlock_page(bh->b_page);
167	page_cache_release(bh->b_page);
168	brelse(bh);
169 failed:
170	return ret;
171}
172
173static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
174				int readahead, struct buffer_head **out_bh)
175{
176	struct buffer_head *first_bh, *bh;
177	unsigned long blkoff;
178	int i, nr_ra_blocks = NILFS_MDT_MAX_RA_BLOCKS;
179	int err;
180
181	err = nilfs_mdt_submit_block(inode, block, READ, &first_bh);
182	if (err == -EEXIST) /* internal code */
183		goto out;
184
185	if (unlikely(err))
186		goto failed;
187
188	if (readahead) {
189		blkoff = block + 1;
190		for (i = 0; i < nr_ra_blocks; i++, blkoff++) {
191			err = nilfs_mdt_submit_block(inode, blkoff, READA, &bh);
192			if (likely(!err || err == -EEXIST))
193				brelse(bh);
194			else if (err != -EBUSY)
195				break;
196				/* abort readahead if bmap lookup failed */
197			if (!buffer_locked(first_bh))
198				goto out_no_wait;
199		}
200	}
201
202	wait_on_buffer(first_bh);
203
204 out_no_wait:
205	err = -EIO;
206	if (!buffer_uptodate(first_bh))
207		goto failed_bh;
208 out:
209	*out_bh = first_bh;
210	return 0;
211
212 failed_bh:
213	brelse(first_bh);
214 failed:
215	return err;
216}
217
218/**
219 * nilfs_mdt_get_block - read or create a buffer on meta data file.
220 * @inode: inode of the meta data file
221 * @blkoff: block offset
222 * @create: create flag
223 * @init_block: initializer used for newly allocated block
224 * @out_bh: output of a pointer to the buffer_head
225 *
226 * nilfs_mdt_get_block() looks up the specified buffer and tries to create
227 * a new buffer if @create is not zero.  On success, the returned buffer is
228 * assured to be either existing or formatted using a buffer lock on success.
229 * @out_bh is substituted only when zero is returned.
230 *
231 * Return Value: On success, it returns 0. On error, the following negative
232 * error code is returned.
233 *
234 * %-ENOMEM - Insufficient memory available.
235 *
236 * %-EIO - I/O error
237 *
238 * %-ENOENT - the specified block does not exist (hole block)
239 *
240 * %-EROFS - Read only filesystem (for create mode)
241 */
242int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create,
243			void (*init_block)(struct inode *,
244					   struct buffer_head *, void *),
245			struct buffer_head **out_bh)
246{
247	int ret;
248
249	/* Should be rewritten with merging nilfs_mdt_read_block() */
250 retry:
251	ret = nilfs_mdt_read_block(inode, blkoff, !create, out_bh);
252	if (!create || ret != -ENOENT)
253		return ret;
254
255	ret = nilfs_mdt_create_block(inode, blkoff, out_bh, init_block);
256	if (unlikely(ret == -EEXIST)) {
257		/* create = 0; */  /* limit read-create loop retries */
258		goto retry;
259	}
260	return ret;
261}
262
263/**
264 * nilfs_mdt_delete_block - make a hole on the meta data file.
265 * @inode: inode of the meta data file
266 * @block: block offset
267 *
268 * Return Value: On success, zero is returned.
269 * On error, one of the following negative error code is returned.
270 *
271 * %-ENOMEM - Insufficient memory available.
272 *
273 * %-EIO - I/O error
274 */
275int nilfs_mdt_delete_block(struct inode *inode, unsigned long block)
276{
277	struct nilfs_inode_info *ii = NILFS_I(inode);
278	int err;
279
280	err = nilfs_bmap_delete(ii->i_bmap, block);
281	if (!err || err == -ENOENT) {
282		nilfs_mdt_mark_dirty(inode);
283		nilfs_mdt_forget_block(inode, block);
284	}
285	return err;
286}
287
288/**
289 * nilfs_mdt_forget_block - discard dirty state and try to remove the page
290 * @inode: inode of the meta data file
291 * @block: block offset
292 *
293 * nilfs_mdt_forget_block() clears a dirty flag of the specified buffer, and
294 * tries to release the page including the buffer from a page cache.
295 *
296 * Return Value: On success, 0 is returned. On error, one of the following
297 * negative error code is returned.
298 *
299 * %-EBUSY - page has an active buffer.
300 *
301 * %-ENOENT - page cache has no page addressed by the offset.
302 */
303int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
304{
305	pgoff_t index = (pgoff_t)block >>
306		(PAGE_CACHE_SHIFT - inode->i_blkbits);
307	struct page *page;
308	unsigned long first_block;
309	int ret = 0;
310	int still_dirty;
311
312	page = find_lock_page(inode->i_mapping, index);
313	if (!page)
314		return -ENOENT;
315
316	wait_on_page_writeback(page);
317
318	first_block = (unsigned long)index <<
319		(PAGE_CACHE_SHIFT - inode->i_blkbits);
320	if (page_has_buffers(page)) {
321		struct buffer_head *bh;
322
323		bh = nilfs_page_get_nth_block(page, block - first_block);
324		nilfs_forget_buffer(bh);
325	}
326	still_dirty = PageDirty(page);
327	unlock_page(page);
328	page_cache_release(page);
329
330	if (still_dirty ||
331	    invalidate_inode_pages2_range(inode->i_mapping, index, index) != 0)
332		ret = -EBUSY;
333	return ret;
334}
335
336/**
337 * nilfs_mdt_mark_block_dirty - mark a block on the meta data file dirty.
338 * @inode: inode of the meta data file
339 * @block: block offset
340 *
341 * Return Value: On success, it returns 0. On error, the following negative
342 * error code is returned.
343 *
344 * %-ENOMEM - Insufficient memory available.
345 *
346 * %-EIO - I/O error
347 *
348 * %-ENOENT - the specified block does not exist (hole block)
349 */
350int nilfs_mdt_mark_block_dirty(struct inode *inode, unsigned long block)
351{
352	struct buffer_head *bh;
353	int err;
354
355	err = nilfs_mdt_read_block(inode, block, 0, &bh);
356	if (unlikely(err))
357		return err;
358	mark_buffer_dirty(bh);
359	nilfs_mdt_mark_dirty(inode);
360	brelse(bh);
361	return 0;
362}
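
nilfs_mdt_mark_block_dirty() exists only in this older version of the file; a small editorial sketch (not part of mdt.c) of how a caller might use it, with example_dirty_block() invented for illustration.

/* Editorial sketch (not in mdt.c): dirty one existing metadata block. */
static int example_dirty_block(struct inode *mdt_inode, unsigned long blkoff)
{
	int err;

	/* Reads the block (no readahead), then marks buffer and inode dirty. */
	err = nilfs_mdt_mark_block_dirty(mdt_inode, blkoff);
	if (err == -ENOENT)
		return 0;	/* the offset is a hole; nothing to dirty */
	return err;
}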
363
364int nilfs_mdt_fetch_dirty(struct inode *inode)
365{
366	struct nilfs_inode_info *ii = NILFS_I(inode);
367
368	if (nilfs_bmap_test_and_clear_dirty(ii->i_bmap)) {
369		set_bit(NILFS_I_DIRTY, &ii->i_state);
370		return 1;
371	}
372	return test_bit(NILFS_I_DIRTY, &ii->i_state);
373}
374
375static int
376nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
377{
378	struct inode *inode = page->mapping->host;
379	struct super_block *sb;
380	int err = 0;
381
382	if (inode && (inode->i_sb->s_flags & MS_RDONLY)) {
383		/*
384		 * It means that filesystem was remounted in read-only
385		 * mode because of error or metadata corruption. But we
386		 * have dirty pages that try to be flushed in background.
387		 * So, here we simply discard this dirty page.
388		 */
389		nilfs_clear_dirty_page(page, false);
390		unlock_page(page);
391		return -EROFS;
392	}
393
394	redirty_page_for_writepage(wbc, page);
395	unlock_page(page);
396
397	if (!inode)
398		return 0;
399
400	sb = inode->i_sb;
401
402	if (wbc->sync_mode == WB_SYNC_ALL)
403		err = nilfs_construct_segment(sb);
404	else if (wbc->for_reclaim)
405		nilfs_flush_segment(sb, inode->i_ino);
406
407	return err;
408}
409
410
411static const struct address_space_operations def_mdt_aops = {
412	.writepage		= nilfs_mdt_write_page,
413};
414
415static const struct inode_operations def_mdt_iops;
416static const struct file_operations def_mdt_fops;
417
418
419int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz)
420{
421	struct nilfs_mdt_info *mi;
422
423	mi = kzalloc(max(sizeof(*mi), objsz), GFP_NOFS);
424	if (!mi)
425		return -ENOMEM;
426
427	init_rwsem(&mi->mi_sem);
428	inode->i_private = mi;
429
430	inode->i_mode = S_IFREG;
431	mapping_set_gfp_mask(inode->i_mapping, gfp_mask);
432	inode->i_mapping->backing_dev_info = inode->i_sb->s_bdi;
433
434	inode->i_op = &def_mdt_iops;
435	inode->i_fop = &def_mdt_fops;
436	inode->i_mapping->a_ops = &def_mdt_aops;
437
438	return 0;
439}
440
441void nilfs_mdt_set_entry_size(struct inode *inode, unsigned entry_size,
442			      unsigned header_size)
443{
444	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
445
446	mi->mi_entry_size = entry_size;
447	mi->mi_entries_per_block = (1 << inode->i_blkbits) / entry_size;
448	mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size);
449}
450
451/**
452 * nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file
453 * @inode: inode of the metadata file
454 * @shadow: shadow mapping
455 */
456int nilfs_mdt_setup_shadow_map(struct inode *inode,
457			       struct nilfs_shadow_map *shadow)
458{
459	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
460	struct backing_dev_info *bdi = inode->i_sb->s_bdi;
461
462	INIT_LIST_HEAD(&shadow->frozen_buffers);
463	address_space_init_once(&shadow->frozen_data);
464	nilfs_mapping_init(&shadow->frozen_data, inode, bdi);
465	address_space_init_once(&shadow->frozen_btnodes);
466	nilfs_mapping_init(&shadow->frozen_btnodes, inode, bdi);
467	mi->mi_shadow = shadow;
468	return 0;
469}
470
471/**
472 * nilfs_mdt_save_to_shadow_map - copy bmap and dirty pages to shadow map
473 * @inode: inode of the metadata file
474 */
475int nilfs_mdt_save_to_shadow_map(struct inode *inode)
476{
477	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
478	struct nilfs_inode_info *ii = NILFS_I(inode);
479	struct nilfs_shadow_map *shadow = mi->mi_shadow;
480	int ret;
481
482	ret = nilfs_copy_dirty_pages(&shadow->frozen_data, inode->i_mapping);
483	if (ret)
484		goto out;
485
486	ret = nilfs_copy_dirty_pages(&shadow->frozen_btnodes,
487				     &ii->i_btnode_cache);
488	if (ret)
489		goto out;
490
491	nilfs_bmap_save(ii->i_bmap, &shadow->bmap_store);
492 out:
493	return ret;
494}
495
496int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
497{
498	struct nilfs_shadow_map *shadow = NILFS_MDT(inode)->mi_shadow;
499	struct buffer_head *bh_frozen;
500	struct page *page;
501	int blkbits = inode->i_blkbits;
502
503	page = grab_cache_page(&shadow->frozen_data, bh->b_page->index);
504	if (!page)
505		return -ENOMEM;
506
507	if (!page_has_buffers(page))
508		create_empty_buffers(page, 1 << blkbits, 0);
509
510	bh_frozen = nilfs_page_get_nth_block(page, bh_offset(bh) >> blkbits);
511
512	if (!buffer_uptodate(bh_frozen))
513		nilfs_copy_buffer(bh_frozen, bh);
514	if (list_empty(&bh_frozen->b_assoc_buffers)) {
515		list_add_tail(&bh_frozen->b_assoc_buffers,
516			      &shadow->frozen_buffers);
517		set_buffer_nilfs_redirected(bh);
518	} else {
519		brelse(bh_frozen); /* already frozen */
520	}
521
522	unlock_page(page);
523	page_cache_release(page);
524	return 0;
525}
526
527struct buffer_head *
528nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
529{
530	struct nilfs_shadow_map *shadow = NILFS_MDT(inode)->mi_shadow;
531	struct buffer_head *bh_frozen = NULL;
532	struct page *page;
533	int n;
534
535	page = find_lock_page(&shadow->frozen_data, bh->b_page->index);
536	if (page) {
537		if (page_has_buffers(page)) {
538			n = bh_offset(bh) >> inode->i_blkbits;
539			bh_frozen = nilfs_page_get_nth_block(page, n);
540		}
541		unlock_page(page);
542		page_cache_release(page);
543	}
544	return bh_frozen;
545}
546
547static void nilfs_release_frozen_buffers(struct nilfs_shadow_map *shadow)
548{
549	struct list_head *head = &shadow->frozen_buffers;
550	struct buffer_head *bh;
551
552	while (!list_empty(head)) {
553		bh = list_first_entry(head, struct buffer_head,
554				      b_assoc_buffers);
555		list_del_init(&bh->b_assoc_buffers);
556		brelse(bh); /* drop ref-count to make it releasable */
557	}
558}
559
560/**
561 * nilfs_mdt_restore_from_shadow_map - restore dirty pages and bmap state
562 * @inode: inode of the metadata file
563 */
564void nilfs_mdt_restore_from_shadow_map(struct inode *inode)
565{
566	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
567	struct nilfs_inode_info *ii = NILFS_I(inode);
568	struct nilfs_shadow_map *shadow = mi->mi_shadow;
569
570	down_write(&mi->mi_sem);
571
572	if (mi->mi_palloc_cache)
573		nilfs_palloc_clear_cache(inode);
574
575	nilfs_clear_dirty_pages(inode->i_mapping, true);
576	nilfs_copy_back_pages(inode->i_mapping, &shadow->frozen_data);
577
578	nilfs_clear_dirty_pages(&ii->i_btnode_cache, true);
579	nilfs_copy_back_pages(&ii->i_btnode_cache, &shadow->frozen_btnodes);
580
581	nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store);
582
583	up_write(&mi->mi_sem);
584}
585
586/**
587 * nilfs_mdt_clear_shadow_map - truncate pages in shadow map caches
588 * @inode: inode of the metadata file
589 */
590void nilfs_mdt_clear_shadow_map(struct inode *inode)
591{
592	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
593	struct nilfs_shadow_map *shadow = mi->mi_shadow;
594
595	down_write(&mi->mi_sem);
596	nilfs_release_frozen_buffers(shadow);
597	truncate_inode_pages(&shadow->frozen_data, 0);
598	truncate_inode_pages(&shadow->frozen_btnodes, 0);
599	up_write(&mi->mi_sem);
600}