// SPDX-License-Identifier: GPL-2.0+
/*
 * Meta data file for NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 */

#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "alloc.h"		/* nilfs_palloc_destroy_cache() */

#include <trace/events/nilfs2.h>

#define NILFS_MDT_MAX_RA_BLOCKS		(16 - 1)


static int
nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
			   struct buffer_head *bh,
			   void (*init_block)(struct inode *,
					      struct buffer_head *, void *))
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct folio *folio = bh->b_folio;
	void *from;
	int ret;

	/* The caller excludes read accesses using the folio lock */

	/* set_buffer_new(bh); */
	bh->b_blocknr = 0;

	ret = nilfs_bmap_insert(ii->i_bmap, block, (unsigned long)bh);
	if (unlikely(ret))
		return ret;

	set_buffer_mapped(bh);

	/* Initialize block (block size > PAGE_SIZE not yet supported) */
	from = kmap_local_folio(folio, offset_in_folio(folio, bh->b_data));
	memset(from, 0, bh->b_size);
	if (init_block)
		init_block(inode, bh, from);
	kunmap_local(from);

	flush_dcache_folio(folio);

	set_buffer_uptodate(bh);
	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(inode);

	trace_nilfs2_mdt_insert_new_block(inode, inode->i_ino, block);

	return 0;
}

static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
				  struct buffer_head **out_bh,
				  void (*init_block)(struct inode *,
						     struct buffer_head *,
						     void *))
{
	struct super_block *sb = inode->i_sb;
	struct nilfs_transaction_info ti;
	struct buffer_head *bh;
	int err;

	nilfs_transaction_begin(sb, &ti, 0);

	err = -ENOMEM;
	bh = nilfs_grab_buffer(inode, inode->i_mapping, block, 0);
	if (unlikely(!bh))
		goto failed_unlock;

	err = -EEXIST;
	if (buffer_uptodate(bh))
		goto failed_bh;

	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		goto failed_bh;

	err = nilfs_mdt_insert_new_block(inode, block, bh, init_block);
	if (likely(!err)) {
		get_bh(bh);
		*out_bh = bh;
	}

 failed_bh:
	folio_unlock(bh->b_folio);
	folio_put(bh->b_folio);
	brelse(bh);

 failed_unlock:
	if (likely(!err))
		err = nilfs_transaction_commit(sb);
	else
		nilfs_transaction_abort(sb);

	return err;
}

static int
nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff, blk_opf_t opf,
		       struct buffer_head **out_bh)
{
	struct buffer_head *bh;
	__u64 blknum = 0;
	int ret = -ENOMEM;

	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
	if (unlikely(!bh))
		goto failed;

	ret = -EEXIST;	/* internal code */
	if (buffer_uptodate(bh))
		goto out;

	if (opf & REQ_RAHEAD) {
		if (!trylock_buffer(bh)) {
			ret = -EBUSY;
			goto failed_bh;
		}
	} else /* opf == REQ_OP_READ */
		lock_buffer(bh);

	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		goto out;
	}

	ret = nilfs_bmap_lookup(NILFS_I(inode)->i_bmap, blkoff, &blknum);
	if (unlikely(ret)) {
		unlock_buffer(bh);
		goto failed_bh;
	}
	map_bh(bh, inode->i_sb, (sector_t)blknum);

	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(opf, bh);
	ret = 0;

	trace_nilfs2_mdt_submit_block(inode, inode->i_ino, blkoff,
				      opf & REQ_OP_MASK);
 out:
	get_bh(bh);
	*out_bh = bh;

 failed_bh:
	folio_unlock(bh->b_folio);
	folio_put(bh->b_folio);
	brelse(bh);
 failed:
	return ret;
}

static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
				int readahead, struct buffer_head **out_bh)
{
	struct buffer_head *first_bh, *bh;
	unsigned long blkoff;
	int i, nr_ra_blocks = NILFS_MDT_MAX_RA_BLOCKS;
	int err;

	err = nilfs_mdt_submit_block(inode, block, REQ_OP_READ, &first_bh);
	if (err == -EEXIST) /* internal code */
		goto out;

	if (unlikely(err))
		goto failed;

	if (readahead) {
		blkoff = block + 1;
		for (i = 0; i < nr_ra_blocks; i++, blkoff++) {
			err = nilfs_mdt_submit_block(inode, blkoff,
						REQ_OP_READ | REQ_RAHEAD, &bh);
			if (likely(!err || err == -EEXIST))
				brelse(bh);
			else if (err != -EBUSY)
				break;
				/* abort readahead if bmap lookup failed */
			if (!buffer_locked(first_bh))
				goto out_no_wait;
		}
	}

	wait_on_buffer(first_bh);

 out_no_wait:
	err = -EIO;
	if (!buffer_uptodate(first_bh)) {
		nilfs_err(inode->i_sb,
			  "I/O error reading meta-data file (ino=%lu, block-offset=%lu)",
			  inode->i_ino, block);
		goto failed_bh;
	}
 out:
	*out_bh = first_bh;
	return 0;

 failed_bh:
	brelse(first_bh);
 failed:
	return err;
}

/**
 * nilfs_mdt_get_block - read or create a buffer on meta data file.
 * @inode: inode of the meta data file
 * @blkoff: block offset
 * @create: create flag
 * @init_block: initializer used for newly allocated block
 * @out_bh: output of a pointer to the buffer_head
 *
 * nilfs_mdt_get_block() looks up the specified buffer and tries to create
 * a new buffer if @create is not zero.  On success, the returned buffer is
 * assured to be either existing or formatted using a buffer lock.
 * @out_bh is substituted only when zero is returned.
 *
 * Return Value: On success, it returns 0. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 *
 * %-ENOENT - the specified block does not exist (hole block)
 *
 * %-EROFS - Read only filesystem (for create mode)
 */
int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create,
			void (*init_block)(struct inode *,
					   struct buffer_head *, void *),
			struct buffer_head **out_bh)
{
	int ret;

	/* Should be rewritten with merging nilfs_mdt_read_block() */
 retry:
	ret = nilfs_mdt_read_block(inode, blkoff, !create, out_bh);
	if (!create || ret != -ENOENT)
		return ret;

	ret = nilfs_mdt_create_block(inode, blkoff, out_bh, init_block);
	if (unlikely(ret == -EEXIST)) {
		/* create = 0; */	/* limit read-create loop retries */
		goto retry;
	}
	return ret;
}

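/*
 * Usage sketch (illustrative only, not part of the original file): a
 * hypothetical metadata-file caller that reads block @blkoff, creating and
 * formatting it with @init_fn when it does not exist yet.  The returned
 * buffer reference must be dropped with brelse().
 *
 *	struct buffer_head *bh;
 *	int err;
 *
 *	err = nilfs_mdt_get_block(inode, blkoff, 1, init_fn, &bh);
 *	if (err)
 *		return err;	(-ENOMEM, -EIO, -EROFS, ...)
 *	... read or update the block through bh->b_data ...
 *	mark_buffer_dirty(bh);
 *	nilfs_mdt_mark_dirty(inode);
 *	brelse(bh);
 */
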
/**
 * nilfs_mdt_find_block - find and get a buffer on meta data file.
 * @inode: inode of the meta data file
 * @start: start block offset (inclusive)
 * @end: end block offset (inclusive)
 * @blkoff: block offset
 * @out_bh: place to store a pointer to buffer_head struct
 *
 * nilfs_mdt_find_block() looks up an existing block in the range of
 * [@start, @end] and stores a pointer to the buffer head of the block in
 * @out_bh, and its block offset in @blkoff.  @out_bh and @blkoff are
 * substituted only when zero is returned.
 *
 * Return Value: On success, it returns 0. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 *
 * %-ENOENT - no block was found in the range
 */
int nilfs_mdt_find_block(struct inode *inode, unsigned long start,
			 unsigned long end, unsigned long *blkoff,
			 struct buffer_head **out_bh)
{
	__u64 next;
	int ret;

	if (unlikely(start > end))
		return -ENOENT;

	ret = nilfs_mdt_read_block(inode, start, true, out_bh);
	if (!ret) {
		*blkoff = start;
		goto out;
	}
	if (unlikely(ret != -ENOENT || start == ULONG_MAX))
		goto out;

	ret = nilfs_bmap_seek_key(NILFS_I(inode)->i_bmap, start + 1, &next);
	if (!ret) {
		if (next <= end) {
			ret = nilfs_mdt_read_block(inode, next, true, out_bh);
			if (!ret)
				*blkoff = next;
		} else {
			ret = -ENOENT;
		}
	}
out:
	return ret;
}

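/*
 * Usage sketch (assumed caller, for illustration): scan for the first
 * allocated block in the inclusive range [first, last].  -ENOENT means the
 * range contains nothing but holes.
 *
 *	unsigned long blkoff;
 *	struct buffer_head *bh;
 *	int ret;
 *
 *	ret = nilfs_mdt_find_block(inode, first, last, &blkoff, &bh);
 *	if (!ret) {
 *		... use blkoff and bh->b_data, then drop the reference ...
 *		brelse(bh);
 *	}
 */
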
/**
 * nilfs_mdt_delete_block - make a hole on the meta data file.
 * @inode: inode of the meta data file
 * @block: block offset
 *
 * Return Value: On success, zero is returned.
 * On error, one of the following negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 */
int nilfs_mdt_delete_block(struct inode *inode, unsigned long block)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	err = nilfs_bmap_delete(ii->i_bmap, block);
	if (!err || err == -ENOENT) {
		nilfs_mdt_mark_dirty(inode);
		nilfs_mdt_forget_block(inode, block);
	}
	return err;
}

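/*
 * Usage sketch (assumed caller): punching a hole at @block.  The bmap entry
 * is deleted first; if that succeeds (or the entry was already missing), the
 * cached copy is discarded via nilfs_mdt_forget_block().
 *
 *	err = nilfs_mdt_delete_block(inode, block);
 *	if (err && err != -ENOENT)
 *		return err;
 */
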
/**
 * nilfs_mdt_forget_block - discard dirty state and try to remove the page
 * @inode: inode of the meta data file
 * @block: block offset
 *
 * nilfs_mdt_forget_block() clears the dirty flag of the specified buffer and
 * tries to release the folio including the buffer from the page cache.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EBUSY - the folio has an active buffer.
 *
 * %-ENOENT - the page cache has no folio addressed by the offset.
 */
int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
{
	pgoff_t index = block >> (PAGE_SHIFT - inode->i_blkbits);
	struct folio *folio;
	struct buffer_head *bh;
	int ret = 0;
	int still_dirty;

	folio = filemap_lock_folio(inode->i_mapping, index);
	if (IS_ERR(folio))
		return -ENOENT;

	folio_wait_writeback(folio);

	bh = folio_buffers(folio);
	if (bh) {
		unsigned long first_block = index <<
			(PAGE_SHIFT - inode->i_blkbits);
		bh = get_nth_bh(bh, block - first_block);
		nilfs_forget_buffer(bh);
	}
	still_dirty = folio_test_dirty(folio);
	folio_unlock(folio);
	folio_put(folio);

	if (still_dirty ||
	    invalidate_inode_pages2_range(inode->i_mapping, index, index) != 0)
		ret = -EBUSY;
	return ret;
}

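/*
 * Note (our reading of the code above, not an authoritative statement):
 * -EBUSY and -ENOENT from nilfs_mdt_forget_block() are typically treated as
 * non-fatal by callers; the folio simply stays in the page cache until it
 * can be invalidated later.
 *
 *	if (nilfs_mdt_forget_block(inode, block) == -EBUSY)
 *		... folio still dirty or in use; nothing more to do now ...
 */
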
int nilfs_mdt_fetch_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (nilfs_bmap_test_and_clear_dirty(ii->i_bmap)) {
		set_bit(NILFS_I_DIRTY, &ii->i_state);
		return 1;
	}
	return test_bit(NILFS_I_DIRTY, &ii->i_state);
}

static int nilfs_mdt_write_folio(struct folio *folio,
				 struct writeback_control *wbc)
{
	struct inode *inode = folio->mapping->host;
	struct super_block *sb;
	int err = 0;

	if (inode && sb_rdonly(inode->i_sb)) {
		/*
		 * This means that the filesystem was remounted read-only
		 * because of an error or metadata corruption, but dirty
		 * folios are still being flushed in the background.
		 * So, here we simply discard this dirty folio.
		 */
		nilfs_clear_folio_dirty(folio);
		folio_unlock(folio);
		return -EROFS;
	}

	folio_redirty_for_writepage(wbc, folio);
	folio_unlock(folio);

	if (!inode)
		return 0;

	sb = inode->i_sb;

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_segment(sb);
	else if (wbc->for_reclaim)
		nilfs_flush_segment(sb, inode->i_ino);

	return err;
}

static int nilfs_mdt_writeback(struct address_space *mapping,
			       struct writeback_control *wbc)
{
	struct folio *folio = NULL;
	int error;

	while ((folio = writeback_iter(mapping, wbc, folio, &error)))
		error = nilfs_mdt_write_folio(folio, wbc);

	return error;
}

static const struct address_space_operations def_mdt_aops = {
	.dirty_folio		= block_dirty_folio,
	.invalidate_folio	= block_invalidate_folio,
	.writepages		= nilfs_mdt_writeback,
	.migrate_folio		= buffer_migrate_folio_norefs,
};

static const struct inode_operations def_mdt_iops;
static const struct file_operations def_mdt_fops;


int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz)
{
	struct nilfs_mdt_info *mi;

	mi = kzalloc(max(sizeof(*mi), objsz), GFP_NOFS);
	if (!mi)
		return -ENOMEM;

	init_rwsem(&mi->mi_sem);
	inode->i_private = mi;

	inode->i_mode = S_IFREG;
	mapping_set_gfp_mask(inode->i_mapping, gfp_mask);

	inode->i_op = &def_mdt_iops;
	inode->i_fop = &def_mdt_fops;
	inode->i_mapping->a_ops = &def_mdt_aops;

	return 0;
}

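/*
 * Setup sketch (hypothetical metadata file; names are illustrative): a
 * metadata file whose private info struct embeds struct nilfs_mdt_info as
 * its first member would typically be initialized before any I/O, roughly
 * as follows (gfp_mask is whatever mask the caller uses for the file's page
 * cache, e.g. NILFS_MDT_GFP from mdt.h):
 *
 *	err = nilfs_mdt_init(inode, gfp_mask, sizeof(struct example_mdt_info));
 *	if (err)
 *		return err;
 *	nilfs_mdt_set_entry_size(inode, entry_size, header_size);
 */
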
/**
 * nilfs_mdt_clear - do cleanup for the metadata file
 * @inode: inode of the metadata file
 */
void nilfs_mdt_clear(struct inode *inode)
{
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
	struct nilfs_shadow_map *shadow = mdi->mi_shadow;

	if (mdi->mi_palloc_cache)
		nilfs_palloc_destroy_cache(inode);

	if (shadow) {
		struct inode *s_inode = shadow->inode;

		shadow->inode = NULL;
		iput(s_inode);
		mdi->mi_shadow = NULL;
	}
}

/**
 * nilfs_mdt_destroy - release resources used by the metadata file
 * @inode: inode of the metadata file
 */
void nilfs_mdt_destroy(struct inode *inode)
{
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	kfree(mdi->mi_bgl); /* kfree(NULL) is safe */
	kfree(mdi);
}

void nilfs_mdt_set_entry_size(struct inode *inode, unsigned int entry_size,
			      unsigned int header_size)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);

	mi->mi_entry_size = entry_size;
	mi->mi_entries_per_block = i_blocksize(inode) / entry_size;
	mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size);
}

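/*
 * Worked example (hypothetical numbers): with a 4096-byte block size,
 * entry_size = 64 and header_size = 16, the function above yields
 * mi_entries_per_block = 4096 / 64 = 64 and
 * mi_first_entry_offset = DIV_ROUND_UP(16, 64) = 1, i.e. entries start at
 * slot 1 of the first block so that the header fits in slot 0.
 */
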
/**
 * nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file
 * @inode: inode of the metadata file
 * @shadow: shadow mapping
 */
int nilfs_mdt_setup_shadow_map(struct inode *inode,
			       struct nilfs_shadow_map *shadow)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
	struct inode *s_inode;

	INIT_LIST_HEAD(&shadow->frozen_buffers);

	s_inode = nilfs_iget_for_shadow(inode);
	if (IS_ERR(s_inode))
		return PTR_ERR(s_inode);

	shadow->inode = s_inode;
	mi->mi_shadow = shadow;
	return 0;
}

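/*
 * Shadow-map lifecycle sketch (assumed usage, condensed from the functions
 * below): the shadow map is bound once, dirty state is saved before work
 * that may have to be rolled back, and it is then either restored or
 * cleared.
 *
 *	err = nilfs_mdt_setup_shadow_map(inode, &shadow);
 *	...
 *	err = nilfs_mdt_save_to_shadow_map(inode);
 *	...
 *	if (rollback_needed)
 *		nilfs_mdt_restore_from_shadow_map(inode);
 *	nilfs_mdt_clear_shadow_map(inode);
 */
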
/**
 * nilfs_mdt_save_to_shadow_map - copy bmap and dirty pages to shadow map
 * @inode: inode of the metadata file
 */
int nilfs_mdt_save_to_shadow_map(struct inode *inode)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_shadow_map *shadow = mi->mi_shadow;
	struct inode *s_inode = shadow->inode;
	int ret;

	ret = nilfs_copy_dirty_pages(s_inode->i_mapping, inode->i_mapping);
	if (ret)
		goto out;

	ret = nilfs_copy_dirty_pages(NILFS_I(s_inode)->i_assoc_inode->i_mapping,
				     ii->i_assoc_inode->i_mapping);
	if (ret)
		goto out;

	nilfs_bmap_save(ii->i_bmap, &shadow->bmap_store);
 out:
	return ret;
}

int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
{
	struct nilfs_shadow_map *shadow = NILFS_MDT(inode)->mi_shadow;
	struct buffer_head *bh_frozen;
	struct folio *folio;
	int blkbits = inode->i_blkbits;

	folio = filemap_grab_folio(shadow->inode->i_mapping,
				   bh->b_folio->index);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	bh_frozen = folio_buffers(folio);
	if (!bh_frozen)
		bh_frozen = create_empty_buffers(folio, 1 << blkbits, 0);

	bh_frozen = get_nth_bh(bh_frozen,
			       offset_in_folio(folio, bh->b_data) >> blkbits);

	if (!buffer_uptodate(bh_frozen))
		nilfs_copy_buffer(bh_frozen, bh);
	if (list_empty(&bh_frozen->b_assoc_buffers)) {
		list_add_tail(&bh_frozen->b_assoc_buffers,
			      &shadow->frozen_buffers);
		set_buffer_nilfs_redirected(bh);
	} else {
		brelse(bh_frozen);	/* already frozen */
	}

	folio_unlock(folio);
	folio_put(folio);
	return 0;
}

struct buffer_head *
nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
{
	struct nilfs_shadow_map *shadow = NILFS_MDT(inode)->mi_shadow;
	struct buffer_head *bh_frozen = NULL;
	struct folio *folio;
	int n;

	folio = filemap_lock_folio(shadow->inode->i_mapping,
				   bh->b_folio->index);
	if (!IS_ERR(folio)) {
		bh_frozen = folio_buffers(folio);
		if (bh_frozen) {
			n = offset_in_folio(folio, bh->b_data) >>
				inode->i_blkbits;
			bh_frozen = get_nth_bh(bh_frozen, n);
		}
		folio_unlock(folio);
		folio_put(folio);
	}
	return bh_frozen;
}

static void nilfs_release_frozen_buffers(struct nilfs_shadow_map *shadow)
{
	struct list_head *head = &shadow->frozen_buffers;
	struct buffer_head *bh;

	while (!list_empty(head)) {
		bh = list_first_entry(head, struct buffer_head,
				      b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh);	/* drop ref-count to make it releasable */
	}
}

/**
 * nilfs_mdt_restore_from_shadow_map - restore dirty pages and bmap state
 * @inode: inode of the metadata file
 */
void nilfs_mdt_restore_from_shadow_map(struct inode *inode)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_shadow_map *shadow = mi->mi_shadow;

	down_write(&mi->mi_sem);

	if (mi->mi_palloc_cache)
		nilfs_palloc_clear_cache(inode);

	nilfs_clear_dirty_pages(inode->i_mapping);
	nilfs_copy_back_pages(inode->i_mapping, shadow->inode->i_mapping);

	nilfs_clear_dirty_pages(ii->i_assoc_inode->i_mapping);
	nilfs_copy_back_pages(ii->i_assoc_inode->i_mapping,
			      NILFS_I(shadow->inode)->i_assoc_inode->i_mapping);

	nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store);

	up_write(&mi->mi_sem);
}

/**
 * nilfs_mdt_clear_shadow_map - truncate pages in shadow map caches
 * @inode: inode of the metadata file
 */
void nilfs_mdt_clear_shadow_map(struct inode *inode)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
	struct nilfs_shadow_map *shadow = mi->mi_shadow;
	struct inode *shadow_btnc_inode = NILFS_I(shadow->inode)->i_assoc_inode;

	down_write(&mi->mi_sem);
	nilfs_release_frozen_buffers(shadow);
	truncate_inode_pages(shadow->inode->i_mapping, 0);
	truncate_inode_pages(shadow_btnc_inode->i_mapping, 0);
	up_write(&mi->mi_sem);
}