/*
 * mdt.c - meta data file for NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Written by Ryusuke Konishi.
 */

#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "alloc.h"              /* nilfs_palloc_destroy_cache() */

#include <trace/events/nilfs2.h>

#define NILFS_MDT_MAX_RA_BLOCKS         (16 - 1)


static int
nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
                           struct buffer_head *bh,
                           void (*init_block)(struct inode *,
                                              struct buffer_head *, void *))
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        void *kaddr;
        int ret;

        /* The caller excludes read accesses using the page lock */

        /* set_buffer_new(bh); */
        bh->b_blocknr = 0;

        ret = nilfs_bmap_insert(ii->i_bmap, block, (unsigned long)bh);
        if (unlikely(ret))
                return ret;

        set_buffer_mapped(bh);

        kaddr = kmap_atomic(bh->b_page);
        memset(kaddr + bh_offset(bh), 0, i_blocksize(inode));
        if (init_block)
                init_block(inode, bh, kaddr);
        flush_dcache_page(bh->b_page);
        kunmap_atomic(kaddr);

        set_buffer_uptodate(bh);
        mark_buffer_dirty(bh);
        nilfs_mdt_mark_dirty(inode);

        trace_nilfs2_mdt_insert_new_block(inode, inode->i_ino, block);

        return 0;
}

static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
                                  struct buffer_head **out_bh,
                                  void (*init_block)(struct inode *,
                                                     struct buffer_head *,
                                                     void *))
{
        struct super_block *sb = inode->i_sb;
        struct nilfs_transaction_info ti;
        struct buffer_head *bh;
        int err;

        nilfs_transaction_begin(sb, &ti, 0);

        err = -ENOMEM;
        bh = nilfs_grab_buffer(inode, inode->i_mapping, block, 0);
        if (unlikely(!bh))
                goto failed_unlock;

        err = -EEXIST;
        if (buffer_uptodate(bh))
                goto failed_bh;

        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
                goto failed_bh;

        bh->b_bdev = sb->s_bdev;
        err = nilfs_mdt_insert_new_block(inode, block, bh, init_block);
        if (likely(!err)) {
                get_bh(bh);
                *out_bh = bh;
        }

 failed_bh:
        unlock_page(bh->b_page);
        put_page(bh->b_page);
        brelse(bh);

 failed_unlock:
        if (likely(!err))
                err = nilfs_transaction_commit(sb);
        else
                nilfs_transaction_abort(sb);

        return err;
}

static int
nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
                       int mode, int mode_flags, struct buffer_head **out_bh)
{
        struct buffer_head *bh;
        __u64 blknum = 0;
        int ret = -ENOMEM;

        bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
        if (unlikely(!bh))
                goto failed;

        ret = -EEXIST;  /* internal code */
        if (buffer_uptodate(bh))
                goto out;

        if (mode_flags & REQ_RAHEAD) {
                if (!trylock_buffer(bh)) {
                        ret = -EBUSY;
                        goto failed_bh;
                }
        } else /* mode == REQ_OP_READ */
                lock_buffer(bh);

        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                goto out;
        }

        ret = nilfs_bmap_lookup(NILFS_I(inode)->i_bmap, blkoff, &blknum);
        if (unlikely(ret)) {
                unlock_buffer(bh);
                goto failed_bh;
        }
        map_bh(bh, inode->i_sb, (sector_t)blknum);

        bh->b_end_io = end_buffer_read_sync;
        get_bh(bh);
        submit_bh(mode, mode_flags, bh);
        ret = 0;

        trace_nilfs2_mdt_submit_block(inode, inode->i_ino, blkoff, mode);
 out:
        get_bh(bh);
        *out_bh = bh;

 failed_bh:
        unlock_page(bh->b_page);
        put_page(bh->b_page);
        brelse(bh);
 failed:
        return ret;
}

static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
                                int readahead, struct buffer_head **out_bh)
{
        struct buffer_head *first_bh, *bh;
        unsigned long blkoff;
        int i, nr_ra_blocks = NILFS_MDT_MAX_RA_BLOCKS;
        int err;

        err = nilfs_mdt_submit_block(inode, block, REQ_OP_READ, 0, &first_bh);
        if (err == -EEXIST) /* internal code */
                goto out;

        if (unlikely(err))
                goto failed;

        if (readahead) {
                blkoff = block + 1;
                for (i = 0; i < nr_ra_blocks; i++, blkoff++) {
                        err = nilfs_mdt_submit_block(inode, blkoff,
                                                     REQ_OP_READ, REQ_RAHEAD,
                                                     &bh);
                        if (likely(!err || err == -EEXIST))
                                brelse(bh);
                        else if (err != -EBUSY)
                                break;
                                /* abort readahead if bmap lookup failed */
                        if (!buffer_locked(first_bh))
                                goto out_no_wait;
                }
        }

        wait_on_buffer(first_bh);

 out_no_wait:
        err = -EIO;
        if (!buffer_uptodate(first_bh)) {
                nilfs_msg(inode->i_sb, KERN_ERR,
                          "I/O error reading meta-data file (ino=%lu, block-offset=%lu)",
                          inode->i_ino, block);
                goto failed_bh;
        }
 out:
        *out_bh = first_bh;
        return 0;

 failed_bh:
        brelse(first_bh);
 failed:
        return err;
}

/**
 * nilfs_mdt_get_block - read or create a buffer on meta data file.
 * @inode: inode of the meta data file
 * @blkoff: block offset
 * @create: create flag
 * @init_block: initializer used for newly allocated block
 * @out_bh: output of a pointer to the buffer_head
 *
 * nilfs_mdt_get_block() looks up the specified buffer and tries to create
 * a new buffer if @create is nonzero.  On success, the returned buffer is
 * assured to be either existing or formatted using a buffer lock.
 * @out_bh is substituted only when zero is returned.
 *
 * Return Value: On success, it returns 0.  On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 *
 * %-ENOENT - the specified block does not exist (hole block)
 *
 * %-EROFS - Read only filesystem (for create mode)
 */
int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create,
                        void (*init_block)(struct inode *,
                                           struct buffer_head *, void *),
                        struct buffer_head **out_bh)
{
        int ret;

        /* Should be rewritten by merging with nilfs_mdt_read_block() */
 retry:
        ret = nilfs_mdt_read_block(inode, blkoff, !create, out_bh);
        if (!create || ret != -ENOENT)
                return ret;

        ret = nilfs_mdt_create_block(inode, blkoff, out_bh, init_block);
        if (unlikely(ret == -EEXIST)) {
                /* create = 0; */  /* limit read-create loop retries */
                goto retry;
        }
        return ret;
}
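
/*
 * Example (an illustrative sketch, not an actual nilfs2 call site; the
 * callback name is hypothetical): look up block 0 of a metadata file,
 * creating a zero-filled block there if the offset is still a hole.
 *
 *      static void example_init_block(struct inode *inode,
 *                                     struct buffer_head *bh, void *kaddr)
 *      {
 *              ... format the freshly zeroed area at kaddr + bh_offset(bh) ...
 *      }
 *
 *      struct buffer_head *bh;
 *      int err;
 *
 *      err = nilfs_mdt_get_block(inode, 0, 1, example_init_block, &bh);
 *      if (!err)
 *              brelse(bh);     ... drop the reference taken for the caller ...
 */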

/**
 * nilfs_mdt_find_block - find and get a buffer on meta data file.
 * @inode: inode of the meta data file
 * @start: start block offset (inclusive)
 * @end: end block offset (inclusive)
 * @blkoff: block offset
 * @out_bh: place to store a pointer to buffer_head struct
 *
 * nilfs_mdt_find_block() looks up an existing block in the range
 * [@start, @end] and stores a pointer to the buffer head of the block in
 * @out_bh and its block offset in @blkoff, respectively.  @out_bh and
 * @blkoff are substituted only when zero is returned.
 *
 * Return Value: On success, it returns 0.  On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 *
 * %-ENOENT - no block was found in the range
 */
int nilfs_mdt_find_block(struct inode *inode, unsigned long start,
                         unsigned long end, unsigned long *blkoff,
                         struct buffer_head **out_bh)
{
        __u64 next;
        int ret;

        if (unlikely(start > end))
                return -ENOENT;

        ret = nilfs_mdt_read_block(inode, start, true, out_bh);
        if (!ret) {
                *blkoff = start;
                goto out;
        }
        if (unlikely(ret != -ENOENT || start == ULONG_MAX))
                goto out;

        ret = nilfs_bmap_seek_key(NILFS_I(inode)->i_bmap, start + 1, &next);
        if (!ret) {
                if (next <= end) {
                        ret = nilfs_mdt_read_block(inode, next, true, out_bh);
                        if (!ret)
                                *blkoff = next;
                } else {
                        ret = -ENOENT;
                }
        }
out:
        return ret;
}
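
/*
 * Example (an illustrative sketch, not an actual call site): find the
 * first allocated block at or after offset 100, scanning to the end of
 * the key space.
 *
 *      unsigned long blkoff;
 *      struct buffer_head *bh;
 *      int ret;
 *
 *      ret = nilfs_mdt_find_block(inode, 100, ULONG_MAX, &blkoff, &bh);
 *      if (!ret) {
 *              ... blkoff now holds the offset of the block backing bh ...
 *              brelse(bh);
 *      } else if (ret == -ENOENT) {
 *              ... no allocated block exists in the range ...
 *      }
 */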

/**
 * nilfs_mdt_delete_block - make a hole on the meta data file.
 * @inode: inode of the meta data file
 * @block: block offset
 *
 * Return Value: On success, zero is returned.
 * On error, one of the following negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 */
int nilfs_mdt_delete_block(struct inode *inode, unsigned long block)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int err;

        err = nilfs_bmap_delete(ii->i_bmap, block);
        if (!err || err == -ENOENT) {
                nilfs_mdt_mark_dirty(inode);
                nilfs_mdt_forget_block(inode, block);
        }
        return err;
}
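
/*
 * Example (an illustrative sketch): punch a hole at @blkoff and treat an
 * already-missing block as success.
 *
 *      err = nilfs_mdt_delete_block(inode, blkoff);
 *      if (err == -ENOENT)
 *              err = 0;        ... the block was already a hole ...
 */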

/**
 * nilfs_mdt_forget_block - discard dirty state and try to remove the page
 * @inode: inode of the meta data file
 * @block: block offset
 *
 * nilfs_mdt_forget_block() clears the dirty flag of the specified buffer,
 * and tries to release the page including the buffer from the page cache.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-EBUSY - page has an active buffer.
 *
 * %-ENOENT - page cache has no page addressed by the offset.
 */
int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
{
        pgoff_t index = (pgoff_t)block >>
                (PAGE_SHIFT - inode->i_blkbits);
        struct page *page;
        unsigned long first_block;
        int ret = 0;
        int still_dirty;

        page = find_lock_page(inode->i_mapping, index);
        if (!page)
                return -ENOENT;

        wait_on_page_writeback(page);

        first_block = (unsigned long)index <<
                (PAGE_SHIFT - inode->i_blkbits);
        if (page_has_buffers(page)) {
                struct buffer_head *bh;

                bh = nilfs_page_get_nth_block(page, block - first_block);
                nilfs_forget_buffer(bh);
        }
        still_dirty = PageDirty(page);
        unlock_page(page);
        put_page(page);

        if (still_dirty ||
            invalidate_inode_pages2_range(inode->i_mapping, index, index) != 0)
                ret = -EBUSY;
        return ret;
}

int nilfs_mdt_fetch_dirty(struct inode *inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);

        if (nilfs_bmap_test_and_clear_dirty(ii->i_bmap)) {
                set_bit(NILFS_I_DIRTY, &ii->i_state);
                return 1;
        }
        return test_bit(NILFS_I_DIRTY, &ii->i_state);
}

static int
nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct super_block *sb;
        int err = 0;

        if (inode && sb_rdonly(inode->i_sb)) {
                /*
                 * The filesystem was remounted read-only because of an
                 * error or metadata corruption, but dirty pages still try
                 * to get flushed in the background.  Simply discard this
                 * dirty page here.
                 */
                nilfs_clear_dirty_page(page, false);
                unlock_page(page);
                return -EROFS;
        }

        redirty_page_for_writepage(wbc, page);
        unlock_page(page);

        if (!inode)
                return 0;

        sb = inode->i_sb;

        if (wbc->sync_mode == WB_SYNC_ALL)
                err = nilfs_construct_segment(sb);
        else if (wbc->for_reclaim)
                nilfs_flush_segment(sb, inode->i_ino);

        return err;
}


static const struct address_space_operations def_mdt_aops = {
        .writepage              = nilfs_mdt_write_page,
};

static const struct inode_operations def_mdt_iops;
static const struct file_operations def_mdt_fops;


int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz)
{
        struct nilfs_mdt_info *mi;

        mi = kzalloc(max(sizeof(*mi), objsz), GFP_NOFS);
        if (!mi)
                return -ENOMEM;

        init_rwsem(&mi->mi_sem);
        inode->i_private = mi;

        inode->i_mode = S_IFREG;
        mapping_set_gfp_mask(inode->i_mapping, gfp_mask);

        inode->i_op = &def_mdt_iops;
        inode->i_fop = &def_mdt_fops;
        inode->i_mapping->a_ops = &def_mdt_aops;

        return 0;
}
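
/*
 * Example (a sketch of typical initialization; the entry geometry is
 * illustrative, and the private-object size shown is the minimal one):
 * turn a metadata-file inode into an MDT inode, then describe its record
 * layout.  NILFS_MDT_GFP is the gfp mask declared in mdt.h.
 *
 *      err = nilfs_mdt_init(inode, NILFS_MDT_GFP,
 *                           sizeof(struct nilfs_mdt_info));
 *      if (!err)
 *              nilfs_mdt_set_entry_size(inode, entry_size, header_size);
 */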

/**
 * nilfs_mdt_clear - do cleanup for the metadata file
 * @inode: inode of the metadata file
 */
void nilfs_mdt_clear(struct inode *inode)
{
        struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

        if (mdi->mi_palloc_cache)
                nilfs_palloc_destroy_cache(inode);
}

/**
 * nilfs_mdt_destroy - release resources used by the metadata file
 * @inode: inode of the metadata file
 */
void nilfs_mdt_destroy(struct inode *inode)
{
        struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

        kfree(mdi->mi_bgl); /* kfree(NULL) is safe */
        kfree(mdi);
}

void nilfs_mdt_set_entry_size(struct inode *inode, unsigned int entry_size,
                              unsigned int header_size)
{
        struct nilfs_mdt_info *mi = NILFS_MDT(inode);

        mi->mi_entry_size = entry_size;
        mi->mi_entries_per_block = i_blocksize(inode) / entry_size;
        mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size);
}
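
/*
 * Worked example (numbers illustrative): with a 4096-byte block size, a
 * 64-byte entry size and a 64-byte header,
 *
 *      mi_entries_per_block  = 4096 / 64            = 64
 *      mi_first_entry_offset = DIV_ROUND_UP(64, 64) = 1
 *
 * i.e. the header occupies the first entry slot, and usable entries start
 * at in-block index 1.
 */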

/**
 * nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file
 * @inode: inode of the metadata file
 * @shadow: shadow mapping
 */
int nilfs_mdt_setup_shadow_map(struct inode *inode,
                               struct nilfs_shadow_map *shadow)
{
        struct nilfs_mdt_info *mi = NILFS_MDT(inode);

        INIT_LIST_HEAD(&shadow->frozen_buffers);
        address_space_init_once(&shadow->frozen_data);
        nilfs_mapping_init(&shadow->frozen_data, inode);
        address_space_init_once(&shadow->frozen_btnodes);
        nilfs_mapping_init(&shadow->frozen_btnodes, inode);
        mi->mi_shadow = shadow;
        return 0;
}
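
/*
 * Shadow-map lifecycle (a summarizing sketch; the calling context is
 * simplified):
 *
 *      nilfs_mdt_setup_shadow_map(inode, &shadow);     bind once at setup
 *      ...
 *      nilfs_mdt_save_to_shadow_map(inode);            snapshot bmap and
 *                                                      dirty pages
 *      ... speculative updates to the metadata file ...
 *      nilfs_mdt_restore_from_shadow_map(inode);       roll back on failure
 *      nilfs_mdt_clear_shadow_map(inode);              drop the frozen pages
 */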

/**
 * nilfs_mdt_save_to_shadow_map - copy bmap and dirty pages to shadow map
 * @inode: inode of the metadata file
 */
int nilfs_mdt_save_to_shadow_map(struct inode *inode)
{
        struct nilfs_mdt_info *mi = NILFS_MDT(inode);
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct nilfs_shadow_map *shadow = mi->mi_shadow;
        int ret;

        ret = nilfs_copy_dirty_pages(&shadow->frozen_data, inode->i_mapping);
        if (ret)
                goto out;

        ret = nilfs_copy_dirty_pages(&shadow->frozen_btnodes,
                                     &ii->i_btnode_cache);
        if (ret)
                goto out;

        nilfs_bmap_save(ii->i_bmap, &shadow->bmap_store);
 out:
        return ret;
}

int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
{
        struct nilfs_shadow_map *shadow = NILFS_MDT(inode)->mi_shadow;
        struct buffer_head *bh_frozen;
        struct page *page;
        int blkbits = inode->i_blkbits;

        page = grab_cache_page(&shadow->frozen_data, bh->b_page->index);
        if (!page)
                return -ENOMEM;

        if (!page_has_buffers(page))
                create_empty_buffers(page, 1 << blkbits, 0);

        bh_frozen = nilfs_page_get_nth_block(page, bh_offset(bh) >> blkbits);

        if (!buffer_uptodate(bh_frozen))
                nilfs_copy_buffer(bh_frozen, bh);
        if (list_empty(&bh_frozen->b_assoc_buffers)) {
                list_add_tail(&bh_frozen->b_assoc_buffers,
                              &shadow->frozen_buffers);
                set_buffer_nilfs_redirected(bh);
        } else {
                brelse(bh_frozen); /* already frozen */
        }

        unlock_page(page);
        put_page(page);
        return 0;
}

struct buffer_head *
nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
{
        struct nilfs_shadow_map *shadow = NILFS_MDT(inode)->mi_shadow;
        struct buffer_head *bh_frozen = NULL;
        struct page *page;
        int n;

        page = find_lock_page(&shadow->frozen_data, bh->b_page->index);
        if (page) {
                if (page_has_buffers(page)) {
                        n = bh_offset(bh) >> inode->i_blkbits;
                        bh_frozen = nilfs_page_get_nth_block(page, n);
                }
                unlock_page(page);
                put_page(page);
        }
        return bh_frozen;
}

static void nilfs_release_frozen_buffers(struct nilfs_shadow_map *shadow)
{
        struct list_head *head = &shadow->frozen_buffers;
        struct buffer_head *bh;

        while (!list_empty(head)) {
                bh = list_first_entry(head, struct buffer_head,
                                      b_assoc_buffers);
                list_del_init(&bh->b_assoc_buffers);
                brelse(bh); /* drop ref-count to make it releasable */
        }
}

/**
 * nilfs_mdt_restore_from_shadow_map - restore dirty pages and bmap state
 * @inode: inode of the metadata file
 */
void nilfs_mdt_restore_from_shadow_map(struct inode *inode)
{
        struct nilfs_mdt_info *mi = NILFS_MDT(inode);
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct nilfs_shadow_map *shadow = mi->mi_shadow;

        down_write(&mi->mi_sem);

        if (mi->mi_palloc_cache)
                nilfs_palloc_clear_cache(inode);

        nilfs_clear_dirty_pages(inode->i_mapping, true);
        nilfs_copy_back_pages(inode->i_mapping, &shadow->frozen_data);

        nilfs_clear_dirty_pages(&ii->i_btnode_cache, true);
        nilfs_copy_back_pages(&ii->i_btnode_cache, &shadow->frozen_btnodes);

        nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store);

        up_write(&mi->mi_sem);
}

/**
 * nilfs_mdt_clear_shadow_map - truncate pages in shadow map caches
 * @inode: inode of the metadata file
 */
void nilfs_mdt_clear_shadow_map(struct inode *inode)
{
        struct nilfs_mdt_info *mi = NILFS_MDT(inode);
        struct nilfs_shadow_map *shadow = mi->mi_shadow;

        down_write(&mi->mi_sem);
        nilfs_release_frozen_buffers(shadow);
        truncate_inode_pages(&shadow->frozen_data, 0);
        truncate_inode_pages(&shadow->frozen_btnodes, 0);
        up_write(&mi->mi_sem);
}