// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS B-tree node cache
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Originally written by Seiji Kihara.
 * Fully revised by Ryusuke Konishi for stabilization and simplification.
 *
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "mdt.h"
#include "dat.h"
#include "page.h"
#include "btnode.h"


/**
 * nilfs_init_btnc_inode - initialize B-tree node cache inode
 * @btnc_inode: inode to be initialized
 *
 * nilfs_init_btnc_inode() sets up an inode for B-tree node cache.
 */
void nilfs_init_btnc_inode(struct inode *btnc_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(btnc_inode);

	btnc_inode->i_mode = S_IFREG;
	ii->i_flags = 0;
	memset(&ii->i_bmap_data, 0, sizeof(struct nilfs_bmap));
	mapping_set_gfp_mask(btnc_inode->i_mapping, GFP_NOFS);
}

void nilfs_btnode_cache_clear(struct address_space *btnc)
{
	invalidate_mapping_pages(btnc, 0, -1);
	truncate_inode_pages(btnc, 0);
}
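
/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * intended lifecycle of a B-tree node cache inode, using only the two
 * helpers above.  The function name is hypothetical, and how the inode
 * is allocated and attached is left out; only the init/clear pairing is
 * taken from this file.
 */
static void __maybe_unused example_btnc_lifecycle(struct inode *btnc_inode)
{
	/* One-time setup: S_IFREG mode, cleared bmap, GFP_NOFS mapping. */
	nilfs_init_btnc_inode(btnc_inode);

	/* ... node buffers are created, read, and dirtied here ... */

	/* Teardown: invalidate and truncate every cached node folio. */
	nilfs_btnode_cache_clear(btnc_inode->i_mapping);
}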

struct buffer_head *
nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
{
	struct inode *inode = btnc->host;
	struct buffer_head *bh;

	bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
	if (unlikely(!bh))
		return NULL;

	if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
		     buffer_dirty(bh))) {
		brelse(bh);
		BUG();
	}
	memset(bh->b_data, 0, i_blocksize(inode));
	bh->b_bdev = inode->i_sb->s_bdev;
	bh->b_blocknr = blocknr;
	set_buffer_mapped(bh);
	set_buffer_uptodate(bh);

	folio_unlock(bh->b_folio);
	folio_put(bh->b_folio);
	return bh;
}
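
/*
 * Illustrative sketch (editor's addition): allocating a fresh, zeroed
 * node block in the cache.  The wrapper name is hypothetical; the
 * NULL-on-failure convention is that of nilfs_btnode_create_block()
 * above.
 */
static int __maybe_unused example_btnode_new(struct address_space *btnc,
					     __u64 blocknr)
{
	struct buffer_head *bh;

	bh = nilfs_btnode_create_block(btnc, blocknr);
	if (!bh)
		return -ENOMEM;

	/*
	 * The buffer comes back mapped, up to date, and zero-filled; its
	 * folio has already been unlocked and released, so only the
	 * buffer reference remains to drop.
	 */
	brelse(bh);
	return 0;
}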

int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
			      sector_t pblocknr, blk_opf_t opf,
			      struct buffer_head **pbh, sector_t *submit_ptr)
{
	struct buffer_head *bh;
	struct inode *inode = btnc->host;
	struct folio *folio;
	int err;

	bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
	if (unlikely(!bh))
		return -ENOMEM;

	err = -EEXIST; /* internal code */
	folio = bh->b_folio;

	if (buffer_uptodate(bh) || buffer_dirty(bh))
		goto found;

	if (pblocknr == 0) {
		pblocknr = blocknr;
		if (inode->i_ino != NILFS_DAT_INO) {
			struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

			/* blocknr is a virtual block number */
			err = nilfs_dat_translate(nilfs->ns_dat, blocknr,
						  &pblocknr);
			if (unlikely(err)) {
				brelse(bh);
				goto out_locked;
			}
		}
	}

	if (opf & REQ_RAHEAD) {
		if (pblocknr != *submit_ptr + 1 || !trylock_buffer(bh)) {
			err = -EBUSY; /* internal code */
			brelse(bh);
			goto out_locked;
		}
	} else { /* opf == REQ_OP_READ */
		lock_buffer(bh);
	}
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		err = -EEXIST; /* internal code */
		goto found;
	}
	set_buffer_mapped(bh);
	bh->b_bdev = inode->i_sb->s_bdev;
	bh->b_blocknr = pblocknr; /* set block address for read */
	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(opf, bh);
	bh->b_blocknr = blocknr; /* set back to the given block address */
	*submit_ptr = pblocknr;
	err = 0;
found:
	*pbh = bh;

out_locked:
	folio_unlock(folio);
	folio_put(folio);
	return err;
}
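
/*
 * Illustrative sketch (editor's addition): driving
 * nilfs_btnode_submit_block() for one synchronous read.  The wrapper
 * name is hypothetical; the handling of the internal -EEXIST code
 * (buffer already valid, no I/O submitted) is one way a caller might
 * consume this API.
 */
static int __maybe_unused example_btnode_read(struct address_space *btnc,
					      __u64 blocknr,
					      struct buffer_head **pbh)
{
	sector_t submit_ptr = 0;
	int err;

	/* pblocknr == 0 requests DAT translation for non-DAT inodes. */
	err = nilfs_btnode_submit_block(btnc, blocknr, 0, REQ_OP_READ, pbh,
					&submit_ptr);
	if (err == -EEXIST)
		return 0;	/* already cached and valid */
	if (err)
		return err;	/* -ENOMEM or DAT translation failure */

	/* I/O was submitted; wait for it and check the result. */
	wait_on_buffer(*pbh);
	if (!buffer_uptodate(*pbh)) {
		brelse(*pbh);
		return -EIO;
	}
	return 0;
}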

/**
 * nilfs_btnode_delete - delete B-tree node buffer
 * @bh: buffer to be deleted
 *
 * nilfs_btnode_delete() invalidates the specified buffer and deletes the
 * folio containing it once the folio becomes unbusy.
 */
void nilfs_btnode_delete(struct buffer_head *bh)
{
	struct address_space *mapping;
	struct folio *folio = bh->b_folio;
	pgoff_t index = folio->index;
	int still_dirty;

	folio_get(folio);
	folio_lock(folio);
	folio_wait_writeback(folio);

	nilfs_forget_buffer(bh);
	still_dirty = folio_test_dirty(folio);
	mapping = folio->mapping;
	folio_unlock(folio);
	folio_put(folio);

	if (!still_dirty && mapping)
		invalidate_inode_pages2_range(mapping, index, index);
}
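
/*
 * Illustrative sketch (editor's addition): nilfs_btnode_delete()
 * consumes the caller's buffer reference (via nilfs_forget_buffer()),
 * so no brelse() follows it.  The wrapper name is hypothetical.
 */
static void __maybe_unused example_btnode_discard(struct buffer_head *bh)
{
	/* After this call, bh must not be used again by the caller. */
	nilfs_btnode_delete(bh);
}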

/**
 * nilfs_btnode_prepare_change_key - prepare to change the key of a block
 * @btnc: page cache of B-tree nodes
 * @ctxt: change-key context
 *
 * Prepare to move the contents of the block for the old key to a block for
 * the new key.  The old buffer is not removed, but it may be reused for the
 * new buffer.  This function may return -ENOMEM on memory allocation
 * failure or -EIO on disk read failure.
 */
int nilfs_btnode_prepare_change_key(struct address_space *btnc,
				    struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *obh, *nbh;
	struct inode *inode = btnc->host;
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
	int err;

	if (oldkey == newkey)
		return 0;

	obh = ctxt->bh;
	ctxt->newbh = NULL;

	if (inode->i_blkbits == PAGE_SHIFT) {
		struct folio *ofolio = obh->b_folio;
		folio_lock(ofolio);
retry:
		/* BUG_ON(oldkey != obh->b_folio->index); */
		if (unlikely(oldkey != ofolio->index))
			NILFS_FOLIO_BUG(ofolio,
					"invalid oldkey %lld (newkey=%lld)",
					(unsigned long long)oldkey,
					(unsigned long long)newkey);

		xa_lock_irq(&btnc->i_pages);
		err = __xa_insert(&btnc->i_pages, newkey, ofolio, GFP_NOFS);
		xa_unlock_irq(&btnc->i_pages);
		/*
		 * Note: folio->index will not change to newkey until
		 * nilfs_btnode_commit_change_key() is called.  The folio
		 * lock is held to protect the folio in this intermediate
		 * state.
		 */
		if (!err)
			return 0;
		else if (err != -EBUSY)
			goto failed_unlock;

		err = invalidate_inode_pages2_range(btnc, newkey, newkey);
		if (!err)
			goto retry;
		/* fallback to copy mode */
		folio_unlock(ofolio);
	}

	nbh = nilfs_btnode_create_block(btnc, newkey);
	if (!nbh)
		return -ENOMEM;

	BUG_ON(nbh == obh);
	ctxt->newbh = nbh;
	return 0;

 failed_unlock:
	folio_unlock(obh->b_folio);
	return err;
}

/**
 * nilfs_btnode_commit_change_key - commit the change of the key of a block
 * @btnc: page cache of B-tree nodes
 * @ctxt: change-key context
 *
 * Commit the change_key operation prepared by
 * nilfs_btnode_prepare_change_key().
 */
void nilfs_btnode_commit_change_key(struct address_space *btnc,
				    struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh;
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
	struct folio *ofolio;

	if (oldkey == newkey)
		return;

	if (nbh == NULL) {	/* blocksize == pagesize */
		ofolio = obh->b_folio;
		if (unlikely(oldkey != ofolio->index))
			NILFS_FOLIO_BUG(ofolio,
					"invalid oldkey %lld (newkey=%lld)",
					(unsigned long long)oldkey,
					(unsigned long long)newkey);
		mark_buffer_dirty(obh);

		xa_lock_irq(&btnc->i_pages);
		__xa_erase(&btnc->i_pages, oldkey);
		__xa_set_mark(&btnc->i_pages, newkey, PAGECACHE_TAG_DIRTY);
		xa_unlock_irq(&btnc->i_pages);

		ofolio->index = obh->b_blocknr = newkey;
		folio_unlock(ofolio);
	} else {
		nilfs_copy_buffer(nbh, obh);
		mark_buffer_dirty(nbh);

		nbh->b_blocknr = newkey;
		ctxt->bh = nbh;
		nilfs_btnode_delete(obh); /* will decrement bh->b_count */
	}
}

/**
 * nilfs_btnode_abort_change_key - abort the change of the key of a block
 * @btnc: page cache of B-tree nodes
 * @ctxt: change-key context
 *
 * Abort the change_key operation prepared by
 * nilfs_btnode_prepare_change_key().
 */
void nilfs_btnode_abort_change_key(struct address_space *btnc,
				   struct nilfs_btnode_chkey_ctxt *ctxt)
{
	struct buffer_head *nbh = ctxt->newbh;
	__u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;

	if (oldkey == newkey)
		return;

	if (nbh == NULL) {	/* blocksize == pagesize */
		xa_erase_irq(&btnc->i_pages, newkey);
		folio_unlock(ctxt->bh->b_folio);
	} else {
		/*
		 * When canceling a buffer that a prepare operation has
		 * allocated to copy a node block to another location, use
		 * nilfs_btnode_delete() to initialize and release the buffer
		 * so that the buffer flags will not be in an inconsistent
		 * state when it is reallocated.
		 */
		nilfs_btnode_delete(nbh);
	}
}
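
/*
 * Illustrative sketch (editor's addition): the three-phase change_key
 * protocol implemented above.  The wrapper name, the "failed" flag, and
 * the surrounding operation are hypothetical; the real callers live in
 * the B-tree code.
 */
static int __maybe_unused example_btnode_move(struct address_space *btnc,
					      struct buffer_head *bh,
					      __u64 oldkey, __u64 newkey)
{
	struct nilfs_btnode_chkey_ctxt ctxt = {
		.oldkey = oldkey,
		.newkey = newkey,
		.bh = bh,
	};
	bool failed = false;	/* outcome of the surrounding operation */
	int err;

	err = nilfs_btnode_prepare_change_key(btnc, &ctxt);
	if (err)
		return err;	/* nothing to undo yet */

	/* ... the operation this rekeying is part of happens here ... */

	if (failed) {
		/* Roll back: release the new slot or the copy buffer. */
		nilfs_btnode_abort_change_key(btnc, &ctxt);
		return -EIO;
	}

	/* Point of no return: the block now lives at newkey. */
	nilfs_btnode_commit_change_key(btnc, &ctxt);
	/* ctxt.bh now refers to the buffer that holds the moved block. */
	return 0;
}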