// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS direct block pointer.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Koji Sato.
 */

#include <linux/errno.h>
#include "nilfs.h"
#include "page.h"
#include "direct.h"
#include "alloc.h"
#include "dat.h"

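/*
 * A direct mapping keeps its block pointers inline in the bmap's data
 * area: a struct nilfs_direct_node header followed by an array of
 * NILFS_DIRECT_NBLOCKS little-endian block pointers.  The helpers below
 * locate that array and read or write a single slot, doing the endian
 * conversion in one place.
 */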
static inline __le64 *nilfs_direct_dptrs(const struct nilfs_bmap *direct)
{
	return (__le64 *)
		((struct nilfs_direct_node *)direct->b_u.u_data + 1);
}

static inline __u64
nilfs_direct_get_ptr(const struct nilfs_bmap *direct, __u64 key)
{
	return le64_to_cpu(*(nilfs_direct_dptrs(direct) + key));
}

static inline void nilfs_direct_set_ptr(struct nilfs_bmap *direct,
					__u64 key, __u64 ptr)
{
	*(nilfs_direct_dptrs(direct) + key) = cpu_to_le64(ptr);
}

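/**
 * nilfs_direct_lookup - look up the block pointer bound to a key
 * @direct: bmap using the direct mapping
 * @key:    block offset within the file
 * @level:  lookup level (a direct mapping only has level 1)
 * @ptrp:   place to store the found block pointer
 *
 * Return: 0 on success, or -ENOENT if @key is out of range or has no
 * block assigned.
 */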
static int nilfs_direct_lookup(const struct nilfs_bmap *direct,
			       __u64 key, int level, __u64 *ptrp)
{
	__u64 ptr;

	if (key > NILFS_DIRECT_KEY_MAX || level != 1)
		return -ENOENT;
	ptr = nilfs_direct_get_ptr(direct, key);
	if (ptr == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	*ptrp = ptr;
	return 0;
}

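/**
 * nilfs_direct_lookup_contig - look up a run of contiguous disk blocks
 * @direct:    bmap using the direct mapping
 * @key:       block offset of the first block
 * @ptrp:      place to store the block number of the first block
 * @maxblocks: maximum number of blocks to look up
 *
 * When the bmap uses virtual block numbers, each pointer is translated
 * through the DAT before checking contiguity.
 *
 * Return: the number of contiguous blocks found (at least 1), or a
 * negative error code on failure.
 */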
static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
				      __u64 key, __u64 *ptrp,
				      unsigned int maxblocks)
{
	struct inode *dat = NULL;
	__u64 ptr, ptr2;
	sector_t blocknr;
	int ret, cnt;

	if (key > NILFS_DIRECT_KEY_MAX)
		return -ENOENT;
	ptr = nilfs_direct_get_ptr(direct, key);
	if (ptr == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	if (NILFS_BMAP_USE_VBN(direct)) {
		dat = nilfs_bmap_get_dat(direct);
		ret = nilfs_dat_translate(dat, ptr, &blocknr);
		if (ret < 0)
			goto dat_error;
		ptr = blocknr;
	}

	maxblocks = min_t(unsigned int, maxblocks,
			  NILFS_DIRECT_KEY_MAX - key + 1);
	for (cnt = 1; cnt < maxblocks &&
	     (ptr2 = nilfs_direct_get_ptr(direct, key + cnt)) !=
	     NILFS_BMAP_INVALID_PTR;
	     cnt++) {
		if (dat) {
			ret = nilfs_dat_translate(dat, ptr2, &blocknr);
			if (ret < 0)
				goto dat_error;
			ptr2 = blocknr;
		}
		if (ptr2 != ptr + cnt)
			break;
	}
	*ptrp = ptr;
	return cnt;

 dat_error:
	if (ret == -ENOENT)
		ret = -EINVAL; /* Notify bmap layer of metadata corruption */
	return ret;
}

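/*
 * Choose an allocation hint for a new virtual block number: try the
 * sequential-access target for @key first, then fall back to the
 * block-group target.
 */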
static __u64
nilfs_direct_find_target_v(const struct nilfs_bmap *direct, __u64 key)
{
	__u64 ptr;

	ptr = nilfs_bmap_find_target_seq(direct, key);
	if (ptr != NILFS_BMAP_INVALID_PTR)
		/* sequential access */
		return ptr;

	/* block group */
	return nilfs_bmap_find_target_in_group(direct);
}

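/**
 * nilfs_direct_insert - insert a new block into the direct mapping
 * @bmap: bmap using the direct mapping
 * @key:  block offset of the new block
 * @ptr:  buffer head of the new block, cast to an integer
 *
 * A new block pointer is allocated for @key (a virtual block number
 * through the DAT when the bmap uses VBNs), stored in the slot, and the
 * inode's block count is increased.
 *
 * Return: 0 on success, -ENOENT if @key is out of range, -EEXIST if the
 * slot is already assigned, or another negative error code on failure.
 */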
static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
{
	union nilfs_bmap_ptr_req req;
	struct inode *dat = NULL;
	struct buffer_head *bh;
	int ret;

	if (key > NILFS_DIRECT_KEY_MAX)
		return -ENOENT;
	if (nilfs_direct_get_ptr(bmap, key) != NILFS_BMAP_INVALID_PTR)
		return -EEXIST;

	if (NILFS_BMAP_USE_VBN(bmap)) {
		req.bpr_ptr = nilfs_direct_find_target_v(bmap, key);
		dat = nilfs_bmap_get_dat(bmap);
	}
	ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat);
	if (!ret) {
		/* ptr must be a pointer to a buffer head. */
		bh = (struct buffer_head *)((unsigned long)ptr);
		set_buffer_nilfs_volatile(bh);

		nilfs_bmap_commit_alloc_ptr(bmap, &req, dat);
		nilfs_direct_set_ptr(bmap, key, req.bpr_ptr);

		if (!nilfs_bmap_dirty(bmap))
			nilfs_bmap_set_dirty(bmap);

		if (NILFS_BMAP_USE_VBN(bmap))
			nilfs_bmap_set_target_v(bmap, key, req.bpr_ptr);

		nilfs_inode_add_blocks(bmap->b_inode, 1);
	}
	return ret;
}

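/**
 * nilfs_direct_delete - remove a block from the direct mapping
 * @bmap: bmap using the direct mapping
 * @key:  block offset of the block to remove
 *
 * The block pointer bound to @key is released (through the DAT when
 * virtual block numbers are in use), the slot is marked invalid, and
 * the inode's block count is decreased.
 *
 * Return: 0 on success, -ENOENT if @key has no block assigned, or
 * another negative error code on failure.
 */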
static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key)
{
	union nilfs_bmap_ptr_req req;
	struct inode *dat;
	int ret;

	if (key > NILFS_DIRECT_KEY_MAX ||
	    nilfs_direct_get_ptr(bmap, key) == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL;
	req.bpr_ptr = nilfs_direct_get_ptr(bmap, key);

	ret = nilfs_bmap_prepare_end_ptr(bmap, &req, dat);
	if (!ret) {
		nilfs_bmap_commit_end_ptr(bmap, &req, dat);
		nilfs_direct_set_ptr(bmap, key, NILFS_BMAP_INVALID_PTR);
		nilfs_inode_sub_blocks(bmap->b_inode, 1);
	}
	return ret;
}

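/*
 * nilfs_direct_seek_key - find the first assigned key at or after @start.
 * Stores the key in *keyp and returns 0, or returns -ENOENT if no slot
 * from @start onward holds a valid block pointer.
 */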
static int nilfs_direct_seek_key(const struct nilfs_bmap *direct, __u64 start,
				 __u64 *keyp)
{
	__u64 key;

	for (key = start; key <= NILFS_DIRECT_KEY_MAX; key++) {
		if (nilfs_direct_get_ptr(direct, key) !=
		    NILFS_BMAP_INVALID_PTR) {
			*keyp = key;
			return 0;
		}
	}
	return -ENOENT;
}

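/*
 * nilfs_direct_last_key - find the largest assigned key.  Stores it in
 * *keyp and returns 0, or returns -ENOENT if the mapping is empty.
 */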
static int nilfs_direct_last_key(const struct nilfs_bmap *direct, __u64 *keyp)
{
	__u64 key, lastkey;

	lastkey = NILFS_DIRECT_KEY_MAX + 1;
	for (key = NILFS_DIRECT_KEY_MIN; key <= NILFS_DIRECT_KEY_MAX; key++)
		if (nilfs_direct_get_ptr(direct, key) !=
		    NILFS_BMAP_INVALID_PTR)
			lastkey = key;

	if (lastkey == NILFS_DIRECT_KEY_MAX + 1)
		return -ENOENT;

	*keyp = lastkey;

	return 0;
}

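/*
 * nilfs_direct_check_insert - return nonzero if @key does not fit in the
 * direct mapping, so that the caller can convert the bmap to a B-tree
 * before inserting.
 */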
static int nilfs_direct_check_insert(const struct nilfs_bmap *bmap, __u64 key)
{
	return key > NILFS_DIRECT_KEY_MAX;
}

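/*
 * nilfs_direct_gather_data - collect the assigned (key, pointer) pairs
 * into @keys and @ptrs, up to @nitems entries, and return the number of
 * pairs gathered.  The bmap layer uses this when converting the mapping
 * to a B-tree.
 */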
static int nilfs_direct_gather_data(struct nilfs_bmap *direct,
				    __u64 *keys, __u64 *ptrs, int nitems)
{
	__u64 key;
	__u64 ptr;
	int n;

	if (nitems > NILFS_DIRECT_NBLOCKS)
		nitems = NILFS_DIRECT_NBLOCKS;
	n = 0;
	for (key = 0; key < nitems; key++) {
		ptr = nilfs_direct_get_ptr(direct, key);
		if (ptr != NILFS_BMAP_INVALID_PTR) {
			keys[n] = key;
			ptrs[n] = ptr;
			n++;
		}
	}
	return n;
}

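/**
 * nilfs_direct_delete_and_convert - delete a block and convert to direct
 * @bmap: bmap to convert
 * @key:  block offset of the block to delete
 * @keys: keys gathered from the old mapping
 * @ptrs: block pointers matching @keys
 * @n:    number of entries in @keys and @ptrs
 *
 * The block at @key is deleted with the current (e.g. B-tree) operations,
 * the old mapping is cleared, the direct pointer array is rebuilt from
 * @keys/@ptrs (skipping @key), and the bmap is reinitialized as a direct
 * mapping.
 *
 * Return: 0 on success, or a negative error code on failure.
 */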
int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap,
				    __u64 key, __u64 *keys, __u64 *ptrs, int n)
{
	__le64 *dptrs;
	int ret, i, j;

	/* no need to allocate any resource for conversion */

	/* delete */
	ret = bmap->b_ops->bop_delete(bmap, key);
	if (ret < 0)
		return ret;

	/* free resources */
	if (bmap->b_ops->bop_clear != NULL)
		bmap->b_ops->bop_clear(bmap);

	/* convert */
	dptrs = nilfs_direct_dptrs(bmap);
	for (i = 0, j = 0; i < NILFS_DIRECT_NBLOCKS; i++) {
		if ((j < n) && (i == keys[j])) {
			dptrs[i] = (i != key) ?
				cpu_to_le64(ptrs[j]) :
				NILFS_BMAP_INVALID_PTR;
			j++;
		} else {
			dptrs[i] = NILFS_BMAP_INVALID_PTR;
		}
	}

	nilfs_direct_init(bmap);
	return 0;
}

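/*
 * nilfs_direct_propagate - propagate a dirty data buffer.  When the bmap
 * uses virtual block numbers, a buffer not yet marked volatile gets its
 * old DAT entry replaced by a freshly allocated one and the slot is
 * updated to the new virtual block number; a buffer already marked
 * volatile only has its DAT entry marked dirty.
 */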
static int nilfs_direct_propagate(struct nilfs_bmap *bmap,
				  struct buffer_head *bh)
{
	struct nilfs_palloc_req oldreq, newreq;
	struct inode *dat;
	__u64 key;
	__u64 ptr;
	int ret;

	if (!NILFS_BMAP_USE_VBN(bmap))
		return 0;

	dat = nilfs_bmap_get_dat(bmap);
	key = nilfs_bmap_data_get_key(bmap, bh);
	ptr = nilfs_direct_get_ptr(bmap, key);
	if (!buffer_nilfs_volatile(bh)) {
		oldreq.pr_entry_nr = ptr;
		newreq.pr_entry_nr = ptr;
		ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
		if (ret < 0)
			return ret;
		nilfs_dat_commit_update(dat, &oldreq, &newreq,
					bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
		set_buffer_nilfs_volatile(bh);
		nilfs_direct_set_ptr(bmap, key, newreq.pr_entry_nr);
	} else {
		ret = nilfs_dat_mark_dirty(dat, ptr);
	}

	return ret;
}

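/*
 * nilfs_direct_assign_v - assign a block that uses a virtual block number.
 * The DAT entry for the virtual block number @ptr is pointed at the real
 * disk block @blocknr, and the virtual block number and file block offset
 * are recorded in @binfo for the segment being written.
 */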
static int nilfs_direct_assign_v(struct nilfs_bmap *direct,
				 __u64 key, __u64 ptr,
				 struct buffer_head **bh,
				 sector_t blocknr,
				 union nilfs_binfo *binfo)
{
	struct inode *dat = nilfs_bmap_get_dat(direct);
	union nilfs_bmap_ptr_req req;
	int ret;

	req.bpr_ptr = ptr;
	ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
	if (!ret) {
		nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);
		binfo->bi_v.bi_vblocknr = cpu_to_le64(ptr);
		binfo->bi_v.bi_blkoff = cpu_to_le64(key);
	}
	return ret;
}

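/*
 * nilfs_direct_assign_p - assign a block that uses direct disk addressing.
 * The slot for @key is overwritten with the real disk block number, and
 * the file block offset is recorded in @binfo.
 */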
static int nilfs_direct_assign_p(struct nilfs_bmap *direct,
				 __u64 key, __u64 ptr,
				 struct buffer_head **bh,
				 sector_t blocknr,
				 union nilfs_binfo *binfo)
{
	nilfs_direct_set_ptr(direct, key, blocknr);

	binfo->bi_dat.bi_blkoff = cpu_to_le64(key);
	binfo->bi_dat.bi_level = 0;
	memset(binfo->bi_dat.bi_pad, 0, sizeof(binfo->bi_dat.bi_pad));

	return 0;
}

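/**
 * nilfs_direct_assign - assign a disk block to a buffer being written out
 * @bmap:    bmap using the direct mapping
 * @bh:      buffer head of the block being written
 * @blocknr: disk block number assigned to the block
 * @binfo:   block information to fill in for the log being built
 *
 * Validates the key and pointer recorded for the buffer, then dispatches
 * to nilfs_direct_assign_v() or nilfs_direct_assign_p() depending on
 * whether the bmap uses virtual block numbers.
 *
 * Return: 0 on success, or -EINVAL if the mapping is inconsistent.
 */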
static int nilfs_direct_assign(struct nilfs_bmap *bmap,
			       struct buffer_head **bh,
			       sector_t blocknr,
			       union nilfs_binfo *binfo)
{
	__u64 key;
	__u64 ptr;

	key = nilfs_bmap_data_get_key(bmap, *bh);
	if (unlikely(key > NILFS_DIRECT_KEY_MAX)) {
		nilfs_crit(bmap->b_inode->i_sb,
			   "%s (ino=%lu): invalid key: %llu",
			   __func__,
			   bmap->b_inode->i_ino, (unsigned long long)key);
		return -EINVAL;
	}
	ptr = nilfs_direct_get_ptr(bmap, key);
	if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) {
		nilfs_crit(bmap->b_inode->i_sb,
			   "%s (ino=%lu): invalid pointer: %llu",
			   __func__,
			   bmap->b_inode->i_ino, (unsigned long long)ptr);
		return -EINVAL;
	}

	return NILFS_BMAP_USE_VBN(bmap) ?
		nilfs_direct_assign_v(bmap, key, ptr, bh, blocknr, binfo) :
		nilfs_direct_assign_p(bmap, key, ptr, bh, blocknr, binfo);
}

static const struct nilfs_bmap_operations nilfs_direct_ops = {
	.bop_lookup		=	nilfs_direct_lookup,
	.bop_lookup_contig	=	nilfs_direct_lookup_contig,
	.bop_insert		=	nilfs_direct_insert,
	.bop_delete		=	nilfs_direct_delete,
	.bop_clear		=	NULL,

	.bop_propagate		=	nilfs_direct_propagate,

	.bop_lookup_dirty_buffers	=	NULL,

	.bop_assign		=	nilfs_direct_assign,
	.bop_mark		=	NULL,

	.bop_seek_key		=	nilfs_direct_seek_key,
	.bop_last_key		=	nilfs_direct_last_key,

	.bop_check_insert	=	nilfs_direct_check_insert,
	.bop_check_delete	=	NULL,
	.bop_gather_data	=	nilfs_direct_gather_data,
};


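/*
 * nilfs_direct_init - attach the direct-mapping operation table to @bmap
 * so that subsequent bmap calls use the functions above.
 */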
int nilfs_direct_init(struct nilfs_bmap *bmap)
{
	bmap->b_ops = &nilfs_direct_ops;
	return 0;
}