1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * NILFS direct block pointer.
4 *
5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
6 *
7 * Written by Koji Sato.
8 */
9
10#include <linux/errno.h>
11#include "nilfs.h"
12#include "page.h"
13#include "direct.h"
14#include "alloc.h"
15#include "dat.h"
16
/*
 * nilfs_direct_dptrs - get the array of direct block pointers
 * @direct: bmap using the direct mapping
 *
 * The pointer array lives immediately after the nilfs_direct_node
 * header inside the bmap's in-inode data area (b_u.u_data).
 */
static inline __le64 *nilfs_direct_dptrs(const struct nilfs_bmap *direct)
{
	return (__le64 *)
		((struct nilfs_direct_node *)direct->b_u.u_data + 1);
}
22
/* Read the block pointer stored at slot @key (on-disk little-endian). */
static inline __u64
nilfs_direct_get_ptr(const struct nilfs_bmap *direct, __u64 key)
{
	return le64_to_cpu(*(nilfs_direct_dptrs(direct) + key));
}
28
/* Store @ptr into slot @key in on-disk little-endian form. */
static inline void nilfs_direct_set_ptr(struct nilfs_bmap *direct,
					__u64 key, __u64 ptr)
{
	*(nilfs_direct_dptrs(direct) + key) = cpu_to_le64(ptr);
}
34
35static int nilfs_direct_lookup(const struct nilfs_bmap *direct,
36 __u64 key, int level, __u64 *ptrp)
37{
38 __u64 ptr;
39
40 if (key > NILFS_DIRECT_KEY_MAX || level != 1)
41 return -ENOENT;
42 ptr = nilfs_direct_get_ptr(direct, key);
43 if (ptr == NILFS_BMAP_INVALID_PTR)
44 return -ENOENT;
45
46 *ptrp = ptr;
47 return 0;
48}
49
50static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
51 __u64 key, __u64 *ptrp,
52 unsigned int maxblocks)
53{
54 struct inode *dat = NULL;
55 __u64 ptr, ptr2;
56 sector_t blocknr;
57 int ret, cnt;
58
59 if (key > NILFS_DIRECT_KEY_MAX)
60 return -ENOENT;
61 ptr = nilfs_direct_get_ptr(direct, key);
62 if (ptr == NILFS_BMAP_INVALID_PTR)
63 return -ENOENT;
64
65 if (NILFS_BMAP_USE_VBN(direct)) {
66 dat = nilfs_bmap_get_dat(direct);
67 ret = nilfs_dat_translate(dat, ptr, &blocknr);
68 if (ret < 0)
69 return ret;
70 ptr = blocknr;
71 }
72
73 maxblocks = min_t(unsigned int, maxblocks,
74 NILFS_DIRECT_KEY_MAX - key + 1);
75 for (cnt = 1; cnt < maxblocks &&
76 (ptr2 = nilfs_direct_get_ptr(direct, key + cnt)) !=
77 NILFS_BMAP_INVALID_PTR;
78 cnt++) {
79 if (dat) {
80 ret = nilfs_dat_translate(dat, ptr2, &blocknr);
81 if (ret < 0)
82 return ret;
83 ptr2 = blocknr;
84 }
85 if (ptr2 != ptr + cnt)
86 break;
87 }
88 *ptrp = ptr;
89 return cnt;
90}
91
/*
 * Choose a target virtual block number for a new allocation at @key:
 * prefer a sequential-access target; if none is available, fall back
 * to a block-group based target.
 */
static __u64
nilfs_direct_find_target_v(const struct nilfs_bmap *direct, __u64 key)
{
	__u64 ptr;

	ptr = nilfs_bmap_find_target_seq(direct, key);
	if (ptr != NILFS_BMAP_INVALID_PTR)
		/* sequential access */
		return ptr;

	/* block group */
	return nilfs_bmap_find_target_in_group(direct);
}
105
/*
 * nilfs_direct_insert - insert a block pointer at @key
 * @bmap: direct bmap
 * @key: key of the new block
 * @ptr: for the direct bmap, this is actually the buffer head of the
 *       new block cast to an integer (see comment below), not a disk
 *       block number
 *
 * Allocates a pointer (via the DAT for virtual block numbers), stores
 * it in the slot for @key, and dirties the bmap.  Returns 0 on
 * success, -ENOENT if @key is out of range, -EEXIST if the slot is
 * already assigned, or a negative error from the allocation path.
 */
static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
{
	union nilfs_bmap_ptr_req req;
	struct inode *dat = NULL;
	struct buffer_head *bh;
	int ret;

	if (key > NILFS_DIRECT_KEY_MAX)
		return -ENOENT;
	if (nilfs_direct_get_ptr(bmap, key) != NILFS_BMAP_INVALID_PTR)
		return -EEXIST;

	if (NILFS_BMAP_USE_VBN(bmap)) {
		/* Seed the allocation with a target virtual block number. */
		req.bpr_ptr = nilfs_direct_find_target_v(bmap, key);
		dat = nilfs_bmap_get_dat(bmap);
	}
	ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat);
	if (!ret) {
		/* ptr must be a pointer to a buffer head. */
		bh = (struct buffer_head *)((unsigned long)ptr);
		set_buffer_nilfs_volatile(bh);

		nilfs_bmap_commit_alloc_ptr(bmap, &req, dat);
		nilfs_direct_set_ptr(bmap, key, req.bpr_ptr);

		if (!nilfs_bmap_dirty(bmap))
			nilfs_bmap_set_dirty(bmap);

		if (NILFS_BMAP_USE_VBN(bmap))
			/* Remember the target for future sequential inserts. */
			nilfs_bmap_set_target_v(bmap, key, req.bpr_ptr);

		nilfs_inode_add_blocks(bmap->b_inode, 1);
	}
	return ret;
}
141
/*
 * nilfs_direct_delete - delete the block pointer at @key
 * @bmap: direct bmap
 * @key: key of the block to delete
 *
 * Ends the pointer's lifetime (releasing its DAT entry for virtual
 * block numbers), invalidates the slot, and decrements the inode
 * block count.  Returns 0 on success, -ENOENT if @key is out of range
 * or unassigned, or a negative error from the end-ptr path.
 */
static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key)
{
	union nilfs_bmap_ptr_req req;
	struct inode *dat;
	int ret;

	if (key > NILFS_DIRECT_KEY_MAX ||
	    nilfs_direct_get_ptr(bmap, key) == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL;
	req.bpr_ptr = nilfs_direct_get_ptr(bmap, key);

	ret = nilfs_bmap_prepare_end_ptr(bmap, &req, dat);
	if (!ret) {
		nilfs_bmap_commit_end_ptr(bmap, &req, dat);
		nilfs_direct_set_ptr(bmap, key, NILFS_BMAP_INVALID_PTR);
		nilfs_inode_sub_blocks(bmap->b_inode, 1);
	}
	return ret;
}
163
164static int nilfs_direct_seek_key(const struct nilfs_bmap *direct, __u64 start,
165 __u64 *keyp)
166{
167 __u64 key;
168
169 for (key = start; key <= NILFS_DIRECT_KEY_MAX; key++) {
170 if (nilfs_direct_get_ptr(direct, key) !=
171 NILFS_BMAP_INVALID_PTR) {
172 *keyp = key;
173 return 0;
174 }
175 }
176 return -ENOENT;
177}
178
179static int nilfs_direct_last_key(const struct nilfs_bmap *direct, __u64 *keyp)
180{
181 __u64 key, lastkey;
182
183 lastkey = NILFS_DIRECT_KEY_MAX + 1;
184 for (key = NILFS_DIRECT_KEY_MIN; key <= NILFS_DIRECT_KEY_MAX; key++)
185 if (nilfs_direct_get_ptr(direct, key) !=
186 NILFS_BMAP_INVALID_PTR)
187 lastkey = key;
188
189 if (lastkey == NILFS_DIRECT_KEY_MAX + 1)
190 return -ENOENT;
191
192 *keyp = lastkey;
193
194 return 0;
195}
196
197static int nilfs_direct_check_insert(const struct nilfs_bmap *bmap, __u64 key)
198{
199 return key > NILFS_DIRECT_KEY_MAX;
200}
201
/*
 * nilfs_direct_gather_data - collect assigned key/pointer pairs
 * @direct: direct bmap
 * @keys: output array for keys
 * @ptrs: output array for pointers
 * @nitems: capacity of @keys/@ptrs (clamped to NILFS_DIRECT_NBLOCKS)
 *
 * Fills @keys/@ptrs with the assigned entries in ascending key order
 * and returns the number stored.
 * NOTE(review): the `key < nitems` comparison promotes @nitems to
 * __u64; callers are presumably expected to pass a non-negative array
 * size — confirm.
 */
static int nilfs_direct_gather_data(struct nilfs_bmap *direct,
				    __u64 *keys, __u64 *ptrs, int nitems)
{
	__u64 key;
	__u64 ptr;
	int n;

	if (nitems > NILFS_DIRECT_NBLOCKS)
		nitems = NILFS_DIRECT_NBLOCKS;
	n = 0;
	for (key = 0; key < nitems; key++) {
		ptr = nilfs_direct_get_ptr(direct, key);
		if (ptr != NILFS_BMAP_INVALID_PTR) {
			keys[n] = key;
			ptrs[n] = ptr;
			n++;
		}
	}
	return n;
}
222
/*
 * nilfs_direct_delete_and_convert - convert a bmap to the direct mapping
 * @bmap: bmap to convert (still using its current, non-direct operations)
 * @key: key of the block being deleted as part of the conversion
 * @keys: keys of the @n blocks remaining after the delete
 *        (presumably sorted ascending — the merge loop below relies on it)
 * @ptrs: pointers matching @keys
 * @n: number of entries in @keys/@ptrs
 *
 * Deletes @key through the current operations, releases the old
 * mapping's resources, rewrites the in-inode pointer array from
 * @keys/@ptrs, and installs the direct operation table.
 */
int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap,
				    __u64 key, __u64 *keys, __u64 *ptrs, int n)
{
	__le64 *dptrs;
	int ret, i, j;

	/* no need to allocate any resource for conversion */

	/* delete */
	ret = bmap->b_ops->bop_delete(bmap, key);
	if (ret < 0)
		return ret;

	/* free resources */
	if (bmap->b_ops->bop_clear != NULL)
		bmap->b_ops->bop_clear(bmap);

	/* convert */
	dptrs = nilfs_direct_dptrs(bmap);
	for (i = 0, j = 0; i < NILFS_DIRECT_NBLOCKS; i++) {
		if ((j < n) && (i == keys[j])) {
			/* the deleted key itself becomes invalid */
			dptrs[i] = (i != key) ?
				cpu_to_le64(ptrs[j]) :
				NILFS_BMAP_INVALID_PTR;
			j++;
		} else
			/*
			 * NOTE(review): assigned without cpu_to_le64();
			 * presumably NILFS_BMAP_INVALID_PTR is an
			 * endian-neutral bit pattern — confirm.
			 */
			dptrs[i] = NILFS_BMAP_INVALID_PTR;
	}

	nilfs_direct_init(bmap);
	return 0;
}
255
/*
 * nilfs_direct_propagate - propagate dirtiness of a data block
 * @bmap: direct bmap
 * @bh: buffer head of the dirtied data block
 *
 * No-op (returns 0) for bmaps using physical block addressing.  For
 * virtual block numbers: on the first propagation of the buffer (the
 * nilfs-volatile flag is not yet set), prepares and commits a DAT
 * entry update and records the resulting entry number in the slot;
 * on later propagations, just marks the existing DAT entry dirty.
 */
static int nilfs_direct_propagate(struct nilfs_bmap *bmap,
				  struct buffer_head *bh)
{
	struct nilfs_palloc_req oldreq, newreq;
	struct inode *dat;
	__u64 key;
	__u64 ptr;
	int ret;

	if (!NILFS_BMAP_USE_VBN(bmap))
		return 0;

	dat = nilfs_bmap_get_dat(bmap);
	key = nilfs_bmap_data_get_key(bmap, bh);
	ptr = nilfs_direct_get_ptr(bmap, key);
	if (!buffer_nilfs_volatile(bh)) {
		oldreq.pr_entry_nr = ptr;
		newreq.pr_entry_nr = ptr;
		ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
		if (ret < 0)
			return ret;
		nilfs_dat_commit_update(dat, &oldreq, &newreq,
			bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
		set_buffer_nilfs_volatile(bh);
		nilfs_direct_set_ptr(bmap, key, newreq.pr_entry_nr);
	} else
		ret = nilfs_dat_mark_dirty(dat, ptr);

	return ret;
}
286
/*
 * Assign the real disk location @blocknr to virtual block number @ptr
 * in the DAT, and fill in the virtual-block binfo recorded in the
 * segment being written.
 */
static int nilfs_direct_assign_v(struct nilfs_bmap *direct,
				 __u64 key, __u64 ptr,
				 struct buffer_head **bh,
				 sector_t blocknr,
				 union nilfs_binfo *binfo)
{
	struct inode *dat = nilfs_bmap_get_dat(direct);
	union nilfs_bmap_ptr_req req;
	int ret;

	req.bpr_ptr = ptr;
	ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
	if (!ret) {
		nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);
		binfo->bi_v.bi_vblocknr = cpu_to_le64(ptr);
		binfo->bi_v.bi_blkoff = cpu_to_le64(key);
	}
	return ret;
}
306
/*
 * Assign the real disk location for a bmap using physical addressing:
 * store @blocknr into the slot for @key and fill in the bi_dat binfo
 * (level 0, zeroed padding).  Always returns 0.
 */
static int nilfs_direct_assign_p(struct nilfs_bmap *direct,
				 __u64 key, __u64 ptr,
				 struct buffer_head **bh,
				 sector_t blocknr,
				 union nilfs_binfo *binfo)
{
	nilfs_direct_set_ptr(direct, key, blocknr);

	binfo->bi_dat.bi_blkoff = cpu_to_le64(key);
	binfo->bi_dat.bi_level = 0;
	memset(binfo->bi_dat.bi_pad, 0, sizeof(binfo->bi_dat.bi_pad));

	return 0;
}
321
/*
 * nilfs_direct_assign - assign a disk block address at write-out time
 * @bmap: direct bmap
 * @bh: buffer head of the block being written (in/out)
 * @blocknr: disk block number assigned by the segment constructor
 * @binfo: block information record to fill in
 *
 * Validates the key and pointer read back from the bmap (logging and
 * returning -EINVAL on inconsistency) and dispatches to the virtual
 * (DAT-managed) or physical assignment variant.
 */
static int nilfs_direct_assign(struct nilfs_bmap *bmap,
			       struct buffer_head **bh,
			       sector_t blocknr,
			       union nilfs_binfo *binfo)
{
	__u64 key;
	__u64 ptr;

	key = nilfs_bmap_data_get_key(bmap, *bh);
	if (unlikely(key > NILFS_DIRECT_KEY_MAX)) {
		nilfs_crit(bmap->b_inode->i_sb,
			   "%s (ino=%lu): invalid key: %llu",
			   __func__,
			   bmap->b_inode->i_ino, (unsigned long long)key);
		return -EINVAL;
	}
	ptr = nilfs_direct_get_ptr(bmap, key);
	if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) {
		nilfs_crit(bmap->b_inode->i_sb,
			   "%s (ino=%lu): invalid pointer: %llu",
			   __func__,
			   bmap->b_inode->i_ino, (unsigned long long)ptr);
		return -EINVAL;
	}

	return NILFS_BMAP_USE_VBN(bmap) ?
		nilfs_direct_assign_v(bmap, key, ptr, bh, blocknr, binfo) :
		nilfs_direct_assign_p(bmap, key, ptr, bh, blocknr, binfo);
}
351
/* Operation table for the direct bmap; unused hooks are left NULL. */
static const struct nilfs_bmap_operations nilfs_direct_ops = {
	.bop_lookup = nilfs_direct_lookup,
	.bop_lookup_contig = nilfs_direct_lookup_contig,
	.bop_insert = nilfs_direct_insert,
	.bop_delete = nilfs_direct_delete,
	.bop_clear = NULL,

	.bop_propagate = nilfs_direct_propagate,

	.bop_lookup_dirty_buffers = NULL,

	.bop_assign = nilfs_direct_assign,
	.bop_mark = NULL,

	.bop_seek_key = nilfs_direct_seek_key,
	.bop_last_key = nilfs_direct_last_key,

	.bop_check_insert = nilfs_direct_check_insert,
	.bop_check_delete = NULL,
	.bop_gather_data = nilfs_direct_gather_data,
};
373
374
/*
 * nilfs_direct_init - set up a bmap to use the direct mapping
 * Installs the direct operation table.  Always returns 0.
 */
int nilfs_direct_init(struct nilfs_bmap *bmap)
{
	bmap->b_ops = &nilfs_direct_ops;
	return 0;
}
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * NILFS direct block pointer.
4 *
5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
6 *
7 * Written by Koji Sato.
8 */
9
10#include <linux/errno.h>
11#include "nilfs.h"
12#include "page.h"
13#include "direct.h"
14#include "alloc.h"
15#include "dat.h"
16
/*
 * nilfs_direct_dptrs - get the array of direct block pointers
 * @direct: bmap using the direct mapping
 *
 * The pointer array lives immediately after the nilfs_direct_node
 * header inside the bmap's in-inode data area (b_u.u_data).
 */
static inline __le64 *nilfs_direct_dptrs(const struct nilfs_bmap *direct)
{
	return (__le64 *)
		((struct nilfs_direct_node *)direct->b_u.u_data + 1);
}
22
/* Read the block pointer stored at slot @key (on-disk little-endian). */
static inline __u64
nilfs_direct_get_ptr(const struct nilfs_bmap *direct, __u64 key)
{
	return le64_to_cpu(*(nilfs_direct_dptrs(direct) + key));
}
28
/* Store @ptr into slot @key in on-disk little-endian form. */
static inline void nilfs_direct_set_ptr(struct nilfs_bmap *direct,
					__u64 key, __u64 ptr)
{
	*(nilfs_direct_dptrs(direct) + key) = cpu_to_le64(ptr);
}
34
/*
 * Look up the block pointer mapped at @key.  The direct bmap has a
 * single level, so @level must be 1.  Returns 0 and stores the pointer
 * in *@ptrp, or -ENOENT if the key is out of range or unassigned.
 */
static int nilfs_direct_lookup(const struct nilfs_bmap *direct,
			       __u64 key, int level, __u64 *ptrp)
{
	__u64 ptr;

	if (key > NILFS_DIRECT_KEY_MAX || level != 1)
		return -ENOENT;
	ptr = nilfs_direct_get_ptr(direct, key);
	if (ptr == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	*ptrp = ptr;
	return 0;
}
49
/*
 * nilfs_direct_lookup_contig - look up a run of contiguous blocks
 * @direct: direct bmap
 * @key: first key to look up
 * @ptrp: place to store the starting disk block number
 * @maxblocks: maximum number of blocks to look up
 *
 * Returns the number of contiguous blocks found starting at @key
 * (with *@ptrp set to the first disk block number), or a negative
 * error code.  An -ENOENT from nilfs_dat_translate() is converted to
 * -EINVAL because the pointer was assigned in the bmap, so a missing
 * DAT entry means metadata corruption, not "no block at this key".
 */
static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
				      __u64 key, __u64 *ptrp,
				      unsigned int maxblocks)
{
	struct inode *dat = NULL;
	__u64 ptr, ptr2;
	sector_t blocknr;
	int ret, cnt;

	if (key > NILFS_DIRECT_KEY_MAX)
		return -ENOENT;
	ptr = nilfs_direct_get_ptr(direct, key);
	if (ptr == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	if (NILFS_BMAP_USE_VBN(direct)) {
		/* Virtual block numbers must be translated via the DAT. */
		dat = nilfs_bmap_get_dat(direct);
		ret = nilfs_dat_translate(dat, ptr, &blocknr);
		if (ret < 0)
			goto dat_error;
		ptr = blocknr;
	}

	/* Never scan past the end of the direct mapping. */
	maxblocks = min_t(unsigned int, maxblocks,
			  NILFS_DIRECT_KEY_MAX - key + 1);
	for (cnt = 1; cnt < maxblocks &&
		     (ptr2 = nilfs_direct_get_ptr(direct, key + cnt)) !=
		     NILFS_BMAP_INVALID_PTR;
	     cnt++) {
		if (dat) {
			ret = nilfs_dat_translate(dat, ptr2, &blocknr);
			if (ret < 0)
				goto dat_error;
			ptr2 = blocknr;
		}
		if (ptr2 != ptr + cnt)
			break;
	}
	*ptrp = ptr;
	return cnt;

 dat_error:
	if (ret == -ENOENT)
		ret = -EINVAL; /* Notify bmap layer of metadata corruption */
	return ret;
}
96
/*
 * Choose a target virtual block number for a new allocation at @key:
 * prefer a sequential-access target; if none is available, fall back
 * to a block-group based target.
 */
static __u64
nilfs_direct_find_target_v(const struct nilfs_bmap *direct, __u64 key)
{
	__u64 ptr;

	ptr = nilfs_bmap_find_target_seq(direct, key);
	if (ptr != NILFS_BMAP_INVALID_PTR)
		/* sequential access */
		return ptr;

	/* block group */
	return nilfs_bmap_find_target_in_group(direct);
}
110
/*
 * nilfs_direct_insert - insert a block pointer at @key
 * @bmap: direct bmap
 * @key: key of the new block
 * @ptr: for the direct bmap, this is actually the buffer head of the
 *       new block cast to an integer (see comment below), not a disk
 *       block number
 *
 * Allocates a pointer (via the DAT for virtual block numbers), stores
 * it in the slot for @key, and dirties the bmap.  Returns 0 on
 * success, -ENOENT if @key is out of range, -EEXIST if the slot is
 * already assigned, or a negative error from the allocation path.
 */
static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
{
	union nilfs_bmap_ptr_req req;
	struct inode *dat = NULL;
	struct buffer_head *bh;
	int ret;

	if (key > NILFS_DIRECT_KEY_MAX)
		return -ENOENT;
	if (nilfs_direct_get_ptr(bmap, key) != NILFS_BMAP_INVALID_PTR)
		return -EEXIST;

	if (NILFS_BMAP_USE_VBN(bmap)) {
		/* Seed the allocation with a target virtual block number. */
		req.bpr_ptr = nilfs_direct_find_target_v(bmap, key);
		dat = nilfs_bmap_get_dat(bmap);
	}
	ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat);
	if (!ret) {
		/* ptr must be a pointer to a buffer head. */
		bh = (struct buffer_head *)((unsigned long)ptr);
		set_buffer_nilfs_volatile(bh);

		nilfs_bmap_commit_alloc_ptr(bmap, &req, dat);
		nilfs_direct_set_ptr(bmap, key, req.bpr_ptr);

		if (!nilfs_bmap_dirty(bmap))
			nilfs_bmap_set_dirty(bmap);

		if (NILFS_BMAP_USE_VBN(bmap))
			/* Remember the target for future sequential inserts. */
			nilfs_bmap_set_target_v(bmap, key, req.bpr_ptr);

		nilfs_inode_add_blocks(bmap->b_inode, 1);
	}
	return ret;
}
146
/*
 * nilfs_direct_delete - delete the block pointer at @key
 * @bmap: direct bmap
 * @key: key of the block to delete
 *
 * Ends the pointer's lifetime (releasing its DAT entry for virtual
 * block numbers), invalidates the slot, and decrements the inode
 * block count.  Returns 0 on success, -ENOENT if @key is out of range
 * or unassigned, or a negative error from the end-ptr path.
 */
static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key)
{
	union nilfs_bmap_ptr_req req;
	struct inode *dat;
	int ret;

	if (key > NILFS_DIRECT_KEY_MAX ||
	    nilfs_direct_get_ptr(bmap, key) == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL;
	req.bpr_ptr = nilfs_direct_get_ptr(bmap, key);

	ret = nilfs_bmap_prepare_end_ptr(bmap, &req, dat);
	if (!ret) {
		nilfs_bmap_commit_end_ptr(bmap, &req, dat);
		nilfs_direct_set_ptr(bmap, key, NILFS_BMAP_INVALID_PTR);
		nilfs_inode_sub_blocks(bmap->b_inode, 1);
	}
	return ret;
}
168
/*
 * Find the first assigned key at or after @start.  Stores it in *@keyp
 * and returns 0, or returns -ENOENT if no assigned key remains.
 */
static int nilfs_direct_seek_key(const struct nilfs_bmap *direct, __u64 start,
				 __u64 *keyp)
{
	__u64 key;

	for (key = start; key <= NILFS_DIRECT_KEY_MAX; key++) {
		if (nilfs_direct_get_ptr(direct, key) !=
		    NILFS_BMAP_INVALID_PTR) {
			*keyp = key;
			return 0;
		}
	}
	return -ENOENT;
}
183
/*
 * Find the largest assigned key.  Stores it in *@keyp and returns 0,
 * or returns -ENOENT if the mapping is entirely unassigned.
 */
static int nilfs_direct_last_key(const struct nilfs_bmap *direct, __u64 *keyp)
{
	__u64 key, lastkey;

	/* KEY_MAX + 1 serves as the "none found" sentinel. */
	lastkey = NILFS_DIRECT_KEY_MAX + 1;
	for (key = NILFS_DIRECT_KEY_MIN; key <= NILFS_DIRECT_KEY_MAX; key++)
		if (nilfs_direct_get_ptr(direct, key) !=
		    NILFS_BMAP_INVALID_PTR)
			lastkey = key;

	if (lastkey == NILFS_DIRECT_KEY_MAX + 1)
		return -ENOENT;

	*keyp = lastkey;

	return 0;
}
201
/* Nonzero means @key does not fit in the direct mapping. */
static int nilfs_direct_check_insert(const struct nilfs_bmap *bmap, __u64 key)
{
	return key > NILFS_DIRECT_KEY_MAX;
}
206
/*
 * nilfs_direct_gather_data - collect assigned key/pointer pairs
 * @direct: direct bmap
 * @keys: output array for keys
 * @ptrs: output array for pointers
 * @nitems: capacity of @keys/@ptrs (clamped to NILFS_DIRECT_NBLOCKS)
 *
 * Fills @keys/@ptrs with the assigned entries in ascending key order
 * and returns the number stored.
 * NOTE(review): the `key < nitems` comparison promotes @nitems to
 * __u64; callers are presumably expected to pass a non-negative array
 * size — confirm.
 */
static int nilfs_direct_gather_data(struct nilfs_bmap *direct,
				    __u64 *keys, __u64 *ptrs, int nitems)
{
	__u64 key;
	__u64 ptr;
	int n;

	if (nitems > NILFS_DIRECT_NBLOCKS)
		nitems = NILFS_DIRECT_NBLOCKS;
	n = 0;
	for (key = 0; key < nitems; key++) {
		ptr = nilfs_direct_get_ptr(direct, key);
		if (ptr != NILFS_BMAP_INVALID_PTR) {
			keys[n] = key;
			ptrs[n] = ptr;
			n++;
		}
	}
	return n;
}
227
/*
 * nilfs_direct_delete_and_convert - convert a bmap to the direct mapping
 * @bmap: bmap to convert (still using its current, non-direct operations)
 * @key: key of the block being deleted as part of the conversion
 * @keys: keys of the @n blocks remaining after the delete
 *        (presumably sorted ascending — the merge loop below relies on it)
 * @ptrs: pointers matching @keys
 * @n: number of entries in @keys/@ptrs
 *
 * Deletes @key through the current operations, releases the old
 * mapping's resources, rewrites the in-inode pointer array from
 * @keys/@ptrs, and installs the direct operation table.
 */
int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap,
				    __u64 key, __u64 *keys, __u64 *ptrs, int n)
{
	__le64 *dptrs;
	int ret, i, j;

	/* no need to allocate any resource for conversion */

	/* delete */
	ret = bmap->b_ops->bop_delete(bmap, key);
	if (ret < 0)
		return ret;

	/* free resources */
	if (bmap->b_ops->bop_clear != NULL)
		bmap->b_ops->bop_clear(bmap);

	/* convert */
	dptrs = nilfs_direct_dptrs(bmap);
	for (i = 0, j = 0; i < NILFS_DIRECT_NBLOCKS; i++) {
		if ((j < n) && (i == keys[j])) {
			/* the deleted key itself becomes invalid */
			dptrs[i] = (i != key) ?
				cpu_to_le64(ptrs[j]) :
				NILFS_BMAP_INVALID_PTR;
			j++;
		} else
			/*
			 * NOTE(review): assigned without cpu_to_le64();
			 * presumably NILFS_BMAP_INVALID_PTR is an
			 * endian-neutral bit pattern — confirm.
			 */
			dptrs[i] = NILFS_BMAP_INVALID_PTR;
	}

	nilfs_direct_init(bmap);
	return 0;
}
260
/*
 * nilfs_direct_propagate - propagate dirtiness of a data block
 * @bmap: direct bmap
 * @bh: buffer head of the dirtied data block
 *
 * No-op (returns 0) for bmaps using physical block addressing.  For
 * virtual block numbers: on the first propagation of the buffer (the
 * nilfs-volatile flag is not yet set), prepares and commits a DAT
 * entry update and records the resulting entry number in the slot;
 * on later propagations, just marks the existing DAT entry dirty.
 */
static int nilfs_direct_propagate(struct nilfs_bmap *bmap,
				  struct buffer_head *bh)
{
	struct nilfs_palloc_req oldreq, newreq;
	struct inode *dat;
	__u64 key;
	__u64 ptr;
	int ret;

	if (!NILFS_BMAP_USE_VBN(bmap))
		return 0;

	dat = nilfs_bmap_get_dat(bmap);
	key = nilfs_bmap_data_get_key(bmap, bh);
	ptr = nilfs_direct_get_ptr(bmap, key);
	if (!buffer_nilfs_volatile(bh)) {
		oldreq.pr_entry_nr = ptr;
		newreq.pr_entry_nr = ptr;
		ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
		if (ret < 0)
			return ret;
		nilfs_dat_commit_update(dat, &oldreq, &newreq,
			bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
		set_buffer_nilfs_volatile(bh);
		nilfs_direct_set_ptr(bmap, key, newreq.pr_entry_nr);
	} else
		ret = nilfs_dat_mark_dirty(dat, ptr);

	return ret;
}
291
/*
 * Assign the real disk location @blocknr to virtual block number @ptr
 * in the DAT, and fill in the virtual-block binfo recorded in the
 * segment being written.
 */
static int nilfs_direct_assign_v(struct nilfs_bmap *direct,
				 __u64 key, __u64 ptr,
				 struct buffer_head **bh,
				 sector_t blocknr,
				 union nilfs_binfo *binfo)
{
	struct inode *dat = nilfs_bmap_get_dat(direct);
	union nilfs_bmap_ptr_req req;
	int ret;

	req.bpr_ptr = ptr;
	ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
	if (!ret) {
		nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);
		binfo->bi_v.bi_vblocknr = cpu_to_le64(ptr);
		binfo->bi_v.bi_blkoff = cpu_to_le64(key);
	}
	return ret;
}
311
/*
 * Assign the real disk location for a bmap using physical addressing:
 * store @blocknr into the slot for @key and fill in the bi_dat binfo
 * (level 0, zeroed padding).  Always returns 0.
 */
static int nilfs_direct_assign_p(struct nilfs_bmap *direct,
				 __u64 key, __u64 ptr,
				 struct buffer_head **bh,
				 sector_t blocknr,
				 union nilfs_binfo *binfo)
{
	nilfs_direct_set_ptr(direct, key, blocknr);

	binfo->bi_dat.bi_blkoff = cpu_to_le64(key);
	binfo->bi_dat.bi_level = 0;
	memset(binfo->bi_dat.bi_pad, 0, sizeof(binfo->bi_dat.bi_pad));

	return 0;
}
326
/*
 * nilfs_direct_assign - assign a disk block address at write-out time
 * @bmap: direct bmap
 * @bh: buffer head of the block being written (in/out)
 * @blocknr: disk block number assigned by the segment constructor
 * @binfo: block information record to fill in
 *
 * Validates the key and pointer read back from the bmap (logging and
 * returning -EINVAL on inconsistency) and dispatches to the virtual
 * (DAT-managed) or physical assignment variant.
 */
static int nilfs_direct_assign(struct nilfs_bmap *bmap,
			       struct buffer_head **bh,
			       sector_t blocknr,
			       union nilfs_binfo *binfo)
{
	__u64 key;
	__u64 ptr;

	key = nilfs_bmap_data_get_key(bmap, *bh);
	if (unlikely(key > NILFS_DIRECT_KEY_MAX)) {
		nilfs_crit(bmap->b_inode->i_sb,
			   "%s (ino=%lu): invalid key: %llu",
			   __func__,
			   bmap->b_inode->i_ino, (unsigned long long)key);
		return -EINVAL;
	}
	ptr = nilfs_direct_get_ptr(bmap, key);
	if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) {
		nilfs_crit(bmap->b_inode->i_sb,
			   "%s (ino=%lu): invalid pointer: %llu",
			   __func__,
			   bmap->b_inode->i_ino, (unsigned long long)ptr);
		return -EINVAL;
	}

	return NILFS_BMAP_USE_VBN(bmap) ?
		nilfs_direct_assign_v(bmap, key, ptr, bh, blocknr, binfo) :
		nilfs_direct_assign_p(bmap, key, ptr, bh, blocknr, binfo);
}
356
/* Operation table for the direct bmap; unused hooks are left NULL. */
static const struct nilfs_bmap_operations nilfs_direct_ops = {
	.bop_lookup = nilfs_direct_lookup,
	.bop_lookup_contig = nilfs_direct_lookup_contig,
	.bop_insert = nilfs_direct_insert,
	.bop_delete = nilfs_direct_delete,
	.bop_clear = NULL,

	.bop_propagate = nilfs_direct_propagate,

	.bop_lookup_dirty_buffers = NULL,

	.bop_assign = nilfs_direct_assign,
	.bop_mark = NULL,

	.bop_seek_key = nilfs_direct_seek_key,
	.bop_last_key = nilfs_direct_last_key,

	.bop_check_insert = nilfs_direct_check_insert,
	.bop_check_delete = NULL,
	.bop_gather_data = nilfs_direct_gather_data,
};
378
379
/*
 * nilfs_direct_init - set up a bmap to use the direct mapping
 * Installs the direct operation table.  Always returns 0.
 */
int nilfs_direct_init(struct nilfs_bmap *bmap)
{
	bmap->b_ops = &nilfs_direct_ops;
	return 0;
}