direct.c (Linux v6.8)
  1// SPDX-License-Identifier: GPL-2.0+
  2/*
  3 * NILFS direct block pointer.
  4 *
  5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  6 *
  7 * Written by Koji Sato.
  8 */
  9
 10#include <linux/errno.h>
 11#include "nilfs.h"
 12#include "page.h"
 13#include "direct.h"
 14#include "alloc.h"
 15#include "dat.h"
 16
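/*
 * Accessor helpers: the direct bmap keeps an array of little-endian block
 * pointers in the bmap's b_u.u_data area, immediately after the
 * nilfs_direct_node header, indexed by file block offset (the bmap key).
 */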
 17static inline __le64 *nilfs_direct_dptrs(const struct nilfs_bmap *direct)
 18{
 19	return (__le64 *)
 20		((struct nilfs_direct_node *)direct->b_u.u_data + 1);
 21}
 22
 23static inline __u64
 24nilfs_direct_get_ptr(const struct nilfs_bmap *direct, __u64 key)
 25{
 26	return le64_to_cpu(*(nilfs_direct_dptrs(direct) + key));
 27}
 28
 29static inline void nilfs_direct_set_ptr(struct nilfs_bmap *direct,
 30					__u64 key, __u64 ptr)
 31{
 32	*(nilfs_direct_dptrs(direct) + key) = cpu_to_le64(ptr);
 33}
 34
 35static int nilfs_direct_lookup(const struct nilfs_bmap *direct,
 36			       __u64 key, int level, __u64 *ptrp)
 37{
 38	__u64 ptr;
 39
 40	if (key > NILFS_DIRECT_KEY_MAX || level != 1)
 41		return -ENOENT;
 42	ptr = nilfs_direct_get_ptr(direct, key);
 43	if (ptr == NILFS_BMAP_INVALID_PTR)
 44		return -ENOENT;
 45
 46	*ptrp = ptr;
 47	return 0;
 48}
 49
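/*
 * Look up a run of blocks starting at @key: resolve the first pointer
 * (through the DAT when virtual block numbers are used), then count how
 * many following keys map to consecutive block addresses, limited by
 * @maxblocks.  Stores the first address in *ptrp and returns the length
 * of the run.
 */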
 50static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
 51				      __u64 key, __u64 *ptrp,
 52				      unsigned int maxblocks)
 53{
 54	struct inode *dat = NULL;
 55	__u64 ptr, ptr2;
 56	sector_t blocknr;
 57	int ret, cnt;
 58
 59	if (key > NILFS_DIRECT_KEY_MAX)
 60		return -ENOENT;
 61	ptr = nilfs_direct_get_ptr(direct, key);
 62	if (ptr == NILFS_BMAP_INVALID_PTR)
 63		return -ENOENT;
 64
 65	if (NILFS_BMAP_USE_VBN(direct)) {
 66		dat = nilfs_bmap_get_dat(direct);
 67		ret = nilfs_dat_translate(dat, ptr, &blocknr);
 68		if (ret < 0)
 69			return ret;
 70		ptr = blocknr;
 71	}
 72
 73	maxblocks = min_t(unsigned int, maxblocks,
 74			  NILFS_DIRECT_KEY_MAX - key + 1);
 75	for (cnt = 1; cnt < maxblocks &&
 76		     (ptr2 = nilfs_direct_get_ptr(direct, key + cnt)) !=
 77		     NILFS_BMAP_INVALID_PTR;
 78	     cnt++) {
 79		if (dat) {
 80			ret = nilfs_dat_translate(dat, ptr2, &blocknr);
 81			if (ret < 0)
 82				return ret;
 83			ptr2 = blocknr;
 84		}
 85		if (ptr2 != ptr + cnt)
 86			break;
 87	}
 88	*ptrp = ptr;
 89	return cnt;
 90}
 91
 92static __u64
 93nilfs_direct_find_target_v(const struct nilfs_bmap *direct, __u64 key)
 94{
 95	__u64 ptr;
 96
 97	ptr = nilfs_bmap_find_target_seq(direct, key);
 98	if (ptr != NILFS_BMAP_INVALID_PTR)
 99		/* sequential access */
100		return ptr;
101
102	/* block group */
103	return nilfs_bmap_find_target_in_group(direct);
104}
105
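/*
 * Insert a block at @key: choose an allocation target for virtual block
 * numbers, allocate the pointer, store it in the slot, and charge one
 * block to the inode.  @ptr carries the buffer head of the new block.
 */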
106static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
107{
108	union nilfs_bmap_ptr_req req;
109	struct inode *dat = NULL;
110	struct buffer_head *bh;
111	int ret;
112
113	if (key > NILFS_DIRECT_KEY_MAX)
114		return -ENOENT;
115	if (nilfs_direct_get_ptr(bmap, key) != NILFS_BMAP_INVALID_PTR)
116		return -EEXIST;
117
118	if (NILFS_BMAP_USE_VBN(bmap)) {
119		req.bpr_ptr = nilfs_direct_find_target_v(bmap, key);
120		dat = nilfs_bmap_get_dat(bmap);
121	}
122	ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat);
123	if (!ret) {
124		/* ptr must be a pointer to a buffer head. */
125		bh = (struct buffer_head *)((unsigned long)ptr);
126		set_buffer_nilfs_volatile(bh);
127
128		nilfs_bmap_commit_alloc_ptr(bmap, &req, dat);
129		nilfs_direct_set_ptr(bmap, key, req.bpr_ptr);
130
131		if (!nilfs_bmap_dirty(bmap))
132			nilfs_bmap_set_dirty(bmap);
133
134		if (NILFS_BMAP_USE_VBN(bmap))
135			nilfs_bmap_set_target_v(bmap, key, req.bpr_ptr);
136
137		nilfs_inode_add_blocks(bmap->b_inode, 1);
138	}
139	return ret;
140}
141
142static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key)
143{
144	union nilfs_bmap_ptr_req req;
145	struct inode *dat;
146	int ret;
147
148	if (key > NILFS_DIRECT_KEY_MAX ||
149	    nilfs_direct_get_ptr(bmap, key) == NILFS_BMAP_INVALID_PTR)
150		return -ENOENT;
151
152	dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL;
153	req.bpr_ptr = nilfs_direct_get_ptr(bmap, key);
154
155	ret = nilfs_bmap_prepare_end_ptr(bmap, &req, dat);
156	if (!ret) {
157		nilfs_bmap_commit_end_ptr(bmap, &req, dat);
158		nilfs_direct_set_ptr(bmap, key, NILFS_BMAP_INVALID_PTR);
159		nilfs_inode_sub_blocks(bmap->b_inode, 1);
160	}
161	return ret;
162}
163
164static int nilfs_direct_seek_key(const struct nilfs_bmap *direct, __u64 start,
165				 __u64 *keyp)
166{
167	__u64 key;
168
169	for (key = start; key <= NILFS_DIRECT_KEY_MAX; key++) {
170		if (nilfs_direct_get_ptr(direct, key) !=
171		    NILFS_BMAP_INVALID_PTR) {
172			*keyp = key;
173			return 0;
174		}
175	}
176	return -ENOENT;
177}
178
179static int nilfs_direct_last_key(const struct nilfs_bmap *direct, __u64 *keyp)
180{
181	__u64 key, lastkey;
182
183	lastkey = NILFS_DIRECT_KEY_MAX + 1;
184	for (key = NILFS_DIRECT_KEY_MIN; key <= NILFS_DIRECT_KEY_MAX; key++)
185		if (nilfs_direct_get_ptr(direct, key) !=
186		    NILFS_BMAP_INVALID_PTR)
187			lastkey = key;
188
189	if (lastkey == NILFS_DIRECT_KEY_MAX + 1)
190		return -ENOENT;
191
192	*keyp = lastkey;
193
194	return 0;
195}
196
197static int nilfs_direct_check_insert(const struct nilfs_bmap *bmap, __u64 key)
198{
199	return key > NILFS_DIRECT_KEY_MAX;
200}
201
202static int nilfs_direct_gather_data(struct nilfs_bmap *direct,
203				    __u64 *keys, __u64 *ptrs, int nitems)
204{
205	__u64 key;
206	__u64 ptr;
207	int n;
208
209	if (nitems > NILFS_DIRECT_NBLOCKS)
210		nitems = NILFS_DIRECT_NBLOCKS;
211	n = 0;
212	for (key = 0; key < nitems; key++) {
213		ptr = nilfs_direct_get_ptr(direct, key);
214		if (ptr != NILFS_BMAP_INVALID_PTR) {
215			keys[n] = key;
216			ptrs[n] = ptr;
217			n++;
218		}
219	}
220	return n;
221}
222
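/*
 * Convert the bmap back to a direct mapping: delete @key through the
 * bmap's current operations, release their resources, rebuild the direct
 * pointer array from the gathered @keys/@ptrs (leaving the slot for @key
 * empty), and switch b_ops to nilfs_direct_ops.
 */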
223int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap,
224				    __u64 key, __u64 *keys, __u64 *ptrs, int n)
225{
226	__le64 *dptrs;
227	int ret, i, j;
228
229	/* no need to allocate any resource for conversion */
230
231	/* delete */
232	ret = bmap->b_ops->bop_delete(bmap, key);
233	if (ret < 0)
234		return ret;
235
236	/* free resources */
237	if (bmap->b_ops->bop_clear != NULL)
238		bmap->b_ops->bop_clear(bmap);
239
240	/* convert */
241	dptrs = nilfs_direct_dptrs(bmap);
242	for (i = 0, j = 0; i < NILFS_DIRECT_NBLOCKS; i++) {
243		if ((j < n) && (i == keys[j])) {
244			dptrs[i] = (i != key) ?
245				cpu_to_le64(ptrs[j]) :
246				NILFS_BMAP_INVALID_PTR;
247			j++;
248		} else
249			dptrs[i] = NILFS_BMAP_INVALID_PTR;
250	}
251
252	nilfs_direct_init(bmap);
253	return 0;
254}
255
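/*
 * Propagate dirtiness of data buffer @bh: for bmaps using virtual block
 * numbers, assign a new DAT entry to the block on its first write (and
 * record it in the slot), or simply mark the existing DAT entry dirty if
 * the buffer is already volatile.
 */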
256static int nilfs_direct_propagate(struct nilfs_bmap *bmap,
257				  struct buffer_head *bh)
258{
259	struct nilfs_palloc_req oldreq, newreq;
260	struct inode *dat;
261	__u64 key;
262	__u64 ptr;
263	int ret;
264
265	if (!NILFS_BMAP_USE_VBN(bmap))
266		return 0;
267
268	dat = nilfs_bmap_get_dat(bmap);
269	key = nilfs_bmap_data_get_key(bmap, bh);
270	ptr = nilfs_direct_get_ptr(bmap, key);
271	if (!buffer_nilfs_volatile(bh)) {
272		oldreq.pr_entry_nr = ptr;
273		newreq.pr_entry_nr = ptr;
274		ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
275		if (ret < 0)
276			return ret;
277		nilfs_dat_commit_update(dat, &oldreq, &newreq,
278					bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
279		set_buffer_nilfs_volatile(bh);
280		nilfs_direct_set_ptr(bmap, key, newreq.pr_entry_nr);
281	} else
282		ret = nilfs_dat_mark_dirty(dat, ptr);
283
284	return ret;
285}
286
287static int nilfs_direct_assign_v(struct nilfs_bmap *direct,
288				 __u64 key, __u64 ptr,
289				 struct buffer_head **bh,
290				 sector_t blocknr,
291				 union nilfs_binfo *binfo)
292{
293	struct inode *dat = nilfs_bmap_get_dat(direct);
294	union nilfs_bmap_ptr_req req;
295	int ret;
296
297	req.bpr_ptr = ptr;
298	ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
299	if (!ret) {
300		nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);
301		binfo->bi_v.bi_vblocknr = cpu_to_le64(ptr);
302		binfo->bi_v.bi_blkoff = cpu_to_le64(key);
303	}
304	return ret;
305}
306
307static int nilfs_direct_assign_p(struct nilfs_bmap *direct,
308				 __u64 key, __u64 ptr,
309				 struct buffer_head **bh,
310				 sector_t blocknr,
311				 union nilfs_binfo *binfo)
312{
313	nilfs_direct_set_ptr(direct, key, blocknr);
314
315	binfo->bi_dat.bi_blkoff = cpu_to_le64(key);
316	binfo->bi_dat.bi_level = 0;
317	memset(binfo->bi_dat.bi_pad, 0, sizeof(binfo->bi_dat.bi_pad));
318
319	return 0;
320}
321
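/*
 * Bind the block behind *@bh to its final disk location @blocknr and
 * fill in @binfo, going through the DAT for virtual block numbers or
 * writing @blocknr straight into the slot otherwise.
 */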
322static int nilfs_direct_assign(struct nilfs_bmap *bmap,
323			       struct buffer_head **bh,
324			       sector_t blocknr,
325			       union nilfs_binfo *binfo)
326{
327	__u64 key;
328	__u64 ptr;
329
330	key = nilfs_bmap_data_get_key(bmap, *bh);
331	if (unlikely(key > NILFS_DIRECT_KEY_MAX)) {
332		nilfs_crit(bmap->b_inode->i_sb,
333			   "%s (ino=%lu): invalid key: %llu",
334			   __func__,
335			   bmap->b_inode->i_ino, (unsigned long long)key);
336		return -EINVAL;
337	}
338	ptr = nilfs_direct_get_ptr(bmap, key);
339	if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) {
340		nilfs_crit(bmap->b_inode->i_sb,
341			   "%s (ino=%lu): invalid pointer: %llu",
342			   __func__,
343			   bmap->b_inode->i_ino, (unsigned long long)ptr);
344		return -EINVAL;
345	}
346
347	return NILFS_BMAP_USE_VBN(bmap) ?
348		nilfs_direct_assign_v(bmap, key, ptr, bh, blocknr, binfo) :
349		nilfs_direct_assign_p(bmap, key, ptr, bh, blocknr, binfo);
350}
351
352static const struct nilfs_bmap_operations nilfs_direct_ops = {
353	.bop_lookup		=	nilfs_direct_lookup,
354	.bop_lookup_contig	=	nilfs_direct_lookup_contig,
355	.bop_insert		=	nilfs_direct_insert,
356	.bop_delete		=	nilfs_direct_delete,
357	.bop_clear		=	NULL,
358
359	.bop_propagate		=	nilfs_direct_propagate,
360
361	.bop_lookup_dirty_buffers	=	NULL,
362
363	.bop_assign		=	nilfs_direct_assign,
364	.bop_mark		=	NULL,
365
366	.bop_seek_key		=	nilfs_direct_seek_key,
367	.bop_last_key		=	nilfs_direct_last_key,
368
369	.bop_check_insert	=	nilfs_direct_check_insert,
370	.bop_check_delete	=	NULL,
371	.bop_gather_data	=	nilfs_direct_gather_data,
372};
373
374
375int nilfs_direct_init(struct nilfs_bmap *bmap)
376{
377	bmap->b_ops = &nilfs_direct_ops;
378	return 0;
379}
direct.c (Linux v5.9)
  1// SPDX-License-Identifier: GPL-2.0+
  2/*
  3 * direct.c - NILFS direct block pointer.
  4 *
  5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  6 *
  7 * Written by Koji Sato.
  8 */
  9
 10#include <linux/errno.h>
 11#include "nilfs.h"
 12#include "page.h"
 13#include "direct.h"
 14#include "alloc.h"
 15#include "dat.h"
 16
 17static inline __le64 *nilfs_direct_dptrs(const struct nilfs_bmap *direct)
 18{
 19	return (__le64 *)
 20		((struct nilfs_direct_node *)direct->b_u.u_data + 1);
 21}
 22
 23static inline __u64
 24nilfs_direct_get_ptr(const struct nilfs_bmap *direct, __u64 key)
 25{
 26	return le64_to_cpu(*(nilfs_direct_dptrs(direct) + key));
 27}
 28
 29static inline void nilfs_direct_set_ptr(struct nilfs_bmap *direct,
 30					__u64 key, __u64 ptr)
 31{
 32	*(nilfs_direct_dptrs(direct) + key) = cpu_to_le64(ptr);
 33}
 34
 35static int nilfs_direct_lookup(const struct nilfs_bmap *direct,
 36			       __u64 key, int level, __u64 *ptrp)
 37{
 38	__u64 ptr;
 39
 40	if (key > NILFS_DIRECT_KEY_MAX || level != 1)
 41		return -ENOENT;
 42	ptr = nilfs_direct_get_ptr(direct, key);
 43	if (ptr == NILFS_BMAP_INVALID_PTR)
 44		return -ENOENT;
 45
 46	*ptrp = ptr;
 47	return 0;
 48}
 49
 50static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
 51				      __u64 key, __u64 *ptrp,
 52				      unsigned int maxblocks)
 53{
 54	struct inode *dat = NULL;
 55	__u64 ptr, ptr2;
 56	sector_t blocknr;
 57	int ret, cnt;
 58
 59	if (key > NILFS_DIRECT_KEY_MAX)
 60		return -ENOENT;
 61	ptr = nilfs_direct_get_ptr(direct, key);
 62	if (ptr == NILFS_BMAP_INVALID_PTR)
 63		return -ENOENT;
 64
 65	if (NILFS_BMAP_USE_VBN(direct)) {
 66		dat = nilfs_bmap_get_dat(direct);
 67		ret = nilfs_dat_translate(dat, ptr, &blocknr);
 68		if (ret < 0)
 69			return ret;
 70		ptr = blocknr;
 71	}
 72
 73	maxblocks = min_t(unsigned int, maxblocks,
 74			  NILFS_DIRECT_KEY_MAX - key + 1);
 75	for (cnt = 1; cnt < maxblocks &&
 76		     (ptr2 = nilfs_direct_get_ptr(direct, key + cnt)) !=
 77		     NILFS_BMAP_INVALID_PTR;
 78	     cnt++) {
 79		if (dat) {
 80			ret = nilfs_dat_translate(dat, ptr2, &blocknr);
 81			if (ret < 0)
 82				return ret;
 83			ptr2 = blocknr;
 84		}
 85		if (ptr2 != ptr + cnt)
 86			break;
 87	}
 88	*ptrp = ptr;
 89	return cnt;
 90}
 91
 92static __u64
 93nilfs_direct_find_target_v(const struct nilfs_bmap *direct, __u64 key)
 94{
 95	__u64 ptr;
 96
 97	ptr = nilfs_bmap_find_target_seq(direct, key);
 98	if (ptr != NILFS_BMAP_INVALID_PTR)
 99		/* sequential access */
100		return ptr;
101
102	/* block group */
103	return nilfs_bmap_find_target_in_group(direct);
104}
105
106static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
107{
108	union nilfs_bmap_ptr_req req;
109	struct inode *dat = NULL;
110	struct buffer_head *bh;
111	int ret;
112
113	if (key > NILFS_DIRECT_KEY_MAX)
114		return -ENOENT;
115	if (nilfs_direct_get_ptr(bmap, key) != NILFS_BMAP_INVALID_PTR)
116		return -EEXIST;
117
118	if (NILFS_BMAP_USE_VBN(bmap)) {
119		req.bpr_ptr = nilfs_direct_find_target_v(bmap, key);
120		dat = nilfs_bmap_get_dat(bmap);
121	}
122	ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat);
123	if (!ret) {
124		/* ptr must be a pointer to a buffer head. */
125		bh = (struct buffer_head *)((unsigned long)ptr);
126		set_buffer_nilfs_volatile(bh);
127
128		nilfs_bmap_commit_alloc_ptr(bmap, &req, dat);
129		nilfs_direct_set_ptr(bmap, key, req.bpr_ptr);
130
131		if (!nilfs_bmap_dirty(bmap))
132			nilfs_bmap_set_dirty(bmap);
133
134		if (NILFS_BMAP_USE_VBN(bmap))
135			nilfs_bmap_set_target_v(bmap, key, req.bpr_ptr);
136
137		nilfs_inode_add_blocks(bmap->b_inode, 1);
138	}
139	return ret;
140}
141
142static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key)
143{
144	union nilfs_bmap_ptr_req req;
145	struct inode *dat;
146	int ret;
147
148	if (key > NILFS_DIRECT_KEY_MAX ||
149	    nilfs_direct_get_ptr(bmap, key) == NILFS_BMAP_INVALID_PTR)
150		return -ENOENT;
151
152	dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL;
153	req.bpr_ptr = nilfs_direct_get_ptr(bmap, key);
154
155	ret = nilfs_bmap_prepare_end_ptr(bmap, &req, dat);
156	if (!ret) {
157		nilfs_bmap_commit_end_ptr(bmap, &req, dat);
158		nilfs_direct_set_ptr(bmap, key, NILFS_BMAP_INVALID_PTR);
159		nilfs_inode_sub_blocks(bmap->b_inode, 1);
160	}
161	return ret;
162}
163
164static int nilfs_direct_seek_key(const struct nilfs_bmap *direct, __u64 start,
165				 __u64 *keyp)
166{
167	__u64 key;
168
169	for (key = start; key <= NILFS_DIRECT_KEY_MAX; key++) {
170		if (nilfs_direct_get_ptr(direct, key) !=
171		    NILFS_BMAP_INVALID_PTR) {
172			*keyp = key;
173			return 0;
174		}
175	}
176	return -ENOENT;
177}
178
179static int nilfs_direct_last_key(const struct nilfs_bmap *direct, __u64 *keyp)
180{
181	__u64 key, lastkey;
182
183	lastkey = NILFS_DIRECT_KEY_MAX + 1;
184	for (key = NILFS_DIRECT_KEY_MIN; key <= NILFS_DIRECT_KEY_MAX; key++)
185		if (nilfs_direct_get_ptr(direct, key) !=
186		    NILFS_BMAP_INVALID_PTR)
187			lastkey = key;
188
189	if (lastkey == NILFS_DIRECT_KEY_MAX + 1)
190		return -ENOENT;
191
192	*keyp = lastkey;
193
194	return 0;
195}
196
197static int nilfs_direct_check_insert(const struct nilfs_bmap *bmap, __u64 key)
198{
199	return key > NILFS_DIRECT_KEY_MAX;
200}
201
202static int nilfs_direct_gather_data(struct nilfs_bmap *direct,
203				    __u64 *keys, __u64 *ptrs, int nitems)
204{
205	__u64 key;
206	__u64 ptr;
207	int n;
208
209	if (nitems > NILFS_DIRECT_NBLOCKS)
210		nitems = NILFS_DIRECT_NBLOCKS;
211	n = 0;
212	for (key = 0; key < nitems; key++) {
213		ptr = nilfs_direct_get_ptr(direct, key);
214		if (ptr != NILFS_BMAP_INVALID_PTR) {
215			keys[n] = key;
216			ptrs[n] = ptr;
217			n++;
218		}
219	}
220	return n;
221}
222
223int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap,
224				    __u64 key, __u64 *keys, __u64 *ptrs, int n)
225{
226	__le64 *dptrs;
227	int ret, i, j;
228
229	/* no need to allocate any resource for conversion */
230
231	/* delete */
232	ret = bmap->b_ops->bop_delete(bmap, key);
233	if (ret < 0)
234		return ret;
235
236	/* free resources */
237	if (bmap->b_ops->bop_clear != NULL)
238		bmap->b_ops->bop_clear(bmap);
239
240	/* convert */
241	dptrs = nilfs_direct_dptrs(bmap);
242	for (i = 0, j = 0; i < NILFS_DIRECT_NBLOCKS; i++) {
243		if ((j < n) && (i == keys[j])) {
244			dptrs[i] = (i != key) ?
245				cpu_to_le64(ptrs[j]) :
246				NILFS_BMAP_INVALID_PTR;
247			j++;
248		} else
249			dptrs[i] = NILFS_BMAP_INVALID_PTR;
250	}
251
252	nilfs_direct_init(bmap);
253	return 0;
254}
255
256static int nilfs_direct_propagate(struct nilfs_bmap *bmap,
257				  struct buffer_head *bh)
258{
259	struct nilfs_palloc_req oldreq, newreq;
260	struct inode *dat;
261	__u64 key;
262	__u64 ptr;
263	int ret;
264
265	if (!NILFS_BMAP_USE_VBN(bmap))
266		return 0;
267
268	dat = nilfs_bmap_get_dat(bmap);
269	key = nilfs_bmap_data_get_key(bmap, bh);
270	ptr = nilfs_direct_get_ptr(bmap, key);
271	if (!buffer_nilfs_volatile(bh)) {
272		oldreq.pr_entry_nr = ptr;
273		newreq.pr_entry_nr = ptr;
274		ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
275		if (ret < 0)
276			return ret;
277		nilfs_dat_commit_update(dat, &oldreq, &newreq,
278					bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
279		set_buffer_nilfs_volatile(bh);
280		nilfs_direct_set_ptr(bmap, key, newreq.pr_entry_nr);
281	} else
282		ret = nilfs_dat_mark_dirty(dat, ptr);
283
284	return ret;
285}
286
287static int nilfs_direct_assign_v(struct nilfs_bmap *direct,
288				 __u64 key, __u64 ptr,
289				 struct buffer_head **bh,
290				 sector_t blocknr,
291				 union nilfs_binfo *binfo)
292{
293	struct inode *dat = nilfs_bmap_get_dat(direct);
294	union nilfs_bmap_ptr_req req;
295	int ret;
296
297	req.bpr_ptr = ptr;
298	ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
299	if (!ret) {
300		nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);
301		binfo->bi_v.bi_vblocknr = cpu_to_le64(ptr);
302		binfo->bi_v.bi_blkoff = cpu_to_le64(key);
303	}
304	return ret;
305}
306
307static int nilfs_direct_assign_p(struct nilfs_bmap *direct,
308				 __u64 key, __u64 ptr,
309				 struct buffer_head **bh,
310				 sector_t blocknr,
311				 union nilfs_binfo *binfo)
312{
313	nilfs_direct_set_ptr(direct, key, blocknr);
314
315	binfo->bi_dat.bi_blkoff = cpu_to_le64(key);
316	binfo->bi_dat.bi_level = 0;
317
318	return 0;
319}
320
321static int nilfs_direct_assign(struct nilfs_bmap *bmap,
322			       struct buffer_head **bh,
323			       sector_t blocknr,
324			       union nilfs_binfo *binfo)
325{
326	__u64 key;
327	__u64 ptr;
328
329	key = nilfs_bmap_data_get_key(bmap, *bh);
330	if (unlikely(key > NILFS_DIRECT_KEY_MAX)) {
331		nilfs_crit(bmap->b_inode->i_sb,
332			   "%s (ino=%lu): invalid key: %llu",
333			   __func__,
334			   bmap->b_inode->i_ino, (unsigned long long)key);
335		return -EINVAL;
336	}
337	ptr = nilfs_direct_get_ptr(bmap, key);
338	if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) {
339		nilfs_crit(bmap->b_inode->i_sb,
340			   "%s (ino=%lu): invalid pointer: %llu",
341			   __func__,
342			   bmap->b_inode->i_ino, (unsigned long long)ptr);
343		return -EINVAL;
344	}
345
346	return NILFS_BMAP_USE_VBN(bmap) ?
347		nilfs_direct_assign_v(bmap, key, ptr, bh, blocknr, binfo) :
348		nilfs_direct_assign_p(bmap, key, ptr, bh, blocknr, binfo);
349}
350
351static const struct nilfs_bmap_operations nilfs_direct_ops = {
352	.bop_lookup		=	nilfs_direct_lookup,
353	.bop_lookup_contig	=	nilfs_direct_lookup_contig,
354	.bop_insert		=	nilfs_direct_insert,
355	.bop_delete		=	nilfs_direct_delete,
356	.bop_clear		=	NULL,
357
358	.bop_propagate		=	nilfs_direct_propagate,
359
360	.bop_lookup_dirty_buffers	=	NULL,
361
362	.bop_assign		=	nilfs_direct_assign,
363	.bop_mark		=	NULL,
364
365	.bop_seek_key		=	nilfs_direct_seek_key,
366	.bop_last_key		=	nilfs_direct_last_key,
367
368	.bop_check_insert	=	nilfs_direct_check_insert,
369	.bop_check_delete	=	NULL,
370	.bop_gather_data	=	nilfs_direct_gather_data,
371};
372
373
374int nilfs_direct_init(struct nilfs_bmap *bmap)
375{
376	bmap->b_ops = &nilfs_direct_ops;
377	return 0;
378}
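
The data structure behind both listings is a fixed-size array of 64-bit block pointers stored in the bmap's inline area, one slot per file block offset up to NILFS_DIRECT_KEY_MAX. Below is a minimal, self-contained userspace sketch of that layout and of the lookup logic mirrored by nilfs_direct_lookup(); the constant values, the names, and the use of plain host-endian integers are illustrative assumptions, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define MODEL_DIRECT_NBLOCKS	7	/* assumed array size, for illustration only */
#define MODEL_INVALID_PTR	0	/* assumed "empty slot" marker */

/* One header slot followed by the pointer array, as in b_u.u_data above. */
struct model_direct {
	uint64_t header;
	uint64_t dptrs[MODEL_DIRECT_NBLOCKS];
};

/* Mirrors nilfs_direct_lookup(): reject out-of-range keys and empty slots. */
static int model_direct_lookup(const struct model_direct *d, uint64_t key,
			       uint64_t *ptrp)
{
	if (key >= MODEL_DIRECT_NBLOCKS)
		return -1;
	if (d->dptrs[key] == MODEL_INVALID_PTR)
		return -1;
	*ptrp = d->dptrs[key];
	return 0;
}

int main(void)
{
	struct model_direct d = { .dptrs = { [2] = 12345 } };
	uint64_t ptr;

	if (model_direct_lookup(&d, 2, &ptr) == 0)
		printf("key 2 -> block %llu\n", (unsigned long long)ptr);
	if (model_direct_lookup(&d, 3, &ptr) != 0)
		printf("key 3 is not mapped\n");
	return 0;
}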