// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS disk address translation.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Koji Sato.
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "nilfs.h"
#include "mdt.h"
#include "alloc.h"
#include "dat.h"


#define NILFS_CNO_MIN	((__u64)1)
#define NILFS_CNO_MAX	(~(__u64)0)

/**
 * struct nilfs_dat_info - on-memory private data of DAT file
 * @mi: on-memory private data of metadata file
 * @palloc_cache: persistent object allocator cache of DAT file
 * @shadow: shadow map of DAT file
 */
struct nilfs_dat_info {
	struct nilfs_mdt_info mi;
	struct nilfs_palloc_cache palloc_cache;
	struct nilfs_shadow_map shadow;
};

static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat)
{
	return (struct nilfs_dat_info *)NILFS_MDT(dat);
}

static int nilfs_dat_prepare_entry(struct inode *dat,
				   struct nilfs_palloc_req *req, int create)
{
	return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
					    create, &req->pr_entry_bh);
}

static void nilfs_dat_commit_entry(struct inode *dat,
				   struct nilfs_palloc_req *req)
{
	mark_buffer_dirty(req->pr_entry_bh);
	nilfs_mdt_mark_dirty(dat);
	brelse(req->pr_entry_bh);
}

static void nilfs_dat_abort_entry(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	brelse(req->pr_entry_bh);
}

int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_palloc_prepare_alloc_entry(dat, req);
	if (ret < 0)
		return ret;

	ret = nilfs_dat_prepare_entry(dat, req, 1);
	if (ret < 0)
		nilfs_palloc_abort_alloc_entry(dat, req);

	return ret;
}

void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr);

	nilfs_palloc_commit_alloc_entry(dat, req);
	nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
	nilfs_palloc_abort_alloc_entry(dat, req);
}

static void nilfs_dat_commit_free(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr);

	nilfs_dat_commit_entry(dat, req);

	if (unlikely(req->pr_desc_bh == NULL || req->pr_bitmap_bh == NULL)) {
		nilfs_error(dat->i_sb,
			    "state inconsistency probably due to duplicate use of vblocknr = %llu",
			    (unsigned long long)req->pr_entry_nr);
		return;
	}
	nilfs_palloc_commit_free_entry(dat, req);
}

int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	WARN_ON(ret == -ENOENT);
	return ret;
}

void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
			    sector_t blocknr)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr);

	nilfs_dat_commit_entry(dat, req);
}

int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	if (ret < 0) {
		WARN_ON(ret == -ENOENT);
		return ret;
	}

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (blocknr == 0) {
		ret = nilfs_palloc_prepare_free_entry(dat, req);
		if (ret < 0) {
			nilfs_dat_abort_entry(dat, req);
			return ret;
		}
	}

	return 0;
}

void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
			  int dead)
{
	struct nilfs_dat_entry *entry;
	__u64 start, end;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	end = start = le64_to_cpu(entry->de_start);
	if (!dead) {
		end = nilfs_mdt_cno(dat);
		WARN_ON(start > end);
	}
	entry->de_end = cpu_to_le64(end);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (blocknr == 0)
		nilfs_dat_commit_free(dat, req);
	else
		nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (start == nilfs_mdt_cno(dat) && blocknr == 0)
		nilfs_palloc_abort_free_entry(dat, req);
	nilfs_dat_abort_entry(dat, req);
}

int nilfs_dat_prepare_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq)
{
	int ret;

	ret = nilfs_dat_prepare_end(dat, oldreq);
	if (!ret) {
		ret = nilfs_dat_prepare_alloc(dat, newreq);
		if (ret < 0)
			nilfs_dat_abort_end(dat, oldreq);
	}
	return ret;
}

void nilfs_dat_commit_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq, int dead)
{
	nilfs_dat_commit_end(dat, oldreq, dead);
	nilfs_dat_commit_alloc(dat, newreq);
}

void nilfs_dat_abort_update(struct inode *dat,
			    struct nilfs_palloc_req *oldreq,
			    struct nilfs_palloc_req *newreq)
{
	nilfs_dat_abort_end(dat, oldreq);
	nilfs_dat_abort_alloc(dat, newreq);
}
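
/*
 * Usage sketch (not part of the original file): the update helpers above
 * follow the usual prepare/commit/abort protocol.  A hypothetical caller
 * that reassigns a virtual block number might look roughly like the lines
 * below; the names old_vblocknr, staging_ok and dead are illustrative
 * assumptions, not identifiers used in this file.
 *
 *	struct nilfs_palloc_req oldreq, newreq;
 *	int ret;
 *
 *	oldreq.pr_entry_nr = old_vblocknr;
 *	ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
 *	if (ret < 0)
 *		return ret;	(nothing has been staged yet)
 *
 *	(newreq.pr_entry_nr now holds the newly reserved vblocknr)
 *
 *	if (staging_ok)
 *		nilfs_dat_commit_update(dat, &oldreq, &newreq, dead);
 *	else
 *		nilfs_dat_abort_update(dat, &oldreq, &newreq);
 */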

/**
 * nilfs_dat_mark_dirty - mark the DAT entry block of a virtual block dirty
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 *
 * Description: nilfs_dat_mark_dirty() reads the block of the DAT file that
 * holds the entry of @vblocknr and marks it dirty so that the entry is
 * written back by the next metadata writeback.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_entry(dat, &req, 0);
	if (ret == 0)
		nilfs_dat_commit_entry(dat, &req);
	return ret;
}

/**
 * nilfs_dat_freev - free virtual block numbers
 * @dat: DAT file inode
 * @vblocknrs: array of virtual block numbers
 * @nitems: number of virtual block numbers
 *
 * Description: nilfs_dat_freev() frees the virtual block numbers specified
 * by @vblocknrs and @nitems.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - The virtual block numbers have not been allocated.
 */
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
	return nilfs_palloc_freev(dat, vblocknrs, nitems);
}

/**
 * nilfs_dat_move - change a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknr: block number
 *
 * Description: nilfs_dat_move() changes the block number associated with
 * @vblocknr to @blocknr.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	/*
	 * The given disk block number (blocknr) is not yet written to
	 * the device at this point.
	 *
	 * To prevent nilfs_dat_translate() from returning the
	 * uncommitted block number, this makes a copy of the entry
	 * buffer and redirects nilfs_dat_translate() to the copy.
	 */
	if (!buffer_nilfs_redirected(entry_bh)) {
		ret = nilfs_mdt_freeze_buffer(dat, entry_bh);
		if (ret) {
			brelse(entry_bh);
			return ret;
		}
	}

	kaddr = kmap_atomic(entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
		nilfs_crit(dat->i_sb,
			   "%s: invalid vblocknr = %llu, [%llu, %llu)",
			   __func__, (unsigned long long)vblocknr,
			   (unsigned long long)le64_to_cpu(entry->de_start),
			   (unsigned long long)le64_to_cpu(entry->de_end));
		kunmap_atomic(kaddr);
		brelse(entry_bh);
		return -EINVAL;
	}
	WARN_ON(blocknr == 0);
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(entry_bh);
	nilfs_mdt_mark_dirty(dat);

	brelse(entry_bh);

	return 0;
}

/**
 * nilfs_dat_translate - translate a virtual block number to a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknrp: pointer to a block number
 *
 * Description: nilfs_dat_translate() maps the virtual block number @vblocknr
 * to the corresponding block number.
 *
 * Return Value: On success, 0 is returned and the block number associated
 * with @vblocknr is stored in the place pointed by @blocknrp. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - A block number associated with @vblocknr does not exist.
 */
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
	struct buffer_head *entry_bh, *bh;
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	if (!nilfs_doing_gc() && buffer_nilfs_redirected(entry_bh)) {
		bh = nilfs_mdt_get_frozen_buffer(dat, entry_bh);
		if (bh) {
			WARN_ON(!buffer_uptodate(bh));
			brelse(entry_bh);
			entry_bh = bh;
		}
	}

	kaddr = kmap_atomic(entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	if (blocknr == 0) {
		ret = -ENOENT;
		goto out;
	}
	*blocknrp = blocknr;

 out:
	kunmap_atomic(kaddr);
	brelse(entry_bh);
	return ret;
}
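
/*
 * Usage sketch (assumed caller, not code from this file): resolving a
 * virtual block number to a physical block before issuing a read.
 *
 *	sector_t pbn;
 *	int err;
 *
 *	err = nilfs_dat_translate(dat, vblocknr, &pbn);
 *	if (err < 0)
 *		return err;	(-ENOENT: nothing is mapped to vblocknr)
 *	(submit I/O against pbn)
 */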

ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned int visz,
			    size_t nvi)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	struct nilfs_vinfo *vinfo = buf;
	__u64 first, last;
	void *kaddr;
	unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
	int i, j, n, ret;

	for (i = 0; i < nvi; i += n) {
		ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr,
						   0, &entry_bh);
		if (ret < 0)
			return ret;
		kaddr = kmap_atomic(entry_bh->b_page);
		/* last virtual block number in this block */
		first = vinfo->vi_vblocknr;
		do_div(first, entries_per_block);
		first *= entries_per_block;
		last = first + entries_per_block - 1;
		for (j = i, n = 0;
		     j < nvi && vinfo->vi_vblocknr >= first &&
			     vinfo->vi_vblocknr <= last;
		     j++, n++, vinfo = (void *)vinfo + visz) {
			entry = nilfs_palloc_block_get_entry(
				dat, vinfo->vi_vblocknr, entry_bh, kaddr);
			vinfo->vi_start = le64_to_cpu(entry->de_start);
			vinfo->vi_end = le64_to_cpu(entry->de_end);
			vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
		}
		kunmap_atomic(kaddr);
		brelse(entry_bh);
	}

	return nvi;
}
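
/*
 * Usage sketch (hypothetical caller, not part of this file): filling an
 * array of struct nilfs_vinfo.  Callers set vi_vblocknr in each element;
 * nilfs_dat_get_vinfo() fills in the lifetime and block number fields,
 * reading one DAT entry block per group of neighbouring entries.
 *
 *	struct nilfs_vinfo vinfo[NITEMS];
 *	ssize_t n;
 *	size_t k;
 *
 *	for (k = 0; k < NITEMS; k++)
 *		vinfo[k].vi_vblocknr = vblocknrs[k];
 *	n = nilfs_dat_get_vinfo(dat, vinfo, sizeof(vinfo[0]), NITEMS);
 *	if (n < 0)
 *		return n;
 */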

/**
 * nilfs_dat_read - read or get dat inode
 * @sb: super block instance
 * @entry_size: size of a dat entry
 * @raw_inode: on-disk dat inode
 * @inodep: buffer to store the inode
 */
int nilfs_dat_read(struct super_block *sb, size_t entry_size,
		   struct nilfs_inode *raw_inode, struct inode **inodep)
{
	static struct lock_class_key dat_lock_key;
	struct inode *dat;
	struct nilfs_dat_info *di;
	int err;

	if (entry_size > sb->s_blocksize) {
		nilfs_err(sb, "too large DAT entry size: %zu bytes",
			  entry_size);
		return -EINVAL;
	} else if (entry_size < NILFS_MIN_DAT_ENTRY_SIZE) {
		nilfs_err(sb, "too small DAT entry size: %zu bytes",
			  entry_size);
		return -EINVAL;
	}

	dat = nilfs_iget_locked(sb, NULL, NILFS_DAT_INO);
	if (unlikely(!dat))
		return -ENOMEM;
	if (!(dat->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(dat, NILFS_MDT_GFP, sizeof(*di));
	if (err)
		goto failed;

	err = nilfs_palloc_init_blockgroup(dat, entry_size);
	if (err)
		goto failed;

	di = NILFS_DAT_I(dat);
	lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
	nilfs_palloc_setup_cache(dat, &di->palloc_cache);
	err = nilfs_mdt_setup_shadow_map(dat, &di->shadow);
	if (err)
		goto failed;

	err = nilfs_read_inode_common(dat, raw_inode);
	if (err)
		goto failed;

	unlock_new_inode(dat);
 out:
	*inodep = dat;
	return 0;
 failed:
	iget_failed(dat);
	return err;
}
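
/*
 * Usage sketch (mount-time caller assumed for illustration, not code from
 * this file): the DAT inode is typically instantiated once while loading
 * the filesystem.  Here dat_entry_size and raw_dat_inode stand for the
 * entry size taken from the superblock and the on-disk DAT inode taken
 * from the super root, respectively; both names are placeholders.
 *
 *	struct inode *dat;
 *	int err;
 *
 *	err = nilfs_dat_read(sb, dat_entry_size, raw_dat_inode, &dat);
 *	if (err)
 *		return err;
 *	(dat is then used for later nilfs_dat_translate() calls)
 */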