1/*
2 * cpfile.c - NILFS checkpoint file.
3 *
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Koji Sato.
17 */
18
19#include <linux/kernel.h>
20#include <linux/fs.h>
21#include <linux/string.h>
22#include <linux/buffer_head.h>
23#include <linux/errno.h>
24#include "mdt.h"
25#include "cpfile.h"
26
27
28static inline unsigned long
29nilfs_cpfile_checkpoints_per_block(const struct inode *cpfile)
30{
31 return NILFS_MDT(cpfile)->mi_entries_per_block;
32}
33
34/* block number from the beginning of the file */
35static unsigned long
36nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno)
37{
38 __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
39
40 do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
41 return (unsigned long)tcno;
42}
43
44/* offset in block */
45static unsigned long
46nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno)
47{
48 __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
49
50 return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
51}
52
53static __u64 nilfs_cpfile_first_checkpoint_in_block(const struct inode *cpfile,
54 unsigned long blkoff)
55{
56 return (__u64)nilfs_cpfile_checkpoints_per_block(cpfile) * blkoff
57 + 1 - NILFS_MDT(cpfile)->mi_first_entry_offset;
58}
59
60static unsigned long
61nilfs_cpfile_checkpoints_in_block(const struct inode *cpfile,
62 __u64 curr,
63 __u64 max)
64{
65 return min_t(__u64,
66 nilfs_cpfile_checkpoints_per_block(cpfile) -
67 nilfs_cpfile_get_offset(cpfile, curr),
68 max - curr);
69}
70
71static inline int nilfs_cpfile_is_in_first(const struct inode *cpfile,
72 __u64 cno)
73{
74 return nilfs_cpfile_get_blkoff(cpfile, cno) == 0;
75}
76
77static unsigned int
78nilfs_cpfile_block_add_valid_checkpoints(const struct inode *cpfile,
79 struct buffer_head *bh,
80 void *kaddr,
81 unsigned int n)
82{
83 struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
84 unsigned int count;
85
86 count = le32_to_cpu(cp->cp_checkpoints_count) + n;
87 cp->cp_checkpoints_count = cpu_to_le32(count);
88 return count;
89}
90
91static unsigned int
92nilfs_cpfile_block_sub_valid_checkpoints(const struct inode *cpfile,
93 struct buffer_head *bh,
94 void *kaddr,
95 unsigned int n)
96{
97 struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
98 unsigned int count;
99
100 WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n);
101 count = le32_to_cpu(cp->cp_checkpoints_count) - n;
102 cp->cp_checkpoints_count = cpu_to_le32(count);
103 return count;
104}
105
/* The cpfile header is located at the very beginning of its buffer. */
static inline struct nilfs_cpfile_header *
nilfs_cpfile_block_get_header(const struct inode *cpfile,
			      struct buffer_head *bh,
			      void *kaddr)
{
	void *base = kaddr + bh_offset(bh);

	return base;
}
113
114static struct nilfs_checkpoint *
115nilfs_cpfile_block_get_checkpoint(const struct inode *cpfile, __u64 cno,
116 struct buffer_head *bh,
117 void *kaddr)
118{
119 return kaddr + bh_offset(bh) + nilfs_cpfile_get_offset(cpfile, cno) *
120 NILFS_MDT(cpfile)->mi_entry_size;
121}
122
123static void nilfs_cpfile_block_init(struct inode *cpfile,
124 struct buffer_head *bh,
125 void *kaddr)
126{
127 struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
128 size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
129 int n = nilfs_cpfile_checkpoints_per_block(cpfile);
130
131 while (n-- > 0) {
132 nilfs_checkpoint_set_invalid(cp);
133 cp = (void *)cp + cpsz;
134 }
135}
136
/*
 * Read the block holding the cpfile header (block 0).  The create flag
 * is 0, so a missing header block is reported rather than created.
 */
static inline int nilfs_cpfile_get_header_block(struct inode *cpfile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp);
}
142
143static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile,
144 __u64 cno,
145 int create,
146 struct buffer_head **bhp)
147{
148 return nilfs_mdt_get_block(cpfile,
149 nilfs_cpfile_get_blkoff(cpfile, cno),
150 create, nilfs_cpfile_block_init, bhp);
151}
152
153/**
154 * nilfs_cpfile_find_checkpoint_block - find and get a buffer on cpfile
155 * @cpfile: inode of cpfile
156 * @start_cno: start checkpoint number (inclusive)
157 * @end_cno: end checkpoint number (inclusive)
158 * @cnop: place to store the next checkpoint number
159 * @bhp: place to store a pointer to buffer_head struct
160 *
161 * Return Value: On success, it returns 0. On error, the following negative
162 * error code is returned.
163 *
164 * %-ENOMEM - Insufficient memory available.
165 *
166 * %-EIO - I/O error
167 *
168 * %-ENOENT - no block exists in the range.
169 */
170static int nilfs_cpfile_find_checkpoint_block(struct inode *cpfile,
171 __u64 start_cno, __u64 end_cno,
172 __u64 *cnop,
173 struct buffer_head **bhp)
174{
175 unsigned long start, end, blkoff;
176 int ret;
177
178 if (unlikely(start_cno > end_cno))
179 return -ENOENT;
180
181 start = nilfs_cpfile_get_blkoff(cpfile, start_cno);
182 end = nilfs_cpfile_get_blkoff(cpfile, end_cno);
183
184 ret = nilfs_mdt_find_block(cpfile, start, end, &blkoff, bhp);
185 if (!ret)
186 *cnop = (blkoff == start) ? start_cno :
187 nilfs_cpfile_first_checkpoint_in_block(cpfile, blkoff);
188 return ret;
189}
190
191static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile,
192 __u64 cno)
193{
194 return nilfs_mdt_delete_block(cpfile,
195 nilfs_cpfile_get_blkoff(cpfile, cno));
196}
197
/**
 * nilfs_cpfile_get_checkpoint - get a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @create: create flag
 * @cpp: pointer to a checkpoint
 * @bhp: pointer to a buffer head
 *
 * Description: nilfs_cpfile_get_checkpoint() acquires the checkpoint
 * specified by @cno. A new checkpoint will be created if @cno is the current
 * checkpoint number and @create is nonzero.
 *
 * Return Value: On success, 0 is returned, and the checkpoint and the
 * buffer head of the buffer on which the checkpoint is located are stored in
 * the place pointed by @cpp and @bhp, respectively. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 *
 * %-EINVAL - invalid checkpoint.
 */
int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
				__u64 cno,
				int create,
				struct nilfs_checkpoint **cpp,
				struct buffer_head **bhp)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	void *kaddr;
	int ret;

	/*
	 * Creation is only allowed at the current checkpoint number;
	 * lookups must stay within the range of existing numbers.
	 */
	if (unlikely(cno < 1 || cno > nilfs_mdt_cno(cpfile) ||
		     (cno < nilfs_mdt_cno(cpfile) && create)))
		return -EINVAL;

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, create, &cp_bh);
	if (ret < 0)
		goto out_header;
	/*
	 * A sleeping kmap() (not kmap_atomic) is used because the mapping
	 * outlives this function: it is handed to the caller and released
	 * later by nilfs_cpfile_put_checkpoint().
	 */
	kaddr = kmap(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		if (!create) {
			kunmap(cp_bh->b_page);
			brelse(cp_bh);
			ret = -ENOENT;
			goto out_header;
		}
		/* a newly-created checkpoint */
		nilfs_checkpoint_clear_invalid(cp);
		/*
		 * The per-block valid-checkpoint counter (kept in the
		 * block's first entry) is not maintained for the first
		 * block, which holds the cpfile header.
		 */
		if (!nilfs_cpfile_is_in_first(cpfile, cno))
			nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh,
								 kaddr, 1);
		mark_buffer_dirty(cp_bh);

		/* Account the new checkpoint in the global header count. */
		kaddr = kmap_atomic(header_bh->b_page);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, 1);
		kunmap_atomic(kaddr);
		mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
	}

	if (cpp != NULL)
		*cpp = cp;
	*bhp = cp_bh;	/* caller releases via nilfs_cpfile_put_checkpoint() */

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
283
/**
 * nilfs_cpfile_put_checkpoint - put a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @bh: buffer head
 *
 * Description: nilfs_cpfile_put_checkpoint() releases the checkpoint
 * specified by @cno. @bh must be the buffer head which has been returned by
 * a previous call to nilfs_cpfile_get_checkpoint() with @cno.
 */
void nilfs_cpfile_put_checkpoint(struct inode *cpfile, __u64 cno,
				 struct buffer_head *bh)
{
	/*
	 * Undo the kmap() and the buffer reference taken by
	 * nilfs_cpfile_get_checkpoint().
	 */
	kunmap(bh->b_page);
	brelse(bh);
}
300
/**
 * nilfs_cpfile_delete_checkpoints - delete checkpoints
 * @cpfile: inode of checkpoint file
 * @start: start checkpoint number
 * @end: end checkpoint number
 *
 * Description: nilfs_cpfile_delete_checkpoints() deletes the checkpoints in
 * the period from @start to @end, excluding @end itself. The checkpoints
 * which have been already deleted are ignored.  Snapshots within the range
 * are left untouched and cause %-EBUSY to be returned.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - invalid checkpoints.
 *
 * %-EBUSY - snapshots were found in the range.
 */
int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
				    __u64 start,
				    __u64 end)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cno;
	void *kaddr;
	unsigned long tnicps;	/* total checkpoints invalidated so far */
	int ret, ncps, nicps, nss, count, i;

	/* Checkpoint number 0 never exists; the range must be ascending. */
	if (unlikely(start == 0 || start > end)) {
		nilfs_msg(cpfile->i_sb, KERN_ERR,
			  "cannot delete checkpoints: invalid range [%llu, %llu)",
			  (unsigned long long)start, (unsigned long long)end);
		return -EINVAL;
	}

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	tnicps = 0;
	nss = 0;	/* number of snapshots seen (these are not deleted) */

	/* Walk the range one cpfile block at a time. */
	for (cno = start; cno < end; cno += ncps) {
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, end);
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				break;
			/* skip hole */
			ret = 0;
			continue;
		}

		kaddr = kmap_atomic(cp_bh->b_page);
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, cno, cp_bh, kaddr);
		nicps = 0;
		/* Invalidate every plain, still-valid checkpoint. */
		for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) {
			if (nilfs_checkpoint_snapshot(cp)) {
				nss++;
			} else if (!nilfs_checkpoint_invalid(cp)) {
				nilfs_checkpoint_set_invalid(cp);
				nicps++;
			}
		}
		if (nicps > 0) {
			tnicps += nicps;
			mark_buffer_dirty(cp_bh);
			nilfs_mdt_mark_dirty(cpfile);
			if (!nilfs_cpfile_is_in_first(cpfile, cno)) {
				count =
				  nilfs_cpfile_block_sub_valid_checkpoints(
						cpfile, cp_bh, kaddr, nicps);
				if (count == 0) {
					/* make hole */
					kunmap_atomic(kaddr);
					brelse(cp_bh);
					ret =
					  nilfs_cpfile_delete_checkpoint_block(
								   cpfile, cno);
					if (ret == 0)
						continue;
					nilfs_msg(cpfile->i_sb, KERN_ERR,
						  "error %d deleting checkpoint block",
						  ret);
					break;
				}
			}
		}

		kunmap_atomic(kaddr);
		brelse(cp_bh);
	}

	/* Fold the accumulated invalidations into the global counter. */
	if (tnicps > 0) {
		kaddr = kmap_atomic(header_bh->b_page);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
		mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
		kunmap_atomic(kaddr);
	}

	brelse(header_bh);
	if (nss > 0)
		ret = -EBUSY;

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
418
/*
 * Copy the fields of on-disk checkpoint @cp (little-endian) into the
 * host-endian cpinfo structure @ci.
 */
static void nilfs_cpfile_checkpoint_to_cpinfo(struct inode *cpfile,
					      struct nilfs_checkpoint *cp,
					      struct nilfs_cpinfo *ci)
{
	ci->ci_flags = le32_to_cpu(cp->cp_flags);
	ci->ci_cno = le64_to_cpu(cp->cp_cno);
	ci->ci_create = le64_to_cpu(cp->cp_create);
	ci->ci_nblk_inc = le64_to_cpu(cp->cp_nblk_inc);
	ci->ci_inodes_count = le64_to_cpu(cp->cp_inodes_count);
	ci->ci_blocks_count = le64_to_cpu(cp->cp_blocks_count);
	ci->ci_next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
}
431
/*
 * nilfs_cpfile_do_get_cpinfo - gather info on valid checkpoints
 * @cpfile: inode of checkpoint file
 * @cnop: checkpoint number to start from; on success with at least one
 *	entry, updated to the number following the last one returned
 * @buf: destination array
 * @cisz: stride of one entry in @buf, in bytes
 * @nci: maximum number of entries to gather
 *
 * Scans forward from *@cnop, skipping invalid entries and unallocated
 * (hole) blocks, converting up to @nci valid checkpoints into cpinfo
 * records.  Returns the number of entries stored, or a negative error
 * code.
 */
static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
					  void *buf, unsigned int cisz,
					  size_t nci)
{
	struct nilfs_checkpoint *cp;
	struct nilfs_cpinfo *ci = buf;
	struct buffer_head *bh;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop;
	void *kaddr;
	int n, ret;
	int ncps, i;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_read(&NILFS_MDT(cpfile)->mi_sem);

	for (n = 0; n < nci; cno += ncps) {
		/* Find the next allocated cpfile block at or after @cno. */
		ret = nilfs_cpfile_find_checkpoint_block(
			cpfile, cno, cur_cno - 1, &cno, &bh);
		if (ret < 0) {
			if (likely(ret == -ENOENT))
				break;	/* ran out of blocks; not an error */
			goto out;
		}
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno);

		kaddr = kmap_atomic(bh->b_page);
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
		for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
			if (!nilfs_checkpoint_invalid(cp)) {
				nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp,
								  ci);
				ci = (void *)ci + cisz;
				n++;
			}
		}
		kunmap_atomic(kaddr);
		brelse(bh);
	}

	ret = n;
	if (n > 0) {
		/* Resume just past the last checkpoint actually returned. */
		ci = (void *)ci - cisz;
		*cnop = ci->ci_cno + 1;
	}

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
483
/*
 * nilfs_cpfile_do_get_ssinfo - gather info on snapshots
 * @cpfile: inode of checkpoint file
 * @cnop: snapshot number to start from (0 means "head of the snapshot
 *	list"); updated to the next snapshot to read, or ~0 when the
 *	list has been exhausted
 * @buf: destination array
 * @cisz: stride of one entry in @buf, in bytes
 * @nci: maximum number of entries to gather
 *
 * Follows the singly chained snapshot list (ssl_next links) starting
 * from *@cnop, converting up to @nci snapshots into cpinfo records.
 * Returns the number of entries stored, or a negative error code.
 */
static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
					  void *buf, unsigned int cisz,
					  size_t nci)
{
	struct buffer_head *bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_cpinfo *ci = buf;
	__u64 curr = *cnop, next;
	unsigned long curr_blkoff, next_blkoff;
	void *kaddr;
	int n = 0, ret;

	down_read(&NILFS_MDT(cpfile)->mi_sem);

	if (curr == 0) {
		/* Start from the list head stored in the cpfile header. */
		ret = nilfs_cpfile_get_header_block(cpfile, &bh);
		if (ret < 0)
			goto out;
		kaddr = kmap_atomic(bh->b_page);
		header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
		curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
		kunmap_atomic(kaddr);
		brelse(bh);
		if (curr == 0) {
			ret = 0;
			goto out;
		}
	} else if (unlikely(curr == ~(__u64)0)) {
		/* A previous call already reached the end of the list. */
		ret = 0;
		goto out;
	}

	curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr);
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh);
	if (unlikely(ret < 0)) {
		if (ret == -ENOENT)
			ret = 0; /* No snapshots (started from a hole block) */
		goto out;
	}
	kaddr = kmap_atomic(bh->b_page);
	while (n < nci) {
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
		curr = ~(__u64)0; /* Terminator */
		if (unlikely(nilfs_checkpoint_invalid(cp) ||
			     !nilfs_checkpoint_snapshot(cp)))
			break;
		nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci);
		ci = (void *)ci + cisz;
		n++;
		next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
		if (next == 0)
			break; /* reach end of the snapshot list */

		/* Remap only when the next snapshot lies in another block. */
		next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
		if (curr_blkoff != next_blkoff) {
			kunmap_atomic(kaddr);
			brelse(bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
								0, &bh);
			if (unlikely(ret < 0)) {
				WARN_ON(ret == -ENOENT);
				goto out;
			}
			kaddr = kmap_atomic(bh->b_page);
		}
		curr = next;
		curr_blkoff = next_blkoff;
	}
	kunmap_atomic(kaddr);
	brelse(bh);
	*cnop = curr;
	ret = n;

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
562
563/**
564 * nilfs_cpfile_get_cpinfo -
565 * @cpfile:
566 * @cno:
567 * @ci:
568 * @nci:
569 */
570
571ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
572 void *buf, unsigned int cisz, size_t nci)
573{
574 switch (mode) {
575 case NILFS_CHECKPOINT:
576 return nilfs_cpfile_do_get_cpinfo(cpfile, cnop, buf, cisz, nci);
577 case NILFS_SNAPSHOT:
578 return nilfs_cpfile_do_get_ssinfo(cpfile, cnop, buf, cisz, nci);
579 default:
580 return -EINVAL;
581 }
582}
583
/**
 * nilfs_cpfile_delete_checkpoint - delete a single checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number to delete
 *
 * Return Value: 0 on success; %-ENOENT if checkpoint @cno does not exist
 * or is invalid, %-EBUSY if it is a snapshot, or another negative error
 * code on failure.
 */
int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno)
{
	struct nilfs_cpinfo ci;
	__u64 tcno = cno;
	ssize_t nci;

	/* Look up at most one checkpoint starting from @cno. */
	nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, sizeof(ci), 1);
	if (nci < 0)
		return nci;
	else if (nci == 0 || ci.ci_cno != cno)
		return -ENOENT;	/* @cno itself is not a valid checkpoint */
	else if (nilfs_cpinfo_snapshot(&ci))
		return -EBUSY;	/* snapshots cannot be deleted this way */

	return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1);
}
605
606static struct nilfs_snapshot_list *
607nilfs_cpfile_block_get_snapshot_list(const struct inode *cpfile,
608 __u64 cno,
609 struct buffer_head *bh,
610 void *kaddr)
611{
612 struct nilfs_cpfile_header *header;
613 struct nilfs_checkpoint *cp;
614 struct nilfs_snapshot_list *list;
615
616 if (cno != 0) {
617 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
618 list = &cp->cp_snapshot_list;
619 } else {
620 header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
621 list = &header->ch_snapshot_list;
622 }
623 return list;
624}
625
/*
 * nilfs_cpfile_set_snapshot - promote checkpoint @cno to a snapshot
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 *
 * Inserts @cno into the doubly linked snapshot list anchored in the
 * cpfile header, keeping the list ordered by checkpoint number (this
 * insertion preserves that order).  A no-op returning 0 if @cno is
 * already a snapshot.  Returns 0 on success or a negative error code
 * (%-ENOENT if @cno is 0 or invalid).
 */
static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *curr_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 curr, prev;
	unsigned long curr_blkoff, prev_blkoff;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr);
		goto out_cp;
	}
	if (nilfs_checkpoint_snapshot(cp)) {
		ret = 0;	/* already a snapshot: nothing to do */
		kunmap_atomic(kaddr);
		goto out_cp;
	}
	kunmap_atomic(kaddr);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	kaddr = kmap_atomic(header_bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	list = &header->ch_snapshot_list;
	curr_bh = header_bh;
	get_bh(curr_bh);
	curr = 0;
	curr_blkoff = 0;
	/*
	 * Walk the list backwards via ssl_prev, starting from the header
	 * anchor, until @prev <= @cno: @cno is then inserted between
	 * @prev and @curr.
	 */
	prev = le64_to_cpu(list->ssl_prev);
	while (prev > cno) {
		prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
		curr = prev;
		/* Only remap when moving into a different block. */
		if (curr_blkoff != prev_blkoff) {
			kunmap_atomic(kaddr);
			brelse(curr_bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
								0, &curr_bh);
			if (ret < 0)
				goto out_header;
			kaddr = kmap_atomic(curr_bh->b_page);
		}
		curr_blkoff = prev_blkoff;
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, curr, curr_bh, kaddr);
		list = &cp->cp_snapshot_list;
		prev = le64_to_cpu(list->ssl_prev);
	}
	kunmap_atomic(kaddr);

	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_curr;
	} else {
		/* prev == 0 refers to the list anchor in the header. */
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	/* Link @cno in: curr->prev = cno */
	kaddr = kmap_atomic(curr_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, curr, curr_bh, kaddr);
	list->ssl_prev = cpu_to_le64(cno);
	kunmap_atomic(kaddr);

	/* cno->next = curr, cno->prev = prev; mark it a snapshot. */
	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
	nilfs_checkpoint_set_snapshot(cp);
	kunmap_atomic(kaddr);

	/* prev->next = cno */
	kaddr = kmap_atomic(prev_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(cno);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(header_bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, 1);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(prev_bh);
	mark_buffer_dirty(curr_bh);
	mark_buffer_dirty(cp_bh);
	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_curr:
	brelse(curr_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
744
/*
 * nilfs_cpfile_clear_snapshot - demote snapshot @cno to a plain checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 *
 * Unlinks @cno from the doubly linked snapshot list (patching the
 * neighbours' ssl_prev/ssl_next, where 0 denotes the header anchor) and
 * clears its snapshot flag.  A no-op returning 0 if @cno is not a
 * snapshot.  Returns 0 on success or a negative error code (%-ENOENT if
 * @cno is 0 or invalid).
 */
static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *next_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 next, prev;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr);
		goto out_cp;
	}
	if (!nilfs_checkpoint_snapshot(cp)) {
		ret = 0;	/* not a snapshot: nothing to do */
		kunmap_atomic(kaddr);
		goto out_cp;
	}

	/* Remember both neighbours before dropping the mapping. */
	list = &cp->cp_snapshot_list;
	next = le64_to_cpu(list->ssl_next);
	prev = le64_to_cpu(list->ssl_prev);
	kunmap_atomic(kaddr);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	if (next != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0,
							&next_bh);
		if (ret < 0)
			goto out_header;
	} else {
		/* next == 0 refers to the list anchor in the header. */
		next_bh = header_bh;
		get_bh(next_bh);
	}
	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_next;
	} else {
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	/* next->prev = prev */
	kaddr = kmap_atomic(next_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, next, next_bh, kaddr);
	list->ssl_prev = cpu_to_le64(prev);
	kunmap_atomic(kaddr);

	/* prev->next = next */
	kaddr = kmap_atomic(prev_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(next);
	kunmap_atomic(kaddr);

	/* Detach @cno and clear its snapshot flag. */
	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
	nilfs_checkpoint_clear_snapshot(cp);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(header_bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, -1);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(next_bh);
	mark_buffer_dirty(prev_bh);
	mark_buffer_dirty(cp_bh);
	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_next:
	brelse(next_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
847
/**
 * nilfs_cpfile_is_snapshot - determine if checkpoint is snapshot
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 *
 * Return Value: On success, 1 is returned if the checkpoint specified by
 * @cno is a snapshot, or 0 if not. On error, one of the following negative
 * error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *bh;
	struct nilfs_checkpoint *cp;
	void *kaddr;
	int ret;

	/*
	 * CP number is invalid if it's zero or larger than the
	 * largest existing one.
	 */
	if (cno == 0 || cno >= nilfs_mdt_cno(cpfile))
		return -ENOENT;
	down_read(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
	if (ret < 0)
		goto out;
	kaddr = kmap_atomic(bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
	if (nilfs_checkpoint_invalid(cp))
		ret = -ENOENT;
	else
		ret = nilfs_checkpoint_snapshot(cp);
	kunmap_atomic(kaddr);
	brelse(bh);

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
896
/**
 * nilfs_cpfile_change_cpmode - change checkpoint mode
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @mode: mode of checkpoint
 *
 * Description: nilfs_cpfile_change_cpmode() changes the mode of the
 * checkpoint specified by @cno. The mode @mode is NILFS_CHECKPOINT or
 * NILFS_SNAPSHOT.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
{
	int ret;

	switch (mode) {
	case NILFS_CHECKPOINT:
		if (nilfs_checkpoint_is_mounted(cpfile->i_sb, cno))
			/*
			 * Current implementation does not have to protect
			 * plain read-only mounts since they are exclusive
			 * with a read/write mount and are protected from the
			 * cleaner.
			 */
			ret = -EBUSY;
		else
			ret = nilfs_cpfile_clear_snapshot(cpfile, cno);
		return ret;
	case NILFS_SNAPSHOT:
		return nilfs_cpfile_set_snapshot(cpfile, cno);
	default:
		return -EINVAL;
	}
}
938
/**
 * nilfs_cpfile_get_stat - get checkpoint statistics
 * @cpfile: inode of checkpoint file
 * @cpstat: pointer to a structure of checkpoint statistics
 *
 * Description: nilfs_cpfile_get_stat() returns information about
 * checkpoints: the current checkpoint number and the totals of
 * checkpoints and snapshots taken from the cpfile header.
 *
 * Return Value: On success, 0 is returned, and checkpoints information is
 * stored in the place pointed by @cpstat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
{
	struct buffer_head *bh;
	struct nilfs_cpfile_header *header;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
	cpstat->cs_cno = nilfs_mdt_cno(cpfile);
	cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints);
	cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots);
	kunmap_atomic(kaddr);
	brelse(bh);

 out_sem:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
978
/**
 * nilfs_cpfile_read - read or get cpfile inode
 * @sb: super block instance
 * @cpsize: size of a checkpoint entry
 * @raw_inode: on-disk cpfile inode
 * @inodep: buffer to store the inode
 *
 * Return Value: 0 on success with *@inodep set; %-EINVAL if @cpsize is
 * out of range, %-ENOMEM or another negative error code on failure.
 */
int nilfs_cpfile_read(struct super_block *sb, size_t cpsize,
		      struct nilfs_inode *raw_inode, struct inode **inodep)
{
	struct inode *cpfile;
	int err;

	/* Sanity-check the on-disk entry size before trusting it. */
	if (cpsize > sb->s_blocksize) {
		nilfs_msg(sb, KERN_ERR,
			  "too large checkpoint size: %zu bytes", cpsize);
		return -EINVAL;
	} else if (cpsize < NILFS_MIN_CHECKPOINT_SIZE) {
		nilfs_msg(sb, KERN_ERR,
			  "too small checkpoint size: %zu bytes", cpsize);
		return -EINVAL;
	}

	cpfile = nilfs_iget_locked(sb, NULL, NILFS_CPFILE_INO);
	if (unlikely(!cpfile))
		return -ENOMEM;
	/* Already initialized by a previous call: just hand it back. */
	if (!(cpfile->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(cpfile, NILFS_MDT_GFP, 0);
	if (err)
		goto failed;

	nilfs_mdt_set_entry_size(cpfile, cpsize,
				 sizeof(struct nilfs_cpfile_header));

	err = nilfs_read_inode_common(cpfile, raw_inode);
	if (err)
		goto failed;

	unlock_new_inode(cpfile);
 out:
	*inodep = cpfile;
	return 0;
 failed:
	iget_failed(cpfile);
	return err;
}
1/*
2 * cpfile.c - NILFS checkpoint file.
3 *
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 *
20 * Written by Koji Sato <koji@osrg.net>.
21 */
22
23#include <linux/kernel.h>
24#include <linux/fs.h>
25#include <linux/string.h>
26#include <linux/buffer_head.h>
27#include <linux/errno.h>
28#include <linux/nilfs2_fs.h>
29#include "mdt.h"
30#include "cpfile.h"
31
32
/* Number of checkpoint entries stored in a single cpfile block. */
static inline unsigned long
nilfs_cpfile_checkpoints_per_block(const struct inode *cpfile)
{
	return NILFS_MDT(cpfile)->mi_entries_per_block;
}

/* block number from the beginning of the file */
static unsigned long
nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno)
{
	__u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
	/* do_div() divides in place; remainder is discarded here. */
	do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
	return (unsigned long)tcno;
}

/* offset in block */
static unsigned long
nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno)
{
	__u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
	/* do_div() returns the remainder, i.e. the in-block entry index. */
	return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
}

/*
 * Number of checkpoints from @curr to the end of its block, clamped so
 * that the count never reaches @max (exclusive upper bound).
 */
static unsigned long
nilfs_cpfile_checkpoints_in_block(const struct inode *cpfile,
				  __u64 curr,
				  __u64 max)
{
	return min_t(__u64,
		     nilfs_cpfile_checkpoints_per_block(cpfile) -
		     nilfs_cpfile_get_offset(cpfile, curr),
		     max - curr);
}

/* Nonzero iff checkpoint @cno lives in the first cpfile block. */
static inline int nilfs_cpfile_is_in_first(const struct inode *cpfile,
					   __u64 cno)
{
	return nilfs_cpfile_get_blkoff(cpfile, cno) == 0;
}
72
/*
 * Add @n to the valid-checkpoint counter stored in the block's first
 * entry and return the updated value.  @kaddr must map @bh's page.
 */
static unsigned int
nilfs_cpfile_block_add_valid_checkpoints(const struct inode *cpfile,
					 struct buffer_head *bh,
					 void *kaddr,
					 unsigned int n)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	unsigned int count;

	count = le32_to_cpu(cp->cp_checkpoints_count) + n;
	cp->cp_checkpoints_count = cpu_to_le32(count);
	return count;
}

/*
 * Subtract @n from the valid-checkpoint counter in the block's first
 * entry and return the updated value; warns on underflow.
 */
static unsigned int
nilfs_cpfile_block_sub_valid_checkpoints(const struct inode *cpfile,
					 struct buffer_head *bh,
					 void *kaddr,
					 unsigned int n)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	unsigned int count;

	WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n);
	count = le32_to_cpu(cp->cp_checkpoints_count) - n;
	cp->cp_checkpoints_count = cpu_to_le32(count);
	return count;
}

/* The cpfile header is located at the very beginning of its buffer. */
static inline struct nilfs_cpfile_header *
nilfs_cpfile_block_get_header(const struct inode *cpfile,
			      struct buffer_head *bh,
			      void *kaddr)
{
	return kaddr + bh_offset(bh);
}

/* Address of checkpoint @cno within the mapped block buffer @bh/@kaddr. */
static struct nilfs_checkpoint *
nilfs_cpfile_block_get_checkpoint(const struct inode *cpfile, __u64 cno,
				  struct buffer_head *bh,
				  void *kaddr)
{
	return kaddr + bh_offset(bh) + nilfs_cpfile_get_offset(cpfile, cno) *
		NILFS_MDT(cpfile)->mi_entry_size;
}

/* Initialize a freshly allocated cpfile block: mark every entry invalid. */
static void nilfs_cpfile_block_init(struct inode *cpfile,
				    struct buffer_head *bh,
				    void *kaddr)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	int n = nilfs_cpfile_checkpoints_per_block(cpfile);

	while (n-- > 0) {
		nilfs_checkpoint_set_invalid(cp);
		cp = (void *)cp + cpsz;
	}
}
132
/* Read the cpfile header block (always block 0); never creates it */
static inline int nilfs_cpfile_get_header_block(struct inode *cpfile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp);
}
138
139static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile,
140 __u64 cno,
141 int create,
142 struct buffer_head **bhp)
143{
144 return nilfs_mdt_get_block(cpfile,
145 nilfs_cpfile_get_blkoff(cpfile, cno),
146 create, nilfs_cpfile_block_init, bhp);
147}
148
149static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile,
150 __u64 cno)
151{
152 return nilfs_mdt_delete_block(cpfile,
153 nilfs_cpfile_get_blkoff(cpfile, cno));
154}
155
/**
 * nilfs_cpfile_get_checkpoint - get a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @create: create flag
 * @cpp: pointer to a checkpoint
 * @bhp: pointer to a buffer head
 *
 * Description: nilfs_cpfile_get_checkpoint() acquires the checkpoint
 * specified by @cno. A new checkpoint will be created if @cno is the current
 * checkpoint number and @create is nonzero.  The returned buffer is mapped
 * with kmap(); release it with nilfs_cpfile_put_checkpoint() when done.
 *
 * Return Value: On success, 0 is returned, and the checkpoint and the
 * buffer head of the buffer on which the checkpoint is located are stored in
 * the place pointed by @cpp and @bhp, respectively. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 *
 * %-EINVAL - invalid checkpoint.
 */
int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
				__u64 cno,
				int create,
				struct nilfs_checkpoint **cpp,
				struct buffer_head **bhp)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	void *kaddr;
	int ret;

	/* creation is only allowed at the current checkpoint number */
	if (unlikely(cno < 1 || cno > nilfs_mdt_cno(cpfile) ||
		     (cno < nilfs_mdt_cno(cpfile) && create)))
		return -EINVAL;

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, create, &cp_bh);
	if (ret < 0)
		goto out_header;
	/* kmap (not kmap_atomic): the mapping outlives this function */
	kaddr = kmap(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		if (!create) {
			kunmap(cp_bh->b_page);
			brelse(cp_bh);
			ret = -ENOENT;
			goto out_header;
		}
		/* a newly-created checkpoint */
		nilfs_checkpoint_clear_invalid(cp);
		/*
		 * Per-block counters are not kept in the first block,
		 * which holds the cpfile header instead.
		 */
		if (!nilfs_cpfile_is_in_first(cpfile, cno))
			nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh,
								 kaddr, 1);
		mark_buffer_dirty(cp_bh);

		/* account for the new checkpoint in the header */
		kaddr = kmap_atomic(header_bh->b_page);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, 1);
		kunmap_atomic(kaddr);
		mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
	}

	if (cpp != NULL)
		*cpp = cp;
	*bhp = cp_bh;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
241
/**
 * nilfs_cpfile_put_checkpoint - put a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @bh: buffer head
 *
 * Description: nilfs_cpfile_put_checkpoint() releases the checkpoint
 * specified by @cno. @bh must be the buffer head which has been returned by
 * a previous call to nilfs_cpfile_get_checkpoint() with @cno.
 */
void nilfs_cpfile_put_checkpoint(struct inode *cpfile, __u64 cno,
				 struct buffer_head *bh)
{
	/* undo the kmap() taken by nilfs_cpfile_get_checkpoint() */
	kunmap(bh->b_page);
	brelse(bh);
}
258
/**
 * nilfs_cpfile_delete_checkpoints - delete checkpoints
 * @cpfile: inode of checkpoint file
 * @start: start checkpoint number
 * @end: end checkpoint number
 *
 * Description: nilfs_cpfile_delete_checkpoints() deletes the checkpoints in
 * the period from @start to @end, excluding @end itself. The checkpoints
 * which have been already deleted are ignored.  Snapshots in the range are
 * left untouched.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - invalid checkpoints.
 *
 * %-EBUSY - snapshots were found in the range.
 */
int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
				    __u64 start,
				    __u64 end)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cno;
	void *kaddr;
	unsigned long tnicps;	/* total checkpoints invalidated */
	int ret, ncps, nicps, nss, count, i;

	if (unlikely(start == 0 || start > end)) {
		printk(KERN_ERR "%s: invalid range of checkpoint numbers: "
		       "[%llu, %llu)\n", __func__,
		       (unsigned long long)start, (unsigned long long)end);
		return -EINVAL;
	}

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	tnicps = 0;
	nss = 0;	/* number of snapshots encountered (and skipped) */

	/* walk the range one block at a time */
	for (cno = start; cno < end; cno += ncps) {
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, end);
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				break;
			/* skip hole */
			ret = 0;
			continue;
		}

		kaddr = kmap_atomic(cp_bh->b_page);
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, cno, cp_bh, kaddr);
		nicps = 0;
		/* invalidate every live, non-snapshot entry in this block */
		for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) {
			if (nilfs_checkpoint_snapshot(cp)) {
				nss++;
			} else if (!nilfs_checkpoint_invalid(cp)) {
				nilfs_checkpoint_set_invalid(cp);
				nicps++;
			}
		}
		if (nicps > 0) {
			tnicps += nicps;
			mark_buffer_dirty(cp_bh);
			nilfs_mdt_mark_dirty(cpfile);
			if (!nilfs_cpfile_is_in_first(cpfile, cno)) {
				count =
				  nilfs_cpfile_block_sub_valid_checkpoints(
						cpfile, cp_bh, kaddr, nicps);
				if (count == 0) {
					/* make hole */
					kunmap_atomic(kaddr);
					brelse(cp_bh);
					ret =
					  nilfs_cpfile_delete_checkpoint_block(
								   cpfile, cno);
					if (ret == 0)
						continue;
					printk(KERN_ERR
					       "%s: cannot delete block\n",
					       __func__);
					break;
				}
			}
		}

		kunmap_atomic(kaddr);
		brelse(cp_bh);
	}

	/* fold the per-block invalidations into the global count */
	if (tnicps > 0) {
		kaddr = kmap_atomic(header_bh->b_page);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
		mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
		kunmap_atomic(kaddr);
	}

	brelse(header_bh);
	if (nss > 0)
		ret = -EBUSY;	/* snapshots in the range were left in place */

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
376
/* Translate an on-disk checkpoint entry into a nilfs_cpinfo record */
static void nilfs_cpfile_checkpoint_to_cpinfo(struct inode *cpfile,
					      struct nilfs_checkpoint *cp,
					      struct nilfs_cpinfo *ci)
{
	ci->ci_flags = le32_to_cpu(cp->cp_flags);
	ci->ci_cno = le64_to_cpu(cp->cp_cno);
	ci->ci_create = le64_to_cpu(cp->cp_create);
	ci->ci_nblk_inc = le64_to_cpu(cp->cp_nblk_inc);
	ci->ci_inodes_count = le64_to_cpu(cp->cp_inodes_count);
	ci->ci_blocks_count = le64_to_cpu(cp->cp_blocks_count);
	ci->ci_next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
}
389
/*
 * Copy info on valid checkpoints, starting at *cnop, into @buf (up to
 * @nci entries of @cisz bytes each).  On return with a positive count,
 * *cnop is advanced past the last checkpoint copied so the caller can
 * resume the scan from there.
 */
static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
					  void *buf, unsigned cisz, size_t nci)
{
	struct nilfs_checkpoint *cp;
	struct nilfs_cpinfo *ci = buf;
	struct buffer_head *bh;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop;
	void *kaddr;
	int n, ret;
	int ncps, i;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_read(&NILFS_MDT(cpfile)->mi_sem);

	for (n = 0; cno < cur_cno && n < nci; cno += ncps) {
		/* entries left in this block, clipped at cur_cno */
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno);
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			continue; /* skip hole */
		}

		kaddr = kmap_atomic(bh->b_page);
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
		for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
			if (!nilfs_checkpoint_invalid(cp)) {
				nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp,
								  ci);
				ci = (void *)ci + cisz;
				n++;
			}
		}
		kunmap_atomic(kaddr);
		brelse(bh);
	}

	ret = n;
	if (n > 0) {
		/* advance *cnop to one past the last reported checkpoint */
		ci = (void *)ci - cisz;
		*cnop = ci->ci_cno + 1;
	}

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
439
/*
 * Collect info on snapshots by following the snapshot linked list,
 * starting at *cnop (0 means "start from the list head kept in the
 * cpfile header").  On return *cnop holds the snapshot number to resume
 * from, or ~0 once the end of the list has been reached.
 */
static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
					  void *buf, unsigned cisz, size_t nci)
{
	struct buffer_head *bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_cpinfo *ci = buf;
	__u64 curr = *cnop, next;
	unsigned long curr_blkoff, next_blkoff;
	void *kaddr;
	int n = 0, ret;

	down_read(&NILFS_MDT(cpfile)->mi_sem);

	if (curr == 0) {
		/* fetch the first snapshot number from the header */
		ret = nilfs_cpfile_get_header_block(cpfile, &bh);
		if (ret < 0)
			goto out;
		kaddr = kmap_atomic(bh->b_page);
		header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
		curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
		kunmap_atomic(kaddr);
		brelse(bh);
		if (curr == 0) {
			/* empty snapshot list */
			ret = 0;
			goto out;
		}
	} else if (unlikely(curr == ~(__u64)0)) {
		/* terminator left by a previous call: nothing more */
		ret = 0;
		goto out;
	}

	curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr);
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh);
	if (unlikely(ret < 0)) {
		if (ret == -ENOENT)
			ret = 0; /* No snapshots (started from a hole block) */
		goto out;
	}
	kaddr = kmap_atomic(bh->b_page);
	while (n < nci) {
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
		curr = ~(__u64)0; /* Terminator */
		if (unlikely(nilfs_checkpoint_invalid(cp) ||
			     !nilfs_checkpoint_snapshot(cp)))
			break;
		nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci);
		ci = (void *)ci + cisz;
		n++;
		next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
		if (next == 0)
			break; /* reach end of the snapshot list */

		next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
		if (curr_blkoff != next_blkoff) {
			/* next snapshot lives in another block; remap */
			kunmap_atomic(kaddr);
			brelse(bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
								0, &bh);
			if (unlikely(ret < 0)) {
				WARN_ON(ret == -ENOENT);
				goto out;
			}
			kaddr = kmap_atomic(bh->b_page);
		}
		curr = next;
		curr_blkoff = next_blkoff;
	}
	kunmap_atomic(kaddr);
	brelse(bh);
	*cnop = curr;
	ret = n;

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
517
/**
 * nilfs_cpfile_get_cpinfo - get information on checkpoints or snapshots
 * @cpfile: inode of checkpoint file
 * @cnop: place to pass in and receive the scan cursor (checkpoint number)
 * @mode: %NILFS_CHECKPOINT to scan all checkpoints, or %NILFS_SNAPSHOT to
 *        follow the snapshot list
 * @buf: buffer to store nilfs_cpinfo entries
 * @cisz: size of a single entry in @buf
 * @nci: maximum number of entries to store
 *
 * Return Value: number of entries stored on success, or a negative error
 * code (%-EINVAL for an unknown @mode).
 */

ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
				void *buf, unsigned cisz, size_t nci)
{
	switch (mode) {
	case NILFS_CHECKPOINT:
		return nilfs_cpfile_do_get_cpinfo(cpfile, cnop, buf, cisz, nci);
	case NILFS_SNAPSHOT:
		return nilfs_cpfile_do_get_ssinfo(cpfile, cnop, buf, cisz, nci);
	default:
		return -EINVAL;
	}
}
538
/**
 * nilfs_cpfile_delete_checkpoint - delete a single checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number to delete
 *
 * Return Value: 0 on success, %-ENOENT if checkpoint @cno does not exist,
 * %-EBUSY if it is a snapshot, or another negative error code on failure.
 */
int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno)
{
	struct nilfs_cpinfo ci;
	__u64 tcno = cno;
	ssize_t nci;

	/* look the checkpoint up first to reject holes and snapshots */
	nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, sizeof(ci), 1);
	if (nci < 0)
		return nci;
	else if (nci == 0 || ci.ci_cno != cno)
		return -ENOENT;
	else if (nilfs_cpinfo_snapshot(&ci))
		return -EBUSY;

	return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1);
}
560
561static struct nilfs_snapshot_list *
562nilfs_cpfile_block_get_snapshot_list(const struct inode *cpfile,
563 __u64 cno,
564 struct buffer_head *bh,
565 void *kaddr)
566{
567 struct nilfs_cpfile_header *header;
568 struct nilfs_checkpoint *cp;
569 struct nilfs_snapshot_list *list;
570
571 if (cno != 0) {
572 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
573 list = &cp->cp_snapshot_list;
574 } else {
575 header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
576 list = &header->ch_snapshot_list;
577 }
578 return list;
579}
580
/*
 * Turn checkpoint @cno into a snapshot by linking it into the snapshot
 * list.  The insertion point is found by walking ssl_prev links from the
 * list tail (stored in the header) until an entry whose number is not
 * larger than @cno is reached, keeping the list ordered by number.
 */
static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *curr_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 curr, prev;
	unsigned long curr_blkoff, prev_blkoff;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr);
		goto out_cp;
	}
	if (nilfs_checkpoint_snapshot(cp)) {
		/* already a snapshot; nothing to do */
		ret = 0;
		kunmap_atomic(kaddr);
		goto out_cp;
	}
	kunmap_atomic(kaddr);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	kaddr = kmap_atomic(header_bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	list = &header->ch_snapshot_list;
	curr_bh = header_bh;
	get_bh(curr_bh);
	curr = 0;
	curr_blkoff = 0;
	/* walk backwards from the tail to find the insertion point */
	prev = le64_to_cpu(list->ssl_prev);
	while (prev > cno) {
		prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
		curr = prev;
		if (curr_blkoff != prev_blkoff) {
			/* moved into a different block; remap */
			kunmap_atomic(kaddr);
			brelse(curr_bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
								0, &curr_bh);
			if (ret < 0)
				goto out_header;
			kaddr = kmap_atomic(curr_bh->b_page);
		}
		curr_blkoff = prev_blkoff;
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, curr, curr_bh, kaddr);
		list = &cp->cp_snapshot_list;
		prev = le64_to_cpu(list->ssl_prev);
	}
	kunmap_atomic(kaddr);

	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_curr;
	} else {
		/* prev == 0: link right after the header's list head */
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	/* splice @cno in between @prev and @curr */
	kaddr = kmap_atomic(curr_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, curr, curr_bh, kaddr);
	list->ssl_prev = cpu_to_le64(cno);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
	nilfs_checkpoint_set_snapshot(cp);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(prev_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(cno);
	kunmap_atomic(kaddr);

	/* account for the new snapshot in the header */
	kaddr = kmap_atomic(header_bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, 1);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(prev_bh);
	mark_buffer_dirty(curr_bh);
	mark_buffer_dirty(cp_bh);
	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_curr:
	brelse(curr_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
699
/*
 * Revert snapshot @cno back to a plain checkpoint by unlinking it from
 * the doubly linked snapshot list and clearing its snapshot flag.
 */
static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *next_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 next, prev;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr);
		goto out_cp;
	}
	if (!nilfs_checkpoint_snapshot(cp)) {
		/* not a snapshot; nothing to do */
		ret = 0;
		kunmap_atomic(kaddr);
		goto out_cp;
	}

	/* remember both neighbours before dropping the mapping */
	list = &cp->cp_snapshot_list;
	next = le64_to_cpu(list->ssl_next);
	prev = le64_to_cpu(list->ssl_prev);
	kunmap_atomic(kaddr);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	if (next != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0,
							&next_bh);
		if (ret < 0)
			goto out_header;
	} else {
		/* next == 0: the header's list head is the successor */
		next_bh = header_bh;
		get_bh(next_bh);
	}
	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_next;
	} else {
		/* prev == 0: the header's list head is the predecessor */
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	/* unlink: next->prev = prev, prev->next = next */
	kaddr = kmap_atomic(next_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, next, next_bh, kaddr);
	list->ssl_prev = cpu_to_le64(prev);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(prev_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(next);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
	nilfs_checkpoint_clear_snapshot(cp);
	kunmap_atomic(kaddr);

	/* account for the removed snapshot in the header */
	kaddr = kmap_atomic(header_bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, -1);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(next_bh);
	mark_buffer_dirty(prev_bh);
	mark_buffer_dirty(cp_bh);
	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

 out_next:
	brelse(next_bh);

 out_header:
	brelse(header_bh);

 out_cp:
	brelse(cp_bh);

 out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
802
/**
 * nilfs_cpfile_is_snapshot - test whether a checkpoint is a snapshot
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 *
 * Description: nilfs_cpfile_is_snapshot() looks up checkpoint @cno and
 * reports whether its snapshot flag is set.
 *
 * Return Value: On success, 1 is returned if the checkpoint specified by
 * @cno is a snapshot, or 0 if not. On error, one of the following negative
 * error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *bh;
	struct nilfs_checkpoint *cp;
	void *kaddr;
	int ret;

	/*
	 * CP number is invalid if it's zero or larger than the
	 * largest existing one.
	 */
	if (cno == 0 || cno >= nilfs_mdt_cno(cpfile))
		return -ENOENT;
	down_read(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
	if (ret < 0)
		goto out;
	kaddr = kmap_atomic(bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
	if (nilfs_checkpoint_invalid(cp))
		ret = -ENOENT;
	else
		ret = nilfs_checkpoint_snapshot(cp);
	kunmap_atomic(kaddr);
	brelse(bh);

 out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
849
/**
 * nilfs_cpfile_change_cpmode - change checkpoint mode
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @mode: mode of checkpoint; %NILFS_CHECKPOINT or %NILFS_SNAPSHOT
 *
 * Description: nilfs_cpfile_change_cpmode() changes the mode of the
 * checkpoint specified by @cno.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 *
 * %-EBUSY - Checkpoint is currently mounted as a snapshot.
 */
int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
{
	int ret;

	switch (mode) {
	case NILFS_CHECKPOINT:
		if (nilfs_checkpoint_is_mounted(cpfile->i_sb, cno))
			/*
			 * Current implementation does not have to protect
			 * plain read-only mounts since they are exclusive
			 * with a read/write mount and are protected from the
			 * cleaner.
			 */
			ret = -EBUSY;
		else
			ret = nilfs_cpfile_clear_snapshot(cpfile, cno);
		return ret;
	case NILFS_SNAPSHOT:
		return nilfs_cpfile_set_snapshot(cpfile, cno);
	default:
		return -EINVAL;
	}
}
891
892/**
893 * nilfs_cpfile_get_stat - get checkpoint statistics
894 * @cpfile: inode of checkpoint file
895 * @stat: pointer to a structure of checkpoint statistics
896 *
897 * Description: nilfs_cpfile_get_stat() returns information about checkpoints.
898 *
899 * Return Value: On success, 0 is returned, and checkpoints information is
900 * stored in the place pointed by @stat. On error, one of the following
901 * negative error codes is returned.
902 *
903 * %-EIO - I/O error.
904 *
905 * %-ENOMEM - Insufficient amount of memory available.
906 */
907int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
908{
909 struct buffer_head *bh;
910 struct nilfs_cpfile_header *header;
911 void *kaddr;
912 int ret;
913
914 down_read(&NILFS_MDT(cpfile)->mi_sem);
915
916 ret = nilfs_cpfile_get_header_block(cpfile, &bh);
917 if (ret < 0)
918 goto out_sem;
919 kaddr = kmap_atomic(bh->b_page);
920 header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
921 cpstat->cs_cno = nilfs_mdt_cno(cpfile);
922 cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints);
923 cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots);
924 kunmap_atomic(kaddr);
925 brelse(bh);
926
927 out_sem:
928 up_read(&NILFS_MDT(cpfile)->mi_sem);
929 return ret;
930}
931
932/**
933 * nilfs_cpfile_read - read or get cpfile inode
934 * @sb: super block instance
935 * @cpsize: size of a checkpoint entry
936 * @raw_inode: on-disk cpfile inode
937 * @inodep: buffer to store the inode
938 */
939int nilfs_cpfile_read(struct super_block *sb, size_t cpsize,
940 struct nilfs_inode *raw_inode, struct inode **inodep)
941{
942 struct inode *cpfile;
943 int err;
944
945 if (cpsize > sb->s_blocksize) {
946 printk(KERN_ERR
947 "NILFS: too large checkpoint size: %zu bytes.\n",
948 cpsize);
949 return -EINVAL;
950 } else if (cpsize < NILFS_MIN_CHECKPOINT_SIZE) {
951 printk(KERN_ERR
952 "NILFS: too small checkpoint size: %zu bytes.\n",
953 cpsize);
954 return -EINVAL;
955 }
956
957 cpfile = nilfs_iget_locked(sb, NULL, NILFS_CPFILE_INO);
958 if (unlikely(!cpfile))
959 return -ENOMEM;
960 if (!(cpfile->i_state & I_NEW))
961 goto out;
962
963 err = nilfs_mdt_init(cpfile, NILFS_MDT_GFP, 0);
964 if (err)
965 goto failed;
966
967 nilfs_mdt_set_entry_size(cpfile, cpsize,
968 sizeof(struct nilfs_cpfile_header));
969
970 err = nilfs_read_inode_common(cpfile, raw_inode);
971 if (err)
972 goto failed;
973
974 unlock_new_inode(cpfile);
975 out:
976 *inodep = cpfile;
977 return 0;
978 failed:
979 iget_failed(cpfile);
980 return err;
981}