Loading...
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * NILFS checkpoint file.
4 *
5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
6 *
7 * Written by Koji Sato.
8 */
9
10#include <linux/kernel.h>
11#include <linux/fs.h>
12#include <linux/string.h>
13#include <linux/buffer_head.h>
14#include <linux/errno.h>
15#include "mdt.h"
16#include "cpfile.h"
17
18
19static inline unsigned long
20nilfs_cpfile_checkpoints_per_block(const struct inode *cpfile)
21{
22 return NILFS_MDT(cpfile)->mi_entries_per_block;
23}
24
25/* block number from the beginning of the file */
26static unsigned long
27nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno)
28{
29 __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
30
31 tcno = div64_ul(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
32 return (unsigned long)tcno;
33}
34
35/* offset in block */
36static unsigned long
37nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno)
38{
39 __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
40
41 return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
42}
43
44static __u64 nilfs_cpfile_first_checkpoint_in_block(const struct inode *cpfile,
45 unsigned long blkoff)
46{
47 return (__u64)nilfs_cpfile_checkpoints_per_block(cpfile) * blkoff
48 + 1 - NILFS_MDT(cpfile)->mi_first_entry_offset;
49}
50
51static unsigned long
52nilfs_cpfile_checkpoints_in_block(const struct inode *cpfile,
53 __u64 curr,
54 __u64 max)
55{
56 return min_t(__u64,
57 nilfs_cpfile_checkpoints_per_block(cpfile) -
58 nilfs_cpfile_get_offset(cpfile, curr),
59 max - curr);
60}
61
62static inline int nilfs_cpfile_is_in_first(const struct inode *cpfile,
63 __u64 cno)
64{
65 return nilfs_cpfile_get_blkoff(cpfile, cno) == 0;
66}
67
68static unsigned int
69nilfs_cpfile_block_add_valid_checkpoints(const struct inode *cpfile,
70 struct buffer_head *bh,
71 unsigned int n)
72{
73 struct nilfs_checkpoint *cp;
74 unsigned int count;
75
76 cp = kmap_local_folio(bh->b_folio,
77 offset_in_folio(bh->b_folio, bh->b_data));
78 count = le32_to_cpu(cp->cp_checkpoints_count) + n;
79 cp->cp_checkpoints_count = cpu_to_le32(count);
80 kunmap_local(cp);
81 return count;
82}
83
84static unsigned int
85nilfs_cpfile_block_sub_valid_checkpoints(const struct inode *cpfile,
86 struct buffer_head *bh,
87 unsigned int n)
88{
89 struct nilfs_checkpoint *cp;
90 unsigned int count;
91
92 cp = kmap_local_folio(bh->b_folio,
93 offset_in_folio(bh->b_folio, bh->b_data));
94 WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n);
95 count = le32_to_cpu(cp->cp_checkpoints_count) - n;
96 cp->cp_checkpoints_count = cpu_to_le32(count);
97 kunmap_local(cp);
98 return count;
99}
100
101static void nilfs_cpfile_block_init(struct inode *cpfile,
102 struct buffer_head *bh,
103 void *from)
104{
105 struct nilfs_checkpoint *cp = from;
106 size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
107 int n = nilfs_cpfile_checkpoints_per_block(cpfile);
108
109 while (n-- > 0) {
110 nilfs_checkpoint_set_invalid(cp);
111 cp = (void *)cp + cpsz;
112 }
113}
114
115/**
116 * nilfs_cpfile_checkpoint_offset - calculate the byte offset of a checkpoint
117 * entry in the folio containing it
118 * @cpfile: checkpoint file inode
119 * @cno: checkpoint number
120 * @bh: buffer head of block containing checkpoint indexed by @cno
121 *
122 * Return: Byte offset in the folio of the checkpoint specified by @cno.
123 */
124static size_t nilfs_cpfile_checkpoint_offset(const struct inode *cpfile,
125 __u64 cno,
126 struct buffer_head *bh)
127{
128 return offset_in_folio(bh->b_folio, bh->b_data) +
129 nilfs_cpfile_get_offset(cpfile, cno) *
130 NILFS_MDT(cpfile)->mi_entry_size;
131}
132
133/**
134 * nilfs_cpfile_cp_snapshot_list_offset - calculate the byte offset of a
135 * checkpoint snapshot list in the folio
136 * containing it
137 * @cpfile: checkpoint file inode
138 * @cno: checkpoint number
139 * @bh: buffer head of block containing checkpoint indexed by @cno
140 *
141 * Return: Byte offset in the folio of the checkpoint snapshot list specified
142 * by @cno.
143 */
144static size_t nilfs_cpfile_cp_snapshot_list_offset(const struct inode *cpfile,
145 __u64 cno,
146 struct buffer_head *bh)
147{
148 return nilfs_cpfile_checkpoint_offset(cpfile, cno, bh) +
149 offsetof(struct nilfs_checkpoint, cp_snapshot_list);
150}
151
152/**
153 * nilfs_cpfile_ch_snapshot_list_offset - calculate the byte offset of the
154 * snapshot list in the header
155 *
156 * Return: Byte offset in the folio of the checkpoint snapshot list
157 */
158static size_t nilfs_cpfile_ch_snapshot_list_offset(void)
159{
160 return offsetof(struct nilfs_cpfile_header, ch_snapshot_list);
161}
162
163static int nilfs_cpfile_get_header_block(struct inode *cpfile,
164 struct buffer_head **bhp)
165{
166 int err = nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp);
167
168 if (unlikely(err == -ENOENT)) {
169 nilfs_error(cpfile->i_sb,
170 "missing header block in checkpoint metadata");
171 err = -EIO;
172 }
173 return err;
174}
175
176static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile,
177 __u64 cno,
178 int create,
179 struct buffer_head **bhp)
180{
181 return nilfs_mdt_get_block(cpfile,
182 nilfs_cpfile_get_blkoff(cpfile, cno),
183 create, nilfs_cpfile_block_init, bhp);
184}
185
186/**
187 * nilfs_cpfile_find_checkpoint_block - find and get a buffer on cpfile
188 * @cpfile: inode of cpfile
189 * @start_cno: start checkpoint number (inclusive)
190 * @end_cno: end checkpoint number (inclusive)
191 * @cnop: place to store the next checkpoint number
192 * @bhp: place to store a pointer to buffer_head struct
193 *
194 * Return Value: On success, it returns 0. On error, the following negative
195 * error code is returned.
196 *
197 * %-ENOMEM - Insufficient memory available.
198 *
199 * %-EIO - I/O error
200 *
201 * %-ENOENT - no block exists in the range.
202 */
203static int nilfs_cpfile_find_checkpoint_block(struct inode *cpfile,
204 __u64 start_cno, __u64 end_cno,
205 __u64 *cnop,
206 struct buffer_head **bhp)
207{
208 unsigned long start, end, blkoff;
209 int ret;
210
211 if (unlikely(start_cno > end_cno))
212 return -ENOENT;
213
214 start = nilfs_cpfile_get_blkoff(cpfile, start_cno);
215 end = nilfs_cpfile_get_blkoff(cpfile, end_cno);
216
217 ret = nilfs_mdt_find_block(cpfile, start, end, &blkoff, bhp);
218 if (!ret)
219 *cnop = (blkoff == start) ? start_cno :
220 nilfs_cpfile_first_checkpoint_in_block(cpfile, blkoff);
221 return ret;
222}
223
224static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile,
225 __u64 cno)
226{
227 return nilfs_mdt_delete_block(cpfile,
228 nilfs_cpfile_get_blkoff(cpfile, cno));
229}
230
231/**
232 * nilfs_cpfile_read_checkpoint - read a checkpoint entry in cpfile
233 * @cpfile: checkpoint file inode
234 * @cno: number of checkpoint entry to read
235 * @root: nilfs root object
236 * @ifile: ifile's inode to read and attach to @root
237 *
238 * This function imports checkpoint information from the checkpoint file and
239 * stores it to the inode file given by @ifile and the nilfs root object
240 * given by @root.
241 *
242 * Return: 0 on success, or the following negative error code on failure.
243 * * %-EINVAL - Invalid checkpoint.
244 * * %-ENOMEM - Insufficient memory available.
245 * * %-EIO - I/O error (including metadata corruption).
246 */
247int nilfs_cpfile_read_checkpoint(struct inode *cpfile, __u64 cno,
248 struct nilfs_root *root, struct inode *ifile)
249{
250 struct buffer_head *cp_bh;
251 struct nilfs_checkpoint *cp;
252 size_t offset;
253 int ret;
254
255 if (cno < 1 || cno > nilfs_mdt_cno(cpfile))
256 return -EINVAL;
257
258 down_read(&NILFS_MDT(cpfile)->mi_sem);
259 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
260 if (unlikely(ret < 0)) {
261 if (ret == -ENOENT)
262 ret = -EINVAL;
263 goto out_sem;
264 }
265
266 offset = nilfs_cpfile_checkpoint_offset(cpfile, cno, cp_bh);
267 cp = kmap_local_folio(cp_bh->b_folio, offset);
268 if (nilfs_checkpoint_invalid(cp)) {
269 ret = -EINVAL;
270 goto put_cp;
271 }
272
273 ret = nilfs_read_inode_common(ifile, &cp->cp_ifile_inode);
274 if (unlikely(ret)) {
275 /*
276 * Since this inode is on a checkpoint entry, treat errors
277 * as metadata corruption.
278 */
279 nilfs_err(cpfile->i_sb,
280 "ifile inode (checkpoint number=%llu) corrupted",
281 (unsigned long long)cno);
282 ret = -EIO;
283 goto put_cp;
284 }
285
286 /* Configure the nilfs root object */
287 atomic64_set(&root->inodes_count, le64_to_cpu(cp->cp_inodes_count));
288 atomic64_set(&root->blocks_count, le64_to_cpu(cp->cp_blocks_count));
289 root->ifile = ifile;
290
291put_cp:
292 kunmap_local(cp);
293 brelse(cp_bh);
294out_sem:
295 up_read(&NILFS_MDT(cpfile)->mi_sem);
296 return ret;
297}
298
299/**
300 * nilfs_cpfile_create_checkpoint - create a checkpoint entry on cpfile
301 * @cpfile: checkpoint file inode
302 * @cno: number of checkpoint to set up
303 *
304 * This function creates a checkpoint with the number specified by @cno on
305 * cpfile. If the specified checkpoint entry already exists due to a past
306 * failure, it will be reused without returning an error.
307 * In either case, the buffer of the block containing the checkpoint entry
308 * and the cpfile inode are made dirty for inclusion in the write log.
309 *
310 * Return: 0 on success, or the following negative error code on failure.
311 * * %-ENOMEM - Insufficient memory available.
312 * * %-EIO - I/O error (including metadata corruption).
313 * * %-EROFS - Read only filesystem
314 */
315int nilfs_cpfile_create_checkpoint(struct inode *cpfile, __u64 cno)
316{
317 struct buffer_head *header_bh, *cp_bh;
318 struct nilfs_cpfile_header *header;
319 struct nilfs_checkpoint *cp;
320 size_t offset;
321 int ret;
322
323 if (WARN_ON_ONCE(cno < 1))
324 return -EIO;
325
326 down_write(&NILFS_MDT(cpfile)->mi_sem);
327 ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
328 if (unlikely(ret < 0))
329 goto out_sem;
330
331 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 1, &cp_bh);
332 if (unlikely(ret < 0))
333 goto out_header;
334
335 offset = nilfs_cpfile_checkpoint_offset(cpfile, cno, cp_bh);
336 cp = kmap_local_folio(cp_bh->b_folio, offset);
337 if (nilfs_checkpoint_invalid(cp)) {
338 /* a newly-created checkpoint */
339 nilfs_checkpoint_clear_invalid(cp);
340 kunmap_local(cp);
341 if (!nilfs_cpfile_is_in_first(cpfile, cno))
342 nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh,
343 1);
344
345 header = kmap_local_folio(header_bh->b_folio, 0);
346 le64_add_cpu(&header->ch_ncheckpoints, 1);
347 kunmap_local(header);
348 mark_buffer_dirty(header_bh);
349 } else {
350 kunmap_local(cp);
351 }
352
353 /* Force the buffer and the inode to become dirty */
354 mark_buffer_dirty(cp_bh);
355 brelse(cp_bh);
356 nilfs_mdt_mark_dirty(cpfile);
357
358out_header:
359 brelse(header_bh);
360
361out_sem:
362 up_write(&NILFS_MDT(cpfile)->mi_sem);
363 return ret;
364}
365
366/**
367 * nilfs_cpfile_finalize_checkpoint - fill in a checkpoint entry in cpfile
368 * @cpfile: checkpoint file inode
369 * @cno: checkpoint number
370 * @root: nilfs root object
371 * @blkinc: number of blocks added by this checkpoint
372 * @ctime: checkpoint creation time
373 * @minor: minor checkpoint flag
374 *
375 * This function completes the checkpoint entry numbered by @cno in the
376 * cpfile with the data given by the arguments @root, @blkinc, @ctime, and
377 * @minor.
378 *
379 * Return: 0 on success, or the following negative error code on failure.
380 * * %-ENOMEM - Insufficient memory available.
381 * * %-EIO - I/O error (including metadata corruption).
382 */
383int nilfs_cpfile_finalize_checkpoint(struct inode *cpfile, __u64 cno,
384 struct nilfs_root *root, __u64 blkinc,
385 time64_t ctime, bool minor)
386{
387 struct buffer_head *cp_bh;
388 struct nilfs_checkpoint *cp;
389 size_t offset;
390 int ret;
391
392 if (WARN_ON_ONCE(cno < 1))
393 return -EIO;
394
395 down_write(&NILFS_MDT(cpfile)->mi_sem);
396 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
397 if (unlikely(ret < 0)) {
398 if (ret == -ENOENT)
399 goto error;
400 goto out_sem;
401 }
402
403 offset = nilfs_cpfile_checkpoint_offset(cpfile, cno, cp_bh);
404 cp = kmap_local_folio(cp_bh->b_folio, offset);
405 if (unlikely(nilfs_checkpoint_invalid(cp))) {
406 kunmap_local(cp);
407 brelse(cp_bh);
408 goto error;
409 }
410
411 cp->cp_snapshot_list.ssl_next = 0;
412 cp->cp_snapshot_list.ssl_prev = 0;
413 cp->cp_inodes_count = cpu_to_le64(atomic64_read(&root->inodes_count));
414 cp->cp_blocks_count = cpu_to_le64(atomic64_read(&root->blocks_count));
415 cp->cp_nblk_inc = cpu_to_le64(blkinc);
416 cp->cp_create = cpu_to_le64(ctime);
417 cp->cp_cno = cpu_to_le64(cno);
418
419 if (minor)
420 nilfs_checkpoint_set_minor(cp);
421 else
422 nilfs_checkpoint_clear_minor(cp);
423
424 nilfs_write_inode_common(root->ifile, &cp->cp_ifile_inode);
425 nilfs_bmap_write(NILFS_I(root->ifile)->i_bmap, &cp->cp_ifile_inode);
426
427 kunmap_local(cp);
428 brelse(cp_bh);
429out_sem:
430 up_write(&NILFS_MDT(cpfile)->mi_sem);
431 return ret;
432
433error:
434 nilfs_error(cpfile->i_sb,
435 "checkpoint finalization failed due to metadata corruption.");
436 ret = -EIO;
437 goto out_sem;
438}
439
440/**
441 * nilfs_cpfile_delete_checkpoints - delete checkpoints
442 * @cpfile: inode of checkpoint file
443 * @start: start checkpoint number
444 * @end: end checkpoint number
445 *
446 * Description: nilfs_cpfile_delete_checkpoints() deletes the checkpoints in
447 * the period from @start to @end, excluding @end itself. The checkpoints
448 * which have been already deleted are ignored.
449 *
450 * Return Value: On success, 0 is returned. On error, one of the following
451 * negative error codes is returned.
452 *
453 * %-EIO - I/O error.
454 *
455 * %-ENOMEM - Insufficient amount of memory available.
456 *
457 * %-EINVAL - invalid checkpoints.
458 */
459int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
460 __u64 start,
461 __u64 end)
462{
463 struct buffer_head *header_bh, *cp_bh;
464 struct nilfs_cpfile_header *header;
465 struct nilfs_checkpoint *cp;
466 size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
467 __u64 cno;
468 size_t offset;
469 void *kaddr;
470 unsigned long tnicps;
471 int ret, ncps, nicps, nss, count, i;
472
473 if (unlikely(start == 0 || start > end)) {
474 nilfs_err(cpfile->i_sb,
475 "cannot delete checkpoints: invalid range [%llu, %llu)",
476 (unsigned long long)start, (unsigned long long)end);
477 return -EINVAL;
478 }
479
480 down_write(&NILFS_MDT(cpfile)->mi_sem);
481
482 ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
483 if (ret < 0)
484 goto out_sem;
485 tnicps = 0;
486 nss = 0;
487
488 for (cno = start; cno < end; cno += ncps) {
489 ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, end);
490 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
491 if (ret < 0) {
492 if (ret != -ENOENT)
493 break;
494 /* skip hole */
495 ret = 0;
496 continue;
497 }
498
499 offset = nilfs_cpfile_checkpoint_offset(cpfile, cno, cp_bh);
500 cp = kaddr = kmap_local_folio(cp_bh->b_folio, offset);
501 nicps = 0;
502 for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) {
503 if (nilfs_checkpoint_snapshot(cp)) {
504 nss++;
505 } else if (!nilfs_checkpoint_invalid(cp)) {
506 nilfs_checkpoint_set_invalid(cp);
507 nicps++;
508 }
509 }
510 kunmap_local(kaddr);
511
512 if (nicps <= 0) {
513 brelse(cp_bh);
514 continue;
515 }
516
517 tnicps += nicps;
518 mark_buffer_dirty(cp_bh);
519 nilfs_mdt_mark_dirty(cpfile);
520 if (nilfs_cpfile_is_in_first(cpfile, cno)) {
521 brelse(cp_bh);
522 continue;
523 }
524
525 count = nilfs_cpfile_block_sub_valid_checkpoints(cpfile, cp_bh,
526 nicps);
527 brelse(cp_bh);
528 if (count)
529 continue;
530
531 /* Delete the block if there are no more valid checkpoints */
532 ret = nilfs_cpfile_delete_checkpoint_block(cpfile, cno);
533 if (unlikely(ret)) {
534 nilfs_err(cpfile->i_sb,
535 "error %d deleting checkpoint block", ret);
536 break;
537 }
538 }
539
540 if (tnicps > 0) {
541 header = kmap_local_folio(header_bh->b_folio, 0);
542 le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
543 mark_buffer_dirty(header_bh);
544 nilfs_mdt_mark_dirty(cpfile);
545 kunmap_local(header);
546 }
547
548 brelse(header_bh);
549 if (nss > 0)
550 ret = -EBUSY;
551
552 out_sem:
553 up_write(&NILFS_MDT(cpfile)->mi_sem);
554 return ret;
555}
556
557static void nilfs_cpfile_checkpoint_to_cpinfo(struct inode *cpfile,
558 struct nilfs_checkpoint *cp,
559 struct nilfs_cpinfo *ci)
560{
561 ci->ci_flags = le32_to_cpu(cp->cp_flags);
562 ci->ci_cno = le64_to_cpu(cp->cp_cno);
563 ci->ci_create = le64_to_cpu(cp->cp_create);
564 ci->ci_nblk_inc = le64_to_cpu(cp->cp_nblk_inc);
565 ci->ci_inodes_count = le64_to_cpu(cp->cp_inodes_count);
566 ci->ci_blocks_count = le64_to_cpu(cp->cp_blocks_count);
567 ci->ci_next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
568}
569
570static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
571 void *buf, unsigned int cisz,
572 size_t nci)
573{
574 struct nilfs_checkpoint *cp;
575 struct nilfs_cpinfo *ci = buf;
576 struct buffer_head *bh;
577 size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
578 __u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop;
579 size_t offset;
580 void *kaddr;
581 int n, ret;
582 int ncps, i;
583
584 if (cno == 0)
585 return -ENOENT; /* checkpoint number 0 is invalid */
586 down_read(&NILFS_MDT(cpfile)->mi_sem);
587
588 for (n = 0; n < nci; cno += ncps) {
589 ret = nilfs_cpfile_find_checkpoint_block(
590 cpfile, cno, cur_cno - 1, &cno, &bh);
591 if (ret < 0) {
592 if (likely(ret == -ENOENT))
593 break;
594 goto out;
595 }
596 ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno);
597
598 offset = nilfs_cpfile_checkpoint_offset(cpfile, cno, bh);
599 cp = kaddr = kmap_local_folio(bh->b_folio, offset);
600 for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
601 if (!nilfs_checkpoint_invalid(cp)) {
602 nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp,
603 ci);
604 ci = (void *)ci + cisz;
605 n++;
606 }
607 }
608 kunmap_local(kaddr);
609 brelse(bh);
610 }
611
612 ret = n;
613 if (n > 0) {
614 ci = (void *)ci - cisz;
615 *cnop = ci->ci_cno + 1;
616 }
617
618 out:
619 up_read(&NILFS_MDT(cpfile)->mi_sem);
620 return ret;
621}
622
623static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
624 void *buf, unsigned int cisz,
625 size_t nci)
626{
627 struct buffer_head *bh;
628 struct nilfs_cpfile_header *header;
629 struct nilfs_checkpoint *cp;
630 struct nilfs_cpinfo *ci = buf;
631 __u64 curr = *cnop, next;
632 unsigned long curr_blkoff, next_blkoff;
633 size_t offset;
634 int n = 0, ret;
635
636 down_read(&NILFS_MDT(cpfile)->mi_sem);
637
638 if (curr == 0) {
639 ret = nilfs_cpfile_get_header_block(cpfile, &bh);
640 if (ret < 0)
641 goto out;
642 header = kmap_local_folio(bh->b_folio, 0);
643 curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
644 kunmap_local(header);
645 brelse(bh);
646 if (curr == 0) {
647 ret = 0;
648 goto out;
649 }
650 } else if (unlikely(curr == ~(__u64)0)) {
651 ret = 0;
652 goto out;
653 }
654
655 curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr);
656 ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh);
657 if (unlikely(ret < 0)) {
658 if (ret == -ENOENT)
659 ret = 0; /* No snapshots (started from a hole block) */
660 goto out;
661 }
662 offset = nilfs_cpfile_checkpoint_offset(cpfile, curr, bh);
663 cp = kmap_local_folio(bh->b_folio, offset);
664 while (n < nci) {
665 curr = ~(__u64)0; /* Terminator */
666 if (unlikely(nilfs_checkpoint_invalid(cp) ||
667 !nilfs_checkpoint_snapshot(cp)))
668 break;
669 nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci);
670 ci = (void *)ci + cisz;
671 n++;
672 next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
673 if (next == 0)
674 break; /* reach end of the snapshot list */
675
676 kunmap_local(cp);
677 next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
678 if (curr_blkoff != next_blkoff) {
679 brelse(bh);
680 ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
681 0, &bh);
682 if (unlikely(ret < 0)) {
683 WARN_ON(ret == -ENOENT);
684 goto out;
685 }
686 }
687 offset = nilfs_cpfile_checkpoint_offset(cpfile, next, bh);
688 cp = kmap_local_folio(bh->b_folio, offset);
689 curr = next;
690 curr_blkoff = next_blkoff;
691 }
692 kunmap_local(cp);
693 brelse(bh);
694 *cnop = curr;
695 ret = n;
696
697 out:
698 up_read(&NILFS_MDT(cpfile)->mi_sem);
699 return ret;
700}
701
702/**
703 * nilfs_cpfile_get_cpinfo - get information on checkpoints
704 * @cpfile: checkpoint file inode
705 * @cnop: place to pass a starting checkpoint number and receive a
706 * checkpoint number to continue the search
707 * @mode: mode of checkpoints that the caller wants to retrieve
708 * @buf: buffer for storing checkpoints' information
709 * @cisz: byte size of one checkpoint info item in array
710 * @nci: number of checkpoint info items to retrieve
711 *
712 * nilfs_cpfile_get_cpinfo() searches for checkpoints in @mode state
713 * starting from the checkpoint number stored in @cnop, and stores
714 * information about found checkpoints in @buf.
715 * The buffer pointed to by @buf must be large enough to store information
716 * for @nci checkpoints. If at least one checkpoint information is
717 * successfully retrieved, @cnop is updated to point to the checkpoint
718 * number to continue searching.
719 *
720 * Return: Count of checkpoint info items stored in the output buffer on
721 * success, or the following negative error code on failure.
722 * * %-EINVAL - Invalid checkpoint mode.
723 * * %-ENOMEM - Insufficient memory available.
724 * * %-EIO - I/O error (including metadata corruption).
725 * * %-ENOENT - Invalid checkpoint number specified.
726 */
727
728ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
729 void *buf, unsigned int cisz, size_t nci)
730{
731 switch (mode) {
732 case NILFS_CHECKPOINT:
733 return nilfs_cpfile_do_get_cpinfo(cpfile, cnop, buf, cisz, nci);
734 case NILFS_SNAPSHOT:
735 return nilfs_cpfile_do_get_ssinfo(cpfile, cnop, buf, cisz, nci);
736 default:
737 return -EINVAL;
738 }
739}
740
741/**
742 * nilfs_cpfile_delete_checkpoint - delete a checkpoint
743 * @cpfile: checkpoint file inode
744 * @cno: checkpoint number to delete
745 *
746 * Return: 0 on success, or the following negative error code on failure.
747 * * %-EBUSY - Checkpoint in use (snapshot specified).
748 * * %-EIO - I/O error (including metadata corruption).
749 * * %-ENOENT - No valid checkpoint found.
750 * * %-ENOMEM - Insufficient memory available.
751 */
752int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno)
753{
754 struct nilfs_cpinfo ci;
755 __u64 tcno = cno;
756 ssize_t nci;
757
758 nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, sizeof(ci), 1);
759 if (nci < 0)
760 return nci;
761 else if (nci == 0 || ci.ci_cno != cno)
762 return -ENOENT;
763 else if (nilfs_cpinfo_snapshot(&ci))
764 return -EBUSY;
765
766 return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1);
767}
768
769static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
770{
771 struct buffer_head *header_bh, *curr_bh, *prev_bh, *cp_bh;
772 struct nilfs_cpfile_header *header;
773 struct nilfs_checkpoint *cp;
774 struct nilfs_snapshot_list *list;
775 __u64 curr, prev;
776 unsigned long curr_blkoff, prev_blkoff;
777 size_t offset, curr_list_offset, prev_list_offset;
778 int ret;
779
780 if (cno == 0)
781 return -ENOENT; /* checkpoint number 0 is invalid */
782 down_write(&NILFS_MDT(cpfile)->mi_sem);
783
784 ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
785 if (unlikely(ret < 0))
786 goto out_sem;
787
788 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
789 if (ret < 0)
790 goto out_header;
791
792 offset = nilfs_cpfile_checkpoint_offset(cpfile, cno, cp_bh);
793 cp = kmap_local_folio(cp_bh->b_folio, offset);
794 if (nilfs_checkpoint_invalid(cp)) {
795 ret = -ENOENT;
796 kunmap_local(cp);
797 goto out_cp;
798 }
799 if (nilfs_checkpoint_snapshot(cp)) {
800 ret = 0;
801 kunmap_local(cp);
802 goto out_cp;
803 }
804 kunmap_local(cp);
805
806 /*
807 * Find the last snapshot before the checkpoint being changed to
808 * snapshot mode by going backwards through the snapshot list.
809 * Set "prev" to its checkpoint number, or 0 if not found.
810 */
811 header = kmap_local_folio(header_bh->b_folio, 0);
812 list = &header->ch_snapshot_list;
813 curr_bh = header_bh;
814 get_bh(curr_bh);
815 curr = 0;
816 curr_blkoff = 0;
817 curr_list_offset = nilfs_cpfile_ch_snapshot_list_offset();
818 prev = le64_to_cpu(list->ssl_prev);
819 while (prev > cno) {
820 prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
821 curr = prev;
822 kunmap_local(list);
823 if (curr_blkoff != prev_blkoff) {
824 brelse(curr_bh);
825 ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
826 0, &curr_bh);
827 if (unlikely(ret < 0))
828 goto out_cp;
829 }
830 curr_list_offset = nilfs_cpfile_cp_snapshot_list_offset(
831 cpfile, curr, curr_bh);
832 list = kmap_local_folio(curr_bh->b_folio, curr_list_offset);
833 curr_blkoff = prev_blkoff;
834 prev = le64_to_cpu(list->ssl_prev);
835 }
836 kunmap_local(list);
837
838 if (prev != 0) {
839 ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
840 &prev_bh);
841 if (ret < 0)
842 goto out_curr;
843
844 prev_list_offset = nilfs_cpfile_cp_snapshot_list_offset(
845 cpfile, prev, prev_bh);
846 } else {
847 prev_bh = header_bh;
848 get_bh(prev_bh);
849 prev_list_offset = nilfs_cpfile_ch_snapshot_list_offset();
850 }
851
852 /* Update the list entry for the next snapshot */
853 list = kmap_local_folio(curr_bh->b_folio, curr_list_offset);
854 list->ssl_prev = cpu_to_le64(cno);
855 kunmap_local(list);
856
857 /* Update the checkpoint being changed to a snapshot */
858 offset = nilfs_cpfile_checkpoint_offset(cpfile, cno, cp_bh);
859 cp = kmap_local_folio(cp_bh->b_folio, offset);
860 cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
861 cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
862 nilfs_checkpoint_set_snapshot(cp);
863 kunmap_local(cp);
864
865 /* Update the list entry for the previous snapshot */
866 list = kmap_local_folio(prev_bh->b_folio, prev_list_offset);
867 list->ssl_next = cpu_to_le64(cno);
868 kunmap_local(list);
869
870 /* Update the statistics in the header */
871 header = kmap_local_folio(header_bh->b_folio, 0);
872 le64_add_cpu(&header->ch_nsnapshots, 1);
873 kunmap_local(header);
874
875 mark_buffer_dirty(prev_bh);
876 mark_buffer_dirty(curr_bh);
877 mark_buffer_dirty(cp_bh);
878 mark_buffer_dirty(header_bh);
879 nilfs_mdt_mark_dirty(cpfile);
880
881 brelse(prev_bh);
882
883 out_curr:
884 brelse(curr_bh);
885
886 out_cp:
887 brelse(cp_bh);
888
889 out_header:
890 brelse(header_bh);
891
892 out_sem:
893 up_write(&NILFS_MDT(cpfile)->mi_sem);
894 return ret;
895}
896
897static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
898{
899 struct buffer_head *header_bh, *next_bh, *prev_bh, *cp_bh;
900 struct nilfs_cpfile_header *header;
901 struct nilfs_checkpoint *cp;
902 struct nilfs_snapshot_list *list;
903 __u64 next, prev;
904 size_t offset, next_list_offset, prev_list_offset;
905 int ret;
906
907 if (cno == 0)
908 return -ENOENT; /* checkpoint number 0 is invalid */
909 down_write(&NILFS_MDT(cpfile)->mi_sem);
910
911 ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
912 if (unlikely(ret < 0))
913 goto out_sem;
914
915 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
916 if (ret < 0)
917 goto out_header;
918
919 offset = nilfs_cpfile_checkpoint_offset(cpfile, cno, cp_bh);
920 cp = kmap_local_folio(cp_bh->b_folio, offset);
921 if (nilfs_checkpoint_invalid(cp)) {
922 ret = -ENOENT;
923 kunmap_local(cp);
924 goto out_cp;
925 }
926 if (!nilfs_checkpoint_snapshot(cp)) {
927 ret = 0;
928 kunmap_local(cp);
929 goto out_cp;
930 }
931
932 list = &cp->cp_snapshot_list;
933 next = le64_to_cpu(list->ssl_next);
934 prev = le64_to_cpu(list->ssl_prev);
935 kunmap_local(cp);
936
937 if (next != 0) {
938 ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0,
939 &next_bh);
940 if (ret < 0)
941 goto out_cp;
942
943 next_list_offset = nilfs_cpfile_cp_snapshot_list_offset(
944 cpfile, next, next_bh);
945 } else {
946 next_bh = header_bh;
947 get_bh(next_bh);
948 next_list_offset = nilfs_cpfile_ch_snapshot_list_offset();
949 }
950 if (prev != 0) {
951 ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
952 &prev_bh);
953 if (ret < 0)
954 goto out_next;
955
956 prev_list_offset = nilfs_cpfile_cp_snapshot_list_offset(
957 cpfile, prev, prev_bh);
958 } else {
959 prev_bh = header_bh;
960 get_bh(prev_bh);
961 prev_list_offset = nilfs_cpfile_ch_snapshot_list_offset();
962 }
963
964 /* Update the list entry for the next snapshot */
965 list = kmap_local_folio(next_bh->b_folio, next_list_offset);
966 list->ssl_prev = cpu_to_le64(prev);
967 kunmap_local(list);
968
969 /* Update the list entry for the previous snapshot */
970 list = kmap_local_folio(prev_bh->b_folio, prev_list_offset);
971 list->ssl_next = cpu_to_le64(next);
972 kunmap_local(list);
973
974 /* Update the snapshot being changed back to a plain checkpoint */
975 cp = kmap_local_folio(cp_bh->b_folio, offset);
976 cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
977 cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
978 nilfs_checkpoint_clear_snapshot(cp);
979 kunmap_local(cp);
980
981 /* Update the statistics in the header */
982 header = kmap_local_folio(header_bh->b_folio, 0);
983 le64_add_cpu(&header->ch_nsnapshots, -1);
984 kunmap_local(header);
985
986 mark_buffer_dirty(next_bh);
987 mark_buffer_dirty(prev_bh);
988 mark_buffer_dirty(cp_bh);
989 mark_buffer_dirty(header_bh);
990 nilfs_mdt_mark_dirty(cpfile);
991
992 brelse(prev_bh);
993
994 out_next:
995 brelse(next_bh);
996
997 out_cp:
998 brelse(cp_bh);
999
1000 out_header:
1001 brelse(header_bh);
1002
1003 out_sem:
1004 up_write(&NILFS_MDT(cpfile)->mi_sem);
1005 return ret;
1006}
1007
1008/**
1009 * nilfs_cpfile_is_snapshot - determine if checkpoint is a snapshot
1010 * @cpfile: inode of checkpoint file
1011 * @cno: checkpoint number
1012 *
1013 * Return: 1 if the checkpoint specified by @cno is a snapshot, 0 if not, or
1014 * the following negative error code on failure.
1015 * * %-EIO - I/O error (including metadata corruption).
1016 * * %-ENOENT - No such checkpoint.
1017 * * %-ENOMEM - Insufficient memory available.
1018 */
1019int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
1020{
1021 struct buffer_head *bh;
1022 struct nilfs_checkpoint *cp;
1023 size_t offset;
1024 int ret;
1025
1026 /*
1027 * CP number is invalid if it's zero or larger than the
1028 * largest existing one.
1029 */
1030 if (cno == 0 || cno >= nilfs_mdt_cno(cpfile))
1031 return -ENOENT;
1032 down_read(&NILFS_MDT(cpfile)->mi_sem);
1033
1034 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
1035 if (ret < 0)
1036 goto out;
1037
1038 offset = nilfs_cpfile_checkpoint_offset(cpfile, cno, bh);
1039 cp = kmap_local_folio(bh->b_folio, offset);
1040 if (nilfs_checkpoint_invalid(cp))
1041 ret = -ENOENT;
1042 else
1043 ret = nilfs_checkpoint_snapshot(cp);
1044 kunmap_local(cp);
1045 brelse(bh);
1046
1047 out:
1048 up_read(&NILFS_MDT(cpfile)->mi_sem);
1049 return ret;
1050}
1051
1052/**
1053 * nilfs_cpfile_change_cpmode - change checkpoint mode
1054 * @cpfile: inode of checkpoint file
1055 * @cno: checkpoint number
1056 * @mode: mode of checkpoint
1057 *
1058 * Description: nilfs_change_cpmode() changes the mode of the checkpoint
1059 * specified by @cno. The mode @mode is NILFS_CHECKPOINT or NILFS_SNAPSHOT.
1060 *
1061 * Return Value: On success, 0 is returned. On error, one of the following
1062 * negative error codes is returned.
1063 *
1064 * %-EIO - I/O error.
1065 *
1066 * %-ENOMEM - Insufficient amount of memory available.
1067 *
1068 * %-ENOENT - No such checkpoint.
1069 */
1070int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
1071{
1072 int ret;
1073
1074 switch (mode) {
1075 case NILFS_CHECKPOINT:
1076 if (nilfs_checkpoint_is_mounted(cpfile->i_sb, cno))
1077 /*
1078 * Current implementation does not have to protect
1079 * plain read-only mounts since they are exclusive
1080 * with a read/write mount and are protected from the
1081 * cleaner.
1082 */
1083 ret = -EBUSY;
1084 else
1085 ret = nilfs_cpfile_clear_snapshot(cpfile, cno);
1086 return ret;
1087 case NILFS_SNAPSHOT:
1088 return nilfs_cpfile_set_snapshot(cpfile, cno);
1089 default:
1090 return -EINVAL;
1091 }
1092}
1093
1094/**
1095 * nilfs_cpfile_get_stat - get checkpoint statistics
1096 * @cpfile: inode of checkpoint file
1097 * @cpstat: pointer to a structure of checkpoint statistics
1098 *
1099 * Description: nilfs_cpfile_get_stat() returns information about checkpoints.
1100 *
1101 * Return Value: On success, 0 is returned, and checkpoints information is
1102 * stored in the place pointed by @cpstat. On error, one of the following
1103 * negative error codes is returned.
1104 *
1105 * %-EIO - I/O error.
1106 *
1107 * %-ENOMEM - Insufficient amount of memory available.
1108 */
1109int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
1110{
1111 struct buffer_head *bh;
1112 struct nilfs_cpfile_header *header;
1113 int ret;
1114
1115 down_read(&NILFS_MDT(cpfile)->mi_sem);
1116
1117 ret = nilfs_cpfile_get_header_block(cpfile, &bh);
1118 if (ret < 0)
1119 goto out_sem;
1120 header = kmap_local_folio(bh->b_folio, 0);
1121 cpstat->cs_cno = nilfs_mdt_cno(cpfile);
1122 cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints);
1123 cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots);
1124 kunmap_local(header);
1125 brelse(bh);
1126
1127 out_sem:
1128 up_read(&NILFS_MDT(cpfile)->mi_sem);
1129 return ret;
1130}
1131
1132/**
1133 * nilfs_cpfile_read - read or get cpfile inode
1134 * @sb: super block instance
1135 * @cpsize: size of a checkpoint entry
1136 * @raw_inode: on-disk cpfile inode
1137 * @inodep: buffer to store the inode
1138 */
1139int nilfs_cpfile_read(struct super_block *sb, size_t cpsize,
1140 struct nilfs_inode *raw_inode, struct inode **inodep)
1141{
1142 struct inode *cpfile;
1143 int err;
1144
1145 if (cpsize > sb->s_blocksize) {
1146 nilfs_err(sb, "too large checkpoint size: %zu bytes", cpsize);
1147 return -EINVAL;
1148 } else if (cpsize < NILFS_MIN_CHECKPOINT_SIZE) {
1149 nilfs_err(sb, "too small checkpoint size: %zu bytes", cpsize);
1150 return -EINVAL;
1151 }
1152
1153 cpfile = nilfs_iget_locked(sb, NULL, NILFS_CPFILE_INO);
1154 if (unlikely(!cpfile))
1155 return -ENOMEM;
1156 if (!(cpfile->i_state & I_NEW))
1157 goto out;
1158
1159 err = nilfs_mdt_init(cpfile, NILFS_MDT_GFP, 0);
1160 if (err)
1161 goto failed;
1162
1163 nilfs_mdt_set_entry_size(cpfile, cpsize,
1164 sizeof(struct nilfs_cpfile_header));
1165
1166 err = nilfs_read_inode_common(cpfile, raw_inode);
1167 if (err)
1168 goto failed;
1169
1170 unlock_new_inode(cpfile);
1171 out:
1172 *inodep = cpfile;
1173 return 0;
1174 failed:
1175 iget_failed(cpfile);
1176 return err;
1177}
1/*
2 * cpfile.c - NILFS checkpoint file.
3 *
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 *
20 * Written by Koji Sato <koji@osrg.net>.
21 */
22
23#include <linux/kernel.h>
24#include <linux/fs.h>
25#include <linux/string.h>
26#include <linux/buffer_head.h>
27#include <linux/errno.h>
28#include <linux/nilfs2_fs.h>
29#include "mdt.h"
30#include "cpfile.h"
31
32
33static inline unsigned long
34nilfs_cpfile_checkpoints_per_block(const struct inode *cpfile)
35{
36 return NILFS_MDT(cpfile)->mi_entries_per_block;
37}
38
39/* block number from the beginning of the file */
40static unsigned long
41nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno)
42{
43 __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
44 do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
45 return (unsigned long)tcno;
46}
47
48/* offset in block */
49static unsigned long
50nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno)
51{
52 __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
53 return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
54}
55
56static unsigned long
57nilfs_cpfile_checkpoints_in_block(const struct inode *cpfile,
58 __u64 curr,
59 __u64 max)
60{
61 return min_t(__u64,
62 nilfs_cpfile_checkpoints_per_block(cpfile) -
63 nilfs_cpfile_get_offset(cpfile, curr),
64 max - curr);
65}
66
67static inline int nilfs_cpfile_is_in_first(const struct inode *cpfile,
68 __u64 cno)
69{
70 return nilfs_cpfile_get_blkoff(cpfile, cno) == 0;
71}
72
73static unsigned int
74nilfs_cpfile_block_add_valid_checkpoints(const struct inode *cpfile,
75 struct buffer_head *bh,
76 void *kaddr,
77 unsigned int n)
78{
79 struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
80 unsigned int count;
81
82 count = le32_to_cpu(cp->cp_checkpoints_count) + n;
83 cp->cp_checkpoints_count = cpu_to_le32(count);
84 return count;
85}
86
87static unsigned int
88nilfs_cpfile_block_sub_valid_checkpoints(const struct inode *cpfile,
89 struct buffer_head *bh,
90 void *kaddr,
91 unsigned int n)
92{
93 struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
94 unsigned int count;
95
96 WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n);
97 count = le32_to_cpu(cp->cp_checkpoints_count) - n;
98 cp->cp_checkpoints_count = cpu_to_le32(count);
99 return count;
100}
101
102static inline struct nilfs_cpfile_header *
103nilfs_cpfile_block_get_header(const struct inode *cpfile,
104 struct buffer_head *bh,
105 void *kaddr)
106{
107 return kaddr + bh_offset(bh);
108}
109
110static struct nilfs_checkpoint *
111nilfs_cpfile_block_get_checkpoint(const struct inode *cpfile, __u64 cno,
112 struct buffer_head *bh,
113 void *kaddr)
114{
115 return kaddr + bh_offset(bh) + nilfs_cpfile_get_offset(cpfile, cno) *
116 NILFS_MDT(cpfile)->mi_entry_size;
117}
118
119static void nilfs_cpfile_block_init(struct inode *cpfile,
120 struct buffer_head *bh,
121 void *kaddr)
122{
123 struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
124 size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
125 int n = nilfs_cpfile_checkpoints_per_block(cpfile);
126
127 while (n-- > 0) {
128 nilfs_checkpoint_set_invalid(cp);
129 cp = (void *)cp + cpsz;
130 }
131}
132
133static inline int nilfs_cpfile_get_header_block(struct inode *cpfile,
134 struct buffer_head **bhp)
135{
136 return nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp);
137}
138
139static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile,
140 __u64 cno,
141 int create,
142 struct buffer_head **bhp)
143{
144 return nilfs_mdt_get_block(cpfile,
145 nilfs_cpfile_get_blkoff(cpfile, cno),
146 create, nilfs_cpfile_block_init, bhp);
147}
148
149static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile,
150 __u64 cno)
151{
152 return nilfs_mdt_delete_block(cpfile,
153 nilfs_cpfile_get_blkoff(cpfile, cno));
154}
155
156/**
157 * nilfs_cpfile_get_checkpoint - get a checkpoint
158 * @cpfile: inode of checkpoint file
159 * @cno: checkpoint number
160 * @create: create flag
161 * @cpp: pointer to a checkpoint
162 * @bhp: pointer to a buffer head
163 *
164 * Description: nilfs_cpfile_get_checkpoint() acquires the checkpoint
165 * specified by @cno. A new checkpoint will be created if @cno is the current
166 * checkpoint number and @create is nonzero.
167 *
168 * Return Value: On success, 0 is returned, and the checkpoint and the
169 * buffer head of the buffer on which the checkpoint is located are stored in
170 * the place pointed by @cpp and @bhp, respectively. On error, one of the
171 * following negative error codes is returned.
172 *
173 * %-EIO - I/O error.
174 *
175 * %-ENOMEM - Insufficient amount of memory available.
176 *
177 * %-ENOENT - No such checkpoint.
178 *
179 * %-EINVAL - invalid checkpoint.
180 */
181int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
182 __u64 cno,
183 int create,
184 struct nilfs_checkpoint **cpp,
185 struct buffer_head **bhp)
186{
187 struct buffer_head *header_bh, *cp_bh;
188 struct nilfs_cpfile_header *header;
189 struct nilfs_checkpoint *cp;
190 void *kaddr;
191 int ret;
192
193 if (unlikely(cno < 1 || cno > nilfs_mdt_cno(cpfile) ||
194 (cno < nilfs_mdt_cno(cpfile) && create)))
195 return -EINVAL;
196
197 down_write(&NILFS_MDT(cpfile)->mi_sem);
198
199 ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
200 if (ret < 0)
201 goto out_sem;
202 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, create, &cp_bh);
203 if (ret < 0)
204 goto out_header;
205 kaddr = kmap(cp_bh->b_page);
206 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
207 if (nilfs_checkpoint_invalid(cp)) {
208 if (!create) {
209 kunmap(cp_bh->b_page);
210 brelse(cp_bh);
211 ret = -ENOENT;
212 goto out_header;
213 }
214 /* a newly-created checkpoint */
215 nilfs_checkpoint_clear_invalid(cp);
216 if (!nilfs_cpfile_is_in_first(cpfile, cno))
217 nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh,
218 kaddr, 1);
219 mark_buffer_dirty(cp_bh);
220
221 kaddr = kmap_atomic(header_bh->b_page);
222 header = nilfs_cpfile_block_get_header(cpfile, header_bh,
223 kaddr);
224 le64_add_cpu(&header->ch_ncheckpoints, 1);
225 kunmap_atomic(kaddr);
226 mark_buffer_dirty(header_bh);
227 nilfs_mdt_mark_dirty(cpfile);
228 }
229
230 if (cpp != NULL)
231 *cpp = cp;
232 *bhp = cp_bh;
233
234 out_header:
235 brelse(header_bh);
236
237 out_sem:
238 up_write(&NILFS_MDT(cpfile)->mi_sem);
239 return ret;
240}
241
242/**
243 * nilfs_cpfile_put_checkpoint - put a checkpoint
244 * @cpfile: inode of checkpoint file
245 * @cno: checkpoint number
246 * @bh: buffer head
247 *
248 * Description: nilfs_cpfile_put_checkpoint() releases the checkpoint
249 * specified by @cno. @bh must be the buffer head which has been returned by
250 * a previous call to nilfs_cpfile_get_checkpoint() with @cno.
251 */
252void nilfs_cpfile_put_checkpoint(struct inode *cpfile, __u64 cno,
253 struct buffer_head *bh)
254{
255 kunmap(bh->b_page);
256 brelse(bh);
257}
258
259/**
260 * nilfs_cpfile_delete_checkpoints - delete checkpoints
261 * @cpfile: inode of checkpoint file
262 * @start: start checkpoint number
263 * @end: end checkpoint numer
264 *
265 * Description: nilfs_cpfile_delete_checkpoints() deletes the checkpoints in
266 * the period from @start to @end, excluding @end itself. The checkpoints
267 * which have been already deleted are ignored.
268 *
269 * Return Value: On success, 0 is returned. On error, one of the following
270 * negative error codes is returned.
271 *
272 * %-EIO - I/O error.
273 *
274 * %-ENOMEM - Insufficient amount of memory available.
275 *
276 * %-EINVAL - invalid checkpoints.
277 */
278int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
279 __u64 start,
280 __u64 end)
281{
282 struct buffer_head *header_bh, *cp_bh;
283 struct nilfs_cpfile_header *header;
284 struct nilfs_checkpoint *cp;
285 size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
286 __u64 cno;
287 void *kaddr;
288 unsigned long tnicps;
289 int ret, ncps, nicps, nss, count, i;
290
291 if (unlikely(start == 0 || start > end)) {
292 printk(KERN_ERR "%s: invalid range of checkpoint numbers: "
293 "[%llu, %llu)\n", __func__,
294 (unsigned long long)start, (unsigned long long)end);
295 return -EINVAL;
296 }
297
298 down_write(&NILFS_MDT(cpfile)->mi_sem);
299
300 ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
301 if (ret < 0)
302 goto out_sem;
303 tnicps = 0;
304 nss = 0;
305
306 for (cno = start; cno < end; cno += ncps) {
307 ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, end);
308 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
309 if (ret < 0) {
310 if (ret != -ENOENT)
311 break;
312 /* skip hole */
313 ret = 0;
314 continue;
315 }
316
317 kaddr = kmap_atomic(cp_bh->b_page);
318 cp = nilfs_cpfile_block_get_checkpoint(
319 cpfile, cno, cp_bh, kaddr);
320 nicps = 0;
321 for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) {
322 if (nilfs_checkpoint_snapshot(cp)) {
323 nss++;
324 } else if (!nilfs_checkpoint_invalid(cp)) {
325 nilfs_checkpoint_set_invalid(cp);
326 nicps++;
327 }
328 }
329 if (nicps > 0) {
330 tnicps += nicps;
331 mark_buffer_dirty(cp_bh);
332 nilfs_mdt_mark_dirty(cpfile);
333 if (!nilfs_cpfile_is_in_first(cpfile, cno)) {
334 count =
335 nilfs_cpfile_block_sub_valid_checkpoints(
336 cpfile, cp_bh, kaddr, nicps);
337 if (count == 0) {
338 /* make hole */
339 kunmap_atomic(kaddr);
340 brelse(cp_bh);
341 ret =
342 nilfs_cpfile_delete_checkpoint_block(
343 cpfile, cno);
344 if (ret == 0)
345 continue;
346 printk(KERN_ERR
347 "%s: cannot delete block\n",
348 __func__);
349 break;
350 }
351 }
352 }
353
354 kunmap_atomic(kaddr);
355 brelse(cp_bh);
356 }
357
358 if (tnicps > 0) {
359 kaddr = kmap_atomic(header_bh->b_page);
360 header = nilfs_cpfile_block_get_header(cpfile, header_bh,
361 kaddr);
362 le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
363 mark_buffer_dirty(header_bh);
364 nilfs_mdt_mark_dirty(cpfile);
365 kunmap_atomic(kaddr);
366 }
367
368 brelse(header_bh);
369 if (nss > 0)
370 ret = -EBUSY;
371
372 out_sem:
373 up_write(&NILFS_MDT(cpfile)->mi_sem);
374 return ret;
375}
376
377static void nilfs_cpfile_checkpoint_to_cpinfo(struct inode *cpfile,
378 struct nilfs_checkpoint *cp,
379 struct nilfs_cpinfo *ci)
380{
381 ci->ci_flags = le32_to_cpu(cp->cp_flags);
382 ci->ci_cno = le64_to_cpu(cp->cp_cno);
383 ci->ci_create = le64_to_cpu(cp->cp_create);
384 ci->ci_nblk_inc = le64_to_cpu(cp->cp_nblk_inc);
385 ci->ci_inodes_count = le64_to_cpu(cp->cp_inodes_count);
386 ci->ci_blocks_count = le64_to_cpu(cp->cp_blocks_count);
387 ci->ci_next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
388}
389
390static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
391 void *buf, unsigned cisz, size_t nci)
392{
393 struct nilfs_checkpoint *cp;
394 struct nilfs_cpinfo *ci = buf;
395 struct buffer_head *bh;
396 size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
397 __u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop;
398 void *kaddr;
399 int n, ret;
400 int ncps, i;
401
402 if (cno == 0)
403 return -ENOENT; /* checkpoint number 0 is invalid */
404 down_read(&NILFS_MDT(cpfile)->mi_sem);
405
406 for (n = 0; cno < cur_cno && n < nci; cno += ncps) {
407 ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno);
408 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
409 if (ret < 0) {
410 if (ret != -ENOENT)
411 goto out;
412 continue; /* skip hole */
413 }
414
415 kaddr = kmap_atomic(bh->b_page);
416 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
417 for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
418 if (!nilfs_checkpoint_invalid(cp)) {
419 nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp,
420 ci);
421 ci = (void *)ci + cisz;
422 n++;
423 }
424 }
425 kunmap_atomic(kaddr);
426 brelse(bh);
427 }
428
429 ret = n;
430 if (n > 0) {
431 ci = (void *)ci - cisz;
432 *cnop = ci->ci_cno + 1;
433 }
434
435 out:
436 up_read(&NILFS_MDT(cpfile)->mi_sem);
437 return ret;
438}
439
440static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
441 void *buf, unsigned cisz, size_t nci)
442{
443 struct buffer_head *bh;
444 struct nilfs_cpfile_header *header;
445 struct nilfs_checkpoint *cp;
446 struct nilfs_cpinfo *ci = buf;
447 __u64 curr = *cnop, next;
448 unsigned long curr_blkoff, next_blkoff;
449 void *kaddr;
450 int n = 0, ret;
451
452 down_read(&NILFS_MDT(cpfile)->mi_sem);
453
454 if (curr == 0) {
455 ret = nilfs_cpfile_get_header_block(cpfile, &bh);
456 if (ret < 0)
457 goto out;
458 kaddr = kmap_atomic(bh->b_page);
459 header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
460 curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
461 kunmap_atomic(kaddr);
462 brelse(bh);
463 if (curr == 0) {
464 ret = 0;
465 goto out;
466 }
467 } else if (unlikely(curr == ~(__u64)0)) {
468 ret = 0;
469 goto out;
470 }
471
472 curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr);
473 ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh);
474 if (unlikely(ret < 0)) {
475 if (ret == -ENOENT)
476 ret = 0; /* No snapshots (started from a hole block) */
477 goto out;
478 }
479 kaddr = kmap_atomic(bh->b_page);
480 while (n < nci) {
481 cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
482 curr = ~(__u64)0; /* Terminator */
483 if (unlikely(nilfs_checkpoint_invalid(cp) ||
484 !nilfs_checkpoint_snapshot(cp)))
485 break;
486 nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci);
487 ci = (void *)ci + cisz;
488 n++;
489 next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
490 if (next == 0)
491 break; /* reach end of the snapshot list */
492
493 next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
494 if (curr_blkoff != next_blkoff) {
495 kunmap_atomic(kaddr);
496 brelse(bh);
497 ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
498 0, &bh);
499 if (unlikely(ret < 0)) {
500 WARN_ON(ret == -ENOENT);
501 goto out;
502 }
503 kaddr = kmap_atomic(bh->b_page);
504 }
505 curr = next;
506 curr_blkoff = next_blkoff;
507 }
508 kunmap_atomic(kaddr);
509 brelse(bh);
510 *cnop = curr;
511 ret = n;
512
513 out:
514 up_read(&NILFS_MDT(cpfile)->mi_sem);
515 return ret;
516}
517
518/**
519 * nilfs_cpfile_get_cpinfo -
520 * @cpfile:
521 * @cno:
522 * @ci:
523 * @nci:
524 */
525
526ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
527 void *buf, unsigned cisz, size_t nci)
528{
529 switch (mode) {
530 case NILFS_CHECKPOINT:
531 return nilfs_cpfile_do_get_cpinfo(cpfile, cnop, buf, cisz, nci);
532 case NILFS_SNAPSHOT:
533 return nilfs_cpfile_do_get_ssinfo(cpfile, cnop, buf, cisz, nci);
534 default:
535 return -EINVAL;
536 }
537}
538
539/**
540 * nilfs_cpfile_delete_checkpoint -
541 * @cpfile:
542 * @cno:
543 */
544int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno)
545{
546 struct nilfs_cpinfo ci;
547 __u64 tcno = cno;
548 ssize_t nci;
549
550 nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, sizeof(ci), 1);
551 if (nci < 0)
552 return nci;
553 else if (nci == 0 || ci.ci_cno != cno)
554 return -ENOENT;
555 else if (nilfs_cpinfo_snapshot(&ci))
556 return -EBUSY;
557
558 return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1);
559}
560
561static struct nilfs_snapshot_list *
562nilfs_cpfile_block_get_snapshot_list(const struct inode *cpfile,
563 __u64 cno,
564 struct buffer_head *bh,
565 void *kaddr)
566{
567 struct nilfs_cpfile_header *header;
568 struct nilfs_checkpoint *cp;
569 struct nilfs_snapshot_list *list;
570
571 if (cno != 0) {
572 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
573 list = &cp->cp_snapshot_list;
574 } else {
575 header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
576 list = &header->ch_snapshot_list;
577 }
578 return list;
579}
580
581static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
582{
583 struct buffer_head *header_bh, *curr_bh, *prev_bh, *cp_bh;
584 struct nilfs_cpfile_header *header;
585 struct nilfs_checkpoint *cp;
586 struct nilfs_snapshot_list *list;
587 __u64 curr, prev;
588 unsigned long curr_blkoff, prev_blkoff;
589 void *kaddr;
590 int ret;
591
592 if (cno == 0)
593 return -ENOENT; /* checkpoint number 0 is invalid */
594 down_write(&NILFS_MDT(cpfile)->mi_sem);
595
596 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
597 if (ret < 0)
598 goto out_sem;
599 kaddr = kmap_atomic(cp_bh->b_page);
600 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
601 if (nilfs_checkpoint_invalid(cp)) {
602 ret = -ENOENT;
603 kunmap_atomic(kaddr);
604 goto out_cp;
605 }
606 if (nilfs_checkpoint_snapshot(cp)) {
607 ret = 0;
608 kunmap_atomic(kaddr);
609 goto out_cp;
610 }
611 kunmap_atomic(kaddr);
612
613 ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
614 if (ret < 0)
615 goto out_cp;
616 kaddr = kmap_atomic(header_bh->b_page);
617 header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
618 list = &header->ch_snapshot_list;
619 curr_bh = header_bh;
620 get_bh(curr_bh);
621 curr = 0;
622 curr_blkoff = 0;
623 prev = le64_to_cpu(list->ssl_prev);
624 while (prev > cno) {
625 prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
626 curr = prev;
627 if (curr_blkoff != prev_blkoff) {
628 kunmap_atomic(kaddr);
629 brelse(curr_bh);
630 ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
631 0, &curr_bh);
632 if (ret < 0)
633 goto out_header;
634 kaddr = kmap_atomic(curr_bh->b_page);
635 }
636 curr_blkoff = prev_blkoff;
637 cp = nilfs_cpfile_block_get_checkpoint(
638 cpfile, curr, curr_bh, kaddr);
639 list = &cp->cp_snapshot_list;
640 prev = le64_to_cpu(list->ssl_prev);
641 }
642 kunmap_atomic(kaddr);
643
644 if (prev != 0) {
645 ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
646 &prev_bh);
647 if (ret < 0)
648 goto out_curr;
649 } else {
650 prev_bh = header_bh;
651 get_bh(prev_bh);
652 }
653
654 kaddr = kmap_atomic(curr_bh->b_page);
655 list = nilfs_cpfile_block_get_snapshot_list(
656 cpfile, curr, curr_bh, kaddr);
657 list->ssl_prev = cpu_to_le64(cno);
658 kunmap_atomic(kaddr);
659
660 kaddr = kmap_atomic(cp_bh->b_page);
661 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
662 cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
663 cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
664 nilfs_checkpoint_set_snapshot(cp);
665 kunmap_atomic(kaddr);
666
667 kaddr = kmap_atomic(prev_bh->b_page);
668 list = nilfs_cpfile_block_get_snapshot_list(
669 cpfile, prev, prev_bh, kaddr);
670 list->ssl_next = cpu_to_le64(cno);
671 kunmap_atomic(kaddr);
672
673 kaddr = kmap_atomic(header_bh->b_page);
674 header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
675 le64_add_cpu(&header->ch_nsnapshots, 1);
676 kunmap_atomic(kaddr);
677
678 mark_buffer_dirty(prev_bh);
679 mark_buffer_dirty(curr_bh);
680 mark_buffer_dirty(cp_bh);
681 mark_buffer_dirty(header_bh);
682 nilfs_mdt_mark_dirty(cpfile);
683
684 brelse(prev_bh);
685
686 out_curr:
687 brelse(curr_bh);
688
689 out_header:
690 brelse(header_bh);
691
692 out_cp:
693 brelse(cp_bh);
694
695 out_sem:
696 up_write(&NILFS_MDT(cpfile)->mi_sem);
697 return ret;
698}
699
700static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
701{
702 struct buffer_head *header_bh, *next_bh, *prev_bh, *cp_bh;
703 struct nilfs_cpfile_header *header;
704 struct nilfs_checkpoint *cp;
705 struct nilfs_snapshot_list *list;
706 __u64 next, prev;
707 void *kaddr;
708 int ret;
709
710 if (cno == 0)
711 return -ENOENT; /* checkpoint number 0 is invalid */
712 down_write(&NILFS_MDT(cpfile)->mi_sem);
713
714 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
715 if (ret < 0)
716 goto out_sem;
717 kaddr = kmap_atomic(cp_bh->b_page);
718 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
719 if (nilfs_checkpoint_invalid(cp)) {
720 ret = -ENOENT;
721 kunmap_atomic(kaddr);
722 goto out_cp;
723 }
724 if (!nilfs_checkpoint_snapshot(cp)) {
725 ret = 0;
726 kunmap_atomic(kaddr);
727 goto out_cp;
728 }
729
730 list = &cp->cp_snapshot_list;
731 next = le64_to_cpu(list->ssl_next);
732 prev = le64_to_cpu(list->ssl_prev);
733 kunmap_atomic(kaddr);
734
735 ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
736 if (ret < 0)
737 goto out_cp;
738 if (next != 0) {
739 ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0,
740 &next_bh);
741 if (ret < 0)
742 goto out_header;
743 } else {
744 next_bh = header_bh;
745 get_bh(next_bh);
746 }
747 if (prev != 0) {
748 ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
749 &prev_bh);
750 if (ret < 0)
751 goto out_next;
752 } else {
753 prev_bh = header_bh;
754 get_bh(prev_bh);
755 }
756
757 kaddr = kmap_atomic(next_bh->b_page);
758 list = nilfs_cpfile_block_get_snapshot_list(
759 cpfile, next, next_bh, kaddr);
760 list->ssl_prev = cpu_to_le64(prev);
761 kunmap_atomic(kaddr);
762
763 kaddr = kmap_atomic(prev_bh->b_page);
764 list = nilfs_cpfile_block_get_snapshot_list(
765 cpfile, prev, prev_bh, kaddr);
766 list->ssl_next = cpu_to_le64(next);
767 kunmap_atomic(kaddr);
768
769 kaddr = kmap_atomic(cp_bh->b_page);
770 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
771 cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
772 cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
773 nilfs_checkpoint_clear_snapshot(cp);
774 kunmap_atomic(kaddr);
775
776 kaddr = kmap_atomic(header_bh->b_page);
777 header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
778 le64_add_cpu(&header->ch_nsnapshots, -1);
779 kunmap_atomic(kaddr);
780
781 mark_buffer_dirty(next_bh);
782 mark_buffer_dirty(prev_bh);
783 mark_buffer_dirty(cp_bh);
784 mark_buffer_dirty(header_bh);
785 nilfs_mdt_mark_dirty(cpfile);
786
787 brelse(prev_bh);
788
789 out_next:
790 brelse(next_bh);
791
792 out_header:
793 brelse(header_bh);
794
795 out_cp:
796 brelse(cp_bh);
797
798 out_sem:
799 up_write(&NILFS_MDT(cpfile)->mi_sem);
800 return ret;
801}
802
803/**
804 * nilfs_cpfile_is_snapshot -
805 * @cpfile: inode of checkpoint file
806 * @cno: checkpoint number
807 *
808 * Description:
809 *
810 * Return Value: On success, 1 is returned if the checkpoint specified by
811 * @cno is a snapshot, or 0 if not. On error, one of the following negative
812 * error codes is returned.
813 *
814 * %-EIO - I/O error.
815 *
816 * %-ENOMEM - Insufficient amount of memory available.
817 *
818 * %-ENOENT - No such checkpoint.
819 */
820int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
821{
822 struct buffer_head *bh;
823 struct nilfs_checkpoint *cp;
824 void *kaddr;
825 int ret;
826
827 /* CP number is invalid if it's zero or larger than the
828 largest exist one.*/
829 if (cno == 0 || cno >= nilfs_mdt_cno(cpfile))
830 return -ENOENT;
831 down_read(&NILFS_MDT(cpfile)->mi_sem);
832
833 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
834 if (ret < 0)
835 goto out;
836 kaddr = kmap_atomic(bh->b_page);
837 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
838 if (nilfs_checkpoint_invalid(cp))
839 ret = -ENOENT;
840 else
841 ret = nilfs_checkpoint_snapshot(cp);
842 kunmap_atomic(kaddr);
843 brelse(bh);
844
845 out:
846 up_read(&NILFS_MDT(cpfile)->mi_sem);
847 return ret;
848}
849
850/**
851 * nilfs_cpfile_change_cpmode - change checkpoint mode
852 * @cpfile: inode of checkpoint file
853 * @cno: checkpoint number
854 * @status: mode of checkpoint
855 *
856 * Description: nilfs_change_cpmode() changes the mode of the checkpoint
857 * specified by @cno. The mode @mode is NILFS_CHECKPOINT or NILFS_SNAPSHOT.
858 *
859 * Return Value: On success, 0 is returned. On error, one of the following
860 * negative error codes is returned.
861 *
862 * %-EIO - I/O error.
863 *
864 * %-ENOMEM - Insufficient amount of memory available.
865 *
866 * %-ENOENT - No such checkpoint.
867 */
868int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
869{
870 int ret;
871
872 switch (mode) {
873 case NILFS_CHECKPOINT:
874 if (nilfs_checkpoint_is_mounted(cpfile->i_sb, cno))
875 /*
876 * Current implementation does not have to protect
877 * plain read-only mounts since they are exclusive
878 * with a read/write mount and are protected from the
879 * cleaner.
880 */
881 ret = -EBUSY;
882 else
883 ret = nilfs_cpfile_clear_snapshot(cpfile, cno);
884 return ret;
885 case NILFS_SNAPSHOT:
886 return nilfs_cpfile_set_snapshot(cpfile, cno);
887 default:
888 return -EINVAL;
889 }
890}
891
892/**
893 * nilfs_cpfile_get_stat - get checkpoint statistics
894 * @cpfile: inode of checkpoint file
895 * @stat: pointer to a structure of checkpoint statistics
896 *
897 * Description: nilfs_cpfile_get_stat() returns information about checkpoints.
898 *
899 * Return Value: On success, 0 is returned, and checkpoints information is
900 * stored in the place pointed by @stat. On error, one of the following
901 * negative error codes is returned.
902 *
903 * %-EIO - I/O error.
904 *
905 * %-ENOMEM - Insufficient amount of memory available.
906 */
907int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
908{
909 struct buffer_head *bh;
910 struct nilfs_cpfile_header *header;
911 void *kaddr;
912 int ret;
913
914 down_read(&NILFS_MDT(cpfile)->mi_sem);
915
916 ret = nilfs_cpfile_get_header_block(cpfile, &bh);
917 if (ret < 0)
918 goto out_sem;
919 kaddr = kmap_atomic(bh->b_page);
920 header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
921 cpstat->cs_cno = nilfs_mdt_cno(cpfile);
922 cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints);
923 cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots);
924 kunmap_atomic(kaddr);
925 brelse(bh);
926
927 out_sem:
928 up_read(&NILFS_MDT(cpfile)->mi_sem);
929 return ret;
930}
931
932/**
933 * nilfs_cpfile_read - read or get cpfile inode
934 * @sb: super block instance
935 * @cpsize: size of a checkpoint entry
936 * @raw_inode: on-disk cpfile inode
937 * @inodep: buffer to store the inode
938 */
939int nilfs_cpfile_read(struct super_block *sb, size_t cpsize,
940 struct nilfs_inode *raw_inode, struct inode **inodep)
941{
942 struct inode *cpfile;
943 int err;
944
945 if (cpsize > sb->s_blocksize) {
946 printk(KERN_ERR
947 "NILFS: too large checkpoint size: %zu bytes.\n",
948 cpsize);
949 return -EINVAL;
950 } else if (cpsize < NILFS_MIN_CHECKPOINT_SIZE) {
951 printk(KERN_ERR
952 "NILFS: too small checkpoint size: %zu bytes.\n",
953 cpsize);
954 return -EINVAL;
955 }
956
957 cpfile = nilfs_iget_locked(sb, NULL, NILFS_CPFILE_INO);
958 if (unlikely(!cpfile))
959 return -ENOMEM;
960 if (!(cpfile->i_state & I_NEW))
961 goto out;
962
963 err = nilfs_mdt_init(cpfile, NILFS_MDT_GFP, 0);
964 if (err)
965 goto failed;
966
967 nilfs_mdt_set_entry_size(cpfile, cpsize,
968 sizeof(struct nilfs_cpfile_header));
969
970 err = nilfs_read_inode_common(cpfile, raw_inode);
971 if (err)
972 goto failed;
973
974 unlock_new_inode(cpfile);
975 out:
976 *inodep = cpfile;
977 return 0;
978 failed:
979 iget_failed(cpfile);
980 return err;
981}