/*
 * segment.c - NILFS segment constructor.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Written by Ryusuke Konishi.
 *
 */

#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include "nilfs.h"
#include "btnode.h"
#include "page.h"
#include "segment.h"
#include "sufile.h"
#include "cpfile.h"
#include "ifile.h"
#include "segbuf.h"


/*
 * Segment constructor
 */
#define SC_N_INODEVEC	16   /* Size of locally allocated inode vector */

#define SC_MAX_SEGDELTA 64   /*
			      * Upper limit of the number of segments
			      * appended in the collection retry loop
			      */

/* Construction mode */
enum {
	SC_LSEG_SR = 1,	/* Make a logical segment having a super root */
	SC_LSEG_DSYNC,	/*
			 * Flush data blocks of a given file and make
			 * a logical segment without a super root.
			 */
	SC_FLUSH_FILE,	/*
			 * Flush data files, leads to segment writes without
			 * creating a checkpoint.
			 */
	SC_FLUSH_DAT,	/*
			 * Flush DAT file.  This also creates segments
			 * without a checkpoint.
			 */
};

/* Stage numbers of dirty block collection */
enum {
	NILFS_ST_INIT = 0,
	NILFS_ST_GC,		/* Collecting dirty blocks for GC */
	NILFS_ST_FILE,
	NILFS_ST_IFILE,
	NILFS_ST_CPFILE,
	NILFS_ST_SUFILE,
	NILFS_ST_DAT,
	NILFS_ST_SR,		/* Super root */
	NILFS_ST_DSYNC,		/* Data sync blocks */
	NILFS_ST_DONE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/nilfs2.h>

/*
 * nilfs_sc_cstage_inc(), nilfs_sc_cstage_set(), and nilfs_sc_cstage_get() are
 * wrapper functions for the stage count (nilfs_sc_info->sc_stage.scnt).  Users
 * of the variable must use them because every stage count transition must
 * emit a trace event (trace_nilfs2_collection_stage_transition).
 *
 * nilfs_sc_cstage_get() isn't required for the above purpose because it
 * doesn't produce tracepoint events.  It is provided just to make the
 * intention clear.
 */
static inline void nilfs_sc_cstage_inc(struct nilfs_sc_info *sci)
{
	sci->sc_stage.scnt++;
	trace_nilfs2_collection_stage_transition(sci);
}

static inline void nilfs_sc_cstage_set(struct nilfs_sc_info *sci, int next_scnt)
{
	sci->sc_stage.scnt = next_scnt;
	trace_nilfs2_collection_stage_transition(sci);
}

static inline int nilfs_sc_cstage_get(struct nilfs_sc_info *sci)
{
	return sci->sc_stage.scnt;
}

/* State flags of collection */
#define NILFS_CF_NODE		0x0001	/* Collecting node blocks */
#define NILFS_CF_IFILE_STARTED	0x0002	/* IFILE stage has started */
#define NILFS_CF_SUFREED	0x0004	/* Segment usages have been freed */
#define NILFS_CF_HISTORY_MASK	(NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)

/* Operations depending on the construction mode and file type */
struct nilfs_sc_operations {
	int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	void (*write_data_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
	void (*write_node_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
};

/*
 * Other definitions
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);

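/*
 * Wrap-safe comparisons of 32-bit sequence counters, in the spirit of
 * the time_after() family for jiffies: the subtraction is evaluated in
 * signed arithmetic so that ordering stays correct across a 32-bit
 * wraparound.  For example, nilfs_cnt32_gt(1, 0xfffffffe) is true
 * because counter 1 comes two steps after 0xfffffffe.
 */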
#define nilfs_cnt32_gt(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) &&		\
	 ((__s32)(b) - (__s32)(a) < 0))
#define nilfs_cnt32_ge(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) &&		\
	 ((__s32)(a) - (__s32)(b) >= 0))
#define nilfs_cnt32_lt(a, b)	nilfs_cnt32_gt(b, a)
#define nilfs_cnt32_le(a, b)	nilfs_cnt32_ge(b, a)

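/*
 * nilfs_prepare_segment_lock() returns a positive nesting count when the
 * current task is already inside a NILFS transaction, zero when @ti (or a
 * dynamically allocated substitute) has been installed as the outermost
 * transaction, or -ENOMEM if that allocation failed.
 */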
static int nilfs_prepare_segment_lock(struct super_block *sb,
				      struct nilfs_transaction_info *ti)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	void *save = NULL;

	if (cur_ti) {
		if (cur_ti->ti_magic == NILFS_TI_MAGIC)
			return ++cur_ti->ti_count;

		/*
		 * If the journal_info field is occupied by another FS,
		 * it is saved and will be restored in
		 * nilfs_transaction_commit().
		 */
		nilfs_msg(sb, KERN_WARNING, "journal info from a different FS");
		save = current->journal_info;
	}
	if (!ti) {
		ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
		if (!ti)
			return -ENOMEM;
		ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
	} else {
		ti->ti_flags = 0;
	}
	ti->ti_count = 0;
	ti->ti_save = save;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;
	return 0;
}

/**
 * nilfs_transaction_begin - start indivisible file operations.
 * @sb: super block
 * @ti: nilfs_transaction_info
 * @vacancy_check: flags for vacancy rate checks
 *
 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
 * the segment semaphore, to make segment construction and write tasks
 * exclusive.  The function is used in pairs with nilfs_transaction_commit().
 * The region enclosed by these two functions can be nested.  To avoid a
 * deadlock, the semaphore is only acquired or released in the outermost call.
 *
 * This function allocates a nilfs_transaction_info struct to keep context
 * information on it.  It is initialized and hooked onto the current task in
 * the outermost call.  If a pre-allocated struct is given to @ti, it is used
 * instead; otherwise a new struct is assigned from a slab.
 *
 * When the @vacancy_check flag is set, this function will check the amount
 * of free space, and will wait for the GC to reclaim disk space if capacity
 * is low.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-ENOSPC - No space left on device
 */
int nilfs_transaction_begin(struct super_block *sb,
			    struct nilfs_transaction_info *ti,
			    int vacancy_check)
{
	struct the_nilfs *nilfs;
	int ret = nilfs_prepare_segment_lock(sb, ti);
	struct nilfs_transaction_info *trace_ti;

	if (unlikely(ret < 0))
		return ret;
	if (ret > 0) {
		trace_ti = current->journal_info;

		trace_nilfs2_transaction_transition(sb, trace_ti,
				    trace_ti->ti_count, trace_ti->ti_flags,
				    TRACE_NILFS2_TRANSACTION_BEGIN);
		return 0;
	}

	sb_start_intwrite(sb);

	nilfs = sb->s_fs_info;
	down_read(&nilfs->ns_segctor_sem);
	if (vacancy_check && nilfs_near_disk_full(nilfs)) {
		up_read(&nilfs->ns_segctor_sem);
		ret = -ENOSPC;
		goto failed;
	}

	trace_ti = current->journal_info;
	trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count,
					    trace_ti->ti_flags,
					    TRACE_NILFS2_TRANSACTION_BEGIN);
	return 0;

 failed:
	ti = current->journal_info;
	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return ret;
}

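/*
 * Typical caller pattern (a simplified sketch, not taken verbatim from
 * any one call site; error handling is elided):
 *
 *	struct nilfs_transaction_info ti;
 *
 *	nilfs_transaction_begin(inode->i_sb, &ti, 1);
 *	... dirty inodes, pages, and metadata here ...
 *	nilfs_transaction_commit(inode->i_sb);
 *
 * On failure paths, nilfs_transaction_abort() must be called instead of
 * nilfs_transaction_commit() to release the segment semaphore.
 */
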
/**
 * nilfs_transaction_commit - commit indivisible file operations.
 * @sb: super block
 *
 * nilfs_transaction_commit() releases the read semaphore which is
 * acquired by nilfs_transaction_begin().  This is only performed
 * in the outermost call of this function.  If a commit flag is set,
 * nilfs_transaction_commit() sets a timer to start the segment
 * constructor.  If a sync flag is set, it starts construction
 * directly.
 */
int nilfs_transaction_commit(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err = 0;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	ti->ti_flags |= NILFS_TI_COMMIT;
	if (ti->ti_count > 0) {
		ti->ti_count--;
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
		return 0;
	}
	if (nilfs->ns_writer) {
		struct nilfs_sc_info *sci = nilfs->ns_writer;

		if (ti->ti_flags & NILFS_TI_COMMIT)
			nilfs_segctor_start_timer(sci);
		if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
			nilfs_segctor_do_flush(sci, 0);
	}
	up_read(&nilfs->ns_segctor_sem);
	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);

	current->journal_info = ti->ti_save;

	if (ti->ti_flags & NILFS_TI_SYNC)
		err = nilfs_construct_segment(sb);
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return err;
}

void nilfs_transaction_abort(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	if (ti->ti_count > 0) {
		ti->ti_count--;
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
		return;
	}
	up_read(&nilfs->ns_segctor_sem);

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
		    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);

	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
}

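/*
 * nilfs_relax_pressure_in_lock() lets a transaction writer yield the
 * segment semaphore to a pending flush request: it temporarily takes the
 * semaphore exclusively, services the flush in place, and then downgrades
 * back to the shared (reader) mode the caller still expects to hold.
 */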
void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	if (!sci || !sci->sc_flush_request)
		return;

	set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
	up_read(&nilfs->ns_segctor_sem);

	down_write(&nilfs->ns_segctor_sem);
	if (sci->sc_flush_request &&
	    test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
		struct nilfs_transaction_info *ti = current->journal_info;

		ti->ti_flags |= NILFS_TI_WRITER;
		nilfs_segctor_do_immediate_flush(sci);
		ti->ti_flags &= ~NILFS_TI_WRITER;
	}
	downgrade_write(&nilfs->ns_segctor_sem);
}

static void nilfs_transaction_lock(struct super_block *sb,
				   struct nilfs_transaction_info *ti,
				   int gcflag)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	WARN_ON(cur_ti);
	ti->ti_flags = NILFS_TI_WRITER;
	ti->ti_count = 0;
	ti->ti_save = cur_ti;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;

	for (;;) {
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK);

		down_write(&nilfs->ns_segctor_sem);
		if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
			break;

		nilfs_segctor_do_immediate_flush(sci);

		up_write(&nilfs->ns_segctor_sem);
		cond_resched();
	}
	if (gcflag)
		ti->ti_flags |= NILFS_TI_GC;

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK);
}

static void nilfs_transaction_unlock(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	BUG_ON(ti->ti_count > 0);

	up_write(&nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK);
}

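/*
 * nilfs_segctor_map_segsum_entry() reserves @bytes in the segment summary
 * area and returns a pointer to the reserved region, stepping to the next
 * summary buffer when the entry would not fit in the current block.
 */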
static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
					    struct nilfs_segsum_pointer *ssp,
					    unsigned int bytes)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	unsigned int blocksize = sci->sc_super->s_blocksize;
	void *p;

	if (unlikely(ssp->offset + bytes > blocksize)) {
		ssp->offset = 0;
		BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
					       &segbuf->sb_segsum_buffers));
		ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
	}
	p = ssp->bh->b_data + ssp->offset;
	ssp->offset += bytes;
	return p;
}

/**
 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 * @sci: nilfs_sc_info
 */
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	struct buffer_head *sumbh;
	unsigned int sumbytes;
	unsigned int flags = 0;
	int err;

	if (nilfs_doing_gc())
		flags = NILFS_SS_GC;
	err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
	if (unlikely(err))
		return err;

	sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	sumbytes = segbuf->sb_sum.sumbytes;
	sci->sc_finfo_ptr.bh = sumbh;  sci->sc_finfo_ptr.offset = sumbytes;
	sci->sc_binfo_ptr.bh = sumbh;  sci->sc_binfo_ptr.offset = sumbytes;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
	return 0;
}

static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
	if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
		return -E2BIG; /*
				* The current segment is filled up
				* (internal code)
				*/
	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
	return nilfs_segctor_reset_segment_buffer(sci);
}

static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	int err;

	if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		segbuf = sci->sc_curseg;
	}
	err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
	if (likely(!err))
		segbuf->sb_sum.flags |= NILFS_SS_SR;
	return err;
}

/*
 * Functions for making segment summary and payloads
 */
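/*
 * Summary-area layout: each file contributes one nilfs_finfo header
 * followed by one binfo entry per collected block.  The binfo format
 * differs by file type (virtual block addresses for regular files,
 * direct block addresses for the DAT) and is selected through the
 * nilfs_sc_operations tables defined below.
 */
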
static int nilfs_segctor_segsum_block_required(
	struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
	unsigned int binfo_size)
{
	unsigned int blocksize = sci->sc_super->s_blocksize;
	/* finfo and binfo entries are small compared to the block size */

	return ssp->offset + binfo_size +
		(!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
		blocksize;
}

static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
				      struct inode *inode)
{
	sci->sc_curseg->sb_sum.nfinfo++;
	sci->sc_binfo_ptr = sci->sc_finfo_ptr;
	nilfs_segctor_map_segsum_entry(
		sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));

	if (NILFS_I(inode)->i_root &&
	    !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
	/* skip finfo */
}

static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
				    struct inode *inode)
{
	struct nilfs_finfo *finfo;
	struct nilfs_inode_info *ii;
	struct nilfs_segment_buffer *segbuf;
	__u64 cno;

	if (sci->sc_blk_cnt == 0)
		return;

	ii = NILFS_I(inode);

	if (test_bit(NILFS_I_GCINODE, &ii->i_state))
		cno = ii->i_cno;
	else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
		cno = 0;
	else
		cno = sci->sc_cno;

	finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
					       sizeof(*finfo));
	finfo->fi_ino = cpu_to_le64(inode->i_ino);
	finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
	finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
	finfo->fi_cno = cpu_to_le64(cno);

	segbuf = sci->sc_curseg;
	segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
		sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
	sci->sc_finfo_ptr = sci->sc_binfo_ptr;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}

static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
					struct buffer_head *bh,
					struct inode *inode,
					unsigned int binfo_size)
{
	struct nilfs_segment_buffer *segbuf;
	int required, err = 0;

 retry:
	segbuf = sci->sc_curseg;
	required = nilfs_segctor_segsum_block_required(
		sci, &sci->sc_binfo_ptr, binfo_size);
	if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
		nilfs_segctor_end_finfo(sci, inode);
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		goto retry;
	}
	if (unlikely(required)) {
		err = nilfs_segbuf_extend_segsum(segbuf);
		if (unlikely(err))
			goto failed;
	}
	if (sci->sc_blk_cnt == 0)
		nilfs_segctor_begin_finfo(sci, inode);

	nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
	/* Substitution to vblocknr is delayed until update_blocknr() */
	nilfs_segbuf_add_file_buffer(segbuf, bh);
	sci->sc_blk_cnt++;
 failed:
	return err;
}

/*
 * Callback functions that enumerate, mark, and collect dirty blocks
 */
static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
				   struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode,
					   sizeof(struct nilfs_binfo_v));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
}

static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
}

static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*binfo_v));
	*binfo_v = binfo->bi_v;
}

static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	__le64 *vblocknr = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*vblocknr));
	*vblocknr = binfo->bi_v.bi_vblocknr;
}

static const struct nilfs_sc_operations nilfs_sc_file_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_file_bmap,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = nilfs_write_file_node_binfo,
};

static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode,
					    sizeof(struct nilfs_binfo_dat));
}

static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	__le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
							sizeof(*blkoff));
	*blkoff = binfo->bi_dat.bi_blkoff;
}

static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	struct nilfs_binfo_dat *binfo_dat =
		nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
	*binfo_dat = binfo->bi_dat;
}

static const struct nilfs_sc_operations nilfs_sc_dat_ops = {
	.collect_data = nilfs_collect_dat_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_dat_bmap,
	.write_data_binfo = nilfs_write_dat_data_binfo,
	.write_node_binfo = nilfs_write_dat_node_binfo,
};

static const struct nilfs_sc_operations nilfs_sc_dsync_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = NULL,
	.collect_bmap = NULL,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = NULL,
};

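/*
 * In DSYNC mode only data blocks are collected, so the node and bmap
 * callbacks of nilfs_sc_dsync_ops above are intentionally NULL: every
 * block of a dsync finfo is a data block (fi_ndatablk equals fi_nblocks),
 * so write_node_binfo is never reached for these logs.
 */
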
static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
					      struct list_head *listp,
					      size_t nlimit,
					      loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t index = 0, last = ULONG_MAX;
	size_t ndirties = 0;
	int i;

	if (unlikely(start != 0 || end != LLONG_MAX)) {
		/*
		 * A valid range is given for sync-ing data pages.  The
		 * range is rounded to page boundaries; extra dirty buffers
		 * may be included if blocksize < pagesize.
		 */
		index = start >> PAGE_SHIFT;
		last = end >> PAGE_SHIFT;
	}
	pagevec_init(&pvec);
 repeat:
	if (unlikely(index > last) ||
	    !pagevec_lookup_range_tag(&pvec, mapping, &index, last,
				      PAGECACHE_TAG_DIRTY))
		return ndirties;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct buffer_head *bh, *head;
		struct page *page = pvec.pages[i];

		lock_page(page);
		if (!page_has_buffers(page))
			create_empty_buffers(page, i_blocksize(inode), 0);
		unlock_page(page);

		bh = head = page_buffers(page);
		do {
			if (!buffer_dirty(bh) || buffer_async_write(bh))
				continue;
			get_bh(bh);
			list_add_tail(&bh->b_assoc_buffers, listp);
			ndirties++;
			if (unlikely(ndirties >= nlimit)) {
				pagevec_release(&pvec);
				cond_resched();
				return ndirties;
			}
		} while (bh = bh->b_this_page, bh != head);
	}
	pagevec_release(&pvec);
	cond_resched();
	goto repeat;
}

static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
					    struct list_head *listp)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct address_space *mapping = &ii->i_btnode_cache;
	struct pagevec pvec;
	struct buffer_head *bh, *head;
	unsigned int i;
	pgoff_t index = 0;

	pagevec_init(&pvec);

	while (pagevec_lookup_tag(&pvec, mapping, &index,
				  PAGECACHE_TAG_DIRTY)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			bh = head = page_buffers(pvec.pages[i]);
			do {
				if (buffer_dirty(bh) &&
				    !buffer_async_write(bh)) {
					get_bh(bh);
					list_add_tail(&bh->b_assoc_buffers,
						      listp);
				}
				bh = bh->b_this_page;
			} while (bh != head);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

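/*
 * nilfs_dispose_list() detaches inodes from @head and drops their
 * references in batches of SC_N_INODEVEC, so that iput(), which may
 * sleep, is never called while ns_inode_lock is held.
 */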
static void nilfs_dispose_list(struct the_nilfs *nilfs,
			       struct list_head *head, int force)
{
	struct nilfs_inode_info *ii, *n;
	struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
	unsigned int nv = 0;

	while (!list_empty(head)) {
		spin_lock(&nilfs->ns_inode_lock);
		list_for_each_entry_safe(ii, n, head, i_dirty) {
			list_del_init(&ii->i_dirty);
			if (force) {
				if (unlikely(ii->i_bh)) {
					brelse(ii->i_bh);
					ii->i_bh = NULL;
				}
			} else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
				set_bit(NILFS_I_QUEUED, &ii->i_state);
				list_add_tail(&ii->i_dirty,
					      &nilfs->ns_dirty_files);
				continue;
			}
			ivec[nv++] = ii;
			if (nv == SC_N_INODEVEC)
				break;
		}
		spin_unlock(&nilfs->ns_inode_lock);

		for (pii = ivec; nv > 0; pii++, nv--)
			iput(&(*pii)->vfs_inode);
	}
}

static void nilfs_iput_work_func(struct work_struct *work)
{
	struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
						 sc_iput_work);
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

	nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
}

static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
				     struct nilfs_root *root)
{
	int ret = 0;

	if (nilfs_mdt_fetch_dirty(root->ifile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
		ret++;
	if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
		ret++;
	return ret;
}

static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
{
	return list_empty(&sci->sc_dirty_files) &&
		!test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
		sci->sc_nfreesegs == 0 &&
		(!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
}

static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int ret = 0;

	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	spin_lock(&nilfs->ns_inode_lock);
	if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
		ret++;

	spin_unlock(&nilfs->ns_inode_lock);
	return ret;
}

static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

	nilfs_mdt_clear_dirty(sci->sc_root->ifile);
	nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
	nilfs_mdt_clear_dirty(nilfs->ns_sufile);
	nilfs_mdt_clear_dirty(nilfs->ns_dat);
}

static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	/* XXX: this interface will be changed */
	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
					  &raw_cp, &bh_cp);
	if (likely(!err)) {
		/*
		 * The following code duplicates part of cpfile, but it
		 * is needed to collect the checkpoint even if it was not
		 * newly created.
		 */
		mark_buffer_dirty(bh_cp);
		nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
		nilfs_cpfile_put_checkpoint(
			nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	} else
		WARN_ON(err == -EINVAL || err == -ENOENT);

	return err;
}

static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
					  &raw_cp, &bh_cp);
	if (unlikely(err)) {
		WARN_ON(err == -EINVAL || err == -ENOENT);
		goto failed_ibh;
	}
	raw_cp->cp_snapshot_list.ssl_next = 0;
	raw_cp->cp_snapshot_list.ssl_prev = 0;
	raw_cp->cp_inodes_count =
		cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count));
	raw_cp->cp_blocks_count =
		cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count));
	raw_cp->cp_nblk_inc =
		cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
	raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
	raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);

	if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		nilfs_checkpoint_clear_minor(raw_cp);
	else
		nilfs_checkpoint_set_minor(raw_cp);

	nilfs_write_inode_common(sci->sc_root->ifile,
				 &raw_cp->cp_ifile_inode, 1);
	nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	return 0;

 failed_ibh:
	return err;
}

static void nilfs_fill_in_file_bmap(struct inode *ifile,
				    struct nilfs_inode_info *ii)
{
	struct buffer_head *ibh;
	struct nilfs_inode *raw_inode;

	if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
		ibh = ii->i_bh;
		BUG_ON(!ibh);
		raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
						  ibh);
		nilfs_bmap_write(ii->i_bmap, raw_inode);
		nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
	}
}

static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
		nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
		set_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *raw_sr;
	unsigned int isz, srsz;

	bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
	isz = nilfs->ns_inode_size;
	srsz = NILFS_SR_BYTES(isz);

	raw_sr->sr_bytes = cpu_to_le16(srsz);
	raw_sr->sr_nongc_ctime
		= cpu_to_le64(nilfs_doing_gc() ?
			      nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
	raw_sr->sr_flags = 0;

	nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
				 NILFS_SR_DAT_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
				 NILFS_SR_CPFILE_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
				 NILFS_SR_SUFILE_OFFSET(isz), 1);
	memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
}

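/*
 * The super root block filled in above packs the on-disk inodes of the
 * three core metadata files at the fixed offsets given by the
 * NILFS_SR_*_OFFSET() macros (DAT, then cpfile, then sufile); the
 * remainder of the block is zero-filled so its contents are fully
 * determined.
 */
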
static void nilfs_redirty_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
			clear_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_drop_collected_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
			continue;

		clear_bit(NILFS_I_INODE_SYNC, &ii->i_state);
		set_bit(NILFS_I_UPDATED, &ii->i_state);
	}
}

static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
				       struct inode *inode,
				       struct list_head *listp,
				       int (*collect)(struct nilfs_sc_info *,
						      struct buffer_head *,
						      struct inode *))
{
	struct buffer_head *bh, *n;
	int err = 0;

	if (collect) {
		list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
			list_del_init(&bh->b_assoc_buffers);
			err = collect(sci, bh, inode);
			brelse(bh);
			if (unlikely(err))
				goto dispose_buffers;
		}
		return 0;
	}

 dispose_buffers:
	while (!list_empty(listp)) {
		bh = list_first_entry(listp, struct buffer_head,
				      b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh);
	}
	return err;
}

static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
{
	/* Remaining number of blocks within segment buffer */
	return sci->sc_segbuf_nblocks -
		(sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
}

static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
				   struct inode *inode,
				   const struct nilfs_sc_operations *sc_ops)
{
	LIST_HEAD(data_buffers);
	LIST_HEAD(node_buffers);
	int err;

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		size_t n, rest = nilfs_segctor_buffer_rest(sci);

		n = nilfs_lookup_dirty_data_buffers(
			inode, &data_buffers, rest + 1, 0, LLONG_MAX);
		if (n > rest) {
			err = nilfs_segctor_apply_buffers(
				sci, inode, &data_buffers,
				sc_ops->collect_data);
			BUG_ON(!err); /* always receive -E2BIG or true error */
			goto break_or_fail;
		}
	}
	nilfs_lookup_dirty_node_buffers(inode, &node_buffers);

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		err = nilfs_segctor_apply_buffers(
			sci, inode, &data_buffers, sc_ops->collect_data);
		if (unlikely(err)) {
			/* dispose node list */
			nilfs_segctor_apply_buffers(
				sci, inode, &node_buffers, NULL);
			goto break_or_fail;
		}
		sci->sc_stage.flags |= NILFS_CF_NODE;
	}
	/* Collect node */
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_node);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_bmap);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_segctor_end_finfo(sci, inode);
	sci->sc_stage.flags &= ~NILFS_CF_NODE;

 break_or_fail:
	return err;
}

static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
					 struct inode *inode)
{
	LIST_HEAD(data_buffers);
	size_t n, rest = nilfs_segctor_buffer_rest(sci);
	int err;

	n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
					    sci->sc_dsync_start,
					    sci->sc_dsync_end);

	err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
					  nilfs_collect_file_data);
	if (!err) {
		nilfs_segctor_end_finfo(sci, inode);
		BUG_ON(n > rest);
		/* always receive -E2BIG or true error if n > rest */
	}
	return err;
}

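/*
 * nilfs_segctor_collect_blocks() walks the collection stages in NILFS_ST_*
 * order, falling through from one case label to the next.  The current
 * stage lives in sc_stage so that a collection pass interrupted by -E2BIG
 * (all segment buffers full) can be restarted by nilfs_segctor_collect()
 * after more segments have been added; the SC_FLUSH_* modes short-circuit
 * to NILFS_ST_DONE without creating a checkpoint or super root.
 */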
static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct list_head *head;
	struct nilfs_inode_info *ii;
	size_t ndone;
	int err = 0;

	switch (nilfs_sc_cstage_get(sci)) {
	case NILFS_ST_INIT:
		/* Pre-processes */
		sci->sc_stage.flags = 0;

		if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
			sci->sc_nblk_inc = 0;
			sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
			if (mode == SC_LSEG_DSYNC) {
				nilfs_sc_cstage_set(sci, NILFS_ST_DSYNC);
				goto dsync_mode;
			}
		}

		sci->sc_stage.dirty_file_ptr = NULL;
		sci->sc_stage.gc_inode_ptr = NULL;
		if (mode == SC_FLUSH_DAT) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DAT);
			goto dat_stage;
		}
		nilfs_sc_cstage_inc(sci);  /* Fall through */
	case NILFS_ST_GC:
		if (nilfs_doing_gc()) {
			head = &sci->sc_gc_inodes;
			ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
						head, i_dirty);
			list_for_each_entry_continue(ii, head, i_dirty) {
				err = nilfs_segctor_scan_file(
					sci, &ii->vfs_inode,
					&nilfs_sc_file_ops);
				if (unlikely(err)) {
					sci->sc_stage.gc_inode_ptr = list_entry(
						ii->i_dirty.prev,
						struct nilfs_inode_info,
						i_dirty);
					goto break_or_fail;
				}
				set_bit(NILFS_I_COLLECTED, &ii->i_state);
			}
			sci->sc_stage.gc_inode_ptr = NULL;
		}
		nilfs_sc_cstage_inc(sci);  /* Fall through */
	case NILFS_ST_FILE:
		head = &sci->sc_dirty_files;
		ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
					i_dirty);
		list_for_each_entry_continue(ii, head, i_dirty) {
			clear_bit(NILFS_I_DIRTY, &ii->i_state);

			err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
						      &nilfs_sc_file_ops);
			if (unlikely(err)) {
				sci->sc_stage.dirty_file_ptr =
					list_entry(ii->i_dirty.prev,
						   struct nilfs_inode_info,
						   i_dirty);
				goto break_or_fail;
			}
			/* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
			/* XXX: required ? */
		}
		sci->sc_stage.dirty_file_ptr = NULL;
		if (mode == SC_FLUSH_FILE) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
			return 0;
		}
		nilfs_sc_cstage_inc(sci);
		sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
		/* Fall through */
	case NILFS_ST_IFILE:
		err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);
		/* Creating a checkpoint */
		err = nilfs_segctor_create_checkpoint(sci);
		if (unlikely(err))
			break;
		/* Fall through */
	case NILFS_ST_CPFILE:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);  /* Fall through */
	case NILFS_ST_SUFILE:
		err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
					 sci->sc_nfreesegs, &ndone);
		if (unlikely(err)) {
			nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						  sci->sc_freesegs, ndone,
						  NULL);
			break;
		}
		sci->sc_stage.flags |= NILFS_CF_SUFREED;

		err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);  /* Fall through */
	case NILFS_ST_DAT:
 dat_stage:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
					      &nilfs_sc_dat_ops);
		if (unlikely(err))
			break;
		if (mode == SC_FLUSH_DAT) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
			return 0;
		}
		nilfs_sc_cstage_inc(sci);  /* Fall through */
	case NILFS_ST_SR:
		if (mode == SC_LSEG_SR) {
			/* Appending a super root */
			err = nilfs_segctor_add_super_root(sci);
			if (unlikely(err))
				break;
		}
		/* End of a logical segment */
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
		return 0;
	case NILFS_ST_DSYNC:
 dsync_mode:
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
		ii = sci->sc_dsync_inode;
		if (!test_bit(NILFS_I_BUSY, &ii->i_state))
			break;

		err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
		if (unlikely(err))
			break;
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
		return 0;
	case NILFS_ST_DONE:
		return 0;
	default:
		BUG();
	}

 break_or_fail:
	return err;
}

/**
 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
 * @sci: nilfs_sc_info
 * @nilfs: nilfs object
 */
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
					    struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	__u64 nextnum;
	int err, alloc = 0;

	segbuf = nilfs_segbuf_new(sci->sc_super);
	if (unlikely(!segbuf))
		return -ENOMEM;

	if (list_empty(&sci->sc_write_logs)) {
		nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
				 nilfs->ns_pseg_offset, nilfs);
		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_shift_to_next_segment(nilfs);
			nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
		}

		segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
		nextnum = nilfs->ns_nextnum;

		if (nilfs->ns_segnum == nilfs->ns_nextnum)
			/* Start from the head of a new full segment */
			alloc++;
	} else {
		/* Continue logs */
		prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
		nilfs_segbuf_map_cont(segbuf, prev);
		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
		nextnum = prev->sb_nextnum;

		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
			segbuf->sb_sum.seg_seq++;
			alloc++;
		}
	}

	err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
	if (err)
		goto failed;

	if (alloc) {
		err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
		if (err)
			goto failed;
	}
	nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);

	BUG_ON(!list_empty(&sci->sc_segbufs));
	list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
	sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
	return 0;

 failed:
	nilfs_segbuf_free(segbuf);
	return err;
}

static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
					 struct the_nilfs *nilfs, int nadd)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 nextnextnum;
	LIST_HEAD(list);
	int err, ret, i;

	prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
	/*
	 * Since the segment specified with nextnum might be allocated during
	 * the previous construction, the buffer including its segusage may
	 * not be dirty.  The following call ensures that the buffer is dirty
	 * and will pin the buffer in memory until the sufile is written.
	 */
	err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
	if (unlikely(err))
		return err;

	for (i = 0; i < nadd; i++) {
		/* extend segment info */
		err = -ENOMEM;
		segbuf = nilfs_segbuf_new(sci->sc_super);
		if (unlikely(!segbuf))
			goto failed;

		/* map this buffer to region of segment on-disk */
		nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
		sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;

		/* allocate the next next full segment */
		err = nilfs_sufile_alloc(sufile, &nextnextnum);
		if (unlikely(err))
			goto failed_segbuf;

		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
		nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);

		list_add_tail(&segbuf->sb_list, &list);
		prev = segbuf;
	}
	list_splice_tail(&list, &sci->sc_segbufs);
	return 0;

 failed_segbuf:
	nilfs_segbuf_free(segbuf);
 failed:
	list_for_each_entry(segbuf, &list, sb_list) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	nilfs_destroy_logs(&list);
	return err;
}

static void nilfs_free_incomplete_logs(struct list_head *logs,
				       struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	if (atomic_read(&segbuf->sb_err)) {
		/* Case 1: The first segment failed */
		if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
			/*
			 * Case 1a:  Partial segment appended into an existing
			 * segment
			 */
			nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
						segbuf->sb_fseg_end);
		else /* Case 1b:  New full segment */
			set_nilfs_discontinued(nilfs);
	}

	prev = segbuf;
	list_for_each_entry_continue(segbuf, logs, sb_list) {
		if (prev->sb_nextnum != segbuf->sb_nextnum) {
			ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
			WARN_ON(ret); /* never fails */
		}
		if (atomic_read(&segbuf->sb_err) &&
		    segbuf->sb_segnum != nilfs->ns_nextnum)
			/* Case 2: extended segment (!= next) failed */
			nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
		prev = segbuf;
	}
}

static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
					  struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	unsigned long live_blocks;
	int ret;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		live_blocks = segbuf->sb_sum.nblocks +
			(segbuf->sb_pseg_start - segbuf->sb_fseg_start);
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     live_blocks,
						     sci->sc_seg_ctime);
		WARN_ON(ret); /* always succeed because the segusage is dirty */
	}
}

static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
					     segbuf->sb_pseg_start -
					     segbuf->sb_fseg_start, 0);
	WARN_ON(ret); /* always succeed because the segusage is dirty */

	list_for_each_entry_continue(segbuf, logs, sb_list) {
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     0, 0);
		WARN_ON(ret); /* always succeed */
	}
}

static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
					    struct nilfs_segment_buffer *last,
					    struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf = last;
	int ret;

	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
		sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret);
	}
	nilfs_truncate_logs(&sci->sc_segbufs, last);
}


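/*
 * nilfs_segctor_collect() retries a full collection pass whenever the
 * segment buffers fill up (-E2BIG): it rolls the stage back to the state
 * saved in prev_stage, and doubles the number of segments added per retry
 * up to SC_MAX_SEGDELTA.
 */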
static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
				 struct the_nilfs *nilfs, int mode)
{
	struct nilfs_cstage prev_stage = sci->sc_stage;
	int err, nadd = 1;

	/* Collection retry loop */
	for (;;) {
		sci->sc_nblk_this_inc = 0;
		sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);

		err = nilfs_segctor_reset_segment_buffer(sci);
		if (unlikely(err))
			goto failed;

		err = nilfs_segctor_collect_blocks(sci, mode);
		sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
		if (!err)
			break;

		if (unlikely(err != -E2BIG))
			goto failed;

		/* The current segment is filled up */
		if (mode != SC_LSEG_SR ||
		    nilfs_sc_cstage_get(sci) < NILFS_ST_CPFILE)
			break;

		nilfs_clear_logs(&sci->sc_segbufs);

		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
			err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
							sci->sc_freesegs,
							sci->sc_nfreesegs,
							NULL);
			WARN_ON(err); /* do not happen */
			sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
		}

		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
		if (unlikely(err))
			return err;

		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
		sci->sc_stage = prev_stage;
	}
	nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
	return 0;

 failed:
	return err;
}

static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
				      struct buffer_head *new_bh)
{
	BUG_ON(!list_empty(&new_bh->b_assoc_buffers));

	list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
	/* The caller must release old_bh */
}

static int
nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
				     struct nilfs_segment_buffer *segbuf,
				     int mode)
{
	struct inode *inode = NULL;
	sector_t blocknr;
	unsigned long nfinfo = segbuf->sb_sum.nfinfo;
	unsigned long nblocks = 0, ndatablk = 0;
	const struct nilfs_sc_operations *sc_op = NULL;
	struct nilfs_segsum_pointer ssp;
	struct nilfs_finfo *finfo = NULL;
	union nilfs_binfo binfo;
	struct buffer_head *bh, *bh_org;
	ino_t ino = 0;
	int err = 0;

	if (!nfinfo)
		goto out;

	blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
	ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	ssp.offset = sizeof(struct nilfs_segment_summary);

	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		if (bh == segbuf->sb_super_root)
			break;
		if (!finfo) {
			finfo = nilfs_segctor_map_segsum_entry(
				sci, &ssp, sizeof(*finfo));
			ino = le64_to_cpu(finfo->fi_ino);
			nblocks = le32_to_cpu(finfo->fi_nblocks);
			ndatablk = le32_to_cpu(finfo->fi_ndatablk);

			inode = bh->b_page->mapping->host;

			if (mode == SC_LSEG_DSYNC)
				sc_op = &nilfs_sc_dsync_ops;
			else if (ino == NILFS_DAT_INO)
				sc_op = &nilfs_sc_dat_ops;
			else /* file blocks */
				sc_op = &nilfs_sc_file_ops;
		}
		bh_org = bh;
		get_bh(bh_org);
		err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
					&binfo);
		if (bh != bh_org)
			nilfs_list_replace_buffer(bh_org, bh);
		brelse(bh_org);
		if (unlikely(err))
			goto failed_bmap;

		if (ndatablk > 0)
			sc_op->write_data_binfo(sci, &ssp, &binfo);
		else
			sc_op->write_node_binfo(sci, &ssp, &binfo);

		blocknr++;
		if (--nblocks == 0) {
			finfo = NULL;
			if (--nfinfo == 0)
				break;
		} else if (ndatablk > 0)
			ndatablk--;
	}
 out:
	return 0;

 failed_bmap:
	return err;
}

static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_segment_buffer *segbuf;
	int err;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
		if (unlikely(err))
			return err;
		nilfs_segbuf_fill_in_segsum(segbuf);
	}
	return 0;
}

static void nilfs_begin_page_io(struct page *page)
{
	if (!page || PageWriteback(page))
		/*
		 * For split b-tree node pages, this function may be called
		 * twice.  We ignore the 2nd or later calls by this check.
		 */
		return;

	lock_page(page);
	clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
}

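/*
 * nilfs_segctor_prepare_write() moves every collected buffer into
 * writeback state: block-device pages holding segment summaries and the
 * super root are flipped page by page, while file pages go through
 * nilfs_begin_page_io(), which tolerates split b-tree node pages being
 * visited more than once.
 */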
static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			if (bh->b_page != bd_page) {
				if (bd_page) {
					lock_page(bd_page);
					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
				}
				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			set_buffer_async_write(bh);
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					lock_page(bd_page);
					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
					bd_page = bh->b_page;
				}
				break;
			}
			if (bh->b_page != fs_page) {
				nilfs_begin_page_io(fs_page);
				fs_page = bh->b_page;
			}
		}
	}
	if (bd_page) {
		lock_page(bd_page);
		clear_page_dirty_for_io(bd_page);
		set_page_writeback(bd_page);
		unlock_page(bd_page);
	}
	nilfs_begin_page_io(fs_page);
}

static int nilfs_segctor_write(struct nilfs_sc_info *sci,
			       struct the_nilfs *nilfs)
{
	int ret;

	ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
	list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
	return ret;
}

static void nilfs_end_page_io(struct page *page, int err)
{
	if (!page)
		return;

	if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
		/*
		 * For b-tree node pages, this function may be called twice
		 * or more because they might be split in a segment.
		 */
		if (PageDirty(page)) {
			/*
			 * For pages holding split b-tree node buffers, the
			 * dirty flag on the buffers may be cleared
			 * discretely.  In that case, the page is redirtied
			 * once for the remaining buffers, and that dirtying
			 * must be cancelled if all the buffers get cleaned
			 * later.
			 */
			lock_page(page);
			if (nilfs_page_buffers_clean(page))
				__nilfs_clear_page_dirty(page);
			unlock_page(page);
		}
		return;
	}

	if (!err) {
		if (!nilfs_page_buffers_clean(page))
			__set_page_dirty_nobuffers(page);
		ClearPageError(page);
	} else {
		__set_page_dirty_nobuffers(page);
		SetPageError(page);
	}

	end_page_writeback(page);
}

static void nilfs_abort_logs(struct list_head *logs, int err)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct buffer_head *bh;

	if (list_empty(logs))
		return;

	list_for_each_entry(segbuf, logs, sb_list) {
		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			if (bh->b_page != bd_page) {
				if (bd_page)
					end_page_writeback(bd_page);
				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			clear_buffer_async_write(bh);
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					end_page_writeback(bd_page);
					bd_page = bh->b_page;
				}
				break;
			}
			if (bh->b_page != fs_page) {
				nilfs_end_page_io(fs_page, err);
				fs_page = bh->b_page;
			}
		}
	}
	if (bd_page)
		end_page_writeback(bd_page);

	nilfs_end_page_io(fs_page, err);
}

static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs, int err)
{
	LIST_HEAD(logs);
	int ret;

	list_splice_tail_init(&sci->sc_write_logs, &logs);
	ret = nilfs_wait_on_logs(&logs);
	nilfs_abort_logs(&logs, ret ? : err);

	list_splice_tail_init(&sci->sc_segbufs, &logs);
	nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
	nilfs_free_incomplete_logs(&logs, nilfs);

	if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
		ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						sci->sc_freesegs,
						sci->sc_nfreesegs,
						NULL);
		WARN_ON(ret); /* do not happen */
	}

	nilfs_destroy_logs(&logs);
}

static void nilfs_set_next_segment(struct the_nilfs *nilfs,
				   struct nilfs_segment_buffer *segbuf)
{
	nilfs->ns_segnum = segbuf->sb_segnum;
	nilfs->ns_nextnum = segbuf->sb_nextnum;
	nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
		+ segbuf->sb_sum.nblocks;
	nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
	nilfs->ns_ctime = segbuf->sb_sum.ctime;
}

1826static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1827{
1828 struct nilfs_segment_buffer *segbuf;
1829 struct page *bd_page = NULL, *fs_page = NULL;
1830 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
1831 int update_sr = false;
1832
1833 list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
1834 struct buffer_head *bh;
1835
1836 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1837 b_assoc_buffers) {
1838 set_buffer_uptodate(bh);
1839 clear_buffer_dirty(bh);
1840 if (bh->b_page != bd_page) {
1841 if (bd_page)
1842 end_page_writeback(bd_page);
1843 bd_page = bh->b_page;
1844 }
1845 }
1846 /*
1847 * We assume that the buffers which belong to the same page
1848 * continue over the buffer list.
1849 * Under this assumption, the last BHs of pages is
1850 * identifiable by the discontinuity of bh->b_page
1851 * (page != fs_page).
1852 *
1853 * For B-tree node blocks, however, this assumption is not
1854 * guaranteed. The cleanup code of B-tree node pages needs
1855 * special care.
1856 */
1857 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1858 b_assoc_buffers) {
1859 const unsigned long set_bits = BIT(BH_Uptodate);
1860 const unsigned long clear_bits =
1861 (BIT(BH_Dirty) | BIT(BH_Async_Write) |
1862 BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
1863 BIT(BH_NILFS_Redirected));
1864
1865 set_mask_bits(&bh->b_state, clear_bits, set_bits);
1866 if (bh == segbuf->sb_super_root) {
1867 if (bh->b_page != bd_page) {
1868 end_page_writeback(bd_page);
1869 bd_page = bh->b_page;
1870 }
1871 update_sr = true;
1872 break;
1873 }
1874 if (bh->b_page != fs_page) {
1875 nilfs_end_page_io(fs_page, 0);
1876 fs_page = bh->b_page;
1877 }
1878 }
1879
1880 if (!nilfs_segbuf_simplex(segbuf)) {
1881 if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
1882 set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1883 sci->sc_lseg_stime = jiffies;
1884 }
1885 if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
1886 clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1887 }
1888 }
1889 /*
1890 * Since pages may continue over multiple segment buffers,
1891 * end of the last page must be checked outside of the loop.
1892 */
	if (bd_page)
		end_page_writeback(bd_page);

	nilfs_end_page_io(fs_page, 0);

	nilfs_drop_collected_inodes(&sci->sc_dirty_files);

	if (nilfs_doing_gc())
		nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
	else
		nilfs->ns_nongc_ctime = sci->sc_seg_ctime;

	sci->sc_nblk_inc += sci->sc_nblk_this_inc;

	segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
	nilfs_set_next_segment(nilfs, segbuf);

	if (update_sr) {
		nilfs->ns_flushed_device = 0;
		nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
				       segbuf->sb_sum.seg_seq, nilfs->ns_cno++);

		clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
		clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
		set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
		nilfs_segctor_clear_metadata_dirty(sci);
	} else
		clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
}

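/**
 * nilfs_segctor_wait - wait for completion of the log writes
 * @sci: segment constructor object
 *
 * Waits for all logs on sc_write_logs to reach the device.  On success,
 * the write is finalized with nilfs_segctor_complete_write() and the
 * segment buffers are destroyed.
 *
 * Return: 0 on success, or a negative error code on write failure.
 */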
static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
{
	int ret;

	ret = nilfs_wait_on_logs(&sci->sc_write_logs);
	if (!ret) {
		nilfs_segctor_complete_write(sci);
		nilfs_destroy_logs(&sci->sc_write_logs);
	}
	return ret;
}

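/**
 * nilfs_segctor_collect_dirty_files - queue dirty inodes for collection
 * @sci: segment constructor object
 * @nilfs: nilfs object
 *
 * Moves inodes from the ns_dirty_files list onto the writer's
 * sc_dirty_files list, reading in the on-disk inode block of each entry
 * on demand and redirtying it so that the ifile is written out as part
 * of this construction.
 *
 * Return: 0 on success, or a negative error code if an inode block
 * could not be read.
 */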
static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct nilfs_inode_info *ii, *n;
	struct inode *ifile = sci->sc_root->ifile;

	spin_lock(&nilfs->ns_inode_lock);
 retry:
	list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
		if (!ii->i_bh) {
			struct buffer_head *ibh;
			int err;

			spin_unlock(&nilfs->ns_inode_lock);
			err = nilfs_ifile_get_inode_block(
				ifile, ii->vfs_inode.i_ino, &ibh);
			if (unlikely(err)) {
				nilfs_msg(sci->sc_super, KERN_WARNING,
					  "log writer: error %d getting inode block (ino=%lu)",
					  err, ii->vfs_inode.i_ino);
				return err;
			}
			spin_lock(&nilfs->ns_inode_lock);
			if (likely(!ii->i_bh))
				ii->i_bh = ibh;
			else
				brelse(ibh);
			goto retry;
		}

		/* Always redirty the buffer to avoid a race condition */
		mark_buffer_dirty(ii->i_bh);
		nilfs_mdt_mark_dirty(ifile);

		clear_bit(NILFS_I_QUEUED, &ii->i_state);
		set_bit(NILFS_I_BUSY, &ii->i_state);
		list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
	}
	spin_unlock(&nilfs->ns_inode_lock);

	return 0;
}

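/**
 * nilfs_segctor_drop_written_files - detach inodes whose blocks were written
 * @sci: segment constructor object
 * @nilfs: nilfs object
 *
 * Removes clean, fully written inodes from the sc_dirty_files list and
 * releases their inode block buffers.  Final iput() calls are deferred to
 * the sc_iput_work work item when calling iput() here could deadlock,
 * i.e. when i_nlink is zero or the mount is not yet finished.
 */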
static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct nilfs_inode_info *ii, *n;
	int during_mount = !(sci->sc_super->s_flags & SB_ACTIVE);
	int defer_iput = false;

	spin_lock(&nilfs->ns_inode_lock);
	list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
		    test_bit(NILFS_I_DIRTY, &ii->i_state))
			continue;

		clear_bit(NILFS_I_BUSY, &ii->i_state);
		brelse(ii->i_bh);
		ii->i_bh = NULL;
		list_del_init(&ii->i_dirty);
		if (!ii->vfs_inode.i_nlink || during_mount) {
			/*
			 * Defer calling iput() to avoid deadlocks if
			 * i_nlink == 0 or mount is not yet finished.
			 */
			list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
			defer_iput = true;
		} else {
			spin_unlock(&nilfs->ns_inode_lock);
			iput(&ii->vfs_inode);
			spin_lock(&nilfs->ns_inode_lock);
		}
	}
	spin_unlock(&nilfs->ns_inode_lock);

	if (defer_iput)
		schedule_work(&sci->sc_iput_work);
}

/*
 * Main procedure of segment constructor
 */
static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int err;

	nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
	sci->sc_cno = nilfs->ns_cno;

	err = nilfs_segctor_collect_dirty_files(sci, nilfs);
	if (unlikely(err))
		goto out;

	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	if (nilfs_segctor_clean(sci))
		goto out;

	do {
		sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;

		err = nilfs_segctor_begin_construction(sci, nilfs);
		if (unlikely(err))
			goto out;

		/* Update time stamp */
		sci->sc_seg_ctime = ktime_get_real_seconds();

		err = nilfs_segctor_collect(sci, nilfs, mode);
		if (unlikely(err))
			goto failed;

		/* Avoid empty segment */
		if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE &&
		    nilfs_segbuf_empty(sci->sc_curseg)) {
			nilfs_segctor_abort_construction(sci, nilfs, 1);
			goto out;
		}

		err = nilfs_segctor_assign(sci, mode);
		if (unlikely(err))
			goto failed;

		if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
			nilfs_segctor_fill_in_file_bmap(sci);

		if (mode == SC_LSEG_SR &&
		    nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) {
			err = nilfs_segctor_fill_in_checkpoint(sci);
			if (unlikely(err))
				goto failed_to_write;

			nilfs_segctor_fill_in_super_root(sci, nilfs);
		}
		nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);

		/* Write partial segments */
		nilfs_segctor_prepare_write(sci);

		nilfs_add_checksums_on_logs(&sci->sc_segbufs,
					    nilfs->ns_crc_seed);

		err = nilfs_segctor_write(sci, nilfs);
		if (unlikely(err))
			goto failed_to_write;

		if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
		    nilfs->ns_blocksize_bits != PAGE_SHIFT) {
			/*
			 * At this point, we avoid double buffering for
			 * blocksize < pagesize because the page dirty
			 * flag is turned off during write and dirty
			 * buffers are not properly collected for pages
			 * crossing over segments.
			 */
			err = nilfs_segctor_wait(sci);
			if (err)
				goto failed_to_write;
		}
	} while (nilfs_sc_cstage_get(sci) != NILFS_ST_DONE);

 out:
	nilfs_segctor_drop_written_files(sci, nilfs);
	return err;

 failed_to_write:
	if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
		nilfs_redirty_inodes(&sci->sc_dirty_files);

 failed:
	if (nilfs_doing_gc())
		nilfs_redirty_inodes(&sci->sc_gc_inodes);
	nilfs_segctor_abort_construction(sci, nilfs, err);
	goto out;
}

/**
 * nilfs_segctor_start_timer - set timer of background write
 * @sci: nilfs_sc_info
 *
 * If the timer has already been set, this function ignores the new request.
 * This function MUST be called within a section locking the segment
 * semaphore.
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
		sci->sc_timer.expires = jiffies + sci->sc_interval;
		add_timer(&sci->sc_timer);
		sci->sc_state |= NILFS_SEGCTOR_COMMIT;
	}
	spin_unlock(&sci->sc_state_lock);
}

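/**
 * nilfs_segctor_do_flush - record a flush request for a file
 * @sci: segment constructor object
 * @bn: flush bit number; the inode number of a metadata file, or zero
 * for data files
 *
 * Sets the corresponding bit in sc_flush_request and wakes up the
 * segctord thread if no other flush request was pending.
 */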
static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_flush_request & BIT(bn))) {
		unsigned long prev_req = sci->sc_flush_request;

		sci->sc_flush_request |= BIT(bn);
		if (!prev_req)
			wake_up(&sci->sc_wait_daemon);
	}
	spin_unlock(&sci->sc_state_lock);
}

/**
 * nilfs_flush_segment - trigger a segment construction for resource control
 * @sb: super block
 * @ino: inode number of the file to be flushed out.
 */
void nilfs_flush_segment(struct super_block *sb, ino_t ino)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	if (!sci || nilfs_doing_construction())
		return;

	/* assign bit 0 to data files */
	nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
}

struct nilfs_segctor_wait_request {
	wait_queue_entry_t wq;
	__u32 seq;
	int err;
	atomic_t done;
};

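/**
 * nilfs_segctor_sync - wait for a segment construction to complete
 * @sci: segment constructor object
 *
 * Queues a wait request with a new sequence number, wakes up the segctord
 * thread, and sleeps until the request is completed by
 * nilfs_segctor_wakeup() or a signal is received.
 *
 * Return: 0 on success, or %-ERESTARTSYS if interrupted.
 */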
static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
{
	struct nilfs_segctor_wait_request wait_req;
	int err = 0;

	spin_lock(&sci->sc_state_lock);
	init_wait(&wait_req.wq);
	wait_req.err = 0;
	atomic_set(&wait_req.done, 0);
	wait_req.seq = ++sci->sc_seq_request;
	spin_unlock(&sci->sc_state_lock);

	init_waitqueue_entry(&wait_req.wq, current);
	add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
	set_current_state(TASK_INTERRUPTIBLE);
	wake_up(&sci->sc_wait_daemon);

	for (;;) {
		if (atomic_read(&wait_req.done)) {
			err = wait_req.err;
			break;
		}
		if (!signal_pending(current)) {
			schedule();
			continue;
		}
		err = -ERESTARTSYS;
		break;
	}
	finish_wait(&sci->sc_wait_request, &wait_req.wq);
	return err;
}

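/**
 * nilfs_segctor_wakeup - complete the waiting log-write requests
 * @sci: segment constructor object
 * @err: error code carried over to the waiters
 *
 * Marks done, and wakes up, every request on the sc_wait_request queue
 * whose sequence number is covered by the current value of sc_seq_done.
 */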
static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
{
	struct nilfs_segctor_wait_request *wrq, *n;
	unsigned long flags;

	spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
	list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
		if (!atomic_read(&wrq->done) &&
		    nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
			wrq->err = err;
			atomic_set(&wrq->done, 1);
		}
		if (atomic_read(&wrq->done)) {
			wrq->wq.func(&wrq->wq,
				     TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
				     0, NULL);
		}
	}
	spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
}

/**
 * nilfs_construct_segment - construct a logical segment
 * @sb: super block
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_segment(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_transaction_info *ti;
	int err;

	if (!sci)
		return -EROFS;

	/* A call inside transactions causes a deadlock. */
	BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);

	err = nilfs_segctor_sync(sci);
	return err;
}

/**
 * nilfs_construct_dsync_segment - construct a data-only logical segment
 * @sb: super block
 * @inode: inode whose data blocks should be written out
 * @start: start byte offset
 * @end: end byte offset (inclusive)
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
				  loff_t start, loff_t end)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_inode_info *ii;
	struct nilfs_transaction_info ti;
	int err = 0;

	if (!sci)
		return -EROFS;

	nilfs_transaction_lock(sb, &ti, 0);

	ii = NILFS_I(inode);
	if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) ||
	    nilfs_test_opt(nilfs, STRICT_ORDER) ||
	    test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    nilfs_discontinued(nilfs)) {
		nilfs_transaction_unlock(sb);
		err = nilfs_segctor_sync(sci);
		return err;
	}

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		spin_unlock(&nilfs->ns_inode_lock);
		nilfs_transaction_unlock(sb);
		return 0;
	}
	spin_unlock(&nilfs->ns_inode_lock);
	sci->sc_dsync_inode = ii;
	sci->sc_dsync_start = start;
	sci->sc_dsync_end = end;

	err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
	if (!err)
		nilfs->ns_flushed_device = 0;

	nilfs_transaction_unlock(sb);
	return err;
}

#define FLUSH_FILE_BIT	(0x1) /* data file only */
#define FLUSH_DAT_BIT	BIT(NILFS_DAT_INO) /* DAT only */

/**
 * nilfs_segctor_accept - record accepted sequence count of log-write requests
 * @sci: segment constructor object
 */
static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	sci->sc_seq_accepted = sci->sc_seq_request;
	spin_unlock(&sci->sc_state_lock);
	del_timer_sync(&sci->sc_timer);
}

/**
 * nilfs_segctor_notify - notify the result of request to caller threads
 * @sci: segment constructor object
 * @mode: mode of log forming
 * @err: error code to be notified
 */
static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
{
	/* Clear requests (even when the construction failed) */
	spin_lock(&sci->sc_state_lock);

	if (mode == SC_LSEG_SR) {
		sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
		sci->sc_seq_done = sci->sc_seq_accepted;
		nilfs_segctor_wakeup(sci, err);
		sci->sc_flush_request = 0;
	} else {
		if (mode == SC_FLUSH_FILE)
			sci->sc_flush_request &= ~FLUSH_FILE_BIT;
		else if (mode == SC_FLUSH_DAT)
			sci->sc_flush_request &= ~FLUSH_DAT_BIT;

		/* re-enable timer if checkpoint creation was not done */
		if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
		    time_before(jiffies, sci->sc_timer.expires))
			add_timer(&sci->sc_timer);
	}
	spin_unlock(&sci->sc_state_lock);
}

/**
 * nilfs_segctor_construct - form logs and write them to disk
 * @sci: segment constructor object
 * @mode: mode of log forming
 */
static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct nilfs_super_block **sbp;
	int err = 0;

	nilfs_segctor_accept(sci);

	if (nilfs_discontinued(nilfs))
		mode = SC_LSEG_SR;
	if (!nilfs_segctor_confirm(sci))
		err = nilfs_segctor_do_construct(sci, mode);

	if (likely(!err)) {
		if (mode != SC_FLUSH_DAT)
			atomic_set(&nilfs->ns_ndirtyblks, 0);
		if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
		    nilfs_discontinued(nilfs)) {
			down_write(&nilfs->ns_sem);
			err = -EIO;
			sbp = nilfs_prepare_super(sci->sc_super,
						  nilfs_sb_will_flip(nilfs));
			if (likely(sbp)) {
				nilfs_set_log_cursor(sbp[0], nilfs);
				err = nilfs_commit_super(sci->sc_super,
							 NILFS_SB_COMMIT);
			}
			up_write(&nilfs->ns_sem);
		}
	}

	nilfs_segctor_notify(sci, mode, err);
	return err;
}

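/*
 * Timer callback for the segment constructor.  It only wakes up the
 * segctord thread; the expired state itself is detected in the thread's
 * main loop by comparing jiffies with sc_timer.expires.
 */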
static void nilfs_construction_timeout(struct timer_list *t)
{
	struct nilfs_sc_info *sci = from_timer(sci, t, sc_timer);

	wake_up_process(sci->sc_timer_task);
}

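/*
 * Dispose of the GC inodes on @head whose blocks have been written out
 * (i.e. those marked NILFS_I_UPDATED), truncating their page caches and
 * clearing their B-tree node caches before the final iput().
 */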
static void
nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
{
	struct nilfs_inode_info *ii, *n;

	list_for_each_entry_safe(ii, n, head, i_dirty) {
		if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
			continue;
		list_del_init(&ii->i_dirty);
		truncate_inode_pages(&ii->vfs_inode.i_data, 0);
		nilfs_btnode_cache_clear(&ii->i_btnode_cache);
		iput(&ii->vfs_inode);
	}
}

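/**
 * nilfs_clean_segments - run a garbage collection pass
 * @sb: super block instance
 * @argv: vector of arguments from the clean-segments ioctl
 * @kbufs: array of buffers holding the GC data prepared in user space
 *
 * Saves the DAT to its shadow map, applies the block relocations prepared
 * by nilfs_ioctl_prepare_clean_segments(), and constructs logs with a
 * super root, retrying until the construction succeeds.  When the DISCARD
 * mount option is set, discard requests are also issued for the freed
 * segments.
 *
 * Return: 0 on success, or a negative error code on failure.
 */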
int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
			 void **kbufs)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_transaction_info ti;
	int err;

	if (unlikely(!sci))
		return -EROFS;

	nilfs_transaction_lock(sb, &ti, 1);

	err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
	if (unlikely(err))
		goto out_unlock;

	err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
	if (unlikely(err)) {
		nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
		goto out_unlock;
	}

	sci->sc_freesegs = kbufs[4];
	sci->sc_nfreesegs = argv[4].v_nmembs;
	list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);

	for (;;) {
		err = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);

		if (likely(!err))
			break;

		nilfs_msg(sb, KERN_WARNING, "error %d cleaning segments", err);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(sci->sc_interval);
	}
	if (nilfs_test_opt(nilfs, DISCARD)) {
		int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
						 sci->sc_nfreesegs);
		if (ret) {
			nilfs_msg(sb, KERN_WARNING,
				  "error %d on discard request, turning discards off for the device",
				  ret);
			nilfs_clear_opt(nilfs, DISCARD);
		}
	}

 out_unlock:
	sci->sc_freesegs = NULL;
	sci->sc_nfreesegs = 0;
	nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
	nilfs_transaction_unlock(sb);
	return err;
}

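/*
 * nilfs_segctor_thread_construct - form logs from the segctord context
 *
 * Runs a construction of the given @mode under the writer lock of the
 * segment semaphore, and arms the construction timer when the written
 * logical segment is left unclosed so that it is completed later.
 */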
static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_transaction_info ti;

	nilfs_transaction_lock(sci->sc_super, &ti, 0);
	nilfs_segctor_construct(sci, mode);

	/*
	 * An unclosed segment should be retried.  We do this using the
	 * sc_timer; its timeout invokes a complete construction, which
	 * closes the current logical segment.
	 */
	if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
		nilfs_segctor_start_timer(sci);

	nilfs_transaction_unlock(sci->sc_super);
}

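/*
 * Serve a flush requested through nilfs_relax_pressure_in_lock() or
 * nilfs_transaction_lock() right away, giving the DAT precedence over
 * data files, and clear the served request bit.
 */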
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
{
	int mode;

	spin_lock(&sci->sc_state_lock);
	mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
		SC_FLUSH_DAT : SC_FLUSH_FILE;
	spin_unlock(&sci->sc_state_lock);

	if (mode) {
		nilfs_segctor_do_construct(sci, mode);

		spin_lock(&sci->sc_state_lock);
		sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
			~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
		spin_unlock(&sci->sc_state_lock);
	}
	clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
}

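/*
 * Decide how to serve pending flush requests: data files or the DAT
 * alone can be flushed without making a checkpoint while the current
 * logical segment is closed or still young; otherwise a full
 * construction with a super root (SC_LSEG_SR) is performed.
 */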
static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
{
	if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
		if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
			return SC_FLUSH_FILE;
		else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
			return SC_FLUSH_DAT;
	}
	return SC_LSEG_SR;
}

/**
 * nilfs_segctor_thread - main loop of the segment constructor thread.
 * @arg: pointer to a struct nilfs_sc_info.
 *
 * nilfs_segctor_thread() initializes a timer and serves as a daemon
 * to execute segment constructions.
 */
static int nilfs_segctor_thread(void *arg)
{
	struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int timeout = 0;

	sci->sc_timer_task = current;

	/* start sync. */
	sci->sc_task = current;
	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
	nilfs_msg(sci->sc_super, KERN_INFO,
		  "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
		  sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);

	spin_lock(&sci->sc_state_lock);
 loop:
	for (;;) {
		int mode;

		if (sci->sc_state & NILFS_SEGCTOR_QUIT)
			goto end_thread;

		if (timeout || sci->sc_seq_request != sci->sc_seq_done)
			mode = SC_LSEG_SR;
		else if (sci->sc_flush_request)
			mode = nilfs_segctor_flush_mode(sci);
		else
			break;

		spin_unlock(&sci->sc_state_lock);
		nilfs_segctor_thread_construct(sci, mode);
		spin_lock(&sci->sc_state_lock);
		timeout = 0;
	}

	if (freezing(current)) {
		spin_unlock(&sci->sc_state_lock);
		try_to_freeze();
		spin_lock(&sci->sc_state_lock);
	} else {
		DEFINE_WAIT(wait);
		int should_sleep = 1;

		prepare_to_wait(&sci->sc_wait_daemon, &wait,
				TASK_INTERRUPTIBLE);

		if (sci->sc_seq_request != sci->sc_seq_done)
			should_sleep = 0;
		else if (sci->sc_flush_request)
			should_sleep = 0;
		else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
			should_sleep = time_before(jiffies,
					sci->sc_timer.expires);

		if (should_sleep) {
			spin_unlock(&sci->sc_state_lock);
			schedule();
			spin_lock(&sci->sc_state_lock);
		}
		finish_wait(&sci->sc_wait_daemon, &wait);
		timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
			   time_after_eq(jiffies, sci->sc_timer.expires));

		if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
			set_nilfs_discontinued(nilfs);
	}
	goto loop;

 end_thread:
	spin_unlock(&sci->sc_state_lock);

	/* end sync. */
	sci->sc_task = NULL;
	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
	return 0;
}

static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
{
	struct task_struct *t;

	t = kthread_run(nilfs_segctor_thread, sci, "segctord");
	if (IS_ERR(t)) {
		int err = PTR_ERR(t);

		nilfs_msg(sci->sc_super, KERN_ERR,
			  "error %d creating segctord thread", err);
		return err;
	}
	wait_event(sci->sc_wait_task, sci->sc_task != NULL);
	return 0;
}

static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
	__acquires(&sci->sc_state_lock)
	__releases(&sci->sc_state_lock)
{
	sci->sc_state |= NILFS_SEGCTOR_QUIT;

	while (sci->sc_task) {
		wake_up(&sci->sc_wait_daemon);
		spin_unlock(&sci->sc_state_lock);
		wait_event(sci->sc_wait_task, sci->sc_task == NULL);
		spin_lock(&sci->sc_state_lock);
	}
}

/*
 * Setup & clean-up functions
 */
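/**
 * nilfs_segctor_new - allocate and initialize a log writer object
 * @sb: super block instance
 * @root: root object of the current filesystem tree
 *
 * Return: an initialized nilfs_sc_info struct, or NULL if the allocation
 * fails.  The construction interval and watermark default values are
 * overridden by the mount options when those are specified.
 */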
static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
					       struct nilfs_root *root)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci;

	sci = kzalloc(sizeof(*sci), GFP_KERNEL);
	if (!sci)
		return NULL;

	sci->sc_super = sb;

	nilfs_get_root(root);
	sci->sc_root = root;

	init_waitqueue_head(&sci->sc_wait_request);
	init_waitqueue_head(&sci->sc_wait_daemon);
	init_waitqueue_head(&sci->sc_wait_task);
	spin_lock_init(&sci->sc_state_lock);
	INIT_LIST_HEAD(&sci->sc_dirty_files);
	INIT_LIST_HEAD(&sci->sc_segbufs);
	INIT_LIST_HEAD(&sci->sc_write_logs);
	INIT_LIST_HEAD(&sci->sc_gc_inodes);
	INIT_LIST_HEAD(&sci->sc_iput_queue);
	INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
	timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);

	sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
	sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
	sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;

	if (nilfs->ns_interval)
		sci->sc_interval = HZ * nilfs->ns_interval;
	if (nilfs->ns_watermark)
		sci->sc_watermark = nilfs->ns_watermark;
	return sci;
}

static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
{
	int ret, retrycount = NILFS_SC_CLEANUP_RETRY;

	/*
	 * The segctord thread was stopped and its timer was removed,
	 * but some tasks may remain, so retry the construction a
	 * bounded number of times.
	 */
	do {
		struct nilfs_transaction_info ti;

		nilfs_transaction_lock(sci->sc_super, &ti, 0);
		ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_transaction_unlock(sci->sc_super);

		flush_work(&sci->sc_iput_work);

	} while (ret && retrycount-- > 0);
}

/**
 * nilfs_segctor_destroy - destroy the segment constructor.
 * @sci: nilfs_sc_info
 *
 * nilfs_segctor_destroy() kills the segctord thread and frees
 * the nilfs_sc_info struct.
 * Caller must hold the segment semaphore.
 */
static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int flag;

	up_write(&nilfs->ns_segctor_sem);

	spin_lock(&sci->sc_state_lock);
	nilfs_segctor_kill_thread(sci);
	flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
		|| sci->sc_seq_request != sci->sc_seq_done);
	spin_unlock(&sci->sc_state_lock);

	if (flush_work(&sci->sc_iput_work))
		flag = true;

	if (flag || !nilfs_segctor_confirm(sci))
		nilfs_segctor_write_out(sci);

	if (!list_empty(&sci->sc_dirty_files)) {
		nilfs_msg(sci->sc_super, KERN_WARNING,
			  "disposed unprocessed dirty file(s) when stopping log writer");
		nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
	}

	if (!list_empty(&sci->sc_iput_queue)) {
		nilfs_msg(sci->sc_super, KERN_WARNING,
			  "disposed unprocessed inode(s) in iput queue when stopping log writer");
		nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
	}

	WARN_ON(!list_empty(&sci->sc_segbufs));
	WARN_ON(!list_empty(&sci->sc_write_logs));

	nilfs_put_root(sci->sc_root);

	down_write(&nilfs->ns_segctor_sem);

	del_timer_sync(&sci->sc_timer);
	kfree(sci);
}

/**
 * nilfs_attach_log_writer - attach log writer
 * @sb: super block instance
 * @root: root object of the current filesystem tree
 *
 * This allocates a log writer object, initializes it, and starts the
 * log writer.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err;

	if (nilfs->ns_writer) {
		/*
		 * This happens if the filesystem was remounted
		 * read/write after nilfs_error degraded it to a
		 * read-only mount.
		 */
		nilfs_detach_log_writer(sb);
	}

	nilfs->ns_writer = nilfs_segctor_new(sb, root);
	if (!nilfs->ns_writer)
		return -ENOMEM;

	err = nilfs_segctor_start_thread(nilfs->ns_writer);
	if (err) {
		kfree(nilfs->ns_writer);
		nilfs->ns_writer = NULL;
	}
	return err;
}

/**
 * nilfs_detach_log_writer - destroy log writer
 * @sb: super block instance
 *
 * This kills the log writer daemon, frees the log writer object, and
 * disposes of the remaining list of dirty files.
 */
void nilfs_detach_log_writer(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	LIST_HEAD(garbage_list);

	down_write(&nilfs->ns_segctor_sem);
	if (nilfs->ns_writer) {
		nilfs_segctor_destroy(nilfs->ns_writer);
		nilfs->ns_writer = NULL;
	}

	/* Forcibly free the list of dirty files */
	spin_lock(&nilfs->ns_inode_lock);
	if (!list_empty(&nilfs->ns_dirty_files)) {
		list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
		nilfs_msg(sb, KERN_WARNING,
			  "disposed unprocessed dirty file(s) when detaching log writer");
	}
	spin_unlock(&nilfs->ns_inode_lock);
	up_write(&nilfs->ns_segctor_sem);

	nilfs_dispose_list(nilfs, &garbage_list, 1);
}

/* Operations depending on the construction mode and file type */
struct nilfs_sc_operations {
	int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	void (*write_data_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
	void (*write_node_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
};

/*
 * Other definitions
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);

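/*
 * Wraparound-safe "greater than or equal" comparison of 32-bit sequence
 * counters: a >= b iff the signed difference (a - b) is non-negative.
 * For example, nilfs_cnt32_ge(1, 0xfffffffe) is true because the counter
 * has simply wrapped around.
 */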
#define nilfs_cnt32_ge(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) &&  \
	 ((__s32)((a) - (b)) >= 0))

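/**
 * nilfs_prepare_segment_lock - prepare a transaction context on the task
 * @sb: super block instance
 * @ti: pre-allocated nilfs_transaction_info, or NULL to allocate one
 *
 * Return: a positive nesting count if the task is already inside a nilfs
 * transaction, 0 if a fresh context was installed in
 * current->journal_info, or %-ENOMEM on allocation failure.
 */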
static int nilfs_prepare_segment_lock(struct super_block *sb,
				      struct nilfs_transaction_info *ti)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	void *save = NULL;

	if (cur_ti) {
		if (cur_ti->ti_magic == NILFS_TI_MAGIC)
			return ++cur_ti->ti_count;

		/*
		 * If the journal_info field is occupied by another FS,
		 * it is saved and will be restored on
		 * nilfs_transaction_commit().
		 */
		nilfs_warn(sb, "journal info from a different FS");
		save = current->journal_info;
	}
	if (!ti) {
		ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
		if (!ti)
			return -ENOMEM;
		ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
	} else {
		ti->ti_flags = 0;
	}
	ti->ti_count = 0;
	ti->ti_save = save;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;
	return 0;
}

/**
 * nilfs_transaction_begin - start indivisible file operations.
 * @sb: super block
 * @ti: nilfs_transaction_info
 * @vacancy_check: flags for vacancy rate checks
 *
 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
 * the segment semaphore, to make segment construction and write tasks
 * exclusive.  The function is used in pairs with nilfs_transaction_commit().
 * The region enclosed by these two functions can be nested.  To avoid a
 * deadlock, the semaphore is only acquired or released in the outermost call.
 *
 * This function allocates a nilfs_transaction_info struct to keep context
 * information on it.  It is initialized and hooked onto the current task in
 * the outermost call.  If a pre-allocated struct is given to @ti, it is used
 * instead; otherwise a new struct is assigned from a slab.
 *
 * When the @vacancy_check flag is set, this function will check the amount
 * of free space, and will wait for the GC to reclaim disk space if capacity
 * is low.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-ENOSPC - No space left on device
 */
int nilfs_transaction_begin(struct super_block *sb,
			    struct nilfs_transaction_info *ti,
			    int vacancy_check)
{
	struct the_nilfs *nilfs;
	int ret = nilfs_prepare_segment_lock(sb, ti);
	struct nilfs_transaction_info *trace_ti;

	if (unlikely(ret < 0))
		return ret;
	if (ret > 0) {
		trace_ti = current->journal_info;

		trace_nilfs2_transaction_transition(sb, trace_ti,
				    trace_ti->ti_count, trace_ti->ti_flags,
				    TRACE_NILFS2_TRANSACTION_BEGIN);
		return 0;
	}

	sb_start_intwrite(sb);

	nilfs = sb->s_fs_info;
	down_read(&nilfs->ns_segctor_sem);
	if (vacancy_check && nilfs_near_disk_full(nilfs)) {
		up_read(&nilfs->ns_segctor_sem);
		ret = -ENOSPC;
		goto failed;
	}

	trace_ti = current->journal_info;
	trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count,
					    trace_ti->ti_flags,
					    TRACE_NILFS2_TRANSACTION_BEGIN);
	return 0;

 failed:
	ti = current->journal_info;
	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return ret;
}

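/*
 * Typical usage of the transaction interface (a sketch with error
 * handling elided; the actual callers live in the file operations of
 * inode.c, namei.c, and friends):
 *
 *	struct nilfs_transaction_info ti;
 *	int err;
 *
 *	err = nilfs_transaction_begin(sb, &ti, 1);
 *	if (unlikely(err))
 *		return err;
 *	... modify inodes, directory entries, or data blocks ...
 *	err = nilfs_transaction_commit(sb);
 */
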
/**
 * nilfs_transaction_commit - commit indivisible file operations.
 * @sb: super block
 *
 * nilfs_transaction_commit() releases the read semaphore which is
 * acquired by nilfs_transaction_begin().  This is only performed
 * in the outermost call of this function.  If a commit flag is set,
 * nilfs_transaction_commit() sets a timer to start the segment
 * constructor.  If a sync flag is set, it starts construction
 * directly.
 */
int nilfs_transaction_commit(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err = 0;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	ti->ti_flags |= NILFS_TI_COMMIT;
	if (ti->ti_count > 0) {
		ti->ti_count--;
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
		return 0;
	}
	if (nilfs->ns_writer) {
		struct nilfs_sc_info *sci = nilfs->ns_writer;

		if (ti->ti_flags & NILFS_TI_COMMIT)
			nilfs_segctor_start_timer(sci);
		if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
			nilfs_segctor_do_flush(sci, 0);
	}
	up_read(&nilfs->ns_segctor_sem);
	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);

	current->journal_info = ti->ti_save;

	if (ti->ti_flags & NILFS_TI_SYNC)
		err = nilfs_construct_segment(sb);
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
	return err;
}

void nilfs_transaction_abort(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	if (ti->ti_count > 0) {
		ti->ti_count--;
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
		return;
	}
	up_read(&nilfs->ns_segctor_sem);

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
		    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);

	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	sb_end_intwrite(sb);
}

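/**
 * nilfs_relax_pressure_in_lock - relax memory pressure under a transaction
 * @sb: super block instance
 *
 * Called with the segment semaphore held for reading when too many dirty
 * blocks have accumulated.  If a flush is pending, the semaphore is
 * temporarily upgraded to writer mode and an immediate flush is run in
 * the caller's context before downgrading back to reader mode.
 */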
void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	if (sb_rdonly(sb) || unlikely(!sci) || !sci->sc_flush_request)
		return;

	set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
	up_read(&nilfs->ns_segctor_sem);

	down_write(&nilfs->ns_segctor_sem);
	if (sci->sc_flush_request &&
	    test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
		struct nilfs_transaction_info *ti = current->journal_info;

		ti->ti_flags |= NILFS_TI_WRITER;
		nilfs_segctor_do_immediate_flush(sci);
		ti->ti_flags &= ~NILFS_TI_WRITER;
	}
	downgrade_write(&nilfs->ns_segctor_sem);
}

static void nilfs_transaction_lock(struct super_block *sb,
				   struct nilfs_transaction_info *ti,
				   int gcflag)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	WARN_ON(cur_ti);
	ti->ti_flags = NILFS_TI_WRITER;
	ti->ti_count = 0;
	ti->ti_save = cur_ti;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;

	for (;;) {
		trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK);

		down_write(&nilfs->ns_segctor_sem);
		if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
			break;

		nilfs_segctor_do_immediate_flush(sci);

		up_write(&nilfs->ns_segctor_sem);
		cond_resched();
	}
	if (gcflag)
		ti->ti_flags |= NILFS_TI_GC;

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK);
}

static void nilfs_transaction_unlock(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct the_nilfs *nilfs = sb->s_fs_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	BUG_ON(ti->ti_count > 0);

	up_write(&nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;

	trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
			    ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK);
}

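/*
 * Reserve @bytes of space in the segment summary and return a pointer
 * to it, moving on to the next segment summary block when the current
 * block has no room left.
 */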
static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
					    struct nilfs_segsum_pointer *ssp,
					    unsigned int bytes)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	unsigned int blocksize = sci->sc_super->s_blocksize;
	void *p;

	if (unlikely(ssp->offset + bytes > blocksize)) {
		ssp->offset = 0;
		BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
					       &segbuf->sb_segsum_buffers));
		ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
	}
	p = ssp->bh->b_data + ssp->offset;
	ssp->offset += bytes;
	return p;
}

/**
 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 * @sci: nilfs_sc_info
 */
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	struct buffer_head *sumbh;
	unsigned int sumbytes;
	unsigned int flags = 0;
	int err;

	if (nilfs_doing_gc())
		flags = NILFS_SS_GC;
	err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
	if (unlikely(err))
		return err;

	sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	sumbytes = segbuf->sb_sum.sumbytes;
	sci->sc_finfo_ptr.bh = sumbh;  sci->sc_finfo_ptr.offset = sumbytes;
	sci->sc_binfo_ptr.bh = sumbh;  sci->sc_binfo_ptr.offset = sumbytes;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
	return 0;
}

/**
 * nilfs_segctor_zeropad_segsum - zero pad the rest of the segment summary area
 * @sci: segment constructor object
 *
 * nilfs_segctor_zeropad_segsum() zero-fills unallocated space at the end of
 * the current segment summary block.
 */
static void nilfs_segctor_zeropad_segsum(struct nilfs_sc_info *sci)
{
	struct nilfs_segsum_pointer *ssp;

	ssp = sci->sc_blk_cnt > 0 ? &sci->sc_binfo_ptr : &sci->sc_finfo_ptr;
	if (ssp->offset < ssp->bh->b_size)
		memset(ssp->bh->b_data + ssp->offset, 0,
		       ssp->bh->b_size - ssp->offset);
}

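/*
 * Close the current segment buffer and switch to the next one, zero
 * padding the remainder of its summary area.  Returns -E2BIG when the
 * buffer just filled was the last one available.
 */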
static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
	if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
		return -E2BIG; /*
				* The current segment is filled up
				* (internal code)
				*/
	nilfs_segctor_zeropad_segsum(sci);
	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
	return nilfs_segctor_reset_segment_buffer(sci);
}

static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	int err;

	if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		segbuf = sci->sc_curseg;
	}
	err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
	if (likely(!err))
		segbuf->sb_sum.flags |= NILFS_SS_SR;
	return err;
}

/*
 * Functions for making segment summary and payloads
 */
static int nilfs_segctor_segsum_block_required(
	struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
	unsigned int binfo_size)
{
	unsigned int blocksize = sci->sc_super->s_blocksize;
	/* Sizes of finfo and binfo are small compared with the blocksize */

	return ssp->offset + binfo_size +
		(!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
		blocksize;
}

static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
				      struct inode *inode)
{
	sci->sc_curseg->sb_sum.nfinfo++;
	sci->sc_binfo_ptr = sci->sc_finfo_ptr;
	nilfs_segctor_map_segsum_entry(
		sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));

	if (NILFS_I(inode)->i_root &&
	    !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
	/* skip finfo */
}

static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
				    struct inode *inode)
{
	struct nilfs_finfo *finfo;
	struct nilfs_inode_info *ii;
	struct nilfs_segment_buffer *segbuf;
	__u64 cno;

	if (sci->sc_blk_cnt == 0)
		return;

	ii = NILFS_I(inode);

	if (ii->i_type & NILFS_I_TYPE_GC)
		cno = ii->i_cno;
	else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
		cno = 0;
	else
		cno = sci->sc_cno;

	finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
					       sizeof(*finfo));
	finfo->fi_ino = cpu_to_le64(inode->i_ino);
	finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
	finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
	finfo->fi_cno = cpu_to_le64(cno);

	segbuf = sci->sc_curseg;
	segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
		sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
	sci->sc_finfo_ptr = sci->sc_binfo_ptr;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}

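/*
 * Add the file buffer @bh to the current segment together with a block
 * information entry of @binfo_size bytes, switching to the next segment
 * buffer and/or extending the segment summary when there is not enough
 * room, and opening a new finfo when @bh is the first block of a file.
 */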
static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
					struct buffer_head *bh,
					struct inode *inode,
					unsigned int binfo_size)
{
	struct nilfs_segment_buffer *segbuf;
	int required, err = 0;

 retry:
	segbuf = sci->sc_curseg;
	required = nilfs_segctor_segsum_block_required(
		sci, &sci->sc_binfo_ptr, binfo_size);
	if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
		nilfs_segctor_end_finfo(sci, inode);
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		goto retry;
	}
	if (unlikely(required)) {
		nilfs_segctor_zeropad_segsum(sci);
		err = nilfs_segbuf_extend_segsum(segbuf);
		if (unlikely(err))
			goto failed;
	}
	if (sci->sc_blk_cnt == 0)
		nilfs_segctor_begin_finfo(sci, inode);

	nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
	/* Substitution to vblocknr is delayed until update_blocknr() */
	nilfs_segbuf_add_file_buffer(segbuf, bh);
	sci->sc_blk_cnt++;
 failed:
	return err;
}

/*
 * Callback functions that enumerate, mark, and collect dirty blocks
 */
static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
				   struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode,
					   sizeof(struct nilfs_binfo_v));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
}

static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
}

static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*binfo_v));
	*binfo_v = binfo->bi_v;
}

static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	__le64 *vblocknr = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*vblocknr));
	*vblocknr = binfo->bi_v.bi_vblocknr;
}

static const struct nilfs_sc_operations nilfs_sc_file_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_file_bmap,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = nilfs_write_file_node_binfo,
};

static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode,
					    sizeof(struct nilfs_binfo_dat));
}

static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	__le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
							sizeof(*blkoff));
	*blkoff = binfo->bi_dat.bi_blkoff;
}

static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	struct nilfs_binfo_dat *binfo_dat =
		nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
	*binfo_dat = binfo->bi_dat;
}

static const struct nilfs_sc_operations nilfs_sc_dat_ops = {
	.collect_data = nilfs_collect_dat_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_dat_bmap,
	.write_data_binfo = nilfs_write_dat_data_binfo,
	.write_node_binfo = nilfs_write_dat_node_binfo,
};

static const struct nilfs_sc_operations nilfs_sc_dsync_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = NULL,
	.collect_bmap = NULL,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = NULL,
};

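/*
 * Collect dirty data buffers of @inode within the byte range [@start,
 * @end] onto @listp, at most @nlimit of them, taking a reference on
 * each buffer found.  Returns the number of buffers collected.
 */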
static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
					      struct list_head *listp,
					      size_t nlimit,
					      loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	struct folio_batch fbatch;
	pgoff_t index = 0, last = ULONG_MAX;
	size_t ndirties = 0;
	int i;

	if (unlikely(start != 0 || end != LLONG_MAX)) {
		/*
		 * A valid range is given for sync-ing data pages.  The
		 * range is rounded to page boundaries; extra dirty buffers
		 * may be included if blocksize < pagesize.
		 */
		index = start >> PAGE_SHIFT;
		last = end >> PAGE_SHIFT;
	}
	folio_batch_init(&fbatch);
 repeat:
	if (unlikely(index > last) ||
	    !filemap_get_folios_tag(mapping, &index, last,
		    PAGECACHE_TAG_DIRTY, &fbatch))
		return ndirties;

	for (i = 0; i < folio_batch_count(&fbatch); i++) {
		struct buffer_head *bh, *head;
		struct folio *folio = fbatch.folios[i];

		folio_lock(folio);
		if (unlikely(folio->mapping != mapping)) {
			/* Exclude folios removed from the address space */
			folio_unlock(folio);
			continue;
		}
		head = folio_buffers(folio);
		if (!head)
			head = create_empty_buffers(folio,
					i_blocksize(inode), 0);

		bh = head;
		do {
			if (!buffer_dirty(bh) || buffer_async_write(bh))
				continue;
			get_bh(bh);
			list_add_tail(&bh->b_assoc_buffers, listp);
			ndirties++;
			if (unlikely(ndirties >= nlimit)) {
				folio_unlock(folio);
				folio_batch_release(&fbatch);
				cond_resched();
				return ndirties;
			}
		} while (bh = bh->b_this_page, bh != head);

		folio_unlock(folio);
	}
	folio_batch_release(&fbatch);
	cond_resched();
	goto repeat;
}

static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
					    struct list_head *listp)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *btnc_inode = ii->i_assoc_inode;
	struct folio_batch fbatch;
	struct buffer_head *bh, *head;
	unsigned int i;
	pgoff_t index = 0;

	if (!btnc_inode)
		return;
	folio_batch_init(&fbatch);

	while (filemap_get_folios_tag(btnc_inode->i_mapping, &index,
				(pgoff_t)-1, PAGECACHE_TAG_DIRTY, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			bh = head = folio_buffers(fbatch.folios[i]);
			do {
				if (buffer_dirty(bh) &&
				    !buffer_async_write(bh)) {
					get_bh(bh);
					list_add_tail(&bh->b_assoc_buffers,
						      listp);
				}
				bh = bh->b_this_page;
			} while (bh != head);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

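/*
 * Detach the inodes on @head and release them in small batches.  When
 * @force is set, their inode block buffers are dropped as well;
 * otherwise inodes that are still dirty are requeued onto the
 * ns_dirty_files list instead of being released.
 */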
static void nilfs_dispose_list(struct the_nilfs *nilfs,
			       struct list_head *head, int force)
{
	struct nilfs_inode_info *ii, *n;
	struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
	unsigned int nv = 0;

	while (!list_empty(head)) {
		spin_lock(&nilfs->ns_inode_lock);
		list_for_each_entry_safe(ii, n, head, i_dirty) {
			list_del_init(&ii->i_dirty);
			if (force) {
				if (unlikely(ii->i_bh)) {
					brelse(ii->i_bh);
					ii->i_bh = NULL;
				}
			} else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
				set_bit(NILFS_I_QUEUED, &ii->i_state);
				list_add_tail(&ii->i_dirty,
					      &nilfs->ns_dirty_files);
				continue;
			}
			ivec[nv++] = ii;
			if (nv == SC_N_INODEVEC)
				break;
		}
		spin_unlock(&nilfs->ns_inode_lock);

		for (pii = ivec; nv > 0; pii++, nv--)
			iput(&(*pii)->vfs_inode);
	}
}

static void nilfs_iput_work_func(struct work_struct *work)
{
	struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
						 sc_iput_work);
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

	nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
}

static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
				     struct nilfs_root *root)
{
	int ret = 0;

	if (nilfs_mdt_fetch_dirty(root->ifile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
		ret++;
	if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
		ret++;
	return ret;
}

static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
{
	return list_empty(&sci->sc_dirty_files) &&
		!test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
		sci->sc_nfreesegs == 0 &&
		(!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
}

static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int ret = 0;

	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	spin_lock(&nilfs->ns_inode_lock);
	if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
		ret++;

	spin_unlock(&nilfs->ns_inode_lock);
	return ret;
}

static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

	nilfs_mdt_clear_dirty(sci->sc_root->ifile);
	nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
	nilfs_mdt_clear_dirty(nilfs->ns_sufile);
	nilfs_mdt_clear_dirty(nilfs->ns_dat);
}

static void nilfs_fill_in_file_bmap(struct inode *ifile,
				    struct nilfs_inode_info *ii)
{
	struct buffer_head *ibh;
	struct nilfs_inode *raw_inode;

	if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
		ibh = ii->i_bh;
		BUG_ON(!ibh);
		raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
						  ibh);
		nilfs_bmap_write(ii->i_bmap, raw_inode);
		nilfs_ifile_unmap_inode(raw_inode);
	}
}

static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
		nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
		set_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

/**
 * nilfs_write_root_mdt_inode - export root metadata inode information to
 * the on-disk inode
 * @inode: inode object of the root metadata file
 * @raw_inode: on-disk inode
 *
 * nilfs_write_root_mdt_inode() writes inode information and bmap data of
 * @inode to the inode area of the metadata file allocated on the super root
 * block created to finalize the log.  Since super root blocks are configured
 * each time, this function zero-fills the unused area of @raw_inode.
 */
static void nilfs_write_root_mdt_inode(struct inode *inode,
				       struct nilfs_inode *raw_inode)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	nilfs_write_inode_common(inode, raw_inode);

	/* zero-fill unused portion of raw_inode */
	raw_inode->i_xattr = 0;
	raw_inode->i_pad = 0;
	memset((void *)raw_inode + sizeof(*raw_inode), 0,
	       nilfs->ns_inode_size - sizeof(*raw_inode));

	nilfs_bmap_write(NILFS_I(inode)->i_bmap, raw_inode);
}

static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *raw_sr;
	unsigned int isz, srsz;

	bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;

	lock_buffer(bh_sr);
	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
	isz = nilfs->ns_inode_size;
	srsz = NILFS_SR_BYTES(isz);

	raw_sr->sr_sum = 0;  /* Ensure initialization within this update */
	raw_sr->sr_bytes = cpu_to_le16(srsz);
	raw_sr->sr_nongc_ctime
		= cpu_to_le64(nilfs_doing_gc() ?
			      nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
	raw_sr->sr_flags = 0;

	nilfs_write_root_mdt_inode(nilfs->ns_dat, (void *)raw_sr +
				   NILFS_SR_DAT_OFFSET(isz));
	nilfs_write_root_mdt_inode(nilfs->ns_cpfile, (void *)raw_sr +
				   NILFS_SR_CPFILE_OFFSET(isz));
	nilfs_write_root_mdt_inode(nilfs->ns_sufile, (void *)raw_sr +
				   NILFS_SR_SUFILE_OFFSET(isz));

	memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
	set_buffer_uptodate(bh_sr);
	unlock_buffer(bh_sr);
}

static void nilfs_redirty_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
			clear_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_drop_collected_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
			continue;

		clear_bit(NILFS_I_INODE_SYNC, &ii->i_state);
		set_bit(NILFS_I_UPDATED, &ii->i_state);
	}
}

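/*
 * nilfs_segctor_apply_buffers() runs @collect over each buffer on @listp,
 * unlinking the buffer from the list and dropping its reference as it
 * goes.  If @collect fails, or if @collect is NULL, all remaining buffers
 * are simply unlinked and released, so the list is always emptied on
 * return.
 */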
static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
				       struct inode *inode,
				       struct list_head *listp,
				       int (*collect)(struct nilfs_sc_info *,
						      struct buffer_head *,
						      struct inode *))
{
	struct buffer_head *bh, *n;
	int err = 0;

	if (collect) {
		list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
			list_del_init(&bh->b_assoc_buffers);
			err = collect(sci, bh, inode);
			brelse(bh);
			if (unlikely(err))
				goto dispose_buffers;
		}
		return 0;
	}

 dispose_buffers:
	while (!list_empty(listp)) {
		bh = list_first_entry(listp, struct buffer_head,
				      b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh);
	}
	return err;
}

static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
{
	/* Remaining number of blocks within segment buffer */
	return sci->sc_segbuf_nblocks -
		(sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
}

static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
				   struct inode *inode,
				   const struct nilfs_sc_operations *sc_ops)
{
	LIST_HEAD(data_buffers);
	LIST_HEAD(node_buffers);
	int err;

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		size_t n, rest = nilfs_segctor_buffer_rest(sci);

		n = nilfs_lookup_dirty_data_buffers(
			inode, &data_buffers, rest + 1, 0, LLONG_MAX);
		if (n > rest) {
			err = nilfs_segctor_apply_buffers(
				sci, inode, &data_buffers,
				sc_ops->collect_data);
			BUG_ON(!err); /* always receive -E2BIG or true error */
			goto break_or_fail;
		}
	}
	nilfs_lookup_dirty_node_buffers(inode, &node_buffers);

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		err = nilfs_segctor_apply_buffers(
			sci, inode, &data_buffers, sc_ops->collect_data);
		if (unlikely(err)) {
			/* dispose node list */
			nilfs_segctor_apply_buffers(
				sci, inode, &node_buffers, NULL);
			goto break_or_fail;
		}
		sci->sc_stage.flags |= NILFS_CF_NODE;
	}
	/* Collect node */
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_node);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_bmap);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_segctor_end_finfo(sci, inode);
	sci->sc_stage.flags &= ~NILFS_CF_NODE;

 break_or_fail:
	return err;
}

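/*
 * Per-file collection order in nilfs_segctor_scan_file() above: data
 * buffers are looked up with a limit of rest + 1 so that an overflow of
 * the current segment buffer is detected as soon as it happens (-E2BIG);
 * node (b-tree) buffers and then bmap buffers follow.  The NILFS_CF_NODE
 * flag remembers that the data pass for the current file is already done,
 * so when collection continues into a following log of the same
 * construction, the scan resumes with the node buffers instead of
 * re-collecting the same data blocks.
 */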
static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
					 struct inode *inode)
{
	LIST_HEAD(data_buffers);
	size_t n, rest = nilfs_segctor_buffer_rest(sci);
	int err;

	n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
					    sci->sc_dsync_start,
					    sci->sc_dsync_end);

	err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
					  nilfs_collect_file_data);
	if (!err) {
		nilfs_segctor_end_finfo(sci, inode);
		BUG_ON(n > rest);
		/* always receive -E2BIG or true error if n > rest */
	}
	return err;
}

/**
 * nilfs_free_segments - free the segments given by an array of segment numbers
 * @nilfs: nilfs object
 * @segnumv: array of segment numbers to be freed
 * @nsegs: number of segments to be freed in @segnumv
 *
 * nilfs_free_segments() wraps nilfs_sufile_freev() and
 * nilfs_sufile_cancel_freev(), and edits the segment usage metadata file
 * (sufile) to free all segments given by @segnumv and @nsegs at once.  If
 * it fails midway, it cancels the changes so that none of the segments are
 * freed.  If @nsegs is 0, this function does nothing.
 *
 * The freeing of segments is not finalized until the writing of a log with
 * a super root block containing this sufile change is complete, and it can
 * be canceled with nilfs_sufile_cancel_freev() until then.
 *
 * Return: 0 on success, or one of the following negative error codes on
 * failure.
 * * %-EINVAL	- Invalid segment number.
 * * %-EIO	- I/O error (including metadata corruption).
 * * %-ENOMEM	- Insufficient memory available.
 */
static int nilfs_free_segments(struct the_nilfs *nilfs, __u64 *segnumv,
			       size_t nsegs)
{
	size_t ndone;
	int ret;

	if (!nsegs)
		return 0;

	ret = nilfs_sufile_freev(nilfs->ns_sufile, segnumv, nsegs, &ndone);
	if (unlikely(ret)) {
		nilfs_sufile_cancel_freev(nilfs->ns_sufile, segnumv, ndone,
					  NULL);
		/*
		 * If a segment usage of the segments to be freed is in a
		 * hole block, nilfs_sufile_freev() will return -ENOENT.
		 * In this case, -EINVAL should be returned to the caller
		 * since there is something wrong with the given segment
		 * number array.  This error can only occur during GC, so
		 * there is no need to worry about it propagating to other
		 * callers (such as fsync).
		 */
		if (ret == -ENOENT) {
			nilfs_err(nilfs->ns_sb,
				  "The segment usage entry %llu to be freed is invalid (in a hole)",
				  (unsigned long long)segnumv[ndone]);
			ret = -EINVAL;
		}
	}
	return ret;
}

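/*
 * nilfs_segctor_collect_blocks() is the stage machine of dirty block
 * collection.  The switch below intentionally falls through the NILFS_ST_*
 * stages in order; when a segment buffer fills up (-E2BIG) or an error
 * occurs, the current position (the stage number plus the dirty_file_ptr /
 * gc_inode_ptr cursors) is kept in sc_stage so that a later call can
 * continue exactly where this one stopped.
 */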
static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct list_head *head;
	struct nilfs_inode_info *ii;
	int err = 0;

	switch (nilfs_sc_cstage_get(sci)) {
	case NILFS_ST_INIT:
		/* Pre-processes */
		sci->sc_stage.flags = 0;

		if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
			sci->sc_nblk_inc = 0;
			sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
			if (mode == SC_LSEG_DSYNC) {
				nilfs_sc_cstage_set(sci, NILFS_ST_DSYNC);
				goto dsync_mode;
			}
		}

		sci->sc_stage.dirty_file_ptr = NULL;
		sci->sc_stage.gc_inode_ptr = NULL;
		if (mode == SC_FLUSH_DAT) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DAT);
			goto dat_stage;
		}
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_GC:
		if (nilfs_doing_gc()) {
			head = &sci->sc_gc_inodes;
			ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
						head, i_dirty);
			list_for_each_entry_continue(ii, head, i_dirty) {
				err = nilfs_segctor_scan_file(
					sci, &ii->vfs_inode,
					&nilfs_sc_file_ops);
				if (unlikely(err)) {
					sci->sc_stage.gc_inode_ptr = list_entry(
						ii->i_dirty.prev,
						struct nilfs_inode_info,
						i_dirty);
					goto break_or_fail;
				}
				set_bit(NILFS_I_COLLECTED, &ii->i_state);
			}
			sci->sc_stage.gc_inode_ptr = NULL;
		}
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_FILE:
		head = &sci->sc_dirty_files;
		ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
					i_dirty);
		list_for_each_entry_continue(ii, head, i_dirty) {
			clear_bit(NILFS_I_DIRTY, &ii->i_state);

			err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
						      &nilfs_sc_file_ops);
			if (unlikely(err)) {
				sci->sc_stage.dirty_file_ptr =
					list_entry(ii->i_dirty.prev,
						   struct nilfs_inode_info,
						   i_dirty);
				goto break_or_fail;
			}
			/* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
			/* XXX: required ? */
		}
		sci->sc_stage.dirty_file_ptr = NULL;
		if (mode == SC_FLUSH_FILE) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
			return 0;
		}
		nilfs_sc_cstage_inc(sci);
		sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
		fallthrough;
	case NILFS_ST_IFILE:
		err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);
		/* Creating a checkpoint */
		err = nilfs_cpfile_create_checkpoint(nilfs->ns_cpfile,
						     nilfs->ns_cno);
		if (unlikely(err))
			break;
		fallthrough;
	case NILFS_ST_CPFILE:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_SUFILE:
		err = nilfs_free_segments(nilfs, sci->sc_freesegs,
					  sci->sc_nfreesegs);
		if (unlikely(err))
			break;
		sci->sc_stage.flags |= NILFS_CF_SUFREED;

		err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_DAT:
 dat_stage:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
					      &nilfs_sc_dat_ops);
		if (unlikely(err))
			break;
		if (mode == SC_FLUSH_DAT) {
			nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
			return 0;
		}
		nilfs_sc_cstage_inc(sci);
		fallthrough;
	case NILFS_ST_SR:
		if (mode == SC_LSEG_SR) {
			/* Appending a super root */
			err = nilfs_segctor_add_super_root(sci);
			if (unlikely(err))
				break;
		}
		/* End of a logical segment */
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
		return 0;
	case NILFS_ST_DSYNC:
 dsync_mode:
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
		ii = sci->sc_dsync_inode;
		if (!test_bit(NILFS_I_BUSY, &ii->i_state))
			break;

		err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
		if (unlikely(err))
			break;
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
		return 0;
	case NILFS_ST_DONE:
		return 0;
	default:
		BUG();
	}

 break_or_fail:
	return err;
}

/**
 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
 * @sci: nilfs_sc_info
 * @nilfs: nilfs object
 *
 * Return: 0 on success, or a negative error code on failure.
 */
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
					    struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	__u64 nextnum;
	int err, alloc = 0;

	segbuf = nilfs_segbuf_new(sci->sc_super);
	if (unlikely(!segbuf))
		return -ENOMEM;

	if (list_empty(&sci->sc_write_logs)) {
		nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
				 nilfs->ns_pseg_offset, nilfs);
		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_shift_to_next_segment(nilfs);
			nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
		}

		segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
		nextnum = nilfs->ns_nextnum;

		if (nilfs->ns_segnum == nilfs->ns_nextnum)
			/* Start from the head of a new full segment */
			alloc++;
	} else {
		/* Continue logs */
		prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
		nilfs_segbuf_map_cont(segbuf, prev);
		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
		nextnum = prev->sb_nextnum;

		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
			segbuf->sb_sum.seg_seq++;
			alloc++;
		}
	}

	err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
	if (err)
		goto failed;

	if (alloc) {
		err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
		if (err)
			goto failed;
	}
	nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);

	BUG_ON(!list_empty(&sci->sc_segbufs));
	list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
	sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
	return 0;

 failed:
	nilfs_segbuf_free(segbuf);
	return err;
}

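/*
 * nilfs_segctor_extend_segments() appends @nadd more segment buffers to
 * sc_segbufs so that a collection pass that overflowed the current log can
 * be retried with more room.  Each new buffer is mapped to a freshly
 * allocated full segment; on failure, the sufile allocations made so far
 * are rolled back and the new buffers are destroyed.
 */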
static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
					 struct the_nilfs *nilfs, int nadd)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 nextnextnum;
	LIST_HEAD(list);
	int err, ret, i;

	prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
	/*
	 * Since the segment specified with nextnum might be allocated during
	 * the previous construction, the buffer including its segusage may
	 * not be dirty.  The following call ensures that the buffer is dirty
	 * and will pin the buffer on memory until the sufile is written.
	 */
	err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
	if (unlikely(err))
		return err;

	for (i = 0; i < nadd; i++) {
		/* extend segment info */
		err = -ENOMEM;
		segbuf = nilfs_segbuf_new(sci->sc_super);
		if (unlikely(!segbuf))
			goto failed;

		/* map this buffer to region of segment on-disk */
		nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
		sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;

		/* allocate the next next full segment */
		err = nilfs_sufile_alloc(sufile, &nextnextnum);
		if (unlikely(err))
			goto failed_segbuf;

		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
		nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);

		list_add_tail(&segbuf->sb_list, &list);
		prev = segbuf;
	}
	list_splice_tail(&list, &sci->sc_segbufs);
	return 0;

 failed_segbuf:
	nilfs_segbuf_free(segbuf);
 failed:
	list_for_each_entry(segbuf, &list, sb_list) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	nilfs_destroy_logs(&list);
	return err;
}

static void nilfs_free_incomplete_logs(struct list_head *logs,
				       struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	if (atomic_read(&segbuf->sb_err)) {
		/* Case 1: The first segment failed */
		if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
			/*
			 * Case 1a: Partial segment appended into an existing
			 * segment
			 */
			nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
						segbuf->sb_fseg_end);
		else /* Case 1b: New full segment */
			set_nilfs_discontinued(nilfs);
	}

	prev = segbuf;
	list_for_each_entry_continue(segbuf, logs, sb_list) {
		if (prev->sb_nextnum != segbuf->sb_nextnum) {
			ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
			WARN_ON(ret); /* never fails */
		}
		if (atomic_read(&segbuf->sb_err) &&
		    segbuf->sb_segnum != nilfs->ns_nextnum)
			/* Case 2: extended segment (!= next) failed */
			nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
		prev = segbuf;
	}
}

static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
					  struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	unsigned long live_blocks;
	int ret;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		live_blocks = segbuf->sb_sum.nblocks +
			(segbuf->sb_pseg_start - segbuf->sb_fseg_start);
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     live_blocks,
						     sci->sc_seg_ctime);
		WARN_ON(ret); /* always succeeds because the segusage is dirty */
	}
}

static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
					     segbuf->sb_pseg_start -
					     segbuf->sb_fseg_start, 0);
	WARN_ON(ret); /* always succeeds because the segusage is dirty */

	list_for_each_entry_continue(segbuf, logs, sb_list) {
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     0, 0);
		WARN_ON(ret); /* always succeeds */
	}
}

static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
					    struct nilfs_segment_buffer *last,
					    struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf = last;
	int ret;

	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
		sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	nilfs_truncate_logs(&sci->sc_segbufs, last);
}

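/*
 * nilfs_segctor_collect() drives the collection retry loop: if
 * nilfs_segctor_collect_blocks() fails with -E2BIG (the prepared segment
 * buffers filled up), the partially built logs are cleared, any pending
 * segment frees are canceled, more segments are attached (doubling nadd up
 * to SC_MAX_SEGDELTA), the stage is rewound to its state on entry, and the
 * collection is redone.  Unused trailing segment buffers are truncated
 * once collection succeeds.
 */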
static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
				 struct the_nilfs *nilfs, int mode)
{
	struct nilfs_cstage prev_stage = sci->sc_stage;
	int err, nadd = 1;

	/* Collection retry loop */
	for (;;) {
		sci->sc_nblk_this_inc = 0;
		sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);

		err = nilfs_segctor_reset_segment_buffer(sci);
		if (unlikely(err))
			goto failed;

		err = nilfs_segctor_collect_blocks(sci, mode);
		sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
		if (!err)
			break;

		if (unlikely(err != -E2BIG))
			goto failed;

		/* The current segment is filled up */
		if (mode != SC_LSEG_SR ||
		    nilfs_sc_cstage_get(sci) < NILFS_ST_CPFILE)
			break;

		nilfs_clear_logs(&sci->sc_segbufs);

		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
			err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
							sci->sc_freesegs,
							sci->sc_nfreesegs,
							NULL);
			WARN_ON(err); /* should not happen */
			sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
		}

		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
		if (unlikely(err))
			return err;

		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
		sci->sc_stage = prev_stage;
	}
	nilfs_segctor_zeropad_segsum(sci);
	nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
	return 0;

 failed:
	return err;
}

static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
				      struct buffer_head *new_bh)
{
	BUG_ON(!list_empty(&new_bh->b_assoc_buffers));

	list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
	/* The caller must release old_bh */
}

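/*
 * nilfs_segctor_update_payload_blocknr() walks the payload buffers of
 * @segbuf in their final on-disk order, asks the owning file's bmap to
 * assign each buffer its new block address (nilfs_bmap_assign() may swap
 * in a copied buffer, which then replaces the original on the payload
 * list), and records the resulting binfo entry in the segment summary.
 * The finfo/nblocks/ndatablk counters track where one file's run of data
 * and node blocks ends and the next file's begins.
 */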
static int
nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
				     struct nilfs_segment_buffer *segbuf,
				     int mode)
{
	struct inode *inode = NULL;
	sector_t blocknr;
	unsigned long nfinfo = segbuf->sb_sum.nfinfo;
	unsigned long nblocks = 0, ndatablk = 0;
	const struct nilfs_sc_operations *sc_op = NULL;
	struct nilfs_segsum_pointer ssp;
	struct nilfs_finfo *finfo = NULL;
	union nilfs_binfo binfo;
	struct buffer_head *bh, *bh_org;
	ino_t ino = 0;
	int err = 0;

	if (!nfinfo)
		goto out;

	blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
	ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	ssp.offset = sizeof(struct nilfs_segment_summary);

	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		if (bh == segbuf->sb_super_root)
			break;
		if (!finfo) {
			finfo = nilfs_segctor_map_segsum_entry(
				sci, &ssp, sizeof(*finfo));
			ino = le64_to_cpu(finfo->fi_ino);
			nblocks = le32_to_cpu(finfo->fi_nblocks);
			ndatablk = le32_to_cpu(finfo->fi_ndatablk);

			inode = bh->b_folio->mapping->host;

			if (mode == SC_LSEG_DSYNC)
				sc_op = &nilfs_sc_dsync_ops;
			else if (ino == NILFS_DAT_INO)
				sc_op = &nilfs_sc_dat_ops;
			else /* file blocks */
				sc_op = &nilfs_sc_file_ops;
		}
		bh_org = bh;
		get_bh(bh_org);
		err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
					&binfo);
		if (bh != bh_org)
			nilfs_list_replace_buffer(bh_org, bh);
		brelse(bh_org);
		if (unlikely(err))
			goto failed_bmap;

		if (ndatablk > 0)
			sc_op->write_data_binfo(sci, &ssp, &binfo);
		else
			sc_op->write_node_binfo(sci, &ssp, &binfo);

		blocknr++;
		if (--nblocks == 0) {
			finfo = NULL;
			if (--nfinfo == 0)
				break;
		} else if (ndatablk > 0)
			ndatablk--;
	}
 out:
	return 0;

 failed_bmap:
	return err;
}

static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_segment_buffer *segbuf;
	int err;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
		if (unlikely(err))
			return err;
		nilfs_segbuf_fill_in_segsum(segbuf);
	}
	return 0;
}

static void nilfs_begin_folio_io(struct folio *folio)
{
	if (!folio || folio_test_writeback(folio))
		/*
		 * For split b-tree node pages, this function may be called
		 * twice.  We ignore the 2nd or later calls by this check.
		 */
		return;

	folio_lock(folio);
	folio_clear_dirty_for_io(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
}

/**
 * nilfs_prepare_write_logs - prepare to write logs
 * @logs: logs to prepare for writing
 * @seed: checksum seed value
 *
 * nilfs_prepare_write_logs() adds checksums and prepares the block
 * buffers/folios for writing logs.  To stabilize folios of memory-mapped
 * file blocks by putting them in writeback state before calculating the
 * checksums, the payload blocks are prepared first; the segment summary
 * and super root blocks, in which the checksums will be embedded, are
 * prepared afterwards.
 */
static void nilfs_prepare_write_logs(struct list_head *logs, u32 seed)
{
	struct nilfs_segment_buffer *segbuf;
	struct folio *bd_folio = NULL, *fs_folio = NULL;
	struct buffer_head *bh;

	/* Prepare to write payload blocks */
	list_for_each_entry(segbuf, logs, sb_list) {
		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			if (bh == segbuf->sb_super_root)
				break;
			set_buffer_async_write(bh);
			if (bh->b_folio != fs_folio) {
				nilfs_begin_folio_io(fs_folio);
				fs_folio = bh->b_folio;
			}
		}
	}
	nilfs_begin_folio_io(fs_folio);

	nilfs_add_checksums_on_logs(logs, seed);

	/* Prepare to write segment summary blocks */
	list_for_each_entry(segbuf, logs, sb_list) {
		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			mark_buffer_dirty(bh);
			if (bh->b_folio == bd_folio)
				continue;
			if (bd_folio) {
				folio_lock(bd_folio);
				folio_wait_writeback(bd_folio);
				folio_clear_dirty_for_io(bd_folio);
				folio_start_writeback(bd_folio);
				folio_unlock(bd_folio);
			}
			bd_folio = bh->b_folio;
		}
	}

	/* Prepare to write super root block */
	bh = NILFS_LAST_SEGBUF(logs)->sb_super_root;
	if (bh) {
		mark_buffer_dirty(bh);
		if (bh->b_folio != bd_folio) {
			folio_lock(bd_folio);
			folio_wait_writeback(bd_folio);
			folio_clear_dirty_for_io(bd_folio);
			folio_start_writeback(bd_folio);
			folio_unlock(bd_folio);
			bd_folio = bh->b_folio;
		}
	}

	if (bd_folio) {
		folio_lock(bd_folio);
		folio_wait_writeback(bd_folio);
		folio_clear_dirty_for_io(bd_folio);
		folio_start_writeback(bd_folio);
		folio_unlock(bd_folio);
	}
}

static int nilfs_segctor_write(struct nilfs_sc_info *sci,
			       struct the_nilfs *nilfs)
{
	int ret;

	ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
	list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
	return ret;
}

static void nilfs_end_folio_io(struct folio *folio, int err)
{
	if (!folio)
		return;

	if (buffer_nilfs_node(folio_buffers(folio)) &&
	    !folio_test_writeback(folio)) {
		/*
		 * For b-tree node pages, this function may be called twice
		 * or more because they might be split in a segment.
		 */
		if (folio_test_dirty(folio)) {
			/*
			 * For pages holding split b-tree node buffers, dirty
			 * flag on the buffers may be cleared discretely.
			 * In that case, the page is once redirtied for
			 * remaining buffers, and it must be cancelled if
			 * all the buffers get cleaned later.
			 */
			folio_lock(folio);
			if (nilfs_folio_buffers_clean(folio))
				__nilfs_clear_folio_dirty(folio);
			folio_unlock(folio);
		}
		return;
	}

	if (err || !nilfs_folio_buffers_clean(folio))
		filemap_dirty_folio(folio->mapping, folio);

	folio_end_writeback(folio);
}

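/*
 * nilfs_abort_logs() unwinds the writeback state set up by
 * nilfs_prepare_write_logs() after a failed or partial write: uptodate
 * flags are cleared on the segment summary (and super root) buffers,
 * async-write flags are cleared on the payload buffers, and writeback is
 * ended on every folio involved.  nilfs_end_folio_io(folio, err) redirties
 * the file folios on error, so the unwritten data is not lost.
 */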
static void nilfs_abort_logs(struct list_head *logs, int err)
{
	struct nilfs_segment_buffer *segbuf;
	struct folio *bd_folio = NULL, *fs_folio = NULL;
	struct buffer_head *bh;

	if (list_empty(logs))
		return;

	list_for_each_entry(segbuf, logs, sb_list) {
		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			clear_buffer_uptodate(bh);
			if (bh->b_folio != bd_folio) {
				if (bd_folio)
					folio_end_writeback(bd_folio);
				bd_folio = bh->b_folio;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			if (bh == segbuf->sb_super_root) {
				clear_buffer_uptodate(bh);
				if (bh->b_folio != bd_folio) {
					folio_end_writeback(bd_folio);
					bd_folio = bh->b_folio;
				}
				break;
			}
			clear_buffer_async_write(bh);
			if (bh->b_folio != fs_folio) {
				nilfs_end_folio_io(fs_folio, err);
				fs_folio = bh->b_folio;
			}
		}
	}
	if (bd_folio)
		folio_end_writeback(bd_folio);

	nilfs_end_folio_io(fs_folio, err);
}

static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs, int err)
{
	LIST_HEAD(logs);
	int ret;

	list_splice_tail_init(&sci->sc_write_logs, &logs);
	ret = nilfs_wait_on_logs(&logs);
	nilfs_abort_logs(&logs, ret ? : err);

	list_splice_tail_init(&sci->sc_segbufs, &logs);
	if (list_empty(&logs))
		return; /* if the first segment buffer preparation failed */

	nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
	nilfs_free_incomplete_logs(&logs, nilfs);

	if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
		ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						sci->sc_freesegs,
						sci->sc_nfreesegs,
						NULL);
		WARN_ON(ret); /* should not happen */
	}

	nilfs_destroy_logs(&logs);
}

static void nilfs_set_next_segment(struct the_nilfs *nilfs,
				   struct nilfs_segment_buffer *segbuf)
{
	nilfs->ns_segnum = segbuf->sb_segnum;
	nilfs->ns_nextnum = segbuf->sb_nextnum;
	nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
		+ segbuf->sb_sum.nblocks;
	nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
	nilfs->ns_ctime = segbuf->sb_sum.ctime;
}

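/*
 * nilfs_segctor_complete_write() is the success counterpart of
 * nilfs_abort_logs(): it marks all written buffers uptodate and clean,
 * ends writeback on their folios, records where the next partial segment
 * will start, and, if this construction wrote a super root, advances the
 * checkpoint number and clears the dirty state of the metadata files.
 */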
static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct folio *bd_folio = NULL, *fs_folio = NULL;
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	bool update_sr = false;

	list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
			if (bh->b_folio != bd_folio) {
				if (bd_folio)
					folio_end_writeback(bd_folio);
				bd_folio = bh->b_folio;
			}
		}
		/*
		 * We assume that the buffers which belong to the same folio
		 * continue over the buffer list.
		 * Under this assumption, the last BH of each folio is
		 * identifiable by the discontinuity of bh->b_folio
		 * (folio != fs_folio).
		 *
		 * For B-tree node blocks, however, this assumption is not
		 * guaranteed.  The cleanup code of B-tree node folios needs
		 * special care.
		 */
		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			const unsigned long set_bits = BIT(BH_Uptodate);
			const unsigned long clear_bits =
				(BIT(BH_Dirty) | BIT(BH_Async_Write) |
				 BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
				 BIT(BH_NILFS_Redirected));

			if (bh == segbuf->sb_super_root) {
				set_buffer_uptodate(bh);
				clear_buffer_dirty(bh);
				if (bh->b_folio != bd_folio) {
					folio_end_writeback(bd_folio);
					bd_folio = bh->b_folio;
				}
				update_sr = true;
				break;
			}
			set_mask_bits(&bh->b_state, clear_bits, set_bits);
			if (bh->b_folio != fs_folio) {
				nilfs_end_folio_io(fs_folio, 0);
				fs_folio = bh->b_folio;
			}
		}

		if (!nilfs_segbuf_simplex(segbuf)) {
			if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
				set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
				sci->sc_lseg_stime = jiffies;
			}
			if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
				clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
		}
	}
	/*
	 * Since folios may continue over multiple segment buffers,
	 * end of the last folio must be checked outside of the loop.
	 */
	if (bd_folio)
		folio_end_writeback(bd_folio);

	nilfs_end_folio_io(fs_folio, 0);

	nilfs_drop_collected_inodes(&sci->sc_dirty_files);

	if (nilfs_doing_gc())
		nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
	else
		nilfs->ns_nongc_ctime = sci->sc_seg_ctime;

	sci->sc_nblk_inc += sci->sc_nblk_this_inc;

	segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
	nilfs_set_next_segment(nilfs, segbuf);

	if (update_sr) {
		nilfs->ns_flushed_device = 0;
		nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
				       segbuf->sb_sum.seg_seq, nilfs->ns_cno++);

		clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
		clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
		set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
		nilfs_segctor_clear_metadata_dirty(sci);
	} else
		clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
}

static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
{
	int ret;

	ret = nilfs_wait_on_logs(&sci->sc_write_logs);
	if (!ret) {
		nilfs_segctor_complete_write(sci);
		nilfs_destroy_logs(&sci->sc_write_logs);
	}
	return ret;
}

static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct nilfs_inode_info *ii, *n;
	struct inode *ifile = sci->sc_root->ifile;

	spin_lock(&nilfs->ns_inode_lock);
 retry:
	list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
		if (!ii->i_bh) {
			struct buffer_head *ibh;
			int err;

			spin_unlock(&nilfs->ns_inode_lock);
			err = nilfs_ifile_get_inode_block(
				ifile, ii->vfs_inode.i_ino, &ibh);
			if (unlikely(err)) {
				nilfs_warn(sci->sc_super,
					   "log writer: error %d getting inode block (ino=%lu)",
					   err, ii->vfs_inode.i_ino);
				return err;
			}
			spin_lock(&nilfs->ns_inode_lock);
			if (likely(!ii->i_bh))
				ii->i_bh = ibh;
			else
				brelse(ibh);
			goto retry;
		}

		/* Always redirty the buffer to avoid race condition */
		mark_buffer_dirty(ii->i_bh);
		nilfs_mdt_mark_dirty(ifile);

		clear_bit(NILFS_I_QUEUED, &ii->i_state);
		set_bit(NILFS_I_BUSY, &ii->i_state);
		list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
	}
	spin_unlock(&nilfs->ns_inode_lock);

	return 0;
}

static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct nilfs_inode_info *ii, *n;
	int during_mount = !(sci->sc_super->s_flags & SB_ACTIVE);
	int defer_iput = false;

	spin_lock(&nilfs->ns_inode_lock);
	list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
		    test_bit(NILFS_I_DIRTY, &ii->i_state))
			continue;

		clear_bit(NILFS_I_BUSY, &ii->i_state);
		brelse(ii->i_bh);
		ii->i_bh = NULL;
		list_del_init(&ii->i_dirty);
		if (!ii->vfs_inode.i_nlink || during_mount) {
			/*
			 * Defer calling iput() to avoid deadlocks if
			 * i_nlink == 0 or mount is not yet finished.
			 */
			list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
			defer_iput = true;
		} else {
			spin_unlock(&nilfs->ns_inode_lock);
			iput(&ii->vfs_inode);
			spin_lock(&nilfs->ns_inode_lock);
		}
	}
	spin_unlock(&nilfs->ns_inode_lock);

	if (defer_iput)
		schedule_work(&sci->sc_iput_work);
}

/*
 * Main procedure of segment constructor.
 *
 * nilfs_segctor_do_construct() repeats the collect/assign/write cycle
 * until the collection stage reaches NILFS_ST_DONE, so a single
 * construction may emit several partial segments (logs).
 */
static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int err;

	if (sb_rdonly(sci->sc_super))
		return -EROFS;

	nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
	sci->sc_cno = nilfs->ns_cno;

	err = nilfs_segctor_collect_dirty_files(sci, nilfs);
	if (unlikely(err))
		goto out;

	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	if (nilfs_segctor_clean(sci))
		goto out;

	do {
		sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;

		err = nilfs_segctor_begin_construction(sci, nilfs);
		if (unlikely(err))
			goto failed;

		/* Update time stamp */
		sci->sc_seg_ctime = ktime_get_real_seconds();

		err = nilfs_segctor_collect(sci, nilfs, mode);
		if (unlikely(err))
			goto failed;

		/* Avoid empty segment */
		if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE &&
		    nilfs_segbuf_empty(sci->sc_curseg)) {
			nilfs_segctor_abort_construction(sci, nilfs, 1);
			goto out;
		}

		err = nilfs_segctor_assign(sci, mode);
		if (unlikely(err))
			goto failed;

		if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
			nilfs_segctor_fill_in_file_bmap(sci);

		if (mode == SC_LSEG_SR &&
		    nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) {
			err = nilfs_cpfile_finalize_checkpoint(
				nilfs->ns_cpfile, nilfs->ns_cno, sci->sc_root,
				sci->sc_nblk_inc + sci->sc_nblk_this_inc,
				sci->sc_seg_ctime,
				!test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags));
			if (unlikely(err))
				goto failed_to_write;

			nilfs_segctor_fill_in_super_root(sci, nilfs);
		}
		nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);

		/* Write partial segments */
		nilfs_prepare_write_logs(&sci->sc_segbufs, nilfs->ns_crc_seed);

		err = nilfs_segctor_write(sci, nilfs);
		if (unlikely(err))
			goto failed_to_write;

		if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
		    nilfs->ns_blocksize_bits != PAGE_SHIFT) {
			/*
			 * At this point, we avoid double buffering
			 * for blocksize < pagesize because page dirty
			 * flag is turned off during write and dirty
			 * buffers are not properly collected for
			 * pages crossing over segments.
			 */
			err = nilfs_segctor_wait(sci);
			if (err)
				goto failed_to_write;
		}
	} while (nilfs_sc_cstage_get(sci) != NILFS_ST_DONE);

 out:
	nilfs_segctor_drop_written_files(sci, nilfs);
	return err;

 failed_to_write:
 failed:
	if (mode == SC_LSEG_SR && nilfs_sc_cstage_get(sci) >= NILFS_ST_IFILE)
		nilfs_redirty_inodes(&sci->sc_dirty_files);
	if (nilfs_doing_gc())
		nilfs_redirty_inodes(&sci->sc_gc_inodes);
	nilfs_segctor_abort_construction(sci, nilfs, err);
	goto out;
}

/**
 * nilfs_segctor_start_timer - set timer of background write
 * @sci: nilfs_sc_info
 *
 * If the timer has already been set, this function ignores the new request.
 * This function MUST be called within a section locking the segment
 * semaphore.
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
		if (sci->sc_task) {
			sci->sc_timer.expires = jiffies + sci->sc_interval;
			add_timer(&sci->sc_timer);
		}
		sci->sc_state |= NILFS_SEGCTOR_COMMIT;
	}
	spin_unlock(&sci->sc_state_lock);
}

static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_flush_request & BIT(bn))) {
		unsigned long prev_req = sci->sc_flush_request;

		sci->sc_flush_request |= BIT(bn);
		if (!prev_req)
			wake_up(&sci->sc_wait_daemon);
	}
	spin_unlock(&sci->sc_state_lock);
}

/**
 * nilfs_flush_segment - trigger a segment construction for resource control
 * @sb: super block
 * @ino: inode number of the file to be flushed out.
 */
void nilfs_flush_segment(struct super_block *sb, ino_t ino)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	if (!sci || nilfs_doing_construction())
		return;
	nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
	/* assign bit 0 to data files */
}

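/*
 * Synchronous construction requests are matched to their completion with
 * 32-bit sequence counters: a waiter bumps sc_seq_request and sleeps on
 * sc_wait_request, while the log writer publishes its progress through
 * sc_seq_done.  nilfs_cnt32_ge() compares the counters modulo 2^32, so
 * wraparound of the sequence numbers is harmless.
 */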
struct nilfs_segctor_wait_request {
	wait_queue_entry_t wq;
	__u32 seq;
	int err;
	atomic_t done;
};

static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
{
	struct nilfs_segctor_wait_request wait_req;
	int err = 0;

	init_wait(&wait_req.wq);
	wait_req.err = 0;
	atomic_set(&wait_req.done, 0);
	init_waitqueue_entry(&wait_req.wq, current);

	/*
	 * To prevent a race issue where completion notifications from the
	 * log writer thread are missed, increment the request sequence count
	 * "sc_seq_request" and insert a wait queue entry using the current
	 * sequence number into the "sc_wait_request" queue at the same time
	 * within the lock section of "sc_state_lock".
	 */
	spin_lock(&sci->sc_state_lock);
	wait_req.seq = ++sci->sc_seq_request;
	add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
	spin_unlock(&sci->sc_state_lock);

	wake_up(&sci->sc_wait_daemon);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		/*
		 * Synchronize only while the log writer thread is alive.
		 * Leave flushing out after the log writer thread exits to
		 * the cleanup work in nilfs_segctor_destroy().
		 */
		if (!sci->sc_task)
			break;

		if (atomic_read(&wait_req.done)) {
			err = wait_req.err;
			break;
		}
		if (!signal_pending(current)) {
			schedule();
			continue;
		}
		err = -ERESTARTSYS;
		break;
	}
	finish_wait(&sci->sc_wait_request, &wait_req.wq);
	return err;
}

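/*
 * nilfs_segctor_wakeup() completes the waiters queued by
 * nilfs_segctor_sync() whose sequence number has been reached by
 * sc_seq_done, reporting @err to them.  With @force it completes every
 * waiter unconditionally, which nilfs_segctor_destroy() uses to release
 * tasks that would otherwise sleep past the log writer's death.
 */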
static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err, bool force)
{
	struct nilfs_segctor_wait_request *wrq, *n;
	unsigned long flags;

	spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
	list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
		if (!atomic_read(&wrq->done) &&
		    (force || nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq))) {
			wrq->err = err;
			atomic_set(&wrq->done, 1);
		}
		if (atomic_read(&wrq->done)) {
			wrq->wq.func(&wrq->wq,
				     TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
				     0, NULL);
		}
	}
	spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
}

/**
 * nilfs_construct_segment - construct a logical segment
 * @sb: super block
 *
 * Return: 0 on success, or one of the following negative error codes on
 * failure.
 * * %-EROFS	- Read only filesystem.
 * * %-EIO	- I/O error.
 * * %-ENOSPC	- No space left on device (only in a panic state).
 * * %-ERESTARTSYS - Interrupted.
 * * %-ENOMEM	- Insufficient memory available.
 */
int nilfs_construct_segment(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_transaction_info *ti;

	if (sb_rdonly(sb) || unlikely(!sci))
		return -EROFS;

	/* A call inside transactions causes a deadlock. */
	BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);

	return nilfs_segctor_sync(sci);
}

/**
 * nilfs_construct_dsync_segment - construct a data-only logical segment
 * @sb: super block
 * @inode: inode whose data blocks should be written out
 * @start: start byte offset
 * @end: end byte offset (inclusive)
 *
 * Return: 0 on success, or one of the following negative error codes on
 * failure.
 * * %-EROFS	- Read only filesystem.
 * * %-EIO	- I/O error.
 * * %-ENOSPC	- No space left on device (only in a panic state).
 * * %-ERESTARTSYS - Interrupted.
 * * %-ENOMEM	- Insufficient memory available.
 */
int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
				  loff_t start, loff_t end)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_inode_info *ii;
	struct nilfs_transaction_info ti;
	int err = 0;

	if (sb_rdonly(sb) || unlikely(!sci))
		return -EROFS;

	nilfs_transaction_lock(sb, &ti, 0);

	ii = NILFS_I(inode);
	if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) ||
	    nilfs_test_opt(nilfs, STRICT_ORDER) ||
	    test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    nilfs_discontinued(nilfs)) {
		nilfs_transaction_unlock(sb);
		err = nilfs_segctor_sync(sci);
		return err;
	}

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		spin_unlock(&nilfs->ns_inode_lock);
		nilfs_transaction_unlock(sb);
		return 0;
	}
	spin_unlock(&nilfs->ns_inode_lock);
	sci->sc_dsync_inode = ii;
	sci->sc_dsync_start = start;
	sci->sc_dsync_end = end;

	err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
	if (!err)
		nilfs->ns_flushed_device = 0;

	nilfs_transaction_unlock(sb);
	return err;
}

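/*
 * A typical caller of the two entry points above is fsync.  As a rough
 * sketch (the exact caller lives in file.c, outside this file, so the
 * details here are an assumption), the fsync path looks like:
 *
 *	if (nilfs_inode_dirty(inode)) {
 *		if (datasync)
 *			err = nilfs_construct_dsync_segment(inode->i_sb,
 *							    inode, start, end);
 *		else
 *			err = nilfs_construct_segment(inode->i_sb);
 *	}
 *
 * i.e. a datasync request writes only the file's data blocks, while a full
 * fsync forces a logical segment that includes a checkpoint.
 */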
#define FLUSH_FILE_BIT	(0x1)	/* data file only */
#define FLUSH_DAT_BIT	BIT(NILFS_DAT_INO)	/* DAT only */

/**
 * nilfs_segctor_accept - record accepted sequence count of log-write requests
 * @sci: segment constructor object
 */
static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
{
	bool thread_is_alive;

	spin_lock(&sci->sc_state_lock);
	sci->sc_seq_accepted = sci->sc_seq_request;
	thread_is_alive = (bool)sci->sc_task;
	spin_unlock(&sci->sc_state_lock);

	/*
	 * This function does not race with the log writer thread's
	 * termination.  Therefore, deleting sc_timer, which should not be
	 * done after the log writer thread exits, can be done safely outside
	 * the area protected by sc_state_lock.
	 */
	if (thread_is_alive)
		del_timer_sync(&sci->sc_timer);
}

/**
 * nilfs_segctor_notify - notify the result of request to caller threads
 * @sci: segment constructor object
 * @mode: mode of log forming
 * @err: error code to be notified
 */
static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
{
	/* Clear requests (even when the construction failed) */
	spin_lock(&sci->sc_state_lock);

	if (mode == SC_LSEG_SR) {
		sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
		sci->sc_seq_done = sci->sc_seq_accepted;
		nilfs_segctor_wakeup(sci, err, false);
		sci->sc_flush_request = 0;
	} else {
		if (mode == SC_FLUSH_FILE)
			sci->sc_flush_request &= ~FLUSH_FILE_BIT;
		else if (mode == SC_FLUSH_DAT)
			sci->sc_flush_request &= ~FLUSH_DAT_BIT;

		/* re-enable timer if checkpoint creation was not done */
		if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) && sci->sc_task &&
		    time_before(jiffies, sci->sc_timer.expires))
			add_timer(&sci->sc_timer);
	}
	spin_unlock(&sci->sc_state_lock);
}

/**
 * nilfs_segctor_construct - form logs and write them to disk
 * @sci: segment constructor object
 * @mode: mode of log forming
 *
 * Return: 0 on success, or a negative error code on failure.
 */
static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct nilfs_super_block **sbp;
	int err = 0;

	nilfs_segctor_accept(sci);

	if (nilfs_discontinued(nilfs))
		mode = SC_LSEG_SR;
	if (!nilfs_segctor_confirm(sci))
		err = nilfs_segctor_do_construct(sci, mode);

	if (likely(!err)) {
		if (mode != SC_FLUSH_DAT)
			atomic_set(&nilfs->ns_ndirtyblks, 0);
		if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
		    nilfs_discontinued(nilfs)) {
			down_write(&nilfs->ns_sem);
			err = -EIO;
			sbp = nilfs_prepare_super(sci->sc_super,
						  nilfs_sb_will_flip(nilfs));
			if (likely(sbp)) {
				nilfs_set_log_cursor(sbp[0], nilfs);
				err = nilfs_commit_super(sci->sc_super,
							 NILFS_SB_COMMIT);
			}
			up_write(&nilfs->ns_sem);
		}
	}

	nilfs_segctor_notify(sci, mode, err);
	return err;
}

static void nilfs_construction_timeout(struct timer_list *t)
{
	struct nilfs_sc_info *sci = from_timer(sci, t, sc_timer);

	wake_up_process(sci->sc_task);
}

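/*
 * nilfs_remove_written_gcinodes() releases the GC inodes whose blocks were
 * actually relocated in the last construction (those marked
 * NILFS_I_UPDATED), truncating their page and b-tree node caches before
 * dropping the final reference.  Inodes that were not written stay on the
 * list, so the retry loop in nilfs_clean_segments() can pick them up again.
 */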
static void
nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
{
	struct nilfs_inode_info *ii, *n;

	list_for_each_entry_safe(ii, n, head, i_dirty) {
		if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
			continue;
		list_del_init(&ii->i_dirty);
		truncate_inode_pages(&ii->vfs_inode.i_data, 0);
		nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
		iput(&ii->vfs_inode);
	}
}

int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
			 void **kbufs)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_transaction_info ti;
	int err;

	if (unlikely(!sci))
		return -EROFS;

	nilfs_transaction_lock(sb, &ti, 1);

	err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
	if (unlikely(err))
		goto out_unlock;

	err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
	if (unlikely(err)) {
		nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
		goto out_unlock;
	}

	sci->sc_freesegs = kbufs[4];
	sci->sc_nfreesegs = argv[4].v_nmembs;
	list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);

	for (;;) {
		err = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);

		if (likely(!err))
			break;

		nilfs_warn(sb, "error %d cleaning segments", err);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(sci->sc_interval);
	}
	if (nilfs_test_opt(nilfs, DISCARD)) {
		int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
						 sci->sc_nfreesegs);
		if (ret) {
			nilfs_warn(sb,
				   "error %d on discard request, turning discards off for the device",
				   ret);
			nilfs_clear_opt(nilfs, DISCARD);
		}
	}

 out_unlock:
	sci->sc_freesegs = NULL;
	sci->sc_nfreesegs = 0;
	nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
	nilfs_transaction_unlock(sb);
	return err;
}

static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_transaction_info ti;

	nilfs_transaction_lock(sci->sc_super, &ti, 0);
	nilfs_segctor_construct(sci, mode);

	/*
	 * Unclosed segment should be retried.  We do this using sc_timer.
	 * Timeout of sc_timer will invoke complete construction which leads
	 * to close the current logical segment.
	 */
	if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
		nilfs_segctor_start_timer(sci);

	nilfs_transaction_unlock(sci->sc_super);
}

static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
{
	int mode = 0;

	spin_lock(&sci->sc_state_lock);
	mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
		SC_FLUSH_DAT : SC_FLUSH_FILE;
	spin_unlock(&sci->sc_state_lock);

	if (mode) {
		nilfs_segctor_do_construct(sci, mode);

		spin_lock(&sci->sc_state_lock);
		sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
			~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
		spin_unlock(&sci->sc_state_lock);
	}
	clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
}

static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
{
	if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
		if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
			return SC_FLUSH_FILE;
		else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
			return SC_FLUSH_DAT;
	}
	return SC_LSEG_SR;
}

/**
 * nilfs_log_write_required - determine whether log writing is required
 * @sci: nilfs_sc_info struct
 * @modep: location for storing log writing mode
 *
 * Return: true if log writing is required, false otherwise.  If log writing
 * is required, the mode is stored in the location pointed to by @modep.
 */
static bool nilfs_log_write_required(struct nilfs_sc_info *sci, int *modep)
{
	bool timedout, ret = true;

	spin_lock(&sci->sc_state_lock);
	timedout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
		    time_after_eq(jiffies, sci->sc_timer.expires));
	if (timedout || sci->sc_seq_request != sci->sc_seq_done)
		*modep = SC_LSEG_SR;
	else if (sci->sc_flush_request)
		*modep = nilfs_segctor_flush_mode(sci);
	else
		ret = false;

	spin_unlock(&sci->sc_state_lock);
	return ret;
}

/**
 * nilfs_segctor_thread - main loop of the log writer thread
 * @arg: pointer to a struct nilfs_sc_info.
 *
 * nilfs_segctor_thread() is the main loop function of the log writer kernel
 * thread, which determines whether log writing is necessary, and if so,
 * performs the log write in the background, or waits if not.  It is also
 * used to decide the background writeback of the superblock.
 *
 * Return: Always 0.
 */
static int nilfs_segctor_thread(void *arg)
{
	struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

	nilfs_info(sci->sc_super,
		   "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
		   sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);

	set_freezable();

	while (!kthread_should_stop()) {
		DEFINE_WAIT(wait);
		bool should_write;
		int mode;

		if (freezing(current)) {
			try_to_freeze();
			continue;
		}

		prepare_to_wait(&sci->sc_wait_daemon, &wait,
				TASK_INTERRUPTIBLE);
		should_write = nilfs_log_write_required(sci, &mode);
		if (!should_write)
			schedule();
		finish_wait(&sci->sc_wait_daemon, &wait);

		if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
			set_nilfs_discontinued(nilfs);

		if (should_write)
			nilfs_segctor_thread_construct(sci, mode);
	}

	/* end sync. */
	spin_lock(&sci->sc_state_lock);
	sci->sc_task = NULL;
	timer_shutdown_sync(&sci->sc_timer);
	spin_unlock(&sci->sc_state_lock);
	return 0;
}

/*
 * Setup & clean-up functions
 */
static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
					       struct nilfs_root *root)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci;

	sci = kzalloc(sizeof(*sci), GFP_KERNEL);
	if (!sci)
		return NULL;

	sci->sc_super = sb;

	nilfs_get_root(root);
	sci->sc_root = root;

	init_waitqueue_head(&sci->sc_wait_request);
	init_waitqueue_head(&sci->sc_wait_daemon);
	spin_lock_init(&sci->sc_state_lock);
	INIT_LIST_HEAD(&sci->sc_dirty_files);
	INIT_LIST_HEAD(&sci->sc_segbufs);
	INIT_LIST_HEAD(&sci->sc_write_logs);
	INIT_LIST_HEAD(&sci->sc_gc_inodes);
	INIT_LIST_HEAD(&sci->sc_iput_queue);
	INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);

	sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
	sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
	sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;

	if (nilfs->ns_interval)
		sci->sc_interval = HZ * nilfs->ns_interval;
	if (nilfs->ns_watermark)
		sci->sc_watermark = nilfs->ns_watermark;
	return sci;
}

static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
{
	int ret, retrycount = NILFS_SC_CLEANUP_RETRY;

	/*
	 * The segctord thread was stopped and its timer was removed.
	 * But some tasks remain.
	 */
	do {
		struct nilfs_transaction_info ti;

		nilfs_transaction_lock(sci->sc_super, &ti, 0);
		ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_transaction_unlock(sci->sc_super);

		flush_work(&sci->sc_iput_work);

	} while (ret && ret != -EROFS && retrycount-- > 0);
}

/**
 * nilfs_segctor_destroy - destroy the segment constructor.
 * @sci: nilfs_sc_info
 *
 * nilfs_segctor_destroy() kills the segctord thread and frees
 * the nilfs_sc_info struct.
 * Caller must hold the segment semaphore.
 */
static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int flag;

	up_write(&nilfs->ns_segctor_sem);

	if (sci->sc_task) {
		wake_up(&sci->sc_wait_daemon);
		kthread_stop(sci->sc_task);
	}

	spin_lock(&sci->sc_state_lock);
	flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
		|| sci->sc_seq_request != sci->sc_seq_done);
	spin_unlock(&sci->sc_state_lock);

	/*
	 * Forcibly wake up tasks waiting in nilfs_segctor_sync(), which can
	 * be called from delayed iput() via nilfs_evict_inode() and can race
	 * with the above log writer thread termination.
	 */
	nilfs_segctor_wakeup(sci, 0, true);

	if (flush_work(&sci->sc_iput_work))
		flag = true;

	if (flag || !nilfs_segctor_confirm(sci))
		nilfs_segctor_write_out(sci);

	if (!list_empty(&sci->sc_dirty_files)) {
		nilfs_warn(sci->sc_super,
			   "disposed unprocessed dirty file(s) when stopping log writer");
		nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
	}

	if (!list_empty(&sci->sc_iput_queue)) {
		nilfs_warn(sci->sc_super,
			   "disposed unprocessed inode(s) in iput queue when stopping log writer");
		nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
	}

	WARN_ON(!list_empty(&sci->sc_segbufs));
	WARN_ON(!list_empty(&sci->sc_write_logs));

	nilfs_put_root(sci->sc_root);

	down_write(&nilfs->ns_segctor_sem);

	kfree(sci);
}

/**
 * nilfs_attach_log_writer - attach log writer
 * @sb: super block instance
 * @root: root object of the current filesystem tree
 *
 * This allocates a log writer object, initializes it, and starts the
 * log writer.
 *
 * Return: 0 on success, or one of the following negative error codes on
 * failure.
 * * %-EINTR	- Log writer thread creation failed due to interruption.
 * * %-ENOMEM	- Insufficient memory available.
 */
int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci;
	struct task_struct *t;
	int err;

	if (nilfs->ns_writer) {
		/*
		 * This happens if the filesystem is made read-only by
		 * __nilfs_error or nilfs_remount and then remounted
		 * read/write.  In these cases, reuse the existing writer.
		 */
		return 0;
	}

	sci = nilfs_segctor_new(sb, root);
	if (unlikely(!sci))
		return -ENOMEM;

	nilfs->ns_writer = sci;
	t = kthread_create(nilfs_segctor_thread, sci, "segctord");
	if (IS_ERR(t)) {
		err = PTR_ERR(t);
		nilfs_err(sb, "error %d creating segctord thread", err);
		nilfs_detach_log_writer(sb);
		return err;
	}
	sci->sc_task = t;
	timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);

	wake_up_process(sci->sc_task);
	return 0;
}

/**
 * nilfs_detach_log_writer - destroy log writer
 * @sb: super block instance
 *
 * This kills the log writer daemon, frees the log writer object, and
 * disposes of the remaining list of dirty files.
 */
void nilfs_detach_log_writer(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	LIST_HEAD(garbage_list);

	down_write(&nilfs->ns_segctor_sem);
	if (nilfs->ns_writer) {
		nilfs_segctor_destroy(nilfs->ns_writer);
		nilfs->ns_writer = NULL;
	}
	set_nilfs_purging(nilfs);

	/* Force to free the list of dirty files */
	spin_lock(&nilfs->ns_inode_lock);
	if (!list_empty(&nilfs->ns_dirty_files)) {
		list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
		nilfs_warn(sb,
			   "disposed unprocessed dirty file(s) when detaching log writer");
	}
	spin_unlock(&nilfs->ns_inode_lock);
	up_write(&nilfs->ns_segctor_sem);

	nilfs_dispose_list(nilfs, &garbage_list, 1);
	clear_nilfs_purging(nilfs);
}