// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Adrian Hunter
 *          Artem Bityutskiy (Битюцкий Артём)
 */

/* This file implements TNC functions for committing */

#include <linux/random.h>
#include "ubifs.h"

/**
 * make_idx_node - make an index node for fill-the-gaps method of TNC commit.
 * @c: UBIFS file-system description object
 * @idx: buffer in which to place new index node
 * @znode: znode from which to make new index node
 * @lnum: LEB number where new index node will be written
 * @offs: offset where new index node will be written
 * @len: length of new index node
 */
static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx,
			 struct ubifs_znode *znode, int lnum, int offs, int len)
{
	struct ubifs_znode *zp;
	u8 hash[UBIFS_HASH_ARR_SZ];
	int i, err;

	/* Make index node */
	idx->ch.node_type = UBIFS_IDX_NODE;
	idx->child_cnt = cpu_to_le16(znode->child_cnt);
	idx->level = cpu_to_le16(znode->level);
	for (i = 0; i < znode->child_cnt; i++) {
		struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
		struct ubifs_zbranch *zbr = &znode->zbranch[i];

		key_write_idx(c, &zbr->key, &br->key);
		br->lnum = cpu_to_le32(zbr->lnum);
		br->offs = cpu_to_le32(zbr->offs);
		br->len = cpu_to_le32(zbr->len);
		ubifs_copy_hash(c, zbr->hash, ubifs_branch_hash(c, br));
		if (!zbr->lnum || !zbr->len) {
			ubifs_err(c, "bad ref in znode");
			ubifs_dump_znode(c, znode);
			if (zbr->znode)
				ubifs_dump_znode(c, zbr->znode);

			return -EINVAL;
		}
	}
	ubifs_prepare_node(c, idx, len, 0);
	ubifs_node_calc_hash(c, idx, hash);

	znode->lnum = lnum;
	znode->offs = offs;
	znode->len = len;

	err = insert_old_idx_znode(c, znode);

	/* Update the parent */
	zp = znode->parent;
	if (zp) {
		struct ubifs_zbranch *zbr;

		zbr = &zp->zbranch[znode->iip];
		zbr->lnum = lnum;
		zbr->offs = offs;
		zbr->len = len;
		ubifs_copy_hash(c, hash, zbr->hash);
	} else {
		c->zroot.lnum = lnum;
		c->zroot.offs = offs;
		c->zroot.len = len;
		ubifs_copy_hash(c, hash, c->zroot.hash);
	}
	c->calc_idx_sz += ALIGN(len, 8);

	atomic_long_dec(&c->dirty_zn_cnt);

	ubifs_assert(c, ubifs_zn_dirty(znode));
	ubifs_assert(c, ubifs_zn_cow(znode));

	/*
	 * Note, unlike 'write_index()' we do not add memory barriers here
	 * because this function is called with @c->tnc_mutex locked.
	 */
	__clear_bit(DIRTY_ZNODE, &znode->flags);
	__clear_bit(COW_ZNODE, &znode->flags);

	return err;
}

/**
 * fill_gap - make index nodes in gaps in dirty index LEBs.
 * @c: UBIFS file-system description object
 * @lnum: LEB number that gap appears in
 * @gap_start: offset of start of gap
 * @gap_end: offset of end of gap
 * @dirt: adds dirty space to this
 *
 * This function returns the number of index nodes written into the gap, or a
 * negative error code on failure.
 */
static int fill_gap(struct ubifs_info *c, int lnum, int gap_start, int gap_end,
		    int *dirt)
{
	int len, gap_remains, gap_pos, written, pad_len;

	ubifs_assert(c, (gap_start & 7) == 0);
	ubifs_assert(c, (gap_end & 7) == 0);
	ubifs_assert(c, gap_end >= gap_start);

	gap_remains = gap_end - gap_start;
	if (!gap_remains)
		return 0;
	gap_pos = gap_start;
	written = 0;
	while (c->enext) {
		len = ubifs_idx_node_sz(c, c->enext->child_cnt);
		if (len < gap_remains) {
			struct ubifs_znode *znode = c->enext;
			const int alen = ALIGN(len, 8);
			int err;

			ubifs_assert(c, alen <= gap_remains);
			err = make_idx_node(c, c->ileb_buf + gap_pos, znode,
					    lnum, gap_pos, len);
			if (err)
				return err;
			gap_remains -= alen;
			gap_pos += alen;
			c->enext = znode->cnext;
			if (c->enext == c->cnext)
				c->enext = NULL;
			written += 1;
		} else
			break;
	}
	if (gap_end == c->leb_size) {
		c->ileb_len = ALIGN(gap_pos, c->min_io_size);
		/* Pad to end of min_io_size */
		pad_len = c->ileb_len - gap_pos;
	} else
		/* Pad to end of gap */
		pad_len = gap_remains;
	dbg_gc("LEB %d:%d to %d len %d nodes written %d wasted bytes %d",
	       lnum, gap_start, gap_end, gap_end - gap_start, written, pad_len);
	ubifs_pad(c, c->ileb_buf + gap_pos, pad_len);
	*dirt += pad_len;
	return written;
}

/**
 * find_old_idx - find an index node obsoleted since the last commit start.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of obsoleted index node
 * @offs: offset of obsoleted index node
 *
 * Returns %1 if found and %0 otherwise.
 */
static int find_old_idx(struct ubifs_info *c, int lnum, int offs)
{
	struct ubifs_old_idx *o;
	struct rb_node *p;

	p = c->old_idx.rb_node;
	while (p) {
		o = rb_entry(p, struct ubifs_old_idx, rb);
		if (lnum < o->lnum)
			p = p->rb_left;
		else if (lnum > o->lnum)
			p = p->rb_right;
		else if (offs < o->offs)
			p = p->rb_left;
		else if (offs > o->offs)
			p = p->rb_right;
		else
			return 1;
	}
	return 0;
}

/**
 * is_idx_node_in_use - determine if an index node can be overwritten.
 * @c: UBIFS file-system description object
 * @key: key of index node
 * @level: index node level
 * @lnum: LEB number of index node
 * @offs: offset of index node
 *
 * If @key / @lnum / @offs identify an index node that was not part of the old
 * index, then this function returns %0 (obsolete). Else if the index node was
 * part of the old index but is now dirty %1 is returned, else if it is clean
 * %2 is returned. A negative error code is returned on failure.
 */
static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key,
			      int level, int lnum, int offs)
{
	int ret;

	ret = is_idx_node_in_tnc(c, key, level, lnum, offs);
	if (ret < 0)
		return ret; /* Error code */
	if (ret == 0)
		if (find_old_idx(c, lnum, offs))
			return 1;
	return ret;
}

/**
 * layout_leb_in_gaps - layout index nodes using in-the-gaps method.
 * @c: UBIFS file-system description object
 * @p: return LEB number here
 *
 * This function lays out new index nodes for dirty znodes using the
 * in-the-gaps method of TNC commit. It merely puts the next znode into the
 * next gap, making no attempt to maximise the number of znodes that fit.
 *
 * This function returns the number of index nodes written into the gaps, or a
 * negative error code on failure.
 */
static int layout_leb_in_gaps(struct ubifs_info *c, int *p)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	int lnum, dirt = 0, gap_start, gap_end, err, written, tot_written;

	tot_written = 0;
	/* Get an index LEB with lots of obsolete index nodes */
	lnum = ubifs_find_dirty_idx_leb(c);
	if (lnum < 0)
		/*
		 * There also may be dirt in the index head that could be
		 * filled, however we do not check there at present.
		 */
		return lnum; /* Error code */
	*p = lnum;
	dbg_gc("LEB %d", lnum);
	/*
	 * Scan the index LEB. We use the generic scan for this even though
	 * it is more comprehensive and less efficient than is needed for this
	 * purpose.
	 */
	sleb = ubifs_scan(c, lnum, 0, c->ileb_buf, 0);
	c->ileb_len = 0;
	if (IS_ERR(sleb))
		return PTR_ERR(sleb);
	gap_start = 0;
	list_for_each_entry(snod, &sleb->nodes, list) {
		struct ubifs_idx_node *idx;
		int in_use, level;

		ubifs_assert(c, snod->type == UBIFS_IDX_NODE);
		idx = snod->node;
		key_read(c, ubifs_idx_key(c, idx), &snod->key);
		level = le16_to_cpu(idx->level);
		/* Determine if the index node is in use (not obsolete) */
		in_use = is_idx_node_in_use(c, &snod->key, level, lnum,
					    snod->offs);
		if (in_use < 0) {
			ubifs_scan_destroy(sleb);
			return in_use; /* Error code */
		}
		if (in_use) {
			if (in_use == 1)
				dirt += ALIGN(snod->len, 8);
			/*
			 * The obsolete index nodes form gaps that can be
			 * overwritten. This gap has ended because we have
			 * found an index node that is still in use
			 * i.e. not obsolete
			 */
			gap_end = snod->offs;
			/* Try to fill gap */
			written = fill_gap(c, lnum, gap_start, gap_end, &dirt);
			if (written < 0) {
				ubifs_scan_destroy(sleb);
				return written; /* Error code */
			}
			tot_written += written;
			gap_start = ALIGN(snod->offs + snod->len, 8);
		}
	}
	ubifs_scan_destroy(sleb);
	c->ileb_len = c->leb_size;
	gap_end = c->leb_size;
	/* Try to fill gap */
	written = fill_gap(c, lnum, gap_start, gap_end, &dirt);
	if (written < 0)
		return written; /* Error code */
	tot_written += written;
	if (tot_written == 0) {
		struct ubifs_lprops lp;

		dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
		err = ubifs_read_one_lp(c, lnum, &lp);
		if (err)
			return err;
		if (lp.free == c->leb_size) {
			/*
			 * We must have snatched this LEB from the idx_gc list
			 * so we need to correct the free and dirty space.
			 */
			err = ubifs_change_one_lp(c, lnum,
						  c->leb_size - c->ileb_len,
						  dirt, 0, 0, 0);
			if (err)
				return err;
		}
		return 0;
	}
	err = ubifs_change_one_lp(c, lnum, c->leb_size - c->ileb_len, dirt,
				  0, 0, 0);
	if (err)
		return err;
	err = ubifs_leb_change(c, lnum, c->ileb_buf, c->ileb_len);
	if (err)
		return err;
	dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
	return tot_written;
}

/**
 * get_leb_cnt - calculate the number of empty LEBs needed to commit.
 * @c: UBIFS file-system description object
 * @cnt: number of znodes to commit
 *
 * This function returns the number of empty LEBs needed to commit @cnt znodes
 * to the current index head. The number is not exact and may be more than
 * needed.
 */
static int get_leb_cnt(struct ubifs_info *c, int cnt)
{
	int d;

	/* Assume maximum index node size (i.e. overestimate space needed) */
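	/*
	 * Rough illustration with hypothetical numbers (not from any real
	 * flash geometry): with a 128KiB LEB, a 4KiB maximum index node size
	 * and 64KiB left in the index head, the head absorbs 16 nodes and
	 * each further LEB absorbs 32, so committing 100 znodes yields
	 * DIV_ROUND_UP(100 - 16, 32) = 3 empty LEBs. Any surplus is returned
	 * later by 'free_unused_idx_lebs()'.
	 */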
	cnt -= (c->leb_size - c->ihead_offs) / c->max_idx_node_sz;
	if (cnt < 0)
		cnt = 0;
	d = c->leb_size / c->max_idx_node_sz;
	return DIV_ROUND_UP(cnt, d);
}

/**
 * layout_in_gaps - in-the-gaps method of committing TNC.
 * @c: UBIFS file-system description object
 * @cnt: number of dirty znodes to commit.
 *
 * This function lays out new index nodes for dirty znodes using in-the-gaps
 * method of TNC commit.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int layout_in_gaps(struct ubifs_info *c, int cnt)
{
	int err, leb_needed_cnt, written, *p;

	dbg_gc("%d znodes to write", cnt);

	c->gap_lebs = kmalloc_array(c->lst.idx_lebs + 1, sizeof(int),
				    GFP_NOFS);
	if (!c->gap_lebs)
		return -ENOMEM;

	p = c->gap_lebs;
	do {
		ubifs_assert(c, p < c->gap_lebs + c->lst.idx_lebs);
		written = layout_leb_in_gaps(c, p);
		if (written < 0) {
			err = written;
			if (err != -ENOSPC) {
				kfree(c->gap_lebs);
				c->gap_lebs = NULL;
				return err;
			}
			if (!dbg_is_chk_index(c)) {
				/*
				 * Do not print scary warnings if the debugging
				 * option which forces in-the-gaps is enabled.
				 */
				ubifs_warn(c, "out of space");
				ubifs_dump_budg(c, &c->bi);
				ubifs_dump_lprops(c);
			}
			/* Try to commit anyway */
			break;
		}
		p++;
		cnt -= written;
		leb_needed_cnt = get_leb_cnt(c, cnt);
		dbg_gc("%d znodes remaining, need %d LEBs, have %d", cnt,
		       leb_needed_cnt, c->ileb_cnt);
	} while (leb_needed_cnt > c->ileb_cnt);

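	/* Terminate the array: 'return_gap_lebs()' walks it until it hits -1 */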
	*p = -1;
	return 0;
}

/**
 * layout_in_empty_space - layout index nodes in empty space.
 * @c: UBIFS file-system description object
 *
 * This function lays out new index nodes for dirty znodes using empty LEBs.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int layout_in_empty_space(struct ubifs_info *c)
{
	struct ubifs_znode *znode, *cnext, *zp;
	int lnum, offs, len, next_len, buf_len, buf_offs, used, avail;
	int wlen, blen, err;

	cnext = c->enext;
	if (!cnext)
		return 0;

	lnum = c->ihead_lnum;
	buf_offs = c->ihead_offs;

	buf_len = ubifs_idx_node_sz(c, c->fanout);
	buf_len = ALIGN(buf_len, c->min_io_size);
	used = 0;
	avail = buf_len;

	/* Ensure there is enough room for first write */
	next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
	if (buf_offs + next_len > c->leb_size)
		lnum = -1;
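	/*
	 * A LEB number of -1 tells the loop below that the next index node
	 * must be placed at the start of a fresh empty LEB taken from
	 * @c->ilebs.
	 */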

	while (1) {
		znode = cnext;

		len = ubifs_idx_node_sz(c, znode->child_cnt);

		/* Determine the index node position */
		if (lnum == -1) {
			if (c->ileb_nxt >= c->ileb_cnt) {
				ubifs_err(c, "out of space");
				return -ENOSPC;
			}
			lnum = c->ilebs[c->ileb_nxt++];
			buf_offs = 0;
			used = 0;
			avail = buf_len;
		}

		offs = buf_offs + used;

		znode->lnum = lnum;
		znode->offs = offs;
		znode->len = len;

		/* Update the parent */
		zp = znode->parent;
		if (zp) {
			struct ubifs_zbranch *zbr;
			int i;

			i = znode->iip;
			zbr = &zp->zbranch[i];
			zbr->lnum = lnum;
			zbr->offs = offs;
			zbr->len = len;
		} else {
			c->zroot.lnum = lnum;
			c->zroot.offs = offs;
			c->zroot.len = len;
		}
		c->calc_idx_sz += ALIGN(len, 8);

		/*
		 * Once lprops is updated, we can decrease the dirty znode count
		 * but it is easier to just do it here.
		 */
		atomic_long_dec(&c->dirty_zn_cnt);

		/*
		 * Calculate the next index node length to see if there is
		 * enough room for it
		 */
		cnext = znode->cnext;
		if (cnext == c->cnext)
			next_len = 0;
		else
			next_len = ubifs_idx_node_sz(c, cnext->child_cnt);

		/* Update buffer positions */
		wlen = used + len;
		used += ALIGN(len, 8);
		avail -= ALIGN(len, 8);

		if (next_len != 0 &&
		    buf_offs + used + next_len <= c->leb_size &&
		    avail > 0)
			continue;

		if (avail <= 0 && next_len &&
		    buf_offs + used + next_len <= c->leb_size)
			blen = buf_len;
		else
			blen = ALIGN(wlen, c->min_io_size);

		/* The buffer is full or there are no more znodes to do */
		buf_offs += blen;
		if (next_len) {
			if (buf_offs + next_len > c->leb_size) {
				err = ubifs_update_one_lp(c, lnum,
					c->leb_size - buf_offs, blen - used,
					0, 0);
				if (err)
					return err;
				lnum = -1;
			}
			used -= blen;
			if (used < 0)
				used = 0;
			avail = buf_len - used;
			continue;
		}
		err = ubifs_update_one_lp(c, lnum, c->leb_size - buf_offs,
					  blen - used, 0, 0);
		if (err)
			return err;
		break;
	}

	c->dbg->new_ihead_lnum = lnum;
	c->dbg->new_ihead_offs = buf_offs;

	return 0;
}

/**
 * layout_commit - determine positions of index nodes to commit.
 * @c: UBIFS file-system description object
 * @no_space: indicates that insufficient empty LEBs were allocated
 * @cnt: number of znodes to commit
 *
 * Calculate and update the positions of index nodes to commit. If an
 * insufficient number of empty LEBs was allocated, then index nodes are
 * placed into the gaps created by obsolete index nodes in non-empty index
 * LEBs. For this purpose, an obsolete index node is one that was not in the
 * index as of the end of the last commit. Writing "in-the-gaps" requires that
 * those index LEBs are updated atomically in-place.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int layout_commit(struct ubifs_info *c, int no_space, int cnt)
{
	int err;

	if (no_space) {
		err = layout_in_gaps(c, cnt);
		if (err)
			return err;
	}
	err = layout_in_empty_space(c);
	return err;
}

/**
 * find_first_dirty - find first dirty znode.
 * @znode: znode to begin searching from
 */
static struct ubifs_znode *find_first_dirty(struct ubifs_znode *znode)
{
	int i, cont;

	if (!znode)
		return NULL;

	while (1) {
		if (znode->level == 0) {
			if (ubifs_zn_dirty(znode))
				return znode;
			return NULL;
		}
		cont = 0;
		for (i = 0; i < znode->child_cnt; i++) {
			struct ubifs_zbranch *zbr = &znode->zbranch[i];

			if (zbr->znode && ubifs_zn_dirty(zbr->znode)) {
				znode = zbr->znode;
				cont = 1;
				break;
			}
		}
		if (!cont) {
			if (ubifs_zn_dirty(znode))
				return znode;
			return NULL;
		}
	}
}

/**
 * find_next_dirty - find next dirty znode.
 * @znode: znode to begin searching from
 */
static struct ubifs_znode *find_next_dirty(struct ubifs_znode *znode)
{
	int n = znode->iip + 1;

	znode = znode->parent;
	if (!znode)
		return NULL;
	for (; n < znode->child_cnt; n++) {
		struct ubifs_zbranch *zbr = &znode->zbranch[n];

		if (zbr->znode && ubifs_zn_dirty(zbr->znode))
			return find_first_dirty(zbr->znode);
	}
	return znode;
}

/**
 * get_znodes_to_commit - create list of dirty znodes to commit.
 * @c: UBIFS file-system description object
 *
 * This function returns the number of znodes to commit.
 */
static int get_znodes_to_commit(struct ubifs_info *c)
{
	struct ubifs_znode *znode, *cnext;
	int cnt = 0;

	c->cnext = find_first_dirty(c->zroot.znode);
	znode = c->enext = c->cnext;
	if (!znode) {
		dbg_cmt("no znodes to commit");
		return 0;
	}
	cnt += 1;
	while (1) {
		ubifs_assert(c, !ubifs_zn_cow(znode));
		__set_bit(COW_ZNODE, &znode->flags);
		znode->alt = 0;
		cnext = find_next_dirty(znode);
		if (!cnext) {
			znode->cnext = c->cnext;
			break;
		}
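		/*
		 * Record the parent and the slot index as they are right now:
		 * the TNC may still change while the commit is running, and
		 * 'write_index()' uses @cparent/@ciip to copy the node hash
		 * into the parent that is actually part of this commit.
		 */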
		znode->cparent = znode->parent;
		znode->ciip = znode->iip;
		znode->cnext = cnext;
		znode = cnext;
		cnt += 1;
	}
	dbg_cmt("committing %d znodes", cnt);
	ubifs_assert(c, cnt == atomic_long_read(&c->dirty_zn_cnt));
	return cnt;
}

/**
 * alloc_idx_lebs - allocate empty LEBs to be used to commit.
 * @c: UBIFS file-system description object
 * @cnt: number of znodes to commit
 *
 * This function returns %-ENOSPC if it cannot allocate a sufficient number of
 * empty LEBs. %0 is returned on success, otherwise a negative error code
 * is returned.
 */
static int alloc_idx_lebs(struct ubifs_info *c, int cnt)
{
	int i, leb_cnt, lnum;

	c->ileb_cnt = 0;
	c->ileb_nxt = 0;
	leb_cnt = get_leb_cnt(c, cnt);
	dbg_cmt("need about %d empty LEBS for TNC commit", leb_cnt);
	if (!leb_cnt)
		return 0;
	c->ilebs = kmalloc_array(leb_cnt, sizeof(int), GFP_NOFS);
	if (!c->ilebs)
		return -ENOMEM;
	for (i = 0; i < leb_cnt; i++) {
		lnum = ubifs_find_free_leb_for_idx(c);
		if (lnum < 0)
			return lnum;
		c->ilebs[c->ileb_cnt++] = lnum;
		dbg_cmt("LEB %d", lnum);
	}
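	/*
	 * When index debug-checking is enabled, randomly pretend (roughly one
	 * time in eight) that no LEBs could be allocated, so that the
	 * in-the-gaps commit path gets exercised.
	 */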
	if (dbg_is_chk_index(c) && !(prandom_u32() & 7))
		return -ENOSPC;
	return 0;
}

/**
 * free_unused_idx_lebs - free unused LEBs that were allocated for the commit.
 * @c: UBIFS file-system description object
 *
 * It is possible that we allocate more empty LEBs for the commit than we need.
 * This function frees the surplus.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int free_unused_idx_lebs(struct ubifs_info *c)
{
	int i, err = 0, lnum, er;

	for (i = c->ileb_nxt; i < c->ileb_cnt; i++) {
		lnum = c->ilebs[i];
		dbg_cmt("LEB %d", lnum);
		er = ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0,
					 LPROPS_INDEX | LPROPS_TAKEN, 0);
		if (!err)
			err = er;
	}
	return err;
}

/**
 * free_idx_lebs - free unused LEBs after commit end.
 * @c: UBIFS file-system description object
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int free_idx_lebs(struct ubifs_info *c)
{
	int err;

	err = free_unused_idx_lebs(c);
	kfree(c->ilebs);
	c->ilebs = NULL;
	return err;
}

/**
 * ubifs_tnc_start_commit - start TNC commit.
 * @c: UBIFS file-system description object
 * @zroot: new index root position is returned here
 *
 * This function prepares the list of indexing nodes to commit and lays out
 * their positions on flash. If there is not enough free space it uses the
 * in-gap commit method. Returns zero in case of success and a negative error
 * code in case of failure.
 */
int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot)
{
	int err = 0, cnt;

	mutex_lock(&c->tnc_mutex);
	err = dbg_check_tnc(c, 1);
	if (err)
		goto out;
	cnt = get_znodes_to_commit(c);
	if (cnt != 0) {
		int no_space = 0;

		err = alloc_idx_lebs(c, cnt);
		if (err == -ENOSPC)
			no_space = 1;
		else if (err)
			goto out_free;
		err = layout_commit(c, no_space, cnt);
		if (err)
			goto out_free;
		ubifs_assert(c, atomic_long_read(&c->dirty_zn_cnt) == 0);
		err = free_unused_idx_lebs(c);
		if (err)
			goto out;
	}
	destroy_old_idx(c);
	memcpy(zroot, &c->zroot, sizeof(struct ubifs_zbranch));

	err = ubifs_save_dirty_idx_lnums(c);
	if (err)
		goto out;

	spin_lock(&c->space_lock);
	/*
	 * Although we have not finished committing yet, update size of the
	 * committed index ('c->bi.old_idx_sz') and zero out the index growth
	 * budget. It is OK to do this now, because we've reserved all the
	 * space which is needed to commit the index, and it is safe for the
	 * budgeting subsystem to assume the index is already committed,
	 * even though it is not.
	 */
	ubifs_assert(c, c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c));
	c->bi.old_idx_sz = c->calc_idx_sz;
	c->bi.uncommitted_idx = 0;
	c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
	spin_unlock(&c->space_lock);
	mutex_unlock(&c->tnc_mutex);

	dbg_cmt("number of index LEBs %d", c->lst.idx_lebs);
	dbg_cmt("size of index %llu", c->calc_idx_sz);
	return err;

out_free:
	free_idx_lebs(c);
out:
	mutex_unlock(&c->tnc_mutex);
	return err;
}

/**
 * write_index - write index nodes.
 * @c: UBIFS file-system description object
 *
 * This function writes the index nodes whose positions were laid out in the
 * 'layout_in_empty_space()' function.
 */
static int write_index(struct ubifs_info *c)
{
	struct ubifs_idx_node *idx;
	struct ubifs_znode *znode, *cnext;
	int i, lnum, offs, len, next_len, buf_len, buf_offs, used;
	int avail, wlen, err, lnum_pos = 0, blen, nxt_offs;

	cnext = c->enext;
	if (!cnext)
		return 0;

	/*
	 * Always write index nodes to the index head so that index nodes and
	 * other types of nodes are never mixed in the same erase block.
	 */
	lnum = c->ihead_lnum;
	buf_offs = c->ihead_offs;

	/* Allocate commit buffer */
	buf_len = ALIGN(c->max_idx_node_sz, c->min_io_size);
	used = 0;
	avail = buf_len;

	/* Ensure there is enough room for first write */
	next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
	if (buf_offs + next_len > c->leb_size) {
		err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0, 0,
					  LPROPS_TAKEN);
		if (err)
			return err;
		lnum = -1;
	}

	while (1) {
		u8 hash[UBIFS_HASH_ARR_SZ];

		cond_resched();

		znode = cnext;
		idx = c->cbuf + used;

		/* Make index node */
		idx->ch.node_type = UBIFS_IDX_NODE;
		idx->child_cnt = cpu_to_le16(znode->child_cnt);
		idx->level = cpu_to_le16(znode->level);
		for (i = 0; i < znode->child_cnt; i++) {
			struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
			struct ubifs_zbranch *zbr = &znode->zbranch[i];

			key_write_idx(c, &zbr->key, &br->key);
			br->lnum = cpu_to_le32(zbr->lnum);
			br->offs = cpu_to_le32(zbr->offs);
			br->len = cpu_to_le32(zbr->len);
			ubifs_copy_hash(c, zbr->hash, ubifs_branch_hash(c, br));
			if (!zbr->lnum || !zbr->len) {
				ubifs_err(c, "bad ref in znode");
				ubifs_dump_znode(c, znode);
				if (zbr->znode)
					ubifs_dump_znode(c, zbr->znode);

				return -EINVAL;
			}
		}
		len = ubifs_idx_node_sz(c, znode->child_cnt);
		ubifs_prepare_node(c, idx, len, 0);
		ubifs_node_calc_hash(c, idx, hash);

		mutex_lock(&c->tnc_mutex);

		if (znode->cparent)
			ubifs_copy_hash(c, hash,
					znode->cparent->zbranch[znode->ciip].hash);

		if (znode->parent) {
			if (!ubifs_zn_obsolete(znode))
				ubifs_copy_hash(c, hash,
					znode->parent->zbranch[znode->iip].hash);
		} else {
			ubifs_copy_hash(c, hash, c->zroot.hash);
		}

		mutex_unlock(&c->tnc_mutex);

		/* Determine the index node position */
		if (lnum == -1) {
			lnum = c->ilebs[lnum_pos++];
			buf_offs = 0;
			used = 0;
			avail = buf_len;
		}
		offs = buf_offs + used;

		if (lnum != znode->lnum || offs != znode->offs ||
		    len != znode->len) {
			ubifs_err(c, "inconsistent znode posn");
			return -EINVAL;
		}

		/* Grab some stuff from znode while we still can */
		cnext = znode->cnext;

		ubifs_assert(c, ubifs_zn_dirty(znode));
		ubifs_assert(c, ubifs_zn_cow(znode));

		/*
		 * It is important that other threads should see %DIRTY_ZNODE
		 * flag cleared before %COW_ZNODE. Specifically, it matters in
		 * the 'dirty_cow_znode()' function. This is the reason for the
		 * first barrier. Also, we want the bit changes to be seen by
		 * other threads ASAP, to avoid unnecessary copying, which is
		 * the reason for the second barrier.
		 */
		clear_bit(DIRTY_ZNODE, &znode->flags);
		smp_mb__before_atomic();
		clear_bit(COW_ZNODE, &znode->flags);
		smp_mb__after_atomic();

		/*
		 * We have marked the znode as clean but have not updated the
		 * @c->clean_zn_cnt counter. If this znode becomes dirty again
		 * before 'free_obsolete_znodes()' is called, then
		 * @c->clean_zn_cnt will be decremented before it gets
		 * incremented (resulting in 2 decrements for the same znode).
		 * This means that @c->clean_zn_cnt may become negative for a
		 * while.
		 *
		 * Q: why can't we increment @c->clean_zn_cnt?
		 * A: because we do not have the @c->tnc_mutex locked, and the
		 *    following code would be racy and buggy:
		 *
		 *    if (!ubifs_zn_obsolete(znode)) {
		 *        atomic_long_inc(&c->clean_zn_cnt);
		 *        atomic_long_inc(&ubifs_clean_zn_cnt);
		 *    }
		 *
		 * Thus, we just delay the @c->clean_zn_cnt update until we
		 * have the mutex locked.
		 */

		/* Do not access znode from this point on */

		/* Update buffer positions */
		wlen = used + len;
		used += ALIGN(len, 8);
		avail -= ALIGN(len, 8);
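		/*
		 * @wlen marks where real node data ends in @c->cbuf (before
		 * any padding), while @used and @avail track the 8-byte
		 * aligned footprint; once @avail is exhausted the buffer is
		 * flushed by the 'ubifs_leb_write()' call below.
		 */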

		/*
		 * Calculate the next index node length to see if there is
		 * enough room for it
		 */
		if (cnext == c->cnext)
			next_len = 0;
		else
			next_len = ubifs_idx_node_sz(c, cnext->child_cnt);

		nxt_offs = buf_offs + used + next_len;
		if (next_len && nxt_offs <= c->leb_size) {
			if (avail > 0)
				continue;
			else
				blen = buf_len;
		} else {
			wlen = ALIGN(wlen, 8);
			blen = ALIGN(wlen, c->min_io_size);
			ubifs_pad(c, c->cbuf + wlen, blen - wlen);
		}

		/* The buffer is full or there are no more znodes to do */
		err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs, blen);
		if (err)
			return err;
		buf_offs += blen;
		if (next_len) {
			if (nxt_offs > c->leb_size) {
				err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0,
							  0, LPROPS_TAKEN);
				if (err)
					return err;
				lnum = -1;
			}
			used -= blen;
			if (used < 0)
				used = 0;
			avail = buf_len - used;
			memmove(c->cbuf, c->cbuf + blen, used);
			continue;
		}
		break;
	}

	if (lnum != c->dbg->new_ihead_lnum ||
	    buf_offs != c->dbg->new_ihead_offs) {
		ubifs_err(c, "inconsistent ihead");
		return -EINVAL;
	}

	c->ihead_lnum = lnum;
	c->ihead_offs = buf_offs;

	return 0;
}

/**
 * free_obsolete_znodes - free obsolete znodes.
 * @c: UBIFS file-system description object
 *
 * At the end of the commit-end phase, obsolete znodes are freed.
 */
static void free_obsolete_znodes(struct ubifs_info *c)
{
	struct ubifs_znode *znode, *cnext;

	cnext = c->cnext;
	do {
		znode = cnext;
		cnext = znode->cnext;
		if (ubifs_zn_obsolete(znode))
			kfree(znode);
		else {
			znode->cnext = NULL;
			atomic_long_inc(&c->clean_zn_cnt);
			atomic_long_inc(&ubifs_clean_zn_cnt);
		}
	} while (cnext != c->cnext);
}

/**
 * return_gap_lebs - return LEBs used by the in-gap commit method.
 * @c: UBIFS file-system description object
 *
 * This function clears the "taken" flag for the LEBs which were used by the
 * "commit in-the-gaps" method.
 */
static int return_gap_lebs(struct ubifs_info *c)
{
	int *p, err;

	if (!c->gap_lebs)
		return 0;

	dbg_cmt("");
	for (p = c->gap_lebs; *p != -1; p++) {
		err = ubifs_change_one_lp(c, *p, LPROPS_NC, LPROPS_NC, 0,
					  LPROPS_TAKEN, 0);
		if (err)
			return err;
	}

	kfree(c->gap_lebs);
	c->gap_lebs = NULL;
	return 0;
}

/**
 * ubifs_tnc_end_commit - update the TNC for commit end.
 * @c: UBIFS file-system description object
 *
 * Write the dirty znodes.
 */
int ubifs_tnc_end_commit(struct ubifs_info *c)
{
	int err;

	if (!c->cnext)
		return 0;

	err = return_gap_lebs(c);
	if (err)
		return err;

	err = write_index(c);
	if (err)
		return err;

	mutex_lock(&c->tnc_mutex);

	dbg_cmt("TNC height is %d", c->zroot.znode->level + 1);

	free_obsolete_znodes(c);

	c->cnext = NULL;
	kfree(c->ilebs);
	c->ilebs = NULL;

	mutex_unlock(&c->tnc_mutex);

	return 0;
}