v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * This file is part of UBIFS.
   4 *
   5 * Copyright (C) 2006-2008 Nokia Corporation.
   6 *
   7 * Authors: Adrian Hunter
   8 *          Artem Bityutskiy (Битюцкий Артём)
   9 */
  10
  11/* This file implements TNC functions for committing */
  12
  13#include <linux/random.h>
  14#include "ubifs.h"
  15
  16/**
  17 * make_idx_node - make an index node for fill-the-gaps method of TNC commit.
  18 * @c: UBIFS file-system description object
  19 * @idx: buffer in which to place new index node
  20 * @znode: znode from which to make new index node
  21 * @lnum: LEB number where new index node will be written
  22 * @offs: offset where new index node will be written
  23 * @len: length of new index node
  24 */
  25static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx,
  26			 struct ubifs_znode *znode, int lnum, int offs, int len)
  27{
  28	struct ubifs_znode *zp;
  29	u8 hash[UBIFS_HASH_ARR_SZ];
  30	int i, err;
  31
  32	/* Make index node */
  33	idx->ch.node_type = UBIFS_IDX_NODE;
  34	idx->child_cnt = cpu_to_le16(znode->child_cnt);
  35	idx->level = cpu_to_le16(znode->level);
  36	for (i = 0; i < znode->child_cnt; i++) {
  37		struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
  38		struct ubifs_zbranch *zbr = &znode->zbranch[i];
  39
  40		key_write_idx(c, &zbr->key, &br->key);
  41		br->lnum = cpu_to_le32(zbr->lnum);
  42		br->offs = cpu_to_le32(zbr->offs);
  43		br->len = cpu_to_le32(zbr->len);
  44		ubifs_copy_hash(c, zbr->hash, ubifs_branch_hash(c, br));
  45		if (!zbr->lnum || !zbr->len) {
  46			ubifs_err(c, "bad ref in znode");
  47			ubifs_dump_znode(c, znode);
  48			if (zbr->znode)
  49				ubifs_dump_znode(c, zbr->znode);
  50
  51			return -EINVAL;
  52		}
  53	}
  54	ubifs_prepare_node(c, idx, len, 0);
  55	ubifs_node_calc_hash(c, idx, hash);
  56
  57	znode->lnum = lnum;
  58	znode->offs = offs;
  59	znode->len = len;
  60
  61	err = insert_old_idx_znode(c, znode);
  62
  63	/* Update the parent */
  64	zp = znode->parent;
  65	if (zp) {
  66		struct ubifs_zbranch *zbr;
  67
  68		zbr = &zp->zbranch[znode->iip];
  69		zbr->lnum = lnum;
  70		zbr->offs = offs;
  71		zbr->len = len;
  72		ubifs_copy_hash(c, hash, zbr->hash);
  73	} else {
  74		c->zroot.lnum = lnum;
  75		c->zroot.offs = offs;
  76		c->zroot.len = len;
  77		ubifs_copy_hash(c, hash, c->zroot.hash);
  78	}
  79	c->calc_idx_sz += ALIGN(len, 8);
  80
  81	atomic_long_dec(&c->dirty_zn_cnt);
  82
  83	ubifs_assert(c, ubifs_zn_dirty(znode));
  84	ubifs_assert(c, ubifs_zn_cow(znode));
  85
  86	/*
  87	 * Note, unlike 'write_index()' we do not add memory barriers here
  88	 * because this function is called with @c->tnc_mutex locked.
  89	 */
  90	__clear_bit(DIRTY_ZNODE, &znode->flags);
  91	__clear_bit(COW_ZNODE, &znode->flags);
  92
  93	return err;
  94}
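make_idx_node() serialises an in-memory znode into the on-flash index-node format: each branch is converted to little-endian, its hash is copied alongside it, and a branch with a zero LEB number or length is rejected as corruption. The sketch below is userspace-only (not kernel code) and shows the same host-to-little-endian packing pattern with glibc's htole32()/le32toh(); the on_flash_branch layout is hypothetical and merely stands in for struct ubifs_branch.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical on-flash branch: multi-byte fields are stored little-endian */
struct on_flash_branch {
	uint32_t lnum;
	uint32_t offs;
	uint32_t len;
} __attribute__((packed));

/* Pack host-order values into the little-endian on-flash form */
static int pack_branch(struct on_flash_branch *br,
		       uint32_t lnum, uint32_t offs, uint32_t len)
{
	if (!lnum || !len)
		return -1;	/* mirrors the "bad ref in znode" check */
	br->lnum = htole32(lnum);
	br->offs = htole32(offs);
	br->len  = htole32(len);
	return 0;
}

int main(void)
{
	struct on_flash_branch br;

	if (pack_branch(&br, 17, 4096, 160) == 0)
		printf("lnum=%u offs=%u len=%u\n",
		       le32toh(br.lnum), le32toh(br.offs), le32toh(br.len));
	return 0;
}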
  95
  96/**
  97 * fill_gap - make index nodes in gaps in dirty index LEBs.
  98 * @c: UBIFS file-system description object
  99 * @lnum: LEB number that gap appears in
 100 * @gap_start: offset of start of gap
 101 * @gap_end: offset of end of gap
 102 * @dirt: adds dirty space to this
 103 *
 104 * This function returns the number of index nodes written into the gap.
 105 */
 106static int fill_gap(struct ubifs_info *c, int lnum, int gap_start, int gap_end,
 107		    int *dirt)
 108{
 109	int len, gap_remains, gap_pos, written, pad_len;
 110
 111	ubifs_assert(c, (gap_start & 7) == 0);
 112	ubifs_assert(c, (gap_end & 7) == 0);
 113	ubifs_assert(c, gap_end >= gap_start);
 114
 115	gap_remains = gap_end - gap_start;
 116	if (!gap_remains)
 117		return 0;
 118	gap_pos = gap_start;
 119	written = 0;
 120	while (c->enext) {
 121		len = ubifs_idx_node_sz(c, c->enext->child_cnt);
 122		if (len < gap_remains) {
 123			struct ubifs_znode *znode = c->enext;
 124			const int alen = ALIGN(len, 8);
 125			int err;
 126
 127			ubifs_assert(c, alen <= gap_remains);
 128			err = make_idx_node(c, c->ileb_buf + gap_pos, znode,
 129					    lnum, gap_pos, len);
 130			if (err)
 131				return err;
 132			gap_remains -= alen;
 133			gap_pos += alen;
 134			c->enext = znode->cnext;
 135			if (c->enext == c->cnext)
 136				c->enext = NULL;
 137			written += 1;
 138		} else
 139			break;
 140	}
 141	if (gap_end == c->leb_size) {
 142		c->ileb_len = ALIGN(gap_pos, c->min_io_size);
 143		/* Pad to end of min_io_size */
 144		pad_len = c->ileb_len - gap_pos;
 145	} else
 146		/* Pad to end of gap */
 147		pad_len = gap_remains;
 148	dbg_gc("LEB %d:%d to %d len %d nodes written %d wasted bytes %d",
 149	       lnum, gap_start, gap_end, gap_end - gap_start, written, pad_len);
 150	ubifs_pad(c, c->ileb_buf + gap_pos, pad_len);
 151	*dirt += pad_len;
 152	return written;
 153}
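fill_gap() keeps taking the next znode off c->enext while its index node still fits in the gap, advancing the write position by the 8-byte-aligned node length and finally padding whatever is left. A standalone sketch of that arithmetic with made-up node sizes and gap bounds (both gap ends must be 8-byte aligned, as the asserts above require); the end-of-LEB case would instead pad up to min_io_size.

#include <stdio.h>

#define ALIGN8(x) (((x) + 7) & ~7)

int main(void)
{
	/* Hypothetical gap inside an index LEB and a few index-node sizes */
	int gap_start = 1024, gap_end = 1536;
	int node_lens[] = { 120, 184, 96, 300 };
	int gap_pos = gap_start, gap_remains = gap_end - gap_start;

	for (int i = 0; i < 4; i++) {
		int len = node_lens[i];

		if (len >= gap_remains)
			break;			/* node does not fit, stop filling */
		printf("node %d (%d bytes) written at offset %d\n",
		       i, len, gap_pos);
		gap_pos += ALIGN8(len);		/* index nodes are 8-byte aligned */
		gap_remains -= ALIGN8(len);
	}
	printf("padding %d bytes to close the gap\n", gap_remains);
	return 0;
}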
 154
 155/**
 156 * find_old_idx - find an index node obsoleted since the last commit start.
 157 * @c: UBIFS file-system description object
 158 * @lnum: LEB number of obsoleted index node
 159 * @offs: offset of obsoleted index node
 160 *
 161 * Returns %1 if found and %0 otherwise.
 162 */
 163static int find_old_idx(struct ubifs_info *c, int lnum, int offs)
 164{
 165	struct ubifs_old_idx *o;
 166	struct rb_node *p;
 167
 168	p = c->old_idx.rb_node;
 169	while (p) {
 170		o = rb_entry(p, struct ubifs_old_idx, rb);
 171		if (lnum < o->lnum)
 172			p = p->rb_left;
 173		else if (lnum > o->lnum)
 174			p = p->rb_right;
 175		else if (offs < o->offs)
 176			p = p->rb_left;
 177		else if (offs > o->offs)
 178			p = p->rb_right;
 179		else
 180			return 1;
 181	}
 182	return 0;
 183}
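find_old_idx() is a plain red-black-tree lookup keyed lexicographically on (lnum, offs). The same comparison logic on an ordinary binary search tree, as a self-contained sketch; the old_idx struct here is a stand-in for the kernel's ubifs_old_idx, not its real definition.

#include <stdio.h>

struct old_idx {
	int lnum, offs;
	struct old_idx *left, *right;
};

/* Lexicographic (lnum, offs) lookup, mirroring find_old_idx() */
static int find(const struct old_idx *p, int lnum, int offs)
{
	while (p) {
		if (lnum < p->lnum)
			p = p->left;
		else if (lnum > p->lnum)
			p = p->right;
		else if (offs < p->offs)
			p = p->left;
		else if (offs > p->offs)
			p = p->right;
		else
			return 1;	/* found an obsoleted index node */
	}
	return 0;
}

int main(void)
{
	struct old_idx left  = { 12, 1024, NULL, NULL };
	struct old_idx right = { 20,  512, NULL, NULL };
	struct old_idx root  = { 12, 2048, &left, &right };

	printf("(12,1024): %d\n", find(&root, 12, 1024));	/* prints 1 */
	printf("(12,4096): %d\n", find(&root, 12, 4096));	/* prints 0 */
	return 0;
}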
 184
 185/**
 186 * is_idx_node_in_use - determine if an index node can be overwritten.
 187 * @c: UBIFS file-system description object
 188 * @key: key of index node
 189 * @level: index node level
 190 * @lnum: LEB number of index node
 191 * @offs: offset of index node
 192 *
 193 * If @key / @lnum / @offs identify an index node that was not part of the old
 194 * index, then this function returns %0 (obsolete).  Else if the index node was
 195 * part of the old index but is now dirty %1 is returned, else if it is clean %2
 196 * is returned. A negative error code is returned on failure.
 197 */
 198static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key,
 199			      int level, int lnum, int offs)
 200{
 201	int ret;
 202
 203	ret = is_idx_node_in_tnc(c, key, level, lnum, offs);
 204	if (ret < 0)
 205		return ret; /* Error code */
 206	if (ret == 0)
 207		if (find_old_idx(c, lnum, offs))
 208			return 1;
 209	return ret;
 210}
 211
 212/**
 213 * layout_leb_in_gaps - layout index nodes using in-the-gaps method.
 214 * @c: UBIFS file-system description object
 215 * @p: return LEB number here
 216 *
 217 * This function lays out new index nodes for dirty znodes using in-the-gaps
 218 * method of TNC commit.
  219 * This function merely puts the next znode into the next gap, making no
  220 * attempt to maximise the number of znodes that fit.
 221 * This function returns the number of index nodes written into the gaps, or a
 222 * negative error code on failure.
 223 */
 224static int layout_leb_in_gaps(struct ubifs_info *c, int *p)
 225{
 226	struct ubifs_scan_leb *sleb;
 227	struct ubifs_scan_node *snod;
 228	int lnum, dirt = 0, gap_start, gap_end, err, written, tot_written;
 229
 230	tot_written = 0;
 231	/* Get an index LEB with lots of obsolete index nodes */
 232	lnum = ubifs_find_dirty_idx_leb(c);
 233	if (lnum < 0)
 234		/*
 235		 * There also may be dirt in the index head that could be
 236		 * filled, however we do not check there at present.
 237		 */
 238		return lnum; /* Error code */
 239	*p = lnum;
 240	dbg_gc("LEB %d", lnum);
 241	/*
 242	 * Scan the index LEB.  We use the generic scan for this even though
 243	 * it is more comprehensive and less efficient than is needed for this
 244	 * purpose.
 245	 */
 246	sleb = ubifs_scan(c, lnum, 0, c->ileb_buf, 0);
 247	c->ileb_len = 0;
 248	if (IS_ERR(sleb))
 249		return PTR_ERR(sleb);
 250	gap_start = 0;
 251	list_for_each_entry(snod, &sleb->nodes, list) {
 252		struct ubifs_idx_node *idx;
 253		int in_use, level;
 254
 255		ubifs_assert(c, snod->type == UBIFS_IDX_NODE);
 256		idx = snod->node;
 257		key_read(c, ubifs_idx_key(c, idx), &snod->key);
 258		level = le16_to_cpu(idx->level);
 259		/* Determine if the index node is in use (not obsolete) */
 260		in_use = is_idx_node_in_use(c, &snod->key, level, lnum,
 261					    snod->offs);
 262		if (in_use < 0) {
 263			ubifs_scan_destroy(sleb);
 264			return in_use; /* Error code */
 265		}
 266		if (in_use) {
 267			if (in_use == 1)
 268				dirt += ALIGN(snod->len, 8);
 269			/*
 270			 * The obsolete index nodes form gaps that can be
 271			 * overwritten.  This gap has ended because we have
 272			 * found an index node that is still in use
 273			 * i.e. not obsolete
 274			 */
 275			gap_end = snod->offs;
 276			/* Try to fill gap */
 277			written = fill_gap(c, lnum, gap_start, gap_end, &dirt);
 278			if (written < 0) {
 279				ubifs_scan_destroy(sleb);
 280				return written; /* Error code */
 281			}
 282			tot_written += written;
 283			gap_start = ALIGN(snod->offs + snod->len, 8);
 284		}
 285	}
 286	ubifs_scan_destroy(sleb);
 287	c->ileb_len = c->leb_size;
 288	gap_end = c->leb_size;
 289	/* Try to fill gap */
 290	written = fill_gap(c, lnum, gap_start, gap_end, &dirt);
 291	if (written < 0)
 292		return written; /* Error code */
 293	tot_written += written;
 294	if (tot_written == 0) {
 295		struct ubifs_lprops lp;
 296
 297		dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
 298		err = ubifs_read_one_lp(c, lnum, &lp);
 299		if (err)
 300			return err;
 301		if (lp.free == c->leb_size) {
 302			/*
 303			 * We must have snatched this LEB from the idx_gc list
 304			 * so we need to correct the free and dirty space.
 305			 */
 306			err = ubifs_change_one_lp(c, lnum,
 307						  c->leb_size - c->ileb_len,
 308						  dirt, 0, 0, 0);
 309			if (err)
 310				return err;
 311		}
 312		return 0;
 313	}
 314	err = ubifs_change_one_lp(c, lnum, c->leb_size - c->ileb_len, dirt,
 315				  0, 0, 0);
 316	if (err)
 317		return err;
 318	err = ubifs_leb_change(c, lnum, c->ileb_buf, c->ileb_len);
 319	if (err)
 320		return err;
 321	dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
 322	return tot_written;
 323}
 324
 325/**
 326 * get_leb_cnt - calculate the number of empty LEBs needed to commit.
 327 * @c: UBIFS file-system description object
 328 * @cnt: number of znodes to commit
 329 *
 330 * This function returns the number of empty LEBs needed to commit @cnt znodes
 331 * to the current index head.  The number is not exact and may be more than
 332 * needed.
 333 */
 334static int get_leb_cnt(struct ubifs_info *c, int cnt)
 335{
 336	int d;
 337
 338	/* Assume maximum index node size (i.e. overestimate space needed) */
 339	cnt -= (c->leb_size - c->ihead_offs) / c->max_idx_node_sz;
 340	if (cnt < 0)
 341		cnt = 0;
 342	d = c->leb_size / c->max_idx_node_sz;
 343	return DIV_ROUND_UP(cnt, d);
 344}
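get_leb_cnt() deliberately overestimates: it assumes every index node takes the maximum size, credits the space left in the current index head, and rounds up. A worked example with made-up geometry (128 KiB LEBs, a 512-byte worst-case index node, 96 KiB already used in the index head); the numbers are illustrative, not taken from a real UBIFS volume.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int leb_size = 131072, max_idx_node_sz = 512, ihead_offs = 98304;
	int cnt = 1000;				/* znodes to commit */

	cnt -= (leb_size - ihead_offs) / max_idx_node_sz; /* fits in the index head */
	if (cnt < 0)
		cnt = 0;
	printf("znodes left after filling the index head: %d\n", cnt);	/* 936 */
	printf("empty LEBs needed (upper bound): %d\n",
	       DIV_ROUND_UP(cnt, leb_size / max_idx_node_sz));		/* 4 */
	return 0;
}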
 345
 346/**
 347 * layout_in_gaps - in-the-gaps method of committing TNC.
 348 * @c: UBIFS file-system description object
 349 * @cnt: number of dirty znodes to commit.
 350 *
 351 * This function lays out new index nodes for dirty znodes using in-the-gaps
 352 * method of TNC commit.
 353 *
 354 * This function returns %0 on success and a negative error code on failure.
 355 */
 356static int layout_in_gaps(struct ubifs_info *c, int cnt)
 357{
 358	int err, leb_needed_cnt, written, *p;
 359
 360	dbg_gc("%d znodes to write", cnt);
 361
 362	c->gap_lebs = kmalloc_array(c->lst.idx_lebs + 1, sizeof(int),
 363				    GFP_NOFS);
 364	if (!c->gap_lebs)
 365		return -ENOMEM;
 366
 367	p = c->gap_lebs;
 368	do {
 369		ubifs_assert(c, p < c->gap_lebs + c->lst.idx_lebs);
 370		written = layout_leb_in_gaps(c, p);
 371		if (written < 0) {
 372			err = written;
 373			if (err != -ENOSPC) {
 374				kfree(c->gap_lebs);
 375				c->gap_lebs = NULL;
 376				return err;
 377			}
 378			if (!dbg_is_chk_index(c)) {
 379				/*
 380				 * Do not print scary warnings if the debugging
 381				 * option which forces in-the-gaps is enabled.
 382				 */
 383				ubifs_warn(c, "out of space");
 384				ubifs_dump_budg(c, &c->bi);
 385				ubifs_dump_lprops(c);
 386			}
 387			/* Try to commit anyway */
 388			break;
 389		}
 390		p++;
 391		cnt -= written;
 392		leb_needed_cnt = get_leb_cnt(c, cnt);
 393		dbg_gc("%d znodes remaining, need %d LEBs, have %d", cnt,
 394		       leb_needed_cnt, c->ileb_cnt);
 395	} while (leb_needed_cnt > c->ileb_cnt);
 396
 397	*p = -1;
 398	return 0;
 399}
 400
 401/**
 402 * layout_in_empty_space - layout index nodes in empty space.
 403 * @c: UBIFS file-system description object
 404 *
 405 * This function lays out new index nodes for dirty znodes using empty LEBs.
 406 *
 407 * This function returns %0 on success and a negative error code on failure.
 408 */
 409static int layout_in_empty_space(struct ubifs_info *c)
 410{
 411	struct ubifs_znode *znode, *cnext, *zp;
 412	int lnum, offs, len, next_len, buf_len, buf_offs, used, avail;
 413	int wlen, blen, err;
 414
 415	cnext = c->enext;
 416	if (!cnext)
 417		return 0;
 418
 419	lnum = c->ihead_lnum;
 420	buf_offs = c->ihead_offs;
 421
 422	buf_len = ubifs_idx_node_sz(c, c->fanout);
 423	buf_len = ALIGN(buf_len, c->min_io_size);
 424	used = 0;
 425	avail = buf_len;
 426
 427	/* Ensure there is enough room for first write */
 428	next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
 429	if (buf_offs + next_len > c->leb_size)
 430		lnum = -1;
 431
 432	while (1) {
 433		znode = cnext;
 434
 435		len = ubifs_idx_node_sz(c, znode->child_cnt);
 436
 437		/* Determine the index node position */
 438		if (lnum == -1) {
 439			if (c->ileb_nxt >= c->ileb_cnt) {
 440				ubifs_err(c, "out of space");
 441				return -ENOSPC;
 442			}
 443			lnum = c->ilebs[c->ileb_nxt++];
 444			buf_offs = 0;
 445			used = 0;
 446			avail = buf_len;
 447		}
 448
 449		offs = buf_offs + used;
 450
 451		znode->lnum = lnum;
 452		znode->offs = offs;
 453		znode->len = len;
 454
 455		/* Update the parent */
 456		zp = znode->parent;
 457		if (zp) {
 458			struct ubifs_zbranch *zbr;
 459			int i;
 460
 461			i = znode->iip;
 462			zbr = &zp->zbranch[i];
 463			zbr->lnum = lnum;
 464			zbr->offs = offs;
 465			zbr->len = len;
 466		} else {
 467			c->zroot.lnum = lnum;
 468			c->zroot.offs = offs;
 469			c->zroot.len = len;
 470		}
 471		c->calc_idx_sz += ALIGN(len, 8);
 472
 473		/*
 474		 * Once lprops is updated, we can decrease the dirty znode count
 475		 * but it is easier to just do it here.
 476		 */
 477		atomic_long_dec(&c->dirty_zn_cnt);
 478
 479		/*
 480		 * Calculate the next index node length to see if there is
 481		 * enough room for it
 482		 */
 483		cnext = znode->cnext;
 484		if (cnext == c->cnext)
 485			next_len = 0;
 486		else
 487			next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
 488
 489		/* Update buffer positions */
 490		wlen = used + len;
 491		used += ALIGN(len, 8);
 492		avail -= ALIGN(len, 8);
 493
 494		if (next_len != 0 &&
 495		    buf_offs + used + next_len <= c->leb_size &&
 496		    avail > 0)
 497			continue;
 498
 499		if (avail <= 0 && next_len &&
 500		    buf_offs + used + next_len <= c->leb_size)
 501			blen = buf_len;
 502		else
 503			blen = ALIGN(wlen, c->min_io_size);
 504
 505		/* The buffer is full or there are no more znodes to do */
 506		buf_offs += blen;
 507		if (next_len) {
 508			if (buf_offs + next_len > c->leb_size) {
 509				err = ubifs_update_one_lp(c, lnum,
 510					c->leb_size - buf_offs, blen - used,
 511					0, 0);
 512				if (err)
 513					return err;
 514				lnum = -1;
 515			}
 516			used -= blen;
 517			if (used < 0)
 518				used = 0;
 519			avail = buf_len - used;
 520			continue;
 521		}
 522		err = ubifs_update_one_lp(c, lnum, c->leb_size - buf_offs,
 523					  blen - used, 0, 0);
 524		if (err)
 525			return err;
 526		break;
 527	}
 528
 529	c->dbg->new_ihead_lnum = lnum;
 530	c->dbg->new_ihead_offs = buf_offs;
 531
 532	return 0;
 533}
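layout_in_empty_space() feeds index nodes through a write buffer sized to one fanout-wide index node rounded up to min_io_size; when the buffer is flushed, the write length is the buffered bytes rounded up to min_io_size, and that round-up ends up as padding accounted as dirty space. A small sketch of the rounding with hypothetical sizes:

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	int min_io_size = 2048;			/* hypothetical NAND page size */
	int wlen = 3 * 160 + 24;		/* bytes of index nodes buffered */
	int blen = ALIGN_UP(wlen, min_io_size);	/* actual flash write length */

	printf("buffered %d bytes, flash write %d bytes, padding %d bytes\n",
	       wlen, blen, blen - wlen);
	return 0;
}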
 534
 535/**
 536 * layout_commit - determine positions of index nodes to commit.
 537 * @c: UBIFS file-system description object
 538 * @no_space: indicates that insufficient empty LEBs were allocated
 539 * @cnt: number of znodes to commit
 540 *
  541 * Calculate and update the positions of index nodes to commit.  If an
  542 * insufficient number of empty LEBs was allocated, then index nodes are placed
 543 * into the gaps created by obsolete index nodes in non-empty index LEBs.  For
 544 * this purpose, an obsolete index node is one that was not in the index as at
 545 * the end of the last commit.  To write "in-the-gaps" requires that those index
 546 * LEBs are updated atomically in-place.
 547 */
 548static int layout_commit(struct ubifs_info *c, int no_space, int cnt)
 549{
 550	int err;
 551
 552	if (no_space) {
 553		err = layout_in_gaps(c, cnt);
 554		if (err)
 555			return err;
 556	}
 557	err = layout_in_empty_space(c);
 558	return err;
 559}
 560
 561/**
 562 * find_first_dirty - find first dirty znode.
 563 * @znode: znode to begin searching from
 564 */
 565static struct ubifs_znode *find_first_dirty(struct ubifs_znode *znode)
 566{
 567	int i, cont;
 568
 569	if (!znode)
 570		return NULL;
 571
 572	while (1) {
 573		if (znode->level == 0) {
 574			if (ubifs_zn_dirty(znode))
 575				return znode;
 576			return NULL;
 577		}
 578		cont = 0;
 579		for (i = 0; i < znode->child_cnt; i++) {
 580			struct ubifs_zbranch *zbr = &znode->zbranch[i];
 581
 582			if (zbr->znode && ubifs_zn_dirty(zbr->znode)) {
 583				znode = zbr->znode;
 584				cont = 1;
 585				break;
 586			}
 587		}
 588		if (!cont) {
 589			if (ubifs_zn_dirty(znode))
 590				return znode;
 591			return NULL;
 592		}
 593	}
 594}
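find_first_dirty() walks down from the given znode to the left-most dirty znode that has no dirty children, and the find_next_dirty() helper that follows moves across siblings or up to the parent; together they produce a post-order walk over the dirty subtree, so children land on the commit list before their parents. A toy recursion showing that resulting order; the node type here is invented for the example and is not the kernel's znode.

#include <stdio.h>

struct node {
	const char *name;
	int dirty;
	int nchild;
	struct node *child[3];
};

/* Post-order: visit dirty children before their dirty parent, which is the
 * order the TNC commit needs so a parent records its children's positions. */
static void walk(struct node *n)
{
	if (!n || !n->dirty)
		return;
	for (int i = 0; i < n->nchild; i++)
		walk(n->child[i]);
	printf("commit %s\n", n->name);
}

int main(void)
{
	struct node leaf1 = { "leaf1", 1, 0, { NULL } };
	struct node leaf2 = { "leaf2", 0, 0, { NULL } };	/* clean: skipped */
	struct node mid   = { "mid",   1, 2, { &leaf1, &leaf2 } };
	struct node root  = { "root",  1, 1, { &mid } };

	walk(&root);	/* prints: leaf1, mid, root */
	return 0;
}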
 595
 596/**
 597 * find_next_dirty - find next dirty znode.
 598 * @znode: znode to begin searching from
 599 */
 600static struct ubifs_znode *find_next_dirty(struct ubifs_znode *znode)
 601{
 602	int n = znode->iip + 1;
 603
 604	znode = znode->parent;
 605	if (!znode)
 606		return NULL;
 607	for (; n < znode->child_cnt; n++) {
 608		struct ubifs_zbranch *zbr = &znode->zbranch[n];
 609
 610		if (zbr->znode && ubifs_zn_dirty(zbr->znode))
 611			return find_first_dirty(zbr->znode);
 612	}
 613	return znode;
 614}
 615
 616/**
 617 * get_znodes_to_commit - create list of dirty znodes to commit.
 618 * @c: UBIFS file-system description object
 619 *
 620 * This function returns the number of znodes to commit.
 621 */
 622static int get_znodes_to_commit(struct ubifs_info *c)
 623{
 624	struct ubifs_znode *znode, *cnext;
 625	int cnt = 0;
 626
 627	c->cnext = find_first_dirty(c->zroot.znode);
 628	znode = c->enext = c->cnext;
 629	if (!znode) {
 630		dbg_cmt("no znodes to commit");
 631		return 0;
 632	}
 633	cnt += 1;
 634	while (1) {
 635		ubifs_assert(c, !ubifs_zn_cow(znode));
 636		__set_bit(COW_ZNODE, &znode->flags);
 637		znode->alt = 0;
 638		cnext = find_next_dirty(znode);
 639		if (!cnext) {
 640			znode->cnext = c->cnext;
 641			break;
 642		}
 643		znode->cparent = znode->parent;
 644		znode->ciip = znode->iip;
 645		znode->cnext = cnext;
 646		znode = cnext;
 647		cnt += 1;
 648	}
 649	dbg_cmt("committing %d znodes", cnt);
 650	ubifs_assert(c, cnt == atomic_long_read(&c->dirty_zn_cnt));
 651	return cnt;
 652}
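get_znodes_to_commit() strings the dirty znodes together through their cnext pointers and closes the chain back to the head (c->cnext), which is why later walks such as free_obsolete_znodes() use a do/while that stops once they are back at the start. A minimal standalone sketch of building and walking such a ring; the znode fields are reduced here to a name and a cnext pointer.

#include <stdio.h>

struct znode {
	const char *name;
	struct znode *cnext;
};

int main(void)
{
	struct znode a = { "a", NULL }, b = { "b", NULL }, c = { "c", NULL };
	struct znode *head = &a, *cur;

	/* Link the commit list and close it into a ring, like cnext */
	a.cnext = &b;
	b.cnext = &c;
	c.cnext = head;		/* last entry points back at the head */

	cur = head;
	do {
		printf("visit %s\n", cur->name);
		cur = cur->cnext;
	} while (cur != head);	/* same termination test as the kernel walk */
	return 0;
}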
 653
 654/**
 655 * alloc_idx_lebs - allocate empty LEBs to be used to commit.
 656 * @c: UBIFS file-system description object
 657 * @cnt: number of znodes to commit
 658 *
 659 * This function returns %-ENOSPC if it cannot allocate a sufficient number of
 660 * empty LEBs.  %0 is returned on success, otherwise a negative error code
 661 * is returned.
 662 */
 663static int alloc_idx_lebs(struct ubifs_info *c, int cnt)
 664{
 665	int i, leb_cnt, lnum;
 666
 667	c->ileb_cnt = 0;
 668	c->ileb_nxt = 0;
 669	leb_cnt = get_leb_cnt(c, cnt);
 670	dbg_cmt("need about %d empty LEBS for TNC commit", leb_cnt);
 671	if (!leb_cnt)
 672		return 0;
 673	c->ilebs = kmalloc_array(leb_cnt, sizeof(int), GFP_NOFS);
 674	if (!c->ilebs)
 675		return -ENOMEM;
 676	for (i = 0; i < leb_cnt; i++) {
 677		lnum = ubifs_find_free_leb_for_idx(c);
 678		if (lnum < 0)
 679			return lnum;
 680		c->ilebs[c->ileb_cnt++] = lnum;
 681		dbg_cmt("LEB %d", lnum);
 682	}
 683	if (dbg_is_chk_index(c) && !(prandom_u32() & 7))
 684		return -ENOSPC;
 685	return 0;
 686}
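The dbg_is_chk_index() branch is debug-only fault injection: with index checking enabled, roughly one call in eight pretends allocation failed so the rarely used in-the-gaps path gets exercised. The same one-in-eight pattern in a standalone sketch, using the C library's rand() in place of the kernel PRNG:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Return -1 about one time in eight to force the fallback path */
static int maybe_inject_enospc(void)
{
	return (rand() & 7) == 0 ? -1 : 0;
}

int main(void)
{
	int injected = 0;

	srand((unsigned)time(NULL));
	for (int i = 0; i < 1000; i++)
		if (maybe_inject_enospc())
			injected++;
	printf("injected %d failures out of 1000 (~125 expected)\n", injected);
	return 0;
}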
 687
 688/**
 689 * free_unused_idx_lebs - free unused LEBs that were allocated for the commit.
 690 * @c: UBIFS file-system description object
 691 *
 692 * It is possible that we allocate more empty LEBs for the commit than we need.
  693 * This function frees the surplus.
 694 *
 695 * This function returns %0 on success and a negative error code on failure.
 696 */
 697static int free_unused_idx_lebs(struct ubifs_info *c)
 698{
 699	int i, err = 0, lnum, er;
 700
 701	for (i = c->ileb_nxt; i < c->ileb_cnt; i++) {
 702		lnum = c->ilebs[i];
 703		dbg_cmt("LEB %d", lnum);
 704		er = ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0,
 705					 LPROPS_INDEX | LPROPS_TAKEN, 0);
 706		if (!err)
 707			err = er;
 708	}
 709	return err;
 710}
 711
 712/**
 713 * free_idx_lebs - free unused LEBs after commit end.
 714 * @c: UBIFS file-system description object
 715 *
 716 * This function returns %0 on success and a negative error code on failure.
 717 */
 718static int free_idx_lebs(struct ubifs_info *c)
 719{
 720	int err;
 721
 722	err = free_unused_idx_lebs(c);
 723	kfree(c->ilebs);
 724	c->ilebs = NULL;
 725	return err;
 726}
 727
 728/**
 729 * ubifs_tnc_start_commit - start TNC commit.
 730 * @c: UBIFS file-system description object
 731 * @zroot: new index root position is returned here
 732 *
 733 * This function prepares the list of indexing nodes to commit and lays out
 734 * their positions on flash. If there is not enough free space it uses the
 735 * in-gap commit method. Returns zero in case of success and a negative error
 736 * code in case of failure.
 737 */
 738int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot)
 739{
 740	int err = 0, cnt;
 741
 742	mutex_lock(&c->tnc_mutex);
 743	err = dbg_check_tnc(c, 1);
 744	if (err)
 745		goto out;
 746	cnt = get_znodes_to_commit(c);
 747	if (cnt != 0) {
 748		int no_space = 0;
 749
 750		err = alloc_idx_lebs(c, cnt);
 751		if (err == -ENOSPC)
 752			no_space = 1;
 753		else if (err)
 754			goto out_free;
 755		err = layout_commit(c, no_space, cnt);
 756		if (err)
 757			goto out_free;
 758		ubifs_assert(c, atomic_long_read(&c->dirty_zn_cnt) == 0);
 759		err = free_unused_idx_lebs(c);
 760		if (err)
 761			goto out;
 762	}
 763	destroy_old_idx(c);
 764	memcpy(zroot, &c->zroot, sizeof(struct ubifs_zbranch));
 765
 766	err = ubifs_save_dirty_idx_lnums(c);
 767	if (err)
 768		goto out;
 769
 770	spin_lock(&c->space_lock);
 771	/*
 772	 * Although we have not finished committing yet, update size of the
 773	 * committed index ('c->bi.old_idx_sz') and zero out the index growth
 774	 * budget. It is OK to do this now, because we've reserved all the
  775	 * space which is needed to commit the index, and it is safe for the
 776	 * budgeting subsystem to assume the index is already committed,
 777	 * even though it is not.
 778	 */
 779	ubifs_assert(c, c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c));
 780	c->bi.old_idx_sz = c->calc_idx_sz;
 781	c->bi.uncommitted_idx = 0;
 782	c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
 783	spin_unlock(&c->space_lock);
 784	mutex_unlock(&c->tnc_mutex);
 785
 786	dbg_cmt("number of index LEBs %d", c->lst.idx_lebs);
 787	dbg_cmt("size of index %llu", c->calc_idx_sz);
 788	return err;
 789
 790out_free:
 791	free_idx_lebs(c);
 792out:
 793	mutex_unlock(&c->tnc_mutex);
 794	return err;
 795}
 796
 797/**
 798 * write_index - write index nodes.
 799 * @c: UBIFS file-system description object
 800 *
 801 * This function writes the index nodes whose positions were laid out in the
 802 * layout_in_empty_space function.
 803 */
 804static int write_index(struct ubifs_info *c)
 805{
 806	struct ubifs_idx_node *idx;
 807	struct ubifs_znode *znode, *cnext;
 808	int i, lnum, offs, len, next_len, buf_len, buf_offs, used;
 809	int avail, wlen, err, lnum_pos = 0, blen, nxt_offs;
 810
 811	cnext = c->enext;
 812	if (!cnext)
 813		return 0;
 814
 815	/*
 816	 * Always write index nodes to the index head so that index nodes and
 817	 * other types of nodes are never mixed in the same erase block.
 818	 */
 819	lnum = c->ihead_lnum;
 820	buf_offs = c->ihead_offs;
 821
 822	/* Allocate commit buffer */
 823	buf_len = ALIGN(c->max_idx_node_sz, c->min_io_size);
 824	used = 0;
 825	avail = buf_len;
 826
 827	/* Ensure there is enough room for first write */
 828	next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
 829	if (buf_offs + next_len > c->leb_size) {
 830		err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0, 0,
 831					  LPROPS_TAKEN);
 832		if (err)
 833			return err;
 834		lnum = -1;
 835	}
 836
 837	while (1) {
 838		u8 hash[UBIFS_HASH_ARR_SZ];
 839
 840		cond_resched();
 841
 842		znode = cnext;
 843		idx = c->cbuf + used;
 844
 845		/* Make index node */
 846		idx->ch.node_type = UBIFS_IDX_NODE;
 847		idx->child_cnt = cpu_to_le16(znode->child_cnt);
 848		idx->level = cpu_to_le16(znode->level);
 849		for (i = 0; i < znode->child_cnt; i++) {
 850			struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
 851			struct ubifs_zbranch *zbr = &znode->zbranch[i];
 852
 853			key_write_idx(c, &zbr->key, &br->key);
 854			br->lnum = cpu_to_le32(zbr->lnum);
 855			br->offs = cpu_to_le32(zbr->offs);
 856			br->len = cpu_to_le32(zbr->len);
 857			ubifs_copy_hash(c, zbr->hash, ubifs_branch_hash(c, br));
 858			if (!zbr->lnum || !zbr->len) {
 859				ubifs_err(c, "bad ref in znode");
 860				ubifs_dump_znode(c, znode);
 861				if (zbr->znode)
 862					ubifs_dump_znode(c, zbr->znode);
 863
 864				return -EINVAL;
 865			}
 866		}
 867		len = ubifs_idx_node_sz(c, znode->child_cnt);
 868		ubifs_prepare_node(c, idx, len, 0);
 869		ubifs_node_calc_hash(c, idx, hash);
 870
 871		mutex_lock(&c->tnc_mutex);
 872
 873		if (znode->cparent)
 874			ubifs_copy_hash(c, hash,
 875					znode->cparent->zbranch[znode->ciip].hash);
 876
 877		if (znode->parent) {
 878			if (!ubifs_zn_obsolete(znode))
 879				ubifs_copy_hash(c, hash,
 880					znode->parent->zbranch[znode->iip].hash);
 881		} else {
 882			ubifs_copy_hash(c, hash, c->zroot.hash);
 883		}
 884
 885		mutex_unlock(&c->tnc_mutex);
 886
 887		/* Determine the index node position */
 888		if (lnum == -1) {
 889			lnum = c->ilebs[lnum_pos++];
 890			buf_offs = 0;
 891			used = 0;
 892			avail = buf_len;
 893		}
 894		offs = buf_offs + used;
 895
 896		if (lnum != znode->lnum || offs != znode->offs ||
 897		    len != znode->len) {
 898			ubifs_err(c, "inconsistent znode posn");
 899			return -EINVAL;
 900		}
 901
 902		/* Grab some stuff from znode while we still can */
 903		cnext = znode->cnext;
 904
 905		ubifs_assert(c, ubifs_zn_dirty(znode));
 906		ubifs_assert(c, ubifs_zn_cow(znode));
 907
 908		/*
 909		 * It is important that other threads should see %DIRTY_ZNODE
 910		 * flag cleared before %COW_ZNODE. Specifically, it matters in
 911		 * the 'dirty_cow_znode()' function. This is the reason for the
 912		 * first barrier. Also, we want the bit changes to be seen to
  913		 * other threads ASAP, to avoid unnecessary copying, which is
 914		 * the reason for the second barrier.
 915		 */
 916		clear_bit(DIRTY_ZNODE, &znode->flags);
 917		smp_mb__before_atomic();
 918		clear_bit(COW_ZNODE, &znode->flags);
 919		smp_mb__after_atomic();
 920
 921		/*
 922		 * We have marked the znode as clean but have not updated the
 923		 * @c->clean_zn_cnt counter. If this znode becomes dirty again
 924		 * before 'free_obsolete_znodes()' is called, then
 925		 * @c->clean_zn_cnt will be decremented before it gets
 926		 * incremented (resulting in 2 decrements for the same znode).
 927		 * This means that @c->clean_zn_cnt may become negative for a
 928		 * while.
 929		 *
 930		 * Q: why we cannot increment @c->clean_zn_cnt?
 931		 * A: because we do not have the @c->tnc_mutex locked, and the
 932		 *    following code would be racy and buggy:
 933		 *
 934		 *    if (!ubifs_zn_obsolete(znode)) {
 935		 *            atomic_long_inc(&c->clean_zn_cnt);
 936		 *            atomic_long_inc(&ubifs_clean_zn_cnt);
 937		 *    }
 938		 *
 939		 *    Thus, we just delay the @c->clean_zn_cnt update until we
 940		 *    have the mutex locked.
 941		 */
 942
 943		/* Do not access znode from this point on */
 944
 945		/* Update buffer positions */
 946		wlen = used + len;
 947		used += ALIGN(len, 8);
 948		avail -= ALIGN(len, 8);
 949
 950		/*
 951		 * Calculate the next index node length to see if there is
 952		 * enough room for it
 953		 */
 954		if (cnext == c->cnext)
 955			next_len = 0;
 956		else
 957			next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
 958
 959		nxt_offs = buf_offs + used + next_len;
 960		if (next_len && nxt_offs <= c->leb_size) {
 961			if (avail > 0)
 962				continue;
 963			else
 964				blen = buf_len;
 965		} else {
 966			wlen = ALIGN(wlen, 8);
 967			blen = ALIGN(wlen, c->min_io_size);
 968			ubifs_pad(c, c->cbuf + wlen, blen - wlen);
 969		}
 970
 971		/* The buffer is full or there are no more znodes to do */
 972		err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs, blen);
 973		if (err)
 974			return err;
 975		buf_offs += blen;
 976		if (next_len) {
 977			if (nxt_offs > c->leb_size) {
 978				err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0,
 979							  0, LPROPS_TAKEN);
 980				if (err)
 981					return err;
 982				lnum = -1;
 983			}
 984			used -= blen;
 985			if (used < 0)
 986				used = 0;
 987			avail = buf_len - used;
 988			memmove(c->cbuf, c->cbuf + blen, used);
 989			continue;
 990		}
 991		break;
 992	}
 993
 994	if (lnum != c->dbg->new_ihead_lnum ||
 995	    buf_offs != c->dbg->new_ihead_offs) {
 996		ubifs_err(c, "inconsistent ihead");
 997		return -EINVAL;
 998	}
 999
1000	c->ihead_lnum = lnum;
1001	c->ihead_offs = buf_offs;
1002
1003	return 0;
1004}
1005
1006/**
1007 * free_obsolete_znodes - free obsolete znodes.
1008 * @c: UBIFS file-system description object
1009 *
 1010 * At the end of the commit, obsolete znodes are freed.
1011 */
1012static void free_obsolete_znodes(struct ubifs_info *c)
1013{
1014	struct ubifs_znode *znode, *cnext;
1015
1016	cnext = c->cnext;
1017	do {
1018		znode = cnext;
1019		cnext = znode->cnext;
1020		if (ubifs_zn_obsolete(znode))
1021			kfree(znode);
1022		else {
1023			znode->cnext = NULL;
1024			atomic_long_inc(&c->clean_zn_cnt);
1025			atomic_long_inc(&ubifs_clean_zn_cnt);
1026		}
1027	} while (cnext != c->cnext);
1028}
1029
1030/**
1031 * return_gap_lebs - return LEBs used by the in-gap commit method.
1032 * @c: UBIFS file-system description object
1033 *
1034 * This function clears the "taken" flag for the LEBs which were used by the
1035 * "commit in-the-gaps" method.
1036 */
1037static int return_gap_lebs(struct ubifs_info *c)
1038{
1039	int *p, err;
1040
1041	if (!c->gap_lebs)
1042		return 0;
1043
1044	dbg_cmt("");
1045	for (p = c->gap_lebs; *p != -1; p++) {
1046		err = ubifs_change_one_lp(c, *p, LPROPS_NC, LPROPS_NC, 0,
1047					  LPROPS_TAKEN, 0);
1048		if (err)
1049			return err;
1050	}
1051
1052	kfree(c->gap_lebs);
1053	c->gap_lebs = NULL;
1054	return 0;
1055}
1056
1057/**
1058 * ubifs_tnc_end_commit - update the TNC for commit end.
1059 * @c: UBIFS file-system description object
1060 *
1061 * Write the dirty znodes.
1062 */
1063int ubifs_tnc_end_commit(struct ubifs_info *c)
1064{
1065	int err;
1066
1067	if (!c->cnext)
1068		return 0;
1069
1070	err = return_gap_lebs(c);
1071	if (err)
1072		return err;
1073
1074	err = write_index(c);
1075	if (err)
1076		return err;
1077
1078	mutex_lock(&c->tnc_mutex);
1079
1080	dbg_cmt("TNC height is %d", c->zroot.znode->level + 1);
1081
1082	free_obsolete_znodes(c);
1083
1084	c->cnext = NULL;
1085	kfree(c->ilebs);
1086	c->ilebs = NULL;
1087
1088	mutex_unlock(&c->tnc_mutex);
1089
1090	return 0;
1091}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * This file is part of UBIFS.
   4 *
   5 * Copyright (C) 2006-2008 Nokia Corporation.
   6 *
   7 * Authors: Adrian Hunter
   8 *          Artem Bityutskiy (Битюцкий Артём)
   9 */
  10
  11/* This file implements TNC functions for committing */
  12
  13#include <linux/random.h>
  14#include "ubifs.h"
  15
  16/**
  17 * make_idx_node - make an index node for fill-the-gaps method of TNC commit.
  18 * @c: UBIFS file-system description object
  19 * @idx: buffer in which to place new index node
  20 * @znode: znode from which to make new index node
  21 * @lnum: LEB number where new index node will be written
  22 * @offs: offset where new index node will be written
  23 * @len: length of new index node
  24 */
  25static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx,
  26			 struct ubifs_znode *znode, int lnum, int offs, int len)
  27{
  28	struct ubifs_znode *zp;
  29	u8 hash[UBIFS_HASH_ARR_SZ];
  30	int i, err;
  31
  32	/* Make index node */
  33	idx->ch.node_type = UBIFS_IDX_NODE;
  34	idx->child_cnt = cpu_to_le16(znode->child_cnt);
  35	idx->level = cpu_to_le16(znode->level);
  36	for (i = 0; i < znode->child_cnt; i++) {
  37		struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
  38		struct ubifs_zbranch *zbr = &znode->zbranch[i];
  39
  40		key_write_idx(c, &zbr->key, &br->key);
  41		br->lnum = cpu_to_le32(zbr->lnum);
  42		br->offs = cpu_to_le32(zbr->offs);
  43		br->len = cpu_to_le32(zbr->len);
  44		ubifs_copy_hash(c, zbr->hash, ubifs_branch_hash(c, br));
  45		if (!zbr->lnum || !zbr->len) {
  46			ubifs_err(c, "bad ref in znode");
  47			ubifs_dump_znode(c, znode);
  48			if (zbr->znode)
  49				ubifs_dump_znode(c, zbr->znode);
  50
  51			return -EINVAL;
  52		}
  53	}
  54	ubifs_prepare_node(c, idx, len, 0);
  55	ubifs_node_calc_hash(c, idx, hash);
  56
  57	znode->lnum = lnum;
  58	znode->offs = offs;
  59	znode->len = len;
  60
  61	err = insert_old_idx_znode(c, znode);
  62
  63	/* Update the parent */
  64	zp = znode->parent;
  65	if (zp) {
  66		struct ubifs_zbranch *zbr;
  67
  68		zbr = &zp->zbranch[znode->iip];
  69		zbr->lnum = lnum;
  70		zbr->offs = offs;
  71		zbr->len = len;
  72		ubifs_copy_hash(c, hash, zbr->hash);
  73	} else {
  74		c->zroot.lnum = lnum;
  75		c->zroot.offs = offs;
  76		c->zroot.len = len;
  77		ubifs_copy_hash(c, hash, c->zroot.hash);
  78	}
  79	c->calc_idx_sz += ALIGN(len, 8);
  80
  81	atomic_long_dec(&c->dirty_zn_cnt);
  82
  83	ubifs_assert(c, ubifs_zn_dirty(znode));
  84	ubifs_assert(c, ubifs_zn_cow(znode));
  85
  86	/*
  87	 * Note, unlike 'write_index()' we do not add memory barriers here
  88	 * because this function is called with @c->tnc_mutex locked.
  89	 */
  90	__clear_bit(DIRTY_ZNODE, &znode->flags);
  91	__clear_bit(COW_ZNODE, &znode->flags);
  92
  93	return err;
  94}
  95
  96/**
  97 * fill_gap - make index nodes in gaps in dirty index LEBs.
  98 * @c: UBIFS file-system description object
  99 * @lnum: LEB number that gap appears in
 100 * @gap_start: offset of start of gap
 101 * @gap_end: offset of end of gap
 102 * @dirt: adds dirty space to this
 103 *
 104 * This function returns the number of index nodes written into the gap.
 105 */
 106static int fill_gap(struct ubifs_info *c, int lnum, int gap_start, int gap_end,
 107		    int *dirt)
 108{
 109	int len, gap_remains, gap_pos, written, pad_len;
 110
 111	ubifs_assert(c, (gap_start & 7) == 0);
 112	ubifs_assert(c, (gap_end & 7) == 0);
 113	ubifs_assert(c, gap_end >= gap_start);
 114
 115	gap_remains = gap_end - gap_start;
 116	if (!gap_remains)
 117		return 0;
 118	gap_pos = gap_start;
 119	written = 0;
 120	while (c->enext) {
 121		len = ubifs_idx_node_sz(c, c->enext->child_cnt);
 122		if (len < gap_remains) {
 123			struct ubifs_znode *znode = c->enext;
 124			const int alen = ALIGN(len, 8);
 125			int err;
 126
 127			ubifs_assert(c, alen <= gap_remains);
 128			err = make_idx_node(c, c->ileb_buf + gap_pos, znode,
 129					    lnum, gap_pos, len);
 130			if (err)
 131				return err;
 132			gap_remains -= alen;
 133			gap_pos += alen;
 134			c->enext = znode->cnext;
 135			if (c->enext == c->cnext)
 136				c->enext = NULL;
 137			written += 1;
 138		} else
 139			break;
 140	}
 141	if (gap_end == c->leb_size) {
 142		c->ileb_len = ALIGN(gap_pos, c->min_io_size);
 143		/* Pad to end of min_io_size */
 144		pad_len = c->ileb_len - gap_pos;
 145	} else
 146		/* Pad to end of gap */
 147		pad_len = gap_remains;
 148	dbg_gc("LEB %d:%d to %d len %d nodes written %d wasted bytes %d",
 149	       lnum, gap_start, gap_end, gap_end - gap_start, written, pad_len);
 150	ubifs_pad(c, c->ileb_buf + gap_pos, pad_len);
 151	*dirt += pad_len;
 152	return written;
 153}
 154
 155/**
 156 * find_old_idx - find an index node obsoleted since the last commit start.
 157 * @c: UBIFS file-system description object
 158 * @lnum: LEB number of obsoleted index node
 159 * @offs: offset of obsoleted index node
 160 *
 161 * Returns %1 if found and %0 otherwise.
 162 */
 163static int find_old_idx(struct ubifs_info *c, int lnum, int offs)
 164{
 165	struct ubifs_old_idx *o;
 166	struct rb_node *p;
 167
 168	p = c->old_idx.rb_node;
 169	while (p) {
 170		o = rb_entry(p, struct ubifs_old_idx, rb);
 171		if (lnum < o->lnum)
 172			p = p->rb_left;
 173		else if (lnum > o->lnum)
 174			p = p->rb_right;
 175		else if (offs < o->offs)
 176			p = p->rb_left;
 177		else if (offs > o->offs)
 178			p = p->rb_right;
 179		else
 180			return 1;
 181	}
 182	return 0;
 183}
 184
 185/**
 186 * is_idx_node_in_use - determine if an index node can be overwritten.
 187 * @c: UBIFS file-system description object
 188 * @key: key of index node
 189 * @level: index node level
 190 * @lnum: LEB number of index node
 191 * @offs: offset of index node
 192 *
 193 * If @key / @lnum / @offs identify an index node that was not part of the old
 194 * index, then this function returns %0 (obsolete).  Else if the index node was
 195 * part of the old index but is now dirty %1 is returned, else if it is clean %2
 196 * is returned. A negative error code is returned on failure.
 197 */
 198static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key,
 199			      int level, int lnum, int offs)
 200{
 201	int ret;
 202
 203	ret = is_idx_node_in_tnc(c, key, level, lnum, offs);
 204	if (ret < 0)
 205		return ret; /* Error code */
 206	if (ret == 0)
 207		if (find_old_idx(c, lnum, offs))
 208			return 1;
 209	return ret;
 210}
 211
 212/**
 213 * layout_leb_in_gaps - layout index nodes using in-the-gaps method.
 214 * @c: UBIFS file-system description object
 215 * @p: return LEB number in @c->gap_lebs[p]
 216 *
 217 * This function lays out new index nodes for dirty znodes using in-the-gaps
 218 * method of TNC commit.
  219 * This function merely puts the next znode into the next gap, making no
  220 * attempt to maximise the number of znodes that fit.
 221 * This function returns the number of index nodes written into the gaps, or a
 222 * negative error code on failure.
 223 */
 224static int layout_leb_in_gaps(struct ubifs_info *c, int p)
 225{
 226	struct ubifs_scan_leb *sleb;
 227	struct ubifs_scan_node *snod;
 228	int lnum, dirt = 0, gap_start, gap_end, err, written, tot_written;
 229
 230	tot_written = 0;
 231	/* Get an index LEB with lots of obsolete index nodes */
 232	lnum = ubifs_find_dirty_idx_leb(c);
 233	if (lnum < 0)
 234		/*
 235		 * There also may be dirt in the index head that could be
 236		 * filled, however we do not check there at present.
 237		 */
 238		return lnum; /* Error code */
 239	c->gap_lebs[p] = lnum;
 240	dbg_gc("LEB %d", lnum);
 241	/*
 242	 * Scan the index LEB.  We use the generic scan for this even though
 243	 * it is more comprehensive and less efficient than is needed for this
 244	 * purpose.
 245	 */
 246	sleb = ubifs_scan(c, lnum, 0, c->ileb_buf, 0);
 247	c->ileb_len = 0;
 248	if (IS_ERR(sleb))
 249		return PTR_ERR(sleb);
 250	gap_start = 0;
 251	list_for_each_entry(snod, &sleb->nodes, list) {
 252		struct ubifs_idx_node *idx;
 253		int in_use, level;
 254
 255		ubifs_assert(c, snod->type == UBIFS_IDX_NODE);
 256		idx = snod->node;
 257		key_read(c, ubifs_idx_key(c, idx), &snod->key);
 258		level = le16_to_cpu(idx->level);
 259		/* Determine if the index node is in use (not obsolete) */
 260		in_use = is_idx_node_in_use(c, &snod->key, level, lnum,
 261					    snod->offs);
 262		if (in_use < 0) {
 263			ubifs_scan_destroy(sleb);
 264			return in_use; /* Error code */
 265		}
 266		if (in_use) {
 267			if (in_use == 1)
 268				dirt += ALIGN(snod->len, 8);
 269			/*
 270			 * The obsolete index nodes form gaps that can be
 271			 * overwritten.  This gap has ended because we have
 272			 * found an index node that is still in use
 273			 * i.e. not obsolete
 274			 */
 275			gap_end = snod->offs;
 276			/* Try to fill gap */
 277			written = fill_gap(c, lnum, gap_start, gap_end, &dirt);
 278			if (written < 0) {
 279				ubifs_scan_destroy(sleb);
 280				return written; /* Error code */
 281			}
 282			tot_written += written;
 283			gap_start = ALIGN(snod->offs + snod->len, 8);
 284		}
 285	}
 286	ubifs_scan_destroy(sleb);
 287	c->ileb_len = c->leb_size;
 288	gap_end = c->leb_size;
 289	/* Try to fill gap */
 290	written = fill_gap(c, lnum, gap_start, gap_end, &dirt);
 291	if (written < 0)
 292		return written; /* Error code */
 293	tot_written += written;
 294	if (tot_written == 0) {
 295		struct ubifs_lprops lp;
 296
 297		dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
 298		err = ubifs_read_one_lp(c, lnum, &lp);
 299		if (err)
 300			return err;
 301		if (lp.free == c->leb_size) {
 302			/*
 303			 * We must have snatched this LEB from the idx_gc list
 304			 * so we need to correct the free and dirty space.
 305			 */
 306			err = ubifs_change_one_lp(c, lnum,
 307						  c->leb_size - c->ileb_len,
 308						  dirt, 0, 0, 0);
 309			if (err)
 310				return err;
 311		}
 312		return 0;
 313	}
 314	err = ubifs_change_one_lp(c, lnum, c->leb_size - c->ileb_len, dirt,
 315				  0, 0, 0);
 316	if (err)
 317		return err;
 318	err = ubifs_leb_change(c, lnum, c->ileb_buf, c->ileb_len);
 319	if (err)
 320		return err;
 321	dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
 322	return tot_written;
 323}
 324
 325/**
 326 * get_leb_cnt - calculate the number of empty LEBs needed to commit.
 327 * @c: UBIFS file-system description object
 328 * @cnt: number of znodes to commit
 329 *
 330 * This function returns the number of empty LEBs needed to commit @cnt znodes
 331 * to the current index head.  The number is not exact and may be more than
 332 * needed.
 333 */
 334static int get_leb_cnt(struct ubifs_info *c, int cnt)
 335{
 336	int d;
 337
 338	/* Assume maximum index node size (i.e. overestimate space needed) */
 339	cnt -= (c->leb_size - c->ihead_offs) / c->max_idx_node_sz;
 340	if (cnt < 0)
 341		cnt = 0;
 342	d = c->leb_size / c->max_idx_node_sz;
 343	return DIV_ROUND_UP(cnt, d);
 344}
 345
 346/**
 347 * layout_in_gaps - in-the-gaps method of committing TNC.
 348 * @c: UBIFS file-system description object
 349 * @cnt: number of dirty znodes to commit.
 350 *
 351 * This function lays out new index nodes for dirty znodes using in-the-gaps
 352 * method of TNC commit.
 353 *
 354 * This function returns %0 on success and a negative error code on failure.
 355 */
 356static int layout_in_gaps(struct ubifs_info *c, int cnt)
 357{
 358	int err, leb_needed_cnt, written, p = 0, old_idx_lebs, *gap_lebs;
 359
 360	dbg_gc("%d znodes to write", cnt);
 361
 362	c->gap_lebs = kmalloc_array(c->lst.idx_lebs + 1, sizeof(int),
 363				    GFP_NOFS);
 364	if (!c->gap_lebs)
 365		return -ENOMEM;
 366
 367	old_idx_lebs = c->lst.idx_lebs;
 368	do {
 369		ubifs_assert(c, p < c->lst.idx_lebs);
 370		written = layout_leb_in_gaps(c, p);
 371		if (written < 0) {
 372			err = written;
 373			if (err != -ENOSPC) {
 374				kfree(c->gap_lebs);
 375				c->gap_lebs = NULL;
 376				return err;
 377			}
 378			if (!dbg_is_chk_index(c)) {
 379				/*
 380				 * Do not print scary warnings if the debugging
 381				 * option which forces in-the-gaps is enabled.
 382				 */
 383				ubifs_warn(c, "out of space");
 384				ubifs_dump_budg(c, &c->bi);
 385				ubifs_dump_lprops(c);
 386			}
 387			/* Try to commit anyway */
 388			break;
 389		}
 390		p++;
 391		cnt -= written;
 392		leb_needed_cnt = get_leb_cnt(c, cnt);
 393		dbg_gc("%d znodes remaining, need %d LEBs, have %d", cnt,
 394		       leb_needed_cnt, c->ileb_cnt);
 395		/*
 396		 * Dynamically change the size of @c->gap_lebs to prevent
 397		 * oob, because @c->lst.idx_lebs could be increased by
 398		 * function @get_idx_gc_leb (called by layout_leb_in_gaps->
 399		 * ubifs_find_dirty_idx_leb) during loop. Only enlarge
 400		 * @c->gap_lebs when needed.
 401		 *
 402		 */
 403		if (leb_needed_cnt > c->ileb_cnt && p >= old_idx_lebs &&
 404		    old_idx_lebs < c->lst.idx_lebs) {
 405			old_idx_lebs = c->lst.idx_lebs;
 406			gap_lebs = krealloc(c->gap_lebs, sizeof(int) *
 407					       (old_idx_lebs + 1), GFP_NOFS);
 408			if (!gap_lebs) {
 409				kfree(c->gap_lebs);
 410				c->gap_lebs = NULL;
 411				return -ENOMEM;
 412			}
 413			c->gap_lebs = gap_lebs;
 414		}
 415	} while (leb_needed_cnt > c->ileb_cnt);
 416
 417	c->gap_lebs[p] = -1;
 418	return 0;
 419}
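Relative to the v5.4 code above, this version grows c->gap_lebs with krealloc() when ubifs_find_dirty_idx_leb() pulls extra index LEBs into c->lst.idx_lebs mid-loop, and it keeps the old pointer until the reallocation succeeds so the array can still be freed on failure. The same grow-safely pattern in userspace C with realloc(), as a sketch:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t cap = 4;
	int *lebs = malloc(cap * sizeof(*lebs));
	int *bigger;

	if (!lebs)
		return 1;

	/* Grow the array without losing the original on failure */
	bigger = realloc(lebs, (cap * 2) * sizeof(*lebs));
	if (!bigger) {
		free(lebs);		/* old block is still valid here */
		return 1;
	}
	lebs = bigger;			/* only overwrite after success */
	cap *= 2;

	printf("array grown to %zu entries\n", cap);
	free(lebs);
	return 0;
}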
 420
 421/**
 422 * layout_in_empty_space - layout index nodes in empty space.
 423 * @c: UBIFS file-system description object
 424 *
 425 * This function lays out new index nodes for dirty znodes using empty LEBs.
 426 *
 427 * This function returns %0 on success and a negative error code on failure.
 428 */
 429static int layout_in_empty_space(struct ubifs_info *c)
 430{
 431	struct ubifs_znode *znode, *cnext, *zp;
 432	int lnum, offs, len, next_len, buf_len, buf_offs, used, avail;
 433	int wlen, blen, err;
 434
 435	cnext = c->enext;
 436	if (!cnext)
 437		return 0;
 438
 439	lnum = c->ihead_lnum;
 440	buf_offs = c->ihead_offs;
 441
 442	buf_len = ubifs_idx_node_sz(c, c->fanout);
 443	buf_len = ALIGN(buf_len, c->min_io_size);
 444	used = 0;
 445	avail = buf_len;
 446
 447	/* Ensure there is enough room for first write */
 448	next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
 449	if (buf_offs + next_len > c->leb_size)
 450		lnum = -1;
 451
 452	while (1) {
 453		znode = cnext;
 454
 455		len = ubifs_idx_node_sz(c, znode->child_cnt);
 456
 457		/* Determine the index node position */
 458		if (lnum == -1) {
 459			if (c->ileb_nxt >= c->ileb_cnt) {
 460				ubifs_err(c, "out of space");
 461				return -ENOSPC;
 462			}
 463			lnum = c->ilebs[c->ileb_nxt++];
 464			buf_offs = 0;
 465			used = 0;
 466			avail = buf_len;
 467		}
 468
 469		offs = buf_offs + used;
 470
 471		znode->lnum = lnum;
 472		znode->offs = offs;
 473		znode->len = len;
 474
 475		/* Update the parent */
 476		zp = znode->parent;
 477		if (zp) {
 478			struct ubifs_zbranch *zbr;
 479			int i;
 480
 481			i = znode->iip;
 482			zbr = &zp->zbranch[i];
 483			zbr->lnum = lnum;
 484			zbr->offs = offs;
 485			zbr->len = len;
 486		} else {
 487			c->zroot.lnum = lnum;
 488			c->zroot.offs = offs;
 489			c->zroot.len = len;
 490		}
 491		c->calc_idx_sz += ALIGN(len, 8);
 492
 493		/*
 494		 * Once lprops is updated, we can decrease the dirty znode count
 495		 * but it is easier to just do it here.
 496		 */
 497		atomic_long_dec(&c->dirty_zn_cnt);
 498
 499		/*
 500		 * Calculate the next index node length to see if there is
 501		 * enough room for it
 502		 */
 503		cnext = znode->cnext;
 504		if (cnext == c->cnext)
 505			next_len = 0;
 506		else
 507			next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
 508
 509		/* Update buffer positions */
 510		wlen = used + len;
 511		used += ALIGN(len, 8);
 512		avail -= ALIGN(len, 8);
 513
 514		if (next_len != 0 &&
 515		    buf_offs + used + next_len <= c->leb_size &&
 516		    avail > 0)
 517			continue;
 518
 519		if (avail <= 0 && next_len &&
 520		    buf_offs + used + next_len <= c->leb_size)
 521			blen = buf_len;
 522		else
 523			blen = ALIGN(wlen, c->min_io_size);
 524
 525		/* The buffer is full or there are no more znodes to do */
 526		buf_offs += blen;
 527		if (next_len) {
 528			if (buf_offs + next_len > c->leb_size) {
 529				err = ubifs_update_one_lp(c, lnum,
 530					c->leb_size - buf_offs, blen - used,
 531					0, 0);
 532				if (err)
 533					return err;
 534				lnum = -1;
 535			}
 536			used -= blen;
 537			if (used < 0)
 538				used = 0;
 539			avail = buf_len - used;
 540			continue;
 541		}
 542		err = ubifs_update_one_lp(c, lnum, c->leb_size - buf_offs,
 543					  blen - used, 0, 0);
 544		if (err)
 545			return err;
 546		break;
 547	}
 548
 549	c->dbg->new_ihead_lnum = lnum;
 550	c->dbg->new_ihead_offs = buf_offs;
 551
 552	return 0;
 553}
 554
 555/**
 556 * layout_commit - determine positions of index nodes to commit.
 557 * @c: UBIFS file-system description object
 558 * @no_space: indicates that insufficient empty LEBs were allocated
 559 * @cnt: number of znodes to commit
 560 *
  561 * Calculate and update the positions of index nodes to commit.  If an
  562 * insufficient number of empty LEBs was allocated, then index nodes are placed
 563 * into the gaps created by obsolete index nodes in non-empty index LEBs.  For
 564 * this purpose, an obsolete index node is one that was not in the index as at
 565 * the end of the last commit.  To write "in-the-gaps" requires that those index
 566 * LEBs are updated atomically in-place.
 567 */
 568static int layout_commit(struct ubifs_info *c, int no_space, int cnt)
 569{
 570	int err;
 571
 572	if (no_space) {
 573		err = layout_in_gaps(c, cnt);
 574		if (err)
 575			return err;
 576	}
 577	err = layout_in_empty_space(c);
 578	return err;
 579}
 580
 581/**
 582 * find_first_dirty - find first dirty znode.
 583 * @znode: znode to begin searching from
 584 */
 585static struct ubifs_znode *find_first_dirty(struct ubifs_znode *znode)
 586{
 587	int i, cont;
 588
 589	if (!znode)
 590		return NULL;
 591
 592	while (1) {
 593		if (znode->level == 0) {
 594			if (ubifs_zn_dirty(znode))
 595				return znode;
 596			return NULL;
 597		}
 598		cont = 0;
 599		for (i = 0; i < znode->child_cnt; i++) {
 600			struct ubifs_zbranch *zbr = &znode->zbranch[i];
 601
 602			if (zbr->znode && ubifs_zn_dirty(zbr->znode)) {
 603				znode = zbr->znode;
 604				cont = 1;
 605				break;
 606			}
 607		}
 608		if (!cont) {
 609			if (ubifs_zn_dirty(znode))
 610				return znode;
 611			return NULL;
 612		}
 613	}
 614}
 615
 616/**
 617 * find_next_dirty - find next dirty znode.
 618 * @znode: znode to begin searching from
 619 */
 620static struct ubifs_znode *find_next_dirty(struct ubifs_znode *znode)
 621{
 622	int n = znode->iip + 1;
 623
 624	znode = znode->parent;
 625	if (!znode)
 626		return NULL;
 627	for (; n < znode->child_cnt; n++) {
 628		struct ubifs_zbranch *zbr = &znode->zbranch[n];
 629
 630		if (zbr->znode && ubifs_zn_dirty(zbr->znode))
 631			return find_first_dirty(zbr->znode);
 632	}
 633	return znode;
 634}
 635
 636/**
 637 * get_znodes_to_commit - create list of dirty znodes to commit.
 638 * @c: UBIFS file-system description object
 639 *
 640 * This function returns the number of znodes to commit.
 641 */
 642static int get_znodes_to_commit(struct ubifs_info *c)
 643{
 644	struct ubifs_znode *znode, *cnext;
 645	int cnt = 0;
 646
 647	c->cnext = find_first_dirty(c->zroot.znode);
 648	znode = c->enext = c->cnext;
 649	if (!znode) {
 650		dbg_cmt("no znodes to commit");
 651		return 0;
 652	}
 653	cnt += 1;
 654	while (1) {
 655		ubifs_assert(c, !ubifs_zn_cow(znode));
 656		__set_bit(COW_ZNODE, &znode->flags);
 657		znode->alt = 0;
 658		cnext = find_next_dirty(znode);
 659		if (!cnext) {
 660			ubifs_assert(c, !znode->parent);
 661			znode->cparent = NULL;
 662			znode->cnext = c->cnext;
 663			break;
 664		}
 665		znode->cparent = znode->parent;
 666		znode->ciip = znode->iip;
 667		znode->cnext = cnext;
 668		znode = cnext;
 669		cnt += 1;
 670	}
 671	dbg_cmt("committing %d znodes", cnt);
 672	ubifs_assert(c, cnt == atomic_long_read(&c->dirty_zn_cnt));
 673	return cnt;
 674}
 675
 676/**
 677 * alloc_idx_lebs - allocate empty LEBs to be used to commit.
 678 * @c: UBIFS file-system description object
 679 * @cnt: number of znodes to commit
 680 *
 681 * This function returns %-ENOSPC if it cannot allocate a sufficient number of
 682 * empty LEBs.  %0 is returned on success, otherwise a negative error code
 683 * is returned.
 684 */
 685static int alloc_idx_lebs(struct ubifs_info *c, int cnt)
 686{
 687	int i, leb_cnt, lnum;
 688
 689	c->ileb_cnt = 0;
 690	c->ileb_nxt = 0;
 691	leb_cnt = get_leb_cnt(c, cnt);
 692	dbg_cmt("need about %d empty LEBS for TNC commit", leb_cnt);
 693	if (!leb_cnt)
 694		return 0;
 695	c->ilebs = kmalloc_array(leb_cnt, sizeof(int), GFP_NOFS);
 696	if (!c->ilebs)
 697		return -ENOMEM;
 698	for (i = 0; i < leb_cnt; i++) {
 699		lnum = ubifs_find_free_leb_for_idx(c);
 700		if (lnum < 0)
 701			return lnum;
 702		c->ilebs[c->ileb_cnt++] = lnum;
 703		dbg_cmt("LEB %d", lnum);
 704	}
 705	if (dbg_is_chk_index(c) && !get_random_u32_below(8))
 706		return -ENOSPC;
 707	return 0;
 708}
 709
 710/**
 711 * free_unused_idx_lebs - free unused LEBs that were allocated for the commit.
 712 * @c: UBIFS file-system description object
 713 *
 714 * It is possible that we allocate more empty LEBs for the commit than we need.
  715 * This function frees the surplus.
 716 *
 717 * This function returns %0 on success and a negative error code on failure.
 718 */
 719static int free_unused_idx_lebs(struct ubifs_info *c)
 720{
 721	int i, err = 0, lnum, er;
 722
 723	for (i = c->ileb_nxt; i < c->ileb_cnt; i++) {
 724		lnum = c->ilebs[i];
 725		dbg_cmt("LEB %d", lnum);
 726		er = ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0,
 727					 LPROPS_INDEX | LPROPS_TAKEN, 0);
 728		if (!err)
 729			err = er;
 730	}
 731	return err;
 732}
 733
 734/**
 735 * free_idx_lebs - free unused LEBs after commit end.
 736 * @c: UBIFS file-system description object
 737 *
 738 * This function returns %0 on success and a negative error code on failure.
 739 */
 740static int free_idx_lebs(struct ubifs_info *c)
 741{
 742	int err;
 743
 744	err = free_unused_idx_lebs(c);
 745	kfree(c->ilebs);
 746	c->ilebs = NULL;
 747	return err;
 748}
 749
 750/**
 751 * ubifs_tnc_start_commit - start TNC commit.
 752 * @c: UBIFS file-system description object
 753 * @zroot: new index root position is returned here
 754 *
 755 * This function prepares the list of indexing nodes to commit and lays out
 756 * their positions on flash. If there is not enough free space it uses the
 757 * in-gap commit method. Returns zero in case of success and a negative error
 758 * code in case of failure.
 759 */
 760int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot)
 761{
 762	int err = 0, cnt;
 763
 764	mutex_lock(&c->tnc_mutex);
 765	err = dbg_check_tnc(c, 1);
 766	if (err)
 767		goto out;
 768	cnt = get_znodes_to_commit(c);
 769	if (cnt != 0) {
 770		int no_space = 0;
 771
 772		err = alloc_idx_lebs(c, cnt);
 773		if (err == -ENOSPC)
 774			no_space = 1;
 775		else if (err)
 776			goto out_free;
 777		err = layout_commit(c, no_space, cnt);
 778		if (err)
 779			goto out_free;
 780		ubifs_assert(c, atomic_long_read(&c->dirty_zn_cnt) == 0);
 781		err = free_unused_idx_lebs(c);
 782		if (err)
 783			goto out;
 784	}
 785	destroy_old_idx(c);
 786	memcpy(zroot, &c->zroot, sizeof(struct ubifs_zbranch));
 787
 788	err = ubifs_save_dirty_idx_lnums(c);
 789	if (err)
 790		goto out;
 791
 792	spin_lock(&c->space_lock);
 793	/*
 794	 * Although we have not finished committing yet, update size of the
 795	 * committed index ('c->bi.old_idx_sz') and zero out the index growth
 796	 * budget. It is OK to do this now, because we've reserved all the
  797	 * space which is needed to commit the index, and it is safe for the
 798	 * budgeting subsystem to assume the index is already committed,
 799	 * even though it is not.
 800	 */
 801	ubifs_assert(c, c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c));
 802	c->bi.old_idx_sz = c->calc_idx_sz;
 803	c->bi.uncommitted_idx = 0;
 804	c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
 805	spin_unlock(&c->space_lock);
 806	mutex_unlock(&c->tnc_mutex);
 807
 808	dbg_cmt("number of index LEBs %d", c->lst.idx_lebs);
 809	dbg_cmt("size of index %llu", c->calc_idx_sz);
 810	return err;
 811
 812out_free:
 813	free_idx_lebs(c);
 814out:
 815	mutex_unlock(&c->tnc_mutex);
 816	return err;
 817}
 818
 819/**
 820 * write_index - write index nodes.
 821 * @c: UBIFS file-system description object
 822 *
 823 * This function writes the index nodes whose positions were laid out in the
 824 * layout_in_empty_space function.
 825 */
 826static int write_index(struct ubifs_info *c)
 827{
 828	struct ubifs_idx_node *idx;
 829	struct ubifs_znode *znode, *cnext;
 830	int i, lnum, offs, len, next_len, buf_len, buf_offs, used;
 831	int avail, wlen, err, lnum_pos = 0, blen, nxt_offs;
 832
 833	cnext = c->enext;
 834	if (!cnext)
 835		return 0;
 836
 837	/*
 838	 * Always write index nodes to the index head so that index nodes and
 839	 * other types of nodes are never mixed in the same erase block.
 840	 */
 841	lnum = c->ihead_lnum;
 842	buf_offs = c->ihead_offs;
 843
 844	/* Allocate commit buffer */
 845	buf_len = ALIGN(c->max_idx_node_sz, c->min_io_size);
 846	used = 0;
 847	avail = buf_len;
 848
 849	/* Ensure there is enough room for first write */
 850	next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
 851	if (buf_offs + next_len > c->leb_size) {
 852		err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0, 0,
 853					  LPROPS_TAKEN);
 854		if (err)
 855			return err;
 856		lnum = -1;
 857	}
 858
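	/*
	 * Note (inferred from the loop below, not stated elsewhere): @lnum
	 * being -1 is used as a sentinel meaning "switch to the next
	 * pre-allocated index LEB from @c->ilebs" before writing the next
	 * node.
	 */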
 859	while (1) {
 860		u8 hash[UBIFS_HASH_ARR_SZ];
 861
 862		cond_resched();
 863
 864		znode = cnext;
 865		idx = c->cbuf + used;
 866
 867		/* Make index node */
 868		idx->ch.node_type = UBIFS_IDX_NODE;
 869		idx->child_cnt = cpu_to_le16(znode->child_cnt);
 870		idx->level = cpu_to_le16(znode->level);
 871		for (i = 0; i < znode->child_cnt; i++) {
 872			struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
 873			struct ubifs_zbranch *zbr = &znode->zbranch[i];
 874
 875			key_write_idx(c, &zbr->key, &br->key);
 876			br->lnum = cpu_to_le32(zbr->lnum);
 877			br->offs = cpu_to_le32(zbr->offs);
 878			br->len = cpu_to_le32(zbr->len);
 879			ubifs_copy_hash(c, zbr->hash, ubifs_branch_hash(c, br));
 880			if (!zbr->lnum || !zbr->len) {
 881				ubifs_err(c, "bad ref in znode");
 882				ubifs_dump_znode(c, znode);
 883				if (zbr->znode)
 884					ubifs_dump_znode(c, zbr->znode);
 885
 886				return -EINVAL;
 887			}
 888		}
 889		len = ubifs_idx_node_sz(c, znode->child_cnt);
 890		ubifs_prepare_node(c, idx, len, 0);
 891		ubifs_node_calc_hash(c, idx, hash);
 892
 893		mutex_lock(&c->tnc_mutex);
 894
 895		if (znode->cparent)
 896			ubifs_copy_hash(c, hash,
 897					znode->cparent->zbranch[znode->ciip].hash);
 898
 899		if (znode->parent) {
 900			if (!ubifs_zn_obsolete(znode))
 901				ubifs_copy_hash(c, hash,
 902					znode->parent->zbranch[znode->iip].hash);
 903		} else {
 904			ubifs_copy_hash(c, hash, c->zroot.hash);
 905		}
 906
 907		mutex_unlock(&c->tnc_mutex);
 908
 909		/* Determine the index node position */
 910		if (lnum == -1) {
 911			lnum = c->ilebs[lnum_pos++];
 912			buf_offs = 0;
 913			used = 0;
 914			avail = buf_len;
 915		}
 916		offs = buf_offs + used;
 917
 918		if (lnum != znode->lnum || offs != znode->offs ||
 919		    len != znode->len) {
 920			ubifs_err(c, "inconsistent znode posn");
 921			return -EINVAL;
 922		}
 923
 924		/* Grab the next znode in the commit list while we still can */
 925		cnext = znode->cnext;
 926
 927		ubifs_assert(c, ubifs_zn_dirty(znode));
 928		ubifs_assert(c, ubifs_zn_cow(znode));
 929
 930		/*
 931		 * It is important that other threads see the %DIRTY_ZNODE
 932		 * flag cleared before %COW_ZNODE. Specifically, it matters in
 933		 * the 'dirty_cow_znode()' function. This is the reason for the
 934		 * first barrier. Also, we want the bit changes to be seen by
 935		 * other threads ASAP, to avoid unnecessary copying, which is
 936		 * the reason for the second barrier.
 937		 */
 938		clear_bit(DIRTY_ZNODE, &znode->flags);
 939		smp_mb__before_atomic();
 940		clear_bit(COW_ZNODE, &znode->flags);
 941		smp_mb__after_atomic();
 942
 943		/*
 944		 * We have marked the znode as clean but have not updated the
 945		 * @c->clean_zn_cnt counter. If this znode becomes dirty again
 946		 * before 'free_obsolete_znodes()' is called, then
 947		 * @c->clean_zn_cnt will be decremented before it gets
 948		 * incremented (resulting in 2 decrements for the same znode).
 949		 * This means that @c->clean_zn_cnt may become negative for a
 950		 * while.
 951		 *
 952		 * Q: why can we not increment @c->clean_zn_cnt here?
 953		 * A: because we do not have the @c->tnc_mutex locked, and the
 954		 *    following code would be racy and buggy:
 955		 *
 956		 *    if (!ubifs_zn_obsolete(znode)) {
 957		 *            atomic_long_inc(&c->clean_zn_cnt);
 958		 *            atomic_long_inc(&ubifs_clean_zn_cnt);
 959		 *    }
 960		 *
 961		 *    Thus, we just delay the @c->clean_zn_cnt update until we
 962		 *    have the mutex locked.
 963		 */
 964
 965		/* Do not access znode from this point on */
 966
 967		/* Update buffer positions */
 968		wlen = used + len;
 969		used += ALIGN(len, 8);
 970		avail -= ALIGN(len, 8);
 971
 972		/*
 973		 * Calculate the next index node length to see if there is
 974		 * enough room for it
 975		 */
 976		if (cnext == c->cnext)
 977			next_len = 0;
 978		else
 979			next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
 980
 981		nxt_offs = buf_offs + used + next_len;
 982		if (next_len && nxt_offs <= c->leb_size) {
 983			if (avail > 0)
 984				continue;
 985			else
 986				blen = buf_len;
 987		} else {
 988			wlen = ALIGN(wlen, 8);
 989			blen = ALIGN(wlen, c->min_io_size);
 990			ubifs_pad(c, c->cbuf + wlen, blen - wlen);
 991		}
 992
 993		/* The buffer is full or there are no more znodes to write */
 994		err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs, blen);
 995		if (err)
 996			return err;
 997		buf_offs += blen;
 998		if (next_len) {
 999			if (nxt_offs > c->leb_size) {
1000				err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0,
1001							  0, LPROPS_TAKEN);
1002				if (err)
1003					return err;
1004				lnum = -1;
1005			}
1006			used -= blen;
1007			if (used < 0)
1008				used = 0;
1009			avail = buf_len - used;
1010			memmove(c->cbuf, c->cbuf + blen, used);
1011			continue;
1012		}
1013		break;
1014	}
1015
1016	if (lnum != c->dbg->new_ihead_lnum ||
1017	    buf_offs != c->dbg->new_ihead_offs) {
1018		ubifs_err(c, "inconsistent ihead");
1019		return -EINVAL;
1020	}
1021
1022	c->ihead_lnum = lnum;
1023	c->ihead_offs = buf_offs;
1024
1025	return 0;
1026}
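
/*
 * Illustrative note on the write loop in write_index() above (a sketch of
 * the buffer handling, not an authoritative description): index nodes are
 * packed into @c->cbuf and flushed in multiples of @c->min_io_size.  Any
 * bytes staged beyond the flushed chunk are slid back to the front of the
 * buffer before packing continues:
 *
 *	before flush:  cbuf = | blen bytes to write | leftover (used - blen) |
 *	after flush:   cbuf = | leftover at offset 0 | free space            |
 */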
1027
1028/**
1029 * free_obsolete_znodes - free obsolete znodes.
1030 * @c: UBIFS file-system description object
1031 *
1032 * At the end of the commit, the obsolete znodes are freed.
1033 */
1034static void free_obsolete_znodes(struct ubifs_info *c)
1035{
1036	struct ubifs_znode *znode, *cnext;
1037
1038	cnext = c->cnext;
1039	do {
1040		znode = cnext;
1041		cnext = znode->cnext;
1042		if (ubifs_zn_obsolete(znode))
1043			kfree(znode);
1044		else {
1045			znode->cnext = NULL;
1046			atomic_long_inc(&c->clean_zn_cnt);
1047			atomic_long_inc(&ubifs_clean_zn_cnt);
1048		}
1049	} while (cnext != c->cnext);
1050}
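
/*
 * Note (an inference from the loop above): the commit list linked through
 * ->cnext appears to be circular, with @c->cnext pointing at its head:
 *
 *	c->cnext -> z1 -> z2 -> ... -> zN -> z1 (back to the head)
 *
 * which is why the do/while above terminates once the walk wraps around to
 * @c->cnext again.
 */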
1051
1052/**
1053 * return_gap_lebs - return LEBs used by the in-gap commit method.
1054 * @c: UBIFS file-system description object
1055 *
1056 * This function clears the "taken" flag for the LEBs which were used by the
1057 * "commit in-the-gaps" method.
1058 */
1059static int return_gap_lebs(struct ubifs_info *c)
1060{
1061	int *p, err;
1062
1063	if (!c->gap_lebs)
1064		return 0;
1065
1066	dbg_cmt("");
1067	for (p = c->gap_lebs; *p != -1; p++) {
1068		err = ubifs_change_one_lp(c, *p, LPROPS_NC, LPROPS_NC, 0,
1069					  LPROPS_TAKEN, 0);
1070		if (err)
1071			return err;
1072	}
1073
1074	kfree(c->gap_lebs);
1075	c->gap_lebs = NULL;
1076	return 0;
1077}
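
/*
 * Illustrative note: @c->gap_lebs is assumed to be a -1 terminated array of
 * LEB numbers filled in by the in-the-gaps layout code, for example:
 *
 *	c->gap_lebs = { 17, 42, 108, -1 }
 *
 * so the loop above walks it up to the -1 sentinel, clears the "taken" flag
 * on each LEB, and then frees the array.
 */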
1078
1079/**
1080 * ubifs_tnc_end_commit - update the TNC for commit end.
1081 * @c: UBIFS file-system description object
1082 *
1083 * This function writes the dirty znodes to flash and frees the obsolete znodes.
1084 */
1085int ubifs_tnc_end_commit(struct ubifs_info *c)
1086{
1087	int err;
1088
1089	if (!c->cnext)
1090		return 0;
1091
1092	err = return_gap_lebs(c);
1093	if (err)
1094		return err;
1095
1096	err = write_index(c);
1097	if (err)
1098		return err;
1099
1100	mutex_lock(&c->tnc_mutex);
1101
1102	dbg_cmt("TNC height is %d", c->zroot.znode->level + 1);
1103
1104	free_obsolete_znodes(c);
1105
1106	c->cnext = NULL;
1107	kfree(c->ilebs);
1108	c->ilebs = NULL;
1109
1110	mutex_unlock(&c->tnc_mutex);
1111
1112	return 0;
1113}