v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2012 Linutronix GmbH
   4 * Copyright (c) 2014 sigma star gmbh
   5 * Author: Richard Weinberger <richard@nod.at>
   6 */
   7
   8#include <linux/crc32.h>
   9#include <linux/bitmap.h>
  10#include "ubi.h"
  11
  12/**
   13 * init_seen - allocate memory for the seen logic, used for debugging.
  14 * @ubi: UBI device description object
  15 */
  16static inline unsigned long *init_seen(struct ubi_device *ubi)
  17{
  18	unsigned long *ret;
  19
  20	if (!ubi_dbg_chk_fastmap(ubi))
  21		return NULL;
  22
  23	ret = kcalloc(BITS_TO_LONGS(ubi->peb_count), sizeof(unsigned long),
  24		      GFP_KERNEL);
  25	if (!ret)
  26		return ERR_PTR(-ENOMEM);
  27
  28	return ret;
  29}
  30
  31/**
  32 * free_seen - free the seen logic integer array.
  33 * @seen: integer array of @ubi->peb_count size
  34 */
  35static inline void free_seen(unsigned long *seen)
  36{
  37	kfree(seen);
  38}
  39
  40/**
  41 * set_seen - mark a PEB as seen.
  42 * @ubi: UBI device description object
   43 * @pnum: The PEB to be marked as seen
  44 * @seen: integer array of @ubi->peb_count size
  45 */
  46static inline void set_seen(struct ubi_device *ubi, int pnum, unsigned long *seen)
  47{
  48	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
  49		return;
  50
  51	set_bit(pnum, seen);
  52}
  53
  54/**
   55 * self_check_seen - check whether all PEBs have been seen by fastmap.
  56 * @ubi: UBI device description object
  57 * @seen: integer array of @ubi->peb_count size
  58 */
  59static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
  60{
  61	int pnum, ret = 0;
  62
  63	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
  64		return 0;
  65
  66	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
  67		if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
  68			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
  69			ret = -EINVAL;
  70		}
  71	}
  72
  73	return ret;
  74}
  75
  76/**
  77 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
  78 * @ubi: UBI device description object
  79 */
  80size_t ubi_calc_fm_size(struct ubi_device *ubi)
  81{
  82	size_t size;
  83
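	/*
	 * Worst-case on-flash layout: the fastmap super block, the fastmap
	 * header, the two scan pools, one EC entry per PEB, the EBA header
	 * plus one __be32 mapping entry per PEB, and a volume header for
	 * every possible volume. The sum is rounded up to full LEBs.
	 */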
  84	size = sizeof(struct ubi_fm_sb) +
  85		sizeof(struct ubi_fm_hdr) +
  86		sizeof(struct ubi_fm_scan_pool) +
  87		sizeof(struct ubi_fm_scan_pool) +
  88		(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
  89		(sizeof(struct ubi_fm_eba) +
  90		(ubi->peb_count * sizeof(__be32))) +
  91		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
  92	return roundup(size, ubi->leb_size);
  93}
  94
  95
  96/**
   97 * new_fm_vbuf - allocate a new VID buffer for fastmap usage.
  98 * @ubi: UBI device description object
  99 * @vol_id: the VID of the new header
 100 *
  101 * Returns a new struct ubi_vid_io_buf on success.
 102 * NULL indicates out of memory.
 103 */
 104static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
 105{
 106	struct ubi_vid_io_buf *new;
 107	struct ubi_vid_hdr *vh;
 108
 109	new = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
 110	if (!new)
 111		goto out;
 112
 113	vh = ubi_get_vid_hdr(new);
 114	vh->vol_type = UBI_VID_DYNAMIC;
 115	vh->vol_id = cpu_to_be32(vol_id);
 116
 117	/* UBI implementations without fastmap support have to delete the
 118	 * fastmap.
 119	 */
 120	vh->compat = UBI_COMPAT_DELETE;
 121
 122out:
 123	return new;
 124}
 125
 126/**
  127 * add_aeb - create and add an attach erase block to a given list.
 128 * @ai: UBI attach info object
 129 * @list: the target list
 130 * @pnum: PEB number of the new attach erase block
  131 * @ec: erase counter of the new PEB
 132 * @scrub: scrub this PEB after attaching
 133 *
 134 * Returns 0 on success, < 0 indicates an internal error.
 135 */
 136static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
 137		   int pnum, int ec, int scrub)
 138{
 139	struct ubi_ainf_peb *aeb;
 140
 141	aeb = ubi_alloc_aeb(ai, pnum, ec);
 142	if (!aeb)
 143		return -ENOMEM;
 144
 145	aeb->lnum = -1;
 146	aeb->scrub = scrub;
 147	aeb->copy_flag = aeb->sqnum = 0;
 148
 149	ai->ec_sum += aeb->ec;
 150	ai->ec_count++;
 151
 152	if (ai->max_ec < aeb->ec)
 153		ai->max_ec = aeb->ec;
 154
 155	if (ai->min_ec > aeb->ec)
 156		ai->min_ec = aeb->ec;
 157
 158	list_add_tail(&aeb->u.list, list);
 159
 160	return 0;
 161}
 162
 163/**
 164 * add_vol - create and add a new volume to ubi_attach_info.
 165 * @ai: ubi_attach_info object
 166 * @vol_id: VID of the new volume
  167 * @used_ebs: number of used EBs
 168 * @data_pad: data padding value of the new volume
 169 * @vol_type: volume type
 170 * @last_eb_bytes: number of bytes in the last LEB
 171 *
 172 * Returns the new struct ubi_ainf_volume on success.
 173 * NULL indicates an error.
 174 */
 175static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
 176				       int used_ebs, int data_pad, u8 vol_type,
 177				       int last_eb_bytes)
 178{
 179	struct ubi_ainf_volume *av;
 180
 181	av = ubi_add_av(ai, vol_id);
 182	if (IS_ERR(av))
 183		return av;
 184
 185	av->data_pad = data_pad;
 186	av->last_data_size = last_eb_bytes;
 187	av->compat = 0;
 188	av->vol_type = vol_type;
 189	if (av->vol_type == UBI_STATIC_VOLUME)
 190		av->used_ebs = used_ebs;
 191
 192	dbg_bld("found volume (ID %i)", vol_id);
 193	return av;
 194}
 195
 196/**
 197 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
  198 * from its original list.
 199 * @ai: ubi_attach_info object
  200 * @aeb: the SEB to be assigned
 201 * @av: target scan volume
 202 */
 203static void assign_aeb_to_av(struct ubi_attach_info *ai,
 204			     struct ubi_ainf_peb *aeb,
 205			     struct ubi_ainf_volume *av)
 206{
 207	struct ubi_ainf_peb *tmp_aeb;
 208	struct rb_node **p = &av->root.rb_node, *parent = NULL;
 209
 210	while (*p) {
 211		parent = *p;
 212
 213		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
 214		if (aeb->lnum != tmp_aeb->lnum) {
 215			if (aeb->lnum < tmp_aeb->lnum)
 216				p = &(*p)->rb_left;
 217			else
 218				p = &(*p)->rb_right;
 219
 220			continue;
 221		} else
 222			break;
 223	}
 224
 225	list_del(&aeb->u.list);
 226	av->leb_count++;
 227
 228	rb_link_node(&aeb->u.rb, parent, p);
 229	rb_insert_color(&aeb->u.rb, &av->root);
 230}
 231
 232/**
  233 * update_vol - inserts or updates a LEB which was found in a pool.
 234 * @ubi: the UBI device object
 235 * @ai: attach info object
 236 * @av: the volume this LEB belongs to
 237 * @new_vh: the volume header derived from new_aeb
 238 * @new_aeb: the AEB to be examined
 239 *
 240 * Returns 0 on success, < 0 indicates an internal error.
 241 */
 242static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
 243		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
 244		      struct ubi_ainf_peb *new_aeb)
 245{
 246	struct rb_node **p = &av->root.rb_node, *parent = NULL;
 247	struct ubi_ainf_peb *aeb, *victim;
 248	int cmp_res;
 249
 250	while (*p) {
 251		parent = *p;
 252		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
 253
 254		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
 255			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
 256				p = &(*p)->rb_left;
 257			else
 258				p = &(*p)->rb_right;
 259
 260			continue;
 261		}
 262
 263		/* This case can happen if the fastmap gets written
 264		 * because of a volume change (creation, deletion, ..).
 265		 * Then a PEB can be within the persistent EBA and the pool.
 266		 */
 267		if (aeb->pnum == new_aeb->pnum) {
 268			ubi_assert(aeb->lnum == new_aeb->lnum);
 269			ubi_free_aeb(ai, new_aeb);
 270
 271			return 0;
 272		}
 273
 274		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
 275		if (cmp_res < 0)
 276			return cmp_res;
 277
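		/*
		 * ubi_compare_lebs() encodes its verdict in a bitmask:
		 * bit 0 set means the copy on new_aeb->pnum is the more
		 * recent one, bit 1 set means bit-flips were seen in the
		 * newer copy.
		 */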
 278		/* new_aeb is newer */
 279		if (cmp_res & 1) {
 280			victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
 281			if (!victim)
 282				return -ENOMEM;
 283
 284			list_add_tail(&victim->u.list, &ai->erase);
 285
 286			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
 287				av->last_data_size =
 288					be32_to_cpu(new_vh->data_size);
 289
 290			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
 291				av->vol_id, aeb->lnum, new_aeb->pnum);
 292
 293			aeb->ec = new_aeb->ec;
 294			aeb->pnum = new_aeb->pnum;
 295			aeb->copy_flag = new_vh->copy_flag;
 296			aeb->scrub = new_aeb->scrub;
 297			aeb->sqnum = new_aeb->sqnum;
 298			ubi_free_aeb(ai, new_aeb);
 299
 300		/* new_aeb is older */
 301		} else {
 302			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
 303				av->vol_id, aeb->lnum, new_aeb->pnum);
 304			list_add_tail(&new_aeb->u.list, &ai->erase);
 305		}
 306
 307		return 0;
 308	}
 309	/* This LEB is new, let's add it to the volume */
 310
 311	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
 312		av->highest_lnum = be32_to_cpu(new_vh->lnum);
 313		av->last_data_size = be32_to_cpu(new_vh->data_size);
 314	}
 315
 316	if (av->vol_type == UBI_STATIC_VOLUME)
 317		av->used_ebs = be32_to_cpu(new_vh->used_ebs);
 318
 319	av->leb_count++;
 320
 321	rb_link_node(&new_aeb->u.rb, parent, p);
 322	rb_insert_color(&new_aeb->u.rb, &av->root);
 323
 324	return 0;
 325}
 326
 327/**
 328 * process_pool_aeb - we found a non-empty PEB in a pool.
 329 * @ubi: UBI device object
 330 * @ai: attach info object
 331 * @new_vh: the volume header derived from new_aeb
 332 * @new_aeb: the AEB to be examined
 333 *
 334 * Returns 0 on success, < 0 indicates an internal error.
 335 */
 336static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
 337			    struct ubi_vid_hdr *new_vh,
 338			    struct ubi_ainf_peb *new_aeb)
 339{
 340	int vol_id = be32_to_cpu(new_vh->vol_id);
 341	struct ubi_ainf_volume *av;
 342
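	/*
	 * PEBs holding the fastmap itself (super block or data) do not
	 * belong to any user volume; simply drop them.
	 */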
 343	if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) {
 344		ubi_free_aeb(ai, new_aeb);
 345
 346		return 0;
 347	}
 348
 349	/* Find the volume this SEB belongs to */
 350	av = ubi_find_av(ai, vol_id);
 351	if (!av) {
 352		ubi_err(ubi, "orphaned volume in fastmap pool!");
 353		ubi_free_aeb(ai, new_aeb);
 354		return UBI_BAD_FASTMAP;
 355	}
 356
 357	ubi_assert(vol_id == av->vol_id);
 358
 359	return update_vol(ubi, ai, av, new_vh, new_aeb);
 360}
 361
 362/**
 363 * unmap_peb - unmap a PEB.
 364 * If fastmap detects a free PEB in the pool it has to check whether
 365 * this PEB has been unmapped after writing the fastmap.
 366 *
 367 * @ai: UBI attach info object
 368 * @pnum: The PEB to be unmapped
 369 */
 370static void unmap_peb(struct ubi_attach_info *ai, int pnum)
 371{
 372	struct ubi_ainf_volume *av;
 373	struct rb_node *node, *node2;
 374	struct ubi_ainf_peb *aeb;
 375
 376	ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
 377		ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
 378			if (aeb->pnum == pnum) {
 379				rb_erase(&aeb->u.rb, &av->root);
 380				av->leb_count--;
 381				ubi_free_aeb(ai, aeb);
 382				return;
 383			}
 384		}
 385	}
 386}
 387
 388/**
  389 * scan_pool - scans a pool for changed (no longer empty) PEBs.
 390 * @ubi: UBI device object
 391 * @ai: attach info object
  392 * @pebs: an array of all PEB numbers in the pool to be scanned
 393 * @pool_size: size of the pool (number of entries in @pebs)
 394 * @max_sqnum: pointer to the maximal sequence number
 395 * @free: list of PEBs which are most likely free (and go into @ai->free)
 396 *
  397 * Returns 0 on success; if the pool is unusable, UBI_BAD_FASTMAP is returned.
 398 * < 0 indicates an internal error.
 399 */
 400static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
 401		     __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
 402		     struct list_head *free)
 403{
 404	struct ubi_vid_io_buf *vb;
 405	struct ubi_vid_hdr *vh;
 406	struct ubi_ec_hdr *ech;
 407	struct ubi_ainf_peb *new_aeb;
 408	int i, pnum, err, ret = 0;
 409
 410	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
 411	if (!ech)
 412		return -ENOMEM;
 413
 414	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
 415	if (!vb) {
 416		kfree(ech);
 417		return -ENOMEM;
 418	}
 419
 420	vh = ubi_get_vid_hdr(vb);
 421
 422	dbg_bld("scanning fastmap pool: size = %i", pool_size);
 423
 424	/*
 425	 * Now scan all PEBs in the pool to find changes which have been made
 426	 * after the creation of the fastmap
 427	 */
 428	for (i = 0; i < pool_size; i++) {
 429		int scrub = 0;
 430		int image_seq;
 431
 432		pnum = be32_to_cpu(pebs[i]);
 433
 434		if (ubi_io_is_bad(ubi, pnum)) {
 435			ubi_err(ubi, "bad PEB in fastmap pool!");
 436			ret = UBI_BAD_FASTMAP;
 437			goto out;
 438		}
 439
 440		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
 441		if (err && err != UBI_IO_BITFLIPS) {
 442			ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
 443				pnum, err);
 444			ret = err > 0 ? UBI_BAD_FASTMAP : err;
 445			goto out;
 446		} else if (err == UBI_IO_BITFLIPS)
 447			scrub = 1;
 448
 449		/*
 450		 * Older UBI implementations have image_seq set to zero, so
 451		 * we shouldn't fail if image_seq == 0.
 452		 */
 453		image_seq = be32_to_cpu(ech->image_seq);
 454
 455		if (image_seq && (image_seq != ubi->image_seq)) {
 456			ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
 457				be32_to_cpu(ech->image_seq), ubi->image_seq);
 458			ret = UBI_BAD_FASTMAP;
 459			goto out;
 460		}
 461
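		/*
		 * Three outcomes: an empty (0xFF) VID area means the PEB
		 * was unmapped or erased after the fastmap was written and
		 * is free now; a readable VID header means the PEB carries
		 * data newer than the fastmap and must be merged into the
		 * attach info; any other error makes the pool unusable.
		 */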
 462		err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
 463		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
 464			unsigned long long ec = be64_to_cpu(ech->ec);
 465			unmap_peb(ai, pnum);
 466			dbg_bld("Adding PEB to free: %i", pnum);
 467
 468			if (err == UBI_IO_FF_BITFLIPS)
 469				scrub = 1;
 470
 471			add_aeb(ai, free, pnum, ec, scrub);
 472			continue;
 473		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
 474			dbg_bld("Found non empty PEB:%i in pool", pnum);
 475
 476			if (err == UBI_IO_BITFLIPS)
 477				scrub = 1;
 478
 479			new_aeb = ubi_alloc_aeb(ai, pnum, be64_to_cpu(ech->ec));
 480			if (!new_aeb) {
 481				ret = -ENOMEM;
 482				goto out;
 483			}
 484
 485			new_aeb->lnum = be32_to_cpu(vh->lnum);
 486			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
 487			new_aeb->copy_flag = vh->copy_flag;
 488			new_aeb->scrub = scrub;
 489
 490			if (*max_sqnum < new_aeb->sqnum)
 491				*max_sqnum = new_aeb->sqnum;
 492
 493			err = process_pool_aeb(ubi, ai, vh, new_aeb);
 494			if (err) {
 495				ret = err > 0 ? UBI_BAD_FASTMAP : err;
 496				goto out;
 497			}
 498		} else {
 499			/* We are paranoid and fall back to scanning mode */
  500			ubi_err(ubi, "fastmap pool contains damaged PEBs!");
 501			ret = err > 0 ? UBI_BAD_FASTMAP : err;
 502			goto out;
 503		}
 504
 505	}
 506
 507out:
 508	ubi_free_vid_buf(vb);
 509	kfree(ech);
 510	return ret;
 511}
 512
 513/**
 514 * count_fastmap_pebs - Counts the PEBs found by fastmap.
 515 * @ai: The UBI attach info object
 516 */
 517static int count_fastmap_pebs(struct ubi_attach_info *ai)
 518{
 519	struct ubi_ainf_peb *aeb;
 520	struct ubi_ainf_volume *av;
 521	struct rb_node *rb1, *rb2;
 522	int n = 0;
 523
 524	list_for_each_entry(aeb, &ai->erase, u.list)
 525		n++;
 526
 527	list_for_each_entry(aeb, &ai->free, u.list)
 528		n++;
 529
 530	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
 531		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
 532			n++;
 533
 534	return n;
 535}
 536
 537/**
 538 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
 539 * @ubi: UBI device object
 540 * @ai: UBI attach info object
 541 * @fm: the fastmap to be attached
 542 *
 543 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 544 * < 0 indicates an internal error.
 545 */
 546static int ubi_attach_fastmap(struct ubi_device *ubi,
 547			      struct ubi_attach_info *ai,
 548			      struct ubi_fastmap_layout *fm)
 549{
 550	struct list_head used, free;
 551	struct ubi_ainf_volume *av;
 552	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
 553	struct ubi_fm_sb *fmsb;
 554	struct ubi_fm_hdr *fmhdr;
 555	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
 556	struct ubi_fm_ec *fmec;
 557	struct ubi_fm_volhdr *fmvhdr;
 558	struct ubi_fm_eba *fm_eba;
 559	int ret, i, j, pool_size, wl_pool_size;
 560	size_t fm_pos = 0, fm_size = ubi->fm_size;
 561	unsigned long long max_sqnum = 0;
 562	void *fm_raw = ubi->fm_buf;
 563
 564	INIT_LIST_HEAD(&used);
 565	INIT_LIST_HEAD(&free);
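	/*
	 * The fastmap is parsed sequentially out of ubi->fm_buf: super
	 * block, header, the two pools, the free/used/scrub/erase EC
	 * lists and finally one volume header plus EBA table per volume.
	 * Every step advances fm_pos and is bounds-checked against
	 * fm_size.
	 */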
 566	ai->min_ec = UBI_MAX_ERASECOUNTER;
 567
 568	fmsb = (struct ubi_fm_sb *)(fm_raw);
 569	ai->max_sqnum = fmsb->sqnum;
 570	fm_pos += sizeof(struct ubi_fm_sb);
 571	if (fm_pos >= fm_size)
 572		goto fail_bad;
 573
 574	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
 575	fm_pos += sizeof(*fmhdr);
 576	if (fm_pos >= fm_size)
 577		goto fail_bad;
 578
 579	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
 580		ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
 581			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
 582		goto fail_bad;
 583	}
 584
 585	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
 586	fm_pos += sizeof(*fmpl);
 587	if (fm_pos >= fm_size)
 588		goto fail_bad;
 589	if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
 590		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
 591			be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
 592		goto fail_bad;
 593	}
 594
 595	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
 596	fm_pos += sizeof(*fmpl_wl);
 597	if (fm_pos >= fm_size)
 598		goto fail_bad;
 599	if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
 600		ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
 601			be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
 602		goto fail_bad;
 603	}
 604
 605	pool_size = be16_to_cpu(fmpl->size);
 606	wl_pool_size = be16_to_cpu(fmpl_wl->size);
 607	fm->max_pool_size = be16_to_cpu(fmpl->max_size);
 608	fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);
 609
 610	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
 611		ubi_err(ubi, "bad pool size: %i", pool_size);
 612		goto fail_bad;
 613	}
 614
 615	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
 616		ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
 617		goto fail_bad;
 618	}
 619
 620
 621	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
 622	    fm->max_pool_size < 0) {
 623		ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
 624		goto fail_bad;
 625	}
 626
 627	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
 628	    fm->max_wl_pool_size < 0) {
 629		ubi_err(ubi, "bad maximal WL pool size: %i",
 630			fm->max_wl_pool_size);
 631		goto fail_bad;
 632	}
 633
 634	/* read EC values from free list */
 635	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
 636		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
 637		fm_pos += sizeof(*fmec);
 638		if (fm_pos >= fm_size)
 639			goto fail_bad;
 640
 641		add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
 642			be32_to_cpu(fmec->ec), 0);
 643	}
 644
 645	/* read EC values from used list */
 646	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
 647		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
 648		fm_pos += sizeof(*fmec);
 649		if (fm_pos >= fm_size)
 650			goto fail_bad;
 651
 652		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
 653			be32_to_cpu(fmec->ec), 0);
 654	}
 655
 656	/* read EC values from scrub list */
 657	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
 658		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
 659		fm_pos += sizeof(*fmec);
 660		if (fm_pos >= fm_size)
 661			goto fail_bad;
 662
 663		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
 664			be32_to_cpu(fmec->ec), 1);
 665	}
 666
 667	/* read EC values from erase list */
 668	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
 669		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
 670		fm_pos += sizeof(*fmec);
 671		if (fm_pos >= fm_size)
 672			goto fail_bad;
 673
 674		add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
 675			be32_to_cpu(fmec->ec), 1);
 676	}
 677
 678	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
 679	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
 680
 681	/* Iterate over all volumes and read their EBA table */
 682	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
 683		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
 684		fm_pos += sizeof(*fmvhdr);
 685		if (fm_pos >= fm_size)
 686			goto fail_bad;
 687
 688		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
 689			ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
 690				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
 691			goto fail_bad;
 692		}
 693
 694		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
 695			     be32_to_cpu(fmvhdr->used_ebs),
 696			     be32_to_cpu(fmvhdr->data_pad),
 697			     fmvhdr->vol_type,
 698			     be32_to_cpu(fmvhdr->last_eb_bytes));
 699
 700		if (IS_ERR(av)) {
 701			if (PTR_ERR(av) == -EEXIST)
 702				ubi_err(ubi, "volume (ID %i) already exists",
  703					be32_to_cpu(fmvhdr->vol_id));
 704
 705			goto fail_bad;
 706		}
 707
 708		ai->vols_found++;
 709		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
 710			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
 711
 712		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
 713		fm_pos += sizeof(*fm_eba);
 714		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
 715		if (fm_pos >= fm_size)
 716			goto fail_bad;
 717
 718		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
 719			ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
 720				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
 721			goto fail_bad;
 722		}
 723
 724		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
 725			int pnum = be32_to_cpu(fm_eba->pnum[j]);
 726
 727			if (pnum < 0)
 728				continue;
 729
 730			aeb = NULL;
 731			list_for_each_entry(tmp_aeb, &used, u.list) {
 732				if (tmp_aeb->pnum == pnum) {
 733					aeb = tmp_aeb;
 734					break;
 735				}
 736			}
 737
 738			if (!aeb) {
 739				ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
 740				goto fail_bad;
 741			}
 742
 743			aeb->lnum = j;
 744
 745			if (av->highest_lnum <= aeb->lnum)
 746				av->highest_lnum = aeb->lnum;
 747
 748			assign_aeb_to_av(ai, aeb, av);
 749
 750			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
 751				aeb->pnum, aeb->lnum, av->vol_id);
 752		}
 753	}
 754
 755	ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
 756	if (ret)
 757		goto fail;
 758
 759	ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
 760	if (ret)
 761		goto fail;
 762
 763	if (max_sqnum > ai->max_sqnum)
 764		ai->max_sqnum = max_sqnum;
 765
 766	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
 767		list_move_tail(&tmp_aeb->u.list, &ai->free);
 768
 769	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
 770		list_move_tail(&tmp_aeb->u.list, &ai->erase);
 771
 772	ubi_assert(list_empty(&free));
 773
 774	/*
 775	 * If fastmap is leaking PEBs (must not happen), raise a
 776	 * fat warning and fall back to scanning mode.
 777	 * We do this here because in ubi_wl_init() it's too late
 778	 * and we cannot fall back to scanning.
 779	 */
 780	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
 781		    ai->bad_peb_count - fm->used_blocks))
 782		goto fail_bad;
 783
 784	return 0;
 785
 786fail_bad:
 787	ret = UBI_BAD_FASTMAP;
 788fail:
 789	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
 790		list_del(&tmp_aeb->u.list);
 791		ubi_free_aeb(ai, tmp_aeb);
 792	}
 793	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
 794		list_del(&tmp_aeb->u.list);
 795		ubi_free_aeb(ai, tmp_aeb);
 796	}
 797
 798	return ret;
 799}
 800
 801/**
 802 * find_fm_anchor - find the most recent Fastmap superblock (anchor)
 803 * @ai: UBI attach info to be filled
 804 */
 805static int find_fm_anchor(struct ubi_attach_info *ai)
 806{
 807	int ret = -1;
 808	struct ubi_ainf_peb *aeb;
 809	unsigned long long max_sqnum = 0;
 810
 811	list_for_each_entry(aeb, &ai->fastmap, u.list) {
 812		if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
 813			max_sqnum = aeb->sqnum;
 814			ret = aeb->pnum;
 815		}
 816	}
 817
 818	return ret;
 819}
 820
 821static struct ubi_ainf_peb *clone_aeb(struct ubi_attach_info *ai,
 822				      struct ubi_ainf_peb *old)
 823{
 824	struct ubi_ainf_peb *new;
 825
 826	new = ubi_alloc_aeb(ai, old->pnum, old->ec);
 827	if (!new)
 828		return NULL;
 829
 830	new->vol_id = old->vol_id;
 831	new->sqnum = old->sqnum;
 832	new->lnum = old->lnum;
 833	new->scrub = old->scrub;
 834	new->copy_flag = old->copy_flag;
 835
 836	return new;
 837}
 838
 839/**
 840 * ubi_scan_fastmap - scan the fastmap.
 841 * @ubi: UBI device object
 842 * @ai: UBI attach info to be filled
 843 * @scan_ai: UBI attach info from the first 64 PEBs,
 844 *           used to find the most recent Fastmap data structure
 845 *
 846 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
 847 * UBI_BAD_FASTMAP if one was found but is not usable.
 848 * < 0 indicates an internal error.
 849 */
 850int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
 851		     struct ubi_attach_info *scan_ai)
 852{
 853	struct ubi_fm_sb *fmsb, *fmsb2;
 854	struct ubi_vid_io_buf *vb;
 855	struct ubi_vid_hdr *vh;
 856	struct ubi_ec_hdr *ech;
 857	struct ubi_fastmap_layout *fm;
 858	struct ubi_ainf_peb *aeb;
 859	int i, used_blocks, pnum, fm_anchor, ret = 0;
 860	size_t fm_size;
 861	__be32 crc, tmp_crc;
 862	unsigned long long sqnum = 0;
 863
 864	fm_anchor = find_fm_anchor(scan_ai);
 865	if (fm_anchor < 0)
 866		return UBI_NO_FASTMAP;
 867
 868	/* Copy all (possible) fastmap blocks into our new attach structure. */
 869	list_for_each_entry(aeb, &scan_ai->fastmap, u.list) {
 870		struct ubi_ainf_peb *new;
 871
 872		new = clone_aeb(ai, aeb);
 873		if (!new)
 874			return -ENOMEM;
 875
 876		list_add(&new->u.list, &ai->fastmap);
 877	}
 878
 879	down_write(&ubi->fm_protect);
 880	memset(ubi->fm_buf, 0, ubi->fm_size);
 881
 882	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
 883	if (!fmsb) {
 884		ret = -ENOMEM;
 885		goto out;
 886	}
 887
 888	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
 889	if (!fm) {
 890		ret = -ENOMEM;
 891		kfree(fmsb);
 892		goto out;
 893	}
 894
 895	ret = ubi_io_read_data(ubi, fmsb, fm_anchor, 0, sizeof(*fmsb));
 896	if (ret && ret != UBI_IO_BITFLIPS)
 897		goto free_fm_sb;
 898	else if (ret == UBI_IO_BITFLIPS)
 899		fm->to_be_tortured[0] = 1;
 900
 901	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
 902		ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
 903			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
 904		ret = UBI_BAD_FASTMAP;
 905		goto free_fm_sb;
 906	}
 907
 908	if (fmsb->version != UBI_FM_FMT_VERSION) {
 909		ubi_err(ubi, "bad fastmap version: %i, expected: %i",
 910			fmsb->version, UBI_FM_FMT_VERSION);
 911		ret = UBI_BAD_FASTMAP;
 912		goto free_fm_sb;
 913	}
 914
 915	used_blocks = be32_to_cpu(fmsb->used_blocks);
 916	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
 917		ubi_err(ubi, "number of fastmap blocks is invalid: %i",
 918			used_blocks);
 919		ret = UBI_BAD_FASTMAP;
 920		goto free_fm_sb;
 921	}
 922
 923	fm_size = ubi->leb_size * used_blocks;
 924	if (fm_size != ubi->fm_size) {
 925		ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
 926			fm_size, ubi->fm_size);
 927		ret = UBI_BAD_FASTMAP;
 928		goto free_fm_sb;
 929	}
 930
 931	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
 932	if (!ech) {
 933		ret = -ENOMEM;
 934		goto free_fm_sb;
 935	}
 936
 937	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
 938	if (!vb) {
 939		ret = -ENOMEM;
 940		goto free_hdr;
 941	}
 942
 943	vh = ubi_get_vid_hdr(vb);
 944
 945	for (i = 0; i < used_blocks; i++) {
 946		int image_seq;
 947
 948		pnum = be32_to_cpu(fmsb->block_loc[i]);
 949
 950		if (ubi_io_is_bad(ubi, pnum)) {
 951			ret = UBI_BAD_FASTMAP;
 952			goto free_hdr;
 953		}
 954
 955		if (i == 0 && pnum != fm_anchor) {
 956			ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
 957				pnum, fm_anchor);
 958			ret = UBI_BAD_FASTMAP;
 959			goto free_hdr;
 960		}
 961
 962		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
 963		if (ret && ret != UBI_IO_BITFLIPS) {
 964			ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
 965				i, pnum);
 966			if (ret > 0)
 967				ret = UBI_BAD_FASTMAP;
 968			goto free_hdr;
 969		} else if (ret == UBI_IO_BITFLIPS)
 970			fm->to_be_tortured[i] = 1;
 971
 972		image_seq = be32_to_cpu(ech->image_seq);
 973		if (!ubi->image_seq)
 974			ubi->image_seq = image_seq;
 975
 976		/*
 977		 * Older UBI implementations have image_seq set to zero, so
 978		 * we shouldn't fail if image_seq == 0.
 979		 */
 980		if (image_seq && (image_seq != ubi->image_seq)) {
 981			ubi_err(ubi, "wrong image seq:%d instead of %d",
 982				be32_to_cpu(ech->image_seq), ubi->image_seq);
 983			ret = UBI_BAD_FASTMAP;
 984			goto free_hdr;
 985		}
 986
 987		ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
 988		if (ret && ret != UBI_IO_BITFLIPS) {
 989			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
 990				i, pnum);
 991			goto free_hdr;
 992		}
 993
 994		if (i == 0) {
 995			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
 996				ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
 997					be32_to_cpu(vh->vol_id),
 998					UBI_FM_SB_VOLUME_ID);
 999				ret = UBI_BAD_FASTMAP;
1000				goto free_hdr;
1001			}
1002		} else {
1003			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
1004				ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
1005					be32_to_cpu(vh->vol_id),
1006					UBI_FM_DATA_VOLUME_ID);
1007				ret = UBI_BAD_FASTMAP;
1008				goto free_hdr;
1009			}
1010		}
1011
1012		if (sqnum < be64_to_cpu(vh->sqnum))
1013			sqnum = be64_to_cpu(vh->sqnum);
1014
1015		ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i),
1016				       pnum, 0, ubi->leb_size);
1017		if (ret && ret != UBI_IO_BITFLIPS) {
1018			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
1019				"err: %i)", i, pnum, ret);
1020			goto free_hdr;
1021		}
1022	}
1023
1024	kfree(fmsb);
1025	fmsb = NULL;
1026
1027	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
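	/*
	 * The CRC was computed at write time with the data_crc field
	 * itself zeroed (see ubi_write_fastmap()), so zero it again
	 * before recomputing.
	 */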
1028	tmp_crc = be32_to_cpu(fmsb2->data_crc);
1029	fmsb2->data_crc = 0;
1030	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
1031	if (crc != tmp_crc) {
1032		ubi_err(ubi, "fastmap data CRC is invalid");
1033		ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
1034			tmp_crc, crc);
1035		ret = UBI_BAD_FASTMAP;
1036		goto free_hdr;
1037	}
1038
1039	fmsb2->sqnum = sqnum;
1040
1041	fm->used_blocks = used_blocks;
1042
1043	ret = ubi_attach_fastmap(ubi, ai, fm);
1044	if (ret) {
1045		if (ret > 0)
1046			ret = UBI_BAD_FASTMAP;
1047		goto free_hdr;
1048	}
1049
1050	for (i = 0; i < used_blocks; i++) {
1051		struct ubi_wl_entry *e;
1052
1053		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1054		if (!e) {
1055			while (i--)
1056				kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);
1057
1058			ret = -ENOMEM;
1059			goto free_hdr;
1060		}
1061
1062		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
1063		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
1064		fm->e[i] = e;
1065	}
1066
1067	ubi->fm = fm;
1068	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
1069	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
1070	ubi_msg(ubi, "attached by fastmap");
1071	ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
1072	ubi_msg(ubi, "fastmap WL pool size: %d",
1073		ubi->fm_wl_pool.max_size);
1074	ubi->fm_disabled = 0;
1075	ubi->fast_attach = 1;
1076
1077	ubi_free_vid_buf(vb);
1078	kfree(ech);
1079out:
1080	up_write(&ubi->fm_protect);
1081	if (ret == UBI_BAD_FASTMAP)
1082		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
1083	return ret;
1084
1085free_hdr:
1086	ubi_free_vid_buf(vb);
1087	kfree(ech);
1088free_fm_sb:
1089	kfree(fmsb);
1090	kfree(fm);
1091	goto out;
1092}
1093
1094int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count)
1095{
1096	struct ubi_device *ubi = vol->ubi;
1097
1098	if (!ubi->fast_attach)
1099		return 0;
1100
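	/*
	 * After a fastmap attach the LEB to PEB mappings were taken on
	 * trust, without reading VID headers. The checkmap holds one bit
	 * per LEB so that each mapping is verified at most once, on its
	 * first use.
	 */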
1101	vol->checkmap = kcalloc(BITS_TO_LONGS(leb_count), sizeof(unsigned long),
1102				GFP_KERNEL);
1103	if (!vol->checkmap)
1104		return -ENOMEM;
1105
1106	return 0;
1107}
1108
1109void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol)
1110{
1111	kfree(vol->checkmap);
1112}
1113
1114/**
1115 * ubi_write_fastmap - writes a fastmap.
1116 * @ubi: UBI device object
 1117 * @new_fm: the fastmap to be written
1118 *
1119 * Returns 0 on success, < 0 indicates an internal error.
1120 */
1121static int ubi_write_fastmap(struct ubi_device *ubi,
1122			     struct ubi_fastmap_layout *new_fm)
1123{
1124	size_t fm_pos = 0;
1125	void *fm_raw;
1126	struct ubi_fm_sb *fmsb;
1127	struct ubi_fm_hdr *fmh;
1128	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
1129	struct ubi_fm_ec *fec;
1130	struct ubi_fm_volhdr *fvh;
1131	struct ubi_fm_eba *feba;
1132	struct ubi_wl_entry *wl_e;
1133	struct ubi_volume *vol;
1134	struct ubi_vid_io_buf *avbuf, *dvbuf;
1135	struct ubi_vid_hdr *avhdr, *dvhdr;
1136	struct ubi_work *ubi_wrk;
1137	struct rb_node *tmp_rb;
1138	int ret, i, j, free_peb_count, used_peb_count, vol_count;
1139	int scrub_peb_count, erase_peb_count;
1140	unsigned long *seen_pebs;
1141
1142	fm_raw = ubi->fm_buf;
1143	memset(ubi->fm_buf, 0, ubi->fm_size);
1144
1145	avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
1146	if (!avbuf) {
1147		ret = -ENOMEM;
1148		goto out;
1149	}
1150
1151	dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
1152	if (!dvbuf) {
1153		ret = -ENOMEM;
1154		goto out_free_avbuf;
1155	}
1156
1157	avhdr = ubi_get_vid_hdr(avbuf);
1158	dvhdr = ubi_get_vid_hdr(dvbuf);
1159
1160	seen_pebs = init_seen(ubi);
1161	if (IS_ERR(seen_pebs)) {
1162		ret = PTR_ERR(seen_pebs);
1163		goto out_free_dvbuf;
1164	}
1165
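	/*
	 * Hold both locks so that neither the volume layout nor the
	 * wear-leveling state can change while the fastmap snapshot is
	 * assembled in fm_buf.
	 */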
1166	spin_lock(&ubi->volumes_lock);
1167	spin_lock(&ubi->wl_lock);
1168
1169	fmsb = (struct ubi_fm_sb *)fm_raw;
1170	fm_pos += sizeof(*fmsb);
1171	ubi_assert(fm_pos <= ubi->fm_size);
1172
1173	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
1174	fm_pos += sizeof(*fmh);
1175	ubi_assert(fm_pos <= ubi->fm_size);
1176
1177	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
1178	fmsb->version = UBI_FM_FMT_VERSION;
1179	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
1180	/* the max sqnum will be filled in while *reading* the fastmap */
1181	fmsb->sqnum = 0;
1182
1183	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
1184	free_peb_count = 0;
1185	used_peb_count = 0;
1186	scrub_peb_count = 0;
1187	erase_peb_count = 0;
1188	vol_count = 0;
1189
1190	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1191	fm_pos += sizeof(*fmpl);
1192	fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1193	fmpl->size = cpu_to_be16(ubi->fm_pool.size);
1194	fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);
1195
1196	for (i = 0; i < ubi->fm_pool.size; i++) {
1197		fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
1198		set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
1199	}
1200
1201	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1202	fm_pos += sizeof(*fmpl_wl);
1203	fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1204	fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
1205	fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
1206
1207	for (i = 0; i < ubi->fm_wl_pool.size; i++) {
1208		fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
1209		set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
1210	}
1211
1212	ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
1213		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1214
1215		fec->pnum = cpu_to_be32(wl_e->pnum);
1216		set_seen(ubi, wl_e->pnum, seen_pebs);
1217		fec->ec = cpu_to_be32(wl_e->ec);
1218
1219		free_peb_count++;
1220		fm_pos += sizeof(*fec);
1221		ubi_assert(fm_pos <= ubi->fm_size);
1222	}
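	/*
	 * The PEB reserved as the next fastmap anchor lives outside the
	 * free tree; record it as free as well so that it is not leaked
	 * from the attach view.
	 */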
1223	if (ubi->fm_next_anchor) {
1224		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1225
1226		fec->pnum = cpu_to_be32(ubi->fm_next_anchor->pnum);
1227		set_seen(ubi, ubi->fm_next_anchor->pnum, seen_pebs);
1228		fec->ec = cpu_to_be32(ubi->fm_next_anchor->ec);
1229
1230		free_peb_count++;
1231		fm_pos += sizeof(*fec);
1232		ubi_assert(fm_pos <= ubi->fm_size);
1233	}
1234	fmh->free_peb_count = cpu_to_be32(free_peb_count);
1235
1236	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
1237		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1238
1239		fec->pnum = cpu_to_be32(wl_e->pnum);
1240		set_seen(ubi, wl_e->pnum, seen_pebs);
1241		fec->ec = cpu_to_be32(wl_e->ec);
1242
1243		used_peb_count++;
1244		fm_pos += sizeof(*fec);
1245		ubi_assert(fm_pos <= ubi->fm_size);
1246	}
1247
1248	ubi_for_each_protected_peb(ubi, i, wl_e) {
1249		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1250
1251		fec->pnum = cpu_to_be32(wl_e->pnum);
1252		set_seen(ubi, wl_e->pnum, seen_pebs);
1253		fec->ec = cpu_to_be32(wl_e->ec);
1254
1255		used_peb_count++;
1256		fm_pos += sizeof(*fec);
1257		ubi_assert(fm_pos <= ubi->fm_size);
1258	}
1259	fmh->used_peb_count = cpu_to_be32(used_peb_count);
1260
1261	ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
1262		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1263
1264		fec->pnum = cpu_to_be32(wl_e->pnum);
1265		set_seen(ubi, wl_e->pnum, seen_pebs);
1266		fec->ec = cpu_to_be32(wl_e->ec);
1267
1268		scrub_peb_count++;
1269		fm_pos += sizeof(*fec);
1270		ubi_assert(fm_pos <= ubi->fm_size);
1271	}
1272	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
1273
1274
1275	list_for_each_entry(ubi_wrk, &ubi->works, list) {
1276		if (ubi_is_erase_work(ubi_wrk)) {
1277			wl_e = ubi_wrk->e;
1278			ubi_assert(wl_e);
1279
1280			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1281
1282			fec->pnum = cpu_to_be32(wl_e->pnum);
1283			set_seen(ubi, wl_e->pnum, seen_pebs);
1284			fec->ec = cpu_to_be32(wl_e->ec);
1285
1286			erase_peb_count++;
1287			fm_pos += sizeof(*fec);
1288			ubi_assert(fm_pos <= ubi->fm_size);
1289		}
1290	}
1291	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
1292
1293	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
1294		vol = ubi->volumes[i];
1295
1296		if (!vol)
1297			continue;
1298
1299		vol_count++;
1300
1301		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
1302		fm_pos += sizeof(*fvh);
1303		ubi_assert(fm_pos <= ubi->fm_size);
1304
1305		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
1306		fvh->vol_id = cpu_to_be32(vol->vol_id);
1307		fvh->vol_type = vol->vol_type;
1308		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
1309		fvh->data_pad = cpu_to_be32(vol->data_pad);
1310		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
1311
1312		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
1313			vol->vol_type == UBI_STATIC_VOLUME);
1314
1315		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
1316		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
1317		ubi_assert(fm_pos <= ubi->fm_size);
1318
1319		for (j = 0; j < vol->reserved_pebs; j++) {
1320			struct ubi_eba_leb_desc ldesc;
1321
1322			ubi_eba_get_ldesc(vol, j, &ldesc);
1323			feba->pnum[j] = cpu_to_be32(ldesc.pnum);
1324		}
1325
1326		feba->reserved_pebs = cpu_to_be32(j);
1327		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
1328	}
1329	fmh->vol_count = cpu_to_be32(vol_count);
1330	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
1331
1332	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1333	avhdr->lnum = 0;
1334
1335	spin_unlock(&ubi->wl_lock);
1336	spin_unlock(&ubi->volumes_lock);
1337
1338	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
1339	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
1340	if (ret) {
1341		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
1342		goto out_free_seen;
1343	}
1344
1345	for (i = 0; i < new_fm->used_blocks; i++) {
1346		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
1347		set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
1348		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
1349	}
1350
1351	fmsb->data_crc = 0;
1352	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
1353					   ubi->fm_size));
1354
1355	for (i = 1; i < new_fm->used_blocks; i++) {
1356		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1357		dvhdr->lnum = cpu_to_be32(i);
1358		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
1359			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
1360		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
1361		if (ret) {
1362			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
1363				new_fm->e[i]->pnum);
1364			goto out_free_seen;
1365		}
1366	}
1367
1368	for (i = 0; i < new_fm->used_blocks; i++) {
1369		ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size),
1370					new_fm->e[i]->pnum, 0, ubi->leb_size);
1371		if (ret) {
1372			ubi_err(ubi, "unable to write fastmap to PEB %i!",
1373				new_fm->e[i]->pnum);
1374			goto out_free_seen;
1375		}
1376	}
1377
1378	ubi_assert(new_fm);
1379	ubi->fm = new_fm;
1380
1381	ret = self_check_seen(ubi, seen_pebs);
1382	dbg_bld("fastmap written!");
1383
1384out_free_seen:
1385	free_seen(seen_pebs);
1386out_free_dvbuf:
1387	ubi_free_vid_buf(dvbuf);
1388out_free_avbuf:
1389	ubi_free_vid_buf(avbuf);
1390
1391out:
1392	return ret;
1393}
1394
1395/**
1396 * erase_block - Manually erase a PEB.
1397 * @ubi: UBI device object
1398 * @pnum: PEB to be erased
1399 *
1400 * Returns the new EC value on success, < 0 indicates an internal error.
1401 */
1402static int erase_block(struct ubi_device *ubi, int pnum)
1403{
1404	int ret;
1405	struct ubi_ec_hdr *ec_hdr;
1406	long long ec;
1407
1408	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1409	if (!ec_hdr)
1410		return -ENOMEM;
1411
1412	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1413	if (ret < 0)
1414		goto out;
1415	else if (ret && ret != UBI_IO_BITFLIPS) {
1416		ret = -EINVAL;
1417		goto out;
1418	}
1419
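	/*
	 * ubi_io_sync_erase() returns the number of erasures performed
	 * on success; add it to the stored erase counter below.
	 */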
1420	ret = ubi_io_sync_erase(ubi, pnum, 0);
1421	if (ret < 0)
1422		goto out;
1423
1424	ec = be64_to_cpu(ec_hdr->ec);
1425	ec += ret;
1426	if (ec > UBI_MAX_ERASECOUNTER) {
1427		ret = -EINVAL;
1428		goto out;
1429	}
1430
1431	ec_hdr->ec = cpu_to_be64(ec);
1432	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
1433	if (ret < 0)
1434		goto out;
1435
1436	ret = ec;
1437out:
1438	kfree(ec_hdr);
1439	return ret;
1440}
1441
1442/**
1443 * invalidate_fastmap - destroys a fastmap.
1444 * @ubi: UBI device object
1445 *
1446 * This function ensures that upon next UBI attach a full scan
1447 * is issued. We need this if UBI is about to write a new fastmap
1448 * but is unable to do so. In this case we have two options:
 1449 * a) Make sure that the current fastmap will not be used upon
 1450 * attach time and continue, or b) fall back to RO mode to have the
1451 * current fastmap in a valid state.
1452 * Returns 0 on success, < 0 indicates an internal error.
1453 */
1454static int invalidate_fastmap(struct ubi_device *ubi)
1455{
1456	int ret;
1457	struct ubi_fastmap_layout *fm;
1458	struct ubi_wl_entry *e;
1459	struct ubi_vid_io_buf *vb = NULL;
1460	struct ubi_vid_hdr *vh;
1461
1462	if (!ubi->fm)
1463		return 0;
1464
1465	ubi->fm = NULL;
1466
1467	ret = -ENOMEM;
1468	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
1469	if (!fm)
1470		goto out;
1471
1472	vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
1473	if (!vb)
1474		goto out_free_fm;
1475
1476	vh = ubi_get_vid_hdr(vb);
1477
1478	ret = -ENOSPC;
1479	e = ubi_wl_get_fm_peb(ubi, 1);
1480	if (!e)
1481		goto out_free_fm;
1482
1483	/*
1484	 * Create fake fastmap such that UBI will fall back
1485	 * to scanning mode.
1486	 */
1487	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1488	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
1489	if (ret < 0) {
1490		ubi_wl_put_fm_peb(ubi, e, 0, 0);
1491		goto out_free_fm;
1492	}
1493
1494	fm->used_blocks = 1;
1495	fm->e[0] = e;
1496
1497	ubi->fm = fm;
1498
1499out:
1500	ubi_free_vid_buf(vb);
1501	return ret;
1502
1503out_free_fm:
1504	kfree(fm);
1505	goto out;
1506}
1507
1508/**
1509 * return_fm_pebs - returns all PEBs used by a fastmap back to the
1510 * WL sub-system.
1511 * @ubi: UBI device object
1512 * @fm: fastmap layout object
1513 */
1514static void return_fm_pebs(struct ubi_device *ubi,
1515			   struct ubi_fastmap_layout *fm)
1516{
1517	int i;
1518
1519	if (!fm)
1520		return;
1521
1522	for (i = 0; i < fm->used_blocks; i++) {
1523		if (fm->e[i]) {
1524			ubi_wl_put_fm_peb(ubi, fm->e[i], i,
1525					  fm->to_be_tortured[i]);
1526			fm->e[i] = NULL;
1527		}
1528	}
1529}
1530
1531/**
1532 * ubi_update_fastmap - will be called by UBI if a volume changes or
1533 * a fastmap pool becomes full.
1534 * @ubi: UBI device object
1535 *
1536 * Returns 0 on success, < 0 indicates an internal error.
1537 */
1538int ubi_update_fastmap(struct ubi_device *ubi)
1539{
1540	int ret, i, j;
1541	struct ubi_fastmap_layout *new_fm, *old_fm;
1542	struct ubi_wl_entry *tmp_e;
1543
1544	down_write(&ubi->fm_protect);
1545	down_write(&ubi->work_sem);
1546	down_write(&ubi->fm_eba_sem);
1547
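	/*
	 * Refill both pools under the locks so that the fastmap written
	 * below records freshly stocked pools.
	 */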
1548	ubi_refill_pools(ubi);
1549
1550	if (ubi->ro_mode || ubi->fm_disabled) {
1551		up_write(&ubi->fm_eba_sem);
1552		up_write(&ubi->work_sem);
1553		up_write(&ubi->fm_protect);
1554		return 0;
1555	}
1556
 1556
1557	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
1558	if (!new_fm) {
1559		up_write(&ubi->fm_eba_sem);
1560		up_write(&ubi->work_sem);
1561		up_write(&ubi->fm_protect);
1562		return -ENOMEM;
1563	}
1564
1565	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
1566	old_fm = ubi->fm;
1567	ubi->fm = NULL;
1568
1569	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
1570		ubi_err(ubi, "fastmap too large");
1571		ret = -ENOSPC;
1572		goto err;
1573	}
1574
1575	for (i = 1; i < new_fm->used_blocks; i++) {
1576		spin_lock(&ubi->wl_lock);
1577		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
1578		spin_unlock(&ubi->wl_lock);
1579
1580		if (!tmp_e) {
1581			if (old_fm && old_fm->e[i]) {
1582				ret = erase_block(ubi, old_fm->e[i]->pnum);
1583				if (ret < 0) {
1584					ubi_err(ubi, "could not erase old fastmap PEB");
1585
1586					for (j = 1; j < i; j++) {
1587						ubi_wl_put_fm_peb(ubi, new_fm->e[j],
1588								  j, 0);
1589						new_fm->e[j] = NULL;
1590					}
1591					goto err;
1592				}
1593				new_fm->e[i] = old_fm->e[i];
1594				old_fm->e[i] = NULL;
1595			} else {
1596				ubi_err(ubi, "could not get any free erase block");
1597
1598				for (j = 1; j < i; j++) {
1599					ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
1600					new_fm->e[j] = NULL;
1601				}
1602
1603				ret = -ENOSPC;
1604				goto err;
1605			}
1606		} else {
1607			new_fm->e[i] = tmp_e;
1608
1609			if (old_fm && old_fm->e[i]) {
1610				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1611						  old_fm->to_be_tortured[i]);
1612				old_fm->e[i] = NULL;
1613			}
1614		}
1615	}
1616
1617	/* Old fastmap is larger than the new one */
1618	if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
1619		for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
1620			ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1621					  old_fm->to_be_tortured[i]);
1622			old_fm->e[i] = NULL;
1623		}
1624	}
1625
1626	spin_lock(&ubi->wl_lock);
1627	tmp_e = ubi->fm_anchor;
1628	ubi->fm_anchor = NULL;
1629	spin_unlock(&ubi->wl_lock);
1630
1631	if (old_fm) {
1632		/* no fresh anchor PEB was found, reuse the old one */
1633		if (!tmp_e) {
1634			ret = erase_block(ubi, old_fm->e[0]->pnum);
1635			if (ret < 0) {
1636				ubi_err(ubi, "could not erase old anchor PEB");
1637
1638				for (i = 1; i < new_fm->used_blocks; i++) {
1639					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
1640							  i, 0);
1641					new_fm->e[i] = NULL;
1642				}
1643				goto err;
1644			}
1645			new_fm->e[0] = old_fm->e[0];
1646			new_fm->e[0]->ec = ret;
1647			old_fm->e[0] = NULL;
1648		} else {
1649			/* we've got a new anchor PEB, return the old one */
1650			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
1651					  old_fm->to_be_tortured[0]);
1652			new_fm->e[0] = tmp_e;
1653			old_fm->e[0] = NULL;
1654		}
1655	} else {
1656		if (!tmp_e) {
1657			ubi_err(ubi, "could not find any anchor PEB");
1658
1659			for (i = 1; i < new_fm->used_blocks; i++) {
1660				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
1661				new_fm->e[i] = NULL;
1662			}
1663
1664			ret = -ENOSPC;
1665			goto err;
1666		}
1667		new_fm->e[0] = tmp_e;
1668	}
1669
1670	ret = ubi_write_fastmap(ubi, new_fm);
1671
1672	if (ret)
1673		goto err;
1674
1675out_unlock:
1676	up_write(&ubi->fm_eba_sem);
1677	up_write(&ubi->work_sem);
1678	up_write(&ubi->fm_protect);
1679	kfree(old_fm);
1680
1681	ubi_ensure_anchor_pebs(ubi);
1682
1683	return ret;
1684
1685err:
1686	ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
1687
1688	ret = invalidate_fastmap(ubi);
1689	if (ret < 0) {
1690		ubi_err(ubi, "Unable to invalidate current fastmap!");
1691		ubi_ro_mode(ubi);
1692	} else {
1693		return_fm_pebs(ubi, old_fm);
1694		return_fm_pebs(ubi, new_fm);
1695		ret = 0;
1696	}
1697
1698	kfree(new_fm);
1699	goto out_unlock;
1700}
v3.15
 
   1/*
   2 * Copyright (c) 2012 Linutronix GmbH
 
   3 * Author: Richard Weinberger <richard@nod.at>
   4 *
   5 * This program is free software; you can redistribute it and/or modify
   6 * it under the terms of the GNU General Public License as published by
   7 * the Free Software Foundation; version 2.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
  12 * the GNU General Public License for more details.
  13 *
  14 */
  15
  16#include <linux/crc32.h>
 
  17#include "ubi.h"
  18
  19/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  20 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
  21 * @ubi: UBI device description object
  22 */
  23size_t ubi_calc_fm_size(struct ubi_device *ubi)
  24{
  25	size_t size;
  26
  27	size = sizeof(struct ubi_fm_hdr) + \
  28		sizeof(struct ubi_fm_scan_pool) + \
  29		sizeof(struct ubi_fm_scan_pool) + \
  30		(ubi->peb_count * sizeof(struct ubi_fm_ec)) + \
  31		(sizeof(struct ubi_fm_eba) + \
  32		(ubi->peb_count * sizeof(__be32))) + \
 
  33		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
  34	return roundup(size, ubi->leb_size);
  35}
  36
  37
  38/**
  39 * new_fm_vhdr - allocate a new volume header for fastmap usage.
  40 * @ubi: UBI device description object
  41 * @vol_id: the VID of the new header
  42 *
  43 * Returns a new struct ubi_vid_hdr on success.
  44 * NULL indicates out of memory.
  45 */
  46static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
  47{
  48	struct ubi_vid_hdr *new;
 
  49
  50	new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
  51	if (!new)
  52		goto out;
  53
  54	new->vol_type = UBI_VID_DYNAMIC;
  55	new->vol_id = cpu_to_be32(vol_id);
 
  56
  57	/* UBI implementations without fastmap support have to delete the
  58	 * fastmap.
  59	 */
  60	new->compat = UBI_COMPAT_DELETE;
  61
  62out:
  63	return new;
  64}
  65
  66/**
  67 * add_aeb - create and add a attach erase block to a given list.
  68 * @ai: UBI attach info object
  69 * @list: the target list
  70 * @pnum: PEB number of the new attach erase block
  71 * @ec: erease counter of the new LEB
  72 * @scrub: scrub this PEB after attaching
  73 *
  74 * Returns 0 on success, < 0 indicates an internal error.
  75 */
  76static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
  77		   int pnum, int ec, int scrub)
  78{
  79	struct ubi_ainf_peb *aeb;
  80
  81	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
  82	if (!aeb)
  83		return -ENOMEM;
  84
  85	aeb->pnum = pnum;
  86	aeb->ec = ec;
  87	aeb->lnum = -1;
  88	aeb->scrub = scrub;
  89	aeb->copy_flag = aeb->sqnum = 0;
  90
  91	ai->ec_sum += aeb->ec;
  92	ai->ec_count++;
  93
  94	if (ai->max_ec < aeb->ec)
  95		ai->max_ec = aeb->ec;
  96
  97	if (ai->min_ec > aeb->ec)
  98		ai->min_ec = aeb->ec;
  99
 100	list_add_tail(&aeb->u.list, list);
 101
 102	return 0;
 103}
 104
 105/**
 106 * add_vol - create and add a new volume to ubi_attach_info.
 107 * @ai: ubi_attach_info object
 108 * @vol_id: VID of the new volume
 109 * @used_ebs: number of used EBS
 110 * @data_pad: data padding value of the new volume
 111 * @vol_type: volume type
 112 * @last_eb_bytes: number of bytes in the last LEB
 113 *
 114 * Returns the new struct ubi_ainf_volume on success.
 115 * NULL indicates an error.
 116 */
 117static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
 118				       int used_ebs, int data_pad, u8 vol_type,
 119				       int last_eb_bytes)
 120{
 121	struct ubi_ainf_volume *av;
 122	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
 123
 124	while (*p) {
 125		parent = *p;
 126		av = rb_entry(parent, struct ubi_ainf_volume, rb);
 127
 128		if (vol_id > av->vol_id)
 129			p = &(*p)->rb_left;
 130		else if (vol_id > av->vol_id)
 131			p = &(*p)->rb_right;
 132	}
 133
 134	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
 135	if (!av)
 136		goto out;
 137
 138	av->highest_lnum = av->leb_count = 0;
 139	av->vol_id = vol_id;
 140	av->used_ebs = used_ebs;
 141	av->data_pad = data_pad;
 142	av->last_data_size = last_eb_bytes;
 143	av->compat = 0;
 144	av->vol_type = vol_type;
 145	av->root = RB_ROOT;
 
 146
 147	dbg_bld("found volume (ID %i)", vol_id);
 148
 149	rb_link_node(&av->rb, parent, p);
 150	rb_insert_color(&av->rb, &ai->volumes);
 151
 152out:
 153	return av;
 154}
 155
 156/**
 157 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
 158 * from it's original list.
 159 * @ai: ubi_attach_info object
 160 * @aeb: the to be assigned SEB
 161 * @av: target scan volume
 162 */
 163static void assign_aeb_to_av(struct ubi_attach_info *ai,
 164			     struct ubi_ainf_peb *aeb,
 165			     struct ubi_ainf_volume *av)
 166{
 167	struct ubi_ainf_peb *tmp_aeb;
 168	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
 169
 170	p = &av->root.rb_node;
 171	while (*p) {
 172		parent = *p;
 173
 174		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
 175		if (aeb->lnum != tmp_aeb->lnum) {
 176			if (aeb->lnum < tmp_aeb->lnum)
 177				p = &(*p)->rb_left;
 178			else
 179				p = &(*p)->rb_right;
 180
 181			continue;
 182		} else
 183			break;
 184	}
 185
 186	list_del(&aeb->u.list);
 187	av->leb_count++;
 188
 189	rb_link_node(&aeb->u.rb, parent, p);
 190	rb_insert_color(&aeb->u.rb, &av->root);
 191}
 192
 193/**
 194 * update_vol - inserts or updates a LEB which was found a pool.
 195 * @ubi: the UBI device object
 196 * @ai: attach info object
 197 * @av: the volume this LEB belongs to
 198 * @new_vh: the volume header derived from new_aeb
 199 * @new_aeb: the AEB to be examined
 200 *
 201 * Returns 0 on success, < 0 indicates an internal error.
 202 */
 203static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
 204		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
 205		      struct ubi_ainf_peb *new_aeb)
 206{
 207	struct rb_node **p = &av->root.rb_node, *parent = NULL;
 208	struct ubi_ainf_peb *aeb, *victim;
 209	int cmp_res;
 210
 211	while (*p) {
 212		parent = *p;
 213		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
 214
 215		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
 216			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
 217				p = &(*p)->rb_left;
 218			else
 219				p = &(*p)->rb_right;
 220
 221			continue;
 222		}
 223
 224		/* This case can happen if the fastmap gets written
 225		 * because of a volume change (creation, deletion, ..).
 226		 * Then a PEB can be within the persistent EBA and the pool.
 227		 */
 228		if (aeb->pnum == new_aeb->pnum) {
 229			ubi_assert(aeb->lnum == new_aeb->lnum);
 230			kmem_cache_free(ai->aeb_slab_cache, new_aeb);
 231
 232			return 0;
 233		}
 234
 235		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
 236		if (cmp_res < 0)
 237			return cmp_res;
 238
 239		/* new_aeb is newer */
 240		if (cmp_res & 1) {
 241			victim = kmem_cache_alloc(ai->aeb_slab_cache,
 242				GFP_KERNEL);
 243			if (!victim)
 244				return -ENOMEM;
 245
 246			victim->ec = aeb->ec;
 247			victim->pnum = aeb->pnum;
 248			list_add_tail(&victim->u.list, &ai->erase);
 249
 250			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
 251				av->last_data_size = \
 252					be32_to_cpu(new_vh->data_size);
 253
 254			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
 255				av->vol_id, aeb->lnum, new_aeb->pnum);
 256
 257			aeb->ec = new_aeb->ec;
 258			aeb->pnum = new_aeb->pnum;
 259			aeb->copy_flag = new_vh->copy_flag;
 260			aeb->scrub = new_aeb->scrub;
 261			kmem_cache_free(ai->aeb_slab_cache, new_aeb);
 
 262
 263		/* new_aeb is older */
 264		} else {
 265			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
 266				av->vol_id, aeb->lnum, new_aeb->pnum);
 267			list_add_tail(&new_aeb->u.list, &ai->erase);
 268		}
 269
 270		return 0;
 271	}
 272	/* This LEB is new, let's add it to the volume */
 273
 274	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
 275		av->highest_lnum = be32_to_cpu(new_vh->lnum);
 276		av->last_data_size = be32_to_cpu(new_vh->data_size);
 277	}
 278
 279	if (av->vol_type == UBI_STATIC_VOLUME)
 280		av->used_ebs = be32_to_cpu(new_vh->used_ebs);
 281
 282	av->leb_count++;
 283
 284	rb_link_node(&new_aeb->u.rb, parent, p);
 285	rb_insert_color(&new_aeb->u.rb, &av->root);
 286
 287	return 0;
 288}
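/*
 * Editor's note (hedged, following the ubi_compare_lebs() kerneldoc in
 * attach.c): the cmp_res bitmask tested above encodes which copy of the
 * LEB won and whether the winner needs attention.  Decoded as a sketch:
 *
 *	bool second_is_newer    = cmp_res & 1;
 *	bool newer_has_bitflips = cmp_res & 2;
 *	bool older_is_corrupted = cmp_res & 4;
 *
 * update_vol() only consumes bit 0: either new_aeb replaces the tree
 * entry and the old PEB is queued for erasure, or new_aeb is dropped.
 */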
 289
 290/**
 291 * process_pool_aeb - process a non-empty PEB found in a pool.
 292 * @ubi: UBI device object
 293 * @ai: attach info object
 294 * @new_vh: the volume header derived from new_aeb
 295 * @new_aeb: the AEB to be examined
 296 *
 297 * Returns 0 on success, < 0 indicates an internal error.
 298 */
 299static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
 300			    struct ubi_vid_hdr *new_vh,
 301			    struct ubi_ainf_peb *new_aeb)
 302{
 303	struct ubi_ainf_volume *av, *tmp_av = NULL;
 304	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
 305	int found = 0;
 306
 307	if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
 308		be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
 309		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
 310
 311		return 0;
 312	}
 313
 314	/* Find the volume this SEB belongs to */
 315	while (*p) {
 316		parent = *p;
 317		tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
 318
 319		if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
 320			p = &(*p)->rb_left;
 321		else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
 322			p = &(*p)->rb_right;
 323		else {
 324			found = 1;
 325			break;
 326		}
 327	}
 328
 329	if (found)
 330		av = tmp_av;
 331	else {
 332		ubi_err("orphaned volume in fastmap pool!");
 333		return UBI_BAD_FASTMAP;
 334	}
 335
 336	ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
 337
 338	return update_vol(ubi, ai, av, new_vh, new_aeb);
 339}
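/*
 * Editor's note: unlike the per-volume LEB tree, ai->volumes keeps
 * *larger* vol_ids to the left; the descent above has to mirror the
 * insertion order used by add_vol().  A minimal lookup sketch (the
 * name find_av() is hypothetical):
 *
 *	static struct ubi_ainf_volume *find_av(struct ubi_attach_info *ai,
 *					       int vol_id)
 *	{
 *		struct rb_node *node = ai->volumes.rb_node;
 *		struct ubi_ainf_volume *av;
 *
 *		while (node) {
 *			av = rb_entry(node, struct ubi_ainf_volume, rb);
 *			if (vol_id > av->vol_id)
 *				node = node->rb_left;
 *			else if (vol_id < av->vol_id)
 *				node = node->rb_right;
 *			else
 *				return av;
 *		}
 *		return NULL;
 *	}
 */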
 340
 341/**
 342 * unmap_peb - unmap a PEB.
 343 * @ai: UBI attach info object
 344 * @pnum: The PEB to be unmapped
 345 *
 346 * If fastmap detects a free PEB in the pool it has to check whether
 347 * this PEB has been unmapped after writing the fastmap.
 348 */
 349static void unmap_peb(struct ubi_attach_info *ai, int pnum)
 350{
 351	struct ubi_ainf_volume *av;
 352	struct rb_node *node, *node2;
 353	struct ubi_ainf_peb *aeb;
 354
 355	for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
 356		av = rb_entry(node, struct ubi_ainf_volume, rb);
 357
 358		for (node2 = rb_first(&av->root); node2;
 359		     node2 = rb_next(node2)) {
 360			aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
 361			if (aeb->pnum == pnum) {
 362				rb_erase(&aeb->u.rb, &av->root);
 363				kmem_cache_free(ai->aeb_slab_cache, aeb);
 364				return;
 365			}
 366		}
 367	}
 368}
 369
 370/**
 371 * scan_pool - scans a pool for changed (no longer empty) PEBs.
 372 * @ubi: UBI device object
 373 * @ai: attach info object
 374 * @pebs: an array of all PEB numbers in the pool to be scanned
 375 * @pool_size: size of the pool (number of entries in @pebs)
 376 * @max_sqnum: pointer to the maximal sequence number
 377 * @eba_orphans: list of PEBs which need to be scanned
 378 * @free: list of PEBs which are most likely free (and go into @ai->free)
 379 *
 380 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
 381 * < 0 indicates an internal error.
 382 */
 383static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
 384		     __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
 385		     struct list_head *eba_orphans, struct list_head *free)
 386{
 387	struct ubi_vid_hdr *vh;
 388	struct ubi_ec_hdr *ech;
 389	struct ubi_ainf_peb *new_aeb, *tmp_aeb;
 390	int i, pnum, err, found_orphan, ret = 0;
 391
 392	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
 393	if (!ech)
 394		return -ENOMEM;
 395
 396	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
 397	if (!vh) {
 398		kfree(ech);
 399		return -ENOMEM;
 400	}
 401
 402	dbg_bld("scanning fastmap pool: size = %i", pool_size);
 403
 404	/*
 405	 * Now scan all PEBs in the pool to find changes which have been made
 406	 * after the creation of the fastmap
 407	 */
 408	for (i = 0; i < pool_size; i++) {
 409		int scrub = 0;
 410		int image_seq;
 411
 412		pnum = be32_to_cpu(pebs[i]);
 413
 414		if (ubi_io_is_bad(ubi, pnum)) {
 415			ubi_err("bad PEB in fastmap pool!");
 416			ret = UBI_BAD_FASTMAP;
 417			goto out;
 418		}
 419
 420		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
 421		if (err && err != UBI_IO_BITFLIPS) {
 422			ubi_err("unable to read EC header! PEB:%i err:%i",
 423				pnum, err);
 424			ret = err > 0 ? UBI_BAD_FASTMAP : err;
 425			goto out;
 426		} else if (err == UBI_IO_BITFLIPS)
 427			scrub = 1;
 428
 429		/*
 430		 * Older UBI implementations have image_seq set to zero, so
 431		 * we shouldn't fail if image_seq == 0.
 432		 */
 433		image_seq = be32_to_cpu(ech->image_seq);
 434
 435		if (image_seq && (image_seq != ubi->image_seq)) {
 436			ubi_err("bad image seq: 0x%x, expected: 0x%x",
 437				be32_to_cpu(ech->image_seq), ubi->image_seq);
 438			ret = UBI_BAD_FASTMAP;
 439			goto out;
 440		}
 441
 442		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
 443		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
 444			unsigned long long ec = be64_to_cpu(ech->ec);
 445			unmap_peb(ai, pnum);
 446			dbg_bld("Adding PEB to free: %i", pnum);
 447			if (err == UBI_IO_FF_BITFLIPS)
 448				add_aeb(ai, free, pnum, ec, 1);
 449			else
 450				add_aeb(ai, free, pnum, ec, 0);
 451			continue;
 452		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
 453			dbg_bld("Found non-empty PEB:%i in pool", pnum);
 454
 455			if (err == UBI_IO_BITFLIPS)
 456				scrub = 1;
 457
 458			found_orphan = 0;
 459			list_for_each_entry(tmp_aeb, eba_orphans, u.list) {
 460				if (tmp_aeb->pnum == pnum) {
 461					found_orphan = 1;
 462					break;
 463				}
 464			}
 465			if (found_orphan) {
 466				list_del(&tmp_aeb->u.list);
 467				kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
 468			}
 469
 470			new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
 471						   GFP_KERNEL);
 472			if (!new_aeb) {
 473				ret = -ENOMEM;
 474				goto out;
 475			}
 476
 477			new_aeb->ec = be64_to_cpu(ech->ec);
 478			new_aeb->pnum = pnum;
 479			new_aeb->lnum = be32_to_cpu(vh->lnum);
 480			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
 481			new_aeb->copy_flag = vh->copy_flag;
 482			new_aeb->scrub = scrub;
 483
 484			if (*max_sqnum < new_aeb->sqnum)
 485				*max_sqnum = new_aeb->sqnum;
 486
 487			err = process_pool_aeb(ubi, ai, vh, new_aeb);
 488			if (err) {
 489				ret = err > 0 ? UBI_BAD_FASTMAP : err;
 490				goto out;
 491			}
 492		} else {
 493			/* We are paranoid and fall back to scanning mode */
 494			ubi_err("fastmap pool contains damaged PEBs!");
 495			ret = err > 0 ? UBI_BAD_FASTMAP : err;
 496			goto out;
 497		}
 498
 499	}
 500
 501out:
 502	ubi_free_vid_hdr(ubi, vh);
 503	kfree(ech);
 504	return ret;
 505}
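/*
 * Editor's summary of the VID header read outcomes handled above,
 * restated as a sketch of the branch structure (no new driver logic):
 *
 *	bool is_free     = (err == UBI_IO_FF ||
 *			    err == UBI_IO_FF_BITFLIPS);
 *	bool is_used     = (err == 0 || err == UBI_IO_BITFLIPS);
 *	bool needs_scrub = (err == UBI_IO_BITFLIPS ||
 *			    err == UBI_IO_FF_BITFLIPS);
 *
 * Empty PEBs were unmapped after the fastmap was written and go to the
 * @free list; non-empty ones are handed to process_pool_aeb(); any
 * other return code makes the whole pool untrusted (UBI_BAD_FASTMAP).
 */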
 506
 507/**
 508 * count_fastmap_pebs - Counts the PEBs found by fastmap.
 509 * @ai: The UBI attach info object
 510 */
 511static int count_fastmap_pebs(struct ubi_attach_info *ai)
 512{
 513	struct ubi_ainf_peb *aeb;
 514	struct ubi_ainf_volume *av;
 515	struct rb_node *rb1, *rb2;
 516	int n = 0;
 517
 518	list_for_each_entry(aeb, &ai->erase, u.list)
 519		n++;
 520
 521	list_for_each_entry(aeb, &ai->free, u.list)
 522		n++;
 523
 524	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
 525		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
 526			n++;
 527
 528	return n;
 529}
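/*
 * Editor's worked example for the leak check in ubi_attach_fastmap()
 * below: on a hypothetical device with 1024 PEBs, 2 bad PEBs and a
 * fastmap occupying 2 blocks, count_fastmap_pebs() must return
 *
 *	1024 - 2 - 2 = 1020
 *
 * i.e. every remaining PEB sits on exactly one of the free, erase or
 * per-volume structures.  Any other total means the fastmap lost or
 * duplicated a PEB and attaching falls back to scanning.
 */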
 530
 531/**
 532 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
 533 * @ubi: UBI device object
 534 * @ai: UBI attach info object
 535 * @fm: the fastmap to be attached
 536 *
 537 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 538 * < 0 indicates an internal error.
 539 */
 540static int ubi_attach_fastmap(struct ubi_device *ubi,
 541			      struct ubi_attach_info *ai,
 542			      struct ubi_fastmap_layout *fm)
 543{
 544	struct list_head used, eba_orphans, free;
 545	struct ubi_ainf_volume *av;
 546	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
 547	struct ubi_ec_hdr *ech;
 548	struct ubi_fm_sb *fmsb;
 549	struct ubi_fm_hdr *fmhdr;
 550	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
 551	struct ubi_fm_ec *fmec;
 552	struct ubi_fm_volhdr *fmvhdr;
 553	struct ubi_fm_eba *fm_eba;
 554	int ret, i, j, pool_size, wl_pool_size;
 555	size_t fm_pos = 0, fm_size = ubi->fm_size;
 556	unsigned long long max_sqnum = 0;
 557	void *fm_raw = ubi->fm_buf;
 558
 559	INIT_LIST_HEAD(&used);
 560	INIT_LIST_HEAD(&free);
 561	INIT_LIST_HEAD(&eba_orphans);
 562	INIT_LIST_HEAD(&ai->corr);
 563	INIT_LIST_HEAD(&ai->free);
 564	INIT_LIST_HEAD(&ai->erase);
 565	INIT_LIST_HEAD(&ai->alien);
 566	ai->volumes = RB_ROOT;
 567	ai->min_ec = UBI_MAX_ERASECOUNTER;
 568
 569	ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab",
 570					       sizeof(struct ubi_ainf_peb),
 571					       0, 0, NULL);
 572	if (!ai->aeb_slab_cache) {
 573		ret = -ENOMEM;
 574		goto fail;
 575	}
 576
 577	fmsb = (struct ubi_fm_sb *)(fm_raw);
 578	ai->max_sqnum = fmsb->sqnum;
 579	fm_pos += sizeof(struct ubi_fm_sb);
 580	if (fm_pos >= fm_size)
 581		goto fail_bad;
 582
 583	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
 584	fm_pos += sizeof(*fmhdr);
 585	if (fm_pos >= fm_size)
 586		goto fail_bad;
 587
 588	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
 589		ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
 590			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
 591		goto fail_bad;
 592	}
 593
 594	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
 595	fm_pos += sizeof(*fmpl1);
 596	if (fm_pos >= fm_size)
 597		goto fail_bad;
 598	if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
 599		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
 600			be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
 601		goto fail_bad;
 602	}
 603
 604	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
 605	fm_pos += sizeof(*fmpl2);
 606	if (fm_pos >= fm_size)
 607		goto fail_bad;
 608	if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
 609		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
 610			be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
 611		goto fail_bad;
 612	}
 613
 614	pool_size = be16_to_cpu(fmpl1->size);
 615	wl_pool_size = be16_to_cpu(fmpl2->size);
 616	fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
 617	fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);
 618
 619	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
 620		ubi_err("bad pool size: %i", pool_size);
 621		goto fail_bad;
 622	}
 623
 624	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
 625		ubi_err("bad WL pool size: %i", wl_pool_size);
 626		goto fail_bad;
 627	}
 628
 629
 630	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
 631	    fm->max_pool_size < 0) {
 632		ubi_err("bad maximal pool size: %i", fm->max_pool_size);
 633		goto fail_bad;
 634	}
 635
 636	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
 637	    fm->max_wl_pool_size < 0) {
 638		ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
 639		goto fail_bad;
 640	}
 641
 642	/* read EC values from free list */
 643	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
 644		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
 645		fm_pos += sizeof(*fmec);
 646		if (fm_pos >= fm_size)
 647			goto fail_bad;
 648
 649		add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
 650			be32_to_cpu(fmec->ec), 0);
 651	}
 652
 653	/* read EC values from used list */
 654	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
 655		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
 656		fm_pos += sizeof(*fmec);
 657		if (fm_pos >= fm_size)
 658			goto fail_bad;
 659
 660		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
 661			be32_to_cpu(fmec->ec), 0);
 662	}
 663
 664	/* read EC values from scrub list */
 665	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
 666		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
 667		fm_pos += sizeof(*fmec);
 668		if (fm_pos >= fm_size)
 669			goto fail_bad;
 670
 671		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
 672			be32_to_cpu(fmec->ec), 1);
 673	}
 674
 675	/* read EC values from erase list */
 676	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
 677		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
 678		fm_pos += sizeof(*fmec);
 679		if (fm_pos >= fm_size)
 680			goto fail_bad;
 681
 682		add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
 683			be32_to_cpu(fmec->ec), 1);
 684	}
 685
 686	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
 687	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
 688
 689	/* Iterate over all volumes and read their EBA table */
 690	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
 691		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
 692		fm_pos += sizeof(*fmvhdr);
 693		if (fm_pos >= fm_size)
 694			goto fail_bad;
 695
 696		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
 697			ubi_err("bad fastmap vol header magic: 0x%x, " \
 698				"expected: 0x%x",
 699				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
 700			goto fail_bad;
 701		}
 702
 703		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
 704			     be32_to_cpu(fmvhdr->used_ebs),
 705			     be32_to_cpu(fmvhdr->data_pad),
 706			     fmvhdr->vol_type,
 707			     be32_to_cpu(fmvhdr->last_eb_bytes));
 708
 709		if (!av)
 710			goto fail_bad;
 711
 712		ai->vols_found++;
 713		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
 714			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
 715
 716		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
 717		fm_pos += sizeof(*fm_eba);
 718		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
 719		if (fm_pos >= fm_size)
 720			goto fail_bad;
 721
 722		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
 723			ubi_err("bad fastmap EBA header magic: 0x%x, " \
 724				"expected: 0x%x",
 725				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
 726			goto fail_bad;
 727		}
 728
 729		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
 730			int pnum = be32_to_cpu(fm_eba->pnum[j]);
 731
 732			if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
 733				continue;
 734
 735			aeb = NULL;
 736			list_for_each_entry(tmp_aeb, &used, u.list) {
 737				if (tmp_aeb->pnum == pnum) {
 738					aeb = tmp_aeb;
 739					break;
 740				}
 741			}
 742
 743			/* This can happen if a PEB is already in an EBA known
 744			 * by this fastmap but the PEB itself is not in the used
 745			 * list.
 746			 * In this case the PEB can be within the fastmap pool
 747			 * or while writing the fastmap it was in the protection
 748			 * queue.
 749			 */
 750			if (!aeb) {
 751				aeb = kmem_cache_alloc(ai->aeb_slab_cache,
 752						       GFP_KERNEL);
 753				if (!aeb) {
 754					ret = -ENOMEM;
 755
 756					goto fail;
 757				}
 758
 759				aeb->lnum = j;
 760				aeb->pnum = be32_to_cpu(fm_eba->pnum[j]);
 761				aeb->ec = -1;
 762				aeb->scrub = aeb->copy_flag = aeb->sqnum = 0;
 763				list_add_tail(&aeb->u.list, &eba_orphans);
 764				continue;
 765			}
 766
 767			aeb->lnum = j;
 768
 769			if (av->highest_lnum <= aeb->lnum)
 770				av->highest_lnum = aeb->lnum;
 771
 772			assign_aeb_to_av(ai, aeb, av);
 773
 774			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
 775				aeb->pnum, aeb->lnum, av->vol_id);
 776		}
 777
 778		ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
 779		if (!ech) {
 780			ret = -ENOMEM;
 781			goto fail;
 782		}
 783
 784		list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans,
 785					 u.list) {
 786			int err;
 787
 788			if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) {
 789				ubi_err("bad PEB in fastmap EBA orphan list");
 790				ret = UBI_BAD_FASTMAP;
 791				kfree(ech);
 792				goto fail;
 793			}
 794
 795			err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0);
 796			if (err && err != UBI_IO_BITFLIPS) {
 797				ubi_err("unable to read EC header! PEB:%i " \
 798					"err:%i", tmp_aeb->pnum, err);
 799				ret = err > 0 ? UBI_BAD_FASTMAP : err;
 800				kfree(ech);
 801
 802				goto fail;
 803			} else if (err == UBI_IO_BITFLIPS)
 804				tmp_aeb->scrub = 1;
 805
 806			tmp_aeb->ec = be64_to_cpu(ech->ec);
 807			assign_aeb_to_av(ai, tmp_aeb, av);
 808		}
 809
 810		kfree(ech);
 811	}
 812
 813	ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum,
 814			&eba_orphans, &free);
 815	if (ret)
 816		goto fail;
 817
 818	ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum,
 819			&eba_orphans, &free);
 820	if (ret)
 821		goto fail;
 822
 823	if (max_sqnum > ai->max_sqnum)
 824		ai->max_sqnum = max_sqnum;
 825
 826	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
 827		list_move_tail(&tmp_aeb->u.list, &ai->free);
 828
 829	ubi_assert(list_empty(&used));
 830	ubi_assert(list_empty(&eba_orphans));
 831	ubi_assert(list_empty(&free));
 832
 833	/*
 834	 * If fastmap is leaking PEBs (must not happen), raise a
 835	 * fat warning and fall back to scanning mode.
 836	 * We do this here because in ubi_wl_init() it's too late
 837	 * and we cannot fall back to scanning.
 838	 */
 839	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
 840		    ai->bad_peb_count - fm->used_blocks))
 841		goto fail_bad;
 842
 843	return 0;
 844
 845fail_bad:
 846	ret = UBI_BAD_FASTMAP;
 847fail:
 848	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
 849		list_del(&tmp_aeb->u.list);
 850		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
 851	}
 852	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) {
 853		list_del(&tmp_aeb->u.list);
 854		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
 855	}
 856	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
 857		list_del(&tmp_aeb->u.list);
 858		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
 859	}
 860
 861	return ret;
 862}
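/*
 * Editor's sketch of the on-flash layout that ubi_attach_fastmap()
 * walks, reconstructed from the parse order above:
 *
 *	struct ubi_fm_sb			superblock
 *	struct ubi_fm_hdr			list counts
 *	struct ubi_fm_scan_pool			pool
 *	struct ubi_fm_scan_pool			WL pool
 *	struct ubi_fm_ec[free_peb_count]	free PEBs
 *	struct ubi_fm_ec[used_peb_count]	used PEBs
 *	struct ubi_fm_ec[scrub_peb_count]	to-be-scrubbed PEBs
 *	struct ubi_fm_ec[erase_peb_count]	to-be-erased PEBs
 *	per volume:
 *		struct ubi_fm_volhdr		volume header
 *		struct ubi_fm_eba + __be32[]	EBA table
 */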
 863
 864/**
 865 * ubi_scan_fastmap - scan the fastmap.
 866 * @ubi: UBI device object
 867 * @ai: UBI attach info to be filled
 868 * @fm_anchor: The fastmap starts at this PEB
 869 *
 870 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
 871 * UBI_BAD_FASTMAP if one was found but is not usable.
 872 * < 0 indicates an internal error.
 873 */
 874int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
 875		     int fm_anchor)
 876{
 877	struct ubi_fm_sb *fmsb, *fmsb2;
 878	struct ubi_vid_hdr *vh;
 879	struct ubi_ec_hdr *ech;
 880	struct ubi_fastmap_layout *fm;
 881	int i, used_blocks, pnum, ret = 0;
 882	size_t fm_size;
 883	__be32 crc, tmp_crc;
 884	unsigned long long sqnum = 0;
 885
 886	mutex_lock(&ubi->fm_mutex);
 887	memset(ubi->fm_buf, 0, ubi->fm_size);
 888
 889	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
 890	if (!fmsb) {
 891		ret = -ENOMEM;
 892		goto out;
 893	}
 894
 895	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
 896	if (!fm) {
 897		ret = -ENOMEM;
 898		kfree(fmsb);
 899		goto out;
 900	}
 901
 902	ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
 903	if (ret && ret != UBI_IO_BITFLIPS)
 904		goto free_fm_sb;
 905	else if (ret == UBI_IO_BITFLIPS)
 906		fm->to_be_tortured[0] = 1;
 907
 908	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
 909		ubi_err("bad super block magic: 0x%x, expected: 0x%x",
 910			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
 911		ret = UBI_BAD_FASTMAP;
 912		goto free_fm_sb;
 913	}
 914
 915	if (fmsb->version != UBI_FM_FMT_VERSION) {
 916		ubi_err("bad fastmap version: %i, expected: %i",
 917			fmsb->version, UBI_FM_FMT_VERSION);
 918		ret = UBI_BAD_FASTMAP;
 919		goto free_fm_sb;
 920	}
 921
 922	used_blocks = be32_to_cpu(fmsb->used_blocks);
 923	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
 924		ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
 925		ret = UBI_BAD_FASTMAP;
 926		goto free_fm_sb;
 927	}
 928
 929	fm_size = ubi->leb_size * used_blocks;
 930	if (fm_size != ubi->fm_size) {
 931		ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
 932			ubi->fm_size);
 933		ret = UBI_BAD_FASTMAP;
 934		goto free_fm_sb;
 935	}
 936
 937	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
 938	if (!ech) {
 939		ret = -ENOMEM;
 940		goto free_fm_sb;
 941	}
 942
 943	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
 944	if (!vh) {
 945		ret = -ENOMEM;
 946		goto free_hdr;
 947	}
 948
 949	for (i = 0; i < used_blocks; i++) {
 950		int image_seq;
 951
 952		pnum = be32_to_cpu(fmsb->block_loc[i]);
 953
 954		if (ubi_io_is_bad(ubi, pnum)) {
 955			ret = UBI_BAD_FASTMAP;
 956			goto free_hdr;
 957		}
 958
 959		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
 960		if (ret && ret != UBI_IO_BITFLIPS) {
 961			ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
 962				i, pnum);
 963			if (ret > 0)
 964				ret = UBI_BAD_FASTMAP;
 965			goto free_hdr;
 966		} else if (ret == UBI_IO_BITFLIPS)
 967			fm->to_be_tortured[i] = 1;
 968
 969		image_seq = be32_to_cpu(ech->image_seq);
 970		if (!ubi->image_seq)
 971			ubi->image_seq = image_seq;
 972
 973		/*
 974		 * Older UBI implementations have image_seq set to zero, so
 975		 * we shouldn't fail if image_seq == 0.
 976		 */
 977		if (image_seq && (image_seq != ubi->image_seq)) {
 978			ubi_err("wrong image seq:%d instead of %d",
 979				be32_to_cpu(ech->image_seq), ubi->image_seq);
 980			ret = UBI_BAD_FASTMAP;
 981			goto free_hdr;
 982		}
 983
 984		ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
 985		if (ret && ret != UBI_IO_BITFLIPS) {
 986			ubi_err("unable to read fastmap block# %i (PEB: %i)",
 987				i, pnum);
 988			goto free_hdr;
 989		}
 990
 991		if (i == 0) {
 992			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
 993				ubi_err("bad fastmap anchor vol_id: 0x%x," \
 994					" expected: 0x%x",
 995					be32_to_cpu(vh->vol_id),
 996					UBI_FM_SB_VOLUME_ID);
 997				ret = UBI_BAD_FASTMAP;
 998				goto free_hdr;
 999			}
1000		} else {
1001			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
1002				ubi_err("bad fastmap data vol_id: 0x%x," \
1003					" expected: 0x%x",
1004					be32_to_cpu(vh->vol_id),
1005					UBI_FM_DATA_VOLUME_ID);
1006				ret = UBI_BAD_FASTMAP;
1007				goto free_hdr;
1008			}
1009		}
1010
1011		if (sqnum < be64_to_cpu(vh->sqnum))
1012			sqnum = be64_to_cpu(vh->sqnum);
1013
1014		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
1015				  ubi->leb_start, ubi->leb_size);
1016		if (ret && ret != UBI_IO_BITFLIPS) {
1017			ubi_err("unable to read fastmap block# %i (PEB: %i, " \
1018				"err: %i)", i, pnum, ret);
1019			goto free_hdr;
1020		}
1021	}
1022
1023	kfree(fmsb);
1024	fmsb = NULL;
1025
1026	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
1027	tmp_crc = be32_to_cpu(fmsb2->data_crc);
1028	fmsb2->data_crc = 0;
1029	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
1030	if (crc != tmp_crc) {
1031		ubi_err("fastmap data CRC is invalid");
1032		ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
1033		ret = UBI_BAD_FASTMAP;
1034		goto free_hdr;
1035	}
1036
1037	fmsb2->sqnum = sqnum;
1038
1039	fm->used_blocks = used_blocks;
1040
1041	ret = ubi_attach_fastmap(ubi, ai, fm);
1042	if (ret) {
1043		if (ret > 0)
1044			ret = UBI_BAD_FASTMAP;
1045		goto free_hdr;
1046	}
1047
1048	for (i = 0; i < used_blocks; i++) {
1049		struct ubi_wl_entry *e;
1050
1051		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1052		if (!e) {
1053			while (i--)
1054				kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);
1055
1056			ret = -ENOMEM;
1057			goto free_hdr;
1058		}
1059
1060		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
1061		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
1062		fm->e[i] = e;
1063	}
1064
1065	ubi->fm = fm;
1066	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
1067	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
1068	ubi_msg("attached by fastmap");
1069	ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size);
1070	ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
1071	ubi->fm_disabled = 0;
1072
1073	ubi_free_vid_hdr(ubi, vh);
1074	kfree(ech);
1075out:
1076	mutex_unlock(&ubi->fm_mutex);
1077	if (ret == UBI_BAD_FASTMAP)
1078		ubi_err("Attach by fastmap failed, doing a full scan!");
1079	return ret;
1080
1081free_hdr:
1082	ubi_free_vid_hdr(ubi, vh);
1083	kfree(ech);
1084free_fm_sb:
1085	kfree(fmsb);
1086	kfree(fm);
1087	goto out;
1088}
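/*
 * Editor's usage sketch (hedged; the real call site lives in UBI's
 * attach code and the cleanup steps named here are illustrative):
 * callers treat positive return codes as "fall back to a full scan"
 * and only negative ones as fatal, roughly:
 *
 *	err = ubi_scan_fastmap(ubi, ai, fm_anchor);
 *	if (err > 0) {
 *		if (err != UBI_NO_FASTMAP)
 *			...destroy the partial attach info...
 *		...do a full scan of all PEBs instead...
 *	} else if (err < 0) {
 *		...propagate the error...
 *	}
 */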
1089
1090/**
1091 * ubi_write_fastmap - writes a fastmap.
1092 * @ubi: UBI device object
1093 * @new_fm: the fastmap to be written
1094 *
1095 * Returns 0 on success, < 0 indicates an internal error.
1096 */
1097static int ubi_write_fastmap(struct ubi_device *ubi,
1098			     struct ubi_fastmap_layout *new_fm)
1099{
1100	size_t fm_pos = 0;
1101	void *fm_raw;
1102	struct ubi_fm_sb *fmsb;
1103	struct ubi_fm_hdr *fmh;
1104	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
1105	struct ubi_fm_ec *fec;
1106	struct ubi_fm_volhdr *fvh;
1107	struct ubi_fm_eba *feba;
1108	struct rb_node *node;
1109	struct ubi_wl_entry *wl_e;
1110	struct ubi_volume *vol;
1111	struct ubi_vid_hdr *avhdr, *dvhdr;
1112	struct ubi_work *ubi_wrk;
1113	int ret, i, j, free_peb_count, used_peb_count, vol_count;
1114	int scrub_peb_count, erase_peb_count;
1115
1116	fm_raw = ubi->fm_buf;
1117	memset(ubi->fm_buf, 0, ubi->fm_size);
1118
1119	avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
1120	if (!avhdr) {
1121		ret = -ENOMEM;
1122		goto out;
1123	}
1124
1125	dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
1126	if (!dvhdr) {
1127		ret = -ENOMEM;
1128		goto out_kfree;
1129	}
1130
1131	spin_lock(&ubi->volumes_lock);
1132	spin_lock(&ubi->wl_lock);
1133
1134	fmsb = (struct ubi_fm_sb *)fm_raw;
1135	fm_pos += sizeof(*fmsb);
1136	ubi_assert(fm_pos <= ubi->fm_size);
1137
1138	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
1139	fm_pos += sizeof(*fmh);
1140	ubi_assert(fm_pos <= ubi->fm_size);
1141
1142	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
1143	fmsb->version = UBI_FM_FMT_VERSION;
1144	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
1145	/* the max sqnum will be filled in while *reading* the fastmap */
1146	fmsb->sqnum = 0;
1147
1148	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
1149	free_peb_count = 0;
1150	used_peb_count = 0;
1151	scrub_peb_count = 0;
1152	erase_peb_count = 0;
1153	vol_count = 0;
1154
1155	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1156	fm_pos += sizeof(*fmpl1);
1157	fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1158	fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
1159	fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);
1160
1161	for (i = 0; i < ubi->fm_pool.size; i++)
1162		fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
1163
1164	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1165	fm_pos += sizeof(*fmpl2);
1166	fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1167	fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
1168	fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
1169
1170	for (i = 0; i < ubi->fm_wl_pool.size; i++)
1171		fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
1172
1173	for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
1174		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1175		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1176
1177		fec->pnum = cpu_to_be32(wl_e->pnum);
1178		fec->ec = cpu_to_be32(wl_e->ec);
1179
1180		free_peb_count++;
1181		fm_pos += sizeof(*fec);
1182		ubi_assert(fm_pos <= ubi->fm_size);
1183	}
1184	fmh->free_peb_count = cpu_to_be32(free_peb_count);
1185
1186	for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
1187		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1188		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1189
1190		fec->pnum = cpu_to_be32(wl_e->pnum);
1191		fec->ec = cpu_to_be32(wl_e->ec);
1192
1193		used_peb_count++;
1194		fm_pos += sizeof(*fec);
1195		ubi_assert(fm_pos <= ubi->fm_size);
1196	}
1197	fmh->used_peb_count = cpu_to_be32(used_peb_count);
1198
1199	for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
1200		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1201		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1202
1203		fec->pnum = cpu_to_be32(wl_e->pnum);
1204		fec->ec = cpu_to_be32(wl_e->ec);
1205
1206		scrub_peb_count++;
1207		fm_pos += sizeof(*fec);
1208		ubi_assert(fm_pos <= ubi->fm_size);
1209	}
1210	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
1211
1212
1213	list_for_each_entry(ubi_wrk, &ubi->works, list) {
1214		if (ubi_is_erase_work(ubi_wrk)) {
1215			wl_e = ubi_wrk->e;
1216			ubi_assert(wl_e);
1217
1218			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1219
1220			fec->pnum = cpu_to_be32(wl_e->pnum);
1221			fec->ec = cpu_to_be32(wl_e->ec);
1222
1223			erase_peb_count++;
1224			fm_pos += sizeof(*fec);
1225			ubi_assert(fm_pos <= ubi->fm_size);
1226		}
1227	}
1228	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
1229
1230	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
1231		vol = ubi->volumes[i];
1232
1233		if (!vol)
1234			continue;
1235
1236		vol_count++;
1237
1238		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
1239		fm_pos += sizeof(*fvh);
1240		ubi_assert(fm_pos <= ubi->fm_size);
1241
1242		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
1243		fvh->vol_id = cpu_to_be32(vol->vol_id);
1244		fvh->vol_type = vol->vol_type;
1245		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
1246		fvh->data_pad = cpu_to_be32(vol->data_pad);
1247		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
1248
1249		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
1250			vol->vol_type == UBI_STATIC_VOLUME);
1251
1252		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
1253		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
1254		ubi_assert(fm_pos <= ubi->fm_size);
1255
1256		for (j = 0; j < vol->reserved_pebs; j++)
1257			feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
1258
1259		feba->reserved_pebs = cpu_to_be32(j);
1260		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
1261	}
1262	fmh->vol_count = cpu_to_be32(vol_count);
1263	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
1264
1265	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1266	avhdr->lnum = 0;
1267
1268	spin_unlock(&ubi->wl_lock);
1269	spin_unlock(&ubi->volumes_lock);
1270
1271	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
1272	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
1273	if (ret) {
1274		ubi_err("unable to write vid_hdr to fastmap SB!");
1275		goto out_kfree;
1276	}
1277
1278	for (i = 0; i < new_fm->used_blocks; i++) {
1279		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
1280		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
1281	}
1282
1283	fmsb->data_crc = 0;
1284	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
1285					   ubi->fm_size));
1286
1287	for (i = 1; i < new_fm->used_blocks; i++) {
1288		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1289		dvhdr->lnum = cpu_to_be32(i);
1290		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
1291			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
1292		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
1293		if (ret) {
1294			ubi_err("unable to write vid_hdr to PEB %i!",
1295				new_fm->e[i]->pnum);
1296			goto out_kfree;
1297		}
1298	}
1299
1300	for (i = 0; i < new_fm->used_blocks; i++) {
1301		ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
1302			new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
1303		if (ret) {
1304			ubi_err("unable to write fastmap to PEB %i!",
1305				new_fm->e[i]->pnum);
1306			goto out_kfree;
1307		}
1308	}
1309
1310	ubi_assert(new_fm);
1311	ubi->fm = new_fm;
1312
1313	dbg_bld("fastmap written!");
1314
1315out_kfree:
1316	ubi_free_vid_hdr(ubi, avhdr);
1317	ubi_free_vid_hdr(ubi, dvhdr);
1318out:
1319	return ret;
1320}
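/*
 * Editor's note on the CRC handshake above (a sketch mirroring the
 * reader in ubi_scan_fastmap()): data_crc is zeroed before crc32()
 * runs, so the stored checksum never covers itself.  The reader saves
 * and zeroes the field the same way before verifying:
 *
 *	tmp_crc = be32_to_cpu(fmsb->data_crc);
 *	fmsb->data_crc = 0;
 *	if (crc32(UBI_CRC32_INIT, fm_raw, fm_size) != tmp_crc)
 *		...the fastmap is bad...
 */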
1321
1322/**
1323 * erase_block - Manually erase a PEB.
1324 * @ubi: UBI device object
1325 * @pnum: PEB to be erased
1326 *
1327 * Returns the new EC value on success, < 0 indicates an internal error.
1328 */
1329static int erase_block(struct ubi_device *ubi, int pnum)
1330{
1331	int ret;
1332	struct ubi_ec_hdr *ec_hdr;
1333	long long ec;
1334
1335	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1336	if (!ec_hdr)
1337		return -ENOMEM;
1338
1339	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1340	if (ret < 0)
1341		goto out;
1342	else if (ret && ret != UBI_IO_BITFLIPS) {
1343		ret = -EINVAL;
1344		goto out;
1345	}
1346
1347	ret = ubi_io_sync_erase(ubi, pnum, 0);
1348	if (ret < 0)
1349		goto out;
1350
1351	ec = be64_to_cpu(ec_hdr->ec);
1352	ec += ret;
1353	if (ec > UBI_MAX_ERASECOUNTER) {
1354		ret = -EINVAL;
1355		goto out;
1356	}
1357
1358	ec_hdr->ec = cpu_to_be64(ec);
1359	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
1360	if (ret < 0)
1361		goto out;
1362
1363	ret = ec;
1364out:
1365	kfree(ec_hdr);
1366	return ret;
1367}
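/*
 * Editor's worked example for the EC update above: ubi_io_sync_erase()
 * returns the number of erase operations it performed (more than one
 * when torturing), so a PEB whose stored EC header reads 41 and which
 * is erased once comes back as
 *
 *	ec = 41 + 1 = 42
 *
 * The new value is written into a fresh EC header and also returned to
 * the caller; anything above UBI_MAX_ERASECOUNTER is rejected.
 */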
1368
1369/**
1370 * invalidate_fastmap - destroys a fastmap.
1371 * @ubi: UBI device object
1372 * @fm: the fastmap to be destroyed
1373 *
1374 * Returns 0 on success, < 0 indicates an internal error.
1375 */
1376static int invalidate_fastmap(struct ubi_device *ubi,
1377			      struct ubi_fastmap_layout *fm)
1378{
1379	int ret;
1380	struct ubi_vid_hdr *vh;
1381
1382	ret = erase_block(ubi, fm->e[0]->pnum);
1383	if (ret < 0)
1384		return ret;
1385
1386	vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
1387	if (!vh)
1388		return -ENOMEM;
1389
1390	/* deleting the current fastmap SB is not enough, an old SB may exist,
1391	 * so create a (corrupted) SB such that fastmap will find it and fall
1392	 * back to scanning mode in any case */
1393	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1394	ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);
1395
1396	return ret;
1397}
1398
1399/**
1400 * ubi_update_fastmap - will be called by UBI if a volume changes or
1401 * a fastmap pool becomes full.
1402 * @ubi: UBI device object
1403 *
1404 * Returns 0 on success, < 0 indicates an internal error.
1405 */
1406int ubi_update_fastmap(struct ubi_device *ubi)
1407{
1408	int ret, i;
1409	struct ubi_fastmap_layout *new_fm, *old_fm;
1410	struct ubi_wl_entry *tmp_e;
1411
1412	mutex_lock(&ubi->fm_mutex);
1413
1414	ubi_refill_pools(ubi);
1415
1416	if (ubi->ro_mode || ubi->fm_disabled) {
1417		mutex_unlock(&ubi->fm_mutex);
1418		return 0;
1419	}
1420
1421	ret = ubi_ensure_anchor_pebs(ubi);
1422	if (ret) {
1423		mutex_unlock(&ubi->fm_mutex);
1424		return ret;
1425	}
1426
1427	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
1428	if (!new_fm) {
1429		mutex_unlock(&ubi->fm_mutex);
1430		return -ENOMEM;
1431	}
1432
1433	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
1434
1435	for (i = 0; i < new_fm->used_blocks; i++) {
1436		new_fm->e[i] = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1437		if (!new_fm->e[i]) {
1438			while (i--)
1439				kmem_cache_free(ubi_wl_entry_slab, new_fm->e[i]);
1440
1441			kfree(new_fm);
1442			mutex_unlock(&ubi->fm_mutex);
1443			return -ENOMEM;
1444		}
1445	}
1446
1447	old_fm = ubi->fm;
1448	ubi->fm = NULL;
1449
1450	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
1451		ubi_err("fastmap too large");
1452		ret = -ENOSPC;
1453		goto err;
1454	}
1455
1456	for (i = 1; i < new_fm->used_blocks; i++) {
1457		spin_lock(&ubi->wl_lock);
1458		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
1459		spin_unlock(&ubi->wl_lock);
1460
1461		if (!tmp_e && !old_fm) {
1462			int j;
1463			ubi_err("could not get any free erase block");
1464
1465			for (j = 1; j < i; j++)
1466				ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
1467
1468			ret = -ENOSPC;
1469			goto err;
1470		} else if (!tmp_e && old_fm) {
1471			ret = erase_block(ubi, old_fm->e[i]->pnum);
1472			if (ret < 0) {
1473				int j;
1474
1475				for (j = 1; j < i; j++)
1476					ubi_wl_put_fm_peb(ubi, new_fm->e[j],
1477							  j, 0);
1478
1479				ubi_err("could not erase old fastmap PEB");
1480				goto err;
1481			}
1482
1483			new_fm->e[i]->pnum = old_fm->e[i]->pnum;
1484			new_fm->e[i]->ec = old_fm->e[i]->ec;
1485		} else {
1486			new_fm->e[i]->pnum = tmp_e->pnum;
1487			new_fm->e[i]->ec = tmp_e->ec;
1488
1489			if (old_fm)
1490				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1491						  old_fm->to_be_tortured[i]);
1492		}
1493	}
1494
1495	spin_lock(&ubi->wl_lock);
1496	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
1497	spin_unlock(&ubi->wl_lock);
1498
1499	if (old_fm) {
1500		/* no fresh anchor PEB was found, reuse the old one */
1501		if (!tmp_e) {
1502			ret = erase_block(ubi, old_fm->e[0]->pnum);
1503			if (ret < 0) {
1504				int i;
1505				ubi_err("could not erase old anchor PEB");
1506
1507				for (i = 1; i < new_fm->used_blocks; i++)
1508					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
1509							  i, 0);
1510				goto err;
1511			}
1512
1513			new_fm->e[0]->pnum = old_fm->e[0]->pnum;
1514			new_fm->e[0]->ec = ret;
1515		} else {
1516			/* we've got a new anchor PEB, return the old one */
1517			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
1518					  old_fm->to_be_tortured[0]);
1519
1520			new_fm->e[0]->pnum = tmp_e->pnum;
1521			new_fm->e[0]->ec = tmp_e->ec;
1522		}
1523	} else {
1524		if (!tmp_e) {
1525			int i;
1526			ubi_err("could not find any anchor PEB");
1527
1528			for (i = 1; i < new_fm->used_blocks; i++)
1529				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
1530
1531			ret = -ENOSPC;
1532			goto err;
1533		}
1534
1535		new_fm->e[0]->pnum = tmp_e->pnum;
1536		new_fm->e[0]->ec = tmp_e->ec;
1537	}
1538
1539	down_write(&ubi->work_sem);
1540	down_write(&ubi->fm_sem);
1541	ret = ubi_write_fastmap(ubi, new_fm);
1542	up_write(&ubi->fm_sem);
1543	up_write(&ubi->work_sem);
1544
1545	if (ret)
1546		goto err;
1547
1548out_unlock:
1549	mutex_unlock(&ubi->fm_mutex);
1550	kfree(old_fm);
1551	return ret;
1552
1553err:
1554	kfree(new_fm);
1555
1556	ubi_warn("Unable to write new fastmap, err=%i", ret);
1557
1558	ret = 0;
1559	if (old_fm) {
1560		ret = invalidate_fastmap(ubi, old_fm);
1561		if (ret < 0)
1562			ubi_err("Unable to invalidate current fastmap!");
1563		else if (ret)
1564			ret = 0;
1565	}
1566	goto out_unlock;
1567}