1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2012 Linutronix GmbH
4 * Copyright (c) 2014 sigma star gmbh
5 * Author: Richard Weinberger <richard@nod.at>
6 */
7
8#include <linux/crc32.h>
9#include <linux/bitmap.h>
10#include "ubi.h"
11
12/**
13 * init_seen - allocate memory for the seen logic, used for debugging.
14 * @ubi: UBI device description object
15 */
16static inline unsigned long *init_seen(struct ubi_device *ubi)
17{
18 unsigned long *ret;
19
20 if (!ubi_dbg_chk_fastmap(ubi))
21 return NULL;
22
23 ret = bitmap_zalloc(ubi->peb_count, GFP_NOFS);
24 if (!ret)
25 return ERR_PTR(-ENOMEM);
26
27 return ret;
28}
29
30/**
31 * free_seen - free the seen logic bitmap.
32 * @seen: bitmap of @ubi->peb_count bits
33 */
34static inline void free_seen(unsigned long *seen)
35{
36 bitmap_free(seen);
37}
38
39/**
40 * set_seen - mark a PEB as seen.
41 * @ubi: UBI device description object
42 * @pnum: The PEB to be marked as seen
43 * @seen: bitmap of @ubi->peb_count bits
44 */
45static inline void set_seen(struct ubi_device *ubi, int pnum, unsigned long *seen)
46{
47 if (!ubi_dbg_chk_fastmap(ubi) || !seen)
48 return;
49
50 set_bit(pnum, seen);
51}
52
53/**
54 * self_check_seen - check whether all PEBs have been seen by fastmap.
55 * @ubi: UBI device description object
56 * @seen: bitmap of @ubi->peb_count bits
57 */
58static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
59{
60 int pnum, ret = 0;
61
62 if (!ubi_dbg_chk_fastmap(ubi) || !seen)
63 return 0;
64
65 for (pnum = 0; pnum < ubi->peb_count; pnum++) {
66 if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
67 ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
68 ret = -EINVAL;
69 }
70 }
71
72 return ret;
73}
74
75/**
76 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
77 * @ubi: UBI device description object
78 */
79size_t ubi_calc_fm_size(struct ubi_device *ubi)
80{
81 size_t size;
82
83 size = sizeof(struct ubi_fm_sb) +
84 sizeof(struct ubi_fm_hdr) +
85 sizeof(struct ubi_fm_scan_pool) +
86 sizeof(struct ubi_fm_scan_pool) +
87 (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
88 (sizeof(struct ubi_fm_eba) +
89 (ubi->peb_count * sizeof(__be32))) +
90 sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
91 return roundup(size, ubi->leb_size);
92}
93
94
95/**
96 * new_fm_vbuf - allocate a new volume header for fastmap usage.
97 * @ubi: UBI device description object
98 * @vol_id: the VID of the new header
99 *
100 * Returns a new struct ubi_vid_io_buf on success.
101 * NULL indicates out of memory.
102 */
103static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
104{
105 struct ubi_vid_io_buf *new;
106 struct ubi_vid_hdr *vh;
107
108 new = ubi_alloc_vid_buf(ubi, GFP_NOFS);
109 if (!new)
110 goto out;
111
112 vh = ubi_get_vid_hdr(new);
113 vh->vol_type = UBI_VID_DYNAMIC;
114 vh->vol_id = cpu_to_be32(vol_id);
115
116 /* UBI implementations without fastmap support have to delete the
117 * fastmap.
118 */
119 vh->compat = UBI_COMPAT_DELETE;
120
121out:
122 return new;
123}
124
125/**
126 * add_aeb - create and add an attach erase block to a given list.
127 * @ai: UBI attach info object
128 * @list: the target list
129 * @pnum: PEB number of the new attach erase block
130 * @ec: erase counter of the new LEB
131 * @scrub: scrub this PEB after attaching
132 *
133 * Returns 0 on success, < 0 indicates an internal error.
134 */
135static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
136 int pnum, int ec, int scrub)
137{
138 struct ubi_ainf_peb *aeb;
139
140 aeb = ubi_alloc_aeb(ai, pnum, ec);
141 if (!aeb)
142 return -ENOMEM;
143
144 aeb->lnum = -1;
145 aeb->scrub = scrub;
146 aeb->copy_flag = aeb->sqnum = 0;
147
148 ai->ec_sum += aeb->ec;
149 ai->ec_count++;
150
151 if (ai->max_ec < aeb->ec)
152 ai->max_ec = aeb->ec;
153
154 if (ai->min_ec > aeb->ec)
155 ai->min_ec = aeb->ec;
156
157 list_add_tail(&aeb->u.list, list);
158
159 return 0;
160}
161
162/**
163 * add_vol - create and add a new volume to ubi_attach_info.
164 * @ai: ubi_attach_info object
165 * @vol_id: VID of the new volume
166 * @used_ebs: number of used EBs
167 * @data_pad: data padding value of the new volume
168 * @vol_type: volume type
169 * @last_eb_bytes: number of bytes in the last LEB
170 *
171 * Returns the new struct ubi_ainf_volume on success.
172 * An ERR_PTR value indicates an error.
173 */
174static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
175 int used_ebs, int data_pad, u8 vol_type,
176 int last_eb_bytes)
177{
178 struct ubi_ainf_volume *av;
179
180 av = ubi_add_av(ai, vol_id);
181 if (IS_ERR(av))
182 return av;
183
184 av->data_pad = data_pad;
185 av->last_data_size = last_eb_bytes;
186 av->compat = 0;
187 av->vol_type = vol_type;
188 if (av->vol_type == UBI_STATIC_VOLUME)
189 av->used_ebs = used_ebs;
190
191 dbg_bld("found volume (ID %i)", vol_id);
192 return av;
193}
194
195/**
196 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
197 * from its original list.
198 * @ai: ubi_attach_info object
199 * @aeb: the SEB to be assigned
200 * @av: target scan volume
201 */
202static void assign_aeb_to_av(struct ubi_attach_info *ai,
203 struct ubi_ainf_peb *aeb,
204 struct ubi_ainf_volume *av)
205{
206 struct ubi_ainf_peb *tmp_aeb;
207 struct rb_node **p = &av->root.rb_node, *parent = NULL;
208
209 while (*p) {
210 parent = *p;
211
212 tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
213 if (aeb->lnum != tmp_aeb->lnum) {
214 if (aeb->lnum < tmp_aeb->lnum)
215 p = &(*p)->rb_left;
216 else
217 p = &(*p)->rb_right;
218
219 continue;
220 } else
221 break;
222 }
223
224 list_del(&aeb->u.list);
225 av->leb_count++;
226
227 rb_link_node(&aeb->u.rb, parent, p);
228 rb_insert_color(&aeb->u.rb, &av->root);
229}
230
231/**
232 * update_vol - inserts or updates a LEB which was found in a pool.
233 * @ubi: the UBI device object
234 * @ai: attach info object
235 * @av: the volume this LEB belongs to
236 * @new_vh: the volume header derived from new_aeb
237 * @new_aeb: the AEB to be examined
238 *
239 * Returns 0 on success, < 0 indicates an internal error.
240 */
241static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
242 struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
243 struct ubi_ainf_peb *new_aeb)
244{
245 struct rb_node **p = &av->root.rb_node, *parent = NULL;
246 struct ubi_ainf_peb *aeb, *victim;
247 int cmp_res;
248
249 while (*p) {
250 parent = *p;
251 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
252
253 if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
254 if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
255 p = &(*p)->rb_left;
256 else
257 p = &(*p)->rb_right;
258
259 continue;
260 }
261
262 /* This case can happen if the fastmap gets written
263 * because of a volume change (creation, deletion, ..).
264 * Then a PEB can be within the persistent EBA and the pool.
265 */
266 if (aeb->pnum == new_aeb->pnum) {
267 ubi_assert(aeb->lnum == new_aeb->lnum);
268 ubi_free_aeb(ai, new_aeb);
269
270 return 0;
271 }
272
273 cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
274 if (cmp_res < 0)
275 return cmp_res;
276
277 /* new_aeb is newer */
278 if (cmp_res & 1) {
279 victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
280 if (!victim)
281 return -ENOMEM;
282
283 list_add_tail(&victim->u.list, &ai->erase);
284
285 if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
286 av->last_data_size =
287 be32_to_cpu(new_vh->data_size);
288
289 dbg_bld("vol %i: AEB %i's PEB %i is the newer",
290 av->vol_id, aeb->lnum, new_aeb->pnum);
291
292 aeb->ec = new_aeb->ec;
293 aeb->pnum = new_aeb->pnum;
294 aeb->copy_flag = new_vh->copy_flag;
295 aeb->scrub = new_aeb->scrub;
296 aeb->sqnum = new_aeb->sqnum;
297 ubi_free_aeb(ai, new_aeb);
298
299 /* new_aeb is older */
300 } else {
301 dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
302 av->vol_id, aeb->lnum, new_aeb->pnum);
303 list_add_tail(&new_aeb->u.list, &ai->erase);
304 }
305
306 return 0;
307 }
308 /* This LEB is new, let's add it to the volume */
309
310 if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
311 av->highest_lnum = be32_to_cpu(new_vh->lnum);
312 av->last_data_size = be32_to_cpu(new_vh->data_size);
313 }
314
315 if (av->vol_type == UBI_STATIC_VOLUME)
316 av->used_ebs = be32_to_cpu(new_vh->used_ebs);
317
318 av->leb_count++;
319
320 rb_link_node(&new_aeb->u.rb, parent, p);
321 rb_insert_color(&new_aeb->u.rb, &av->root);
322
323 return 0;
324}
325
326/**
327 * process_pool_aeb - we found a non-empty PEB in a pool.
328 * @ubi: UBI device object
329 * @ai: attach info object
330 * @new_vh: the volume header derived from new_aeb
331 * @new_aeb: the AEB to be examined
332 *
333 * Returns 0 on success, < 0 indicates an internal error.
334 */
335static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
336 struct ubi_vid_hdr *new_vh,
337 struct ubi_ainf_peb *new_aeb)
338{
339 int vol_id = be32_to_cpu(new_vh->vol_id);
340 struct ubi_ainf_volume *av;
341
342 if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) {
343 ubi_free_aeb(ai, new_aeb);
344
345 return 0;
346 }
347
348 /* Find the volume this SEB belongs to */
349 av = ubi_find_av(ai, vol_id);
350 if (!av) {
351 ubi_err(ubi, "orphaned volume in fastmap pool!");
352 ubi_free_aeb(ai, new_aeb);
353 return UBI_BAD_FASTMAP;
354 }
355
356 ubi_assert(vol_id == av->vol_id);
357
358 return update_vol(ubi, ai, av, new_vh, new_aeb);
359}
360
361/**
362 * unmap_peb - unmap a PEB.
363 * If fastmap detects a free PEB in the pool it has to check whether
364 * this PEB has been unmapped after writing the fastmap.
365 *
366 * @ai: UBI attach info object
367 * @pnum: The PEB to be unmapped
368 */
369static void unmap_peb(struct ubi_attach_info *ai, int pnum)
370{
371 struct ubi_ainf_volume *av;
372 struct rb_node *node, *node2;
373 struct ubi_ainf_peb *aeb;
374
375 ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
376 ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
377 if (aeb->pnum == pnum) {
378 rb_erase(&aeb->u.rb, &av->root);
379 av->leb_count--;
380 ubi_free_aeb(ai, aeb);
381 return;
382 }
383 }
384 }
385}
386
387/**
388 * scan_pool - scans a pool for changed (no longer empty) PEBs.
389 * @ubi: UBI device object
390 * @ai: attach info object
391 * @pebs: an array of all PEB numbers in the pool to be scanned
392 * @pool_size: size of the pool (number of entries in @pebs)
393 * @max_sqnum: pointer to the maximal sequence number
394 * @free: list of PEBs which are most likely free (and go into @ai->free)
395 *
396 * Returns 0 on success, UBI_BAD_FASTMAP if the pool is unusable.
397 * < 0 indicates an internal error.
398 */
399static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
400 __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
401 struct list_head *free)
402{
403 struct ubi_vid_io_buf *vb;
404 struct ubi_vid_hdr *vh;
405 struct ubi_ec_hdr *ech;
406 struct ubi_ainf_peb *new_aeb;
407 int i, pnum, err, ret = 0;
408
409 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
410 if (!ech)
411 return -ENOMEM;
412
413 vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
414 if (!vb) {
415 kfree(ech);
416 return -ENOMEM;
417 }
418
419 vh = ubi_get_vid_hdr(vb);
420
421 dbg_bld("scanning fastmap pool: size = %i", pool_size);
422
423 /*
424 * Now scan all PEBs in the pool to find changes which have been made
425 * after the creation of the fastmap
426 */
427 for (i = 0; i < pool_size; i++) {
428 int scrub = 0;
429 int image_seq;
430
431 pnum = be32_to_cpu(pebs[i]);
432
433 if (ubi_io_is_bad(ubi, pnum)) {
434 ubi_err(ubi, "bad PEB in fastmap pool!");
435 ret = UBI_BAD_FASTMAP;
436 goto out;
437 }
438
439 err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
440 if (err && err != UBI_IO_BITFLIPS) {
441 ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
442 pnum, err);
443 ret = err > 0 ? UBI_BAD_FASTMAP : err;
444 goto out;
445 } else if (err == UBI_IO_BITFLIPS)
446 scrub = 1;
447
448 /*
449 * Older UBI implementations have image_seq set to zero, so
450 * we shouldn't fail if image_seq == 0.
451 */
452 image_seq = be32_to_cpu(ech->image_seq);
453
454 if (image_seq && (image_seq != ubi->image_seq)) {
455 ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
456 be32_to_cpu(ech->image_seq), ubi->image_seq);
457 ret = UBI_BAD_FASTMAP;
458 goto out;
459 }
460
461 err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
462 if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
463 unsigned long long ec = be64_to_cpu(ech->ec);
464 unmap_peb(ai, pnum);
465 dbg_bld("Adding PEB to free: %i", pnum);
466
467 if (err == UBI_IO_FF_BITFLIPS)
468 scrub = 1;
469
470 ret = add_aeb(ai, free, pnum, ec, scrub);
471 if (ret)
472 goto out;
473 continue;
474 } else if (err == 0 || err == UBI_IO_BITFLIPS) {
475 dbg_bld("Found non empty PEB:%i in pool", pnum);
476
477 if (err == UBI_IO_BITFLIPS)
478 scrub = 1;
479
480 new_aeb = ubi_alloc_aeb(ai, pnum, be64_to_cpu(ech->ec));
481 if (!new_aeb) {
482 ret = -ENOMEM;
483 goto out;
484 }
485
486 new_aeb->lnum = be32_to_cpu(vh->lnum);
487 new_aeb->sqnum = be64_to_cpu(vh->sqnum);
488 new_aeb->copy_flag = vh->copy_flag;
489 new_aeb->scrub = scrub;
490
491 if (*max_sqnum < new_aeb->sqnum)
492 *max_sqnum = new_aeb->sqnum;
493
494 err = process_pool_aeb(ubi, ai, vh, new_aeb);
495 if (err) {
496 ret = err > 0 ? UBI_BAD_FASTMAP : err;
497 goto out;
498 }
499 } else {
500 /* We are paranoid and fall back to scanning mode */
501 ubi_err(ubi, "fastmap pool PEBs contains damaged PEBs!");
502 ret = err > 0 ? UBI_BAD_FASTMAP : err;
503 goto out;
504 }
505
506 }
507
508out:
509 ubi_free_vid_buf(vb);
510 kfree(ech);
511 return ret;
512}
513
514/**
515 * count_fastmap_pebs - counts the PEBs found by fastmap.
516 * @ai: The UBI attach info object
517 */
518static int count_fastmap_pebs(struct ubi_attach_info *ai)
519{
520 struct ubi_ainf_peb *aeb;
521 struct ubi_ainf_volume *av;
522 struct rb_node *rb1, *rb2;
523 int n = 0;
524
525 list_for_each_entry(aeb, &ai->erase, u.list)
526 n++;
527
528 list_for_each_entry(aeb, &ai->free, u.list)
529 n++;
530
531 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
532 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
533 n++;
534
535 return n;
536}
537
538/**
539 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
540 * @ubi: UBI device object
541 * @ai: UBI attach info object
542 * @fm: the fastmap to be attached
543 *
544 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
545 * < 0 indicates an internal error.
546 */
547static int ubi_attach_fastmap(struct ubi_device *ubi,
548 struct ubi_attach_info *ai,
549 struct ubi_fastmap_layout *fm)
550{
551 struct list_head used, free;
552 struct ubi_ainf_volume *av;
553 struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
554 struct ubi_fm_sb *fmsb;
555 struct ubi_fm_hdr *fmhdr;
556 struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
557 struct ubi_fm_ec *fmec;
558 struct ubi_fm_volhdr *fmvhdr;
559 struct ubi_fm_eba *fm_eba;
560 int ret, i, j, pool_size, wl_pool_size;
561 size_t fm_pos = 0, fm_size = ubi->fm_size;
562 unsigned long long max_sqnum = 0;
563 void *fm_raw = ubi->fm_buf;
564
565 INIT_LIST_HEAD(&used);
566 INIT_LIST_HEAD(&free);
567 ai->min_ec = UBI_MAX_ERASECOUNTER;
568
569 fmsb = (struct ubi_fm_sb *)(fm_raw);
570 ai->max_sqnum = fmsb->sqnum;
571 fm_pos += sizeof(struct ubi_fm_sb);
572 if (fm_pos >= fm_size)
573 goto fail_bad;
574
575 fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
576 fm_pos += sizeof(*fmhdr);
577 if (fm_pos >= fm_size)
578 goto fail_bad;
579
580 if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
581 ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
582 be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
583 goto fail_bad;
584 }
585
586 fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
587 fm_pos += sizeof(*fmpl);
588 if (fm_pos >= fm_size)
589 goto fail_bad;
590 if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
591 ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
592 be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
593 goto fail_bad;
594 }
595
596 fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
597 fm_pos += sizeof(*fmpl_wl);
598 if (fm_pos >= fm_size)
599 goto fail_bad;
600 if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
601 ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
602 be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
603 goto fail_bad;
604 }
605
606 pool_size = be16_to_cpu(fmpl->size);
607 wl_pool_size = be16_to_cpu(fmpl_wl->size);
608 fm->max_pool_size = be16_to_cpu(fmpl->max_size);
609 fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);
610
611 if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
612 ubi_err(ubi, "bad pool size: %i", pool_size);
613 goto fail_bad;
614 }
615
616 if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
617 ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
618 goto fail_bad;
619 }
620
621
622 if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
623 fm->max_pool_size < 0) {
624 ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
625 goto fail_bad;
626 }
627
628 if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
629 fm->max_wl_pool_size < 0) {
630 ubi_err(ubi, "bad maximal WL pool size: %i",
631 fm->max_wl_pool_size);
632 goto fail_bad;
633 }
634
635 /* read EC values from free list */
636 for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
637 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
638 fm_pos += sizeof(*fmec);
639 if (fm_pos >= fm_size)
640 goto fail_bad;
641
642 ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
643 be32_to_cpu(fmec->ec), 0);
644 if (ret)
645 goto fail;
646 }
647
648 /* read EC values from used list */
649 for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
650 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
651 fm_pos += sizeof(*fmec);
652 if (fm_pos >= fm_size)
653 goto fail_bad;
654
655 ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
656 be32_to_cpu(fmec->ec), 0);
657 if (ret)
658 goto fail;
659 }
660
661 /* read EC values from scrub list */
662 for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
663 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
664 fm_pos += sizeof(*fmec);
665 if (fm_pos >= fm_size)
666 goto fail_bad;
667
668 ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
669 be32_to_cpu(fmec->ec), 1);
670 if (ret)
671 goto fail;
672 }
673
674 /* read EC values from erase list */
675 for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
676 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
677 fm_pos += sizeof(*fmec);
678 if (fm_pos >= fm_size)
679 goto fail_bad;
680
681 ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
682 be32_to_cpu(fmec->ec), 1);
683 if (ret)
684 goto fail;
685 }
686
687 ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
688 ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
689
690 /* Iterate over all volumes and read their EBA table */
691 for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
692 fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
693 fm_pos += sizeof(*fmvhdr);
694 if (fm_pos >= fm_size)
695 goto fail_bad;
696
697 if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
698 ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
699 be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
700 goto fail_bad;
701 }
702
703 av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
704 be32_to_cpu(fmvhdr->used_ebs),
705 be32_to_cpu(fmvhdr->data_pad),
706 fmvhdr->vol_type,
707 be32_to_cpu(fmvhdr->last_eb_bytes));
708
709 if (IS_ERR(av)) {
710 if (PTR_ERR(av) == -EEXIST)
711 ubi_err(ubi, "volume (ID %i) already exists",
712 fmvhdr->vol_id);
713
714 goto fail_bad;
715 }
716
717 ai->vols_found++;
718 if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
719 ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
720
721 fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
722 fm_pos += sizeof(*fm_eba);
723 fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
724 if (fm_pos >= fm_size)
725 goto fail_bad;
726
727 if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
728 ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
729 be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
730 goto fail_bad;
731 }
732
733 for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
734 int pnum = be32_to_cpu(fm_eba->pnum[j]);
735
736 if (pnum < 0)
737 continue;
738
739 aeb = NULL;
740 list_for_each_entry(tmp_aeb, &used, u.list) {
741 if (tmp_aeb->pnum == pnum) {
742 aeb = tmp_aeb;
743 break;
744 }
745 }
746
747 if (!aeb) {
748 ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
749 goto fail_bad;
750 }
751
752 aeb->lnum = j;
753
754 if (av->highest_lnum <= aeb->lnum)
755 av->highest_lnum = aeb->lnum;
756
757 assign_aeb_to_av(ai, aeb, av);
758
759 dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
760 aeb->pnum, aeb->lnum, av->vol_id);
761 }
762 }
763
764 ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
765 if (ret)
766 goto fail;
767
768 ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
769 if (ret)
770 goto fail;
771
772 if (max_sqnum > ai->max_sqnum)
773 ai->max_sqnum = max_sqnum;
774
775 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
776 list_move_tail(&tmp_aeb->u.list, &ai->free);
777
778 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
779 list_move_tail(&tmp_aeb->u.list, &ai->erase);
780
781 ubi_assert(list_empty(&free));
782
783 /*
784 * If fastmap is leaking PEBs (must not happen), raise a
785 * fat warning and fall back to scanning mode.
786 * We do this here because in ubi_wl_init() it's too late
787 * and we cannot fall back to scanning.
788 */
789 if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
790 ai->bad_peb_count - fm->used_blocks))
791 goto fail_bad;
792
793 return 0;
794
795fail_bad:
796 ret = UBI_BAD_FASTMAP;
797fail:
798 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
799 list_del(&tmp_aeb->u.list);
800 ubi_free_aeb(ai, tmp_aeb);
801 }
802 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
803 list_del(&tmp_aeb->u.list);
804 ubi_free_aeb(ai, tmp_aeb);
805 }
806
807 return ret;
808}
809
810/**
811 * find_fm_anchor - find the most recent Fastmap superblock (anchor)
812 * @ai: UBI attach info to be filled
813 */
814static int find_fm_anchor(struct ubi_attach_info *ai)
815{
816 int ret = -1;
817 struct ubi_ainf_peb *aeb;
818 unsigned long long max_sqnum = 0;
819
820 list_for_each_entry(aeb, &ai->fastmap, u.list) {
821 if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
822 max_sqnum = aeb->sqnum;
823 ret = aeb->pnum;
824 }
825 }
826
827 return ret;
828}
829
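/**
 * clone_aeb - duplicate an attach erase block descriptor.
 * @ai: UBI attach info object the copy is allocated from
 * @old: the attach erase block to be copied
 *
 * Returns the newly allocated copy on success, NULL indicates out of memory.
 */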
830static struct ubi_ainf_peb *clone_aeb(struct ubi_attach_info *ai,
831 struct ubi_ainf_peb *old)
832{
833 struct ubi_ainf_peb *new;
834
835 new = ubi_alloc_aeb(ai, old->pnum, old->ec);
836 if (!new)
837 return NULL;
838
839 new->vol_id = old->vol_id;
840 new->sqnum = old->sqnum;
841 new->lnum = old->lnum;
842 new->scrub = old->scrub;
843 new->copy_flag = old->copy_flag;
844
845 return new;
846}
847
848/**
849 * ubi_scan_fastmap - scan the fastmap.
850 * @ubi: UBI device object
851 * @ai: UBI attach info to be filled
852 * @scan_ai: UBI attach info from the first 64 PEBs,
853 * used to find the most recent Fastmap data structure
854 *
855 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
856 * UBI_BAD_FASTMAP if one was found but is not usable.
857 * < 0 indicates an internal error.
858 */
859int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
860 struct ubi_attach_info *scan_ai)
861{
862 struct ubi_fm_sb *fmsb, *fmsb2;
863 struct ubi_vid_io_buf *vb;
864 struct ubi_vid_hdr *vh;
865 struct ubi_ec_hdr *ech;
866 struct ubi_fastmap_layout *fm;
867 struct ubi_ainf_peb *aeb;
868 int i, used_blocks, pnum, fm_anchor, ret = 0;
869 size_t fm_size;
870 __be32 crc, tmp_crc;
871 unsigned long long sqnum = 0;
872
873 fm_anchor = find_fm_anchor(scan_ai);
874 if (fm_anchor < 0)
875 return UBI_NO_FASTMAP;
876
877 /* Copy all (possible) fastmap blocks into our new attach structure. */
878 list_for_each_entry(aeb, &scan_ai->fastmap, u.list) {
879 struct ubi_ainf_peb *new;
880
881 new = clone_aeb(ai, aeb);
882 if (!new)
883 return -ENOMEM;
884
885 list_add(&new->u.list, &ai->fastmap);
886 }
887
888 down_write(&ubi->fm_protect);
889 memset(ubi->fm_buf, 0, ubi->fm_size);
890
891 fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
892 if (!fmsb) {
893 ret = -ENOMEM;
894 goto out;
895 }
896
897 fm = kzalloc(sizeof(*fm), GFP_KERNEL);
898 if (!fm) {
899 ret = -ENOMEM;
900 kfree(fmsb);
901 goto out;
902 }
903
904 ret = ubi_io_read_data(ubi, fmsb, fm_anchor, 0, sizeof(*fmsb));
905 if (ret && ret != UBI_IO_BITFLIPS)
906 goto free_fm_sb;
907 else if (ret == UBI_IO_BITFLIPS)
908 fm->to_be_tortured[0] = 1;
909
910 if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
911 ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
912 be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
913 ret = UBI_BAD_FASTMAP;
914 goto free_fm_sb;
915 }
916
917 if (fmsb->version != UBI_FM_FMT_VERSION) {
918 ubi_err(ubi, "bad fastmap version: %i, expected: %i",
919 fmsb->version, UBI_FM_FMT_VERSION);
920 ret = UBI_BAD_FASTMAP;
921 goto free_fm_sb;
922 }
923
924 used_blocks = be32_to_cpu(fmsb->used_blocks);
925 if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
926 ubi_err(ubi, "number of fastmap blocks is invalid: %i",
927 used_blocks);
928 ret = UBI_BAD_FASTMAP;
929 goto free_fm_sb;
930 }
931
932 fm_size = ubi->leb_size * used_blocks;
933 if (fm_size != ubi->fm_size) {
934 ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
935 fm_size, ubi->fm_size);
936 ret = UBI_BAD_FASTMAP;
937 goto free_fm_sb;
938 }
939
940 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
941 if (!ech) {
942 ret = -ENOMEM;
943 goto free_fm_sb;
944 }
945
946 vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
947 if (!vb) {
948 ret = -ENOMEM;
949 goto free_hdr;
950 }
951
952 vh = ubi_get_vid_hdr(vb);
953
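	/*
	 * Read and sanity-check every PEB holding a part of the fastmap:
	 * EC header, image sequence number, VID header and finally the
	 * fastmap data itself.
	 */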
954 for (i = 0; i < used_blocks; i++) {
955 int image_seq;
956
957 pnum = be32_to_cpu(fmsb->block_loc[i]);
958
959 if (ubi_io_is_bad(ubi, pnum)) {
960 ret = UBI_BAD_FASTMAP;
961 goto free_hdr;
962 }
963
964 if (i == 0 && pnum != fm_anchor) {
965 ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
966 pnum, fm_anchor);
967 ret = UBI_BAD_FASTMAP;
968 goto free_hdr;
969 }
970
971 ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
972 if (ret && ret != UBI_IO_BITFLIPS) {
973 ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
974 i, pnum);
975 if (ret > 0)
976 ret = UBI_BAD_FASTMAP;
977 goto free_hdr;
978 } else if (ret == UBI_IO_BITFLIPS)
979 fm->to_be_tortured[i] = 1;
980
981 image_seq = be32_to_cpu(ech->image_seq);
982 if (!ubi->image_seq)
983 ubi->image_seq = image_seq;
984
985 /*
986 * Older UBI implementations have image_seq set to zero, so
987 * we shouldn't fail if image_seq == 0.
988 */
989 if (image_seq && (image_seq != ubi->image_seq)) {
990 ubi_err(ubi, "wrong image seq:%d instead of %d",
991 be32_to_cpu(ech->image_seq), ubi->image_seq);
992 ret = UBI_BAD_FASTMAP;
993 goto free_hdr;
994 }
995
996 ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
997 if (ret && ret != UBI_IO_BITFLIPS) {
998 ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
999 i, pnum);
1000 goto free_hdr;
1001 }
1002
1003 if (i == 0) {
1004 if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
1005 ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
1006 be32_to_cpu(vh->vol_id),
1007 UBI_FM_SB_VOLUME_ID);
1008 ret = UBI_BAD_FASTMAP;
1009 goto free_hdr;
1010 }
1011 } else {
1012 if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
1013 ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
1014 be32_to_cpu(vh->vol_id),
1015 UBI_FM_DATA_VOLUME_ID);
1016 ret = UBI_BAD_FASTMAP;
1017 goto free_hdr;
1018 }
1019 }
1020
1021 if (sqnum < be64_to_cpu(vh->sqnum))
1022 sqnum = be64_to_cpu(vh->sqnum);
1023
1024 ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i),
1025 pnum, 0, ubi->leb_size);
1026 if (ret && ret != UBI_IO_BITFLIPS) {
1027 ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
1028 "err: %i)", i, pnum, ret);
1029 goto free_hdr;
1030 }
1031 }
1032
1033 kfree(fmsb);
1034 fmsb = NULL;
1035
1036 fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
1037 tmp_crc = be32_to_cpu(fmsb2->data_crc);
1038 fmsb2->data_crc = 0;
1039 crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
1040 if (crc != tmp_crc) {
1041 ubi_err(ubi, "fastmap data CRC is invalid");
1042 ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
1043 tmp_crc, crc);
1044 ret = UBI_BAD_FASTMAP;
1045 goto free_hdr;
1046 }
1047
1048 fmsb2->sqnum = sqnum;
1049
1050 fm->used_blocks = used_blocks;
1051
1052 ret = ubi_attach_fastmap(ubi, ai, fm);
1053 if (ret) {
1054 if (ret > 0)
1055 ret = UBI_BAD_FASTMAP;
1056 goto free_hdr;
1057 }
1058
1059 for (i = 0; i < used_blocks; i++) {
1060 struct ubi_wl_entry *e;
1061
1062 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1063 if (!e) {
1064 while (i--)
1065 kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);
1066
1067 ret = -ENOMEM;
1068 goto free_hdr;
1069 }
1070
1071 e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
1072 e->ec = be32_to_cpu(fmsb2->block_ec[i]);
1073 fm->e[i] = e;
1074 }
1075
1076 ubi->fm = fm;
1077 ubi->fm_pool.max_size = ubi->fm->max_pool_size;
1078 ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
1079 ubi_msg(ubi, "attached by fastmap");
1080 ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
1081 ubi_msg(ubi, "fastmap WL pool size: %d",
1082 ubi->fm_wl_pool.max_size);
1083 ubi->fm_disabled = 0;
1084 ubi->fast_attach = 1;
1085
1086 ubi_free_vid_buf(vb);
1087 kfree(ech);
1088out:
1089 up_write(&ubi->fm_protect);
1090 if (ret == UBI_BAD_FASTMAP)
1091 ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
1092 return ret;
1093
1094free_hdr:
1095 ubi_free_vid_buf(vb);
1096 kfree(ech);
1097free_fm_sb:
1098 kfree(fmsb);
1099 kfree(fm);
1100 goto out;
1101}
1102
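/**
 * ubi_fastmap_init_checkmap - allocate the LEB checkmap of a volume.
 * @vol: UBI volume description object
 * @leb_count: number of LEBs (bits) the bitmap has to cover
 *
 * The checkmap is only needed after a fastmap attach, so nothing is
 * allocated when the device was attached by a full scan.
 * Returns 0 on success, -ENOMEM if the bitmap could not be allocated.
 */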
1103int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count)
1104{
1105 struct ubi_device *ubi = vol->ubi;
1106
1107 if (!ubi->fast_attach)
1108 return 0;
1109
1110 vol->checkmap = bitmap_zalloc(leb_count, GFP_KERNEL);
1111 if (!vol->checkmap)
1112 return -ENOMEM;
1113
1114 return 0;
1115}
1116
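/**
 * ubi_fastmap_destroy_checkmap - free the LEB checkmap of a volume.
 * @vol: UBI volume description object
 */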
1117void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol)
1118{
1119 bitmap_free(vol->checkmap);
1120}
1121
1122/**
1123 * ubi_write_fastmap - writes a fastmap.
1124 * @ubi: UBI device object
1125 * @new_fm: the fastmap to be written
1126 *
1127 * Returns 0 on success, < 0 indicates an internal error.
1128 */
1129static int ubi_write_fastmap(struct ubi_device *ubi,
1130 struct ubi_fastmap_layout *new_fm)
1131{
1132 size_t fm_pos = 0;
1133 void *fm_raw;
1134 struct ubi_fm_sb *fmsb;
1135 struct ubi_fm_hdr *fmh;
1136 struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
1137 struct ubi_fm_ec *fec;
1138 struct ubi_fm_volhdr *fvh;
1139 struct ubi_fm_eba *feba;
1140 struct ubi_wl_entry *wl_e;
1141 struct ubi_volume *vol;
1142 struct ubi_vid_io_buf *avbuf, *dvbuf;
1143 struct ubi_vid_hdr *avhdr, *dvhdr;
1144 struct ubi_work *ubi_wrk;
1145 struct rb_node *tmp_rb;
1146 int ret, i, j, free_peb_count, used_peb_count, vol_count;
1147 int scrub_peb_count, erase_peb_count;
1148 unsigned long *seen_pebs;
1149
1150 fm_raw = ubi->fm_buf;
1151 memset(ubi->fm_buf, 0, ubi->fm_size);
1152
1153 avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
1154 if (!avbuf) {
1155 ret = -ENOMEM;
1156 goto out;
1157 }
1158
1159 dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
1160 if (!dvbuf) {
1161 ret = -ENOMEM;
1162 goto out_free_avbuf;
1163 }
1164
1165 avhdr = ubi_get_vid_hdr(avbuf);
1166 dvhdr = ubi_get_vid_hdr(dvbuf);
1167
1168 seen_pebs = init_seen(ubi);
1169 if (IS_ERR(seen_pebs)) {
1170 ret = PTR_ERR(seen_pebs);
1171 goto out_free_dvbuf;
1172 }
1173
1174 spin_lock(&ubi->volumes_lock);
1175 spin_lock(&ubi->wl_lock);
1176
1177 fmsb = (struct ubi_fm_sb *)fm_raw;
1178 fm_pos += sizeof(*fmsb);
1179 ubi_assert(fm_pos <= ubi->fm_size);
1180
1181 fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
1182 fm_pos += sizeof(*fmh);
1183 ubi_assert(fm_pos <= ubi->fm_size);
1184
1185 fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
1186 fmsb->version = UBI_FM_FMT_VERSION;
1187 fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
1188 /* the max sqnum will be filled in while *reading* the fastmap */
1189 fmsb->sqnum = 0;
1190
1191 fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
1192 free_peb_count = 0;
1193 used_peb_count = 0;
1194 scrub_peb_count = 0;
1195 erase_peb_count = 0;
1196 vol_count = 0;
1197
1198 fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1199 fm_pos += sizeof(*fmpl);
1200 fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1201 fmpl->size = cpu_to_be16(ubi->fm_pool.size);
1202 fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);
1203
1204 for (i = 0; i < ubi->fm_pool.size; i++) {
1205 fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
1206 set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
1207 }
1208
1209 fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1210 fm_pos += sizeof(*fmpl_wl);
1211 fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1212 fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
1213 fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
1214
1215 for (i = 0; i < ubi->fm_wl_pool.size; i++) {
1216 fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
1217 set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
1218 }
1219
1220 ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
1221 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1222
1223 fec->pnum = cpu_to_be32(wl_e->pnum);
1224 set_seen(ubi, wl_e->pnum, seen_pebs);
1225 fec->ec = cpu_to_be32(wl_e->ec);
1226
1227 free_peb_count++;
1228 fm_pos += sizeof(*fec);
1229 ubi_assert(fm_pos <= ubi->fm_size);
1230 }
1231 fmh->free_peb_count = cpu_to_be32(free_peb_count);
1232
1233 ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
1234 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1235
1236 fec->pnum = cpu_to_be32(wl_e->pnum);
1237 set_seen(ubi, wl_e->pnum, seen_pebs);
1238 fec->ec = cpu_to_be32(wl_e->ec);
1239
1240 used_peb_count++;
1241 fm_pos += sizeof(*fec);
1242 ubi_assert(fm_pos <= ubi->fm_size);
1243 }
1244
1245 ubi_for_each_protected_peb(ubi, i, wl_e) {
1246 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1247
1248 fec->pnum = cpu_to_be32(wl_e->pnum);
1249 set_seen(ubi, wl_e->pnum, seen_pebs);
1250 fec->ec = cpu_to_be32(wl_e->ec);
1251
1252 used_peb_count++;
1253 fm_pos += sizeof(*fec);
1254 ubi_assert(fm_pos <= ubi->fm_size);
1255 }
1256 fmh->used_peb_count = cpu_to_be32(used_peb_count);
1257
1258 ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
1259 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1260
1261 fec->pnum = cpu_to_be32(wl_e->pnum);
1262 set_seen(ubi, wl_e->pnum, seen_pebs);
1263 fec->ec = cpu_to_be32(wl_e->ec);
1264
1265 scrub_peb_count++;
1266 fm_pos += sizeof(*fec);
1267 ubi_assert(fm_pos <= ubi->fm_size);
1268 }
1269 fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
1270
1271
1272 list_for_each_entry(ubi_wrk, &ubi->works, list) {
1273 if (ubi_is_erase_work(ubi_wrk)) {
1274 wl_e = ubi_wrk->e;
1275 ubi_assert(wl_e);
1276
1277 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1278
1279 fec->pnum = cpu_to_be32(wl_e->pnum);
1280 set_seen(ubi, wl_e->pnum, seen_pebs);
1281 fec->ec = cpu_to_be32(wl_e->ec);
1282
1283 erase_peb_count++;
1284 fm_pos += sizeof(*fec);
1285 ubi_assert(fm_pos <= ubi->fm_size);
1286 }
1287 }
1288 fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
1289
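	/*
	 * Add a volume header and the EBA table of each volume (internal
	 * volumes included) to the fastmap.
	 */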
1290 for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
1291 vol = ubi->volumes[i];
1292
1293 if (!vol)
1294 continue;
1295
1296 vol_count++;
1297
1298 fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
1299 fm_pos += sizeof(*fvh);
1300 ubi_assert(fm_pos <= ubi->fm_size);
1301
1302 fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
1303 fvh->vol_id = cpu_to_be32(vol->vol_id);
1304 fvh->vol_type = vol->vol_type;
1305 fvh->used_ebs = cpu_to_be32(vol->used_ebs);
1306 fvh->data_pad = cpu_to_be32(vol->data_pad);
1307 fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
1308
1309 ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
1310 vol->vol_type == UBI_STATIC_VOLUME);
1311
1312 feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
1313 fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
1314 ubi_assert(fm_pos <= ubi->fm_size);
1315
1316 for (j = 0; j < vol->reserved_pebs; j++) {
1317 struct ubi_eba_leb_desc ldesc;
1318
1319 ubi_eba_get_ldesc(vol, j, &ldesc);
1320 feba->pnum[j] = cpu_to_be32(ldesc.pnum);
1321 }
1322
1323 feba->reserved_pebs = cpu_to_be32(j);
1324 feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
1325 }
1326 fmh->vol_count = cpu_to_be32(vol_count);
1327 fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
1328
1329 avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1330 avhdr->lnum = 0;
1331
1332 spin_unlock(&ubi->wl_lock);
1333 spin_unlock(&ubi->volumes_lock);
1334
1335 dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
1336 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
1337 if (ret) {
1338 ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
1339 goto out_free_seen;
1340 }
1341
1342 for (i = 0; i < new_fm->used_blocks; i++) {
1343 fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
1344 set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
1345 fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
1346 }
1347
1348 fmsb->data_crc = 0;
1349 fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
1350 ubi->fm_size));
1351
1352 for (i = 1; i < new_fm->used_blocks; i++) {
1353 dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1354 dvhdr->lnum = cpu_to_be32(i);
1355 dbg_bld("writing fastmap data to PEB %i sqnum %llu",
1356 new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
1357 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
1358 if (ret) {
1359 ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
1360 new_fm->e[i]->pnum);
1361 goto out_free_seen;
1362 }
1363 }
1364
1365 for (i = 0; i < new_fm->used_blocks; i++) {
1366 ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size),
1367 new_fm->e[i]->pnum, 0, ubi->leb_size);
1368 if (ret) {
1369 ubi_err(ubi, "unable to write fastmap to PEB %i!",
1370 new_fm->e[i]->pnum);
1371 goto out_free_seen;
1372 }
1373 }
1374
1375 ubi_assert(new_fm);
1376 ubi->fm = new_fm;
1377
1378 ret = self_check_seen(ubi, seen_pebs);
1379 dbg_bld("fastmap written!");
1380
1381out_free_seen:
1382 free_seen(seen_pebs);
1383out_free_dvbuf:
1384 ubi_free_vid_buf(dvbuf);
1385out_free_avbuf:
1386 ubi_free_vid_buf(avbuf);
1387
1388out:
1389 return ret;
1390}
1391
1392/**
1393 * invalidate_fastmap - destroys a fastmap.
1394 * @ubi: UBI device object
1395 *
1396 * This function ensures that upon next UBI attach a full scan
1397 * is issued. We need this if UBI is about to write a new fastmap
1398 * but is unable to do so. In this case we have two options:
1399 * a) Make sure that the current fastmap will not be used upon
1400 * attach time and continue or b) fall back to RO mode to have the
1401 * current fastmap in a valid state.
1402 * Returns 0 on success, < 0 indicates an internal error.
1403 */
1404static int invalidate_fastmap(struct ubi_device *ubi)
1405{
1406 int ret;
1407 struct ubi_fastmap_layout *fm;
1408 struct ubi_wl_entry *e;
1409 struct ubi_vid_io_buf *vb = NULL;
1410 struct ubi_vid_hdr *vh;
1411
1412 if (!ubi->fm)
1413 return 0;
1414
1415 ubi->fm = NULL;
1416
1417 ret = -ENOMEM;
1418 fm = kzalloc(sizeof(*fm), GFP_NOFS);
1419 if (!fm)
1420 goto out;
1421
1422 vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
1423 if (!vb)
1424 goto out_free_fm;
1425
1426 vh = ubi_get_vid_hdr(vb);
1427
1428 ret = -ENOSPC;
1429 e = ubi_wl_get_fm_peb(ubi, 1);
1430 if (!e)
1431 goto out_free_fm;
1432
1433 /*
1434 * Create fake fastmap such that UBI will fall back
1435 * to scanning mode.
1436 */
1437 vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1438 ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
1439 if (ret < 0) {
1440 ubi_wl_put_fm_peb(ubi, e, 0, 0);
1441 goto out_free_fm;
1442 }
1443
1444 fm->used_blocks = 1;
1445 fm->e[0] = e;
1446
1447 ubi->fm = fm;
1448
1449out:
1450 ubi_free_vid_buf(vb);
1451 return ret;
1452
1453out_free_fm:
1454 kfree(fm);
1455 goto out;
1456}
1457
1458/**
1459 * return_fm_pebs - returns all PEBs used by a fastmap back to the
1460 * WL sub-system.
1461 * @ubi: UBI device object
1462 * @fm: fastmap layout object
1463 */
1464static void return_fm_pebs(struct ubi_device *ubi,
1465 struct ubi_fastmap_layout *fm)
1466{
1467 int i;
1468
1469 if (!fm)
1470 return;
1471
1472 for (i = 0; i < fm->used_blocks; i++) {
1473 if (fm->e[i]) {
1474 ubi_wl_put_fm_peb(ubi, fm->e[i], i,
1475 fm->to_be_tortured[i]);
1476 fm->e[i] = NULL;
1477 }
1478 }
1479}
1480
1481/**
1482 * ubi_update_fastmap - will be called by UBI if a volume changes or
1483 * a fastmap pool becomes full.
1484 * @ubi: UBI device object
1485 *
1486 * Returns 0 on success, < 0 indicates an internal error.
1487 */
1488int ubi_update_fastmap(struct ubi_device *ubi)
1489{
1490 int ret, i, j;
1491 struct ubi_fastmap_layout *new_fm, *old_fm;
1492 struct ubi_wl_entry *tmp_e;
1493
1494 ubi_refill_pools_and_lock(ubi);
1495
1496 if (ubi->ro_mode || ubi->fm_disabled) {
1497 up_write(&ubi->fm_eba_sem);
1498 up_write(&ubi->work_sem);
1499 up_write(&ubi->fm_protect);
1500 return 0;
1501 }
1502
1503 new_fm = kzalloc(sizeof(*new_fm), GFP_NOFS);
1504 if (!new_fm) {
1505 up_write(&ubi->fm_eba_sem);
1506 up_write(&ubi->work_sem);
1507 up_write(&ubi->fm_protect);
1508 return -ENOMEM;
1509 }
1510
1511 new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
1512 old_fm = ubi->fm;
1513 ubi->fm = NULL;
1514
1515 if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
1516 ubi_err(ubi, "fastmap too large");
1517 ret = -ENOSPC;
1518 goto err;
1519 }
1520
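	/*
	 * Collect PEBs for all non-anchor fastmap blocks: take fresh PEBs
	 * from the WL sub-system if possible, otherwise erase and reuse the
	 * blocks of the old fastmap.
	 */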
1521 for (i = 1; i < new_fm->used_blocks; i++) {
1522 spin_lock(&ubi->wl_lock);
1523 tmp_e = ubi_wl_get_fm_peb(ubi, 0);
1524 spin_unlock(&ubi->wl_lock);
1525
1526 if (!tmp_e) {
1527 if (old_fm && old_fm->e[i]) {
1528 ret = ubi_sync_erase(ubi, old_fm->e[i], 0);
1529 if (ret < 0) {
1530 ubi_err(ubi, "could not erase old fastmap PEB");
1531
1532 for (j = 1; j < i; j++) {
1533 ubi_wl_put_fm_peb(ubi, new_fm->e[j],
1534 j, 0);
1535 new_fm->e[j] = NULL;
1536 }
1537 goto err;
1538 }
1539 new_fm->e[i] = old_fm->e[i];
1540 old_fm->e[i] = NULL;
1541 } else {
1542 ubi_err(ubi, "could not get any free erase block");
1543
1544 for (j = 1; j < i; j++) {
1545 ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
1546 new_fm->e[j] = NULL;
1547 }
1548
1549 ret = -ENOSPC;
1550 goto err;
1551 }
1552 } else {
1553 new_fm->e[i] = tmp_e;
1554
1555 if (old_fm && old_fm->e[i]) {
1556 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1557 old_fm->to_be_tortured[i]);
1558 old_fm->e[i] = NULL;
1559 }
1560 }
1561 }
1562
1563 /* Old fastmap is larger than the new one */
1564 if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
1565 for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
1566 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1567 old_fm->to_be_tortured[i]);
1568 old_fm->e[i] = NULL;
1569 }
1570 }
1571
1572 spin_lock(&ubi->wl_lock);
1573 tmp_e = ubi->fm_anchor;
1574 ubi->fm_anchor = NULL;
1575 spin_unlock(&ubi->wl_lock);
1576
1577 if (old_fm) {
1578 /* no fresh anchor PEB was found, reuse the old one */
1579 if (!tmp_e) {
1580 ret = ubi_sync_erase(ubi, old_fm->e[0], 0);
1581 if (ret < 0) {
1582 ubi_err(ubi, "could not erase old anchor PEB");
1583
1584 for (i = 1; i < new_fm->used_blocks; i++) {
1585 ubi_wl_put_fm_peb(ubi, new_fm->e[i],
1586 i, 0);
1587 new_fm->e[i] = NULL;
1588 }
1589 goto err;
1590 }
1591 new_fm->e[0] = old_fm->e[0];
1592 old_fm->e[0] = NULL;
1593 } else {
1594 /* we've got a new anchor PEB, return the old one */
1595 ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
1596 old_fm->to_be_tortured[0]);
1597 new_fm->e[0] = tmp_e;
1598 old_fm->e[0] = NULL;
1599 }
1600 } else {
1601 if (!tmp_e) {
1602 ubi_err(ubi, "could not find any anchor PEB");
1603
1604 for (i = 1; i < new_fm->used_blocks; i++) {
1605 ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
1606 new_fm->e[i] = NULL;
1607 }
1608
1609 ret = -ENOSPC;
1610 goto err;
1611 }
1612 new_fm->e[0] = tmp_e;
1613 }
1614
1615 ret = ubi_write_fastmap(ubi, new_fm);
1616
1617 if (ret)
1618 goto err;
1619
1620out_unlock:
1621 up_write(&ubi->fm_eba_sem);
1622 up_write(&ubi->work_sem);
1623 up_write(&ubi->fm_protect);
1624 kfree(old_fm);
1625
1626 ubi_ensure_anchor_pebs(ubi);
1627
1628 return ret;
1629
1630err:
1631 ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
1632
1633 ret = invalidate_fastmap(ubi);
1634 if (ret < 0) {
1635 ubi_err(ubi, "Unable to invalidate current fastmap!");
1636 ubi_ro_mode(ubi);
1637 } else {
1638 return_fm_pebs(ubi, old_fm);
1639 return_fm_pebs(ubi, new_fm);
1640 ret = 0;
1641 }
1642
1643 kfree(new_fm);
1644 goto out_unlock;
1645}
1/*
2 * Copyright (c) 2012 Linutronix GmbH
3 * Copyright (c) 2014 sigma star gmbh
4 * Author: Richard Weinberger <richard@nod.at>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
13 * the GNU General Public License for more details.
14 *
15 */
16
17#include <linux/crc32.h>
18#include <linux/bitmap.h>
19#include "ubi.h"
20
21/**
22 * init_seen - allocate memory for used for debugging.
23 * @ubi: UBI device description object
24 */
25static inline unsigned long *init_seen(struct ubi_device *ubi)
26{
27 unsigned long *ret;
28
29 if (!ubi_dbg_chk_fastmap(ubi))
30 return NULL;
31
32 ret = kcalloc(BITS_TO_LONGS(ubi->peb_count), sizeof(unsigned long),
33 GFP_KERNEL);
34 if (!ret)
35 return ERR_PTR(-ENOMEM);
36
37 return ret;
38}
39
40/**
41 * free_seen - free the seen logic integer array.
42 * @seen: integer array of @ubi->peb_count size
43 */
44static inline void free_seen(unsigned long *seen)
45{
46 kfree(seen);
47}
48
49/**
50 * set_seen - mark a PEB as seen.
51 * @ubi: UBI device description object
52 * @pnum: The PEB to be makred as seen
53 * @seen: integer array of @ubi->peb_count size
54 */
55static inline void set_seen(struct ubi_device *ubi, int pnum, unsigned long *seen)
56{
57 if (!ubi_dbg_chk_fastmap(ubi) || !seen)
58 return;
59
60 set_bit(pnum, seen);
61}
62
63/**
64 * self_check_seen - check whether all PEB have been seen by fastmap.
65 * @ubi: UBI device description object
66 * @seen: integer array of @ubi->peb_count size
67 */
68static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
69{
70 int pnum, ret = 0;
71
72 if (!ubi_dbg_chk_fastmap(ubi) || !seen)
73 return 0;
74
75 for (pnum = 0; pnum < ubi->peb_count; pnum++) {
76 if (test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
77 ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
78 ret = -EINVAL;
79 }
80 }
81
82 return ret;
83}
84
85/**
86 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
87 * @ubi: UBI device description object
88 */
89size_t ubi_calc_fm_size(struct ubi_device *ubi)
90{
91 size_t size;
92
93 size = sizeof(struct ubi_fm_sb) +
94 sizeof(struct ubi_fm_hdr) +
95 sizeof(struct ubi_fm_scan_pool) +
96 sizeof(struct ubi_fm_scan_pool) +
97 (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
98 (sizeof(struct ubi_fm_eba) +
99 (ubi->peb_count * sizeof(__be32))) +
100 sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
101 return roundup(size, ubi->leb_size);
102}
103
104
105/**
106 * new_fm_vhdr - allocate a new volume header for fastmap usage.
107 * @ubi: UBI device description object
108 * @vol_id: the VID of the new header
109 *
110 * Returns a new struct ubi_vid_hdr on success.
111 * NULL indicates out of memory.
112 */
113static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
114{
115 struct ubi_vid_io_buf *new;
116 struct ubi_vid_hdr *vh;
117
118 new = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
119 if (!new)
120 goto out;
121
122 vh = ubi_get_vid_hdr(new);
123 vh->vol_type = UBI_VID_DYNAMIC;
124 vh->vol_id = cpu_to_be32(vol_id);
125
126 /* UBI implementations without fastmap support have to delete the
127 * fastmap.
128 */
129 vh->compat = UBI_COMPAT_DELETE;
130
131out:
132 return new;
133}
134
135/**
136 * add_aeb - create and add a attach erase block to a given list.
137 * @ai: UBI attach info object
138 * @list: the target list
139 * @pnum: PEB number of the new attach erase block
140 * @ec: erease counter of the new LEB
141 * @scrub: scrub this PEB after attaching
142 *
143 * Returns 0 on success, < 0 indicates an internal error.
144 */
145static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
146 int pnum, int ec, int scrub)
147{
148 struct ubi_ainf_peb *aeb;
149
150 aeb = ubi_alloc_aeb(ai, pnum, ec);
151 if (!aeb)
152 return -ENOMEM;
153
154 aeb->lnum = -1;
155 aeb->scrub = scrub;
156 aeb->copy_flag = aeb->sqnum = 0;
157
158 ai->ec_sum += aeb->ec;
159 ai->ec_count++;
160
161 if (ai->max_ec < aeb->ec)
162 ai->max_ec = aeb->ec;
163
164 if (ai->min_ec > aeb->ec)
165 ai->min_ec = aeb->ec;
166
167 list_add_tail(&aeb->u.list, list);
168
169 return 0;
170}
171
172/**
173 * add_vol - create and add a new volume to ubi_attach_info.
174 * @ai: ubi_attach_info object
175 * @vol_id: VID of the new volume
176 * @used_ebs: number of used EBS
177 * @data_pad: data padding value of the new volume
178 * @vol_type: volume type
179 * @last_eb_bytes: number of bytes in the last LEB
180 *
181 * Returns the new struct ubi_ainf_volume on success.
182 * NULL indicates an error.
183 */
184static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
185 int used_ebs, int data_pad, u8 vol_type,
186 int last_eb_bytes)
187{
188 struct ubi_ainf_volume *av;
189
190 av = ubi_add_av(ai, vol_id);
191 if (IS_ERR(av))
192 return av;
193
194 av->data_pad = data_pad;
195 av->last_data_size = last_eb_bytes;
196 av->compat = 0;
197 av->vol_type = vol_type;
198 if (av->vol_type == UBI_STATIC_VOLUME)
199 av->used_ebs = used_ebs;
200
201 dbg_bld("found volume (ID %i)", vol_id);
202 return av;
203}
204
205/**
206 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
207 * from it's original list.
208 * @ai: ubi_attach_info object
209 * @aeb: the to be assigned SEB
210 * @av: target scan volume
211 */
212static void assign_aeb_to_av(struct ubi_attach_info *ai,
213 struct ubi_ainf_peb *aeb,
214 struct ubi_ainf_volume *av)
215{
216 struct ubi_ainf_peb *tmp_aeb;
217 struct rb_node **p = &av->root.rb_node, *parent = NULL;
218
219 while (*p) {
220 parent = *p;
221
222 tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
223 if (aeb->lnum != tmp_aeb->lnum) {
224 if (aeb->lnum < tmp_aeb->lnum)
225 p = &(*p)->rb_left;
226 else
227 p = &(*p)->rb_right;
228
229 continue;
230 } else
231 break;
232 }
233
234 list_del(&aeb->u.list);
235 av->leb_count++;
236
237 rb_link_node(&aeb->u.rb, parent, p);
238 rb_insert_color(&aeb->u.rb, &av->root);
239}
240
241/**
242 * update_vol - inserts or updates a LEB which was found a pool.
243 * @ubi: the UBI device object
244 * @ai: attach info object
245 * @av: the volume this LEB belongs to
246 * @new_vh: the volume header derived from new_aeb
247 * @new_aeb: the AEB to be examined
248 *
249 * Returns 0 on success, < 0 indicates an internal error.
250 */
251static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
252 struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
253 struct ubi_ainf_peb *new_aeb)
254{
255 struct rb_node **p = &av->root.rb_node, *parent = NULL;
256 struct ubi_ainf_peb *aeb, *victim;
257 int cmp_res;
258
259 while (*p) {
260 parent = *p;
261 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
262
263 if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
264 if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
265 p = &(*p)->rb_left;
266 else
267 p = &(*p)->rb_right;
268
269 continue;
270 }
271
272 /* This case can happen if the fastmap gets written
273 * because of a volume change (creation, deletion, ..).
274 * Then a PEB can be within the persistent EBA and the pool.
275 */
276 if (aeb->pnum == new_aeb->pnum) {
277 ubi_assert(aeb->lnum == new_aeb->lnum);
278 ubi_free_aeb(ai, new_aeb);
279
280 return 0;
281 }
282
283 cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
284 if (cmp_res < 0)
285 return cmp_res;
286
287 /* new_aeb is newer */
288 if (cmp_res & 1) {
289 victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
290 if (!victim)
291 return -ENOMEM;
292
293 list_add_tail(&victim->u.list, &ai->erase);
294
295 if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
296 av->last_data_size =
297 be32_to_cpu(new_vh->data_size);
298
299 dbg_bld("vol %i: AEB %i's PEB %i is the newer",
300 av->vol_id, aeb->lnum, new_aeb->pnum);
301
302 aeb->ec = new_aeb->ec;
303 aeb->pnum = new_aeb->pnum;
304 aeb->copy_flag = new_vh->copy_flag;
305 aeb->scrub = new_aeb->scrub;
306 aeb->sqnum = new_aeb->sqnum;
307 ubi_free_aeb(ai, new_aeb);
308
309 /* new_aeb is older */
310 } else {
311 dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
312 av->vol_id, aeb->lnum, new_aeb->pnum);
313 list_add_tail(&new_aeb->u.list, &ai->erase);
314 }
315
316 return 0;
317 }
318 /* This LEB is new, let's add it to the volume */
319
320 if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
321 av->highest_lnum = be32_to_cpu(new_vh->lnum);
322 av->last_data_size = be32_to_cpu(new_vh->data_size);
323 }
324
325 if (av->vol_type == UBI_STATIC_VOLUME)
326 av->used_ebs = be32_to_cpu(new_vh->used_ebs);
327
328 av->leb_count++;
329
330 rb_link_node(&new_aeb->u.rb, parent, p);
331 rb_insert_color(&new_aeb->u.rb, &av->root);
332
333 return 0;
334}
335
336/**
337 * process_pool_aeb - we found a non-empty PEB in a pool.
338 * @ubi: UBI device object
339 * @ai: attach info object
340 * @new_vh: the volume header derived from new_aeb
341 * @new_aeb: the AEB to be examined
342 *
343 * Returns 0 on success, < 0 indicates an internal error.
344 */
345static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
346 struct ubi_vid_hdr *new_vh,
347 struct ubi_ainf_peb *new_aeb)
348{
349 int vol_id = be32_to_cpu(new_vh->vol_id);
350 struct ubi_ainf_volume *av;
351
352 if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) {
353 ubi_free_aeb(ai, new_aeb);
354
355 return 0;
356 }
357
358 /* Find the volume this SEB belongs to */
359 av = ubi_find_av(ai, vol_id);
360 if (!av) {
361 ubi_err(ubi, "orphaned volume in fastmap pool!");
362 ubi_free_aeb(ai, new_aeb);
363 return UBI_BAD_FASTMAP;
364 }
365
366 ubi_assert(vol_id == av->vol_id);
367
368 return update_vol(ubi, ai, av, new_vh, new_aeb);
369}
370
371/**
372 * unmap_peb - unmap a PEB.
373 * If fastmap detects a free PEB in the pool it has to check whether
374 * this PEB has been unmapped after writing the fastmap.
375 *
376 * @ai: UBI attach info object
377 * @pnum: The PEB to be unmapped
378 */
379static void unmap_peb(struct ubi_attach_info *ai, int pnum)
380{
381 struct ubi_ainf_volume *av;
382 struct rb_node *node, *node2;
383 struct ubi_ainf_peb *aeb;
384
385 ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
386 ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
387 if (aeb->pnum == pnum) {
388 rb_erase(&aeb->u.rb, &av->root);
389 av->leb_count--;
390 ubi_free_aeb(ai, aeb);
391 return;
392 }
393 }
394 }
395}
396
397/**
398 * scan_pool - scans a pool for changed (no longer empty PEBs).
399 * @ubi: UBI device object
400 * @ai: attach info object
401 * @pebs: an array of all PEB numbers in the to be scanned pool
402 * @pool_size: size of the pool (number of entries in @pebs)
403 * @max_sqnum: pointer to the maximal sequence number
404 * @free: list of PEBs which are most likely free (and go into @ai->free)
405 *
406 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
407 * < 0 indicates an internal error.
408 */
409static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
410 __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
411 struct list_head *free)
412{
413 struct ubi_vid_io_buf *vb;
414 struct ubi_vid_hdr *vh;
415 struct ubi_ec_hdr *ech;
416 struct ubi_ainf_peb *new_aeb;
417 int i, pnum, err, ret = 0;
418
419 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
420 if (!ech)
421 return -ENOMEM;
422
423 vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
424 if (!vb) {
425 kfree(ech);
426 return -ENOMEM;
427 }
428
429 vh = ubi_get_vid_hdr(vb);
430
431 dbg_bld("scanning fastmap pool: size = %i", pool_size);
432
433 /*
434 * Now scan all PEBs in the pool to find changes which have been made
435 * after the creation of the fastmap
436 */
437 for (i = 0; i < pool_size; i++) {
438 int scrub = 0;
439 int image_seq;
440
441 pnum = be32_to_cpu(pebs[i]);
442
443 if (ubi_io_is_bad(ubi, pnum)) {
444 ubi_err(ubi, "bad PEB in fastmap pool!");
445 ret = UBI_BAD_FASTMAP;
446 goto out;
447 }
448
449 err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
450 if (err && err != UBI_IO_BITFLIPS) {
451 ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
452 pnum, err);
453 ret = err > 0 ? UBI_BAD_FASTMAP : err;
454 goto out;
455 } else if (err == UBI_IO_BITFLIPS)
456 scrub = 1;
457
458 /*
459 * Older UBI implementations have image_seq set to zero, so
460 * we shouldn't fail if image_seq == 0.
461 */
462 image_seq = be32_to_cpu(ech->image_seq);
463
464 if (image_seq && (image_seq != ubi->image_seq)) {
465 ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
466 be32_to_cpu(ech->image_seq), ubi->image_seq);
467 ret = UBI_BAD_FASTMAP;
468 goto out;
469 }
470
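		/*
		 * An all-0xFF VID header means this pool PEB was erased or
		 * unmapped after the fastmap was written, so it is treated
		 * as free; otherwise it carries new data and is handed to
		 * process_pool_aeb().
		 */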
471 err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
472 if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
473 unsigned long long ec = be64_to_cpu(ech->ec);
474 unmap_peb(ai, pnum);
475 dbg_bld("Adding PEB to free: %i", pnum);
476
477 if (err == UBI_IO_FF_BITFLIPS)
478 scrub = 1;
479
480			ret = add_aeb(ai, free, pnum, ec, scrub);
			if (ret)
				goto out;
481			continue;
482 } else if (err == 0 || err == UBI_IO_BITFLIPS) {
483 dbg_bld("Found non empty PEB:%i in pool", pnum);
484
485 if (err == UBI_IO_BITFLIPS)
486 scrub = 1;
487
488 new_aeb = ubi_alloc_aeb(ai, pnum, be64_to_cpu(ech->ec));
489 if (!new_aeb) {
490 ret = -ENOMEM;
491 goto out;
492 }
493
494 new_aeb->lnum = be32_to_cpu(vh->lnum);
495 new_aeb->sqnum = be64_to_cpu(vh->sqnum);
496 new_aeb->copy_flag = vh->copy_flag;
497 new_aeb->scrub = scrub;
498
499 if (*max_sqnum < new_aeb->sqnum)
500 *max_sqnum = new_aeb->sqnum;
501
502 err = process_pool_aeb(ubi, ai, vh, new_aeb);
503 if (err) {
504 ret = err > 0 ? UBI_BAD_FASTMAP : err;
505 goto out;
506 }
507 } else {
508 /* We are paranoid and fall back to scanning mode */
509			ubi_err(ubi, "fastmap pool contains damaged PEBs!");
510 ret = err > 0 ? UBI_BAD_FASTMAP : err;
511 goto out;
512 }
513
514 }
515
516out:
517 ubi_free_vid_buf(vb);
518 kfree(ech);
519 return ret;
520}
521
522/**
523 * count_fastmap_pebs - Counts the PEBs found by fastmap.
524 * @ai: The UBI attach info object
525 */
526static int count_fastmap_pebs(struct ubi_attach_info *ai)
527{
528 struct ubi_ainf_peb *aeb;
529 struct ubi_ainf_volume *av;
530 struct rb_node *rb1, *rb2;
531 int n = 0;
532
533 list_for_each_entry(aeb, &ai->erase, u.list)
534 n++;
535
536 list_for_each_entry(aeb, &ai->free, u.list)
537 n++;
538
539 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
540 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
541 n++;
542
543 return n;
544}
545
546/**
547 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
548 * @ubi: UBI device object
549 * @ai: UBI attach info object
550 * @fm: the fastmap to be attached
551 *
552 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
553 * < 0 indicates an internal error.
554 */
555static int ubi_attach_fastmap(struct ubi_device *ubi,
556 struct ubi_attach_info *ai,
557 struct ubi_fastmap_layout *fm)
558{
559 struct list_head used, free;
560 struct ubi_ainf_volume *av;
561 struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
562 struct ubi_fm_sb *fmsb;
563 struct ubi_fm_hdr *fmhdr;
564 struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
565 struct ubi_fm_ec *fmec;
566 struct ubi_fm_volhdr *fmvhdr;
567 struct ubi_fm_eba *fm_eba;
568 int ret, i, j, pool_size, wl_pool_size;
569 size_t fm_pos = 0, fm_size = ubi->fm_size;
570 unsigned long long max_sqnum = 0;
571 void *fm_raw = ubi->fm_buf;
572
573 INIT_LIST_HEAD(&used);
574 INIT_LIST_HEAD(&free);
575 ai->min_ec = UBI_MAX_ERASECOUNTER;
576
577 fmsb = (struct ubi_fm_sb *)(fm_raw);
578 ai->max_sqnum = fmsb->sqnum;
579 fm_pos += sizeof(struct ubi_fm_sb);
580 if (fm_pos >= fm_size)
581 goto fail_bad;
582
583 fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
584 fm_pos += sizeof(*fmhdr);
585 if (fm_pos >= fm_size)
586 goto fail_bad;
587
588 if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
589 ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
590 be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
591 goto fail_bad;
592 }
593
594 fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
595 fm_pos += sizeof(*fmpl);
596 if (fm_pos >= fm_size)
597 goto fail_bad;
598 if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
599 ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
600 be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
601 goto fail_bad;
602 }
603
604 fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
605 fm_pos += sizeof(*fmpl_wl);
606 if (fm_pos >= fm_size)
607 goto fail_bad;
608 if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
609 ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
610 be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
611 goto fail_bad;
612 }
613
614 pool_size = be16_to_cpu(fmpl->size);
615 wl_pool_size = be16_to_cpu(fmpl_wl->size);
616 fm->max_pool_size = be16_to_cpu(fmpl->max_size);
617 fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);
618
619 if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
620 ubi_err(ubi, "bad pool size: %i", pool_size);
621 goto fail_bad;
622 }
623
624 if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
625 ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
626 goto fail_bad;
627 }
628
630 if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
631 fm->max_pool_size < 0) {
632 ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
633 goto fail_bad;
634 }
635
636 if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
637 fm->max_wl_pool_size < 0) {
638 ubi_err(ubi, "bad maximal WL pool size: %i",
639 fm->max_wl_pool_size);
640 goto fail_bad;
641 }
642
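	/*
	 * The EC entries follow the pools in the fastmap image in a fixed
	 * order: free, used, scrub and erase lists, followed by one
	 * ubi_fm_volhdr plus its EBA table per volume.
	 */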
643 /* read EC values from free list */
644 for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
645 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
646 fm_pos += sizeof(*fmec);
647 if (fm_pos >= fm_size)
648 goto fail_bad;
649
650		ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
651			      be32_to_cpu(fmec->ec), 0);
		if (ret)
			goto fail;
652 }
653
654 /* read EC values from used list */
655 for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
656 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
657 fm_pos += sizeof(*fmec);
658 if (fm_pos >= fm_size)
659 goto fail_bad;
660
661		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
662			      be32_to_cpu(fmec->ec), 0);
		if (ret)
			goto fail;
663 }
664
665 /* read EC values from scrub list */
666 for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
667 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
668 fm_pos += sizeof(*fmec);
669 if (fm_pos >= fm_size)
670 goto fail_bad;
671
672		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
673			      be32_to_cpu(fmec->ec), 1);
		if (ret)
			goto fail;
674 }
675
676 /* read EC values from erase list */
677 for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
678 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
679 fm_pos += sizeof(*fmec);
680 if (fm_pos >= fm_size)
681 goto fail_bad;
682
683		ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
684			      be32_to_cpu(fmec->ec), 1);
		if (ret)
			goto fail;
685 }
686
687 ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
688 ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
689
690 /* Iterate over all volumes and read their EBA table */
691 for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
692 fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
693 fm_pos += sizeof(*fmvhdr);
694 if (fm_pos >= fm_size)
695 goto fail_bad;
696
697 if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
698 ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
699 be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
700 goto fail_bad;
701 }
702
703 av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
704 be32_to_cpu(fmvhdr->used_ebs),
705 be32_to_cpu(fmvhdr->data_pad),
706 fmvhdr->vol_type,
707 be32_to_cpu(fmvhdr->last_eb_bytes));
708
709 if (IS_ERR(av)) {
710 if (PTR_ERR(av) == -EEXIST)
711 ubi_err(ubi, "volume (ID %i) already exists",
712					be32_to_cpu(fmvhdr->vol_id));
713
714 goto fail_bad;
715 }
716
717 ai->vols_found++;
718 if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
719 ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
720
721 fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
722 fm_pos += sizeof(*fm_eba);
723 fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
724 if (fm_pos >= fm_size)
725 goto fail_bad;
726
727 if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
728 ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
729 be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
730 goto fail_bad;
731 }
732
733 for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
734 int pnum = be32_to_cpu(fm_eba->pnum[j]);
735
736 if (pnum < 0)
737 continue;
738
739 aeb = NULL;
740 list_for_each_entry(tmp_aeb, &used, u.list) {
741 if (tmp_aeb->pnum == pnum) {
742 aeb = tmp_aeb;
743 break;
744 }
745 }
746
747 if (!aeb) {
748 ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
749 goto fail_bad;
750 }
751
752 aeb->lnum = j;
753
754 if (av->highest_lnum <= aeb->lnum)
755 av->highest_lnum = aeb->lnum;
756
757 assign_aeb_to_av(ai, aeb, av);
758
759 dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
760 aeb->pnum, aeb->lnum, av->vol_id);
761 }
762 }
763
764 ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
765 if (ret)
766 goto fail;
767
768 ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
769 if (ret)
770 goto fail;
771
772 if (max_sqnum > ai->max_sqnum)
773 ai->max_sqnum = max_sqnum;
774
775 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
776 list_move_tail(&tmp_aeb->u.list, &ai->free);
777
778 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
779 list_move_tail(&tmp_aeb->u.list, &ai->erase);
780
781 ubi_assert(list_empty(&free));
782
783 /*
784 * If fastmap is leaking PEBs (must not happen), raise a
785 * fat warning and fall back to scanning mode.
786 * We do this here because in ubi_wl_init() it's too late
787 * and we cannot fall back to scanning.
788 */
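	/*
	 * Example with made-up numbers: for peb_count = 1024,
	 * bad_peb_count = 2 and a two-block fastmap, the attach info
	 * must account for exactly 1024 - 2 - 2 = 1020 PEBs.
	 */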
789 if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
790 ai->bad_peb_count - fm->used_blocks))
791 goto fail_bad;
792
793 return 0;
794
795fail_bad:
796 ret = UBI_BAD_FASTMAP;
797fail:
798 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
799 list_del(&tmp_aeb->u.list);
800 ubi_free_aeb(ai, tmp_aeb);
801 }
802 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
803 list_del(&tmp_aeb->u.list);
804 ubi_free_aeb(ai, tmp_aeb);
805 }
806
807 return ret;
808}
809
810/**
811 * find_fm_anchor - find the most recent Fastmap superblock (anchor)
812 * @ai: UBI attach info to be filled
813 */
814static int find_fm_anchor(struct ubi_attach_info *ai)
815{
816 int ret = -1;
817 struct ubi_ainf_peb *aeb;
818 unsigned long long max_sqnum = 0;
819
820 list_for_each_entry(aeb, &ai->fastmap, u.list) {
821 if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
822 max_sqnum = aeb->sqnum;
823 ret = aeb->pnum;
824 }
825 }
826
827 return ret;
828}
829
830static struct ubi_ainf_peb *clone_aeb(struct ubi_attach_info *ai,
831 struct ubi_ainf_peb *old)
832{
833 struct ubi_ainf_peb *new;
834
835 new = ubi_alloc_aeb(ai, old->pnum, old->ec);
836 if (!new)
837 return NULL;
838
839 new->vol_id = old->vol_id;
840 new->sqnum = old->sqnum;
841 new->lnum = old->lnum;
842 new->scrub = old->scrub;
843 new->copy_flag = old->copy_flag;
844
845 return new;
846}
847
848/**
849 * ubi_scan_fastmap - scan the fastmap.
850 * @ubi: UBI device object
851 * @ai: UBI attach info to be filled
852 * @scan_ai: UBI attach info from the first 64 PEBs,
853 * used to find the most recent Fastmap data structure
854 *
855 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
856 * UBI_BAD_FASTMAP if one was found but is not usable.
857 * < 0 indicates an internal error.
858 */
859int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
860 struct ubi_attach_info *scan_ai)
861{
862 struct ubi_fm_sb *fmsb, *fmsb2;
863 struct ubi_vid_io_buf *vb;
864 struct ubi_vid_hdr *vh;
865 struct ubi_ec_hdr *ech;
866 struct ubi_fastmap_layout *fm;
867 struct ubi_ainf_peb *aeb;
868 int i, used_blocks, pnum, fm_anchor, ret = 0;
869 size_t fm_size;
870 __be32 crc, tmp_crc;
871 unsigned long long sqnum = 0;
872
873 fm_anchor = find_fm_anchor(scan_ai);
874 if (fm_anchor < 0)
875 return UBI_NO_FASTMAP;
876
877 /* Copy all (possible) fastmap blocks into our new attach structure. */
878 list_for_each_entry(aeb, &scan_ai->fastmap, u.list) {
879 struct ubi_ainf_peb *new;
880
881 new = clone_aeb(ai, aeb);
882 if (!new)
883 return -ENOMEM;
884
885 list_add(&new->u.list, &ai->fastmap);
886 }
887
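	/* Serialize fastmap access and protect ubi->fm_buf while attaching. */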
888 down_write(&ubi->fm_protect);
889 memset(ubi->fm_buf, 0, ubi->fm_size);
890
891 fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
892 if (!fmsb) {
893 ret = -ENOMEM;
894 goto out;
895 }
896
897 fm = kzalloc(sizeof(*fm), GFP_KERNEL);
898 if (!fm) {
899 ret = -ENOMEM;
900 kfree(fmsb);
901 goto out;
902 }
903
904 ret = ubi_io_read_data(ubi, fmsb, fm_anchor, 0, sizeof(*fmsb));
905 if (ret && ret != UBI_IO_BITFLIPS)
906 goto free_fm_sb;
907 else if (ret == UBI_IO_BITFLIPS)
908 fm->to_be_tortured[0] = 1;
909
910 if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
911 ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
912 be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
913 ret = UBI_BAD_FASTMAP;
914 goto free_fm_sb;
915 }
916
917 if (fmsb->version != UBI_FM_FMT_VERSION) {
918 ubi_err(ubi, "bad fastmap version: %i, expected: %i",
919 fmsb->version, UBI_FM_FMT_VERSION);
920 ret = UBI_BAD_FASTMAP;
921 goto free_fm_sb;
922 }
923
924 used_blocks = be32_to_cpu(fmsb->used_blocks);
925 if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
926 ubi_err(ubi, "number of fastmap blocks is invalid: %i",
927 used_blocks);
928 ret = UBI_BAD_FASTMAP;
929 goto free_fm_sb;
930 }
931
932 fm_size = ubi->leb_size * used_blocks;
933 if (fm_size != ubi->fm_size) {
934 ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
935 fm_size, ubi->fm_size);
936 ret = UBI_BAD_FASTMAP;
937 goto free_fm_sb;
938 }
939
940 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
941 if (!ech) {
942 ret = -ENOMEM;
943 goto free_fm_sb;
944 }
945
946 vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
947 if (!vb) {
948 ret = -ENOMEM;
949 goto free_hdr;
950 }
951
952 vh = ubi_get_vid_hdr(vb);
953
954 for (i = 0; i < used_blocks; i++) {
955 int image_seq;
956
957 pnum = be32_to_cpu(fmsb->block_loc[i]);
958
959 if (ubi_io_is_bad(ubi, pnum)) {
960 ret = UBI_BAD_FASTMAP;
961 goto free_hdr;
962 }
963
964 if (i == 0 && pnum != fm_anchor) {
965 ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
966 pnum, fm_anchor);
967 ret = UBI_BAD_FASTMAP;
968 goto free_hdr;
969 }
970
971 ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
972 if (ret && ret != UBI_IO_BITFLIPS) {
973 ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
974 i, pnum);
975 if (ret > 0)
976 ret = UBI_BAD_FASTMAP;
977 goto free_hdr;
978 } else if (ret == UBI_IO_BITFLIPS)
979 fm->to_be_tortured[i] = 1;
980
981 image_seq = be32_to_cpu(ech->image_seq);
982 if (!ubi->image_seq)
983 ubi->image_seq = image_seq;
984
985 /*
986 * Older UBI implementations have image_seq set to zero, so
987 * we shouldn't fail if image_seq == 0.
988 */
989 if (image_seq && (image_seq != ubi->image_seq)) {
990 ubi_err(ubi, "wrong image seq:%d instead of %d",
991 be32_to_cpu(ech->image_seq), ubi->image_seq);
992 ret = UBI_BAD_FASTMAP;
993 goto free_hdr;
994 }
995
996 ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
997 if (ret && ret != UBI_IO_BITFLIPS) {
998 ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
999 i, pnum);
1000 goto free_hdr;
1001 }
1002
1003 if (i == 0) {
1004 if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
1005 ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
1006 be32_to_cpu(vh->vol_id),
1007 UBI_FM_SB_VOLUME_ID);
1008 ret = UBI_BAD_FASTMAP;
1009 goto free_hdr;
1010 }
1011 } else {
1012 if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
1013 ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
1014 be32_to_cpu(vh->vol_id),
1015 UBI_FM_DATA_VOLUME_ID);
1016 ret = UBI_BAD_FASTMAP;
1017 goto free_hdr;
1018 }
1019 }
1020
1021 if (sqnum < be64_to_cpu(vh->sqnum))
1022 sqnum = be64_to_cpu(vh->sqnum);
1023
1024 ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i),
1025 pnum, 0, ubi->leb_size);
1026 if (ret && ret != UBI_IO_BITFLIPS) {
1027			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, err: %i)",
1028				i, pnum, ret);
1029 goto free_hdr;
1030 }
1031 }
1032
1033 kfree(fmsb);
1034 fmsb = NULL;
1035
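	/*
	 * The stored data_crc was computed with the data_crc field itself
	 * zeroed, so clear it again before recomputing the CRC over the
	 * whole fastmap image.
	 */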
1036 fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
1037 tmp_crc = be32_to_cpu(fmsb2->data_crc);
1038 fmsb2->data_crc = 0;
1039 crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
1040 if (crc != tmp_crc) {
1041 ubi_err(ubi, "fastmap data CRC is invalid");
1042 ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
1043 tmp_crc, crc);
1044 ret = UBI_BAD_FASTMAP;
1045 goto free_hdr;
1046 }
1047
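	/*
	 * fmsb->sqnum is written as zero; the in-RAM copy gets the highest
	 * sequence number seen in the fastmap blocks read above.
	 */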
1048 fmsb2->sqnum = sqnum;
1049
1050 fm->used_blocks = used_blocks;
1051
1052 ret = ubi_attach_fastmap(ubi, ai, fm);
1053 if (ret) {
1054 if (ret > 0)
1055 ret = UBI_BAD_FASTMAP;
1056 goto free_hdr;
1057 }
1058
1059 for (i = 0; i < used_blocks; i++) {
1060 struct ubi_wl_entry *e;
1061
1062 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1063 if (!e) {
1064 while (i--)
1065 kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);
1066
1067 ret = -ENOMEM;
1068 goto free_hdr;
1069 }
1070
1071 e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
1072 e->ec = be32_to_cpu(fmsb2->block_ec[i]);
1073 fm->e[i] = e;
1074 }
1075
1076 ubi->fm = fm;
1077 ubi->fm_pool.max_size = ubi->fm->max_pool_size;
1078 ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
1079 ubi_msg(ubi, "attached by fastmap");
1080 ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
1081 ubi_msg(ubi, "fastmap WL pool size: %d",
1082 ubi->fm_wl_pool.max_size);
1083 ubi->fm_disabled = 0;
1084 ubi->fast_attach = 1;
1085
1086 ubi_free_vid_buf(vb);
1087 kfree(ech);
1088out:
1089 up_write(&ubi->fm_protect);
1090 if (ret == UBI_BAD_FASTMAP)
1091 ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
1092 return ret;
1093
1094free_hdr:
1095 ubi_free_vid_buf(vb);
1096 kfree(ech);
1097free_fm_sb:
1098 kfree(fmsb);
1099 kfree(fm);
1100 goto out;
1101}
1102
1103/**
1104 * ubi_write_fastmap - writes a fastmap.
1105 * @ubi: UBI device object
1106 * @new_fm: the fastmap to be written
1107 *
1108 * Returns 0 on success, < 0 indicates an internal error.
1109 */
1110static int ubi_write_fastmap(struct ubi_device *ubi,
1111 struct ubi_fastmap_layout *new_fm)
1112{
1113 size_t fm_pos = 0;
1114 void *fm_raw;
1115 struct ubi_fm_sb *fmsb;
1116 struct ubi_fm_hdr *fmh;
1117 struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
1118 struct ubi_fm_ec *fec;
1119 struct ubi_fm_volhdr *fvh;
1120 struct ubi_fm_eba *feba;
1121 struct ubi_wl_entry *wl_e;
1122 struct ubi_volume *vol;
1123 struct ubi_vid_io_buf *avbuf, *dvbuf;
1124 struct ubi_vid_hdr *avhdr, *dvhdr;
1125 struct ubi_work *ubi_wrk;
1126 struct rb_node *tmp_rb;
1127 int ret, i, j, free_peb_count, used_peb_count, vol_count;
1128 int scrub_peb_count, erase_peb_count;
1129 unsigned long *seen_pebs = NULL;
1130
1131 fm_raw = ubi->fm_buf;
1132 memset(ubi->fm_buf, 0, ubi->fm_size);
1133
1134 avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
1135 if (!avbuf) {
1136 ret = -ENOMEM;
1137 goto out;
1138 }
1139
1140 dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
1141 if (!dvbuf) {
1142 ret = -ENOMEM;
1143 goto out_kfree;
1144 }
1145
1146 avhdr = ubi_get_vid_hdr(avbuf);
1147 dvhdr = ubi_get_vid_hdr(dvbuf);
1148
1149 seen_pebs = init_seen(ubi);
1150 if (IS_ERR(seen_pebs)) {
1151 ret = PTR_ERR(seen_pebs);
1152 goto out_kfree;
1153 }
1154
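	/*
	 * Take a consistent snapshot of the volume table and the
	 * wear-leveling state while assembling the fastmap image.
	 */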
1155 spin_lock(&ubi->volumes_lock);
1156 spin_lock(&ubi->wl_lock);
1157
1158 fmsb = (struct ubi_fm_sb *)fm_raw;
1159 fm_pos += sizeof(*fmsb);
1160 ubi_assert(fm_pos <= ubi->fm_size);
1161
1162 fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
1163 fm_pos += sizeof(*fmh);
1164 ubi_assert(fm_pos <= ubi->fm_size);
1165
1166 fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
1167 fmsb->version = UBI_FM_FMT_VERSION;
1168 fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
1169 /* the max sqnum will be filled in while *reading* the fastmap */
1170 fmsb->sqnum = 0;
1171
1172 fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
1173 free_peb_count = 0;
1174 used_peb_count = 0;
1175 scrub_peb_count = 0;
1176 erase_peb_count = 0;
1177 vol_count = 0;
1178
1179 fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1180 fm_pos += sizeof(*fmpl);
1181 fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1182 fmpl->size = cpu_to_be16(ubi->fm_pool.size);
1183 fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);
1184
1185 for (i = 0; i < ubi->fm_pool.size; i++) {
1186 fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
1187 set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
1188 }
1189
1190 fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1191 fm_pos += sizeof(*fmpl_wl);
1192 fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1193 fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
1194 fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
1195
1196 for (i = 0; i < ubi->fm_wl_pool.size; i++) {
1197 fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
1198 set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
1199 }
1200
1201 ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
1202 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1203
1204 fec->pnum = cpu_to_be32(wl_e->pnum);
1205 set_seen(ubi, wl_e->pnum, seen_pebs);
1206 fec->ec = cpu_to_be32(wl_e->ec);
1207
1208 free_peb_count++;
1209 fm_pos += sizeof(*fec);
1210 ubi_assert(fm_pos <= ubi->fm_size);
1211 }
1212 fmh->free_peb_count = cpu_to_be32(free_peb_count);
1213
1214 ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
1215 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1216
1217 fec->pnum = cpu_to_be32(wl_e->pnum);
1218 set_seen(ubi, wl_e->pnum, seen_pebs);
1219 fec->ec = cpu_to_be32(wl_e->ec);
1220
1221 used_peb_count++;
1222 fm_pos += sizeof(*fec);
1223 ubi_assert(fm_pos <= ubi->fm_size);
1224 }
1225
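	/* PEBs in the protection queue still hold data, count them as used. */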
1226 ubi_for_each_protected_peb(ubi, i, wl_e) {
1227 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1228
1229 fec->pnum = cpu_to_be32(wl_e->pnum);
1230 set_seen(ubi, wl_e->pnum, seen_pebs);
1231 fec->ec = cpu_to_be32(wl_e->ec);
1232
1233 used_peb_count++;
1234 fm_pos += sizeof(*fec);
1235 ubi_assert(fm_pos <= ubi->fm_size);
1236 }
1237 fmh->used_peb_count = cpu_to_be32(used_peb_count);
1238
1239 ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
1240 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1241
1242 fec->pnum = cpu_to_be32(wl_e->pnum);
1243 set_seen(ubi, wl_e->pnum, seen_pebs);
1244 fec->ec = cpu_to_be32(wl_e->ec);
1245
1246 scrub_peb_count++;
1247 fm_pos += sizeof(*fec);
1248 ubi_assert(fm_pos <= ubi->fm_size);
1249 }
1250 fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
1251
1253 list_for_each_entry(ubi_wrk, &ubi->works, list) {
1254 if (ubi_is_erase_work(ubi_wrk)) {
1255 wl_e = ubi_wrk->e;
1256 ubi_assert(wl_e);
1257
1258 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1259
1260 fec->pnum = cpu_to_be32(wl_e->pnum);
1261 set_seen(ubi, wl_e->pnum, seen_pebs);
1262 fec->ec = cpu_to_be32(wl_e->ec);
1263
1264 erase_peb_count++;
1265 fm_pos += sizeof(*fec);
1266 ubi_assert(fm_pos <= ubi->fm_size);
1267 }
1268 }
1269 fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
1270
1271 for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
1272 vol = ubi->volumes[i];
1273
1274 if (!vol)
1275 continue;
1276
1277 vol_count++;
1278
1279 fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
1280 fm_pos += sizeof(*fvh);
1281 ubi_assert(fm_pos <= ubi->fm_size);
1282
1283 fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
1284 fvh->vol_id = cpu_to_be32(vol->vol_id);
1285 fvh->vol_type = vol->vol_type;
1286 fvh->used_ebs = cpu_to_be32(vol->used_ebs);
1287 fvh->data_pad = cpu_to_be32(vol->data_pad);
1288 fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
1289
1290 ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
1291 vol->vol_type == UBI_STATIC_VOLUME);
1292
1293 feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
1294 fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
1295 ubi_assert(fm_pos <= ubi->fm_size);
1296
1297 for (j = 0; j < vol->reserved_pebs; j++) {
1298 struct ubi_eba_leb_desc ldesc;
1299
1300 ubi_eba_get_ldesc(vol, j, &ldesc);
1301 feba->pnum[j] = cpu_to_be32(ldesc.pnum);
1302 }
1303
1304 feba->reserved_pebs = cpu_to_be32(j);
1305 feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
1306 }
1307 fmh->vol_count = cpu_to_be32(vol_count);
1308 fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
1309
1310 avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1311 avhdr->lnum = 0;
1312
1313 spin_unlock(&ubi->wl_lock);
1314 spin_unlock(&ubi->volumes_lock);
1315
1316 dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
1317 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
1318 if (ret) {
1319 ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
1320 goto out_kfree;
1321 }
1322
1323 for (i = 0; i < new_fm->used_blocks; i++) {
1324 fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
1325 set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
1326 fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
1327 }
1328
1329 fmsb->data_crc = 0;
1330 fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
1331 ubi->fm_size));
1332
1333 for (i = 1; i < new_fm->used_blocks; i++) {
1334 dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1335 dvhdr->lnum = cpu_to_be32(i);
1336 dbg_bld("writing fastmap data to PEB %i sqnum %llu",
1337 new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
1338 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
1339 if (ret) {
1340 ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
1341 new_fm->e[i]->pnum);
1342 goto out_kfree;
1343 }
1344 }
1345
1346 for (i = 0; i < new_fm->used_blocks; i++) {
1347 ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size),
1348 new_fm->e[i]->pnum, 0, ubi->leb_size);
1349 if (ret) {
1350 ubi_err(ubi, "unable to write fastmap to PEB %i!",
1351 new_fm->e[i]->pnum);
1352 goto out_kfree;
1353 }
1354 }
1355
1356 ubi_assert(new_fm);
1357 ubi->fm = new_fm;
1358
1359 ret = self_check_seen(ubi, seen_pebs);
1360 dbg_bld("fastmap written!");
1361
1362out_kfree:
1363 ubi_free_vid_buf(avbuf);
1364 ubi_free_vid_buf(dvbuf);
1365 free_seen(seen_pebs);
1366out:
1367 return ret;
1368}
1369
1370/**
1371 * erase_block - Manually erase a PEB.
1372 * @ubi: UBI device object
1373 * @pnum: PEB to be erased
1374 *
1375 * Returns the new EC value on success, < 0 indicates an internal error.
1376 */
1377static int erase_block(struct ubi_device *ubi, int pnum)
1378{
1379 int ret;
1380 struct ubi_ec_hdr *ec_hdr;
1381 long long ec;
1382
1383 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1384 if (!ec_hdr)
1385 return -ENOMEM;
1386
1387 ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1388 if (ret < 0)
1389 goto out;
1390 else if (ret && ret != UBI_IO_BITFLIPS) {
1391 ret = -EINVAL;
1392 goto out;
1393 }
1394
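	/*
	 * ubi_io_sync_erase() returns the number of erase operations
	 * performed; it is added to the stored erase counter below.
	 */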
1395 ret = ubi_io_sync_erase(ubi, pnum, 0);
1396 if (ret < 0)
1397 goto out;
1398
1399 ec = be64_to_cpu(ec_hdr->ec);
1400 ec += ret;
1401 if (ec > UBI_MAX_ERASECOUNTER) {
1402 ret = -EINVAL;
1403 goto out;
1404 }
1405
1406 ec_hdr->ec = cpu_to_be64(ec);
1407 ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
1408 if (ret < 0)
1409 goto out;
1410
1411 ret = ec;
1412out:
1413 kfree(ec_hdr);
1414 return ret;
1415}
1416
1417/**
1418 * invalidate_fastmap - destroys a fastmap.
1419 * @ubi: UBI device object
1420 *
1421 * This function ensures that upon next UBI attach a full scan
1422 * is issued. We need this if UBI is about to write a new fastmap
1423 * but is unable to do so. In this case we have two options:
1424 * a) Make sure that the current fastmap will not be used upon
1425 * attach time and continue, or b) fall back to RO mode to have the
1426 * current fastmap in a valid state.
1427 * Returns 0 on success, < 0 indicates an internal error.
1428 */
1429static int invalidate_fastmap(struct ubi_device *ubi)
1430{
1431 int ret;
1432 struct ubi_fastmap_layout *fm;
1433 struct ubi_wl_entry *e;
1434 struct ubi_vid_io_buf *vb = NULL;
1435 struct ubi_vid_hdr *vh;
1436
1437 if (!ubi->fm)
1438 return 0;
1439
1440 ubi->fm = NULL;
1441
1442 ret = -ENOMEM;
1443 fm = kzalloc(sizeof(*fm), GFP_KERNEL);
1444 if (!fm)
1445 goto out;
1446
1447 vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
1448 if (!vb)
1449 goto out_free_fm;
1450
1451 vh = ubi_get_vid_hdr(vb);
1452
1453 ret = -ENOSPC;
1454 e = ubi_wl_get_fm_peb(ubi, 1);
1455 if (!e)
1456 goto out_free_fm;
1457
1458 /*
1459 * Create fake fastmap such that UBI will fall back
1460 * to scanning mode.
1461 */
1462 vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1463 ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
1464 if (ret < 0) {
1465 ubi_wl_put_fm_peb(ubi, e, 0, 0);
1466 goto out_free_fm;
1467 }
1468
1469 fm->used_blocks = 1;
1470 fm->e[0] = e;
1471
1472 ubi->fm = fm;
1473
1474out:
1475 ubi_free_vid_buf(vb);
1476 return ret;
1477
1478out_free_fm:
1479 kfree(fm);
1480 goto out;
1481}
1482
1483/**
1484 * return_fm_pebs - returns all PEBs used by a fastmap back to the
1485 * WL sub-system.
1486 * @ubi: UBI device object
1487 * @fm: fastmap layout object
1488 */
1489static void return_fm_pebs(struct ubi_device *ubi,
1490 struct ubi_fastmap_layout *fm)
1491{
1492 int i;
1493
1494 if (!fm)
1495 return;
1496
1497 for (i = 0; i < fm->used_blocks; i++) {
1498 if (fm->e[i]) {
1499 ubi_wl_put_fm_peb(ubi, fm->e[i], i,
1500 fm->to_be_tortured[i]);
1501 fm->e[i] = NULL;
1502 }
1503 }
1504}
1505
1506/**
1507 * ubi_update_fastmap - will be called by UBI if a volume changes or
1508 * a fastmap pool becomes full.
1509 * @ubi: UBI device object
1510 *
1511 * Returns 0 on success, < 0 indicates an internal error.
1512 */
1513int ubi_update_fastmap(struct ubi_device *ubi)
1514{
1515 int ret, i, j;
1516 struct ubi_fastmap_layout *new_fm, *old_fm;
1517 struct ubi_wl_entry *tmp_e;
1518
1519 down_write(&ubi->fm_protect);
1520 down_write(&ubi->work_sem);
1521 down_write(&ubi->fm_eba_sem);
1522
1523 ubi_refill_pools(ubi);
1524
1525 if (ubi->ro_mode || ubi->fm_disabled) {
1526 up_write(&ubi->fm_eba_sem);
1527 up_write(&ubi->work_sem);
1528 up_write(&ubi->fm_protect);
1529 return 0;
1530 }
1531
1532 ret = ubi_ensure_anchor_pebs(ubi);
1533 if (ret) {
1534 up_write(&ubi->fm_eba_sem);
1535 up_write(&ubi->work_sem);
1536 up_write(&ubi->fm_protect);
1537 return ret;
1538 }
1539
1540 new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
1541 if (!new_fm) {
1542 up_write(&ubi->fm_eba_sem);
1543 up_write(&ubi->work_sem);
1544 up_write(&ubi->fm_protect);
1545 return -ENOMEM;
1546 }
1547
1548 new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
1549 old_fm = ubi->fm;
1550 ubi->fm = NULL;
1551
1552 if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
1553 ubi_err(ubi, "fastmap too large");
1554 ret = -ENOSPC;
1555 goto err;
1556 }
1557
1558 for (i = 1; i < new_fm->used_blocks; i++) {
1559 spin_lock(&ubi->wl_lock);
1560 tmp_e = ubi_wl_get_fm_peb(ubi, 0);
1561 spin_unlock(&ubi->wl_lock);
1562
1563 if (!tmp_e) {
1564 if (old_fm && old_fm->e[i]) {
1565 ret = erase_block(ubi, old_fm->e[i]->pnum);
1566 if (ret < 0) {
1567 ubi_err(ubi, "could not erase old fastmap PEB");
1568
1569 for (j = 1; j < i; j++) {
1570 ubi_wl_put_fm_peb(ubi, new_fm->e[j],
1571 j, 0);
1572 new_fm->e[j] = NULL;
1573 }
1574 goto err;
1575 }
1576 new_fm->e[i] = old_fm->e[i];
1577 old_fm->e[i] = NULL;
1578 } else {
1579 ubi_err(ubi, "could not get any free erase block");
1580
1581 for (j = 1; j < i; j++) {
1582 ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
1583 new_fm->e[j] = NULL;
1584 }
1585
1586 ret = -ENOSPC;
1587 goto err;
1588 }
1589 } else {
1590 new_fm->e[i] = tmp_e;
1591
1592 if (old_fm && old_fm->e[i]) {
1593 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1594 old_fm->to_be_tortured[i]);
1595 old_fm->e[i] = NULL;
1596 }
1597 }
1598 }
1599
1600 /* Old fastmap is larger than the new one */
1601 if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
1602 for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
1603 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1604 old_fm->to_be_tortured[i]);
1605 old_fm->e[i] = NULL;
1606 }
1607 }
1608
1609 spin_lock(&ubi->wl_lock);
1610 tmp_e = ubi_wl_get_fm_peb(ubi, 1);
1611 spin_unlock(&ubi->wl_lock);
1612
1613 if (old_fm) {
1614 /* no fresh anchor PEB was found, reuse the old one */
1615 if (!tmp_e) {
1616 ret = erase_block(ubi, old_fm->e[0]->pnum);
1617 if (ret < 0) {
1618 ubi_err(ubi, "could not erase old anchor PEB");
1619
1620 for (i = 1; i < new_fm->used_blocks; i++) {
1621 ubi_wl_put_fm_peb(ubi, new_fm->e[i],
1622 i, 0);
1623 new_fm->e[i] = NULL;
1624 }
1625 goto err;
1626 }
1627 new_fm->e[0] = old_fm->e[0];
1628 new_fm->e[0]->ec = ret;
1629 old_fm->e[0] = NULL;
1630 } else {
1631 /* we've got a new anchor PEB, return the old one */
1632 ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
1633 old_fm->to_be_tortured[0]);
1634 new_fm->e[0] = tmp_e;
1635 old_fm->e[0] = NULL;
1636 }
1637 } else {
1638 if (!tmp_e) {
1639 ubi_err(ubi, "could not find any anchor PEB");
1640
1641 for (i = 1; i < new_fm->used_blocks; i++) {
1642 ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
1643 new_fm->e[i] = NULL;
1644 }
1645
1646 ret = -ENOSPC;
1647 goto err;
1648 }
1649 new_fm->e[0] = tmp_e;
1650 }
1651
1652 ret = ubi_write_fastmap(ubi, new_fm);
1653
1654 if (ret)
1655 goto err;
1656
1657out_unlock:
1658 up_write(&ubi->fm_eba_sem);
1659 up_write(&ubi->work_sem);
1660 up_write(&ubi->fm_protect);
1661 kfree(old_fm);
1662 return ret;
1663
1664err:
1665 ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
1666
1667 ret = invalidate_fastmap(ubi);
1668 if (ret < 0) {
1669 ubi_err(ubi, "Unable to invalidate current fastmap!");
1670 ubi_ro_mode(ubi);
1671 } else {
1672 return_fm_pebs(ubi, old_fm);
1673 return_fm_pebs(ubi, new_fm);
1674 ret = 0;
1675 }
1676
1677 kfree(new_fm);
1678 goto out_unlock;
1679}