1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2012 Linutronix GmbH
4 * Copyright (c) 2014 sigma star gmbh
5 * Author: Richard Weinberger <richard@nod.at>
6 */
7
8#include <linux/crc32.h>
9#include <linux/bitmap.h>
10#include "ubi.h"
11
12/**
13 * init_seen - allocate memory used for debugging.
14 * @ubi: UBI device description object
15 */
16static inline unsigned long *init_seen(struct ubi_device *ubi)
17{
18 unsigned long *ret;
19
20 if (!ubi_dbg_chk_fastmap(ubi))
21 return NULL;
22
23 ret = bitmap_zalloc(ubi->peb_count, GFP_KERNEL);
24 if (!ret)
25 return ERR_PTR(-ENOMEM);
26
27 return ret;
28}
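/*
 * Note that init_seen() returns NULL (not an error) when fastmap
 * self-checking is disabled.  Callers therefore test the result with
 * IS_ERR(), and set_seen()/self_check_seen() below treat a NULL bitmap
 * as "checking disabled".
 */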
29
30/**
31 * free_seen - free the seen bitmap.
32 * @seen: bitmap with @ubi->peb_count bits
33 */
34static inline void free_seen(unsigned long *seen)
35{
36 bitmap_free(seen);
37}
38
39/**
40 * set_seen - mark a PEB as seen.
41 * @ubi: UBI device description object
42 * @pnum: The PEB to be marked as seen
43 * @seen: bitmap with @ubi->peb_count bits
44 */
45static inline void set_seen(struct ubi_device *ubi, int pnum, unsigned long *seen)
46{
47 if (!ubi_dbg_chk_fastmap(ubi) || !seen)
48 return;
49
50 set_bit(pnum, seen);
51}
52
53/**
54 * self_check_seen - check whether all PEBs have been seen by fastmap.
55 * @ubi: UBI device description object
56 * @seen: bitmap with @ubi->peb_count bits
57 */
58static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
59{
60 int pnum, ret = 0;
61
62 if (!ubi_dbg_chk_fastmap(ubi) || !seen)
63 return 0;
64
65 for (pnum = 0; pnum < ubi->peb_count; pnum++) {
66 if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
67 ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
68 ret = -EINVAL;
69 }
70 }
71
72 return ret;
73}
74
75/**
76 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
77 * @ubi: UBI device description object
78 */
79size_t ubi_calc_fm_size(struct ubi_device *ubi)
80{
81 size_t size;
82
83 size = sizeof(struct ubi_fm_sb) +
84 sizeof(struct ubi_fm_hdr) +
85 sizeof(struct ubi_fm_scan_pool) +
86 sizeof(struct ubi_fm_scan_pool) +
87 (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
88 (sizeof(struct ubi_fm_eba) +
89 (ubi->peb_count * sizeof(__be32))) +
90 sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
91 return roundup(size, ubi->leb_size);
92}
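/*
 * The size computed above is the worst case: one ubi_fm_ec entry and one
 * EBA slot per PEB, plus a volume header for each of UBI_MAX_VOLUMES.
 * The on-flash layout it describes is:
 *
 *   ubi_fm_sb | ubi_fm_hdr | pool | WL pool | EC entries | EBA | volhdrs
 *
 * rounded up to whole LEBs, since the fastmap occupies complete LEBs.
 */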
93
94
95/**
96 * new_fm_vbuf - allocate a new VID buffer for fastmap usage.
97 * @ubi: UBI device description object
98 * @vol_id: the VID of the new header
99 *
100 * Returns a new struct ubi_vid_io_buf on success.
101 * NULL indicates out of memory.
102 */
103static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
104{
105 struct ubi_vid_io_buf *new;
106 struct ubi_vid_hdr *vh;
107
108 new = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
109 if (!new)
110 goto out;
111
112 vh = ubi_get_vid_hdr(new);
113 vh->vol_type = UBI_VID_DYNAMIC;
114 vh->vol_id = cpu_to_be32(vol_id);
115
116 /* UBI implementations without fastmap support have to delete the
117 * fastmap.
118 */
119 vh->compat = UBI_COMPAT_DELETE;
120
121out:
122 return new;
123}
124
125/**
126 * add_aeb - create and add an attach erase block to a given list.
127 * @ai: UBI attach info object
128 * @list: the target list
129 * @pnum: PEB number of the new attach erase block
130 * @ec: erase counter of the new PEB
131 * @scrub: scrub this PEB after attaching
132 *
133 * Returns 0 on success, < 0 indicates an internal error.
134 */
135static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
136 int pnum, int ec, int scrub)
137{
138 struct ubi_ainf_peb *aeb;
139
140 aeb = ubi_alloc_aeb(ai, pnum, ec);
141 if (!aeb)
142 return -ENOMEM;
143
144 aeb->lnum = -1;
145 aeb->scrub = scrub;
146 aeb->copy_flag = aeb->sqnum = 0;
147
148 ai->ec_sum += aeb->ec;
149 ai->ec_count++;
150
151 if (ai->max_ec < aeb->ec)
152 ai->max_ec = aeb->ec;
153
154 if (ai->min_ec > aeb->ec)
155 ai->min_ec = aeb->ec;
156
157 list_add_tail(&aeb->u.list, list);
158
159 return 0;
160}
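/*
 * Besides queueing the new aeb, add_aeb() updates the attach-time erase
 * counter statistics (ec_sum, ec_count, min/max EC) which
 * ubi_attach_fastmap() later uses to compute ai->mean_ec.
 */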
161
162/**
163 * add_vol - create and add a new volume to ubi_attach_info.
164 * @ai: ubi_attach_info object
165 * @vol_id: VID of the new volume
166 * @used_ebs: number of used EBs
167 * @data_pad: data padding value of the new volume
168 * @vol_type: volume type
169 * @last_eb_bytes: number of bytes in the last LEB
170 *
171 * Returns the new struct ubi_ainf_volume on success.
172 * NULL indicates an error.
173 */
174static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
175 int used_ebs, int data_pad, u8 vol_type,
176 int last_eb_bytes)
177{
178 struct ubi_ainf_volume *av;
179
180 av = ubi_add_av(ai, vol_id);
181 if (IS_ERR(av))
182 return av;
183
184 av->data_pad = data_pad;
185 av->last_data_size = last_eb_bytes;
186 av->compat = 0;
187 av->vol_type = vol_type;
188 if (av->vol_type == UBI_STATIC_VOLUME)
189 av->used_ebs = used_ebs;
190
191 dbg_bld("found volume (ID %i)", vol_id);
192 return av;
193}
194
195/**
196 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
197 * from its original list.
198 * @ai: ubi_attach_info object
199 * @aeb: the SEB to be assigned
200 * @av: target scan volume
201 */
202static void assign_aeb_to_av(struct ubi_attach_info *ai,
203 struct ubi_ainf_peb *aeb,
204 struct ubi_ainf_volume *av)
205{
206 struct ubi_ainf_peb *tmp_aeb;
207 struct rb_node **p = &av->root.rb_node, *parent = NULL;
208
209 while (*p) {
210 parent = *p;
211
212 tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
213 if (aeb->lnum != tmp_aeb->lnum) {
214 if (aeb->lnum < tmp_aeb->lnum)
215 p = &(*p)->rb_left;
216 else
217 p = &(*p)->rb_right;
218
219 continue;
220 } else
221 break;
222 }
223
224 list_del(&aeb->u.list);
225 av->leb_count++;
226
227 rb_link_node(&aeb->u.rb, parent, p);
228 rb_insert_color(&aeb->u.rb, &av->root);
229}
230
231/**
232 * update_vol - inserts or updates a LEB which was found in a pool.
233 * @ubi: the UBI device object
234 * @ai: attach info object
235 * @av: the volume this LEB belongs to
236 * @new_vh: the volume header derived from new_aeb
237 * @new_aeb: the AEB to be examined
238 *
239 * Returns 0 on success, < 0 indicates an internal error.
240 */
241static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
242 struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
243 struct ubi_ainf_peb *new_aeb)
244{
245 struct rb_node **p = &av->root.rb_node, *parent = NULL;
246 struct ubi_ainf_peb *aeb, *victim;
247 int cmp_res;
248
249 while (*p) {
250 parent = *p;
251 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
252
253 if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
254 if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
255 p = &(*p)->rb_left;
256 else
257 p = &(*p)->rb_right;
258
259 continue;
260 }
261
262 /* This case can happen if the fastmap gets written
263 * because of a volume change (creation, deletion, ..).
264 * Then a PEB can be within the persistent EBA and the pool.
265 */
266 if (aeb->pnum == new_aeb->pnum) {
267 ubi_assert(aeb->lnum == new_aeb->lnum);
268 ubi_free_aeb(ai, new_aeb);
269
270 return 0;
271 }
272
273 cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
274 if (cmp_res < 0)
275 return cmp_res;
276
277 /* new_aeb is newer */
278 if (cmp_res & 1) {
279 victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
280 if (!victim)
281 return -ENOMEM;
282
283 list_add_tail(&victim->u.list, &ai->erase);
284
285 if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
286 av->last_data_size =
287 be32_to_cpu(new_vh->data_size);
288
289 dbg_bld("vol %i: AEB %i's PEB %i is the newer",
290 av->vol_id, aeb->lnum, new_aeb->pnum);
291
292 aeb->ec = new_aeb->ec;
293 aeb->pnum = new_aeb->pnum;
294 aeb->copy_flag = new_vh->copy_flag;
295 aeb->scrub = new_aeb->scrub;
296 aeb->sqnum = new_aeb->sqnum;
297 ubi_free_aeb(ai, new_aeb);
298
299 /* new_aeb is older */
300 } else {
301 dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
302 av->vol_id, aeb->lnum, new_aeb->pnum);
303 list_add_tail(&new_aeb->u.list, &ai->erase);
304 }
305
306 return 0;
307 }
308 /* This LEB is new, let's add it to the volume */
309
310 if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
311 av->highest_lnum = be32_to_cpu(new_vh->lnum);
312 av->last_data_size = be32_to_cpu(new_vh->data_size);
313 }
314
315 if (av->vol_type == UBI_STATIC_VOLUME)
316 av->used_ebs = be32_to_cpu(new_vh->used_ebs);
317
318 av->leb_count++;
319
320 rb_link_node(&new_aeb->u.rb, parent, p);
321 rb_insert_color(&new_aeb->u.rb, &av->root);
322
323 return 0;
324}
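/*
 * Summary of the cases handled above: a PEB that already sits in the EBA
 * tree is dropped as a duplicate, ubi_compare_lebs() decides between two
 * PEBs carrying the same LEB (bit 0 set means the pool copy is newer),
 * and a LEB not yet in the tree is simply inserted.
 */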
325
326/**
327 * process_pool_aeb - process a non-empty PEB found in a pool.
328 * @ubi: UBI device object
329 * @ai: attach info object
330 * @new_vh: the volume header derived from new_aeb
331 * @new_aeb: the AEB to be examined
332 *
333 * Returns 0 on success, < 0 indicates an internal error.
334 */
335static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
336 struct ubi_vid_hdr *new_vh,
337 struct ubi_ainf_peb *new_aeb)
338{
339 int vol_id = be32_to_cpu(new_vh->vol_id);
340 struct ubi_ainf_volume *av;
341
342 if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) {
343 ubi_free_aeb(ai, new_aeb);
344
345 return 0;
346 }
347
348 /* Find the volume this SEB belongs to */
349 av = ubi_find_av(ai, vol_id);
350 if (!av) {
351 ubi_err(ubi, "orphaned volume in fastmap pool!");
352 ubi_free_aeb(ai, new_aeb);
353 return UBI_BAD_FASTMAP;
354 }
355
356 ubi_assert(vol_id == av->vol_id);
357
358 return update_vol(ubi, ai, av, new_vh, new_aeb);
359}
360
361/**
362 * unmap_peb - unmap a PEB.
363 * If fastmap detects a free PEB in the pool it has to check whether
364 * this PEB has been unmapped after writing the fastmap.
365 *
366 * @ai: UBI attach info object
367 * @pnum: The PEB to be unmapped
368 */
369static void unmap_peb(struct ubi_attach_info *ai, int pnum)
370{
371 struct ubi_ainf_volume *av;
372 struct rb_node *node, *node2;
373 struct ubi_ainf_peb *aeb;
374
375 ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
376 ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
377 if (aeb->pnum == pnum) {
378 rb_erase(&aeb->u.rb, &av->root);
379 av->leb_count--;
380 ubi_free_aeb(ai, aeb);
381 return;
382 }
383 }
384 }
385}
386
387/**
388 * scan_pool - scans a pool for changed (no longer empty) PEBs.
389 * @ubi: UBI device object
390 * @ai: attach info object
391 * @pebs: an array of all PEB numbers in the pool to be scanned
392 * @pool_size: size of the pool (number of entries in @pebs)
393 * @max_sqnum: pointer to the maximal sequence number
394 * @free: list of PEBs which are most likely free (and go into @ai->free)
395 *
396 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
397 * < 0 indicates an internal error.
398 */
399static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
400 __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
401 struct list_head *free)
402{
403 struct ubi_vid_io_buf *vb;
404 struct ubi_vid_hdr *vh;
405 struct ubi_ec_hdr *ech;
406 struct ubi_ainf_peb *new_aeb;
407 int i, pnum, err, ret = 0;
408
409 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
410 if (!ech)
411 return -ENOMEM;
412
413 vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
414 if (!vb) {
415 kfree(ech);
416 return -ENOMEM;
417 }
418
419 vh = ubi_get_vid_hdr(vb);
420
421 dbg_bld("scanning fastmap pool: size = %i", pool_size);
422
423 /*
424 * Now scan all PEBs in the pool to find changes which have been made
425 * after the creation of the fastmap
426 */
427 for (i = 0; i < pool_size; i++) {
428 int scrub = 0;
429 int image_seq;
430
431 pnum = be32_to_cpu(pebs[i]);
432
433 if (ubi_io_is_bad(ubi, pnum)) {
434 ubi_err(ubi, "bad PEB in fastmap pool!");
435 ret = UBI_BAD_FASTMAP;
436 goto out;
437 }
438
439 err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
440 if (err && err != UBI_IO_BITFLIPS) {
441 ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
442 pnum, err);
443 ret = err > 0 ? UBI_BAD_FASTMAP : err;
444 goto out;
445 } else if (err == UBI_IO_BITFLIPS)
446 scrub = 1;
447
448 /*
449 * Older UBI implementations have image_seq set to zero, so
450 * we shouldn't fail if image_seq == 0.
451 */
452 image_seq = be32_to_cpu(ech->image_seq);
453
454 if (image_seq && (image_seq != ubi->image_seq)) {
455 ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
456 be32_to_cpu(ech->image_seq), ubi->image_seq);
457 ret = UBI_BAD_FASTMAP;
458 goto out;
459 }
460
461 err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
462 if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
463 unsigned long long ec = be64_to_cpu(ech->ec);
464 unmap_peb(ai, pnum);
465 dbg_bld("Adding PEB to free: %i", pnum);
466
467 if (err == UBI_IO_FF_BITFLIPS)
468 scrub = 1;
469
470 ret = add_aeb(ai, free, pnum, ec, scrub);
471 if (ret)
472 goto out;
473 continue;
474 } else if (err == 0 || err == UBI_IO_BITFLIPS) {
475 dbg_bld("Found non empty PEB:%i in pool", pnum);
476
477 if (err == UBI_IO_BITFLIPS)
478 scrub = 1;
479
480 new_aeb = ubi_alloc_aeb(ai, pnum, be64_to_cpu(ech->ec));
481 if (!new_aeb) {
482 ret = -ENOMEM;
483 goto out;
484 }
485
486 new_aeb->lnum = be32_to_cpu(vh->lnum);
487 new_aeb->sqnum = be64_to_cpu(vh->sqnum);
488 new_aeb->copy_flag = vh->copy_flag;
489 new_aeb->scrub = scrub;
490
491 if (*max_sqnum < new_aeb->sqnum)
492 *max_sqnum = new_aeb->sqnum;
493
494 err = process_pool_aeb(ubi, ai, vh, new_aeb);
495 if (err) {
496 ret = err > 0 ? UBI_BAD_FASTMAP : err;
497 goto out;
498 }
499 } else {
500 /* We are paranoid and fall back to scanning mode */
501 ubi_err(ubi, "fastmap pool PEBs contains damaged PEBs!");
502 ret = err > 0 ? UBI_BAD_FASTMAP : err;
503 goto out;
504 }
505
506 }
507
508out:
509 ubi_free_vid_buf(vb);
510 kfree(ech);
511 return ret;
512}
513
514/**
515 * count_fastmap_pebs - Counts the PEBs found by fastmap.
516 * @ai: The UBI attach info object
517 */
518static int count_fastmap_pebs(struct ubi_attach_info *ai)
519{
520 struct ubi_ainf_peb *aeb;
521 struct ubi_ainf_volume *av;
522 struct rb_node *rb1, *rb2;
523 int n = 0;
524
525 list_for_each_entry(aeb, &ai->erase, u.list)
526 n++;
527
528 list_for_each_entry(aeb, &ai->free, u.list)
529 n++;
530
531 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
532 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
533 n++;
534
535 return n;
536}
537
538/**
539 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
540 * @ubi: UBI device object
541 * @ai: UBI attach info object
542 * @fm: the fastmap to be attached
543 *
544 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
545 * < 0 indicates an internal error.
546 */
547static int ubi_attach_fastmap(struct ubi_device *ubi,
548 struct ubi_attach_info *ai,
549 struct ubi_fastmap_layout *fm)
550{
551 struct list_head used, free;
552 struct ubi_ainf_volume *av;
553 struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
554 struct ubi_fm_sb *fmsb;
555 struct ubi_fm_hdr *fmhdr;
556 struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
557 struct ubi_fm_ec *fmec;
558 struct ubi_fm_volhdr *fmvhdr;
559 struct ubi_fm_eba *fm_eba;
560 int ret, i, j, pool_size, wl_pool_size;
561 size_t fm_pos = 0, fm_size = ubi->fm_size;
562 unsigned long long max_sqnum = 0;
563 void *fm_raw = ubi->fm_buf;
564
565 INIT_LIST_HEAD(&used);
566 INIT_LIST_HEAD(&free);
567 ai->min_ec = UBI_MAX_ERASECOUNTER;
568
569 fmsb = (struct ubi_fm_sb *)(fm_raw);
570 ai->max_sqnum = fmsb->sqnum;
571 fm_pos += sizeof(struct ubi_fm_sb);
572 if (fm_pos >= fm_size)
573 goto fail_bad;
574
575 fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
576 fm_pos += sizeof(*fmhdr);
577 if (fm_pos >= fm_size)
578 goto fail_bad;
579
580 if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
581 ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
582 be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
583 goto fail_bad;
584 }
585
586 fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
587 fm_pos += sizeof(*fmpl);
588 if (fm_pos >= fm_size)
589 goto fail_bad;
590 if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
591 ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
592 be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
593 goto fail_bad;
594 }
595
596 fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
597 fm_pos += sizeof(*fmpl_wl);
598 if (fm_pos >= fm_size)
599 goto fail_bad;
600 if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
601 ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
602 be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
603 goto fail_bad;
604 }
605
606 pool_size = be16_to_cpu(fmpl->size);
607 wl_pool_size = be16_to_cpu(fmpl_wl->size);
608 fm->max_pool_size = be16_to_cpu(fmpl->max_size);
609 fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);
610
611 if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
612 ubi_err(ubi, "bad pool size: %i", pool_size);
613 goto fail_bad;
614 }
615
616 if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
617 ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
618 goto fail_bad;
619 }
620
621
622 if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
623 fm->max_pool_size < 0) {
624 ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
625 goto fail_bad;
626 }
627
628 if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
629 fm->max_wl_pool_size < 0) {
630 ubi_err(ubi, "bad maximal WL pool size: %i",
631 fm->max_wl_pool_size);
632 goto fail_bad;
633 }
634
635 /* read EC values from free list */
636 for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
637 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
638 fm_pos += sizeof(*fmec);
639 if (fm_pos >= fm_size)
640 goto fail_bad;
641
642 ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
643 be32_to_cpu(fmec->ec), 0);
644 if (ret)
645 goto fail;
646 }
647
648 /* read EC values from used list */
649 for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
650 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
651 fm_pos += sizeof(*fmec);
652 if (fm_pos >= fm_size)
653 goto fail_bad;
654
655 ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
656 be32_to_cpu(fmec->ec), 0);
657 if (ret)
658 goto fail;
659 }
660
661 /* read EC values from scrub list */
662 for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
663 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
664 fm_pos += sizeof(*fmec);
665 if (fm_pos >= fm_size)
666 goto fail_bad;
667
668 ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
669 be32_to_cpu(fmec->ec), 1);
670 if (ret)
671 goto fail;
672 }
673
674 /* read EC values from erase list */
675 for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
676 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
677 fm_pos += sizeof(*fmec);
678 if (fm_pos >= fm_size)
679 goto fail_bad;
680
681 ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
682 be32_to_cpu(fmec->ec), 1);
683 if (ret)
684 goto fail;
685 }
686
687 ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
688 ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
689
690 /* Iterate over all volumes and read their EBA table */
691 for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
692 fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
693 fm_pos += sizeof(*fmvhdr);
694 if (fm_pos >= fm_size)
695 goto fail_bad;
696
697 if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
698 ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
699 be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
700 goto fail_bad;
701 }
702
703 av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
704 be32_to_cpu(fmvhdr->used_ebs),
705 be32_to_cpu(fmvhdr->data_pad),
706 fmvhdr->vol_type,
707 be32_to_cpu(fmvhdr->last_eb_bytes));
708
709 if (IS_ERR(av)) {
710 if (PTR_ERR(av) == -EEXIST)
711 ubi_err(ubi, "volume (ID %i) already exists",
712 fmvhdr->vol_id);
713
714 goto fail_bad;
715 }
716
717 ai->vols_found++;
718 if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
719 ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
720
721 fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
722 fm_pos += sizeof(*fm_eba);
723 fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
724 if (fm_pos >= fm_size)
725 goto fail_bad;
726
727 if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
728 ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
729 be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
730 goto fail_bad;
731 }
732
733 for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
734 int pnum = be32_to_cpu(fm_eba->pnum[j]);
735
736 if (pnum < 0)
737 continue;
738
739 aeb = NULL;
740 list_for_each_entry(tmp_aeb, &used, u.list) {
741 if (tmp_aeb->pnum == pnum) {
742 aeb = tmp_aeb;
743 break;
744 }
745 }
746
747 if (!aeb) {
748 ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
749 goto fail_bad;
750 }
751
752 aeb->lnum = j;
753
754 if (av->highest_lnum <= aeb->lnum)
755 av->highest_lnum = aeb->lnum;
756
757 assign_aeb_to_av(ai, aeb, av);
758
759 dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
760 aeb->pnum, aeb->lnum, av->vol_id);
761 }
762 }
763
764 ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
765 if (ret)
766 goto fail;
767
768 ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
769 if (ret)
770 goto fail;
771
772 if (max_sqnum > ai->max_sqnum)
773 ai->max_sqnum = max_sqnum;
774
775 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
776 list_move_tail(&tmp_aeb->u.list, &ai->free);
777
778 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
779 list_move_tail(&tmp_aeb->u.list, &ai->erase);
780
781 ubi_assert(list_empty(&free));
782
783 /*
784 * If fastmap is leaking PEBs (must not happen), raise a
785 * fat warning and fall back to scanning mode.
786 * We do this here because in ubi_wl_init() it's too late
787 * and we cannot fall back to scanning.
788 */
789 if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
790 ai->bad_peb_count - fm->used_blocks))
791 goto fail_bad;
792
793 return 0;
794
795fail_bad:
796 ret = UBI_BAD_FASTMAP;
797fail:
798 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
799 list_del(&tmp_aeb->u.list);
800 ubi_free_aeb(ai, tmp_aeb);
801 }
802 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
803 list_del(&tmp_aeb->u.list);
804 ubi_free_aeb(ai, tmp_aeb);
805 }
806
807 return ret;
808}
809
810/**
811 * find_fm_anchor - find the most recent Fastmap superblock (anchor)
812 * @ai: UBI attach info to be filled
813 */
814static int find_fm_anchor(struct ubi_attach_info *ai)
815{
816 int ret = -1;
817 struct ubi_ainf_peb *aeb;
818 unsigned long long max_sqnum = 0;
819
820 list_for_each_entry(aeb, &ai->fastmap, u.list) {
821 if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
822 max_sqnum = aeb->sqnum;
823 ret = aeb->pnum;
824 }
825 }
826
827 return ret;
828}
829
830static struct ubi_ainf_peb *clone_aeb(struct ubi_attach_info *ai,
831 struct ubi_ainf_peb *old)
832{
833 struct ubi_ainf_peb *new;
834
835 new = ubi_alloc_aeb(ai, old->pnum, old->ec);
836 if (!new)
837 return NULL;
838
839 new->vol_id = old->vol_id;
840 new->sqnum = old->sqnum;
841 new->lnum = old->lnum;
842 new->scrub = old->scrub;
843 new->copy_flag = old->copy_flag;
844
845 return new;
846}
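/*
 * aeb objects are allocated from their owning attach structure (see
 * ubi_alloc_aeb(ai, ...)), so entries found by @scan_ai cannot simply be
 * moved to another ubi_attach_info; clone_aeb() duplicates them instead.
 */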
847
848/**
849 * ubi_scan_fastmap - scan the fastmap.
850 * @ubi: UBI device object
851 * @ai: UBI attach info to be filled
852 * @scan_ai: UBI attach info from the first 64 PEBs,
853 * used to find the most recent Fastmap data structure
854 *
855 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
856 * UBI_BAD_FASTMAP if one was found but is not usable.
857 * < 0 indicates an internal error.
858 */
859int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
860 struct ubi_attach_info *scan_ai)
861{
862 struct ubi_fm_sb *fmsb, *fmsb2;
863 struct ubi_vid_io_buf *vb;
864 struct ubi_vid_hdr *vh;
865 struct ubi_ec_hdr *ech;
866 struct ubi_fastmap_layout *fm;
867 struct ubi_ainf_peb *aeb;
868 int i, used_blocks, pnum, fm_anchor, ret = 0;
869 size_t fm_size;
870 __be32 crc, tmp_crc;
871 unsigned long long sqnum = 0;
872
873 fm_anchor = find_fm_anchor(scan_ai);
874 if (fm_anchor < 0)
875 return UBI_NO_FASTMAP;
876
877 /* Copy all (possible) fastmap blocks into our new attach structure. */
878 list_for_each_entry(aeb, &scan_ai->fastmap, u.list) {
879 struct ubi_ainf_peb *new;
880
881 new = clone_aeb(ai, aeb);
882 if (!new)
883 return -ENOMEM;
884
885 list_add(&new->u.list, &ai->fastmap);
886 }
887
888 down_write(&ubi->fm_protect);
889 memset(ubi->fm_buf, 0, ubi->fm_size);
890
891 fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
892 if (!fmsb) {
893 ret = -ENOMEM;
894 goto out;
895 }
896
897 fm = kzalloc(sizeof(*fm), GFP_KERNEL);
898 if (!fm) {
899 ret = -ENOMEM;
900 kfree(fmsb);
901 goto out;
902 }
903
904 ret = ubi_io_read_data(ubi, fmsb, fm_anchor, 0, sizeof(*fmsb));
905 if (ret && ret != UBI_IO_BITFLIPS)
906 goto free_fm_sb;
907 else if (ret == UBI_IO_BITFLIPS)
908 fm->to_be_tortured[0] = 1;
909
910 if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
911 ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
912 be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
913 ret = UBI_BAD_FASTMAP;
914 goto free_fm_sb;
915 }
916
917 if (fmsb->version != UBI_FM_FMT_VERSION) {
918 ubi_err(ubi, "bad fastmap version: %i, expected: %i",
919 fmsb->version, UBI_FM_FMT_VERSION);
920 ret = UBI_BAD_FASTMAP;
921 goto free_fm_sb;
922 }
923
924 used_blocks = be32_to_cpu(fmsb->used_blocks);
925 if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
926 ubi_err(ubi, "number of fastmap blocks is invalid: %i",
927 used_blocks);
928 ret = UBI_BAD_FASTMAP;
929 goto free_fm_sb;
930 }
931
932 fm_size = ubi->leb_size * used_blocks;
933 if (fm_size != ubi->fm_size) {
934 ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
935 fm_size, ubi->fm_size);
936 ret = UBI_BAD_FASTMAP;
937 goto free_fm_sb;
938 }
939
940 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
941 if (!ech) {
942 ret = -ENOMEM;
943 goto free_fm_sb;
944 }
945
946 vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
947 if (!vb) {
948 ret = -ENOMEM;
949 goto free_hdr;
950 }
951
952 vh = ubi_get_vid_hdr(vb);
953
954 for (i = 0; i < used_blocks; i++) {
955 int image_seq;
956
957 pnum = be32_to_cpu(fmsb->block_loc[i]);
958
959 if (ubi_io_is_bad(ubi, pnum)) {
960 ret = UBI_BAD_FASTMAP;
961 goto free_hdr;
962 }
963
964 if (i == 0 && pnum != fm_anchor) {
965 ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
966 pnum, fm_anchor);
967 ret = UBI_BAD_FASTMAP;
968 goto free_hdr;
969 }
970
971 ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
972 if (ret && ret != UBI_IO_BITFLIPS) {
973 ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
974 i, pnum);
975 if (ret > 0)
976 ret = UBI_BAD_FASTMAP;
977 goto free_hdr;
978 } else if (ret == UBI_IO_BITFLIPS)
979 fm->to_be_tortured[i] = 1;
980
981 image_seq = be32_to_cpu(ech->image_seq);
982 if (!ubi->image_seq)
983 ubi->image_seq = image_seq;
984
985 /*
986 * Older UBI implementations have image_seq set to zero, so
987 * we shouldn't fail if image_seq == 0.
988 */
989 if (image_seq && (image_seq != ubi->image_seq)) {
990 ubi_err(ubi, "wrong image seq:%d instead of %d",
991 be32_to_cpu(ech->image_seq), ubi->image_seq);
992 ret = UBI_BAD_FASTMAP;
993 goto free_hdr;
994 }
995
996 ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
997 if (ret && ret != UBI_IO_BITFLIPS) {
998 ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
999 i, pnum);
1000 goto free_hdr;
1001 }
1002
1003 if (i == 0) {
1004 if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
1005 ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
1006 be32_to_cpu(vh->vol_id),
1007 UBI_FM_SB_VOLUME_ID);
1008 ret = UBI_BAD_FASTMAP;
1009 goto free_hdr;
1010 }
1011 } else {
1012 if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
1013 ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
1014 be32_to_cpu(vh->vol_id),
1015 UBI_FM_DATA_VOLUME_ID);
1016 ret = UBI_BAD_FASTMAP;
1017 goto free_hdr;
1018 }
1019 }
1020
1021 if (sqnum < be64_to_cpu(vh->sqnum))
1022 sqnum = be64_to_cpu(vh->sqnum);
1023
1024 ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i),
1025 pnum, 0, ubi->leb_size);
1026 if (ret && ret != UBI_IO_BITFLIPS) {
1027 ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
1028 "err: %i)", i, pnum, ret);
1029 goto free_hdr;
1030 }
1031 }
1032
1033 kfree(fmsb);
1034 fmsb = NULL;
1035
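/*
 * The stored CRC covers the complete fastmap with the data_crc field
 * itself set to zero, so clear it before recomputing.
 */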
1036 fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
1037 tmp_crc = be32_to_cpu(fmsb2->data_crc);
1038 fmsb2->data_crc = 0;
1039 crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
1040 if (crc != tmp_crc) {
1041 ubi_err(ubi, "fastmap data CRC is invalid");
1042 ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
1043 tmp_crc, crc);
1044 ret = UBI_BAD_FASTMAP;
1045 goto free_hdr;
1046 }
1047
1048 fmsb2->sqnum = sqnum;
1049
1050 fm->used_blocks = used_blocks;
1051
1052 ret = ubi_attach_fastmap(ubi, ai, fm);
1053 if (ret) {
1054 if (ret > 0)
1055 ret = UBI_BAD_FASTMAP;
1056 goto free_hdr;
1057 }
1058
1059 for (i = 0; i < used_blocks; i++) {
1060 struct ubi_wl_entry *e;
1061
1062 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1063 if (!e) {
1064 while (i--)
1065 kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);
1066
1067 ret = -ENOMEM;
1068 goto free_hdr;
1069 }
1070
1071 e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
1072 e->ec = be32_to_cpu(fmsb2->block_ec[i]);
1073 fm->e[i] = e;
1074 }
1075
1076 ubi->fm = fm;
1077 ubi->fm_pool.max_size = ubi->fm->max_pool_size;
1078 ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
1079 ubi_msg(ubi, "attached by fastmap");
1080 ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
1081 ubi_msg(ubi, "fastmap WL pool size: %d",
1082 ubi->fm_wl_pool.max_size);
1083 ubi->fm_disabled = 0;
1084 ubi->fast_attach = 1;
1085
1086 ubi_free_vid_buf(vb);
1087 kfree(ech);
1088out:
1089 up_write(&ubi->fm_protect);
1090 if (ret == UBI_BAD_FASTMAP)
1091 ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
1092 return ret;
1093
1094free_hdr:
1095 ubi_free_vid_buf(vb);
1096 kfree(ech);
1097free_fm_sb:
1098 kfree(fmsb);
1099 kfree(fm);
1100 goto out;
1101}
1102
1103int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count)
1104{
1105 struct ubi_device *ubi = vol->ubi;
1106
1107 if (!ubi->fast_attach)
1108 return 0;
1109
1110 vol->checkmap = bitmap_zalloc(leb_count, GFP_KERNEL);
1111 if (!vol->checkmap)
1112 return -ENOMEM;
1113
1114 return 0;
1115}
1116
1117void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol)
1118{
1119 bitmap_free(vol->checkmap);
1120}
1121
1122/**
1123 * ubi_write_fastmap - writes a fastmap.
1124 * @ubi: UBI device object
1125 * @new_fm: the fastmap to be written
1126 *
1127 * Returns 0 on success, < 0 indicates an internal error.
1128 */
1129static int ubi_write_fastmap(struct ubi_device *ubi,
1130 struct ubi_fastmap_layout *new_fm)
1131{
1132 size_t fm_pos = 0;
1133 void *fm_raw;
1134 struct ubi_fm_sb *fmsb;
1135 struct ubi_fm_hdr *fmh;
1136 struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
1137 struct ubi_fm_ec *fec;
1138 struct ubi_fm_volhdr *fvh;
1139 struct ubi_fm_eba *feba;
1140 struct ubi_wl_entry *wl_e;
1141 struct ubi_volume *vol;
1142 struct ubi_vid_io_buf *avbuf, *dvbuf;
1143 struct ubi_vid_hdr *avhdr, *dvhdr;
1144 struct ubi_work *ubi_wrk;
1145 struct rb_node *tmp_rb;
1146 int ret, i, j, free_peb_count, used_peb_count, vol_count;
1147 int scrub_peb_count, erase_peb_count;
1148 unsigned long *seen_pebs;
1149
1150 fm_raw = ubi->fm_buf;
1151 memset(ubi->fm_buf, 0, ubi->fm_size);
1152
1153 avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
1154 if (!avbuf) {
1155 ret = -ENOMEM;
1156 goto out;
1157 }
1158
1159 dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
1160 if (!dvbuf) {
1161 ret = -ENOMEM;
1162 goto out_free_avbuf;
1163 }
1164
1165 avhdr = ubi_get_vid_hdr(avbuf);
1166 dvhdr = ubi_get_vid_hdr(dvbuf);
1167
1168 seen_pebs = init_seen(ubi);
1169 if (IS_ERR(seen_pebs)) {
1170 ret = PTR_ERR(seen_pebs);
1171 goto out_free_dvbuf;
1172 }
1173
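/*
 * Both locks stay held while the fastmap image is assembled in
 * ubi->fm_buf so that the snapshot of the volume and wear-leveling
 * state is consistent; they are dropped again before any flash I/O.
 */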
1174 spin_lock(&ubi->volumes_lock);
1175 spin_lock(&ubi->wl_lock);
1176
1177 fmsb = (struct ubi_fm_sb *)fm_raw;
1178 fm_pos += sizeof(*fmsb);
1179 ubi_assert(fm_pos <= ubi->fm_size);
1180
1181 fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
1182 fm_pos += sizeof(*fmh);
1183 ubi_assert(fm_pos <= ubi->fm_size);
1184
1185 fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
1186 fmsb->version = UBI_FM_FMT_VERSION;
1187 fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
1188 /* the max sqnum will be filled in while *reading* the fastmap */
1189 fmsb->sqnum = 0;
1190
1191 fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
1192 free_peb_count = 0;
1193 used_peb_count = 0;
1194 scrub_peb_count = 0;
1195 erase_peb_count = 0;
1196 vol_count = 0;
1197
1198 fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1199 fm_pos += sizeof(*fmpl);
1200 fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1201 fmpl->size = cpu_to_be16(ubi->fm_pool.size);
1202 fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);
1203
1204 for (i = 0; i < ubi->fm_pool.size; i++) {
1205 fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
1206 set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
1207 }
1208
1209 fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1210 fm_pos += sizeof(*fmpl_wl);
1211 fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1212 fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
1213 fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
1214
1215 for (i = 0; i < ubi->fm_wl_pool.size; i++) {
1216 fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
1217 set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
1218 }
1219
1220 ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
1221 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1222
1223 fec->pnum = cpu_to_be32(wl_e->pnum);
1224 set_seen(ubi, wl_e->pnum, seen_pebs);
1225 fec->ec = cpu_to_be32(wl_e->ec);
1226
1227 free_peb_count++;
1228 fm_pos += sizeof(*fec);
1229 ubi_assert(fm_pos <= ubi->fm_size);
1230 }
1231 fmh->free_peb_count = cpu_to_be32(free_peb_count);
1232
1233 ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
1234 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1235
1236 fec->pnum = cpu_to_be32(wl_e->pnum);
1237 set_seen(ubi, wl_e->pnum, seen_pebs);
1238 fec->ec = cpu_to_be32(wl_e->ec);
1239
1240 used_peb_count++;
1241 fm_pos += sizeof(*fec);
1242 ubi_assert(fm_pos <= ubi->fm_size);
1243 }
1244
1245 ubi_for_each_protected_peb(ubi, i, wl_e) {
1246 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1247
1248 fec->pnum = cpu_to_be32(wl_e->pnum);
1249 set_seen(ubi, wl_e->pnum, seen_pebs);
1250 fec->ec = cpu_to_be32(wl_e->ec);
1251
1252 used_peb_count++;
1253 fm_pos += sizeof(*fec);
1254 ubi_assert(fm_pos <= ubi->fm_size);
1255 }
1256 fmh->used_peb_count = cpu_to_be32(used_peb_count);
1257
1258 ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
1259 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1260
1261 fec->pnum = cpu_to_be32(wl_e->pnum);
1262 set_seen(ubi, wl_e->pnum, seen_pebs);
1263 fec->ec = cpu_to_be32(wl_e->ec);
1264
1265 scrub_peb_count++;
1266 fm_pos += sizeof(*fec);
1267 ubi_assert(fm_pos <= ubi->fm_size);
1268 }
1269 fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
1270
1271
1272 list_for_each_entry(ubi_wrk, &ubi->works, list) {
1273 if (ubi_is_erase_work(ubi_wrk)) {
1274 wl_e = ubi_wrk->e;
1275 ubi_assert(wl_e);
1276
1277 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1278
1279 fec->pnum = cpu_to_be32(wl_e->pnum);
1280 set_seen(ubi, wl_e->pnum, seen_pebs);
1281 fec->ec = cpu_to_be32(wl_e->ec);
1282
1283 erase_peb_count++;
1284 fm_pos += sizeof(*fec);
1285 ubi_assert(fm_pos <= ubi->fm_size);
1286 }
1287 }
1288 fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
1289
1290 for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
1291 vol = ubi->volumes[i];
1292
1293 if (!vol)
1294 continue;
1295
1296 vol_count++;
1297
1298 fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
1299 fm_pos += sizeof(*fvh);
1300 ubi_assert(fm_pos <= ubi->fm_size);
1301
1302 fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
1303 fvh->vol_id = cpu_to_be32(vol->vol_id);
1304 fvh->vol_type = vol->vol_type;
1305 fvh->used_ebs = cpu_to_be32(vol->used_ebs);
1306 fvh->data_pad = cpu_to_be32(vol->data_pad);
1307 fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
1308
1309 ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
1310 vol->vol_type == UBI_STATIC_VOLUME);
1311
1312 feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
1313 fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
1314 ubi_assert(fm_pos <= ubi->fm_size);
1315
1316 for (j = 0; j < vol->reserved_pebs; j++) {
1317 struct ubi_eba_leb_desc ldesc;
1318
1319 ubi_eba_get_ldesc(vol, j, &ldesc);
1320 feba->pnum[j] = cpu_to_be32(ldesc.pnum);
1321 }
1322
1323 feba->reserved_pebs = cpu_to_be32(j);
1324 feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
1325 }
1326 fmh->vol_count = cpu_to_be32(vol_count);
1327 fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
1328
1329 avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1330 avhdr->lnum = 0;
1331
1332 spin_unlock(&ubi->wl_lock);
1333 spin_unlock(&ubi->volumes_lock);
1334
1335 dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
1336 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
1337 if (ret) {
1338 ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
1339 goto out_free_seen;
1340 }
1341
1342 for (i = 0; i < new_fm->used_blocks; i++) {
1343 fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
1344 set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
1345 fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
1346 }
1347
1348 fmsb->data_crc = 0;
1349 fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
1350 ubi->fm_size));
1351
1352 for (i = 1; i < new_fm->used_blocks; i++) {
1353 dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1354 dvhdr->lnum = cpu_to_be32(i);
1355 dbg_bld("writing fastmap data to PEB %i sqnum %llu",
1356 new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
1357 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
1358 if (ret) {
1359 ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
1360 new_fm->e[i]->pnum);
1361 goto out_free_seen;
1362 }
1363 }
1364
1365 for (i = 0; i < new_fm->used_blocks; i++) {
1366 ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size),
1367 new_fm->e[i]->pnum, 0, ubi->leb_size);
1368 if (ret) {
1369 ubi_err(ubi, "unable to write fastmap to PEB %i!",
1370 new_fm->e[i]->pnum);
1371 goto out_free_seen;
1372 }
1373 }
1374
1375 ubi_assert(new_fm);
1376 ubi->fm = new_fm;
1377
1378 ret = self_check_seen(ubi, seen_pebs);
1379 dbg_bld("fastmap written!");
1380
1381out_free_seen:
1382 free_seen(seen_pebs);
1383out_free_dvbuf:
1384 ubi_free_vid_buf(dvbuf);
1385out_free_avbuf:
1386 ubi_free_vid_buf(avbuf);
1387
1388out:
1389 return ret;
1390}
1391
1392/**
1393 * erase_block - Manually erase a PEB.
1394 * @ubi: UBI device object
1395 * @pnum: PEB to be erased
1396 *
1397 * Returns the new EC value on success, < 0 indicates an internal error.
1398 */
1399static int erase_block(struct ubi_device *ubi, int pnum)
1400{
1401 int ret;
1402 struct ubi_ec_hdr *ec_hdr;
1403 long long ec;
1404
1405 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1406 if (!ec_hdr)
1407 return -ENOMEM;
1408
1409 ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1410 if (ret < 0)
1411 goto out;
1412 else if (ret && ret != UBI_IO_BITFLIPS) {
1413 ret = -EINVAL;
1414 goto out;
1415 }
1416
1417 ret = ubi_io_sync_erase(ubi, pnum, 0);
1418 if (ret < 0)
1419 goto out;
1420
1421 ec = be64_to_cpu(ec_hdr->ec);
1422 ec += ret;
1423 if (ec > UBI_MAX_ERASECOUNTER) {
1424 ret = -EINVAL;
1425 goto out;
1426 }
1427
1428 ec_hdr->ec = cpu_to_be64(ec);
1429 ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
1430 if (ret < 0)
1431 goto out;
1432
1433 ret = ec;
1434out:
1435 kfree(ec_hdr);
1436 return ret;
1437}
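/*
 * On success erase_block() returns the updated erase counter rather than
 * zero; ubi_update_fastmap() relies on this when it recycles the old
 * anchor PEB (new_fm->e[0]->ec = ret).
 */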
1438
1439/**
1440 * invalidate_fastmap - destroys a fastmap.
1441 * @ubi: UBI device object
1442 *
1443 * This function ensures that upon next UBI attach a full scan
1444 * is issued. We need this if UBI is about to write a new fastmap
1445 * but is unable to do so. In this case we have two options:
1446 * a) Make sure that the current fastmap will not be used upon
1447 * attach time and continue, or b) fall back to RO mode to have the
1448 * current fastmap in a valid state.
1449 * Returns 0 on success, < 0 indicates an internal error.
1450 */
1451static int invalidate_fastmap(struct ubi_device *ubi)
1452{
1453 int ret;
1454 struct ubi_fastmap_layout *fm;
1455 struct ubi_wl_entry *e;
1456 struct ubi_vid_io_buf *vb = NULL;
1457 struct ubi_vid_hdr *vh;
1458
1459 if (!ubi->fm)
1460 return 0;
1461
1462 ubi->fm = NULL;
1463
1464 ret = -ENOMEM;
1465 fm = kzalloc(sizeof(*fm), GFP_KERNEL);
1466 if (!fm)
1467 goto out;
1468
1469 vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
1470 if (!vb)
1471 goto out_free_fm;
1472
1473 vh = ubi_get_vid_hdr(vb);
1474
1475 ret = -ENOSPC;
1476 e = ubi_wl_get_fm_peb(ubi, 1);
1477 if (!e)
1478 goto out_free_fm;
1479
1480 /*
1481 * Create fake fastmap such that UBI will fall back
1482 * to scanning mode.
1483 */
1484 vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1485 ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
1486 if (ret < 0) {
1487 ubi_wl_put_fm_peb(ubi, e, 0, 0);
1488 goto out_free_fm;
1489 }
1490
1491 fm->used_blocks = 1;
1492 fm->e[0] = e;
1493
1494 ubi->fm = fm;
1495
1496out:
1497 ubi_free_vid_buf(vb);
1498 return ret;
1499
1500out_free_fm:
1501 kfree(fm);
1502 goto out;
1503}
1504
1505/**
1506 * return_fm_pebs - returns all PEBs used by a fastmap back to the
1507 * WL sub-system.
1508 * @ubi: UBI device object
1509 * @fm: fastmap layout object
1510 */
1511static void return_fm_pebs(struct ubi_device *ubi,
1512 struct ubi_fastmap_layout *fm)
1513{
1514 int i;
1515
1516 if (!fm)
1517 return;
1518
1519 for (i = 0; i < fm->used_blocks; i++) {
1520 if (fm->e[i]) {
1521 ubi_wl_put_fm_peb(ubi, fm->e[i], i,
1522 fm->to_be_tortured[i]);
1523 fm->e[i] = NULL;
1524 }
1525 }
1526}
1527
1528/**
1529 * ubi_update_fastmap - will be called by UBI if a volume changes or
1530 * a fastmap pool becomes full.
1531 * @ubi: UBI device object
1532 *
1533 * Returns 0 on success, < 0 indicates an internal error.
1534 */
1535int ubi_update_fastmap(struct ubi_device *ubi)
1536{
1537 int ret, i, j;
1538 struct ubi_fastmap_layout *new_fm, *old_fm;
1539 struct ubi_wl_entry *tmp_e;
1540
1541 down_write(&ubi->fm_protect);
1542 down_write(&ubi->work_sem);
1543 down_write(&ubi->fm_eba_sem);
1544
1545 ubi_refill_pools(ubi);
1546
1547 if (ubi->ro_mode || ubi->fm_disabled) {
1548 up_write(&ubi->fm_eba_sem);
1549 up_write(&ubi->work_sem);
1550 up_write(&ubi->fm_protect);
1551 return 0;
1552 }
1553
1554 new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
1555 if (!new_fm) {
1556 up_write(&ubi->fm_eba_sem);
1557 up_write(&ubi->work_sem);
1558 up_write(&ubi->fm_protect);
1559 return -ENOMEM;
1560 }
1561
1562 new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
1563 old_fm = ubi->fm;
1564 ubi->fm = NULL;
1565
1566 if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
1567 ubi_err(ubi, "fastmap too large");
1568 ret = -ENOSPC;
1569 goto err;
1570 }
1571
1572 for (i = 1; i < new_fm->used_blocks; i++) {
1573 spin_lock(&ubi->wl_lock);
1574 tmp_e = ubi_wl_get_fm_peb(ubi, 0);
1575 spin_unlock(&ubi->wl_lock);
1576
1577 if (!tmp_e) {
1578 if (old_fm && old_fm->e[i]) {
1579 ret = erase_block(ubi, old_fm->e[i]->pnum);
1580 if (ret < 0) {
1581 ubi_err(ubi, "could not erase old fastmap PEB");
1582
1583 for (j = 1; j < i; j++) {
1584 ubi_wl_put_fm_peb(ubi, new_fm->e[j],
1585 j, 0);
1586 new_fm->e[j] = NULL;
1587 }
1588 goto err;
1589 }
1590 new_fm->e[i] = old_fm->e[i];
1591 old_fm->e[i] = NULL;
1592 } else {
1593 ubi_err(ubi, "could not get any free erase block");
1594
1595 for (j = 1; j < i; j++) {
1596 ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
1597 new_fm->e[j] = NULL;
1598 }
1599
1600 ret = -ENOSPC;
1601 goto err;
1602 }
1603 } else {
1604 new_fm->e[i] = tmp_e;
1605
1606 if (old_fm && old_fm->e[i]) {
1607 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1608 old_fm->to_be_tortured[i]);
1609 old_fm->e[i] = NULL;
1610 }
1611 }
1612 }
1613
1614 /* Old fastmap is larger than the new one */
1615 if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
1616 for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
1617 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1618 old_fm->to_be_tortured[i]);
1619 old_fm->e[i] = NULL;
1620 }
1621 }
1622
1623 spin_lock(&ubi->wl_lock);
1624 tmp_e = ubi->fm_anchor;
1625 ubi->fm_anchor = NULL;
1626 spin_unlock(&ubi->wl_lock);
1627
1628 if (old_fm) {
1629 /* no fresh anchor PEB was found, reuse the old one */
1630 if (!tmp_e) {
1631 ret = erase_block(ubi, old_fm->e[0]->pnum);
1632 if (ret < 0) {
1633 ubi_err(ubi, "could not erase old anchor PEB");
1634
1635 for (i = 1; i < new_fm->used_blocks; i++) {
1636 ubi_wl_put_fm_peb(ubi, new_fm->e[i],
1637 i, 0);
1638 new_fm->e[i] = NULL;
1639 }
1640 goto err;
1641 }
1642 new_fm->e[0] = old_fm->e[0];
1643 new_fm->e[0]->ec = ret;
1644 old_fm->e[0] = NULL;
1645 } else {
1646 /* we've got a new anchor PEB, return the old one */
1647 ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
1648 old_fm->to_be_tortured[0]);
1649 new_fm->e[0] = tmp_e;
1650 old_fm->e[0] = NULL;
1651 }
1652 } else {
1653 if (!tmp_e) {
1654 ubi_err(ubi, "could not find any anchor PEB");
1655
1656 for (i = 1; i < new_fm->used_blocks; i++) {
1657 ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
1658 new_fm->e[i] = NULL;
1659 }
1660
1661 ret = -ENOSPC;
1662 goto err;
1663 }
1664 new_fm->e[0] = tmp_e;
1665 }
1666
1667 ret = ubi_write_fastmap(ubi, new_fm);
1668
1669 if (ret)
1670 goto err;
1671
1672out_unlock:
1673 up_write(&ubi->fm_eba_sem);
1674 up_write(&ubi->work_sem);
1675 up_write(&ubi->fm_protect);
1676 kfree(old_fm);
1677
1678 ubi_ensure_anchor_pebs(ubi);
1679
1680 return ret;
1681
1682err:
1683 ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
1684
1685 ret = invalidate_fastmap(ubi);
1686 if (ret < 0) {
1687 ubi_err(ubi, "Unable to invalidate current fastmap!");
1688 ubi_ro_mode(ubi);
1689 } else {
1690 return_fm_pebs(ubi, old_fm);
1691 return_fm_pebs(ubi, new_fm);
1692 ret = 0;
1693 }
1694
1695 kfree(new_fm);
1696 goto out_unlock;
1697}
1/*
2 * Copyright (c) 2012 Linutronix GmbH
3 * Copyright (c) 2014 sigma star gmbh
4 * Author: Richard Weinberger <richard@nod.at>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
13 * the GNU General Public License for more details.
14 *
15 */
16
17#include <linux/crc32.h>
18#include "ubi.h"
19
20/**
21 * init_seen - allocate memory for used for debugging.
22 * @ubi: UBI device description object
23 */
24static inline int *init_seen(struct ubi_device *ubi)
25{
26 int *ret;
27
28 if (!ubi_dbg_chk_fastmap(ubi))
29 return NULL;
30
31 ret = kcalloc(ubi->peb_count, sizeof(int), GFP_KERNEL);
32 if (!ret)
33 return ERR_PTR(-ENOMEM);
34
35 return ret;
36}
37
38/**
39 * free_seen - free the seen logic integer array.
40 * @seen: integer array of @ubi->peb_count size
41 */
42static inline void free_seen(int *seen)
43{
44 kfree(seen);
45}
46
47/**
48 * set_seen - mark a PEB as seen.
49 * @ubi: UBI device description object
50 * @pnum: The PEB to be makred as seen
51 * @seen: integer array of @ubi->peb_count size
52 */
53static inline void set_seen(struct ubi_device *ubi, int pnum, int *seen)
54{
55 if (!ubi_dbg_chk_fastmap(ubi) || !seen)
56 return;
57
58 seen[pnum] = 1;
59}
60
61/**
62 * self_check_seen - check whether all PEB have been seen by fastmap.
63 * @ubi: UBI device description object
64 * @seen: integer array of @ubi->peb_count size
65 */
66static int self_check_seen(struct ubi_device *ubi, int *seen)
67{
68 int pnum, ret = 0;
69
70 if (!ubi_dbg_chk_fastmap(ubi) || !seen)
71 return 0;
72
73 for (pnum = 0; pnum < ubi->peb_count; pnum++) {
74 if (!seen[pnum] && ubi->lookuptbl[pnum]) {
75 ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
76 ret = -EINVAL;
77 }
78 }
79
80 return ret;
81}
82
83/**
84 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
85 * @ubi: UBI device description object
86 */
87size_t ubi_calc_fm_size(struct ubi_device *ubi)
88{
89 size_t size;
90
91 size = sizeof(struct ubi_fm_sb) +
92 sizeof(struct ubi_fm_hdr) +
93 sizeof(struct ubi_fm_scan_pool) +
94 sizeof(struct ubi_fm_scan_pool) +
95 (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
96 (sizeof(struct ubi_fm_eba) +
97 (ubi->peb_count * sizeof(__be32))) +
98 sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
99 return roundup(size, ubi->leb_size);
100}
101
102
103/**
104 * new_fm_vhdr - allocate a new volume header for fastmap usage.
105 * @ubi: UBI device description object
106 * @vol_id: the VID of the new header
107 *
108 * Returns a new struct ubi_vid_hdr on success.
109 * NULL indicates out of memory.
110 */
111static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
112{
113 struct ubi_vid_hdr *new;
114
115 new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
116 if (!new)
117 goto out;
118
119 new->vol_type = UBI_VID_DYNAMIC;
120 new->vol_id = cpu_to_be32(vol_id);
121
122 /* UBI implementations without fastmap support have to delete the
123 * fastmap.
124 */
125 new->compat = UBI_COMPAT_DELETE;
126
127out:
128 return new;
129}
130
131/**
132 * add_aeb - create and add a attach erase block to a given list.
133 * @ai: UBI attach info object
134 * @list: the target list
135 * @pnum: PEB number of the new attach erase block
136 * @ec: erease counter of the new LEB
137 * @scrub: scrub this PEB after attaching
138 *
139 * Returns 0 on success, < 0 indicates an internal error.
140 */
141static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
142 int pnum, int ec, int scrub)
143{
144 struct ubi_ainf_peb *aeb;
145
146 aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
147 if (!aeb)
148 return -ENOMEM;
149
150 aeb->pnum = pnum;
151 aeb->ec = ec;
152 aeb->lnum = -1;
153 aeb->scrub = scrub;
154 aeb->copy_flag = aeb->sqnum = 0;
155
156 ai->ec_sum += aeb->ec;
157 ai->ec_count++;
158
159 if (ai->max_ec < aeb->ec)
160 ai->max_ec = aeb->ec;
161
162 if (ai->min_ec > aeb->ec)
163 ai->min_ec = aeb->ec;
164
165 list_add_tail(&aeb->u.list, list);
166
167 return 0;
168}
169
170/**
171 * add_vol - create and add a new volume to ubi_attach_info.
172 * @ai: ubi_attach_info object
173 * @vol_id: VID of the new volume
174 * @used_ebs: number of used EBS
175 * @data_pad: data padding value of the new volume
176 * @vol_type: volume type
177 * @last_eb_bytes: number of bytes in the last LEB
178 *
179 * Returns the new struct ubi_ainf_volume on success.
180 * NULL indicates an error.
181 */
182static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
183 int used_ebs, int data_pad, u8 vol_type,
184 int last_eb_bytes)
185{
186 struct ubi_ainf_volume *av;
187 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
188
189 while (*p) {
190 parent = *p;
191 av = rb_entry(parent, struct ubi_ainf_volume, rb);
192
193 if (vol_id > av->vol_id)
194 p = &(*p)->rb_left;
195 else if (vol_id < av->vol_id)
196 p = &(*p)->rb_right;
197 else
198 return ERR_PTR(-EINVAL);
199 }
200
201 av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
202 if (!av)
203 goto out;
204
205 av->highest_lnum = av->leb_count = av->used_ebs = 0;
206 av->vol_id = vol_id;
207 av->data_pad = data_pad;
208 av->last_data_size = last_eb_bytes;
209 av->compat = 0;
210 av->vol_type = vol_type;
211 av->root = RB_ROOT;
212 if (av->vol_type == UBI_STATIC_VOLUME)
213 av->used_ebs = used_ebs;
214
215 dbg_bld("found volume (ID %i)", vol_id);
216
217 rb_link_node(&av->rb, parent, p);
218 rb_insert_color(&av->rb, &ai->volumes);
219
220out:
221 return av;
222}
223
224/**
225 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
226 * from it's original list.
227 * @ai: ubi_attach_info object
228 * @aeb: the to be assigned SEB
229 * @av: target scan volume
230 */
231static void assign_aeb_to_av(struct ubi_attach_info *ai,
232 struct ubi_ainf_peb *aeb,
233 struct ubi_ainf_volume *av)
234{
235 struct ubi_ainf_peb *tmp_aeb;
236 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
237
238 p = &av->root.rb_node;
239 while (*p) {
240 parent = *p;
241
242 tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
243 if (aeb->lnum != tmp_aeb->lnum) {
244 if (aeb->lnum < tmp_aeb->lnum)
245 p = &(*p)->rb_left;
246 else
247 p = &(*p)->rb_right;
248
249 continue;
250 } else
251 break;
252 }
253
254 list_del(&aeb->u.list);
255 av->leb_count++;
256
257 rb_link_node(&aeb->u.rb, parent, p);
258 rb_insert_color(&aeb->u.rb, &av->root);
259}
260
261/**
262 * update_vol - inserts or updates a LEB which was found a pool.
263 * @ubi: the UBI device object
264 * @ai: attach info object
265 * @av: the volume this LEB belongs to
266 * @new_vh: the volume header derived from new_aeb
267 * @new_aeb: the AEB to be examined
268 *
269 * Returns 0 on success, < 0 indicates an internal error.
270 */
271static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
272 struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
273 struct ubi_ainf_peb *new_aeb)
274{
275 struct rb_node **p = &av->root.rb_node, *parent = NULL;
276 struct ubi_ainf_peb *aeb, *victim;
277 int cmp_res;
278
279 while (*p) {
280 parent = *p;
281 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
282
283 if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
284 if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
285 p = &(*p)->rb_left;
286 else
287 p = &(*p)->rb_right;
288
289 continue;
290 }
291
292 /* This case can happen if the fastmap gets written
293 * because of a volume change (creation, deletion, ..).
294 * Then a PEB can be within the persistent EBA and the pool.
295 */
296 if (aeb->pnum == new_aeb->pnum) {
297 ubi_assert(aeb->lnum == new_aeb->lnum);
298 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
299
300 return 0;
301 }
302
303 cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
304 if (cmp_res < 0)
305 return cmp_res;
306
307 /* new_aeb is newer */
308 if (cmp_res & 1) {
309 victim = kmem_cache_alloc(ai->aeb_slab_cache,
310 GFP_KERNEL);
311 if (!victim)
312 return -ENOMEM;
313
314 victim->ec = aeb->ec;
315 victim->pnum = aeb->pnum;
316 list_add_tail(&victim->u.list, &ai->erase);
317
318 if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
319 av->last_data_size =
320 be32_to_cpu(new_vh->data_size);
321
322 dbg_bld("vol %i: AEB %i's PEB %i is the newer",
323 av->vol_id, aeb->lnum, new_aeb->pnum);
324
325 aeb->ec = new_aeb->ec;
326 aeb->pnum = new_aeb->pnum;
327 aeb->copy_flag = new_vh->copy_flag;
328 aeb->scrub = new_aeb->scrub;
329 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
330
331 /* new_aeb is older */
332 } else {
333 dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
334 av->vol_id, aeb->lnum, new_aeb->pnum);
335 list_add_tail(&new_aeb->u.list, &ai->erase);
336 }
337
338 return 0;
339 }
340 /* This LEB is new, let's add it to the volume */
341
342 if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
343 av->highest_lnum = be32_to_cpu(new_vh->lnum);
344 av->last_data_size = be32_to_cpu(new_vh->data_size);
345 }
346
347 if (av->vol_type == UBI_STATIC_VOLUME)
348 av->used_ebs = be32_to_cpu(new_vh->used_ebs);
349
350 av->leb_count++;
351
352 rb_link_node(&new_aeb->u.rb, parent, p);
353 rb_insert_color(&new_aeb->u.rb, &av->root);
354
355 return 0;
356}
357
358/**
359 * process_pool_aeb - we found a non-empty PEB in a pool.
360 * @ubi: UBI device object
361 * @ai: attach info object
362 * @new_vh: the volume header derived from new_aeb
363 * @new_aeb: the AEB to be examined
364 *
365 * Returns 0 on success, < 0 indicates an internal error.
366 */
367static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
368 struct ubi_vid_hdr *new_vh,
369 struct ubi_ainf_peb *new_aeb)
370{
371 struct ubi_ainf_volume *av, *tmp_av = NULL;
372 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
373 int found = 0;
374
375 if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
376 be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
377 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
378
379 return 0;
380 }
381
382 /* Find the volume this SEB belongs to */
383 while (*p) {
384 parent = *p;
385 tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
386
387 if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
388 p = &(*p)->rb_left;
389 else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
390 p = &(*p)->rb_right;
391 else {
392 found = 1;
393 break;
394 }
395 }
396
397 if (found)
398 av = tmp_av;
399 else {
400 ubi_err(ubi, "orphaned volume in fastmap pool!");
401 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
402 return UBI_BAD_FASTMAP;
403 }
404
405 ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
406
407 return update_vol(ubi, ai, av, new_vh, new_aeb);
408}
409
/**
 * unmap_peb - unmap a PEB.
 * @ai: UBI attach info object
 * @pnum: The PEB to be unmapped
 *
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 */
418static void unmap_peb(struct ubi_attach_info *ai, int pnum)
419{
420 struct ubi_ainf_volume *av;
421 struct rb_node *node, *node2;
422 struct ubi_ainf_peb *aeb;
423
424 for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
425 av = rb_entry(node, struct ubi_ainf_volume, rb);
426
427 for (node2 = rb_first(&av->root); node2;
428 node2 = rb_next(node2)) {
429 aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
430 if (aeb->pnum == pnum) {
431 rb_erase(&aeb->u.rb, &av->root);
432 av->leb_count--;
433 kmem_cache_free(ai->aeb_slab_cache, aeb);
434 return;
435 }
436 }
437 }
438}
439
440/**
 * scan_pool - scans a pool for changed (no longer empty) PEBs.
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the pool to be scanned
445 * @pool_size: size of the pool (number of entries in @pebs)
446 * @max_sqnum: pointer to the maximal sequence number
447 * @free: list of PEBs which are most likely free (and go into @ai->free)
448 *
449 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
450 * < 0 indicates an internal error.
451 */
452static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
453 __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
454 struct list_head *free)
455{
	struct ubi_vid_io_buf *vb;
	struct ubi_vid_hdr *vh;
457 struct ubi_ec_hdr *ech;
458 struct ubi_ainf_peb *new_aeb;
459 int i, pnum, err, ret = 0;
460
461 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
462 if (!ech)
463 return -ENOMEM;
464
	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
	if (!vb) {
		kfree(ech);
		return -ENOMEM;
	}

	vh = ubi_get_vid_hdr(vb);
470
471 dbg_bld("scanning fastmap pool: size = %i", pool_size);
472
473 /*
474 * Now scan all PEBs in the pool to find changes which have been made
475 * after the creation of the fastmap
476 */
477 for (i = 0; i < pool_size; i++) {
478 int scrub = 0;
479 int image_seq;
480
481 pnum = be32_to_cpu(pebs[i]);
482
483 if (ubi_io_is_bad(ubi, pnum)) {
484 ubi_err(ubi, "bad PEB in fastmap pool!");
485 ret = UBI_BAD_FASTMAP;
486 goto out;
487 }
488
489 err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
490 if (err && err != UBI_IO_BITFLIPS) {
491 ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
492 pnum, err);
493 ret = err > 0 ? UBI_BAD_FASTMAP : err;
494 goto out;
495 } else if (err == UBI_IO_BITFLIPS)
496 scrub = 1;
497
498 /*
499 * Older UBI implementations have image_seq set to zero, so
500 * we shouldn't fail if image_seq == 0.
501 */
502 image_seq = be32_to_cpu(ech->image_seq);
503
504 if (image_seq && (image_seq != ubi->image_seq)) {
505 ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
506 be32_to_cpu(ech->image_seq), ubi->image_seq);
507 ret = UBI_BAD_FASTMAP;
508 goto out;
509 }
510
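		/*
		 * A PEB without a VID header (erased flash, all 0xFF) was
		 * unmapped after the fastmap had been written; hand it over
		 * to the free list.
		 */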
		err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
512 if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
513 unsigned long long ec = be64_to_cpu(ech->ec);
514 unmap_peb(ai, pnum);
515 dbg_bld("Adding PEB to free: %i", pnum);
			if (err == UBI_IO_FF_BITFLIPS)
				scrub = 1;

			ret = add_aeb(ai, free, pnum, ec, scrub);
			if (ret)
				goto out;
			continue;
521 } else if (err == 0 || err == UBI_IO_BITFLIPS) {
			dbg_bld("Found non-empty PEB:%i in pool", pnum);
523
524 if (err == UBI_IO_BITFLIPS)
525 scrub = 1;
526
527 new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
528 GFP_KERNEL);
529 if (!new_aeb) {
530 ret = -ENOMEM;
531 goto out;
532 }
533
534 new_aeb->ec = be64_to_cpu(ech->ec);
535 new_aeb->pnum = pnum;
536 new_aeb->lnum = be32_to_cpu(vh->lnum);
537 new_aeb->sqnum = be64_to_cpu(vh->sqnum);
538 new_aeb->copy_flag = vh->copy_flag;
539 new_aeb->scrub = scrub;
540
541 if (*max_sqnum < new_aeb->sqnum)
542 *max_sqnum = new_aeb->sqnum;
543
544 err = process_pool_aeb(ubi, ai, vh, new_aeb);
545 if (err) {
546 ret = err > 0 ? UBI_BAD_FASTMAP : err;
547 goto out;
548 }
549 } else {
550 /* We are paranoid and fall back to scanning mode */
			ubi_err(ubi, "fastmap pool contains damaged PEBs!");
552 ret = err > 0 ? UBI_BAD_FASTMAP : err;
553 goto out;
554 }
555
556 }
557
558out:
	ubi_free_vid_buf(vb);
560 kfree(ech);
561 return ret;
562}
563
564/**
565 * count_fastmap_pebs - Counts the PEBs found by fastmap.
566 * @ai: The UBI attach info object
567 */
568static int count_fastmap_pebs(struct ubi_attach_info *ai)
569{
570 struct ubi_ainf_peb *aeb;
571 struct ubi_ainf_volume *av;
572 struct rb_node *rb1, *rb2;
573 int n = 0;
574
575 list_for_each_entry(aeb, &ai->erase, u.list)
576 n++;
577
578 list_for_each_entry(aeb, &ai->free, u.list)
579 n++;
580
581 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
582 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
583 n++;
584
585 return n;
586}
587
588/**
589 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
590 * @ubi: UBI device object
591 * @ai: UBI attach info object
592 * @fm: the fastmap to be attached
593 *
594 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
595 * < 0 indicates an internal error.
596 */
597static int ubi_attach_fastmap(struct ubi_device *ubi,
598 struct ubi_attach_info *ai,
599 struct ubi_fastmap_layout *fm)
600{
601 struct list_head used, free;
602 struct ubi_ainf_volume *av;
603 struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
604 struct ubi_fm_sb *fmsb;
605 struct ubi_fm_hdr *fmhdr;
606 struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
607 struct ubi_fm_ec *fmec;
608 struct ubi_fm_volhdr *fmvhdr;
609 struct ubi_fm_eba *fm_eba;
610 int ret, i, j, pool_size, wl_pool_size;
611 size_t fm_pos = 0, fm_size = ubi->fm_size;
612 unsigned long long max_sqnum = 0;
613 void *fm_raw = ubi->fm_buf;
614
615 INIT_LIST_HEAD(&used);
616 INIT_LIST_HEAD(&free);
617 ai->min_ec = UBI_MAX_ERASECOUNTER;
618
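	/*
	 * The fastmap blob is parsed front to back: super block, header,
	 * the two pools, the per-PEB erase counter lists and finally one
	 * EBA table per volume. The data comes from flash and is not
	 * trusted, hence every advance of fm_pos is checked against fm_size.
	 */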
619 fmsb = (struct ubi_fm_sb *)(fm_raw);
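	/* ubi_scan_fastmap() stored the maximal sqnum here in CPU order */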
620 ai->max_sqnum = fmsb->sqnum;
621 fm_pos += sizeof(struct ubi_fm_sb);
622 if (fm_pos >= fm_size)
623 goto fail_bad;
624
625 fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
626 fm_pos += sizeof(*fmhdr);
627 if (fm_pos >= fm_size)
628 goto fail_bad;
629
630 if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
631 ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
632 be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
633 goto fail_bad;
634 }
635
636 fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
637 fm_pos += sizeof(*fmpl);
638 if (fm_pos >= fm_size)
639 goto fail_bad;
640 if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
641 ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
642 be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
643 goto fail_bad;
644 }
645
646 fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
647 fm_pos += sizeof(*fmpl_wl);
648 if (fm_pos >= fm_size)
649 goto fail_bad;
650 if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
651 ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
652 be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
653 goto fail_bad;
654 }
655
656 pool_size = be16_to_cpu(fmpl->size);
657 wl_pool_size = be16_to_cpu(fmpl_wl->size);
658 fm->max_pool_size = be16_to_cpu(fmpl->max_size);
659 fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);
660
661 if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
662 ubi_err(ubi, "bad pool size: %i", pool_size);
663 goto fail_bad;
664 }
665
666 if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
667 ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
668 goto fail_bad;
669 }
672 if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
673 fm->max_pool_size < 0) {
674 ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
675 goto fail_bad;
676 }
677
678 if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
679 fm->max_wl_pool_size < 0) {
680 ubi_err(ubi, "bad maximal WL pool size: %i",
681 fm->max_wl_pool_size);
682 goto fail_bad;
683 }
684
685 /* read EC values from free list */
686 for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
687 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
688 fm_pos += sizeof(*fmec);
689 if (fm_pos >= fm_size)
690 goto fail_bad;
691
		ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 0);
		if (ret)
			goto fail;
694 }
695
696 /* read EC values from used list */
697 for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
698 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
699 fm_pos += sizeof(*fmec);
700 if (fm_pos >= fm_size)
701 goto fail_bad;
702
		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 0);
		if (ret)
			goto fail;
705 }
706
707 /* read EC values from scrub list */
708 for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
709 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
710 fm_pos += sizeof(*fmec);
711 if (fm_pos >= fm_size)
712 goto fail_bad;
713
		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 1);
		if (ret)
			goto fail;
716 }
717
718 /* read EC values from erase list */
719 for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
720 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
721 fm_pos += sizeof(*fmec);
722 if (fm_pos >= fm_size)
723 goto fail_bad;
724
		ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 1);
		if (ret)
			goto fail;
727 }
728
729 ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
730 ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
731
732 /* Iterate over all volumes and read their EBA table */
733 for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
734 fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
735 fm_pos += sizeof(*fmvhdr);
736 if (fm_pos >= fm_size)
737 goto fail_bad;
738
739 if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
740 ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
741 be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
742 goto fail_bad;
743 }
744
745 av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
746 be32_to_cpu(fmvhdr->used_ebs),
747 be32_to_cpu(fmvhdr->data_pad),
748 fmvhdr->vol_type,
749 be32_to_cpu(fmvhdr->last_eb_bytes));
750
		if (IS_ERR(av)) {
			if (PTR_ERR(av) == -EINVAL)
				ubi_err(ubi, "volume (ID %i) already exists",
					be32_to_cpu(fmvhdr->vol_id));
			goto fail_bad;
		}
		if (!av)
			goto fail_bad;
758
759 ai->vols_found++;
760 if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
761 ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
762
763 fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
764 fm_pos += sizeof(*fm_eba);
765 fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
766 if (fm_pos >= fm_size)
767 goto fail_bad;
768
769 if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
770 ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
771 be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
772 goto fail_bad;
773 }
774
775 for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
776 int pnum = be32_to_cpu(fm_eba->pnum[j]);
777
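			/* A negative entry denotes an unmapped LEB */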
778 if (pnum < 0)
779 continue;
780
781 aeb = NULL;
782 list_for_each_entry(tmp_aeb, &used, u.list) {
783 if (tmp_aeb->pnum == pnum) {
784 aeb = tmp_aeb;
785 break;
786 }
787 }
788
789 if (!aeb) {
790 ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
791 goto fail_bad;
792 }
793
794 aeb->lnum = j;
795
796 if (av->highest_lnum <= aeb->lnum)
797 av->highest_lnum = aeb->lnum;
798
799 assign_aeb_to_av(ai, aeb, av);
800
801 dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
802 aeb->pnum, aeb->lnum, av->vol_id);
803 }
804 }
805
806 ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
807 if (ret)
808 goto fail;
809
810 ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
811 if (ret)
812 goto fail;
813
814 if (max_sqnum > ai->max_sqnum)
815 ai->max_sqnum = max_sqnum;
816
817 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
818 list_move_tail(&tmp_aeb->u.list, &ai->free);
819
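	/*
	 * Everything still left on the local used list is no longer
	 * referenced by any EBA table, so schedule it for erasure.
	 */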
820 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
821 list_move_tail(&tmp_aeb->u.list, &ai->erase);
822
823 ubi_assert(list_empty(&free));
824
825 /*
826 * If fastmap is leaking PEBs (must not happen), raise a
827 * fat warning and fall back to scanning mode.
828 * We do this here because in ubi_wl_init() it's too late
829 * and we cannot fall back to scanning.
830 */
831 if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
832 ai->bad_peb_count - fm->used_blocks))
833 goto fail_bad;
834
835 return 0;
836
837fail_bad:
838 ret = UBI_BAD_FASTMAP;
839fail:
840 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
841 list_del(&tmp_aeb->u.list);
842 kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
843 }
844 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
845 list_del(&tmp_aeb->u.list);
846 kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
847 }
848
849 return ret;
850}
851
852/**
853 * ubi_scan_fastmap - scan the fastmap.
854 * @ubi: UBI device object
855 * @ai: UBI attach info to be filled
856 * @fm_anchor: The fastmap starts at this PEB
857 *
858 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
859 * UBI_BAD_FASTMAP if one was found but is not usable.
860 * < 0 indicates an internal error.
861 */
862int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
863 int fm_anchor)
864{
865 struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_io_buf *vb;
	struct ubi_vid_hdr *vh;
867 struct ubi_ec_hdr *ech;
868 struct ubi_fastmap_layout *fm;
869 int i, used_blocks, pnum, ret = 0;
870 size_t fm_size;
	u32 crc, tmp_crc;
872 unsigned long long sqnum = 0;
873
874 down_write(&ubi->fm_protect);
875 memset(ubi->fm_buf, 0, ubi->fm_size);
876
877 fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
878 if (!fmsb) {
879 ret = -ENOMEM;
880 goto out;
881 }
882
883 fm = kzalloc(sizeof(*fm), GFP_KERNEL);
884 if (!fm) {
885 ret = -ENOMEM;
886 kfree(fmsb);
887 goto out;
888 }
889
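	/*
	 * The anchor PEB carries the fastmap super block; read it first to
	 * learn how many blocks the fastmap occupies and where they live.
	 */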
890 ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
891 if (ret && ret != UBI_IO_BITFLIPS)
892 goto free_fm_sb;
893 else if (ret == UBI_IO_BITFLIPS)
894 fm->to_be_tortured[0] = 1;
895
896 if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
897 ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
898 be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
899 ret = UBI_BAD_FASTMAP;
900 goto free_fm_sb;
901 }
902
903 if (fmsb->version != UBI_FM_FMT_VERSION) {
904 ubi_err(ubi, "bad fastmap version: %i, expected: %i",
905 fmsb->version, UBI_FM_FMT_VERSION);
906 ret = UBI_BAD_FASTMAP;
907 goto free_fm_sb;
908 }
909
910 used_blocks = be32_to_cpu(fmsb->used_blocks);
911 if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
912 ubi_err(ubi, "number of fastmap blocks is invalid: %i",
913 used_blocks);
914 ret = UBI_BAD_FASTMAP;
915 goto free_fm_sb;
916 }
917
918 fm_size = ubi->leb_size * used_blocks;
919 if (fm_size != ubi->fm_size) {
920 ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
921 fm_size, ubi->fm_size);
922 ret = UBI_BAD_FASTMAP;
923 goto free_fm_sb;
924 }
925
926 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
927 if (!ech) {
928 ret = -ENOMEM;
929 goto free_fm_sb;
930 }
931
	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
	if (!vb) {
		ret = -ENOMEM;
		goto free_hdr;
	}

	vh = ubi_get_vid_hdr(vb);
937
938 for (i = 0; i < used_blocks; i++) {
939 int image_seq;
940
941 pnum = be32_to_cpu(fmsb->block_loc[i]);
942
943 if (ubi_io_is_bad(ubi, pnum)) {
944 ret = UBI_BAD_FASTMAP;
945 goto free_hdr;
946 }
947
948 ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
949 if (ret && ret != UBI_IO_BITFLIPS) {
950 ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
951 i, pnum);
952 if (ret > 0)
953 ret = UBI_BAD_FASTMAP;
954 goto free_hdr;
955 } else if (ret == UBI_IO_BITFLIPS)
956 fm->to_be_tortured[i] = 1;
957
958 image_seq = be32_to_cpu(ech->image_seq);
959 if (!ubi->image_seq)
960 ubi->image_seq = image_seq;
961
962 /*
963 * Older UBI implementations have image_seq set to zero, so
964 * we shouldn't fail if image_seq == 0.
965 */
966 if (image_seq && (image_seq != ubi->image_seq)) {
967 ubi_err(ubi, "wrong image seq:%d instead of %d",
968 be32_to_cpu(ech->image_seq), ubi->image_seq);
969 ret = UBI_BAD_FASTMAP;
970 goto free_hdr;
971 }
972
		ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
				i, pnum);
			if (ret > 0)
				ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}
979
980 if (i == 0) {
981 if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
982 ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
983 be32_to_cpu(vh->vol_id),
984 UBI_FM_SB_VOLUME_ID);
985 ret = UBI_BAD_FASTMAP;
986 goto free_hdr;
987 }
988 } else {
989 if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
990 ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
991 be32_to_cpu(vh->vol_id),
992 UBI_FM_DATA_VOLUME_ID);
993 ret = UBI_BAD_FASTMAP;
994 goto free_hdr;
995 }
996 }
997
998 if (sqnum < be64_to_cpu(vh->sqnum))
999 sqnum = be64_to_cpu(vh->sqnum);
1000
		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
				  ubi->leb_start, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, err: %i)",
				i, pnum, ret);
			goto free_hdr;
		}
1008 }
1009
1010 kfree(fmsb);
1011 fmsb = NULL;
1012
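	/*
	 * The stored CRC covers the entire fastmap with the data_crc field
	 * itself zeroed, hence clear it before recalculating.
	 */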
1013 fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
1014 tmp_crc = be32_to_cpu(fmsb2->data_crc);
1015 fmsb2->data_crc = 0;
1016 crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
1017 if (crc != tmp_crc) {
1018 ubi_err(ubi, "fastmap data CRC is invalid");
1019 ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
1020 tmp_crc, crc);
1021 ret = UBI_BAD_FASTMAP;
1022 goto free_hdr;
1023 }
1024
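	/*
	 * The on-flash sqnum field is written as 0 by ubi_write_fastmap(),
	 * store the maximal VID header sqnum found above so that
	 * ubi_attach_fastmap() can pick it up.
	 */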
1025 fmsb2->sqnum = sqnum;
1026
1027 fm->used_blocks = used_blocks;
1028
1029 ret = ubi_attach_fastmap(ubi, ai, fm);
1030 if (ret) {
1031 if (ret > 0)
1032 ret = UBI_BAD_FASTMAP;
1033 goto free_hdr;
1034 }
1035
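	/*
	 * Create WL entries for the PEBs holding this fastmap so that the
	 * WL sub-system can reuse or erase them later.
	 */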
1036 for (i = 0; i < used_blocks; i++) {
1037 struct ubi_wl_entry *e;
1038
1039 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1040 if (!e) {
			while (i--)
				kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);
1043
1044 ret = -ENOMEM;
1045 goto free_hdr;
1046 }
1047
1048 e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
1049 e->ec = be32_to_cpu(fmsb2->block_ec[i]);
1050 fm->e[i] = e;
1051 }
1052
1053 ubi->fm = fm;
1054 ubi->fm_pool.max_size = ubi->fm->max_pool_size;
1055 ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
1056 ubi_msg(ubi, "attached by fastmap");
1057 ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
1058 ubi_msg(ubi, "fastmap WL pool size: %d",
1059 ubi->fm_wl_pool.max_size);
1060 ubi->fm_disabled = 0;
1061
	ubi_free_vid_buf(vb);
1063 kfree(ech);
1064out:
1065 up_write(&ubi->fm_protect);
1066 if (ret == UBI_BAD_FASTMAP)
1067 ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
1068 return ret;
1069
1070free_hdr:
	ubi_free_vid_buf(vb);
1072 kfree(ech);
1073free_fm_sb:
1074 kfree(fmsb);
1075 kfree(fm);
1076 goto out;
1077}
1078
1079/**
1080 * ubi_write_fastmap - writes a fastmap.
1081 * @ubi: UBI device object
 * @new_fm: the fastmap to be written
1083 *
1084 * Returns 0 on success, < 0 indicates an internal error.
1085 */
1086static int ubi_write_fastmap(struct ubi_device *ubi,
1087 struct ubi_fastmap_layout *new_fm)
1088{
1089 size_t fm_pos = 0;
1090 void *fm_raw;
1091 struct ubi_fm_sb *fmsb;
1092 struct ubi_fm_hdr *fmh;
1093 struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
1094 struct ubi_fm_ec *fec;
1095 struct ubi_fm_volhdr *fvh;
1096 struct ubi_fm_eba *feba;
1097 struct ubi_wl_entry *wl_e;
1098 struct ubi_volume *vol;
	struct ubi_vid_io_buf *avbuf, *dvbuf;
	struct ubi_vid_hdr *avhdr, *dvhdr;
1100 struct ubi_work *ubi_wrk;
1101 struct rb_node *tmp_rb;
1102 int ret, i, j, free_peb_count, used_peb_count, vol_count;
1103 int scrub_peb_count, erase_peb_count;
	unsigned long *seen_pebs;
1105
1106 fm_raw = ubi->fm_buf;
1107 memset(ubi->fm_buf, 0, ubi->fm_size);
1108
	avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
	if (!avbuf) {
		ret = -ENOMEM;
		goto out;
	}

	dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
	if (!dvbuf) {
		ret = -ENOMEM;
		goto out_free_avbuf;
	}

	avhdr = ubi_get_vid_hdr(avbuf);
	dvhdr = ubi_get_vid_hdr(dvbuf);

	seen_pebs = init_seen(ubi);
	if (IS_ERR(seen_pebs)) {
		ret = PTR_ERR(seen_pebs);
		goto out_free_dvbuf;
	}
1126
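	/*
	 * Take both locks so that the recorded volume and wear-leveling
	 * state form one consistent snapshot.
	 */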
1127 spin_lock(&ubi->volumes_lock);
1128 spin_lock(&ubi->wl_lock);
1129
1130 fmsb = (struct ubi_fm_sb *)fm_raw;
1131 fm_pos += sizeof(*fmsb);
1132 ubi_assert(fm_pos <= ubi->fm_size);
1133
1134 fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
1135 fm_pos += sizeof(*fmh);
1136 ubi_assert(fm_pos <= ubi->fm_size);
1137
1138 fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
1139 fmsb->version = UBI_FM_FMT_VERSION;
1140 fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
1141 /* the max sqnum will be filled in while *reading* the fastmap */
1142 fmsb->sqnum = 0;
1143
1144 fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
1145 free_peb_count = 0;
1146 used_peb_count = 0;
1147 scrub_peb_count = 0;
1148 erase_peb_count = 0;
1149 vol_count = 0;
1150
1151 fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1152 fm_pos += sizeof(*fmpl);
1153 fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1154 fmpl->size = cpu_to_be16(ubi->fm_pool.size);
1155 fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);
1156
1157 for (i = 0; i < ubi->fm_pool.size; i++) {
1158 fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
1159 set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
1160 }
1161
1162 fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1163 fm_pos += sizeof(*fmpl_wl);
1164 fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1165 fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
1166 fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
1167
1168 for (i = 0; i < ubi->fm_wl_pool.size; i++) {
1169 fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
1170 set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
1171 }
1172
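	/*
	 * Dump the erase counters of all free, used and scrubbing PEBs;
	 * PEBs queued for erasure are collected from the work queue below.
	 */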
1173 ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
1174 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1175
1176 fec->pnum = cpu_to_be32(wl_e->pnum);
1177 set_seen(ubi, wl_e->pnum, seen_pebs);
1178 fec->ec = cpu_to_be32(wl_e->ec);
1179
1180 free_peb_count++;
1181 fm_pos += sizeof(*fec);
1182 ubi_assert(fm_pos <= ubi->fm_size);
1183 }
1184 fmh->free_peb_count = cpu_to_be32(free_peb_count);
1185
1186 ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
1187 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1188
1189 fec->pnum = cpu_to_be32(wl_e->pnum);
1190 set_seen(ubi, wl_e->pnum, seen_pebs);
1191 fec->ec = cpu_to_be32(wl_e->ec);
1192
1193 used_peb_count++;
1194 fm_pos += sizeof(*fec);
1195 ubi_assert(fm_pos <= ubi->fm_size);
1196 }
1197
1198 ubi_for_each_protected_peb(ubi, i, wl_e) {
1199 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1200
1201 fec->pnum = cpu_to_be32(wl_e->pnum);
1202 set_seen(ubi, wl_e->pnum, seen_pebs);
1203 fec->ec = cpu_to_be32(wl_e->ec);
1204
1205 used_peb_count++;
1206 fm_pos += sizeof(*fec);
1207 ubi_assert(fm_pos <= ubi->fm_size);
1208 }
1209 fmh->used_peb_count = cpu_to_be32(used_peb_count);
1210
1211 ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
1212 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1213
1214 fec->pnum = cpu_to_be32(wl_e->pnum);
1215 set_seen(ubi, wl_e->pnum, seen_pebs);
1216 fec->ec = cpu_to_be32(wl_e->ec);
1217
1218 scrub_peb_count++;
1219 fm_pos += sizeof(*fec);
1220 ubi_assert(fm_pos <= ubi->fm_size);
1221 }
1222 fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
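	/* PEBs scheduled for erasure sit on the work queue, pick them up there */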
1225 list_for_each_entry(ubi_wrk, &ubi->works, list) {
1226 if (ubi_is_erase_work(ubi_wrk)) {
1227 wl_e = ubi_wrk->e;
1228 ubi_assert(wl_e);
1229
1230 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1231
1232 fec->pnum = cpu_to_be32(wl_e->pnum);
1233 set_seen(ubi, wl_e->pnum, seen_pebs);
1234 fec->ec = cpu_to_be32(wl_e->ec);
1235
1236 erase_peb_count++;
1237 fm_pos += sizeof(*fec);
1238 ubi_assert(fm_pos <= ubi->fm_size);
1239 }
1240 }
1241 fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
1242
1243 for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
1244 vol = ubi->volumes[i];
1245
1246 if (!vol)
1247 continue;
1248
1249 vol_count++;
1250
1251 fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
1252 fm_pos += sizeof(*fvh);
1253 ubi_assert(fm_pos <= ubi->fm_size);
1254
1255 fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
1256 fvh->vol_id = cpu_to_be32(vol->vol_id);
1257 fvh->vol_type = vol->vol_type;
1258 fvh->used_ebs = cpu_to_be32(vol->used_ebs);
1259 fvh->data_pad = cpu_to_be32(vol->data_pad);
1260 fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
1261
1262 ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
1263 vol->vol_type == UBI_STATIC_VOLUME);
1264
1265 feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
1266 fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
1267 ubi_assert(fm_pos <= ubi->fm_size);
1268
1269 for (j = 0; j < vol->reserved_pebs; j++)
1270 feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
1271
1272 feba->reserved_pebs = cpu_to_be32(j);
1273 feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
1274 }
1275 fmh->vol_count = cpu_to_be32(vol_count);
1276 fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
1277
1278 avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1279 avhdr->lnum = 0;
1280
1281 spin_unlock(&ubi->wl_lock);
1282 spin_unlock(&ubi->volumes_lock);
1283
1284 dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
	if (ret) {
		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
		goto out_free_seen;
	}
1290
1291 for (i = 0; i < new_fm->used_blocks; i++) {
1292 fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
1293 set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
1294 fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
1295 }
1296
1297 fmsb->data_crc = 0;
1298 fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
1299 ubi->fm_size));
1300
1301 for (i = 1; i < new_fm->used_blocks; i++) {
1302 dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1303 dvhdr->lnum = cpu_to_be32(i);
1304 dbg_bld("writing fastmap data to PEB %i sqnum %llu",
1305 new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
		if (ret) {
			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_free_seen;
		}
1312 }
1313
1314 for (i = 0; i < new_fm->used_blocks; i++) {
1315 ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
1316 new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
1317 if (ret) {
1318 ubi_err(ubi, "unable to write fastmap to PEB %i!",
1319 new_fm->e[i]->pnum);
			goto out_free_seen;
1321 }
1322 }
1323
1324 ubi_assert(new_fm);
1325 ubi->fm = new_fm;
1326
1327 ret = self_check_seen(ubi, seen_pebs);
1328 dbg_bld("fastmap written!");
1329
out_free_seen:
	free_seen(seen_pebs);
out_free_dvbuf:
	ubi_free_vid_buf(dvbuf);
out_free_avbuf:
	ubi_free_vid_buf(avbuf);
1334out:
1335 return ret;
1336}
1337
1338/**
1339 * erase_block - Manually erase a PEB.
1340 * @ubi: UBI device object
1341 * @pnum: PEB to be erased
1342 *
1343 * Returns the new EC value on success, < 0 indicates an internal error.
1344 */
1345static int erase_block(struct ubi_device *ubi, int pnum)
1346{
1347 int ret;
1348 struct ubi_ec_hdr *ec_hdr;
1349 long long ec;
1350
1351 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1352 if (!ec_hdr)
1353 return -ENOMEM;
1354
1355 ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1356 if (ret < 0)
1357 goto out;
1358 else if (ret && ret != UBI_IO_BITFLIPS) {
1359 ret = -EINVAL;
1360 goto out;
1361 }
1362
1363 ret = ubi_io_sync_erase(ubi, pnum, 0);
1364 if (ret < 0)
1365 goto out;
1366
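	/* ubi_io_sync_erase() returns the number of erasures performed */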
1367 ec = be64_to_cpu(ec_hdr->ec);
1368 ec += ret;
1369 if (ec > UBI_MAX_ERASECOUNTER) {
1370 ret = -EINVAL;
1371 goto out;
1372 }
1373
1374 ec_hdr->ec = cpu_to_be64(ec);
1375 ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
1376 if (ret < 0)
1377 goto out;
1378
1379 ret = ec;
1380out:
1381 kfree(ec_hdr);
1382 return ret;
1383}
1384
1385/**
1386 * invalidate_fastmap - destroys a fastmap.
1387 * @ubi: UBI device object
1388 *
1389 * This function ensures that upon next UBI attach a full scan
1390 * is issued. We need this if UBI is about to write a new fastmap
1391 * but is unable to do so. In this case we have two options:
 * a) Make sure that the current fastmap will not be used upon
 * attach time and continue, or b) fall back to RO mode to have the
1394 * current fastmap in a valid state.
1395 * Returns 0 on success, < 0 indicates an internal error.
1396 */
1397static int invalidate_fastmap(struct ubi_device *ubi)
1398{
1399 int ret;
1400 struct ubi_fastmap_layout *fm;
1401 struct ubi_wl_entry *e;
	struct ubi_vid_io_buf *vb = NULL;
	struct ubi_vid_hdr *vh;
1403
1404 if (!ubi->fm)
1405 return 0;
1406
1407 ubi->fm = NULL;
1408
1409 ret = -ENOMEM;
1410 fm = kzalloc(sizeof(*fm), GFP_KERNEL);
1411 if (!fm)
1412 goto out;
1413
	vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
	if (!vb)
		goto out_free_fm;

	vh = ubi_get_vid_hdr(vb);
1417
1418 ret = -ENOSPC;
1419 e = ubi_wl_get_fm_peb(ubi, 1);
1420 if (!e)
1421 goto out_free_fm;
1422
1423 /*
1424 * Create fake fastmap such that UBI will fall back
1425 * to scanning mode.
1426 */
1427 vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
1429 if (ret < 0) {
1430 ubi_wl_put_fm_peb(ubi, e, 0, 0);
1431 goto out_free_fm;
1432 }
1433
1434 fm->used_blocks = 1;
1435 fm->e[0] = e;
1436
1437 ubi->fm = fm;
1438
1439out:
	ubi_free_vid_buf(vb);
1441 return ret;
1442
1443out_free_fm:
1444 kfree(fm);
1445 goto out;
1446}
1447
1448/**
1449 * return_fm_pebs - returns all PEBs used by a fastmap back to the
1450 * WL sub-system.
1451 * @ubi: UBI device object
1452 * @fm: fastmap layout object
1453 */
1454static void return_fm_pebs(struct ubi_device *ubi,
1455 struct ubi_fastmap_layout *fm)
1456{
1457 int i;
1458
1459 if (!fm)
1460 return;
1461
1462 for (i = 0; i < fm->used_blocks; i++) {
1463 if (fm->e[i]) {
1464 ubi_wl_put_fm_peb(ubi, fm->e[i], i,
1465 fm->to_be_tortured[i]);
1466 fm->e[i] = NULL;
1467 }
1468 }
1469}
1470
1471/**
1472 * ubi_update_fastmap - will be called by UBI if a volume changes or
1473 * a fastmap pool becomes full.
1474 * @ubi: UBI device object
1475 *
1476 * Returns 0 on success, < 0 indicates an internal error.
1477 */
1478int ubi_update_fastmap(struct ubi_device *ubi)
1479{
1480 int ret, i, j;
1481 struct ubi_fastmap_layout *new_fm, *old_fm;
1482 struct ubi_wl_entry *tmp_e;
1483
1484 down_write(&ubi->fm_protect);
1485
1486 ubi_refill_pools(ubi);
1487
1488 if (ubi->ro_mode || ubi->fm_disabled) {
1489 up_write(&ubi->fm_protect);
1490 return 0;
1491 }
1492
1493 ret = ubi_ensure_anchor_pebs(ubi);
1494 if (ret) {
1495 up_write(&ubi->fm_protect);
1496 return ret;
1497 }
1498
1499 new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
1500 if (!new_fm) {
1501 up_write(&ubi->fm_protect);
1502 return -ENOMEM;
1503 }
1504
1505 new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
1506 old_fm = ubi->fm;
1507 ubi->fm = NULL;
1508
1509 if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
1510 ubi_err(ubi, "fastmap too large");
1511 ret = -ENOSPC;
1512 goto err;
1513 }
1514
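	/*
	 * Get PEBs for the data blocks 1..used_blocks-1; block 0 is the
	 * anchor PEB and is handled separately below.
	 */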
1515 for (i = 1; i < new_fm->used_blocks; i++) {
1516 spin_lock(&ubi->wl_lock);
1517 tmp_e = ubi_wl_get_fm_peb(ubi, 0);
1518 spin_unlock(&ubi->wl_lock);
1519
1520 if (!tmp_e) {
1521 if (old_fm && old_fm->e[i]) {
1522 ret = erase_block(ubi, old_fm->e[i]->pnum);
1523 if (ret < 0) {
1524 ubi_err(ubi, "could not erase old fastmap PEB");
1525
1526 for (j = 1; j < i; j++) {
1527 ubi_wl_put_fm_peb(ubi, new_fm->e[j],
1528 j, 0);
1529 new_fm->e[j] = NULL;
1530 }
1531 goto err;
1532 }
1533 new_fm->e[i] = old_fm->e[i];
1534 old_fm->e[i] = NULL;
1535 } else {
1536 ubi_err(ubi, "could not get any free erase block");
1537
1538 for (j = 1; j < i; j++) {
1539 ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
1540 new_fm->e[j] = NULL;
1541 }
1542
1543 ret = -ENOSPC;
1544 goto err;
1545 }
1546 } else {
1547 new_fm->e[i] = tmp_e;
1548
1549 if (old_fm && old_fm->e[i]) {
1550 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1551 old_fm->to_be_tortured[i]);
1552 old_fm->e[i] = NULL;
1553 }
1554 }
1555 }
1556
1557 /* Old fastmap is larger than the new one */
1558 if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
1559 for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
1560 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1561 old_fm->to_be_tortured[i]);
1562 old_fm->e[i] = NULL;
1563 }
1564 }
1565
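	/*
	 * Ask the WL sub-system for an anchor PEB, i.e. a PEB located in
	 * the first PEBs of the device so that attach can find the fastmap
	 * super block without scanning the whole device.
	 */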
1566 spin_lock(&ubi->wl_lock);
1567 tmp_e = ubi_wl_get_fm_peb(ubi, 1);
1568 spin_unlock(&ubi->wl_lock);
1569
1570 if (old_fm) {
1571 /* no fresh anchor PEB was found, reuse the old one */
1572 if (!tmp_e) {
1573 ret = erase_block(ubi, old_fm->e[0]->pnum);
1574 if (ret < 0) {
1575 ubi_err(ubi, "could not erase old anchor PEB");
1576
1577 for (i = 1; i < new_fm->used_blocks; i++) {
1578 ubi_wl_put_fm_peb(ubi, new_fm->e[i],
1579 i, 0);
1580 new_fm->e[i] = NULL;
1581 }
1582 goto err;
1583 }
1584 new_fm->e[0] = old_fm->e[0];
1585 new_fm->e[0]->ec = ret;
1586 old_fm->e[0] = NULL;
1587 } else {
1588 /* we've got a new anchor PEB, return the old one */
1589 ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
1590 old_fm->to_be_tortured[0]);
1591 new_fm->e[0] = tmp_e;
1592 old_fm->e[0] = NULL;
1593 }
1594 } else {
1595 if (!tmp_e) {
1596 ubi_err(ubi, "could not find any anchor PEB");
1597
1598 for (i = 1; i < new_fm->used_blocks; i++) {
1599 ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
1600 new_fm->e[i] = NULL;
1601 }
1602
1603 ret = -ENOSPC;
1604 goto err;
1605 }
1606 new_fm->e[0] = tmp_e;
1607 }
1608
1609 down_write(&ubi->work_sem);
1610 down_write(&ubi->fm_eba_sem);
1611 ret = ubi_write_fastmap(ubi, new_fm);
1612 up_write(&ubi->fm_eba_sem);
1613 up_write(&ubi->work_sem);
1614
1615 if (ret)
1616 goto err;
1617
1618out_unlock:
1619 up_write(&ubi->fm_protect);
1620 kfree(old_fm);
1621 return ret;
1622
1623err:
1624 ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
1625
1626 ret = invalidate_fastmap(ubi);
1627 if (ret < 0) {
		ubi_err(ubi, "Unable to invalidate current fastmap!");
1629 ubi_ro_mode(ubi);
1630 } else {
1631 return_fm_pebs(ubi, old_fm);
1632 return_fm_pebs(ubi, new_fm);
1633 ret = 0;
1634 }
1635
1636 kfree(new_fm);
1637 goto out_unlock;
1638}