1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2012 Linutronix GmbH
4 * Copyright (c) 2014 sigma star gmbh
5 * Author: Richard Weinberger <richard@nod.at>
6 */
7
8#include <linux/crc32.h>
9#include <linux/bitmap.h>
10#include "ubi.h"
11
12/**
13 * init_seen - allocate a PEB bitmap used for debugging.
14 * @ubi: UBI device description object
15 */
16static inline unsigned long *init_seen(struct ubi_device *ubi)
17{
18 unsigned long *ret;
19
20 if (!ubi_dbg_chk_fastmap(ubi))
21 return NULL;
22
23 ret = kcalloc(BITS_TO_LONGS(ubi->peb_count), sizeof(unsigned long),
24 GFP_KERNEL);
25 if (!ret)
26 return ERR_PTR(-ENOMEM);
27
28 return ret;
29}
30
31/**
32 * free_seen - free the seen PEB bitmap.
33 * @seen: bitmap of @ubi->peb_count bits
34 */
35static inline void free_seen(unsigned long *seen)
36{
37 kfree(seen);
38}
39
40/**
41 * set_seen - mark a PEB as seen.
42 * @ubi: UBI device description object
43 * @pnum: the PEB to be marked as seen
44 * @seen: bitmap of @ubi->peb_count bits
45 */
46static inline void set_seen(struct ubi_device *ubi, int pnum, unsigned long *seen)
47{
48 if (!ubi_dbg_chk_fastmap(ubi) || !seen)
49 return;
50
51 set_bit(pnum, seen);
52}
53
54/**
55 * self_check_seen - check whether all PEBs have been seen by fastmap.
56 * @ubi: UBI device description object
57 * @seen: bitmap of @ubi->peb_count bits
58 */
59static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
60{
61 int pnum, ret = 0;
62
63 if (!ubi_dbg_chk_fastmap(ubi) || !seen)
64 return 0;
65
66 for (pnum = 0; pnum < ubi->peb_count; pnum++) {
67 if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
68 ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
69 ret = -EINVAL;
70 }
71 }
72
73 return ret;
74}
75
76/**
77 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
78 * @ubi: UBI device description object
79 */
80size_t ubi_calc_fm_size(struct ubi_device *ubi)
81{
82 size_t size;
83
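	/*
	 * Worst case: one ubi_fm_ec and one EBA slot per PEB, plus a volume
	 * header for every possible volume.
	 */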
84 size = sizeof(struct ubi_fm_sb) +
85 sizeof(struct ubi_fm_hdr) +
86 sizeof(struct ubi_fm_scan_pool) +
87 sizeof(struct ubi_fm_scan_pool) +
88 (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
89 (sizeof(struct ubi_fm_eba) +
90 (ubi->peb_count * sizeof(__be32))) +
91 sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
92 return roundup(size, ubi->leb_size);
93}
94
95
96/**
97 * new_fm_vbuf - allocate a new VID buffer for fastmap usage.
98 * @ubi: UBI device description object
99 * @vol_id: the VID of the new header
100 *
101 * Returns a new struct ubi_vid_io_buf on success.
102 * NULL indicates out of memory.
103 */
104static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
105{
106 struct ubi_vid_io_buf *new;
107 struct ubi_vid_hdr *vh;
108
109 new = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
110 if (!new)
111 goto out;
112
113 vh = ubi_get_vid_hdr(new);
114 vh->vol_type = UBI_VID_DYNAMIC;
115 vh->vol_id = cpu_to_be32(vol_id);
116
117 /* UBI implementations without fastmap support have to delete the
118 * fastmap.
119 */
120 vh->compat = UBI_COMPAT_DELETE;
121
122out:
123 return new;
124}
125
126/**
127 * add_aeb - create and add an attach erase block to a given list.
128 * @ai: UBI attach info object
129 * @list: the target list
130 * @pnum: PEB number of the new attach erase block
131 * @ec: erase counter of the new PEB
132 * @scrub: scrub this PEB after attaching
133 *
134 * Returns 0 on success, < 0 indicates an internal error.
135 */
136static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
137 int pnum, int ec, int scrub)
138{
139 struct ubi_ainf_peb *aeb;
140
141 aeb = ubi_alloc_aeb(ai, pnum, ec);
142 if (!aeb)
143 return -ENOMEM;
144
145 aeb->lnum = -1;
146 aeb->scrub = scrub;
147 aeb->copy_flag = aeb->sqnum = 0;
148
149 ai->ec_sum += aeb->ec;
150 ai->ec_count++;
151
152 if (ai->max_ec < aeb->ec)
153 ai->max_ec = aeb->ec;
154
155 if (ai->min_ec > aeb->ec)
156 ai->min_ec = aeb->ec;
157
158 list_add_tail(&aeb->u.list, list);
159
160 return 0;
161}
162
163/**
164 * add_vol - create and add a new volume to ubi_attach_info.
165 * @ai: ubi_attach_info object
166 * @vol_id: VID of the new volume
167 * @used_ebs: number of used EBs
168 * @data_pad: data padding value of the new volume
169 * @vol_type: volume type
170 * @last_eb_bytes: number of bytes in the last LEB
171 *
172 * Returns the new struct ubi_ainf_volume on success.
173 * NULL indicates an error.
174 */
175static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
176 int used_ebs, int data_pad, u8 vol_type,
177 int last_eb_bytes)
178{
179 struct ubi_ainf_volume *av;
180
181 av = ubi_add_av(ai, vol_id);
182 if (IS_ERR(av))
183 return av;
184
185 av->data_pad = data_pad;
186 av->last_data_size = last_eb_bytes;
187 av->compat = 0;
188 av->vol_type = vol_type;
189 if (av->vol_type == UBI_STATIC_VOLUME)
190 av->used_ebs = used_ebs;
191
192 dbg_bld("found volume (ID %i)", vol_id);
193 return av;
194}
195
196/**
197 * assign_aeb_to_av - assigns an AEB to a given ainf_volume and removes it
198 * from its original list.
199 * @ai: ubi_attach_info object
200 * @aeb: the AEB to be assigned
201 * @av: target scan volume
202 */
203static void assign_aeb_to_av(struct ubi_attach_info *ai,
204 struct ubi_ainf_peb *aeb,
205 struct ubi_ainf_volume *av)
206{
207 struct ubi_ainf_peb *tmp_aeb;
208 struct rb_node **p = &av->root.rb_node, *parent = NULL;
209
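	/* Walk the volume's RB-tree, keyed by LEB number, to find the insertion slot. */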
210 while (*p) {
211 parent = *p;
212
213 tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
214 if (aeb->lnum != tmp_aeb->lnum) {
215 if (aeb->lnum < tmp_aeb->lnum)
216 p = &(*p)->rb_left;
217 else
218 p = &(*p)->rb_right;
219
220 continue;
221 } else
222 break;
223 }
224
225 list_del(&aeb->u.list);
226 av->leb_count++;
227
228 rb_link_node(&aeb->u.rb, parent, p);
229 rb_insert_color(&aeb->u.rb, &av->root);
230}
231
232/**
233 * update_vol - inserts or updates a LEB which was found in a pool.
234 * @ubi: the UBI device object
235 * @ai: attach info object
236 * @av: the volume this LEB belongs to
237 * @new_vh: the volume header derived from new_aeb
238 * @new_aeb: the AEB to be examined
239 *
240 * Returns 0 on success, < 0 indicates an internal error.
241 */
242static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
243 struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
244 struct ubi_ainf_peb *new_aeb)
245{
246 struct rb_node **p = &av->root.rb_node, *parent = NULL;
247 struct ubi_ainf_peb *aeb, *victim;
248 int cmp_res;
249
250 while (*p) {
251 parent = *p;
252 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
253
254 if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
255 if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
256 p = &(*p)->rb_left;
257 else
258 p = &(*p)->rb_right;
259
260 continue;
261 }
262
263 /* This case can happen if the fastmap gets written
264 * because of a volume change (creation, deletion, ..).
265 * Then a PEB can be within the persistent EBA and the pool.
266 */
267 if (aeb->pnum == new_aeb->pnum) {
268 ubi_assert(aeb->lnum == new_aeb->lnum);
269 ubi_free_aeb(ai, new_aeb);
270
271 return 0;
272 }
273
274 cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
275 if (cmp_res < 0)
276 return cmp_res;
277
278 /* new_aeb is newer */
279 if (cmp_res & 1) {
280 victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
281 if (!victim)
282 return -ENOMEM;
283
284 list_add_tail(&victim->u.list, &ai->erase);
285
286 if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
287 av->last_data_size =
288 be32_to_cpu(new_vh->data_size);
289
290 dbg_bld("vol %i: AEB %i's PEB %i is the newer",
291 av->vol_id, aeb->lnum, new_aeb->pnum);
292
293 aeb->ec = new_aeb->ec;
294 aeb->pnum = new_aeb->pnum;
295 aeb->copy_flag = new_vh->copy_flag;
296 aeb->scrub = new_aeb->scrub;
297 aeb->sqnum = new_aeb->sqnum;
298 ubi_free_aeb(ai, new_aeb);
299
300 /* new_aeb is older */
301 } else {
302 dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
303 av->vol_id, aeb->lnum, new_aeb->pnum);
304 list_add_tail(&new_aeb->u.list, &ai->erase);
305 }
306
307 return 0;
308 }
309 /* This LEB is new, let's add it to the volume */
310
311 if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
312 av->highest_lnum = be32_to_cpu(new_vh->lnum);
313 av->last_data_size = be32_to_cpu(new_vh->data_size);
314 }
315
316 if (av->vol_type == UBI_STATIC_VOLUME)
317 av->used_ebs = be32_to_cpu(new_vh->used_ebs);
318
319 av->leb_count++;
320
321 rb_link_node(&new_aeb->u.rb, parent, p);
322 rb_insert_color(&new_aeb->u.rb, &av->root);
323
324 return 0;
325}
326
327/**
328 * process_pool_aeb - we found a non-empty PEB in a pool.
329 * @ubi: UBI device object
330 * @ai: attach info object
331 * @new_vh: the volume header derived from new_aeb
332 * @new_aeb: the AEB to be examined
333 *
334 * Returns 0 on success, < 0 indicates an internal error.
335 */
336static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
337 struct ubi_vid_hdr *new_vh,
338 struct ubi_ainf_peb *new_aeb)
339{
340 int vol_id = be32_to_cpu(new_vh->vol_id);
341 struct ubi_ainf_volume *av;
342
343 if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) {
344 ubi_free_aeb(ai, new_aeb);
345
346 return 0;
347 }
348
349 /* Find the volume this SEB belongs to */
350 av = ubi_find_av(ai, vol_id);
351 if (!av) {
352 ubi_err(ubi, "orphaned volume in fastmap pool!");
353 ubi_free_aeb(ai, new_aeb);
354 return UBI_BAD_FASTMAP;
355 }
356
357 ubi_assert(vol_id == av->vol_id);
358
359 return update_vol(ubi, ai, av, new_vh, new_aeb);
360}
361
362/**
363 * unmap_peb - unmap a PEB.
364 * If fastmap detects a free PEB in the pool it has to check whether
365 * this PEB has been unmapped after writing the fastmap.
366 *
367 * @ai: UBI attach info object
368 * @pnum: The PEB to be unmapped
369 */
370static void unmap_peb(struct ubi_attach_info *ai, int pnum)
371{
372 struct ubi_ainf_volume *av;
373 struct rb_node *node, *node2;
374 struct ubi_ainf_peb *aeb;
375
376 ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
377 ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
378 if (aeb->pnum == pnum) {
379 rb_erase(&aeb->u.rb, &av->root);
380 av->leb_count--;
381 ubi_free_aeb(ai, aeb);
382 return;
383 }
384 }
385 }
386}
387
388/**
389 * scan_pool - scans a pool for changed (no longer empty) PEBs.
390 * @ubi: UBI device object
391 * @ai: attach info object
392 * @pebs: an array of all PEB numbers in the pool to be scanned
393 * @pool_size: size of the pool (number of entries in @pebs)
394 * @max_sqnum: pointer to the maximal sequence number
395 * @free: list of PEBs which are most likely free (and go into @ai->free)
396 *
397 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
398 * < 0 indicates an internal error.
399 */
400static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
401 __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
402 struct list_head *free)
403{
404 struct ubi_vid_io_buf *vb;
405 struct ubi_vid_hdr *vh;
406 struct ubi_ec_hdr *ech;
407 struct ubi_ainf_peb *new_aeb;
408 int i, pnum, err, ret = 0;
409
410 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
411 if (!ech)
412 return -ENOMEM;
413
414 vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
415 if (!vb) {
416 kfree(ech);
417 return -ENOMEM;
418 }
419
420 vh = ubi_get_vid_hdr(vb);
421
422 dbg_bld("scanning fastmap pool: size = %i", pool_size);
423
424 /*
425 * Now scan all PEBs in the pool to find changes which have been made
426 * after the creation of the fastmap
427 */
428 for (i = 0; i < pool_size; i++) {
429 int scrub = 0;
430 int image_seq;
431
432 pnum = be32_to_cpu(pebs[i]);
433
434 if (ubi_io_is_bad(ubi, pnum)) {
435 ubi_err(ubi, "bad PEB in fastmap pool!");
436 ret = UBI_BAD_FASTMAP;
437 goto out;
438 }
439
440 err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
441 if (err && err != UBI_IO_BITFLIPS) {
442 ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
443 pnum, err);
444 ret = err > 0 ? UBI_BAD_FASTMAP : err;
445 goto out;
446 } else if (err == UBI_IO_BITFLIPS)
447 scrub = 1;
448
449 /*
450 * Older UBI implementations have image_seq set to zero, so
451 * we shouldn't fail if image_seq == 0.
452 */
453 image_seq = be32_to_cpu(ech->image_seq);
454
455 if (image_seq && (image_seq != ubi->image_seq)) {
456 ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
457 be32_to_cpu(ech->image_seq), ubi->image_seq);
458 ret = UBI_BAD_FASTMAP;
459 goto out;
460 }
461
462 err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
463 if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
464 unsigned long long ec = be64_to_cpu(ech->ec);
465 unmap_peb(ai, pnum);
466 dbg_bld("Adding PEB to free: %i", pnum);
467
468 if (err == UBI_IO_FF_BITFLIPS)
469 scrub = 1;
470
471 add_aeb(ai, free, pnum, ec, scrub);
472 continue;
473 } else if (err == 0 || err == UBI_IO_BITFLIPS) {
474 dbg_bld("Found non empty PEB:%i in pool", pnum);
475
476 if (err == UBI_IO_BITFLIPS)
477 scrub = 1;
478
479 new_aeb = ubi_alloc_aeb(ai, pnum, be64_to_cpu(ech->ec));
480 if (!new_aeb) {
481 ret = -ENOMEM;
482 goto out;
483 }
484
485 new_aeb->lnum = be32_to_cpu(vh->lnum);
486 new_aeb->sqnum = be64_to_cpu(vh->sqnum);
487 new_aeb->copy_flag = vh->copy_flag;
488 new_aeb->scrub = scrub;
489
490 if (*max_sqnum < new_aeb->sqnum)
491 *max_sqnum = new_aeb->sqnum;
492
493 err = process_pool_aeb(ubi, ai, vh, new_aeb);
494 if (err) {
495 ret = err > 0 ? UBI_BAD_FASTMAP : err;
496 goto out;
497 }
498 } else {
499 /* We are paranoid and fall back to scanning mode */
500 ubi_err(ubi, "fastmap pool PEBs contains damaged PEBs!");
501 ret = err > 0 ? UBI_BAD_FASTMAP : err;
502 goto out;
503 }
504
505 }
506
507out:
508 ubi_free_vid_buf(vb);
509 kfree(ech);
510 return ret;
511}
512
513/**
514 * count_fastmap_pebs - Counts the PEBs found by fastmap.
515 * @ai: The UBI attach info object
516 */
517static int count_fastmap_pebs(struct ubi_attach_info *ai)
518{
519 struct ubi_ainf_peb *aeb;
520 struct ubi_ainf_volume *av;
521 struct rb_node *rb1, *rb2;
522 int n = 0;
523
524 list_for_each_entry(aeb, &ai->erase, u.list)
525 n++;
526
527 list_for_each_entry(aeb, &ai->free, u.list)
528 n++;
529
530 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
531 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
532 n++;
533
534 return n;
535}
536
537/**
538 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
539 * @ubi: UBI device object
540 * @ai: UBI attach info object
541 * @fm: the fastmap to be attached
542 *
543 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
544 * < 0 indicates an internal error.
545 */
546static int ubi_attach_fastmap(struct ubi_device *ubi,
547 struct ubi_attach_info *ai,
548 struct ubi_fastmap_layout *fm)
549{
550 struct list_head used, free;
551 struct ubi_ainf_volume *av;
552 struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
553 struct ubi_fm_sb *fmsb;
554 struct ubi_fm_hdr *fmhdr;
555 struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
556 struct ubi_fm_ec *fmec;
557 struct ubi_fm_volhdr *fmvhdr;
558 struct ubi_fm_eba *fm_eba;
559 int ret, i, j, pool_size, wl_pool_size;
560 size_t fm_pos = 0, fm_size = ubi->fm_size;
561 unsigned long long max_sqnum = 0;
562 void *fm_raw = ubi->fm_buf;
563
564 INIT_LIST_HEAD(&used);
565 INIT_LIST_HEAD(&free);
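	/* Start with the highest possible erase counter; add_aeb() lowers it to the real minimum. */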
566 ai->min_ec = UBI_MAX_ERASECOUNTER;
567
568 fmsb = (struct ubi_fm_sb *)(fm_raw);
569 ai->max_sqnum = fmsb->sqnum;
570 fm_pos += sizeof(struct ubi_fm_sb);
571 if (fm_pos >= fm_size)
572 goto fail_bad;
573
574 fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
575 fm_pos += sizeof(*fmhdr);
576 if (fm_pos >= fm_size)
577 goto fail_bad;
578
579 if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
580 ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
581 be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
582 goto fail_bad;
583 }
584
585 fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
586 fm_pos += sizeof(*fmpl);
587 if (fm_pos >= fm_size)
588 goto fail_bad;
589 if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
590 ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
591 be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
592 goto fail_bad;
593 }
594
595 fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
596 fm_pos += sizeof(*fmpl_wl);
597 if (fm_pos >= fm_size)
598 goto fail_bad;
599 if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
600 ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
601 be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
602 goto fail_bad;
603 }
604
605 pool_size = be16_to_cpu(fmpl->size);
606 wl_pool_size = be16_to_cpu(fmpl_wl->size);
607 fm->max_pool_size = be16_to_cpu(fmpl->max_size);
608 fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);
609
610 if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
611 ubi_err(ubi, "bad pool size: %i", pool_size);
612 goto fail_bad;
613 }
614
615 if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
616 ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
617 goto fail_bad;
618 }
619
620
621 if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
622 fm->max_pool_size < 0) {
623 ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
624 goto fail_bad;
625 }
626
627 if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
628 fm->max_wl_pool_size < 0) {
629 ubi_err(ubi, "bad maximal WL pool size: %i",
630 fm->max_wl_pool_size);
631 goto fail_bad;
632 }
633
634 /* read EC values from free list */
635 for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
636 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
637 fm_pos += sizeof(*fmec);
638 if (fm_pos >= fm_size)
639 goto fail_bad;
640
641 add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
642 be32_to_cpu(fmec->ec), 0);
643 }
644
645 /* read EC values from used list */
646 for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
647 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
648 fm_pos += sizeof(*fmec);
649 if (fm_pos >= fm_size)
650 goto fail_bad;
651
652 add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
653 be32_to_cpu(fmec->ec), 0);
654 }
655
656 /* read EC values from scrub list */
657 for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
658 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
659 fm_pos += sizeof(*fmec);
660 if (fm_pos >= fm_size)
661 goto fail_bad;
662
663 add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
664 be32_to_cpu(fmec->ec), 1);
665 }
666
667 /* read EC values from erase list */
668 for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
669 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
670 fm_pos += sizeof(*fmec);
671 if (fm_pos >= fm_size)
672 goto fail_bad;
673
674 add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
675 be32_to_cpu(fmec->ec), 1);
676 }
677
678 ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
679 ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
680
681 /* Iterate over all volumes and read their EBA table */
682 for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
683 fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
684 fm_pos += sizeof(*fmvhdr);
685 if (fm_pos >= fm_size)
686 goto fail_bad;
687
688 if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
689 ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
690 be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
691 goto fail_bad;
692 }
693
694 av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
695 be32_to_cpu(fmvhdr->used_ebs),
696 be32_to_cpu(fmvhdr->data_pad),
697 fmvhdr->vol_type,
698 be32_to_cpu(fmvhdr->last_eb_bytes));
699
700 if (IS_ERR(av)) {
701 if (PTR_ERR(av) == -EEXIST)
702 ubi_err(ubi, "volume (ID %i) already exists",
703 fmvhdr->vol_id);
704
705 goto fail_bad;
706 }
707
708 ai->vols_found++;
709 if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
710 ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
711
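	/* The EBA table that follows is an array of PEB numbers indexed by LEB number. */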
712 fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
713 fm_pos += sizeof(*fm_eba);
714 fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
715 if (fm_pos >= fm_size)
716 goto fail_bad;
717
718 if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
719 ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
720 be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
721 goto fail_bad;
722 }
723
724 for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
725 int pnum = be32_to_cpu(fm_eba->pnum[j]);
726
727 if (pnum < 0)
728 continue;
729
730 aeb = NULL;
731 list_for_each_entry(tmp_aeb, &used, u.list) {
732 if (tmp_aeb->pnum == pnum) {
733 aeb = tmp_aeb;
734 break;
735 }
736 }
737
738 if (!aeb) {
739 ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
740 goto fail_bad;
741 }
742
743 aeb->lnum = j;
744
745 if (av->highest_lnum <= aeb->lnum)
746 av->highest_lnum = aeb->lnum;
747
748 assign_aeb_to_av(ai, aeb, av);
749
750 dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
751 aeb->pnum, aeb->lnum, av->vol_id);
752 }
753 }
754
755 ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
756 if (ret)
757 goto fail;
758
759 ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
760 if (ret)
761 goto fail;
762
763 if (max_sqnum > ai->max_sqnum)
764 ai->max_sqnum = max_sqnum;
765
766 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
767 list_move_tail(&tmp_aeb->u.list, &ai->free);
768
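	/* Anything left on the local used list was not referenced by any EBA table; schedule it for erasure. */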
769 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
770 list_move_tail(&tmp_aeb->u.list, &ai->erase);
771
772 ubi_assert(list_empty(&free));
773
774 /*
775 * If fastmap is leaking PEBs (must not happen), raise a
776 * fat warning and fall back to scanning mode.
777 * We do this here because in ubi_wl_init() it's too late
778 * and we cannot fall back to scanning.
779 */
780 if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
781 ai->bad_peb_count - fm->used_blocks))
782 goto fail_bad;
783
784 return 0;
785
786fail_bad:
787 ret = UBI_BAD_FASTMAP;
788fail:
789 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
790 list_del(&tmp_aeb->u.list);
791 ubi_free_aeb(ai, tmp_aeb);
792 }
793 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
794 list_del(&tmp_aeb->u.list);
795 ubi_free_aeb(ai, tmp_aeb);
796 }
797
798 return ret;
799}
800
801/**
802 * find_fm_anchor - find the most recent Fastmap superblock (anchor)
803 * @ai: UBI attach info to be filled
804 */
805static int find_fm_anchor(struct ubi_attach_info *ai)
806{
807 int ret = -1;
808 struct ubi_ainf_peb *aeb;
809 unsigned long long max_sqnum = 0;
810
811 list_for_each_entry(aeb, &ai->fastmap, u.list) {
812 if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
813 max_sqnum = aeb->sqnum;
814 ret = aeb->pnum;
815 }
816 }
817
818 return ret;
819}
820
821static struct ubi_ainf_peb *clone_aeb(struct ubi_attach_info *ai,
822 struct ubi_ainf_peb *old)
823{
824 struct ubi_ainf_peb *new;
825
826 new = ubi_alloc_aeb(ai, old->pnum, old->ec);
827 if (!new)
828 return NULL;
829
830 new->vol_id = old->vol_id;
831 new->sqnum = old->sqnum;
832 new->lnum = old->lnum;
833 new->scrub = old->scrub;
834 new->copy_flag = old->copy_flag;
835
836 return new;
837}
838
839/**
840 * ubi_scan_fastmap - scan the fastmap.
841 * @ubi: UBI device object
842 * @ai: UBI attach info to be filled
843 * @scan_ai: UBI attach info from the first 64 PEBs,
844 * used to find the most recent Fastmap data structure
845 *
846 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
847 * UBI_BAD_FASTMAP if one was found but is not usable.
848 * < 0 indicates an internal error.
849 */
850int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
851 struct ubi_attach_info *scan_ai)
852{
853 struct ubi_fm_sb *fmsb, *fmsb2;
854 struct ubi_vid_io_buf *vb;
855 struct ubi_vid_hdr *vh;
856 struct ubi_ec_hdr *ech;
857 struct ubi_fastmap_layout *fm;
858 struct ubi_ainf_peb *aeb;
859 int i, used_blocks, pnum, fm_anchor, ret = 0;
860 size_t fm_size;
861 __be32 crc, tmp_crc;
862 unsigned long long sqnum = 0;
863
864 fm_anchor = find_fm_anchor(scan_ai);
865 if (fm_anchor < 0)
866 return UBI_NO_FASTMAP;
867
868 /* Copy all (possible) fastmap blocks into our new attach structure. */
869 list_for_each_entry(aeb, &scan_ai->fastmap, u.list) {
870 struct ubi_ainf_peb *new;
871
872 new = clone_aeb(ai, aeb);
873 if (!new)
874 return -ENOMEM;
875
876 list_add(&new->u.list, &ai->fastmap);
877 }
878
879 down_write(&ubi->fm_protect);
880 memset(ubi->fm_buf, 0, ubi->fm_size);
881
882 fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
883 if (!fmsb) {
884 ret = -ENOMEM;
885 goto out;
886 }
887
888 fm = kzalloc(sizeof(*fm), GFP_KERNEL);
889 if (!fm) {
890 ret = -ENOMEM;
891 kfree(fmsb);
892 goto out;
893 }
894
895 ret = ubi_io_read_data(ubi, fmsb, fm_anchor, 0, sizeof(*fmsb));
896 if (ret && ret != UBI_IO_BITFLIPS)
897 goto free_fm_sb;
898 else if (ret == UBI_IO_BITFLIPS)
899 fm->to_be_tortured[0] = 1;
900
901 if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
902 ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
903 be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
904 ret = UBI_BAD_FASTMAP;
905 goto free_fm_sb;
906 }
907
908 if (fmsb->version != UBI_FM_FMT_VERSION) {
909 ubi_err(ubi, "bad fastmap version: %i, expected: %i",
910 fmsb->version, UBI_FM_FMT_VERSION);
911 ret = UBI_BAD_FASTMAP;
912 goto free_fm_sb;
913 }
914
915 used_blocks = be32_to_cpu(fmsb->used_blocks);
916 if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
917 ubi_err(ubi, "number of fastmap blocks is invalid: %i",
918 used_blocks);
919 ret = UBI_BAD_FASTMAP;
920 goto free_fm_sb;
921 }
922
923 fm_size = ubi->leb_size * used_blocks;
924 if (fm_size != ubi->fm_size) {
925 ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
926 fm_size, ubi->fm_size);
927 ret = UBI_BAD_FASTMAP;
928 goto free_fm_sb;
929 }
930
931 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
932 if (!ech) {
933 ret = -ENOMEM;
934 goto free_fm_sb;
935 }
936
937 vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
938 if (!vb) {
939 ret = -ENOMEM;
940 goto free_hdr;
941 }
942
943 vh = ubi_get_vid_hdr(vb);
944
945 for (i = 0; i < used_blocks; i++) {
946 int image_seq;
947
948 pnum = be32_to_cpu(fmsb->block_loc[i]);
949
950 if (ubi_io_is_bad(ubi, pnum)) {
951 ret = UBI_BAD_FASTMAP;
952 goto free_hdr;
953 }
954
955 if (i == 0 && pnum != fm_anchor) {
956 ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
957 pnum, fm_anchor);
958 ret = UBI_BAD_FASTMAP;
959 goto free_hdr;
960 }
961
962 ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
963 if (ret && ret != UBI_IO_BITFLIPS) {
964 ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
965 i, pnum);
966 if (ret > 0)
967 ret = UBI_BAD_FASTMAP;
968 goto free_hdr;
969 } else if (ret == UBI_IO_BITFLIPS)
970 fm->to_be_tortured[i] = 1;
971
972 image_seq = be32_to_cpu(ech->image_seq);
973 if (!ubi->image_seq)
974 ubi->image_seq = image_seq;
975
976 /*
977 * Older UBI implementations have image_seq set to zero, so
978 * we shouldn't fail if image_seq == 0.
979 */
980 if (image_seq && (image_seq != ubi->image_seq)) {
981 ubi_err(ubi, "wrong image seq:%d instead of %d",
982 be32_to_cpu(ech->image_seq), ubi->image_seq);
983 ret = UBI_BAD_FASTMAP;
984 goto free_hdr;
985 }
986
987 ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
988 if (ret && ret != UBI_IO_BITFLIPS) {
989 ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
990 i, pnum);
991 goto free_hdr;
992 }
993
994 if (i == 0) {
995 if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
996 ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
997 be32_to_cpu(vh->vol_id),
998 UBI_FM_SB_VOLUME_ID);
999 ret = UBI_BAD_FASTMAP;
1000 goto free_hdr;
1001 }
1002 } else {
1003 if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
1004 ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
1005 be32_to_cpu(vh->vol_id),
1006 UBI_FM_DATA_VOLUME_ID);
1007 ret = UBI_BAD_FASTMAP;
1008 goto free_hdr;
1009 }
1010 }
1011
1012 if (sqnum < be64_to_cpu(vh->sqnum))
1013 sqnum = be64_to_cpu(vh->sqnum);
1014
1015 ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i),
1016 pnum, 0, ubi->leb_size);
1017 if (ret && ret != UBI_IO_BITFLIPS) {
1018 ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
1019 "err: %i)", i, pnum, ret);
1020 goto free_hdr;
1021 }
1022 }
1023
1024 kfree(fmsb);
1025 fmsb = NULL;
1026
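	/* The fastmap CRC covers the whole image with the data_crc field itself zeroed. */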
1027 fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
1028 tmp_crc = be32_to_cpu(fmsb2->data_crc);
1029 fmsb2->data_crc = 0;
1030 crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
1031 if (crc != tmp_crc) {
1032 ubi_err(ubi, "fastmap data CRC is invalid");
1033 ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
1034 tmp_crc, crc);
1035 ret = UBI_BAD_FASTMAP;
1036 goto free_hdr;
1037 }
1038
1039 fmsb2->sqnum = sqnum;
1040
1041 fm->used_blocks = used_blocks;
1042
1043 ret = ubi_attach_fastmap(ubi, ai, fm);
1044 if (ret) {
1045 if (ret > 0)
1046 ret = UBI_BAD_FASTMAP;
1047 goto free_hdr;
1048 }
1049
1050 for (i = 0; i < used_blocks; i++) {
1051 struct ubi_wl_entry *e;
1052
1053 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1054 if (!e) {
1055 while (i--)
1056 kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);
1057
1058 ret = -ENOMEM;
1059 goto free_hdr;
1060 }
1061
1062 e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
1063 e->ec = be32_to_cpu(fmsb2->block_ec[i]);
1064 fm->e[i] = e;
1065 }
1066
1067 ubi->fm = fm;
1068 ubi->fm_pool.max_size = ubi->fm->max_pool_size;
1069 ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
1070 ubi_msg(ubi, "attached by fastmap");
1071 ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
1072 ubi_msg(ubi, "fastmap WL pool size: %d",
1073 ubi->fm_wl_pool.max_size);
1074 ubi->fm_disabled = 0;
1075 ubi->fast_attach = 1;
1076
1077 ubi_free_vid_buf(vb);
1078 kfree(ech);
1079out:
1080 up_write(&ubi->fm_protect);
1081 if (ret == UBI_BAD_FASTMAP)
1082 ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
1083 return ret;
1084
1085free_hdr:
1086 ubi_free_vid_buf(vb);
1087 kfree(ech);
1088free_fm_sb:
1089 kfree(fmsb);
1090 kfree(fm);
1091 goto out;
1092}
1093
1094int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count)
1095{
1096 struct ubi_device *ubi = vol->ubi;
1097
1098 if (!ubi->fast_attach)
1099 return 0;
1100
1101 vol->checkmap = kcalloc(BITS_TO_LONGS(leb_count), sizeof(unsigned long),
1102 GFP_KERNEL);
1103 if (!vol->checkmap)
1104 return -ENOMEM;
1105
1106 return 0;
1107}
1108
1109void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol)
1110{
1111 kfree(vol->checkmap);
1112}
1113
1114/**
1115 * ubi_write_fastmap - writes a fastmap.
1116 * @ubi: UBI device object
1117 * @new_fm: the fastmap to be written
1118 *
1119 * Returns 0 on success, < 0 indicates an internal error.
1120 */
1121static int ubi_write_fastmap(struct ubi_device *ubi,
1122 struct ubi_fastmap_layout *new_fm)
1123{
1124 size_t fm_pos = 0;
1125 void *fm_raw;
1126 struct ubi_fm_sb *fmsb;
1127 struct ubi_fm_hdr *fmh;
1128 struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
1129 struct ubi_fm_ec *fec;
1130 struct ubi_fm_volhdr *fvh;
1131 struct ubi_fm_eba *feba;
1132 struct ubi_wl_entry *wl_e;
1133 struct ubi_volume *vol;
1134 struct ubi_vid_io_buf *avbuf, *dvbuf;
1135 struct ubi_vid_hdr *avhdr, *dvhdr;
1136 struct ubi_work *ubi_wrk;
1137 struct rb_node *tmp_rb;
1138 int ret, i, j, free_peb_count, used_peb_count, vol_count;
1139 int scrub_peb_count, erase_peb_count;
1140 unsigned long *seen_pebs;
1141
1142 fm_raw = ubi->fm_buf;
1143 memset(ubi->fm_buf, 0, ubi->fm_size);
1144
1145 avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
1146 if (!avbuf) {
1147 ret = -ENOMEM;
1148 goto out;
1149 }
1150
1151 dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
1152 if (!dvbuf) {
1153 ret = -ENOMEM;
1154 goto out_free_avbuf;
1155 }
1156
1157 avhdr = ubi_get_vid_hdr(avbuf);
1158 dvhdr = ubi_get_vid_hdr(dvbuf);
1159
1160 seen_pebs = init_seen(ubi);
1161 if (IS_ERR(seen_pebs)) {
1162 ret = PTR_ERR(seen_pebs);
1163 goto out_free_dvbuf;
1164 }
1165
1166 spin_lock(&ubi->volumes_lock);
1167 spin_lock(&ubi->wl_lock);
1168
1169 fmsb = (struct ubi_fm_sb *)fm_raw;
1170 fm_pos += sizeof(*fmsb);
1171 ubi_assert(fm_pos <= ubi->fm_size);
1172
1173 fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
1174 fm_pos += sizeof(*fmh);
1175 ubi_assert(fm_pos <= ubi->fm_size);
1176
1177 fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
1178 fmsb->version = UBI_FM_FMT_VERSION;
1179 fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
1180 /* the max sqnum will be filled in while *reading* the fastmap */
1181 fmsb->sqnum = 0;
1182
1183 fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
1184 free_peb_count = 0;
1185 used_peb_count = 0;
1186 scrub_peb_count = 0;
1187 erase_peb_count = 0;
1188 vol_count = 0;
1189
1190 fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1191 fm_pos += sizeof(*fmpl);
1192 fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1193 fmpl->size = cpu_to_be16(ubi->fm_pool.size);
1194 fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);
1195
1196 for (i = 0; i < ubi->fm_pool.size; i++) {
1197 fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
1198 set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
1199 }
1200
1201 fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1202 fm_pos += sizeof(*fmpl_wl);
1203 fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1204 fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
1205 fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
1206
1207 for (i = 0; i < ubi->fm_wl_pool.size; i++) {
1208 fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
1209 set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
1210 }
1211
1212 ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
1213 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1214
1215 fec->pnum = cpu_to_be32(wl_e->pnum);
1216 set_seen(ubi, wl_e->pnum, seen_pebs);
1217 fec->ec = cpu_to_be32(wl_e->ec);
1218
1219 free_peb_count++;
1220 fm_pos += sizeof(*fec);
1221 ubi_assert(fm_pos <= ubi->fm_size);
1222 }
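	/* Also record the PEB reserved for the next fastmap anchor so it is accounted for as free. */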
1223 if (ubi->fm_next_anchor) {
1224 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1225
1226 fec->pnum = cpu_to_be32(ubi->fm_next_anchor->pnum);
1227 set_seen(ubi, ubi->fm_next_anchor->pnum, seen_pebs);
1228 fec->ec = cpu_to_be32(ubi->fm_next_anchor->ec);
1229
1230 free_peb_count++;
1231 fm_pos += sizeof(*fec);
1232 ubi_assert(fm_pos <= ubi->fm_size);
1233 }
1234 fmh->free_peb_count = cpu_to_be32(free_peb_count);
1235
1236 ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
1237 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1238
1239 fec->pnum = cpu_to_be32(wl_e->pnum);
1240 set_seen(ubi, wl_e->pnum, seen_pebs);
1241 fec->ec = cpu_to_be32(wl_e->ec);
1242
1243 used_peb_count++;
1244 fm_pos += sizeof(*fec);
1245 ubi_assert(fm_pos <= ubi->fm_size);
1246 }
1247
1248 ubi_for_each_protected_peb(ubi, i, wl_e) {
1249 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1250
1251 fec->pnum = cpu_to_be32(wl_e->pnum);
1252 set_seen(ubi, wl_e->pnum, seen_pebs);
1253 fec->ec = cpu_to_be32(wl_e->ec);
1254
1255 used_peb_count++;
1256 fm_pos += sizeof(*fec);
1257 ubi_assert(fm_pos <= ubi->fm_size);
1258 }
1259 fmh->used_peb_count = cpu_to_be32(used_peb_count);
1260
1261 ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
1262 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1263
1264 fec->pnum = cpu_to_be32(wl_e->pnum);
1265 set_seen(ubi, wl_e->pnum, seen_pebs);
1266 fec->ec = cpu_to_be32(wl_e->ec);
1267
1268 scrub_peb_count++;
1269 fm_pos += sizeof(*fec);
1270 ubi_assert(fm_pos <= ubi->fm_size);
1271 }
1272 fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
1273
1274
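	/* PEBs queued for erasure are taken from the work list so the fastmap accounts for them as well. */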
1275 list_for_each_entry(ubi_wrk, &ubi->works, list) {
1276 if (ubi_is_erase_work(ubi_wrk)) {
1277 wl_e = ubi_wrk->e;
1278 ubi_assert(wl_e);
1279
1280 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1281
1282 fec->pnum = cpu_to_be32(wl_e->pnum);
1283 set_seen(ubi, wl_e->pnum, seen_pebs);
1284 fec->ec = cpu_to_be32(wl_e->ec);
1285
1286 erase_peb_count++;
1287 fm_pos += sizeof(*fec);
1288 ubi_assert(fm_pos <= ubi->fm_size);
1289 }
1290 }
1291 fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
1292
1293 for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
1294 vol = ubi->volumes[i];
1295
1296 if (!vol)
1297 continue;
1298
1299 vol_count++;
1300
1301 fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
1302 fm_pos += sizeof(*fvh);
1303 ubi_assert(fm_pos <= ubi->fm_size);
1304
1305 fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
1306 fvh->vol_id = cpu_to_be32(vol->vol_id);
1307 fvh->vol_type = vol->vol_type;
1308 fvh->used_ebs = cpu_to_be32(vol->used_ebs);
1309 fvh->data_pad = cpu_to_be32(vol->data_pad);
1310 fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
1311
1312 ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
1313 vol->vol_type == UBI_STATIC_VOLUME);
1314
1315 feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
1316 fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
1317 ubi_assert(fm_pos <= ubi->fm_size);
1318
1319 for (j = 0; j < vol->reserved_pebs; j++) {
1320 struct ubi_eba_leb_desc ldesc;
1321
1322 ubi_eba_get_ldesc(vol, j, &ldesc);
1323 feba->pnum[j] = cpu_to_be32(ldesc.pnum);
1324 }
1325
1326 feba->reserved_pebs = cpu_to_be32(j);
1327 feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
1328 }
1329 fmh->vol_count = cpu_to_be32(vol_count);
1330 fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
1331
1332 avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1333 avhdr->lnum = 0;
1334
1335 spin_unlock(&ubi->wl_lock);
1336 spin_unlock(&ubi->volumes_lock);
1337
1338 dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
1339 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
1340 if (ret) {
1341 ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
1342 goto out_free_seen;
1343 }
1344
1345 for (i = 0; i < new_fm->used_blocks; i++) {
1346 fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
1347 set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
1348 fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
1349 }
1350
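	/* Compute the CRC with data_crc zeroed, mirroring the check done in ubi_scan_fastmap(). */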
1351 fmsb->data_crc = 0;
1352 fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
1353 ubi->fm_size));
1354
1355 for (i = 1; i < new_fm->used_blocks; i++) {
1356 dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1357 dvhdr->lnum = cpu_to_be32(i);
1358 dbg_bld("writing fastmap data to PEB %i sqnum %llu",
1359 new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
1360 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
1361 if (ret) {
1362 ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
1363 new_fm->e[i]->pnum);
1364 goto out_free_seen;
1365 }
1366 }
1367
1368 for (i = 0; i < new_fm->used_blocks; i++) {
1369 ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size),
1370 new_fm->e[i]->pnum, 0, ubi->leb_size);
1371 if (ret) {
1372 ubi_err(ubi, "unable to write fastmap to PEB %i!",
1373 new_fm->e[i]->pnum);
1374 goto out_free_seen;
1375 }
1376 }
1377
1378 ubi_assert(new_fm);
1379 ubi->fm = new_fm;
1380
1381 ret = self_check_seen(ubi, seen_pebs);
1382 dbg_bld("fastmap written!");
1383
1384out_free_seen:
1385 free_seen(seen_pebs);
1386out_free_dvbuf:
1387 ubi_free_vid_buf(dvbuf);
1388out_free_avbuf:
1389 ubi_free_vid_buf(avbuf);
1390
1391out:
1392 return ret;
1393}
1394
1395/**
1396 * erase_block - Manually erase a PEB.
1397 * @ubi: UBI device object
1398 * @pnum: PEB to be erased
1399 *
1400 * Returns the new EC value on success, < 0 indicates an internal error.
1401 */
1402static int erase_block(struct ubi_device *ubi, int pnum)
1403{
1404 int ret;
1405 struct ubi_ec_hdr *ec_hdr;
1406 long long ec;
1407
1408 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1409 if (!ec_hdr)
1410 return -ENOMEM;
1411
1412 ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1413 if (ret < 0)
1414 goto out;
1415 else if (ret && ret != UBI_IO_BITFLIPS) {
1416 ret = -EINVAL;
1417 goto out;
1418 }
1419
1420 ret = ubi_io_sync_erase(ubi, pnum, 0);
1421 if (ret < 0)
1422 goto out;
1423
1424 ec = be64_to_cpu(ec_hdr->ec);
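	/* ubi_io_sync_erase() returns the number of erasures performed; add it to the old erase counter. */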
1425 ec += ret;
1426 if (ec > UBI_MAX_ERASECOUNTER) {
1427 ret = -EINVAL;
1428 goto out;
1429 }
1430
1431 ec_hdr->ec = cpu_to_be64(ec);
1432 ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
1433 if (ret < 0)
1434 goto out;
1435
1436 ret = ec;
1437out:
1438 kfree(ec_hdr);
1439 return ret;
1440}
1441
1442/**
1443 * invalidate_fastmap - destroys a fastmap.
1444 * @ubi: UBI device object
1445 *
1446 * This function ensures that upon next UBI attach a full scan
1447 * is issued. We need this if UBI is about to write a new fastmap
1448 * but is unable to do so. In this case we have two options:
1449 * a) Make sure that the current fastmap will not be used upon
1450 * attach time and continue, or b) fall back to RO mode to have the
1451 * current fastmap in a valid state.
1452 * Returns 0 on success, < 0 indicates an internal error.
1453 */
1454static int invalidate_fastmap(struct ubi_device *ubi)
1455{
1456 int ret;
1457 struct ubi_fastmap_layout *fm;
1458 struct ubi_wl_entry *e;
1459 struct ubi_vid_io_buf *vb = NULL;
1460 struct ubi_vid_hdr *vh;
1461
1462 if (!ubi->fm)
1463 return 0;
1464
1465 ubi->fm = NULL;
1466
1467 ret = -ENOMEM;
1468 fm = kzalloc(sizeof(*fm), GFP_KERNEL);
1469 if (!fm)
1470 goto out;
1471
1472 vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
1473 if (!vb)
1474 goto out_free_fm;
1475
1476 vh = ubi_get_vid_hdr(vb);
1477
1478 ret = -ENOSPC;
1479 e = ubi_wl_get_fm_peb(ubi, 1);
1480 if (!e)
1481 goto out_free_fm;
1482
1483 /*
1484 * Create fake fastmap such that UBI will fall back
1485 * to scanning mode.
1486 */
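	/* Writing only a fresh anchor VID header is enough: the next attach picks this PEB as anchor, finds no valid fastmap data behind it and falls back to a full scan. */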
1487 vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1488 ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
1489 if (ret < 0) {
1490 ubi_wl_put_fm_peb(ubi, e, 0, 0);
1491 goto out_free_fm;
1492 }
1493
1494 fm->used_blocks = 1;
1495 fm->e[0] = e;
1496
1497 ubi->fm = fm;
1498
1499out:
1500 ubi_free_vid_buf(vb);
1501 return ret;
1502
1503out_free_fm:
1504 kfree(fm);
1505 goto out;
1506}
1507
1508/**
1509 * return_fm_pebs - returns all PEBs used by a fastmap back to the
1510 * WL sub-system.
1511 * @ubi: UBI device object
1512 * @fm: fastmap layout object
1513 */
1514static void return_fm_pebs(struct ubi_device *ubi,
1515 struct ubi_fastmap_layout *fm)
1516{
1517 int i;
1518
1519 if (!fm)
1520 return;
1521
1522 for (i = 0; i < fm->used_blocks; i++) {
1523 if (fm->e[i]) {
1524 ubi_wl_put_fm_peb(ubi, fm->e[i], i,
1525 fm->to_be_tortured[i]);
1526 fm->e[i] = NULL;
1527 }
1528 }
1529}
1530
1531/**
1532 * ubi_update_fastmap - will be called by UBI if a volume changes or
1533 * a fastmap pool becomes full.
1534 * @ubi: UBI device object
1535 *
1536 * Returns 0 on success, < 0 indicates an internal error.
1537 */
1538int ubi_update_fastmap(struct ubi_device *ubi)
1539{
1540 int ret, i, j;
1541 struct ubi_fastmap_layout *new_fm, *old_fm;
1542 struct ubi_wl_entry *tmp_e;
1543
1544 down_write(&ubi->fm_protect);
1545 down_write(&ubi->work_sem);
1546 down_write(&ubi->fm_eba_sem);
1547
1548 ubi_refill_pools(ubi);
1549
1550 if (ubi->ro_mode || ubi->fm_disabled) {
1551 up_write(&ubi->fm_eba_sem);
1552 up_write(&ubi->work_sem);
1553 up_write(&ubi->fm_protect);
1554 return 0;
1555 }
1556
1557 new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
1558 if (!new_fm) {
1559 up_write(&ubi->fm_eba_sem);
1560 up_write(&ubi->work_sem);
1561 up_write(&ubi->fm_protect);
1562 return -ENOMEM;
1563 }
1564
1565 new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
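	/* ubi->fm_size is rounded up to a multiple of leb_size by ubi_calc_fm_size(), so this division is exact. */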
1566 old_fm = ubi->fm;
1567 ubi->fm = NULL;
1568
1569 if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
1570 ubi_err(ubi, "fastmap too large");
1571 ret = -ENOSPC;
1572 goto err;
1573 }
1574
1575 for (i = 1; i < new_fm->used_blocks; i++) {
1576 spin_lock(&ubi->wl_lock);
1577 tmp_e = ubi_wl_get_fm_peb(ubi, 0);
1578 spin_unlock(&ubi->wl_lock);
1579
1580 if (!tmp_e) {
1581 if (old_fm && old_fm->e[i]) {
1582 ret = erase_block(ubi, old_fm->e[i]->pnum);
1583 if (ret < 0) {
1584 ubi_err(ubi, "could not erase old fastmap PEB");
1585
1586 for (j = 1; j < i; j++) {
1587 ubi_wl_put_fm_peb(ubi, new_fm->e[j],
1588 j, 0);
1589 new_fm->e[j] = NULL;
1590 }
1591 goto err;
1592 }
1593 new_fm->e[i] = old_fm->e[i];
1594 old_fm->e[i] = NULL;
1595 } else {
1596 ubi_err(ubi, "could not get any free erase block");
1597
1598 for (j = 1; j < i; j++) {
1599 ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
1600 new_fm->e[j] = NULL;
1601 }
1602
1603 ret = -ENOSPC;
1604 goto err;
1605 }
1606 } else {
1607 new_fm->e[i] = tmp_e;
1608
1609 if (old_fm && old_fm->e[i]) {
1610 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1611 old_fm->to_be_tortured[i]);
1612 old_fm->e[i] = NULL;
1613 }
1614 }
1615 }
1616
1617 /* Old fastmap is larger than the new one */
1618 if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
1619 for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
1620 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1621 old_fm->to_be_tortured[i]);
1622 old_fm->e[i] = NULL;
1623 }
1624 }
1625
1626 spin_lock(&ubi->wl_lock);
1627 tmp_e = ubi->fm_anchor;
1628 ubi->fm_anchor = NULL;
1629 spin_unlock(&ubi->wl_lock);
1630
1631 if (old_fm) {
1632 /* no fresh anchor PEB was found, reuse the old one */
1633 if (!tmp_e) {
1634 ret = erase_block(ubi, old_fm->e[0]->pnum);
1635 if (ret < 0) {
1636 ubi_err(ubi, "could not erase old anchor PEB");
1637
1638 for (i = 1; i < new_fm->used_blocks; i++) {
1639 ubi_wl_put_fm_peb(ubi, new_fm->e[i],
1640 i, 0);
1641 new_fm->e[i] = NULL;
1642 }
1643 goto err;
1644 }
1645 new_fm->e[0] = old_fm->e[0];
1646 new_fm->e[0]->ec = ret;
1647 old_fm->e[0] = NULL;
1648 } else {
1649 /* we've got a new anchor PEB, return the old one */
1650 ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
1651 old_fm->to_be_tortured[0]);
1652 new_fm->e[0] = tmp_e;
1653 old_fm->e[0] = NULL;
1654 }
1655 } else {
1656 if (!tmp_e) {
1657 ubi_err(ubi, "could not find any anchor PEB");
1658
1659 for (i = 1; i < new_fm->used_blocks; i++) {
1660 ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
1661 new_fm->e[i] = NULL;
1662 }
1663
1664 ret = -ENOSPC;
1665 goto err;
1666 }
1667 new_fm->e[0] = tmp_e;
1668 }
1669
1670 ret = ubi_write_fastmap(ubi, new_fm);
1671
1672 if (ret)
1673 goto err;
1674
1675out_unlock:
1676 up_write(&ubi->fm_eba_sem);
1677 up_write(&ubi->work_sem);
1678 up_write(&ubi->fm_protect);
1679 kfree(old_fm);
1680
1681 ubi_ensure_anchor_pebs(ubi);
1682
1683 return ret;
1684
1685err:
1686 ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
1687
1688 ret = invalidate_fastmap(ubi);
1689 if (ret < 0) {
1690 ubi_err(ubi, "Unable to invalidate current fastmap!");
1691 ubi_ro_mode(ubi);
1692 } else {
1693 return_fm_pebs(ubi, old_fm);
1694 return_fm_pebs(ubi, new_fm);
1695 ret = 0;
1696 }
1697
1698 kfree(new_fm);
1699 goto out_unlock;
1700}
1/*
2 * Copyright (c) 2012 Linutronix GmbH
3 * Copyright (c) 2014 sigma star gmbh
4 * Author: Richard Weinberger <richard@nod.at>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
13 * the GNU General Public License for more details.
14 *
15 */
16
17#include <linux/crc32.h>
18#include "ubi.h"
19
20/**
21 * init_seen - allocate memory for used for debugging.
22 * @ubi: UBI device description object
23 */
24static inline int *init_seen(struct ubi_device *ubi)
25{
26 int *ret;
27
28 if (!ubi_dbg_chk_fastmap(ubi))
29 return NULL;
30
31 ret = kcalloc(ubi->peb_count, sizeof(int), GFP_KERNEL);
32 if (!ret)
33 return ERR_PTR(-ENOMEM);
34
35 return ret;
36}
37
38/**
39 * free_seen - free the seen logic integer array.
40 * @seen: integer array of @ubi->peb_count size
41 */
42static inline void free_seen(int *seen)
43{
44 kfree(seen);
45}
46
47/**
48 * set_seen - mark a PEB as seen.
49 * @ubi: UBI device description object
50 * @pnum: The PEB to be makred as seen
51 * @seen: integer array of @ubi->peb_count size
52 */
53static inline void set_seen(struct ubi_device *ubi, int pnum, int *seen)
54{
55 if (!ubi_dbg_chk_fastmap(ubi) || !seen)
56 return;
57
58 seen[pnum] = 1;
59}
60
61/**
62 * self_check_seen - check whether all PEB have been seen by fastmap.
63 * @ubi: UBI device description object
64 * @seen: integer array of @ubi->peb_count size
65 */
66static int self_check_seen(struct ubi_device *ubi, int *seen)
67{
68 int pnum, ret = 0;
69
70 if (!ubi_dbg_chk_fastmap(ubi) || !seen)
71 return 0;
72
73 for (pnum = 0; pnum < ubi->peb_count; pnum++) {
74 if (!seen[pnum] && ubi->lookuptbl[pnum]) {
75 ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
76 ret = -EINVAL;
77 }
78 }
79
80 return ret;
81}
82
83/**
84 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
85 * @ubi: UBI device description object
86 */
87size_t ubi_calc_fm_size(struct ubi_device *ubi)
88{
89 size_t size;
90
91 size = sizeof(struct ubi_fm_sb) +
92 sizeof(struct ubi_fm_hdr) +
93 sizeof(struct ubi_fm_scan_pool) +
94 sizeof(struct ubi_fm_scan_pool) +
95 (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
96 (sizeof(struct ubi_fm_eba) +
97 (ubi->peb_count * sizeof(__be32))) +
98 sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
99 return roundup(size, ubi->leb_size);
100}
101
102
103/**
104 * new_fm_vhdr - allocate a new volume header for fastmap usage.
105 * @ubi: UBI device description object
106 * @vol_id: the VID of the new header
107 *
108 * Returns a new struct ubi_vid_hdr on success.
109 * NULL indicates out of memory.
110 */
111static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
112{
113 struct ubi_vid_hdr *new;
114
115 new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
116 if (!new)
117 goto out;
118
119 new->vol_type = UBI_VID_DYNAMIC;
120 new->vol_id = cpu_to_be32(vol_id);
121
122 /* UBI implementations without fastmap support have to delete the
123 * fastmap.
124 */
125 new->compat = UBI_COMPAT_DELETE;
126
127out:
128 return new;
129}
130
131/**
132 * add_aeb - create and add a attach erase block to a given list.
133 * @ai: UBI attach info object
134 * @list: the target list
135 * @pnum: PEB number of the new attach erase block
136 * @ec: erease counter of the new LEB
137 * @scrub: scrub this PEB after attaching
138 *
139 * Returns 0 on success, < 0 indicates an internal error.
140 */
141static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
142 int pnum, int ec, int scrub)
143{
144 struct ubi_ainf_peb *aeb;
145
146 aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
147 if (!aeb)
148 return -ENOMEM;
149
150 aeb->pnum = pnum;
151 aeb->ec = ec;
152 aeb->lnum = -1;
153 aeb->scrub = scrub;
154 aeb->copy_flag = aeb->sqnum = 0;
155
156 ai->ec_sum += aeb->ec;
157 ai->ec_count++;
158
159 if (ai->max_ec < aeb->ec)
160 ai->max_ec = aeb->ec;
161
162 if (ai->min_ec > aeb->ec)
163 ai->min_ec = aeb->ec;
164
165 list_add_tail(&aeb->u.list, list);
166
167 return 0;
168}
169
170/**
171 * add_vol - create and add a new volume to ubi_attach_info.
172 * @ai: ubi_attach_info object
173 * @vol_id: VID of the new volume
174 * @used_ebs: number of used EBS
175 * @data_pad: data padding value of the new volume
176 * @vol_type: volume type
177 * @last_eb_bytes: number of bytes in the last LEB
178 *
179 * Returns the new struct ubi_ainf_volume on success.
180 * NULL indicates an error.
181 */
182static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
183 int used_ebs, int data_pad, u8 vol_type,
184 int last_eb_bytes)
185{
186 struct ubi_ainf_volume *av;
187 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
188
189 while (*p) {
190 parent = *p;
191 av = rb_entry(parent, struct ubi_ainf_volume, rb);
192
193 if (vol_id > av->vol_id)
194 p = &(*p)->rb_left;
195 else if (vol_id < av->vol_id)
196 p = &(*p)->rb_right;
197 else
198 return ERR_PTR(-EINVAL);
199 }
200
201 av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
202 if (!av)
203 goto out;
204
205 av->highest_lnum = av->leb_count = av->used_ebs = 0;
206 av->vol_id = vol_id;
207 av->data_pad = data_pad;
208 av->last_data_size = last_eb_bytes;
209 av->compat = 0;
210 av->vol_type = vol_type;
211 av->root = RB_ROOT;
212 if (av->vol_type == UBI_STATIC_VOLUME)
213 av->used_ebs = used_ebs;
214
215 dbg_bld("found volume (ID %i)", vol_id);
216
217 rb_link_node(&av->rb, parent, p);
218 rb_insert_color(&av->rb, &ai->volumes);
219
220out:
221 return av;
222}
223
224/**
225 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
226 * from it's original list.
227 * @ai: ubi_attach_info object
228 * @aeb: the to be assigned SEB
229 * @av: target scan volume
230 */
231static void assign_aeb_to_av(struct ubi_attach_info *ai,
232 struct ubi_ainf_peb *aeb,
233 struct ubi_ainf_volume *av)
234{
235 struct ubi_ainf_peb *tmp_aeb;
236 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
237
238 p = &av->root.rb_node;
239 while (*p) {
240 parent = *p;
241
242 tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
243 if (aeb->lnum != tmp_aeb->lnum) {
244 if (aeb->lnum < tmp_aeb->lnum)
245 p = &(*p)->rb_left;
246 else
247 p = &(*p)->rb_right;
248
249 continue;
250 } else
251 break;
252 }
253
254 list_del(&aeb->u.list);
255 av->leb_count++;
256
257 rb_link_node(&aeb->u.rb, parent, p);
258 rb_insert_color(&aeb->u.rb, &av->root);
259}
260
261/**
262 * update_vol - inserts or updates a LEB which was found a pool.
263 * @ubi: the UBI device object
264 * @ai: attach info object
265 * @av: the volume this LEB belongs to
266 * @new_vh: the volume header derived from new_aeb
267 * @new_aeb: the AEB to be examined
268 *
269 * Returns 0 on success, < 0 indicates an internal error.
270 */
271static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
272 struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
273 struct ubi_ainf_peb *new_aeb)
274{
275 struct rb_node **p = &av->root.rb_node, *parent = NULL;
276 struct ubi_ainf_peb *aeb, *victim;
277 int cmp_res;
278
279 while (*p) {
280 parent = *p;
281 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
282
283 if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
284 if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
285 p = &(*p)->rb_left;
286 else
287 p = &(*p)->rb_right;
288
289 continue;
290 }
291
292 /* This case can happen if the fastmap gets written
293 * because of a volume change (creation, deletion, ..).
294 * Then a PEB can be within the persistent EBA and the pool.
295 */
296 if (aeb->pnum == new_aeb->pnum) {
297 ubi_assert(aeb->lnum == new_aeb->lnum);
298 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
299
300 return 0;
301 }
302
303 cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
304 if (cmp_res < 0)
305 return cmp_res;
306
307 /* new_aeb is newer */
308 if (cmp_res & 1) {
309 victim = kmem_cache_alloc(ai->aeb_slab_cache,
310 GFP_KERNEL);
311 if (!victim)
312 return -ENOMEM;
313
314 victim->ec = aeb->ec;
315 victim->pnum = aeb->pnum;
316 list_add_tail(&victim->u.list, &ai->erase);
317
318 if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
319 av->last_data_size =
320 be32_to_cpu(new_vh->data_size);
321
322 dbg_bld("vol %i: AEB %i's PEB %i is the newer",
323 av->vol_id, aeb->lnum, new_aeb->pnum);
324
325 aeb->ec = new_aeb->ec;
326 aeb->pnum = new_aeb->pnum;
327 aeb->copy_flag = new_vh->copy_flag;
328 aeb->scrub = new_aeb->scrub;
329 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
330
331 /* new_aeb is older */
332 } else {
333 dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
334 av->vol_id, aeb->lnum, new_aeb->pnum);
335 list_add_tail(&new_aeb->u.list, &ai->erase);
336 }
337
338 return 0;
339 }
340 /* This LEB is new, let's add it to the volume */
341
342 if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
343 av->highest_lnum = be32_to_cpu(new_vh->lnum);
344 av->last_data_size = be32_to_cpu(new_vh->data_size);
345 }
346
347 if (av->vol_type == UBI_STATIC_VOLUME)
348 av->used_ebs = be32_to_cpu(new_vh->used_ebs);
349
350 av->leb_count++;
351
352 rb_link_node(&new_aeb->u.rb, parent, p);
353 rb_insert_color(&new_aeb->u.rb, &av->root);
354
355 return 0;
356}
357
358/**
359 * process_pool_aeb - we found a non-empty PEB in a pool.
360 * @ubi: UBI device object
361 * @ai: attach info object
362 * @new_vh: the volume header derived from new_aeb
363 * @new_aeb: the AEB to be examined
364 *
365 * Returns 0 on success, < 0 indicates an internal error.
366 */
367static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
368 struct ubi_vid_hdr *new_vh,
369 struct ubi_ainf_peb *new_aeb)
370{
371 struct ubi_ainf_volume *av, *tmp_av = NULL;
372 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
373 int found = 0;
374
375 if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
376 be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
377 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
378
379 return 0;
380 }
381
382 /* Find the volume this SEB belongs to */
383 while (*p) {
384 parent = *p;
385 tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
386
387 if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
388 p = &(*p)->rb_left;
389 else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
390 p = &(*p)->rb_right;
391 else {
392 found = 1;
393 break;
394 }
395 }
396
397 if (found)
398 av = tmp_av;
399 else {
400 ubi_err(ubi, "orphaned volume in fastmap pool!");
401 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
402 return UBI_BAD_FASTMAP;
403 }
404
405 ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
406
407 return update_vol(ubi, ai, av, new_vh, new_aeb);
408}
409
410/**
411 * unmap_peb - unmap a PEB.
412 * @ai: UBI attach info object
413 * @pnum: The PEB to be unmapped
414 *
415 * If fastmap detects a free PEB in the pool it has to check whether
416 * this PEB has been unmapped after writing the fastmap.
417 */
418static void unmap_peb(struct ubi_attach_info *ai, int pnum)
419{
420 struct ubi_ainf_volume *av;
421 struct rb_node *node, *node2;
422 struct ubi_ainf_peb *aeb;
423
424 for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
425 av = rb_entry(node, struct ubi_ainf_volume, rb);
426
427 for (node2 = rb_first(&av->root); node2;
428 node2 = rb_next(node2)) {
429 aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
430 if (aeb->pnum == pnum) {
431 rb_erase(&aeb->u.rb, &av->root);
432 av->leb_count--;
433 kmem_cache_free(ai->aeb_slab_cache, aeb);
434 return;
435 }
436 }
437 }
438}
439
440/**
441 * scan_pool - scans a pool for changed (no longer empty) PEBs.
442 * @ubi: UBI device object
443 * @ai: attach info object
444 * @pebs: an array of all PEB numbers in the pool to be scanned
445 * @pool_size: size of the pool (number of entries in @pebs)
446 * @max_sqnum: pointer to the maximal sequence number
447 * @free: list of PEBs which are most likely free (and go into @ai->free)
448 *
449 * Returns 0 on success, UBI_BAD_FASTMAP if the pool is unusable.
450 * < 0 indicates an internal error.
451 */
452static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
453 __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
454 struct list_head *free)
455{
456 struct ubi_vid_hdr *vh;
457 struct ubi_ec_hdr *ech;
458 struct ubi_ainf_peb *new_aeb;
459 int i, pnum, err, ret = 0;
460
461 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
462 if (!ech)
463 return -ENOMEM;
464
465 vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
466 if (!vh) {
467 kfree(ech);
468 return -ENOMEM;
469 }
470
471 dbg_bld("scanning fastmap pool: size = %i", pool_size);
472
473 /*
474 * Now scan all PEBs in the pool to find changes which have been made
475 * after the creation of the fastmap
476 */
477 for (i = 0; i < pool_size; i++) {
478 int scrub = 0;
479 int image_seq;
480
481 pnum = be32_to_cpu(pebs[i]);
482
483 if (ubi_io_is_bad(ubi, pnum)) {
484 ubi_err(ubi, "bad PEB in fastmap pool!");
485 ret = UBI_BAD_FASTMAP;
486 goto out;
487 }
488
489 err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
490 if (err && err != UBI_IO_BITFLIPS) {
491 ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
492 pnum, err);
493 ret = err > 0 ? UBI_BAD_FASTMAP : err;
494 goto out;
495 } else if (err == UBI_IO_BITFLIPS)
496 scrub = 1;
497
498 /*
499 * Older UBI implementations have image_seq set to zero, so
500 * we shouldn't fail if image_seq == 0.
501 */
502 image_seq = be32_to_cpu(ech->image_seq);
503
504 if (image_seq && (image_seq != ubi->image_seq)) {
505 ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
506 be32_to_cpu(ech->image_seq), ubi->image_seq);
507 ret = UBI_BAD_FASTMAP;
508 goto out;
509 }
510
511 err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
512 if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
513 unsigned long long ec = be64_to_cpu(ech->ec);
514 unmap_peb(ai, pnum);
515 dbg_bld("Adding PEB to free: %i", pnum);
516 if (err == UBI_IO_FF_BITFLIPS)
517 ret = add_aeb(ai, free, pnum, ec, 1);
518 else
519 ret = add_aeb(ai, free, pnum, ec, 0);
if (ret)
goto out;
520 continue;
521 } else if (err == 0 || err == UBI_IO_BITFLIPS) {
522 dbg_bld("Found non-empty PEB:%i in pool", pnum);
523
524 if (err == UBI_IO_BITFLIPS)
525 scrub = 1;
526
527 new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
528 GFP_KERNEL);
529 if (!new_aeb) {
530 ret = -ENOMEM;
531 goto out;
532 }
533
534 new_aeb->ec = be64_to_cpu(ech->ec);
535 new_aeb->pnum = pnum;
536 new_aeb->lnum = be32_to_cpu(vh->lnum);
537 new_aeb->sqnum = be64_to_cpu(vh->sqnum);
538 new_aeb->copy_flag = vh->copy_flag;
539 new_aeb->scrub = scrub;
540
541 if (*max_sqnum < new_aeb->sqnum)
542 *max_sqnum = new_aeb->sqnum;
543
544 err = process_pool_aeb(ubi, ai, vh, new_aeb);
545 if (err) {
546 ret = err > 0 ? UBI_BAD_FASTMAP : err;
547 goto out;
548 }
549 } else {
550 /* We are paranoid and fall back to scanning mode */
551 ubi_err(ubi, "fastmap pool contains damaged PEBs!");
552 ret = err > 0 ? UBI_BAD_FASTMAP : err;
553 goto out;
554 }
555
556 }
557
558out:
559 ubi_free_vid_hdr(ubi, vh);
560 kfree(ech);
561 return ret;
562}
563
564/**
565 * count_fastmap_pebs - Counts the PEBs found by fastmap.
566 * @ai: The UBI attach info object
567 */
568static int count_fastmap_pebs(struct ubi_attach_info *ai)
569{
570 struct ubi_ainf_peb *aeb;
571 struct ubi_ainf_volume *av;
572 struct rb_node *rb1, *rb2;
573 int n = 0;
574
575 list_for_each_entry(aeb, &ai->erase, u.list)
576 n++;
577
578 list_for_each_entry(aeb, &ai->free, u.list)
579 n++;
580
581 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
582 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
583 n++;
584
585 return n;
586}
587
588/**
589 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
590 * @ubi: UBI device object
591 * @ai: UBI attach info object
592 * @fm: the fastmap to be attached
593 *
594 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
595 * < 0 indicates an internal error.
596 */
597static int ubi_attach_fastmap(struct ubi_device *ubi,
598 struct ubi_attach_info *ai,
599 struct ubi_fastmap_layout *fm)
600{
601 struct list_head used, free;
602 struct ubi_ainf_volume *av;
603 struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
604 struct ubi_fm_sb *fmsb;
605 struct ubi_fm_hdr *fmhdr;
606 struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
607 struct ubi_fm_ec *fmec;
608 struct ubi_fm_volhdr *fmvhdr;
609 struct ubi_fm_eba *fm_eba;
610 int ret, i, j, pool_size, wl_pool_size;
611 size_t fm_pos = 0, fm_size = ubi->fm_size;
612 unsigned long long max_sqnum = 0;
613 void *fm_raw = ubi->fm_buf;
614
615 INIT_LIST_HEAD(&used);
616 INIT_LIST_HEAD(&free);
617 ai->min_ec = UBI_MAX_ERASECOUNTER;
618
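/*
 * Parse the raw fastmap image sequentially. fm_pos is the current
 * offset into fm_buf and every advance is checked against fm_size so
 * a truncated or corrupted fastmap cannot push the parser past the
 * buffer.
 */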
619 fmsb = (struct ubi_fm_sb *)(fm_raw);
620 ai->max_sqnum = fmsb->sqnum;
621 fm_pos += sizeof(struct ubi_fm_sb);
622 if (fm_pos >= fm_size)
623 goto fail_bad;
624
625 fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
626 fm_pos += sizeof(*fmhdr);
627 if (fm_pos >= fm_size)
628 goto fail_bad;
629
630 if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
631 ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
632 be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
633 goto fail_bad;
634 }
635
636 fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
637 fm_pos += sizeof(*fmpl);
638 if (fm_pos >= fm_size)
639 goto fail_bad;
640 if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
641 ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
642 be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
643 goto fail_bad;
644 }
645
646 fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
647 fm_pos += sizeof(*fmpl_wl);
648 if (fm_pos >= fm_size)
649 goto fail_bad;
650 if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
651 ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
652 be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
653 goto fail_bad;
654 }
655
656 pool_size = be16_to_cpu(fmpl->size);
657 wl_pool_size = be16_to_cpu(fmpl_wl->size);
658 fm->max_pool_size = be16_to_cpu(fmpl->max_size);
659 fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);
660
661 if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
662 ubi_err(ubi, "bad pool size: %i", pool_size);
663 goto fail_bad;
664 }
665
666 if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
667 ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
668 goto fail_bad;
669 }
670
671
672 if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
673 fm->max_pool_size < 0) {
674 ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
675 goto fail_bad;
676 }
677
678 if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
679 fm->max_wl_pool_size < 0) {
680 ubi_err(ubi, "bad maximal WL pool size: %i",
681 fm->max_wl_pool_size);
682 goto fail_bad;
683 }
684
685 /* read EC values from free list */
686 for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
687 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
688 fm_pos += sizeof(*fmec);
689 if (fm_pos >= fm_size)
690 goto fail_bad;
691
692 ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
693 be32_to_cpu(fmec->ec), 0);
if (ret)
goto fail;
694 }
695
696 /* read EC values from used list */
697 for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
698 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
699 fm_pos += sizeof(*fmec);
700 if (fm_pos >= fm_size)
701 goto fail_bad;
702
703 ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
704 be32_to_cpu(fmec->ec), 0);
if (ret)
goto fail;
705 }
706
707 /* read EC values from scrub list */
708 for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
709 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
710 fm_pos += sizeof(*fmec);
711 if (fm_pos >= fm_size)
712 goto fail_bad;
713
714 ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
715 be32_to_cpu(fmec->ec), 1);
if (ret)
goto fail;
716 }
717
718 /* read EC values from erase list */
719 for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
720 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
721 fm_pos += sizeof(*fmec);
722 if (fm_pos >= fm_size)
723 goto fail_bad;
724
725 ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
726 be32_to_cpu(fmec->ec), 1);
if (ret)
goto fail;
727 }
728
729 ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
730 ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
731
732 /* Iterate over all volumes and read their EBA table */
733 for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
734 fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
735 fm_pos += sizeof(*fmvhdr);
736 if (fm_pos >= fm_size)
737 goto fail_bad;
738
739 if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
740 ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
741 be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
742 goto fail_bad;
743 }
744
745 av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
746 be32_to_cpu(fmvhdr->used_ebs),
747 be32_to_cpu(fmvhdr->data_pad),
748 fmvhdr->vol_type,
749 be32_to_cpu(fmvhdr->last_eb_bytes));
750
751 if (!av)
752 goto fail_bad;
753 if (PTR_ERR(av) == -EINVAL) {
754 ubi_err(ubi, "volume (ID %i) already exists",
755 be32_to_cpu(fmvhdr->vol_id));
756 goto fail_bad;
757 }
758
759 ai->vols_found++;
760 if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
761 ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
762
763 fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
764 fm_pos += sizeof(*fm_eba);
765 fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
766 if (fm_pos >= fm_size)
767 goto fail_bad;
768
769 if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
770 ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
771 be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
772 goto fail_bad;
773 }
774
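/*
 * Each EBA slot must reference a PEB that the fastmap listed as used;
 * look it up in the temporary used list and hand it over to the volume
 * via assign_aeb_to_av().
 */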
775 for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
776 int pnum = be32_to_cpu(fm_eba->pnum[j]);
777
778 if (pnum < 0)
779 continue;
780
781 aeb = NULL;
782 list_for_each_entry(tmp_aeb, &used, u.list) {
783 if (tmp_aeb->pnum == pnum) {
784 aeb = tmp_aeb;
785 break;
786 }
787 }
788
789 if (!aeb) {
790 ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
791 goto fail_bad;
792 }
793
794 aeb->lnum = j;
795
796 if (av->highest_lnum <= aeb->lnum)
797 av->highest_lnum = aeb->lnum;
798
799 assign_aeb_to_av(ai, aeb, av);
800
801 dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
802 aeb->pnum, aeb->lnum, av->vol_id);
803 }
804 }
805
806 ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
807 if (ret)
808 goto fail;
809
810 ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
811 if (ret)
812 goto fail;
813
814 if (max_sqnum > ai->max_sqnum)
815 ai->max_sqnum = max_sqnum;
816
817 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
818 list_move_tail(&tmp_aeb->u.list, &ai->free);
819
820 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
821 list_move_tail(&tmp_aeb->u.list, &ai->erase);
822
823 ubi_assert(list_empty(&free));
824
825 /*
826 * If fastmap is leaking PEBs (must not happen), raise a
827 * fat warning and fall back to scanning mode.
828 * We do this here because in ubi_wl_init() it's too late
829 * and we cannot fall back to scanning.
830 */
831 if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
832 ai->bad_peb_count - fm->used_blocks))
833 goto fail_bad;
834
835 return 0;
836
837fail_bad:
838 ret = UBI_BAD_FASTMAP;
839fail:
840 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
841 list_del(&tmp_aeb->u.list);
842 kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
843 }
844 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
845 list_del(&tmp_aeb->u.list);
846 kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
847 }
848
849 return ret;
850}
851
852/**
853 * ubi_scan_fastmap - scan the fastmap.
854 * @ubi: UBI device object
855 * @ai: UBI attach info to be filled
856 * @fm_anchor: The fastmap starts at this PEB
857 *
858 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
859 * UBI_BAD_FASTMAP if one was found but is not usable.
860 * < 0 indicates an internal error.
861 */
862int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
863 int fm_anchor)
864{
865 struct ubi_fm_sb *fmsb, *fmsb2;
866 struct ubi_vid_hdr *vh;
867 struct ubi_ec_hdr *ech;
868 struct ubi_fastmap_layout *fm;
869 int i, used_blocks, pnum, ret = 0;
870 size_t fm_size;
871 __be32 crc, tmp_crc;
872 unsigned long long sqnum = 0;
873
874 down_write(&ubi->fm_protect);
875 memset(ubi->fm_buf, 0, ubi->fm_size);
876
877 fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
878 if (!fmsb) {
879 ret = -ENOMEM;
880 goto out;
881 }
882
883 fm = kzalloc(sizeof(*fm), GFP_KERNEL);
884 if (!fm) {
885 ret = -ENOMEM;
886 kfree(fmsb);
887 goto out;
888 }
889
890 ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
891 if (ret && ret != UBI_IO_BITFLIPS)
892 goto free_fm_sb;
893 else if (ret == UBI_IO_BITFLIPS)
894 fm->to_be_tortured[0] = 1;
895
896 if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
897 ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
898 be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
899 ret = UBI_BAD_FASTMAP;
900 goto free_fm_sb;
901 }
902
903 if (fmsb->version != UBI_FM_FMT_VERSION) {
904 ubi_err(ubi, "bad fastmap version: %i, expected: %i",
905 fmsb->version, UBI_FM_FMT_VERSION);
906 ret = UBI_BAD_FASTMAP;
907 goto free_fm_sb;
908 }
909
910 used_blocks = be32_to_cpu(fmsb->used_blocks);
911 if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
912 ubi_err(ubi, "number of fastmap blocks is invalid: %i",
913 used_blocks);
914 ret = UBI_BAD_FASTMAP;
915 goto free_fm_sb;
916 }
917
918 fm_size = ubi->leb_size * used_blocks;
919 if (fm_size != ubi->fm_size) {
920 ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
921 fm_size, ubi->fm_size);
922 ret = UBI_BAD_FASTMAP;
923 goto free_fm_sb;
924 }
925
926 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
927 if (!ech) {
928 ret = -ENOMEM;
929 goto free_fm_sb;
930 }
931
932 vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
933 if (!vh) {
934 ret = -ENOMEM;
935 goto free_hdr;
936 }
937
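/*
 * Read all PEBs that make up the fastmap: check their EC and VID
 * headers (the first block must carry the anchor volume ID, the
 * others the data volume ID) and copy the payload into ubi->fm_buf
 * for the CRC check and parsing below.
 */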
938 for (i = 0; i < used_blocks; i++) {
939 int image_seq;
940
941 pnum = be32_to_cpu(fmsb->block_loc[i]);
942
943 if (ubi_io_is_bad(ubi, pnum)) {
944 ret = UBI_BAD_FASTMAP;
945 goto free_hdr;
946 }
947
948 ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
949 if (ret && ret != UBI_IO_BITFLIPS) {
950 ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
951 i, pnum);
952 if (ret > 0)
953 ret = UBI_BAD_FASTMAP;
954 goto free_hdr;
955 } else if (ret == UBI_IO_BITFLIPS)
956 fm->to_be_tortured[i] = 1;
957
958 image_seq = be32_to_cpu(ech->image_seq);
959 if (!ubi->image_seq)
960 ubi->image_seq = image_seq;
961
962 /*
963 * Older UBI implementations have image_seq set to zero, so
964 * we shouldn't fail if image_seq == 0.
965 */
966 if (image_seq && (image_seq != ubi->image_seq)) {
967 ubi_err(ubi, "wrong image seq:%d instead of %d",
968 be32_to_cpu(ech->image_seq), ubi->image_seq);
969 ret = UBI_BAD_FASTMAP;
970 goto free_hdr;
971 }
972
973 ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
974 if (ret && ret != UBI_IO_BITFLIPS) {
975 ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
976 i, pnum);
977 goto free_hdr;
978 }
979
980 if (i == 0) {
981 if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
982 ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
983 be32_to_cpu(vh->vol_id),
984 UBI_FM_SB_VOLUME_ID);
985 ret = UBI_BAD_FASTMAP;
986 goto free_hdr;
987 }
988 } else {
989 if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
990 ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
991 be32_to_cpu(vh->vol_id),
992 UBI_FM_DATA_VOLUME_ID);
993 ret = UBI_BAD_FASTMAP;
994 goto free_hdr;
995 }
996 }
997
998 if (sqnum < be64_to_cpu(vh->sqnum))
999 sqnum = be64_to_cpu(vh->sqnum);
1000
1001 ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
1002 ubi->leb_start, ubi->leb_size);
1003 if (ret && ret != UBI_IO_BITFLIPS) {
1004 ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, err: %i)",
1005 i, pnum, ret);
1006 goto free_hdr;
1007 }
1008 }
1009
1010 kfree(fmsb);
1011 fmsb = NULL;
1012
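/*
 * The stored CRC was computed with the data_crc field set to zero, so
 * clear it before recomputing the checksum over the whole image.
 */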
1013 fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
1014 tmp_crc = be32_to_cpu(fmsb2->data_crc);
1015 fmsb2->data_crc = 0;
1016 crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
1017 if (crc != tmp_crc) {
1018 ubi_err(ubi, "fastmap data CRC is invalid");
1019 ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
1020 tmp_crc, crc);
1021 ret = UBI_BAD_FASTMAP;
1022 goto free_hdr;
1023 }
1024
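/*
 * Keep the highest sequence number found in the fastmap block headers;
 * ubi_attach_fastmap() reads it back as ai->max_sqnum.
 */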
1025 fmsb2->sqnum = sqnum;
1026
1027 fm->used_blocks = used_blocks;
1028
1029 ret = ubi_attach_fastmap(ubi, ai, fm);
1030 if (ret) {
1031 if (ret > 0)
1032 ret = UBI_BAD_FASTMAP;
1033 goto free_hdr;
1034 }
1035
1036 for (i = 0; i < used_blocks; i++) {
1037 struct ubi_wl_entry *e;
1038
1039 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1040 if (!e) {
1041 while (i--)
1042 kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);
1043
1044 ret = -ENOMEM;
1045 goto free_hdr;
1046 }
1047
1048 e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
1049 e->ec = be32_to_cpu(fmsb2->block_ec[i]);
1050 fm->e[i] = e;
1051 }
1052
1053 ubi->fm = fm;
1054 ubi->fm_pool.max_size = ubi->fm->max_pool_size;
1055 ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
1056 ubi_msg(ubi, "attached by fastmap");
1057 ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
1058 ubi_msg(ubi, "fastmap WL pool size: %d",
1059 ubi->fm_wl_pool.max_size);
1060 ubi->fm_disabled = 0;
1061
1062 ubi_free_vid_hdr(ubi, vh);
1063 kfree(ech);
1064out:
1065 up_write(&ubi->fm_protect);
1066 if (ret == UBI_BAD_FASTMAP)
1067 ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
1068 return ret;
1069
1070free_hdr:
1071 ubi_free_vid_hdr(ubi, vh);
1072 kfree(ech);
1073free_fm_sb:
1074 kfree(fmsb);
1075 kfree(fm);
1076 goto out;
1077}
1078
1079/**
1080 * ubi_write_fastmap - writes a fastmap.
1081 * @ubi: UBI device object
1082 * @new_fm: the to be written fastmap
1083 *
1084 * Returns 0 on success, < 0 indicates an internal error.
1085 */
1086static int ubi_write_fastmap(struct ubi_device *ubi,
1087 struct ubi_fastmap_layout *new_fm)
1088{
1089 size_t fm_pos = 0;
1090 void *fm_raw;
1091 struct ubi_fm_sb *fmsb;
1092 struct ubi_fm_hdr *fmh;
1093 struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
1094 struct ubi_fm_ec *fec;
1095 struct ubi_fm_volhdr *fvh;
1096 struct ubi_fm_eba *feba;
1097 struct ubi_wl_entry *wl_e;
1098 struct ubi_volume *vol;
1099 struct ubi_vid_io_buf *avbuf = NULL, *dvbuf = NULL;
struct ubi_vid_hdr *avhdr, *dvhdr;
1100 struct ubi_work *ubi_wrk;
1101 struct rb_node *tmp_rb;
1102 int ret, i, j, free_peb_count, used_peb_count, vol_count;
1103 int scrub_peb_count, erase_peb_count;
1104 unsigned long *seen_pebs = NULL;
1105
1106 fm_raw = ubi->fm_buf;
1107 memset(ubi->fm_buf, 0, ubi->fm_size);
1108
1109 avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
1110 if (!avbuf) {
1111 ret = -ENOMEM;
1112 goto out;
1113 }
1114
1115 dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
1116 if (!dvbuf) {
1117 ret = -ENOMEM;
1118 goto out_kfree;
1119 }

avhdr = ubi_get_vid_hdr(avbuf);
dvhdr = ubi_get_vid_hdr(dvbuf);
1120
1121 seen_pebs = init_seen(ubi);
1122 if (IS_ERR(seen_pebs)) {
1123 ret = PTR_ERR(seen_pebs);
seen_pebs = NULL;
1124 goto out_kfree;
1125 }
1126
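/*
 * Hold volumes_lock and wl_lock while the in-memory state is
 * serialized so the fastmap describes one consistent snapshot; both
 * locks are dropped again before any flash I/O is done.
 */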
1127 spin_lock(&ubi->volumes_lock);
1128 spin_lock(&ubi->wl_lock);
1129
1130 fmsb = (struct ubi_fm_sb *)fm_raw;
1131 fm_pos += sizeof(*fmsb);
1132 ubi_assert(fm_pos <= ubi->fm_size);
1133
1134 fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
1135 fm_pos += sizeof(*fmh);
1136 ubi_assert(fm_pos <= ubi->fm_size);
1137
1138 fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
1139 fmsb->version = UBI_FM_FMT_VERSION;
1140 fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
1141 /* the max sqnum will be filled in while *reading* the fastmap */
1142 fmsb->sqnum = 0;
1143
1144 fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
1145 free_peb_count = 0;
1146 used_peb_count = 0;
1147 scrub_peb_count = 0;
1148 erase_peb_count = 0;
1149 vol_count = 0;
1150
1151 fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1152 fm_pos += sizeof(*fmpl);
1153 fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1154 fmpl->size = cpu_to_be16(ubi->fm_pool.size);
1155 fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);
1156
1157 for (i = 0; i < ubi->fm_pool.size; i++) {
1158 fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
1159 set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
1160 }
1161
1162 fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1163 fm_pos += sizeof(*fmpl_wl);
1164 fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1165 fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
1166 fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
1167
1168 for (i = 0; i < ubi->fm_wl_pool.size; i++) {
1169 fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
1170 set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
1171 }
1172
1173 ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
1174 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1175
1176 fec->pnum = cpu_to_be32(wl_e->pnum);
1177 set_seen(ubi, wl_e->pnum, seen_pebs);
1178 fec->ec = cpu_to_be32(wl_e->ec);
1179
1180 free_peb_count++;
1181 fm_pos += sizeof(*fec);
1182 ubi_assert(fm_pos <= ubi->fm_size);
1183 }
1184 fmh->free_peb_count = cpu_to_be32(free_peb_count);
1185
1186 ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
1187 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1188
1189 fec->pnum = cpu_to_be32(wl_e->pnum);
1190 set_seen(ubi, wl_e->pnum, seen_pebs);
1191 fec->ec = cpu_to_be32(wl_e->ec);
1192
1193 used_peb_count++;
1194 fm_pos += sizeof(*fec);
1195 ubi_assert(fm_pos <= ubi->fm_size);
1196 }
1197
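/* PEBs in the protection queue are still mapped, so count them as used. */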
1198 ubi_for_each_protected_peb(ubi, i, wl_e) {
1199 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1200
1201 fec->pnum = cpu_to_be32(wl_e->pnum);
1202 set_seen(ubi, wl_e->pnum, seen_pebs);
1203 fec->ec = cpu_to_be32(wl_e->ec);
1204
1205 used_peb_count++;
1206 fm_pos += sizeof(*fec);
1207 ubi_assert(fm_pos <= ubi->fm_size);
1208 }
1209 fmh->used_peb_count = cpu_to_be32(used_peb_count);
1210
1211 ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
1212 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1213
1214 fec->pnum = cpu_to_be32(wl_e->pnum);
1215 set_seen(ubi, wl_e->pnum, seen_pebs);
1216 fec->ec = cpu_to_be32(wl_e->ec);
1217
1218 scrub_peb_count++;
1219 fm_pos += sizeof(*fec);
1220 ubi_assert(fm_pos <= ubi->fm_size);
1221 }
1222 fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
1223
1224
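/*
 * PEBs with a pending erase work item have not been wiped yet, so
 * record them in the erase list for the next attach.
 */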
1225 list_for_each_entry(ubi_wrk, &ubi->works, list) {
1226 if (ubi_is_erase_work(ubi_wrk)) {
1227 wl_e = ubi_wrk->e;
1228 ubi_assert(wl_e);
1229
1230 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1231
1232 fec->pnum = cpu_to_be32(wl_e->pnum);
1233 set_seen(ubi, wl_e->pnum, seen_pebs);
1234 fec->ec = cpu_to_be32(wl_e->ec);
1235
1236 erase_peb_count++;
1237 fm_pos += sizeof(*fec);
1238 ubi_assert(fm_pos <= ubi->fm_size);
1239 }
1240 }
1241 fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
1242
1243 for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
1244 vol = ubi->volumes[i];
1245
1246 if (!vol)
1247 continue;
1248
1249 vol_count++;
1250
1251 fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
1252 fm_pos += sizeof(*fvh);
1253 ubi_assert(fm_pos <= ubi->fm_size);
1254
1255 fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
1256 fvh->vol_id = cpu_to_be32(vol->vol_id);
1257 fvh->vol_type = vol->vol_type;
1258 fvh->used_ebs = cpu_to_be32(vol->used_ebs);
1259 fvh->data_pad = cpu_to_be32(vol->data_pad);
1260 fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
1261
1262 ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
1263 vol->vol_type == UBI_STATIC_VOLUME);
1264
1265 feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
1266 fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
1267 ubi_assert(fm_pos <= ubi->fm_size);
1268
1269 for (j = 0; j < vol->reserved_pebs; j++)
1270 feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
1271
1272 feba->reserved_pebs = cpu_to_be32(j);
1273 feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
1274 }
1275 fmh->vol_count = cpu_to_be32(vol_count);
1276 fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
1277
1278 avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1279 avhdr->lnum = 0;
1280
1281 spin_unlock(&ubi->wl_lock);
1282 spin_unlock(&ubi->volumes_lock);
1283
1284 dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
1285 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
1286 if (ret) {
1287 ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
1288 goto out_kfree;
1289 }
1290
1291 for (i = 0; i < new_fm->used_blocks; i++) {
1292 fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
1293 set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
1294 fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
1295 }
1296
1297 fmsb->data_crc = 0;
1298 fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
1299 ubi->fm_size));
1300
1301 for (i = 1; i < new_fm->used_blocks; i++) {
1302 dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1303 dvhdr->lnum = cpu_to_be32(i);
1304 dbg_bld("writing fastmap data to PEB %i sqnum %llu",
1305 new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
1306 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
1307 if (ret) {
1308 ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
1309 new_fm->e[i]->pnum);
1310 goto out_kfree;
1311 }
1312 }
1313
1314 for (i = 0; i < new_fm->used_blocks; i++) {
1315 ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
1316 new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
1317 if (ret) {
1318 ubi_err(ubi, "unable to write fastmap to PEB %i!",
1319 new_fm->e[i]->pnum);
1320 goto out_kfree;
1321 }
1322 }
1323
1324 ubi_assert(new_fm);
1325 ubi->fm = new_fm;
1326
1327 ret = self_check_seen(ubi, seen_pebs);
1328 dbg_bld("fastmap written!");
1329
1330out_kfree:
1331 ubi_free_vid_buf(avbuf);
1332 ubi_free_vid_buf(dvbuf);
1333 free_seen(seen_pebs);
1334out:
1335 return ret;
1336}
1337
1338/**
1339 * erase_block - Manually erase a PEB.
1340 * @ubi: UBI device object
1341 * @pnum: PEB to be erased
1342 *
1343 * Returns the new EC value on success, < 0 indicates an internal error.
1344 */
1345static int erase_block(struct ubi_device *ubi, int pnum)
1346{
1347 int ret;
1348 struct ubi_ec_hdr *ec_hdr;
1349 long long ec;
1350
1351 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1352 if (!ec_hdr)
1353 return -ENOMEM;
1354
1355 ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1356 if (ret < 0)
1357 goto out;
1358 else if (ret && ret != UBI_IO_BITFLIPS) {
1359 ret = -EINVAL;
1360 goto out;
1361 }
1362
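/*
 * ubi_io_sync_erase() returns the number of erase operations it
 * performed, which is added to the old erase counter below.
 */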
1363 ret = ubi_io_sync_erase(ubi, pnum, 0);
1364 if (ret < 0)
1365 goto out;
1366
1367 ec = be64_to_cpu(ec_hdr->ec);
1368 ec += ret;
1369 if (ec > UBI_MAX_ERASECOUNTER) {
1370 ret = -EINVAL;
1371 goto out;
1372 }
1373
1374 ec_hdr->ec = cpu_to_be64(ec);
1375 ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
1376 if (ret < 0)
1377 goto out;
1378
1379 ret = ec;
1380out:
1381 kfree(ec_hdr);
1382 return ret;
1383}
1384
1385/**
1386 * invalidate_fastmap - destroys a fastmap.
1387 * @ubi: UBI device object
1388 *
1389 * This function ensures that upon next UBI attach a full scan
1390 * is issued. We need this if UBI is about to write a new fastmap
1391 * but is unable to do so. In this case we have two options:
1392 * a) Make sure that the current fastmap will not be used upon
1393 * attach time and continue, or b) fall back to RO mode to have the
1394 * current fastmap in a valid state.
1395 * Returns 0 on success, < 0 indicates an internal error.
1396 */
1397static int invalidate_fastmap(struct ubi_device *ubi)
1398{
1399 int ret;
1400 struct ubi_fastmap_layout *fm;
1401 struct ubi_wl_entry *e;
1402 struct ubi_vid_io_buf *vb = NULL;
struct ubi_vid_hdr *vh;
1403
1404 if (!ubi->fm)
1405 return 0;
1406
1407 ubi->fm = NULL;
1408
1409 ret = -ENOMEM;
1410 fm = kzalloc(sizeof(*fm), GFP_KERNEL);
1411 if (!fm)
1412 goto out;
1413
1414 vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
1415 if (!vb)
1416 goto out_free_fm;
vh = ubi_get_vid_hdr(vb);
1417
1418 ret = -ENOSPC;
1419 e = ubi_wl_get_fm_peb(ubi, 1);
1420 if (!e)
1421 goto out_free_fm;
1422
1423 /*
1424 * Create fake fastmap such that UBI will fall back
1425 * to scanning mode.
1426 */
1427 vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1428 ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
1429 if (ret < 0) {
1430 ubi_wl_put_fm_peb(ubi, e, 0, 0);
1431 goto out_free_fm;
1432 }
1433
1434 fm->used_blocks = 1;
1435 fm->e[0] = e;
1436
1437 ubi->fm = fm;
1438
1439out:
1440 ubi_free_vid_buf(vb);
1441 return ret;
1442
1443out_free_fm:
1444 kfree(fm);
1445 goto out;
1446}
1447
1448/**
1449 * return_fm_pebs - returns all PEBs used by a fastmap back to the
1450 * WL sub-system.
1451 * @ubi: UBI device object
1452 * @fm: fastmap layout object
1453 */
1454static void return_fm_pebs(struct ubi_device *ubi,
1455 struct ubi_fastmap_layout *fm)
1456{
1457 int i;
1458
1459 if (!fm)
1460 return;
1461
1462 for (i = 0; i < fm->used_blocks; i++) {
1463 if (fm->e[i]) {
1464 ubi_wl_put_fm_peb(ubi, fm->e[i], i,
1465 fm->to_be_tortured[i]);
1466 fm->e[i] = NULL;
1467 }
1468 }
1469}
1470
1471/**
1472 * ubi_update_fastmap - will be called by UBI if a volume changes or
1473 * a fastmap pool becomes full.
1474 * @ubi: UBI device object
1475 *
1476 * Returns 0 on success, < 0 indicates an internal error.
1477 */
1478int ubi_update_fastmap(struct ubi_device *ubi)
1479{
1480 int ret, i, j;
1481 struct ubi_fastmap_layout *new_fm, *old_fm;
1482 struct ubi_wl_entry *tmp_e;
1483
1484 down_write(&ubi->fm_protect);
1485
1486 ubi_refill_pools(ubi);
1487
1488 if (ubi->ro_mode || ubi->fm_disabled) {
1489 up_write(&ubi->fm_protect);
1490 return 0;
1491 }
1492
1493 ret = ubi_ensure_anchor_pebs(ubi);
1494 if (ret) {
1495 up_write(&ubi->fm_protect);
1496 return ret;
1497 }
1498
1499 new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
1500 if (!new_fm) {
1501 up_write(&ubi->fm_protect);
1502 return -ENOMEM;
1503 }
1504
1505 new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
1506 old_fm = ubi->fm;
1507 ubi->fm = NULL;
1508
1509 if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
1510 ubi_err(ubi, "fastmap too large");
1511 ret = -ENOSPC;
1512 goto err;
1513 }
1514
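/*
 * Slot 0 is the anchor PEB and is handled separately below; first get
 * PEBs for the remaining fastmap blocks, reusing the old fastmap's
 * PEBs if the WL sub-system cannot provide fresh ones.
 */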
1515 for (i = 1; i < new_fm->used_blocks; i++) {
1516 spin_lock(&ubi->wl_lock);
1517 tmp_e = ubi_wl_get_fm_peb(ubi, 0);
1518 spin_unlock(&ubi->wl_lock);
1519
1520 if (!tmp_e) {
1521 if (old_fm && old_fm->e[i]) {
1522 ret = erase_block(ubi, old_fm->e[i]->pnum);
1523 if (ret < 0) {
1524 ubi_err(ubi, "could not erase old fastmap PEB");
1525
1526 for (j = 1; j < i; j++) {
1527 ubi_wl_put_fm_peb(ubi, new_fm->e[j],
1528 j, 0);
1529 new_fm->e[j] = NULL;
1530 }
1531 goto err;
1532 }
1533 new_fm->e[i] = old_fm->e[i];
1534 old_fm->e[i] = NULL;
1535 } else {
1536 ubi_err(ubi, "could not get any free erase block");
1537
1538 for (j = 1; j < i; j++) {
1539 ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
1540 new_fm->e[j] = NULL;
1541 }
1542
1543 ret = -ENOSPC;
1544 goto err;
1545 }
1546 } else {
1547 new_fm->e[i] = tmp_e;
1548
1549 if (old_fm && old_fm->e[i]) {
1550 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1551 old_fm->to_be_tortured[i]);
1552 old_fm->e[i] = NULL;
1553 }
1554 }
1555 }
1556
1557 /* Old fastmap is larger than the new one */
1558 if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
1559 for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
1560 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1561 old_fm->to_be_tortured[i]);
1562 old_fm->e[i] = NULL;
1563 }
1564 }
1565
1566 spin_lock(&ubi->wl_lock);
1567 tmp_e = ubi_wl_get_fm_peb(ubi, 1);
1568 spin_unlock(&ubi->wl_lock);
1569
1570 if (old_fm) {
1571 /* no fresh anchor PEB was found, reuse the old one */
1572 if (!tmp_e) {
1573 ret = erase_block(ubi, old_fm->e[0]->pnum);
1574 if (ret < 0) {
1575 ubi_err(ubi, "could not erase old anchor PEB");
1576
1577 for (i = 1; i < new_fm->used_blocks; i++) {
1578 ubi_wl_put_fm_peb(ubi, new_fm->e[i],
1579 i, 0);
1580 new_fm->e[i] = NULL;
1581 }
1582 goto err;
1583 }
1584 new_fm->e[0] = old_fm->e[0];
1585 new_fm->e[0]->ec = ret;
1586 old_fm->e[0] = NULL;
1587 } else {
1588 /* we've got a new anchor PEB, return the old one */
1589 ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
1590 old_fm->to_be_tortured[0]);
1591 new_fm->e[0] = tmp_e;
1592 old_fm->e[0] = NULL;
1593 }
1594 } else {
1595 if (!tmp_e) {
1596 ubi_err(ubi, "could not find any anchor PEB");
1597
1598 for (i = 1; i < new_fm->used_blocks; i++) {
1599 ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
1600 new_fm->e[i] = NULL;
1601 }
1602
1603 ret = -ENOSPC;
1604 goto err;
1605 }
1606 new_fm->e[0] = tmp_e;
1607 }
1608
1609 down_write(&ubi->work_sem);
1610 down_write(&ubi->fm_eba_sem);
1611 ret = ubi_write_fastmap(ubi, new_fm);
1612 up_write(&ubi->fm_eba_sem);
1613 up_write(&ubi->work_sem);
1614
1615 if (ret)
1616 goto err;
1617
1618out_unlock:
1619 up_write(&ubi->fm_protect);
1620 kfree(old_fm);
1621 return ret;
1622
1623err:
1624 ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
1625
1626 ret = invalidate_fastmap(ubi);
1627 if (ret < 0) {
1628 ubi_err(ubi, "Unable to invalidate current fastmap!");
1629 ubi_ro_mode(ubi);
1630 } else {
1631 return_fm_pebs(ubi, old_fm);
1632 return_fm_pebs(ubi, new_fm);
1633 ret = 0;
1634 }
1635
1636 kfree(new_fm);
1637 goto out_unlock;
1638}