// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 */

/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
        struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);

        ubi_update_fastmap(ubi);
        spin_lock(&ubi->wl_lock);
        ubi->fm_work_scheduled = 0;
        spin_unlock(&ubi->wl_lock);
}
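
/*
 * Example (illustrative sketch, not part of this file): fm_work is assumed
 * to be set up once at attach time and kicked from atomic context when the
 * WL pool runs dry; the init site shown here is an assumption:
 *
 *        INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
 *        ...
 *        if (!ubi->fm_work_scheduled) {
 *                ubi->fm_work_scheduled = 1;
 *                schedule_work(&ubi->fm_work);
 *        }
 */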

/**
 * find_anchor_wl_entry - find a wear-leveling entry to be used as anchor PEB.
 * @root: the RB-tree to search
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
        struct rb_node *p;
        struct ubi_wl_entry *e, *victim = NULL;
        int max_ec = UBI_MAX_ERASECOUNTER;

        ubi_rb_for_each_entry(p, e, root, u.rb) {
                if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
                        victim = e;
                        max_ec = e->ec;
                }
        }

        return victim;
}

static inline void return_unused_peb(struct ubi_device *ubi,
                                     struct ubi_wl_entry *e)
{
        wl_tree_add(e, &ubi->free);
        ubi->free_count++;
}

/**
 * return_unused_pool_pebs - returns unused PEBs of a pool to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
                                    struct ubi_fm_pool *pool)
{
        int i;
        struct ubi_wl_entry *e;

        for (i = pool->used; i < pool->size; i++) {
                e = ubi->lookuptbl[pool->pebs[i]];
                return_unused_peb(ubi, e);
        }
}

/**
 * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
 * @ubi: UBI device description object
 * @anchor: This PEB will be used as anchor PEB by fastmap
 *
 * The function returns a physical erase block and removes it from the wl
 * subsystem. If @anchor is set, a PEB with a number below UBI_FM_MAX_START
 * is returned so that fastmap can use it as anchor PEB.
 * Must be called with wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
        struct ubi_wl_entry *e = NULL;

        if (!ubi->free.rb_node)
                goto out;

        if (anchor)
                e = find_anchor_wl_entry(&ubi->free);
        else
                e = find_mean_wl_entry(ubi, &ubi->free);

        if (!e)
                goto out;

        self_check_in_wl_tree(ubi, e, &ubi->free);

        /*
         * Remove it from the free list; the wl subsystem no longer knows
         * this erase block.
         */
        rb_erase(&e->u.rb, &ubi->free);
        ubi->free_count--;
out:
        return e;
}
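
/*
 * Example (illustrative sketch, not part of this file): as in
 * ubi_ensure_anchor_pebs() below, callers must take ubi->wl_lock around the
 * call:
 *
 *        spin_lock(&ubi->wl_lock);
 *        e = ubi_wl_get_fm_peb(ubi, 1);
 *        spin_unlock(&ubi->wl_lock);
 *
 * On success the returned entry has been removed from the free tree and
 * stays invisible to the wl subsystem until it is given back, e.g. via
 * ubi_wl_put_fm_peb().
 */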

/*
 * wait_free_pebs_for_pool - wait until there are enough free PEBs
 * @ubi: UBI device description object
 *
 * Wait and execute do_work until there are enough free PEBs, then fill the
 * pool as much as we can. This reduces how often the pools are refilled,
 * which in turn reduces the fastmap updating frequency.
 */
static void wait_free_pebs_for_pool(struct ubi_device *ubi)
{
        struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
        struct ubi_fm_pool *pool = &ubi->fm_pool;
        int free, expect_free, executed;
        /*
         * At least the following free PEBs are reserved by UBI:
         * 1. WL_RESERVED_PEBS[1]
         * 2. EBA_RESERVED_PEBS[1]
         * 3. fm pebs - 1: twice the fastmap size in PEBs, minus the PEBs
         *    holding the current fastmap and the fm_anchor PEB
         * 4. beb_rsvd_pebs: this value must be read under ubi->wl_lock
         */
        int reserved = WL_RESERVED_PEBS + EBA_RESERVED_PEBS +
                       ubi->fm_size / ubi->leb_size - 1 + ubi->fm_pool_rsv_cnt;

        do {
                spin_lock(&ubi->wl_lock);
                free = ubi->free_count;
                free += pool->size - pool->used + wl_pool->size - wl_pool->used;
                expect_free = reserved + ubi->beb_rsvd_pebs;
                spin_unlock(&ubi->wl_lock);

                /*
                 * Break out if there is no pending work or if a work failed
                 * to execute; erase_worker reschedules itself when the MTD
                 * layer returns -EBUSY, e.g. during system shutdown.
                 */
                if (do_work(ubi, &executed) || !executed)
                        break;
        } while (free < expect_free);
}
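
/*
 * Worked example (hypothetical numbers): with fm_size == 3 * leb_size,
 * WL_RESERVED_PEBS == 1 and EBA_RESERVED_PEBS == 1, the reservation above is
 *
 *        reserved = 1 + 1 + (3 - 1) + fm_pool_rsv_cnt
 *
 * and the loop keeps executing pending work until free_count plus the
 * unused pool entries reaches reserved + beb_rsvd_pebs.
 */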

/*
 * left_free_count - returns the number of free PEBs available to fill the
 * fm pools
 * @ubi: UBI device description object
 *
 * This helper function returns the number of free PEBs, minus those set
 * aside for fastmap, that can be used to fill fm_pool and fm_wl_pool.
 */
static int left_free_count(struct ubi_device *ubi)
{
        int fm_used = 0;        /* fastmap non-anchor PEBs */

        if (!ubi->free.rb_node)
                return 0;

        if (!ubi->ro_mode && !ubi->fm_disabled)
                fm_used = ubi->fm_size / ubi->leb_size - 1;

        return ubi->free_count - fm_used;
}
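
/*
 * Worked example (hypothetical numbers): on a writable device with fastmap
 * enabled and fm_size == 3 * leb_size, fm_used = 3 - 1 = 2, so a free_count
 * of 10 leaves 8 PEBs for the pools.
 */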

/*
 * can_fill_pools - whether free PEBs will be left after filling pools
 * @ubi: UBI device description object
 * @free: current number of free PEBs
 *
 * Return %1 if free PEBs will still be left after filling the pools,
 * otherwise %0 is returned.
 */
static int can_fill_pools(struct ubi_device *ubi, int free)
{
        struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
        struct ubi_fm_pool *pool = &ubi->fm_pool;
        int pool_need = pool->max_size - pool->size +
                        wl_pool->max_size - wl_pool->size;

        if (free - pool_need < 1)
                return 0;

        return 1;
}
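
/*
 * Worked example (derived from the code above): if both pools have a
 * max_size of 64 and currently hold 60 entries each, pool_need = 4 + 4 = 8;
 * with free == 8 nothing would be left over and %0 is returned, with
 * free == 9 the function returns %1.
 */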

/**
 * ubi_refill_pools_and_lock - refills all fastmap PEB pools and takes fm locks.
 * @ubi: UBI device description object
 */
void ubi_refill_pools_and_lock(struct ubi_device *ubi)
{
        struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
        struct ubi_fm_pool *pool = &ubi->fm_pool;
        struct ubi_wl_entry *e;
        int enough;

        if (!ubi->ro_mode && !ubi->fm_disabled)
                wait_free_pebs_for_pool(ubi);

        down_write(&ubi->fm_protect);
        down_write(&ubi->work_sem);
        down_write(&ubi->fm_eba_sem);

        spin_lock(&ubi->wl_lock);

        return_unused_pool_pebs(ubi, wl_pool);
        return_unused_pool_pebs(ubi, pool);

        wl_pool->size = 0;
        pool->size = 0;

        if (ubi->fm_anchor) {
                wl_tree_add(ubi->fm_anchor, &ubi->free);
                ubi->free_count++;
                ubi->fm_anchor = NULL;
        }

        if (!ubi->fm_disabled)
                /*
                 * All available PEBs are in ubi->free, now is the time to get
                 * the best anchor PEB.
                 */
                ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);

        for (;;) {
                enough = 0;
                if (pool->size < pool->max_size) {
                        if (left_free_count(ubi) <= 0)
                                break;

                        e = wl_get_wle(ubi);
                        if (!e)
                                break;

                        pool->pebs[pool->size] = e->pnum;
                        pool->size++;
                } else
                        enough++;

                if (wl_pool->size < wl_pool->max_size) {
                        int left_free = left_free_count(ubi);

                        if (left_free <= 0)
                                break;

                        e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF,
                                          !can_fill_pools(ubi, left_free));
                        self_check_in_wl_tree(ubi, e, &ubi->free);
                        rb_erase(&e->u.rb, &ubi->free);
                        ubi->free_count--;

                        wl_pool->pebs[wl_pool->size] = e->pnum;
                        wl_pool->size++;
                } else
                        enough++;

                if (enough == 2)
                        break;
        }

        wl_pool->used = 0;
        pool->used = 0;

        spin_unlock(&ubi->wl_lock);
}
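
/*
 * Example (illustrative sketch, not part of this file): this function
 * returns with fm_protect, work_sem and fm_eba_sem held for writing, so the
 * caller (assumed here to be the fastmap update path) must drop them once
 * the new fastmap has been written:
 *
 *        ubi_refill_pools_and_lock(ubi);
 *        ...write the new fastmap...
 *        up_write(&ubi->fm_eba_sem);
 *        up_write(&ubi->work_sem);
 *        up_write(&ubi->fm_protect);
 */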

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
        int err;

        while (!ubi->free.rb_node && ubi->works_count) {
                dbg_wl("do one work synchronously");
                err = do_work(ubi, NULL);

                if (err)
                        return err;
        }

        return 0;
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
        int ret, attempts = 0;
        struct ubi_fm_pool *pool = &ubi->fm_pool;
        struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
        down_read(&ubi->fm_eba_sem);
        spin_lock(&ubi->wl_lock);

        /*
         * We also check the WL pool because at this point we can
         * refill the WL pool synchronously.
         */
        if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
                spin_unlock(&ubi->wl_lock);
                up_read(&ubi->fm_eba_sem);
                ret = ubi_update_fastmap(ubi);
                if (ret) {
                        ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
                        down_read(&ubi->fm_eba_sem);
                        return -ENOSPC;
                }
                down_read(&ubi->fm_eba_sem);
                spin_lock(&ubi->wl_lock);
        }

        if (pool->used == pool->size) {
                spin_unlock(&ubi->wl_lock);
                attempts++;
                if (attempts == 10) {
                        ubi_err(ubi, "Unable to get a free PEB from user WL pool");
                        ret = -ENOSPC;
                        goto out;
                }
                up_read(&ubi->fm_eba_sem);
                ret = produce_free_peb(ubi);
                if (ret < 0) {
                        down_read(&ubi->fm_eba_sem);
                        goto out;
                }
                goto again;
        }

        ubi_assert(pool->used < pool->size);
        ret = pool->pebs[pool->used++];
        prot_queue_add(ubi, ubi->lookuptbl[ret]);
        spin_unlock(&ubi->wl_lock);
out:
        return ret;
}
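
/*
 * Example (illustrative sketch, not part of this file): since
 * ubi_wl_get_peb() returns with fm_eba_sem held for reading, even on
 * failure, a caller is expected to release it when done with the PEB
 * number:
 *
 *        pnum = ubi_wl_get_peb(ubi);
 *        if (pnum < 0) {
 *                up_read(&ubi->fm_eba_sem);
 *                return pnum;
 *        }
 *        ...use pnum...
 *        up_read(&ubi->fm_eba_sem);
 */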

/**
 * next_peb_for_wl - returns next PEB to be used internally by the
 * WL sub-system.
 *
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi)
{
        struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
        int pnum;

        if (pool->used == pool->size)
                return NULL;

        pnum = pool->pebs[pool->used];
        return ubi->lookuptbl[pnum];
}

/**
 * need_wear_leveling - checks whether to trigger a wear leveling work.
 * UBI fetches free PEBs from the wl_pool; we check free PEBs in both
 * 'wl_pool' and 'ubi->free', because a free PEB in the 'ubi->free' tree
 * may be moved into 'wl_pool' by ubi_refill_pools_and_lock().
 *
 * @ubi: UBI device description object
 */
static bool need_wear_leveling(struct ubi_device *ubi)
{
        int ec;
        struct ubi_wl_entry *e;

        if (!ubi->used.rb_node)
                return false;

        e = next_peb_for_wl(ubi);
        if (!e) {
                if (!ubi->free.rb_node)
                        return false;
                e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
                ec = e->ec;
        } else {
                ec = e->ec;
                if (ubi->free.rb_node) {
                        e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
                        ec = max(ec, e->ec);
                }
        }
        e = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);

        return ec - e->ec >= UBI_WL_THRESHOLD;
}
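
/*
 * Worked example (hypothetical numbers): if the highest erase counter among
 * the wl_pool/free candidates is 5000 and the least worn used PEB has
 * ec == 400, then 5000 - 400 = 4600 >= UBI_WL_THRESHOLD (4096 with the
 * default CONFIG_MTD_UBI_WL_THRESHOLD), so wear leveling is triggered.
 */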

/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 *
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
        struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
        int pnum;

        ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));

        if (pool->used == pool->size) {
                /*
                 * We cannot update the fastmap here because this
                 * function is called in atomic context.
                 * Let's fail here and refill/update it as soon as possible.
                 */
                if (!ubi->fm_work_scheduled) {
                        ubi->fm_work_scheduled = 1;
                        schedule_work(&ubi->fm_work);
                }
                return NULL;
        }

        pnum = pool->pebs[pool->used++];
        return ubi->lookuptbl[pnum];
}
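
/*
 * Note: this closes the loop with update_fastmap_work_fn() above, which
 * clears fm_work_scheduled under wl_lock after ubi_update_fastmap() has
 * run, so at most one fm_work is in flight at any time.
 */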

/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
        struct ubi_work *wrk;
        struct ubi_wl_entry *anchor;

        spin_lock(&ubi->wl_lock);

        /* Do we already have an anchor? */
        if (ubi->fm_anchor) {
                spin_unlock(&ubi->wl_lock);
                return 0;
        }

        /* See if we can find an anchor PEB on the list of free PEBs */
        anchor = ubi_wl_get_fm_peb(ubi, 1);
        if (anchor) {
                ubi->fm_anchor = anchor;
                spin_unlock(&ubi->wl_lock);
                return 0;
        }

        ubi->fm_do_produce_anchor = 1;
        /* No luck, trigger wear leveling to produce a new anchor PEB. */
        if (ubi->wl_scheduled) {
                spin_unlock(&ubi->wl_lock);
                return 0;
        }
        ubi->wl_scheduled = 1;
        spin_unlock(&ubi->wl_lock);

        wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
        if (!wrk) {
                spin_lock(&ubi->wl_lock);
                ubi->wl_scheduled = 0;
                spin_unlock(&ubi->wl_lock);
                return -ENOMEM;
        }

        wrk->func = &wear_leveling_worker;
        __schedule_ubi_work(ubi, wrk);
        return 0;
}
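
/*
 * Example (illustrative sketch, not part of this file): the fastmap write
 * path is assumed to call this before writing a new fastmap, so that either
 * ubi->fm_anchor is already set or a wear-leveling work honoring
 * fm_do_produce_anchor has been queued:
 *
 *        err = ubi_ensure_anchor_pebs(ubi);
 *        if (err)
 *                return err;
 */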

/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
                      int lnum, int torture)
{
        struct ubi_wl_entry *e;
        int vol_id, pnum = fm_e->pnum;

        dbg_wl("PEB %d", pnum);

        ubi_assert(pnum >= 0);
        ubi_assert(pnum < ubi->peb_count);

        spin_lock(&ubi->wl_lock);
        e = ubi->lookuptbl[pnum];

        /*
         * This can happen if we recovered from a fastmap the very first time
         * and are now writing a new one. In this case the wl system has never
         * seen any PEB used by the original fastmap.
         */
        if (!e) {
                e = fm_e;
                ubi_assert(e->ec >= 0);
                ubi->lookuptbl[pnum] = e;
        }

        spin_unlock(&ubi->wl_lock);

        vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
        return schedule_erase(ubi, e, vol_id, lnum, torture, true);
}
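
/*
 * Note on the vol_id selection above: lnum == 0 marks the fastmap
 * superblock (UBI_FM_SB_VOLUME_ID), any other lnum marks fastmap data
 * (UBI_FM_DATA_VOLUME_ID), so the scheduled erase is attributed to the
 * matching internal volume.
 */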

/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
        return wrk->func == erase_worker;
}

static void ubi_fastmap_close(struct ubi_device *ubi)
{
        int i;

        return_unused_pool_pebs(ubi, &ubi->fm_pool);
        return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

        if (ubi->fm_anchor) {
                return_unused_peb(ubi, ubi->fm_anchor);
                ubi->fm_anchor = NULL;
        }

        if (ubi->fm) {
                for (i = 0; i < ubi->fm->used_blocks; i++)
                        kfree(ubi->fm->e[i]);
        }
        kfree(ubi->fm);
}

/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * See find_mean_wl_entry()
 *
 * @ubi: UBI device description object
 * @e: physical eraseblock to return
 * @root: RB tree to test against.
 */
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
                                               struct ubi_wl_entry *e,
                                               struct rb_root *root)
{
        if (e && !ubi->fm_disabled && !ubi->fm && !ubi->fm_anchor &&
            e->pnum < UBI_FM_MAX_START)
                e = rb_entry(rb_next(root->rb_node),
                             struct ubi_wl_entry, u.rb);

        return e;
}