/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
#include "debug.h"

/*
 * Check whether the user is allowed to write.
 */
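/*
 * A sketch of the rule implemented below (inferred from the code, not from
 * separate documentation): 'avail' estimates how much space a write could
 * still use - dirty, free, unchecked and currently-erasing space, minus the
 * blocks reserved for writes and the dirty space GC cannot reclaim. Once
 * that estimate is no more than the reserved-pool size (rp_size), only a
 * caller with CAP_SYS_RESOURCE may keep writing.
 */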
static int jffs2_rp_can_write(struct jffs2_sb_info *c)
{
	uint32_t avail;
	struct jffs2_mount_opts *opts = &c->mount_opts;

	avail = c->dirty_size + c->free_size + c->unchecked_size +
		c->erasing_size - c->resv_blocks_write * c->sector_size
		- c->nospc_dirty_size;

	if (avail < 2 * opts->rp_size)
		jffs2_dbg(1, "rpsize %u, dirty_size %u, free_size %u, "
			  "erasing_size %u, unchecked_size %u, "
			  "nr_erasing_blocks %u, avail %u, resrv %u\n",
			  opts->rp_size, c->dirty_size, c->free_size,
			  c->erasing_size, c->unchecked_size,
			  c->nr_erasing_blocks, avail, c->nospc_dirty_size);

	if (avail > opts->rp_size)
		return 1;

	/* Always allow root */
	if (capable(CAP_SYS_RESOURCE))
		return 1;

	jffs2_dbg(1, "forbid writing\n");
	return 0;
}

/**
 * jffs2_reserve_space - request physical space to write nodes to flash
 * @c: superblock info
 * @minsize: Minimum acceptable size of allocation
 * @len: Returned value of allocation length
 * @prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *
 * Requests a block of physical space on the flash. Returns zero for success
 * and puts 'len' into the appropriate place, or returns -ENOSPC or other
 * error if appropriate.
 *
 * If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 * allocation semaphore, to prevent more than one allocation from being
 * active at any time. The semaphore is later released by
 * jffs2_complete_reservation().
 *
 * jffs2_reserve_space() may trigger garbage collection in order to make room
 * for the requested allocation.
 */

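/*
 * Illustrative call sequence - a hedged sketch assembled from this file's
 * API rather than copied verbatim from a real caller; 'ri', 'ofs', 'totlen'
 * and 'ic' stand in for values the caller builds while writing its node:
 *
 *	uint32_t len;
 *	int ret = jffs2_reserve_space(c, sizeof(ri), &len, ALLOC_NORMAL,
 *				      JFFS2_SUMMARY_INODE_SIZE);
 *	if (ret)
 *		return ret;
 *	... write the node (at most 'len' bytes) at flash offset 'ofs' ...
 *	jffs2_add_physical_node_ref(c, ofs | REF_NORMAL, PAD(totlen), ic);
 *	jffs2_complete_reservation(c);
 *
 * jffs2_complete_reservation() is what finally drops c->alloc_sem.
 */
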
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
	mutex_lock(&c->alloc_sem);

	jffs2_dbg(1, "%s(): alloc sem got\n", __func__);

	spin_lock(&c->erase_completion_lock);

	/*
	 * Check if the free space is greater than the size of the reserved pool.
	 * If not, only allow root to proceed with writing.
	 */
	if (prio != ALLOC_DELETION && !jffs2_rp_can_write(c)) {
		ret = -ENOSPC;
		goto out;
	}

	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			uint32_t dirty, avail;

			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If one block is actually erased, it is no longer counted as dirty_space
			 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
			 * with c->nr_erasing_blocks * c->sector_size again.
			 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
			 * This helps us to force gc and eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 */
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
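			/*
			 * Worked example (illustrative figures only, assuming
			 * 64 KiB sectors): one fully dirty block queued on the
			 * erase_pending_list plus one block mid-erase give
			 * dirty_size = 64 KiB, erasing_size = 64 KiB and
			 * nr_erasing_blocks = 2, so the formula above yields
			 * just c->unchecked_size - space already on its way
			 * to being erased is not counted as reclaimable dirt.
			 */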
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on dirty space to GC, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}
				jffs2_dbg(1, "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size,
					  c->nospc_dirty_size);

				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calc possibly available space. Possibly available means that we
			 * don't know, if unchecked size contains obsoleted nodes, which could give us some
			 * more usable space. This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 * Return -ENOSPC, if the maximum possibly available space is less than or equal to
			 * blocksneeded * sector_size.
			 * This blocks endless gc looping on a filesystem, which is nearly full, even if
			 * the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ((avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on possibly available space, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}

				jffs2_dbg(1, "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size);
				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			mutex_unlock(&c->alloc_sem);

			jffs2_dbg(1, "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks,
				  c->free_size, c->dirty_size, c->wasted_size,
				  c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size +
				  c->wasted_size + c->used_size +
				  c->erasing_size + c->bad_size,
				  c->flash_size);
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);

			if (ret == -EAGAIN) {
				spin_lock(&c->erase_completion_lock);
				if (c->nr_erasing_blocks &&
				    list_empty(&c->erase_pending_list) &&
				    list_empty(&c->erase_complete_list)) {
					DECLARE_WAITQUEUE(wait, current);
					set_current_state(TASK_UNINTERRUPTIBLE);
					add_wait_queue(&c->erase_wait, &wait);
					jffs2_dbg(1, "%s waiting for erase to complete\n",
						  __func__);
					spin_unlock(&c->erase_completion_lock);

					schedule();
					remove_wait_queue(&c->erase_wait, &wait);
				} else
					spin_unlock(&c->erase_completion_lock);
			} else if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			mutex_lock(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret);
		}
	}

out:
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
	if (ret)
		mutex_unlock(&c->alloc_sem);
	return ret;
}

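/*
 * GC-internal variant: called from within a garbage-collect pass which (a
 * hedged note - see the locking in gc.c) already holds c->alloc_sem, so it
 * only loops on -EAGAIN until jffs2_do_reserve_space() succeeds, without
 * the free-space checks done above for ordinary writers.
 */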
int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
			   uint32_t *len, uint32_t sumsize)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);

	spin_lock(&c->erase_completion_lock);
	while(ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): looping, ret is %d\n",
				  __func__, ret);
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

	return ret;
}


/* Classify nextblock (clean, dirty or verydirty) and force selection of another one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	if (c->nextblock == NULL) {
		jffs2_dbg(1, "%s(): Erase block at 0x%08x has already been placed in a list\n",
			  __func__, jeb->offset);
		return;
	}
	/* Check if we have a dirty block now, or if it was dirty already */
	if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;
}
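
/*
 * Note on the wasted->dirty conversion above: per-block accounting keeps the
 * invariant free + dirty + used + wasted + unchecked == sector_size (checked
 * by the acct sanity/paranoia helpers in debug.c), so every reclassification
 * moves bytes between those buckets in matching jeb and superblock pairs.
 */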

/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_move_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_garbage_collect_trigger(c);
			jffs2_dbg(1, "%s(): Triggering erase of erasable block at 0x%08x\n",
				  __func__, ejeb->offset);
		}

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_pending_wbuf_list)) {
			jffs2_dbg(1, "%s(): Flushing write buffer\n",
				  __func__);
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			pr_crit("Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				c->nr_erasing_blocks, c->nr_free_blocks,
				list_empty(&c->erasable_list) ? "yes" : "no",
				list_empty(&c->erasing_list) ? "yes" : "no",
				list_empty(&c->erase_pending_list) ? "yes" : "no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	/* adjust write buffer offset, else we get a non-contiguous write bug */
	if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len)
		c->wbuf_ofs = 0xffffffff;
#endif

	jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
		  __func__, c->nextblock->offset);

	return 0;
}

/* Called with alloc sem _and_ erase_completion_lock */
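/*
 * Note, summarising the control flow below: the function may temporarily
 * drop erase_completion_lock (e.g. to flush the write buffer) and jump back
 * to 'restart', so it re-reads c->nextblock after any such excursion.
 */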
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size; /* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
		/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d , jeb->free=%d ,"
				    "summary->size=%d , sumsize=%d\n",
				    minsize, jeb->free_size,
				    c->summary->sum_size, sumsize);
		}

		/* Is there enough space for writing out the current node, or we have to
		   write out summary information now, close this jeb and select new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Writing out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the summary
				   information; disable summary for this jeb and free the
				   collected information */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* keep always valid value in reserved_size */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			uint32_t waste;

			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				jffs2_dbg(1, "%s(): Flushing write buffer\n",
					  __func__);
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
			if (ret)
				return ret;
			/* Just lock it again and continue. Nothing much can change because
			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
			   we hold c->erase_completion_lock in the majority of this function...
			   but that's a question for another (more caffeine-rich) day. */
			spin_lock(&c->erase_completion_lock);

			waste = jeb->free_size;
			jffs2_link_node_ref(c, jeb,
					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
					    waste, NULL);
			/* FIXME: that made it count as dirty. Convert to wasted */
			jeb->dirty_size -= waste;
			c->dirty_size -= waste;
			jeb->wasted_size += waste;
			c->wasted_size += waste;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			pr_warn("Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n",
				jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	jffs2_dbg(1, "%s(): Giving 0x%x bytes at 0x%x\n",
		  __func__,
		  *len, jeb->offset + (c->sector_size - jeb->free_size));
	return 0;
}

/**
 * jffs2_add_physical_node_ref - add a physical node reference to the list
 * @c: superblock info
 * @ofs: physical offset of the node, with the REF_ flag in the low two bits
 * @len: length of this physical node
 * @ic: inode cache the node belongs to, if any
 *
 * Should only be used to report nodes for which space has been allocated
 * by jffs2_reserve_space.
 *
 * Must be called with the alloc_sem held.
 */

struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
						       uint32_t ofs, uint32_t len,
						       struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *new;

	jeb = &c->blocks[ofs / c->sector_size];

	jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n",
		  __func__, ofs & ~3, ofs & 3, len);
#if 1
	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
	   even after refiling c->nextblock */
	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
		pr_warn("argh. node added in wrong place at 0x%08x(%d)\n",
			ofs & ~3, ofs & 3);
		if (c->nextblock)
			pr_warn("nextblock 0x%08x", c->nextblock->offset);
		else
			pr_warn("No nextblock");
		pr_cont(", expected at %08x\n",
			jeb->offset + (c->sector_size - jeb->free_size));
		return ERR_PTR(-EINVAL);
	}
#endif
	spin_lock(&c->erase_completion_lock);

	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return new;
}

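/*
 * Release a reservation taken with jffs2_reserve_space(): poke the GC
 * trigger (the node just written may have pushed the accounting over a GC
 * threshold) and drop c->alloc_sem.
 */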
void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	jffs2_dbg(1, "jffs2_complete_reservation()\n");
	spin_lock(&c->erase_completion_lock);
	jffs2_garbage_collect_trigger(c);
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->alloc_sem);
}

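/* Linear membership test; in this file it is only used to check whether a
   block is sitting on the (normally short) bad_used_list. */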
static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			jffs2_dbg(1, "%p is on list at %p\n", obj, head);
			return 1;
		}
	}
	return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if (unlikely(!ref)) {
		pr_notice("EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		jffs2_dbg(1, "%s(): called with already obsolete node at 0x%08x\n",
			  __func__, ref_offset(ref));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		pr_notice("raw node at 0x%08x is off the end of device!\n",
			  ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		mutex_lock(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
				pr_notice("raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
					  freed_len, blocknr,
					  ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n",
			  ref_offset(ref), freed_len);
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
				pr_notice("raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
					  freed_len, blocknr,
					  ref->flash_offset, jeb->used_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x\n",
			  ref_offset(ref), freed_len);
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}

	/* Take care that wasted space is taken into account */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		jffs2_dbg(1, "Dirtying\n");
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				jffs2_dbg(1, "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset);
				addedsize = 0; /* To fool the refiling code later */
			} else {
				jffs2_dbg(1, "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset);
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		jffs2_dbg(1, "Wasting\n");
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		jffs2_dbg(2, "Not moving nextblock 0x%08x to dirty/erase_pending list\n",
			  jeb->offset);
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			jffs2_dbg(1, "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n",
				  jeb->offset);
			c->gcblock = NULL;
		} else {
			jffs2_dbg(1, "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n",
				  jeb->offset);
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			jffs2_dbg(1, "...and adding to erasable_pending_wbuf_list\n");
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				jffs2_dbg(1, "...and adding to erase_pending_list\n");
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_garbage_collect_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				jffs2_dbg(1, "...and adding to erasable_list\n");
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		jffs2_dbg(1, "Done OK\n");
	} else if (jeb == c->gcblock) {
		jffs2_dbg(2, "Not moving gcblock 0x%08x to dirty_list\n",
			  jeb->offset);
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to dirty_list\n");
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to very_dirty_list\n");
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		jffs2_dbg(1, "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
	    (c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */

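	/*
	 * "Obliterating" below means clearing the JFFS2_NODE_ACCURATE bit in
	 * the node header on the flash itself - possible only on media where
	 * jffs2_can_mark_obsolete() is true - so that a future scan of this
	 * block sees the node as obsolete without any in-core state.
	 */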
	jffs2_dbg(1, "obliterating obsoleted node at 0x%08x\n",
		  ref_offset(ref));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		pr_warn("Read error reading from obsoleted node at 0x%08x: %d\n",
			ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		pr_warn("Short read from obsoleted node at 0x%08x: %zd\n",
			ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		pr_warn("Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n",
			je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		jffs2_dbg(1, "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n",
			  ref_offset(ref), je16_to_cpu(n.nodetype));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		pr_warn("Write error in obliterating obsoleted node at 0x%08x: %d\n",
			ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		pr_warn("Short write in obliterating obsoleted node at 0x%08x: %zd\n",
			ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
		case RAWNODE_CLASS_XATTR_DATUM:
			jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
			break;
		case RAWNODE_CLASS_XATTR_REF:
			jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
			break;
#endif
		default:
			if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
				jffs2_del_ino_cache(c, ic);
			break;
		}
		spin_unlock(&c->erase_completion_lock);
	}

 out_erase_sem:
	mutex_unlock(&c->erase_free_sem);
}

int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;
	int nr_very_dirty = 0;
	struct jffs2_eraseblock *jeb;

	if (!list_empty(&c->erase_complete_list) ||
	    !list_empty(&c->erase_pending_list))
		return 1;

	if (c->unchecked_size) {
		jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino);
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If one block is actually erased, it is no longer counted as dirty_space
	 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
	 * with c->nr_erasing_blocks * c->sector_size again.
	 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
	 * This helps us to force gc and eventually pick a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
	    (dirty > c->nospc_dirty_size))
		ret = 1;

	list_for_each_entry(jeb, &c->very_dirty_list, list) {
		nr_very_dirty++;
		if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
			ret = 1;
			/* In debug mode, actually go through and count them all */
			D1(continue);
			break;
		}
	}

	jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
		  __func__, c->nr_free_blocks, c->nr_erasing_blocks,
		  c->dirty_size, nr_very_dirty, ret ? "yes" : "no");

	return ret;
}