/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/vfs.h>
#include <linux/crc32.h>
#include "nodelist.h"

static int jffs2_flash_setup(struct jffs2_sb_info *c);

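/*
 * Write a replacement raw inode node (plus any device or symlink
 * metadata) to the medium for the requested attribute change, then
 * bring the in-core inode into line with it.
 */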
int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
{
	struct jffs2_full_dnode *old_metadata, *new_metadata;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_raw_inode *ri;
	union jffs2_device_node dev;
	unsigned char *mdata = NULL;
	int mdatalen = 0;
	unsigned int ivalid;
	uint32_t alloclen;
	int ret;
	int alloc_type = ALLOC_NORMAL;

	jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino);

	/* Special cases - we don't want more than one data node
	   for these types on the medium at any time. So setattr
	   must read the original data associated with the node
	   (i.e. the device numbers or the target name) and write
	   it out again with the appropriate data attached */
	if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
		/* For these, we don't actually need to read the old node */
		mdatalen = jffs2_encode_dev(&dev, inode->i_rdev);
		mdata = (char *)&dev;
		jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
			  __func__, mdatalen);
	} else if (S_ISLNK(inode->i_mode)) {
		mutex_lock(&f->sem);
		mdatalen = f->metadata->size;
		mdata = kmalloc(f->metadata->size, GFP_USER);
		if (!mdata) {
			mutex_unlock(&f->sem);
			return -ENOMEM;
		}
		ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
		if (ret) {
			mutex_unlock(&f->sem);
			kfree(mdata);
			return ret;
		}
		mutex_unlock(&f->sem);
		jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n",
			  __func__, mdatalen);
	}

	ri = jffs2_alloc_raw_inode();
	if (!ri) {
		if (S_ISLNK(inode->i_mode))
			kfree(mdata);
		return -ENOMEM;
	}

	ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen,
				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
	if (ret) {
		jffs2_free_raw_inode(ri);
		if (S_ISLNK(inode->i_mode))
			kfree(mdata);
		return ret;
	}
	mutex_lock(&f->sem);
	ivalid = iattr->ia_valid;

	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
	ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));

	ri->ino = cpu_to_je32(inode->i_ino);
	ri->version = cpu_to_je32(++f->highest_version);

	ri->uid = cpu_to_je16((ivalid & ATTR_UID)?iattr->ia_uid:inode->i_uid);
	ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid);

	if (ivalid & ATTR_MODE)
		ri->mode = cpu_to_jemode(iattr->ia_mode);
	else
		ri->mode = cpu_to_jemode(inode->i_mode);

	ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
	ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
	ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
	ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));

	ri->offset = cpu_to_je32(0);
	ri->csize = ri->dsize = cpu_to_je32(mdatalen);
	ri->compr = JFFS2_COMPR_NONE;
	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		/* It's an extension. Make it a hole node */
		ri->compr = JFFS2_COMPR_ZERO;
		ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
		ri->offset = cpu_to_je32(inode->i_size);
	} else if (ivalid & ATTR_SIZE && !iattr->ia_size) {
		/* For truncate-to-zero, treat it as deletion because
		   it'll always be obsoleting all previous nodes */
		alloc_type = ALLOC_DELETION;
	}
	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
	if (mdatalen)
		ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
	else
		ri->data_crc = cpu_to_je32(0);

	new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type);
	if (S_ISLNK(inode->i_mode))
		kfree(mdata);

	if (IS_ERR(new_metadata)) {
		jffs2_complete_reservation(c);
		jffs2_free_raw_inode(ri);
		mutex_unlock(&f->sem);
		return PTR_ERR(new_metadata);
	}
	/* It worked. Update the inode */
	inode->i_atime = ITIME(je32_to_cpu(ri->atime));
	inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
	inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
	inode->i_mode = jemode_to_cpu(ri->mode);
	inode->i_uid = je16_to_cpu(ri->uid);
	inode->i_gid = je16_to_cpu(ri->gid);

	old_metadata = f->metadata;

	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
		jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size);

	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		jffs2_add_full_dnode_to_inode(c, f, new_metadata);
		inode->i_size = iattr->ia_size;
		inode->i_blocks = (inode->i_size + 511) >> 9;
		f->metadata = NULL;
	} else {
		f->metadata = new_metadata;
	}
	if (old_metadata) {
		jffs2_mark_node_obsolete(c, old_metadata->raw);
		jffs2_free_full_dnode(old_metadata);
	}
	jffs2_free_raw_inode(ri);

	mutex_unlock(&f->sem);
	jffs2_complete_reservation(c);

	/* We have to do the truncate_setsize() without f->sem held, since
	   some pages may be locked and waiting for it in readpage().
	   We are protected from a simultaneous write() extending i_size
	   back past iattr->ia_size, because do_truncate() holds the
	   generic inode semaphore. */
	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
		truncate_setsize(inode, iattr->ia_size);
		inode->i_blocks = (inode->i_size + 511) >> 9;
	}

	return 0;
}

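/*
 * VFS ->setattr() entry point: validate the requested change, write it
 * to the medium, and update the POSIX ACL if the mode was changed.
 */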
int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
{
	int rc;

	rc = inode_change_ok(dentry->d_inode, iattr);
	if (rc)
		return rc;

	rc = jffs2_do_setattr(dentry->d_inode, iattr);
	if (!rc && (iattr->ia_valid & ATTR_MODE))
		rc = jffs2_acl_chmod(dentry->d_inode);

	return rc;
}

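/*
 * Report filesystem statistics.  Available space is the dirty plus free
 * space on the medium, less the erase blocks JFFS2 keeps in reserve
 * (resv_blocks_write) so that garbage collection can still make progress.
 */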
int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb);
	unsigned long avail;

	buf->f_type = JFFS2_SUPER_MAGIC;
	buf->f_bsize = 1 << PAGE_SHIFT;
	buf->f_blocks = c->flash_size >> PAGE_SHIFT;
	buf->f_files = 0;
	buf->f_ffree = 0;
	buf->f_namelen = JFFS2_MAX_NAME_LEN;
	buf->f_fsid.val[0] = JFFS2_SUPER_MAGIC;
	buf->f_fsid.val[1] = c->mtd->index;

	spin_lock(&c->erase_completion_lock);
	avail = c->dirty_size + c->free_size;
	if (avail > c->sector_size * c->resv_blocks_write)
		avail -= c->sector_size * c->resv_blocks_write;
	else
		avail = 0;
	spin_unlock(&c->erase_completion_lock);

	buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT;

	return 0;
}

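/* ->evict_inode() hook: final teardown of an in-core inode. */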
void jffs2_evict_inode (struct inode *inode)
{
	/* We can forget about this inode for now - drop all
	 * the nodelists associated with it, etc.
	 */
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);

	jffs2_dbg(1, "%s(): ino #%lu mode %o\n",
		  __func__, inode->i_ino, inode->i_mode);
	truncate_inode_pages(&inode->i_data, 0);
	clear_inode(inode);
	jffs2_do_clear_inode(c, f);
}

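/*
 * Obtain the in-core inode for the given inode number, reading it from
 * the medium and setting up the appropriate operations if it is not
 * already cached.
 */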
struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
{
	struct jffs2_inode_info *f;
	struct jffs2_sb_info *c;
	struct jffs2_raw_inode latest_node;
	union jffs2_device_node jdev;
	struct inode *inode;
	dev_t rdev = 0;
	int ret;

	jffs2_dbg(1, "%s(): ino == %lu\n", __func__, ino);

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	f = JFFS2_INODE_INFO(inode);
	c = JFFS2_SB_INFO(inode->i_sb);

	jffs2_init_inode_info(f);
	mutex_lock(&f->sem);

	ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);

	if (ret) {
		mutex_unlock(&f->sem);
		iget_failed(inode);
		return ERR_PTR(ret);
	}
	inode->i_mode = jemode_to_cpu(latest_node.mode);
	inode->i_uid = je16_to_cpu(latest_node.uid);
	inode->i_gid = je16_to_cpu(latest_node.gid);
	inode->i_size = je32_to_cpu(latest_node.isize);
	inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
	inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
	inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime));

	set_nlink(inode, f->inocache->pino_nlink);

	inode->i_blocks = (inode->i_size + 511) >> 9;

	switch (inode->i_mode & S_IFMT) {

	case S_IFLNK:
		inode->i_op = &jffs2_symlink_inode_operations;
		break;

	case S_IFDIR:
	{
		struct jffs2_full_dirent *fd;
		set_nlink(inode, 2); /* parent and '.' */

		for (fd=f->dents; fd; fd = fd->next) {
			if (fd->type == DT_DIR && fd->ino)
				inc_nlink(inode);
		}
		/* Root dir gets i_nlink 3 for some reason */
		if (inode->i_ino == 1)
			inc_nlink(inode);

		inode->i_op = &jffs2_dir_inode_operations;
		inode->i_fop = &jffs2_dir_operations;
		break;
	}
	case S_IFREG:
		inode->i_op = &jffs2_file_inode_operations;
		inode->i_fop = &jffs2_file_operations;
		inode->i_mapping->a_ops = &jffs2_file_address_operations;
		inode->i_mapping->nrpages = 0;
		break;

	case S_IFBLK:
	case S_IFCHR:
		/* Read the device numbers from the media */
		if (f->metadata->size != sizeof(jdev.old_id) &&
		    f->metadata->size != sizeof(jdev.new_id)) {
			pr_notice("Device node has strange size %d\n",
				  f->metadata->size);
			goto error_io;
		}
		jffs2_dbg(1, "Reading device numbers from flash\n");
		ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size);
		if (ret < 0) {
			/* Eep */
			pr_notice("Read device numbers for inode %lu failed\n",
				  (unsigned long)inode->i_ino);
			goto error;
		}
		if (f->metadata->size == sizeof(jdev.old_id))
			rdev = old_decode_dev(je16_to_cpu(jdev.old_id));
		else
			rdev = new_decode_dev(je32_to_cpu(jdev.new_id));

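		/* fall through */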
	case S_IFSOCK:
	case S_IFIFO:
		inode->i_op = &jffs2_file_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;

	default:
		pr_warn("%s(): Bogus i_mode %o for ino %lu\n",
			__func__, inode->i_mode, (unsigned long)inode->i_ino);
	}

	mutex_unlock(&f->sem);

	jffs2_dbg(1, "jffs2_read_inode() returning\n");
	unlock_new_inode(inode);
	return inode;

error_io:
	ret = -EIO;
error:
	mutex_unlock(&f->sem);
	jffs2_do_clear_inode(c, f);
	iget_failed(inode);
	return ERR_PTR(ret);
}

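/*
 * ->dirty_inode() hook: if the inode's core attributes were changed
 * (I_DIRTY_DATASYNC), write them back by building an iattr and calling
 * jffs2_do_setattr().
 */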
void jffs2_dirty_inode(struct inode *inode, int flags)
{
	struct iattr iattr;

	if (!(inode->i_state & I_DIRTY_DATASYNC)) {
		jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n",
			  __func__, inode->i_ino);
		return;
	}

	jffs2_dbg(1, "%s(): calling setattr() for ino #%lu\n",
		  __func__, inode->i_ino);

	iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME;
	iattr.ia_mode = inode->i_mode;
	iattr.ia_uid = inode->i_uid;
	iattr.ia_gid = inode->i_gid;
	iattr.ia_atime = inode->i_atime;
	iattr.ia_mtime = inode->i_mtime;
	iattr.ia_ctime = inode->i_ctime;

	jffs2_do_setattr(inode, &iattr);
}

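/*
 * Remount handling: refuse to go read-write if JFFS2 has marked the
 * filesystem permanently read-only (JFFS2_SB_FLAG_RO), flush the write
 * buffer, and start or stop the garbage-collection thread to match the
 * new flags.
 */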
int jffs2_do_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);

	if (c->flags & JFFS2_SB_FLAG_RO && !(sb->s_flags & MS_RDONLY))
		return -EROFS;

	/* We stop if it was running, then restart if it needs to.
	   This also catches the case where it was stopped and this
	   is just a remount to restart it.
	   Flush the writebuffer, if necessary, else we lose it */
	if (!(sb->s_flags & MS_RDONLY)) {
		jffs2_stop_garbage_collect_thread(c);
		mutex_lock(&c->alloc_sem);
		jffs2_flush_wbuf_pad(c);
		mutex_unlock(&c->alloc_sem);
	}

	if (!(*flags & MS_RDONLY))
		jffs2_start_garbage_collect_thread(c);

	*flags |= MS_NOATIME;
	return 0;
}

/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
   fill in the raw_inode while you're at it. */
struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_raw_inode *ri)
{
	struct inode *inode;
	struct super_block *sb = dir_i->i_sb;
	struct jffs2_sb_info *c;
	struct jffs2_inode_info *f;
	int ret;

	jffs2_dbg(1, "%s(): dir_i %ld, mode 0x%x\n",
		  __func__, dir_i->i_ino, mode);

	c = JFFS2_SB_INFO(sb);

	inode = new_inode(sb);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	f = JFFS2_INODE_INFO(inode);
	jffs2_init_inode_info(f);
	mutex_lock(&f->sem);

	memset(ri, 0, sizeof(*ri));
	/* Set OS-specific defaults for new inodes */
	ri->uid = cpu_to_je16(current_fsuid());

	if (dir_i->i_mode & S_ISGID) {
		ri->gid = cpu_to_je16(dir_i->i_gid);
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		ri->gid = cpu_to_je16(current_fsgid());
	}

	/* POSIX ACLs have to be processed now, at least partly.
	   The umask is only applied if there's no default ACL */
	ret = jffs2_init_acl_pre(dir_i, inode, &mode);
	if (ret) {
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(ret);
	}
	ret = jffs2_do_new_inode (c, f, mode, ri);
	if (ret) {
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(ret);
	}
	set_nlink(inode, 1);
	inode->i_ino = je32_to_cpu(ri->ino);
	inode->i_mode = jemode_to_cpu(ri->mode);
	inode->i_gid = je16_to_cpu(ri->gid);
	inode->i_uid = je16_to_cpu(ri->uid);
	inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
	ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));

	inode->i_blocks = 0;
	inode->i_size = 0;

	if (insert_inode_locked(inode) < 0) {
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(-EINVAL);
	}

	return inode;
}

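/*
 * Example of the sizing arithmetic below: a 256MiB medium gives
 * size_mb = 256 and a raw hashsize of 512, subject to the
 * INOCACHE_HASHSIZE_MIN/MAX bounds.
 */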
static int calculate_inocache_hashsize(uint32_t flash_size)
{
	/*
	 * Pick an inocache hash size based on the size of the medium.
	 * Count how many megabytes we're dealing with, use a hash size of
	 * twice that, rounded down to a multiple of 64, and keep it within
	 * sensible bounds.
	 */

	int size_mb = flash_size / 1024 / 1024;
	int hashsize = (size_mb * 2) & ~0x3f;

	if (hashsize < INOCACHE_HASHSIZE_MIN)
		return INOCACHE_HASHSIZE_MIN;
	if (hashsize > INOCACHE_HASHSIZE_MAX)
		return INOCACHE_HASHSIZE_MAX;

	return hashsize;
}

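/*
 * Fill in the superblock: sanity-check the MTD device, set up any
 * write-buffer support, build the inode cache hash and xattr subsystem,
 * mount the filesystem and instantiate the root dentry.
 */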
int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
{
	struct jffs2_sb_info *c;
	struct inode *root_i;
	int ret;
	size_t blocks;

	c = JFFS2_SB_INFO(sb);

#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
	if (c->mtd->type == MTD_NANDFLASH) {
		pr_err("Cannot operate on NAND flash unless jffs2 NAND support is compiled in\n");
		return -EINVAL;
	}
	if (c->mtd->type == MTD_DATAFLASH) {
		pr_err("Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in\n");
		return -EINVAL;
	}
#endif

	c->flash_size = c->mtd->size;
	c->sector_size = c->mtd->erasesize;
	blocks = c->flash_size / c->sector_size;

	/*
	 * Size alignment check
	 */
	if ((c->sector_size * blocks) != c->flash_size) {
		c->flash_size = c->sector_size * blocks;
		pr_info("Flash size not aligned to erasesize, reducing to %dKiB\n",
			c->flash_size / 1024);
	}

	if (c->flash_size < 5*c->sector_size) {
		pr_err("Too few erase blocks (%d)\n",
		       c->flash_size / c->sector_size);
		return -EINVAL;
	}

	c->cleanmarker_size = sizeof(struct jffs2_unknown_node);

	/* NAND (or other bizarre) flash... do setup accordingly */
	ret = jffs2_flash_setup(c);
	if (ret)
		return ret;

	c->inocache_hashsize = calculate_inocache_hashsize(c->flash_size);
	c->inocache_list = kcalloc(c->inocache_hashsize, sizeof(struct jffs2_inode_cache *), GFP_KERNEL);
	if (!c->inocache_list) {
		ret = -ENOMEM;
		goto out_wbuf;
	}

	jffs2_init_xattr_subsystem(c);

	if ((ret = jffs2_do_mount_fs(c)))
		goto out_inohash;

	jffs2_dbg(1, "%s(): Getting root inode\n", __func__);
	root_i = jffs2_iget(sb, 1);
	if (IS_ERR(root_i)) {
		jffs2_dbg(1, "get root inode failed\n");
		ret = PTR_ERR(root_i);
		goto out_root;
	}

	ret = -ENOMEM;

	jffs2_dbg(1, "%s(): d_make_root()\n", __func__);
	sb->s_root = d_make_root(root_i);
	if (!sb->s_root)
		goto out_root;

	sb->s_maxbytes = 0xFFFFFFFF;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = JFFS2_SUPER_MAGIC;
	if (!(sb->s_flags & MS_RDONLY))
		jffs2_start_garbage_collect_thread(c);
	return 0;

out_root:
	jffs2_free_ino_caches(c);
	jffs2_free_raw_node_refs(c);
	if (jffs2_blocks_use_vmalloc(c))
		vfree(c->blocks);
	else
		kfree(c->blocks);
out_inohash:
	jffs2_clear_xattr_subsystem(c);
	kfree(c->inocache_list);
out_wbuf:
	jffs2_flash_cleanup(c);

	return ret;
}

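/*
 * GC helper: release the inode reference taken by jffs2_gc_fetch_inode().
 * OFNI_EDONI_2SFFJ() maps a jffs2_inode_info back to its VFS inode.
 */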
void jffs2_gc_release_inode(struct jffs2_sb_info *c,
			    struct jffs2_inode_info *f)
{
	iput(OFNI_EDONI_2SFFJ(f));
}

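/*
 * GC helper: find the in-core inode for the inode number being garbage
 * collected.  See the comments below for why unlinked inodes are looked
 * up with ilookup() rather than iget().
 */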
struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
					      int inum, int unlinked)
{
	struct inode *inode;
	struct jffs2_inode_cache *ic;

	if (unlinked) {
		/* The inode has zero nlink but its nodes weren't yet marked
		   obsolete. This has to be because we're still waiting for
		   the final (close() and) iput() to happen.

		   There's a possibility that the final iput() could have
		   happened while we were contemplating. In order to ensure
		   that we don't cause a new read_inode() (which would fail)
		   for the inode in question, we use ilookup() in this case
		   instead of iget().

		   The nlink can't _become_ zero at this point because we're
		   holding the alloc_sem, and jffs2_do_unlink() would also
		   need that while decrementing nlink on any inode.
		*/
		inode = ilookup(OFNI_BS_2SFFJ(c), inum);
		if (!inode) {
			jffs2_dbg(1, "ilookup() failed for ino #%u; inode is probably deleted.\n",
				  inum);

			spin_lock(&c->inocache_lock);
			ic = jffs2_get_ino_cache(c, inum);
			if (!ic) {
				jffs2_dbg(1, "Inode cache for ino #%u is gone\n",
					  inum);
				spin_unlock(&c->inocache_lock);
				return NULL;
			}
			if (ic->state != INO_STATE_CHECKEDABSENT) {
				/* Wait for progress. Don't just loop */
				jffs2_dbg(1, "Waiting for ino #%u in state %d\n",
					  ic->ino, ic->state);
				sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			} else {
				spin_unlock(&c->inocache_lock);
			}

			return NULL;
		}
	} else {
		/* Inode has links to it still; they're not going away because
		   jffs2_do_unlink() would need the alloc_sem and we have it.
		   Just iget() it, and if read_inode() is necessary that's OK.
		*/
		inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum);
		if (IS_ERR(inode))
			return ERR_CAST(inode);
	}
	if (is_bad_inode(inode)) {
		pr_notice("Eep. read_inode() failed for ino #%u. unlinked %d\n",
			  inum, unlinked);
		/* NB. This will happen again. We need to do something appropriate here. */
		iput(inode);
		return ERR_PTR(-EIO);
	}

	return JFFS2_INODE_INFO(inode);
}

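/*
 * GC helper: pull the page covering the given offset in through the page
 * cache and return it kmapped; the struct page pointer is stashed in *priv
 * for jffs2_gc_release_page() to unmap and release later.
 */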
unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
				   struct jffs2_inode_info *f,
				   unsigned long offset,
				   unsigned long *priv)
{
	struct inode *inode = OFNI_EDONI_2SFFJ(f);
	struct page *pg;

	pg = read_cache_page_async(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
			     (void *)jffs2_do_readpage_unlock, inode);
	if (IS_ERR(pg))
		return (void *)pg;

	*priv = (unsigned long)pg;
	return kmap(pg);
}

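/* GC helper: kunmap the page fetched above and drop its page cache reference. */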
void jffs2_gc_release_page(struct jffs2_sb_info *c,
			   unsigned char *ptr,
			   unsigned long *priv)
{
	struct page *pg = (void *)*priv;

	kunmap(pg);
	page_cache_release(pg);
}

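/*
 * Set up whichever write-buffer variant the underlying flash requires:
 * NAND, DataFlash, Intel "Sibley" NOR, or a UBI volume.
 */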
static int jffs2_flash_setup(struct jffs2_sb_info *c)
{
	int ret = 0;

	if (jffs2_cleanmarker_oob(c)) {
		/* NAND flash... do setup accordingly */
		ret = jffs2_nand_flash_setup(c);
		if (ret)
			return ret;
	}

	/* and DataFlash */
	if (jffs2_dataflash(c)) {
		ret = jffs2_dataflash_setup(c);
		if (ret)
			return ret;
	}

	/* and Intel "Sibley" flash */
	if (jffs2_nor_wbuf_flash(c)) {
		ret = jffs2_nor_wbuf_flash_setup(c);
		if (ret)
			return ret;
	}

	/* and a UBI volume */
	if (jffs2_ubivol(c)) {
		ret = jffs2_ubivol_setup(c);
		if (ret)
			return ret;
	}

	return ret;
}

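/* Undo whatever jffs2_flash_setup() set up for this medium. */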
void jffs2_flash_cleanup(struct jffs2_sb_info *c)
{
	if (jffs2_cleanmarker_oob(c)) {
		jffs2_nand_flash_cleanup(c);
	}

	/* and DataFlash */
	if (jffs2_dataflash(c)) {
		jffs2_dataflash_cleanup(c);
	}

	/* and Intel "Sibley" flash */
	if (jffs2_nor_wbuf_flash(c)) {
		jffs2_nor_wbuf_flash_cleanup(c);
	}

	/* and a UBI volume */
	if (jffs2_ubivol(c)) {
		jffs2_ubivol_cleanup(c);
	}
}