/* Linux v6.8 */
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/list.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/vfs.h>
#include <linux/crc32.h>
#include "nodelist.h"

static int jffs2_flash_setup(struct jffs2_sb_info *c);

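/*
 * Write a brand new raw inode node carrying the changed attributes, then
 * obsolete the old metadata node.  Device nodes re-encode i_rdev and
 * symlinks re-read their target so that payload is written back alongside
 * the new attributes.  Extending a file is recorded as a hole
 * (JFFS2_COMPR_ZERO) node, and truncation to zero uses ALLOC_DELETION
 * since it obsoletes every previous data node.
 */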
int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
{
	struct jffs2_full_dnode *old_metadata, *new_metadata;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_raw_inode *ri;
	union jffs2_device_node dev;
	unsigned char *mdata = NULL;
	int mdatalen = 0;
	unsigned int ivalid;
	uint32_t alloclen;
	int ret;
	int alloc_type = ALLOC_NORMAL;

	jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino);

	/* Special cases - we don't want more than one data node
	   for these types on the medium at any time. So setattr
	   must read the original data associated with the node
	   (i.e. the device numbers or the target name) and write
	   it out again with the appropriate data attached */
	if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
		/* For these, we don't actually need to read the old node */
		mdatalen = jffs2_encode_dev(&dev, inode->i_rdev);
		mdata = (char *)&dev;
		jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
			  __func__, mdatalen);
	} else if (S_ISLNK(inode->i_mode)) {
		mutex_lock(&f->sem);
		mdatalen = f->metadata->size;
		mdata = kmalloc(f->metadata->size, GFP_USER);
		if (!mdata) {
			mutex_unlock(&f->sem);
			return -ENOMEM;
		}
		ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
		if (ret) {
			mutex_unlock(&f->sem);
			kfree(mdata);
			return ret;
		}
		mutex_unlock(&f->sem);
		jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n",
			  __func__, mdatalen);
	}

	ri = jffs2_alloc_raw_inode();
	if (!ri) {
		if (S_ISLNK(inode->i_mode))
			kfree(mdata);
		return -ENOMEM;
	}

	ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen,
				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
	if (ret) {
		jffs2_free_raw_inode(ri);
		if (S_ISLNK(inode->i_mode))
			 kfree(mdata);
		return ret;
	}
	mutex_lock(&f->sem);
	ivalid = iattr->ia_valid;

	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
	ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));

	ri->ino = cpu_to_je32(inode->i_ino);
	ri->version = cpu_to_je32(++f->highest_version);

	ri->uid = cpu_to_je16((ivalid & ATTR_UID)?
		from_kuid(&init_user_ns, iattr->ia_uid):i_uid_read(inode));
	ri->gid = cpu_to_je16((ivalid & ATTR_GID)?
		from_kgid(&init_user_ns, iattr->ia_gid):i_gid_read(inode));

	if (ivalid & ATTR_MODE)
		ri->mode = cpu_to_jemode(iattr->ia_mode);
	else
		ri->mode = cpu_to_jemode(inode->i_mode);


	ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
	ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode_get_atime(inode)));
	ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode_get_mtime(inode)));
	ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode_get_ctime(inode)));

	ri->offset = cpu_to_je32(0);
	ri->csize = ri->dsize = cpu_to_je32(mdatalen);
	ri->compr = JFFS2_COMPR_NONE;
	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		/* It's an extension. Make it a hole node */
		ri->compr = JFFS2_COMPR_ZERO;
		ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
		ri->offset = cpu_to_je32(inode->i_size);
	} else if (ivalid & ATTR_SIZE && !iattr->ia_size) {
		/* For truncate-to-zero, treat it as deletion because
		   it'll always be obsoleting all previous nodes */
		alloc_type = ALLOC_DELETION;
	}
	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
	if (mdatalen)
		ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
	else
		ri->data_crc = cpu_to_je32(0);

	new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type);
	if (S_ISLNK(inode->i_mode))
		kfree(mdata);

	if (IS_ERR(new_metadata)) {
		jffs2_complete_reservation(c);
		jffs2_free_raw_inode(ri);
		mutex_unlock(&f->sem);
		return PTR_ERR(new_metadata);
	}
	/* It worked. Update the inode */
	inode_set_atime_to_ts(inode, ITIME(je32_to_cpu(ri->atime)));
	inode_set_ctime_to_ts(inode, ITIME(je32_to_cpu(ri->ctime)));
	inode_set_mtime_to_ts(inode, ITIME(je32_to_cpu(ri->mtime)));
	inode->i_mode = jemode_to_cpu(ri->mode);
	i_uid_write(inode, je16_to_cpu(ri->uid));
	i_gid_write(inode, je16_to_cpu(ri->gid));


	old_metadata = f->metadata;

	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
		jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size);

	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		jffs2_add_full_dnode_to_inode(c, f, new_metadata);
		inode->i_size = iattr->ia_size;
		inode->i_blocks = (inode->i_size + 511) >> 9;
		f->metadata = NULL;
	} else {
		f->metadata = new_metadata;
	}
	if (old_metadata) {
		jffs2_mark_node_obsolete(c, old_metadata->raw);
		jffs2_free_full_dnode(old_metadata);
	}
	jffs2_free_raw_inode(ri);

	mutex_unlock(&f->sem);
	jffs2_complete_reservation(c);

	/* We have to do the truncate_setsize() without f->sem held, since
	   some pages may be locked and waiting for it in read_folio().
	   We are protected from a simultaneous write() extending i_size
	   back past iattr->ia_size, because do_truncate() holds the
	   generic inode semaphore. */
	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
		truncate_setsize(inode, iattr->ia_size);
		inode->i_blocks = (inode->i_size + 511) >> 9;
	}

	return 0;
}

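/*
 * VFS ->setattr(): validate the request with setattr_prepare(), write the
 * new attributes via jffs2_do_setattr(), and update the POSIX ACL when
 * the mode changed.
 */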
int jffs2_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		  struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	int rc;

	rc = setattr_prepare(&nop_mnt_idmap, dentry, iattr);
	if (rc)
		return rc;

	rc = jffs2_do_setattr(inode, iattr);
	if (!rc && (iattr->ia_valid & ATTR_MODE))
		rc = posix_acl_chmod(&nop_mnt_idmap, dentry, inode->i_mode);

	return rc;
}

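/*
 * VFS ->statfs(): sizes are reported in PAGE_SIZE blocks.  "Available"
 * space is dirty + free space minus the blocks reserved for writes
 * (resv_blocks_write), computed under the erase_completion_lock.
 */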
int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb);
	unsigned long avail;

	buf->f_type = JFFS2_SUPER_MAGIC;
	buf->f_bsize = 1 << PAGE_SHIFT;
	buf->f_blocks = c->flash_size >> PAGE_SHIFT;
	buf->f_files = 0;
	buf->f_ffree = 0;
	buf->f_namelen = JFFS2_MAX_NAME_LEN;
	buf->f_fsid.val[0] = JFFS2_SUPER_MAGIC;
	buf->f_fsid.val[1] = c->mtd->index;

	spin_lock(&c->erase_completion_lock);
	avail = c->dirty_size + c->free_size;
	if (avail > c->sector_size * c->resv_blocks_write)
		avail -= c->sector_size * c->resv_blocks_write;
	else
		avail = 0;
	spin_unlock(&c->erase_completion_lock);

	buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT;

	return 0;
}


void jffs2_evict_inode (struct inode *inode)
{
	/* We can forget about this inode for now - drop all
	 *  the nodelists associated with it, etc.
	 */
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);

	jffs2_dbg(1, "%s(): ino #%lu mode %o\n",
		  __func__, inode->i_ino, inode->i_mode);
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	jffs2_do_clear_inode(c, f);
}

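/*
 * Look up an inode by number.  For a freshly allocated (I_NEW) inode the
 * node chain is read from flash via jffs2_do_read_inode() and the VFS
 * fields and operations are set up according to the file type; device
 * node payloads are fetched from the metadata node to recover i_rdev.
 */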
struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
{
	struct jffs2_inode_info *f;
	struct jffs2_sb_info *c;
	struct jffs2_raw_inode latest_node;
	union jffs2_device_node jdev;
	struct inode *inode;
	dev_t rdev = 0;
	int ret;

	jffs2_dbg(1, "%s(): ino == %lu\n", __func__, ino);

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	f = JFFS2_INODE_INFO(inode);
	c = JFFS2_SB_INFO(inode->i_sb);

	jffs2_init_inode_info(f);
	mutex_lock(&f->sem);

	ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
	if (ret)
		goto error;

	inode->i_mode = jemode_to_cpu(latest_node.mode);
	i_uid_write(inode, je16_to_cpu(latest_node.uid));
	i_gid_write(inode, je16_to_cpu(latest_node.gid));
	inode->i_size = je32_to_cpu(latest_node.isize);
	inode_set_atime_to_ts(inode, ITIME(je32_to_cpu(latest_node.atime)));
	inode_set_mtime_to_ts(inode, ITIME(je32_to_cpu(latest_node.mtime)));
	inode_set_ctime_to_ts(inode, ITIME(je32_to_cpu(latest_node.ctime)));

	set_nlink(inode, f->inocache->pino_nlink);

	inode->i_blocks = (inode->i_size + 511) >> 9;

	switch (inode->i_mode & S_IFMT) {

	case S_IFLNK:
		inode->i_op = &jffs2_symlink_inode_operations;
		inode->i_link = f->target;
		break;

	case S_IFDIR:
	{
		struct jffs2_full_dirent *fd;
		set_nlink(inode, 2); /* parent and '.' */

		for (fd=f->dents; fd; fd = fd->next) {
			if (fd->type == DT_DIR && fd->ino)
				inc_nlink(inode);
		}
		/* Root dir gets i_nlink 3 for some reason */
		if (inode->i_ino == 1)
			inc_nlink(inode);

		inode->i_op = &jffs2_dir_inode_operations;
		inode->i_fop = &jffs2_dir_operations;
		break;
	}
	case S_IFREG:
		inode->i_op = &jffs2_file_inode_operations;
		inode->i_fop = &jffs2_file_operations;
		inode->i_mapping->a_ops = &jffs2_file_address_operations;
		inode->i_mapping->nrpages = 0;
		break;

	case S_IFBLK:
	case S_IFCHR:
		/* Read the device numbers from the media */
		if (f->metadata->size != sizeof(jdev.old_id) &&
		    f->metadata->size != sizeof(jdev.new_id)) {
			pr_notice("Device node has strange size %d\n",
				  f->metadata->size);
			goto error_io;
		}
		jffs2_dbg(1, "Reading device numbers from flash\n");
		ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size);
		if (ret < 0) {
			/* Eep */
			pr_notice("Read device numbers for inode %lu failed\n",
				  (unsigned long)inode->i_ino);
			goto error;
		}
		if (f->metadata->size == sizeof(jdev.old_id))
			rdev = old_decode_dev(je16_to_cpu(jdev.old_id));
		else
			rdev = new_decode_dev(je32_to_cpu(jdev.new_id));
		fallthrough;

	case S_IFSOCK:
	case S_IFIFO:
		inode->i_op = &jffs2_file_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;

	default:
		pr_warn("%s(): Bogus i_mode %o for ino %lu\n",
			__func__, inode->i_mode, (unsigned long)inode->i_ino);
	}

	mutex_unlock(&f->sem);

	jffs2_dbg(1, "jffs2_read_inode() returning\n");
	unlock_new_inode(inode);
	return inode;

error_io:
	ret = -EIO;
error:
	mutex_unlock(&f->sem);
	iget_failed(inode);
	return ERR_PTR(ret);
}

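/*
 * ->dirty_inode(): only I_DIRTY_DATASYNC changes are pushed to flash
 * here, by writing a new node carrying the current mode, ownership and
 * timestamps through jffs2_do_setattr().
 */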
void jffs2_dirty_inode(struct inode *inode, int flags)
{
	struct iattr iattr;

	if (!(inode->i_state & I_DIRTY_DATASYNC)) {
		jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n",
			  __func__, inode->i_ino);
		return;
	}

	jffs2_dbg(1, "%s(): calling setattr() for ino #%lu\n",
		  __func__, inode->i_ino);

	iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME;
	iattr.ia_mode = inode->i_mode;
	iattr.ia_uid = inode->i_uid;
	iattr.ia_gid = inode->i_gid;
	iattr.ia_atime = inode_get_atime(inode);
	iattr.ia_mtime = inode_get_mtime(inode);
	iattr.ia_ctime = inode_get_ctime(inode);

	jffs2_do_setattr(inode, &iattr);
}

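/*
 * Remount helper: a write-restricted medium (JFFS2_SB_FLAG_RO) cannot be
 * made writable.  While the filesystem is still writable the GC thread is
 * stopped and the write buffer flushed; the thread is started again if
 * the new mount is read-write.  JFFS2 mounts always get SB_NOATIME.
 */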
int jffs2_do_remount_fs(struct super_block *sb, struct fs_context *fc)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);

	if (c->flags & JFFS2_SB_FLAG_RO && !sb_rdonly(sb))
		return -EROFS;

	/* We stop if it was running, then restart if it needs to.
	   This also catches the case where it was stopped and this
	   is just a remount to restart it.
	   Flush the writebuffer, if necessary, else we lose it */
	if (!sb_rdonly(sb)) {
		jffs2_stop_garbage_collect_thread(c);
		mutex_lock(&c->alloc_sem);
		jffs2_flush_wbuf_pad(c);
		mutex_unlock(&c->alloc_sem);
	}

	if (!(fc->sb_flags & SB_RDONLY))
		jffs2_start_garbage_collect_thread(c);

	fc->sb_flags |= SB_NOATIME;
	return 0;
}

/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
   fill in the raw_inode while you're at it. */
struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_raw_inode *ri)
{
	struct inode *inode;
	struct super_block *sb = dir_i->i_sb;
	struct jffs2_sb_info *c;
	struct jffs2_inode_info *f;
	int ret;

	jffs2_dbg(1, "%s(): dir_i %ld, mode 0x%x\n",
		  __func__, dir_i->i_ino, mode);

	c = JFFS2_SB_INFO(sb);

	inode = new_inode(sb);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	f = JFFS2_INODE_INFO(inode);
	jffs2_init_inode_info(f);
	mutex_lock(&f->sem);

	memset(ri, 0, sizeof(*ri));
	/* Set OS-specific defaults for new inodes */
	ri->uid = cpu_to_je16(from_kuid(&init_user_ns, current_fsuid()));

	if (dir_i->i_mode & S_ISGID) {
		ri->gid = cpu_to_je16(i_gid_read(dir_i));
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		ri->gid = cpu_to_je16(from_kgid(&init_user_ns, current_fsgid()));
	}

	/* POSIX ACLs have to be processed now, at least partly.
	   The umask is only applied if there's no default ACL */
	ret = jffs2_init_acl_pre(dir_i, inode, &mode);
	if (ret) {
		mutex_unlock(&f->sem);
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(ret);
	}
	ret = jffs2_do_new_inode (c, f, mode, ri);
	if (ret) {
		mutex_unlock(&f->sem);
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(ret);
	}
	set_nlink(inode, 1);
	inode->i_ino = je32_to_cpu(ri->ino);
	inode->i_mode = jemode_to_cpu(ri->mode);
	i_gid_write(inode, je16_to_cpu(ri->gid));
	i_uid_write(inode, je16_to_cpu(ri->uid));
	simple_inode_init_ts(inode);
	ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode_get_mtime(inode)));

	inode->i_blocks = 0;
	inode->i_size = 0;

	if (insert_inode_locked(inode) < 0) {
		mutex_unlock(&f->sem);
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(-EINVAL);
	}

	return inode;
}

static int calculate_inocache_hashsize(uint32_t flash_size)
{
	/*
	 * Pick an inocache hash size based on the size of the medium.
	 * Count how many megabytes we're dealing with, apply a hashsize twice
	 * that size, but rounding down to the usual big powers of 2. And keep
	 * to sensible bounds.
	 */
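	/*
	 * Example: a 64 MiB medium gives size_mb = 64 and a raw hashsize of
	 * (64 * 2) & ~0x3f = 128, before clamping to the
	 * INOCACHE_HASHSIZE_MIN/INOCACHE_HASHSIZE_MAX bounds below.
	 */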

	int size_mb = flash_size / 1024 / 1024;
	int hashsize = (size_mb * 2) & ~0x3f;

	if (hashsize < INOCACHE_HASHSIZE_MIN)
		return INOCACHE_HASHSIZE_MIN;
	if (hashsize > INOCACHE_HASHSIZE_MAX)
		return INOCACHE_HASHSIZE_MAX;

	return hashsize;
}

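/*
 * Fill in the superblock: validate the MTD device (MLC NAND is rejected;
 * NAND and DataFlash require the write-buffer code), compute flash and
 * erase-block geometry, set up the inocache hash and xattr subsystem,
 * mount the filesystem, read the root inode (#1), and start the garbage
 * collector for read-write mounts.
 */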
int jffs2_do_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct jffs2_sb_info *c;
	struct inode *root_i;
	int ret;
	size_t blocks;

	c = JFFS2_SB_INFO(sb);

	/* Do not support MLC NAND */
	if (c->mtd->type == MTD_MLCNANDFLASH)
		return -EINVAL;

#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
	if (c->mtd->type == MTD_NANDFLASH) {
		errorf(fc, "Cannot operate on NAND flash unless jffs2 NAND support is compiled in");
		return -EINVAL;
	}
	if (c->mtd->type == MTD_DATAFLASH) {
		errorf(fc, "Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in");
		return -EINVAL;
	}
#endif

	c->flash_size = c->mtd->size;
	c->sector_size = c->mtd->erasesize;
	blocks = c->flash_size / c->sector_size;

	/*
	 * Size alignment check
	 */
	if ((c->sector_size * blocks) != c->flash_size) {
		c->flash_size = c->sector_size * blocks;
		infof(fc, "Flash size not aligned to erasesize, reducing to %dKiB",
		      c->flash_size / 1024);
	}

	if (c->flash_size < 5*c->sector_size) {
		errorf(fc, "Too few erase blocks (%d)",
		       c->flash_size / c->sector_size);
		return -EINVAL;
	}

	c->cleanmarker_size = sizeof(struct jffs2_unknown_node);

	/* NAND (or other bizarre) flash... do setup accordingly */
	ret = jffs2_flash_setup(c);
	if (ret)
		return ret;

	c->inocache_hashsize = calculate_inocache_hashsize(c->flash_size);
	c->inocache_list = kcalloc(c->inocache_hashsize, sizeof(struct jffs2_inode_cache *), GFP_KERNEL);
	if (!c->inocache_list) {
		ret = -ENOMEM;
		goto out_wbuf;
	}

	jffs2_init_xattr_subsystem(c);

	if ((ret = jffs2_do_mount_fs(c)))
		goto out_inohash;

	jffs2_dbg(1, "%s(): Getting root inode\n", __func__);
	root_i = jffs2_iget(sb, 1);
	if (IS_ERR(root_i)) {
		jffs2_dbg(1, "get root inode failed\n");
		ret = PTR_ERR(root_i);
		goto out_root;
	}

	ret = -ENOMEM;

	jffs2_dbg(1, "%s(): d_make_root()\n", __func__);
	sb->s_root = d_make_root(root_i);
	if (!sb->s_root)
		goto out_root;

	sb->s_maxbytes = 0xFFFFFFFF;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = JFFS2_SUPER_MAGIC;
	sb->s_time_min = 0;
	sb->s_time_max = U32_MAX;

	if (!sb_rdonly(sb))
		jffs2_start_garbage_collect_thread(c);
	return 0;

out_root:
	jffs2_free_ino_caches(c);
	jffs2_free_raw_node_refs(c);
	kvfree(c->blocks);
	jffs2_clear_xattr_subsystem(c);
	jffs2_sum_exit(c);
 out_inohash:
	kfree(c->inocache_list);
 out_wbuf:
	jffs2_flash_cleanup(c);

	return ret;
}

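/* Drop the inode reference taken by jffs2_gc_fetch_inode() below. */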
void jffs2_gc_release_inode(struct jffs2_sb_info *c,
				   struct jffs2_inode_info *f)
{
	iput(OFNI_EDONI_2SFFJ(f));
}

struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
					      int inum, int unlinked)
{
	struct inode *inode;
	struct jffs2_inode_cache *ic;

	if (unlinked) {
		/* The inode has zero nlink but its nodes weren't yet marked
		   obsolete. This has to be because we're still waiting for
		   the final (close() and) iput() to happen.

		   There's a possibility that the final iput() could have
		   happened while we were contemplating. In order to ensure
		   that we don't cause a new read_inode() (which would fail)
		   for the inode in question, we use ilookup() in this case
		   instead of iget().

		   The nlink can't _become_ zero at this point because we're
		   holding the alloc_sem, and jffs2_do_unlink() would also
		   need that while decrementing nlink on any inode.
		*/
		inode = ilookup(OFNI_BS_2SFFJ(c), inum);
		if (!inode) {
			jffs2_dbg(1, "ilookup() failed for ino #%u; inode is probably deleted.\n",
				  inum);

			spin_lock(&c->inocache_lock);
			ic = jffs2_get_ino_cache(c, inum);
			if (!ic) {
				jffs2_dbg(1, "Inode cache for ino #%u is gone\n",
					  inum);
				spin_unlock(&c->inocache_lock);
				return NULL;
			}
			if (ic->state != INO_STATE_CHECKEDABSENT) {
				/* Wait for progress. Don't just loop */
				jffs2_dbg(1, "Waiting for ino #%u in state %d\n",
					  ic->ino, ic->state);
				sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			} else {
				spin_unlock(&c->inocache_lock);
			}

			return NULL;
		}
	} else {
		/* Inode has links to it still; they're not going away because
		   jffs2_do_unlink() would need the alloc_sem and we have it.
		   Just iget() it, and if read_inode() is necessary that's OK.
		*/
		inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum);
		if (IS_ERR(inode))
			return ERR_CAST(inode);
	}
	if (is_bad_inode(inode)) {
		pr_notice("Eep. read_inode() failed for ino #%u. unlinked %d\n",
			  inum, unlinked);
		/* NB. This will happen again. We need to do something appropriate here. */
		iput(inode);
		return ERR_PTR(-EIO);
	}

	return JFFS2_INODE_INFO(inode);
}

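/*
 * Per-flash-type setup: NAND (cleanmarker in OOB), DataFlash, Intel
 * "Sibley" NOR and UBI volumes each get their own setup hook before the
 * filesystem is mounted.
 */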
static int jffs2_flash_setup(struct jffs2_sb_info *c) {
	int ret = 0;

	if (jffs2_cleanmarker_oob(c)) {
		/* NAND flash... do setup accordingly */
		ret = jffs2_nand_flash_setup(c);
		if (ret)
			return ret;
	}

	/* and Dataflash */
	if (jffs2_dataflash(c)) {
		ret = jffs2_dataflash_setup(c);
		if (ret)
			return ret;
	}

	/* and Intel "Sibley" flash */
	if (jffs2_nor_wbuf_flash(c)) {
		ret = jffs2_nor_wbuf_flash_setup(c);
		if (ret)
			return ret;
	}

	/* and a UBI volume */
	if (jffs2_ubivol(c)) {
		ret = jffs2_ubivol_setup(c);
		if (ret)
			return ret;
	}

	return ret;
}

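/* Undo whatever jffs2_flash_setup() did for this flash type. */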
void jffs2_flash_cleanup(struct jffs2_sb_info *c) {

	if (jffs2_cleanmarker_oob(c)) {
		jffs2_nand_flash_cleanup(c);
	}

	/* and DataFlash */
	if (jffs2_dataflash(c)) {
		jffs2_dataflash_cleanup(c);
	}

	/* and Intel "Sibley" flash */
	if (jffs2_nor_wbuf_flash(c)) {
		jffs2_nor_wbuf_flash_cleanup(c);
	}

	/* and a UBI volume */
	if (jffs2_ubivol(c)) {
		jffs2_ubivol_cleanup(c);
	}
}