Linux Audio

Check our new training course

Linux debugging, profiling, tracing and performance analysis training

Mar 24-27, 2025, special US time zones
Register
Loading...
v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2017-2018 HUAWEI, Inc.
  4 *             https://www.huawei.com/
  5 * Copyright (C) 2021, Alibaba Cloud
  6 */
  7#include "xattr.h"
  8
  9#include <trace/events/erofs.h>
 10
/*
 * Read and parse the on-disk inode of @inode, which is either the compact
 * or the extended on-disk format.  On success, return the mapped metadata
 * buffer address (kept alive in @buf so the caller can pick up trailing
 * inline data) and set *ofs to the byte offset just past the on-disk inode
 * within that buffer; on failure, return an ERR_PTR.
 */
static void *erofs_read_inode(struct erofs_buf *buf,
			      struct inode *inode, unsigned int *ofs)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_inode *vi = EROFS_I(inode);
	/* byte address of the on-disk inode, derived from its nid */
	const erofs_off_t inode_loc = iloc(sbi, vi->nid);

	erofs_blk_t blkaddr, nblks = 0;
	void *kaddr;
	struct erofs_inode_compact *dic;
	struct erofs_inode_extended *die, *copied = NULL;
	unsigned int ifmt;
	int err;

	blkaddr = erofs_blknr(inode_loc);
	*ofs = erofs_blkoff(inode_loc);

	erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
		  __func__, vi->nid, *ofs, blkaddr);

	kaddr = erofs_read_metabuf(buf, sb, blkaddr, EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
			  vi->nid, PTR_ERR(kaddr));
		return kaddr;
	}

	/* the compact format is a prefix of the extended one, so peek at it */
	dic = kaddr + *ofs;
	ifmt = le16_to_cpu(dic->i_format);

	if (ifmt & ~EROFS_I_ALL) {
		erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu",
			  ifmt, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	vi->datalayout = erofs_inode_datalayout(ifmt);
	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
		erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
			  vi->datalayout, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	switch (erofs_inode_version(ifmt)) {
	case EROFS_INODE_LAYOUT_EXTENDED:
		vi->inode_isize = sizeof(struct erofs_inode_extended);
		/* check if the extended inode crosses a block boundary */
		if (*ofs + vi->inode_isize <= EROFS_BLKSIZ) {
			*ofs += vi->inode_isize;
			die = (struct erofs_inode_extended *)dic;
		} else {
			/*
			 * The inode straddles two blocks: stitch both halves
			 * together into a temporary heap copy.
			 */
			const unsigned int gotten = EROFS_BLKSIZ - *ofs;

			copied = kmalloc(vi->inode_isize, GFP_NOFS);
			if (!copied) {
				err = -ENOMEM;
				goto err_out;
			}
			memcpy(copied, dic, gotten);
			kaddr = erofs_read_metabuf(buf, sb, blkaddr + 1,
						   EROFS_KMAP);
			if (IS_ERR(kaddr)) {
				erofs_err(sb, "failed to get inode payload block (nid: %llu), err %ld",
					  vi->nid, PTR_ERR(kaddr));
				kfree(copied);
				return kaddr;
			}
			/* *ofs now ends up past the inode in the 2nd block */
			*ofs = vi->inode_isize - gotten;
			memcpy((u8 *)copied + gotten, kaddr, *ofs);
			die = copied;
		}
		vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);

		inode->i_mode = le16_to_cpu(die->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(die->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le32_to_cpu(die->i_uid));
		i_gid_write(inode, le32_to_cpu(die->i_gid));
		set_nlink(inode, le32_to_cpu(die->i_nlink));

		/* extended inode has its own timestamp */
		inode->i_ctime.tv_sec = le64_to_cpu(die->i_mtime);
		inode->i_ctime.tv_nsec = le32_to_cpu(die->i_mtime_nsec);

		inode->i_size = le64_to_cpu(die->i_size);

		/* total blocks for compressed files */
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(die->i_u.compressed_blocks);
		else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
			/* fill chunked inode summary info */
			vi->chunkformat = le16_to_cpu(die->i_u.c.format);
		/* drop the temporary copy (if any) now that parsing is done */
		kfree(copied);
		copied = NULL;
		break;
	case EROFS_INODE_LAYOUT_COMPACT:
		vi->inode_isize = sizeof(struct erofs_inode_compact);
		*ofs += vi->inode_isize;
		vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);

		inode->i_mode = le16_to_cpu(dic->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(dic->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le16_to_cpu(dic->i_uid));
		i_gid_write(inode, le16_to_cpu(dic->i_gid));
		set_nlink(inode, le16_to_cpu(dic->i_nlink));

		/* use build time for compact inodes */
		inode->i_ctime.tv_sec = sbi->build_time;
		inode->i_ctime.tv_nsec = sbi->build_time_nsec;

		inode->i_size = le32_to_cpu(dic->i_size);
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(dic->i_u.compressed_blocks);
		else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
			vi->chunkformat = le16_to_cpu(dic->i_u.c.format);
		break;
	default:
		erofs_err(inode->i_sb,
			  "unsupported on-disk inode version %u of nid %llu",
			  erofs_inode_version(ifmt), vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
		if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
			erofs_err(inode->i_sb,
				  "unsupported chunk format %x of nid %llu",
				  vi->chunkformat, vi->nid);
			err = -EOPNOTSUPP;
			goto err_out;
		}
		/* chunk size is a power-of-two multiple of the block size */
		vi->chunkbits = LOG_BLOCK_SIZE +
			(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
	}
	/* mirror the single on-disk timestamp into mtime/atime as well */
	inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
	inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
	inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;
	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec;

	/* DAX is only honoured for uncompressed flat regular files */
	inode->i_flags &= ~S_DAX;
	if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
	    vi->datalayout == EROFS_INODE_FLAT_PLAIN)
		inode->i_flags |= S_DAX;
	if (!nblks)
		/* measure inode.i_blocks as generic filesystems */
		inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
	else
		inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
	return kaddr;

bogusimode:
	erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
		  inode->i_mode, vi->nid);
	err = -EFSCORRUPTED;
err_out:
	DBG_BUGON(1);
	kfree(copied);
	erofs_put_metabuf(buf);
	return ERR_PTR(err);
}
208
209static int erofs_fill_symlink(struct inode *inode, void *kaddr,
210			      unsigned int m_pofs)
211{
212	struct erofs_inode *vi = EROFS_I(inode);
213	char *lnk;
214
215	/* if it cannot be handled with fast symlink scheme */
216	if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
217	    inode->i_size >= EROFS_BLKSIZ || inode->i_size < 0) {
218		inode->i_op = &erofs_symlink_iops;
219		return 0;
220	}
221
222	lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
223	if (!lnk)
224		return -ENOMEM;
225
226	m_pofs += vi->xattr_isize;
227	/* inline symlink data shouldn't cross block boundary */
228	if (m_pofs + inode->i_size > EROFS_BLKSIZ) {
229		kfree(lnk);
230		erofs_err(inode->i_sb,
231			  "inline data cross block boundary @ nid %llu",
232			  vi->nid);
233		DBG_BUGON(1);
234		return -EFSCORRUPTED;
235	}
236	memcpy(lnk, kaddr + m_pofs, inode->i_size);
 
237	lnk[inode->i_size] = '\0';
238
239	inode->i_link = lnk;
240	inode->i_op = &erofs_fast_symlink_iops;
241	return 0;
242}
243
/*
 * Initialize a freshly-allocated in-memory inode from its on-disk data:
 * read and parse the on-disk inode, then wire up the per-type inode,
 * file and address-space operations.  Returns 0 or a negative errno.
 */
static int erofs_fill_inode(struct inode *inode)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	void *kaddr;
	unsigned int ofs;
	int err = 0;

	trace_erofs_fill_inode(inode);

	/* read inode base data from disk */
	kaddr = erofs_read_inode(&buf, inode, &ofs);
	if (IS_ERR(kaddr))
		return PTR_ERR(kaddr);

	/* setup the new inode */
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &erofs_generic_iops;
		if (erofs_inode_is_data_compressed(vi->datalayout))
			inode->i_fop = &generic_ro_fops;
		else
			inode->i_fop = &erofs_file_fops;
		break;
	case S_IFDIR:
		inode->i_op = &erofs_dir_iops;
		inode->i_fop = &erofs_dir_fops;
		inode_nohighmem(inode);
		break;
	case S_IFLNK:
		/* ofs points at the inline area following the on-disk inode */
		err = erofs_fill_symlink(inode, kaddr, ofs);
		if (err)
			goto out_unlock;
		inode_nohighmem(inode);
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		/* special inodes need no address space setup */
		inode->i_op = &erofs_generic_iops;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		goto out_unlock;
	default:
		err = -EFSCORRUPTED;
		goto out_unlock;
	}

	if (erofs_inode_is_data_compressed(vi->datalayout)) {
		/* compressed data is not supported in fscache mode here */
		if (!erofs_is_fscache_mode(inode->i_sb))
			err = z_erofs_fill_inode(inode);
		else
			err = -EOPNOTSUPP;
		goto out_unlock;
	}
	inode->i_mapping->a_ops = &erofs_raw_access_aops;
	mapping_set_large_folios(inode->i_mapping);
#ifdef CONFIG_EROFS_FS_ONDEMAND
	/* on-demand (fscache) mode overrides the raw block a_ops */
	if (erofs_is_fscache_mode(inode->i_sb))
		inode->i_mapping->a_ops = &erofs_fscache_access_aops;
#endif

out_unlock:
	erofs_put_metabuf(&buf);
	return err;
}
309
310/*
311 * erofs nid is 64bits, but i_ino is 'unsigned long', therefore
312 * we should do more for 32-bit platform to find the right inode.
313 */
314static int erofs_ilookup_test_actor(struct inode *inode, void *opaque)
315{
316	const erofs_nid_t nid = *(erofs_nid_t *)opaque;
317
318	return EROFS_I(inode)->nid == nid;
319}
320
321static int erofs_iget_set_actor(struct inode *inode, void *opaque)
322{
323	const erofs_nid_t nid = *(erofs_nid_t *)opaque;
324
325	inode->i_ino = erofs_inode_hash(nid);
326	return 0;
327}
328
329struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid)
 
330{
331	const unsigned long hashval = erofs_inode_hash(nid);
332	struct inode *inode;
333
334	inode = iget5_locked(sb, hashval, erofs_ilookup_test_actor,
335		erofs_iget_set_actor, &nid);
 
 
 
 
 
 
 
 
336	if (!inode)
337		return ERR_PTR(-ENOMEM);
338
339	if (inode->i_state & I_NEW) {
340		int err;
341		struct erofs_inode *vi = EROFS_I(inode);
342
343		vi->nid = nid;
344
345		err = erofs_fill_inode(inode);
346		if (!err) {
347			unlock_new_inode(inode);
348		} else {
349			iget_failed(inode);
350			inode = ERR_PTR(err);
351		}
352	}
353	return inode;
354}
355
356int erofs_getattr(struct user_namespace *mnt_userns, const struct path *path,
357		  struct kstat *stat, u32 request_mask,
358		  unsigned int query_flags)
359{
360	struct inode *const inode = d_inode(path->dentry);
361
362	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
363		stat->attributes |= STATX_ATTR_COMPRESSED;
364
365	stat->attributes |= STATX_ATTR_IMMUTABLE;
366	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
367				  STATX_ATTR_IMMUTABLE);
368
369	generic_fillattr(mnt_userns, inode, stat);
370	return 0;
371}
372
/* inode operations used for regular files and special inodes */
const struct inode_operations erofs_generic_iops = {
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
	.fiemap = erofs_fiemap,
};
379
/* symlinks whose target is read through the page cache */
const struct inode_operations erofs_symlink_iops = {
	.get_link = page_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
};
386
/* fast symlinks: target already cached in inode->i_link */
const struct inode_operations erofs_fast_symlink_iops = {
	.get_link = simple_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
};
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2017-2018 HUAWEI, Inc.
  4 *             https://www.huawei.com/
 
  5 */
  6#include "xattr.h"
  7
  8#include <trace/events/erofs.h>
  9
/*
 * If the inode is successfully read, return its inode page (or sometimes
 * the inode payload page if it's an extended inode) in order to fill
 * inline data if possible.  The returned page is still locked; the caller
 * must unlock_page() and put_page() it.  On failure an ERR_PTR is
 * returned and no page is held.
 */
static struct page *erofs_read_inode(struct inode *inode,
				     unsigned int *ofs)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_inode *vi = EROFS_I(inode);
	/* byte address of the on-disk inode, derived from its nid */
	const erofs_off_t inode_loc = iloc(sbi, vi->nid);

	erofs_blk_t blkaddr, nblks = 0;
	struct page *page;
	struct erofs_inode_compact *dic;
	struct erofs_inode_extended *die, *copied = NULL;
	unsigned int ifmt;
	int err;

	blkaddr = erofs_blknr(inode_loc);
	*ofs = erofs_blkoff(inode_loc);

	erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
		  __func__, vi->nid, *ofs, blkaddr);

	page = erofs_get_meta_page(sb, blkaddr);
	if (IS_ERR(page)) {
		erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
			  vi->nid, PTR_ERR(page));
		return page;
	}

	/* the compact format is a prefix of the extended one, so peek at it */
	dic = page_address(page) + *ofs;
	ifmt = le16_to_cpu(dic->i_format);

	if (ifmt & ~EROFS_I_ALL) {
		erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu",
			  ifmt, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	vi->datalayout = erofs_inode_datalayout(ifmt);
	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
		erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
			  vi->datalayout, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	switch (erofs_inode_version(ifmt)) {
	case EROFS_INODE_LAYOUT_EXTENDED:
		vi->inode_isize = sizeof(struct erofs_inode_extended);
		/* check if the inode crosses a page boundary */
		if (*ofs + vi->inode_isize <= PAGE_SIZE) {
			*ofs += vi->inode_isize;
			die = (struct erofs_inode_extended *)dic;
		} else {
			/*
			 * The inode straddles two pages: stitch both halves
			 * together into a temporary heap copy.
			 */
			const unsigned int gotten = PAGE_SIZE - *ofs;

			copied = kmalloc(vi->inode_isize, GFP_NOFS);
			if (!copied) {
				err = -ENOMEM;
				goto err_out;
			}
			memcpy(copied, dic, gotten);
			/* release the first page before grabbing the next */
			unlock_page(page);
			put_page(page);

			page = erofs_get_meta_page(sb, blkaddr + 1);
			if (IS_ERR(page)) {
				erofs_err(sb, "failed to get inode payload page (nid: %llu), err %ld",
					  vi->nid, PTR_ERR(page));
				kfree(copied);
				return page;
			}
			/* *ofs now ends up past the inode in the 2nd page */
			*ofs = vi->inode_isize - gotten;
			memcpy((u8 *)copied + gotten, page_address(page), *ofs);
			die = copied;
		}
		vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);

		inode->i_mode = le16_to_cpu(die->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(die->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le32_to_cpu(die->i_uid));
		i_gid_write(inode, le32_to_cpu(die->i_gid));
		set_nlink(inode, le32_to_cpu(die->i_nlink));

		/* extended inode has its own timestamp */
		inode->i_ctime.tv_sec = le64_to_cpu(die->i_ctime);
		inode->i_ctime.tv_nsec = le32_to_cpu(die->i_ctime_nsec);

		inode->i_size = le64_to_cpu(die->i_size);

		/* total blocks for compressed files */
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(die->i_u.compressed_blocks);

		/* drop the temporary copy (if any) now that parsing is done */
		kfree(copied);
		break;
	case EROFS_INODE_LAYOUT_COMPACT:
		vi->inode_isize = sizeof(struct erofs_inode_compact);
		*ofs += vi->inode_isize;
		vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);

		inode->i_mode = le16_to_cpu(dic->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(dic->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le16_to_cpu(dic->i_uid));
		i_gid_write(inode, le16_to_cpu(dic->i_gid));
		set_nlink(inode, le16_to_cpu(dic->i_nlink));

		/* use build time for compact inodes */
		inode->i_ctime.tv_sec = sbi->build_time;
		inode->i_ctime.tv_nsec = sbi->build_time_nsec;

		inode->i_size = le32_to_cpu(dic->i_size);
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(dic->i_u.compressed_blocks);
		break;
	default:
		erofs_err(inode->i_sb,
			  "unsupported on-disk inode version %u of nid %llu",
			  erofs_inode_version(ifmt), vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	/* mirror the single on-disk timestamp into mtime/atime as well */
	inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
	inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
	inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;
	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec;

	if (!nblks)
		/* measure inode.i_blocks as generic filesystems */
		inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
	else
		inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
	return page;

bogusimode:
	erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
		  inode->i_mode, vi->nid);
	err = -EFSCORRUPTED;
err_out:
	DBG_BUGON(1);
	kfree(copied);
	unlock_page(page);
	put_page(page);
	return ERR_PTR(err);
}
195
196static int erofs_fill_symlink(struct inode *inode, void *data,
197			      unsigned int m_pofs)
198{
199	struct erofs_inode *vi = EROFS_I(inode);
200	char *lnk;
201
202	/* if it cannot be handled with fast symlink scheme */
203	if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
204	    inode->i_size >= PAGE_SIZE) {
205		inode->i_op = &erofs_symlink_iops;
206		return 0;
207	}
208
209	lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
210	if (!lnk)
211		return -ENOMEM;
212
213	m_pofs += vi->xattr_isize;
214	/* inline symlink data shouldn't cross page boundary as well */
215	if (m_pofs + inode->i_size > PAGE_SIZE) {
216		kfree(lnk);
217		erofs_err(inode->i_sb,
218			  "inline data cross block boundary @ nid %llu",
219			  vi->nid);
220		DBG_BUGON(1);
221		return -EFSCORRUPTED;
222	}
223
224	memcpy(lnk, data + m_pofs, inode->i_size);
225	lnk[inode->i_size] = '\0';
226
227	inode->i_link = lnk;
228	inode->i_op = &erofs_fast_symlink_iops;
229	return 0;
230}
231
/*
 * Initialize a freshly-allocated in-memory inode from its on-disk data:
 * read and parse the on-disk inode, then wire up the per-type inode,
 * file and address-space operations.  Returns 0 or a negative errno.
 */
static int erofs_fill_inode(struct inode *inode, int isdir)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct page *page;
	unsigned int ofs;
	int err = 0;

	trace_erofs_fill_inode(inode, isdir);

	/* read inode base data from disk; page comes back locked */
	page = erofs_read_inode(inode, &ofs);
	if (IS_ERR(page))
		return PTR_ERR(page);

	/* setup the new inode */
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &erofs_generic_iops;
		inode->i_fop = &generic_ro_fops;
		break;
	case S_IFDIR:
		inode->i_op = &erofs_dir_iops;
		inode->i_fop = &erofs_dir_fops;
		break;
	case S_IFLNK:
		/* ofs points at the inline area following the on-disk inode */
		err = erofs_fill_symlink(inode, page_address(page), ofs);
		if (err)
			goto out_unlock;
		inode_nohighmem(inode);
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		/* special inodes need no address space setup */
		inode->i_op = &erofs_generic_iops;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		goto out_unlock;
	default:
		err = -EFSCORRUPTED;
		goto out_unlock;
	}

	if (erofs_inode_is_data_compressed(vi->datalayout)) {
		err = z_erofs_fill_inode(inode);
		goto out_unlock;
	}
	inode->i_mapping->a_ops = &erofs_raw_access_aops;

out_unlock:
	unlock_page(page);
	put_page(page);
	return err;
}
285
286/*
287 * erofs nid is 64bits, but i_ino is 'unsigned long', therefore
288 * we should do more for 32-bit platform to find the right inode.
289 */
290static int erofs_ilookup_test_actor(struct inode *inode, void *opaque)
291{
292	const erofs_nid_t nid = *(erofs_nid_t *)opaque;
293
294	return EROFS_I(inode)->nid == nid;
295}
296
297static int erofs_iget_set_actor(struct inode *inode, void *opaque)
298{
299	const erofs_nid_t nid = *(erofs_nid_t *)opaque;
300
301	inode->i_ino = erofs_inode_hash(nid);
302	return 0;
303}
304
305static inline struct inode *erofs_iget_locked(struct super_block *sb,
306					      erofs_nid_t nid)
307{
308	const unsigned long hashval = erofs_inode_hash(nid);
 
309
310	return iget5_locked(sb, hashval, erofs_ilookup_test_actor,
311		erofs_iget_set_actor, &nid);
312}
313
314struct inode *erofs_iget(struct super_block *sb,
315			 erofs_nid_t nid,
316			 bool isdir)
317{
318	struct inode *inode = erofs_iget_locked(sb, nid);
319
320	if (!inode)
321		return ERR_PTR(-ENOMEM);
322
323	if (inode->i_state & I_NEW) {
324		int err;
325		struct erofs_inode *vi = EROFS_I(inode);
326
327		vi->nid = nid;
328
329		err = erofs_fill_inode(inode, isdir);
330		if (!err)
331			unlock_new_inode(inode);
332		else {
333			iget_failed(inode);
334			inode = ERR_PTR(err);
335		}
336	}
337	return inode;
338}
339
340int erofs_getattr(struct user_namespace *mnt_userns, const struct path *path,
341		  struct kstat *stat, u32 request_mask,
342		  unsigned int query_flags)
343{
344	struct inode *const inode = d_inode(path->dentry);
345
346	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
347		stat->attributes |= STATX_ATTR_COMPRESSED;
348
349	stat->attributes |= STATX_ATTR_IMMUTABLE;
350	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
351				  STATX_ATTR_IMMUTABLE);
352
353	generic_fillattr(&init_user_ns, inode, stat);
354	return 0;
355}
356
/* inode operations used for regular files and special inodes */
const struct inode_operations erofs_generic_iops = {
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_acl = erofs_get_acl,
};
362
/* symlinks whose target is read through the page cache */
const struct inode_operations erofs_symlink_iops = {
	.get_link = page_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_acl = erofs_get_acl,
};
369
/* fast symlinks: target already cached in inode->i_link */
const struct inode_operations erofs_fast_symlink_iops = {
	.get_link = simple_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_acl = erofs_get_acl,
};