fs/erofs/inode.c (v6.8)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "xattr.h"

#include <trace/events/erofs.h>

static void *erofs_read_inode(struct erofs_buf *buf,
			      struct inode *inode, unsigned int *ofs)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_inode *vi = EROFS_I(inode);
	const erofs_off_t inode_loc = erofs_iloc(inode);
	erofs_blk_t blkaddr, nblks = 0;
	void *kaddr;
	struct erofs_inode_compact *dic;
	struct erofs_inode_extended *die, *copied = NULL;
	union erofs_inode_i_u iu;
	unsigned int ifmt;
	int err;

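	/*
	 * Worked example, assuming a 4KiB block size: erofs_blknr() and
	 * erofs_blkoff() split inode_loc into a block number and an
	 * intra-block byte offset, so inode_loc 0x1020 gives blkaddr 1
	 * and *ofs 0x20.
	 */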
	blkaddr = erofs_blknr(sb, inode_loc);
	*ofs = erofs_blkoff(sb, inode_loc);

	kaddr = erofs_read_metabuf(buf, sb, blkaddr, EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
			  vi->nid, PTR_ERR(kaddr));
		return kaddr;
	}

	dic = kaddr + *ofs;
	ifmt = le16_to_cpu(dic->i_format);
	if (ifmt & ~EROFS_I_ALL) {
		erofs_err(sb, "unsupported i_format %u of nid %llu",
			  ifmt, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	vi->datalayout = erofs_inode_datalayout(ifmt);
	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
		erofs_err(sb, "unsupported datalayout %u of nid %llu",
			  vi->datalayout, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	switch (erofs_inode_version(ifmt)) {
	case EROFS_INODE_LAYOUT_EXTENDED:
		vi->inode_isize = sizeof(struct erofs_inode_extended);
		/* check if the extended inode crosses the block boundary */
		if (*ofs + vi->inode_isize <= sb->s_blocksize) {
			*ofs += vi->inode_isize;
			die = (struct erofs_inode_extended *)dic;
		} else {
			const unsigned int gotten = sb->s_blocksize - *ofs;

			copied = kmalloc(vi->inode_isize, GFP_KERNEL);
			if (!copied) {
				err = -ENOMEM;
				goto err_out;
			}
			memcpy(copied, dic, gotten);
			kaddr = erofs_read_metabuf(buf, sb, blkaddr + 1,
						   EROFS_KMAP);
			if (IS_ERR(kaddr)) {
				erofs_err(sb, "failed to get inode payload block (nid: %llu), err %ld",
					  vi->nid, PTR_ERR(kaddr));
				kfree(copied);
				return kaddr;
			}
			*ofs = vi->inode_isize - gotten;
			memcpy((u8 *)copied + gotten, kaddr, *ofs);
			die = copied;
		}
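		/*
		 * Worked example, assuming 4KiB blocks and the 64-byte
		 * extended inode: with *ofs == 4064, gotten == 32, so the
		 * first half is copied from this block and the remaining
		 * 32 bytes from block blkaddr + 1, leaving *ofs == 32.
		 */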
		vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);

		inode->i_mode = le16_to_cpu(die->i_mode);
		iu = die->i_u;
		i_uid_write(inode, le32_to_cpu(die->i_uid));
		i_gid_write(inode, le32_to_cpu(die->i_gid));
		set_nlink(inode, le32_to_cpu(die->i_nlink));
		/* each extended inode has its own timestamp */
		inode_set_ctime(inode, le64_to_cpu(die->i_mtime),
				le32_to_cpu(die->i_mtime_nsec));

		inode->i_size = le64_to_cpu(die->i_size);
		kfree(copied);
		copied = NULL;
		break;
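	/*
	 * Compact (32-byte) inodes carry 16-bit uid/gid/nlink fields and
	 * no per-inode timestamp; the superblock build time is used instead.
	 */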
	case EROFS_INODE_LAYOUT_COMPACT:
		vi->inode_isize = sizeof(struct erofs_inode_compact);
		*ofs += vi->inode_isize;
		vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);

		inode->i_mode = le16_to_cpu(dic->i_mode);
		iu = dic->i_u;
		i_uid_write(inode, le16_to_cpu(dic->i_uid));
		i_gid_write(inode, le16_to_cpu(dic->i_gid));
		set_nlink(inode, le16_to_cpu(dic->i_nlink));
		/* use build time for compact inodes */
		inode_set_ctime(inode, sbi->build_time, sbi->build_time_nsec);

		inode->i_size = le32_to_cpu(dic->i_size);
		break;
	default:
		erofs_err(sb, "unsupported on-disk inode version %u of nid %llu",
			  erofs_inode_version(ifmt), vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		vi->raw_blkaddr = le32_to_cpu(iu.raw_blkaddr);
		break;
	case S_IFCHR:
	case S_IFBLK:
		inode->i_rdev = new_decode_dev(le32_to_cpu(iu.rdev));
		break;
	case S_IFIFO:
	case S_IFSOCK:
		inode->i_rdev = 0;
		break;
	default:
		erofs_err(sb, "bogus i_mode (%o) @ nid %llu", inode->i_mode,
			  vi->nid);
		err = -EFSCORRUPTED;
		goto err_out;
	}

	/* total blocks for compressed files */
	if (erofs_inode_is_data_compressed(vi->datalayout)) {
		nblks = le32_to_cpu(iu.compressed_blocks);
	} else if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
		/* fill chunked inode summary info */
		vi->chunkformat = le16_to_cpu(iu.c.format);
		if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
			erofs_err(sb, "unsupported chunk format %x of nid %llu",
				  vi->chunkformat, vi->nid);
			err = -EOPNOTSUPP;
			goto err_out;
		}
		vi->chunkbits = sb->s_blocksize_bits +
			(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
	}
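	/*
	 * Worked example, assuming 4KiB blocks: chunk-format low bits of 2
	 * give chunkbits = 12 + 2 = 14, i.e. 16KiB chunks.
	 */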
	inode_set_mtime_to_ts(inode,
			      inode_set_atime_to_ts(inode, inode_get_ctime(inode)));

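	/*
	 * With the DAX_ALWAYS mount option, only uncompressed flat or
	 * chunk-based regular files are mapped with S_DAX.
	 */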
	inode->i_flags &= ~S_DAX;
	if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
	    (vi->datalayout == EROFS_INODE_FLAT_PLAIN ||
	     vi->datalayout == EROFS_INODE_CHUNK_BASED))
		inode->i_flags |= S_DAX;

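	/*
	 * Worked example, assuming 4KiB blocks: an uncompressed 5000-byte
	 * file rounds up to 8192 bytes, i.e. 16 512-byte sectors; a
	 * compressed file with nblks == 3 gets 3 << (12 - 9) == 24 sectors.
	 */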
	if (!nblks)
		/* measure inode.i_blocks like generic filesystems do */
		inode->i_blocks = round_up(inode->i_size, sb->s_blocksize) >> 9;
	else
		inode->i_blocks = nblks << (sb->s_blocksize_bits - 9);
	return kaddr;

err_out:
	DBG_BUGON(1);
	kfree(copied);
	erofs_put_metabuf(buf);
	return ERR_PTR(err);
}

static int erofs_fill_symlink(struct inode *inode, void *kaddr,
			      unsigned int m_pofs)
{
	struct erofs_inode *vi = EROFS_I(inode);
	unsigned int bsz = i_blocksize(inode);
	char *lnk;

	/* if it cannot be handled with the fast symlink scheme */
	if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
	    inode->i_size >= bsz || inode->i_size < 0) {
		inode->i_op = &erofs_symlink_iops;
		return 0;
	}

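	/* i_size bytes of target data plus one byte for the trailing NUL */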
	lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
	if (!lnk)
		return -ENOMEM;

	m_pofs += vi->xattr_isize;
	/* inline symlink data shouldn't cross a block boundary */
	if (m_pofs + inode->i_size > bsz) {
		kfree(lnk);
		erofs_err(inode->i_sb,
			  "inline data cross block boundary @ nid %llu",
			  vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
	memcpy(lnk, kaddr + m_pofs, inode->i_size);
	lnk[inode->i_size] = '\0';

	inode->i_link = lnk;
	inode->i_op = &erofs_fast_symlink_iops;
	return 0;
}

static int erofs_fill_inode(struct inode *inode)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	void *kaddr;
	unsigned int ofs;
	int err = 0;

	trace_erofs_fill_inode(inode);

	/* read inode base data from disk */
	kaddr = erofs_read_inode(&buf, inode, &ofs);
	if (IS_ERR(kaddr))
		return PTR_ERR(kaddr);

	/* setup the new inode */
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &erofs_generic_iops;
		if (erofs_inode_is_data_compressed(vi->datalayout))
			inode->i_fop = &generic_ro_fops;
		else
			inode->i_fop = &erofs_file_fops;
		break;
	case S_IFDIR:
		inode->i_op = &erofs_dir_iops;
		inode->i_fop = &erofs_dir_fops;
		inode_nohighmem(inode);
		break;
	case S_IFLNK:
		err = erofs_fill_symlink(inode, kaddr, ofs);
		if (err)
			goto out_unlock;
		inode_nohighmem(inode);
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		inode->i_op = &erofs_generic_iops;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		goto out_unlock;
	default:
		err = -EFSCORRUPTED;
		goto out_unlock;
	}

	if (erofs_inode_is_data_compressed(vi->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		if (!erofs_is_fscache_mode(inode->i_sb)) {
			DO_ONCE_LITE_IF(inode->i_sb->s_blocksize != PAGE_SIZE,
				  erofs_info, inode->i_sb,
				  "EXPERIMENTAL EROFS subpage compressed block support in use. Use at your own risk!");
			inode->i_mapping->a_ops = &z_erofs_aops;
			err = 0;
			goto out_unlock;
		}
#endif
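		/* unsupported here: !CONFIG_EROFS_FS_ZIP or fscache mode */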
		err = -EOPNOTSUPP;
		goto out_unlock;
	}
	inode->i_mapping->a_ops = &erofs_raw_access_aops;
	mapping_set_large_folios(inode->i_mapping);
#ifdef CONFIG_EROFS_FS_ONDEMAND
	if (erofs_is_fscache_mode(inode->i_sb))
		inode->i_mapping->a_ops = &erofs_fscache_access_aops;
#endif

out_unlock:
	erofs_put_metabuf(&buf);
	return err;
}

/*
 * ino_t is 32 bits on a 32-bit arch. We have to squash the 64-bit value
 * down so that it will fit.
 */
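/*
 * Worked example on a 32-bit arch (4-byte ino_t): nid 0x123456789
 * squashes to 0x23456789 ^ 0x1 == 0x23456788.
 */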
static ino_t erofs_squash_ino(erofs_nid_t nid)
{
	ino_t ino = (ino_t)nid;

	if (sizeof(ino_t) < sizeof(erofs_nid_t))
		ino ^= nid >> (sizeof(erofs_nid_t) - sizeof(ino_t)) * 8;
	return ino;
}

static int erofs_iget5_eq(struct inode *inode, void *opaque)
{
	return EROFS_I(inode)->nid == *(erofs_nid_t *)opaque;
}

static int erofs_iget5_set(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	inode->i_ino = erofs_squash_ino(nid);
	EROFS_I(inode)->nid = nid;
	return 0;
}

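/*
 * iget5_locked() below hashes on the squashed ino while erofs_iget5_eq()
 * compares the full 64-bit nid, so squash collisions on 32-bit still
 * resolve to the right inode.
 */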
struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid)
{
	struct inode *inode;

	inode = iget5_locked(sb, erofs_squash_ino(nid), erofs_iget5_eq,
			     erofs_iget5_set, &nid);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		int err = erofs_fill_inode(inode);

		if (err) {
			iget_failed(inode);
			return ERR_PTR(err);
		}
		unlock_new_inode(inode);
	}
	return inode;
}

int erofs_getattr(struct mnt_idmap *idmap, const struct path *path,
		  struct kstat *stat, u32 request_mask,
		  unsigned int query_flags)
{
	struct inode *const inode = d_inode(path->dentry);

	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
		stat->attributes |= STATX_ATTR_COMPRESSED;

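	/* EROFS is a read-only filesystem, so every inode is immutable */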
	stat->attributes |= STATX_ATTR_IMMUTABLE;
	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_IMMUTABLE);

	generic_fillattr(idmap, request_mask, inode, stat);
	return 0;
}

const struct inode_operations erofs_generic_iops = {
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
	.fiemap = erofs_fiemap,
};

const struct inode_operations erofs_symlink_iops = {
	.get_link = page_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
};

const struct inode_operations erofs_fast_symlink_iops = {
	.get_link = simple_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
};
fs/erofs/inode.c (v5.14.15)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "xattr.h"

#include <trace/events/erofs.h>

/*
 * If the inode is successfully read, return its inode page (or sometimes
 * the inode payload page if it's an extended inode) in order to fill
 * inline data if possible.
 */
static struct page *erofs_read_inode(struct inode *inode,
				     unsigned int *ofs)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_inode *vi = EROFS_I(inode);
	const erofs_off_t inode_loc = iloc(sbi, vi->nid);

	erofs_blk_t blkaddr, nblks = 0;
	struct page *page;
	struct erofs_inode_compact *dic;
	struct erofs_inode_extended *die, *copied = NULL;
	unsigned int ifmt;
	int err;

	blkaddr = erofs_blknr(inode_loc);
	*ofs = erofs_blkoff(inode_loc);

	erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
		  __func__, vi->nid, *ofs, blkaddr);

	page = erofs_get_meta_page(sb, blkaddr);
	if (IS_ERR(page)) {
		erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
			  vi->nid, PTR_ERR(page));
		return page;
	}

	dic = page_address(page) + *ofs;
	ifmt = le16_to_cpu(dic->i_format);

	if (ifmt & ~EROFS_I_ALL) {
		erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu",
			  ifmt, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	vi->datalayout = erofs_inode_datalayout(ifmt);
	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
		erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
			  vi->datalayout, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	switch (erofs_inode_version(ifmt)) {
	case EROFS_INODE_LAYOUT_EXTENDED:
		vi->inode_isize = sizeof(struct erofs_inode_extended);
		/* check if the inode crosses the page boundary */
		if (*ofs + vi->inode_isize <= PAGE_SIZE) {
			*ofs += vi->inode_isize;
			die = (struct erofs_inode_extended *)dic;
		} else {
			const unsigned int gotten = PAGE_SIZE - *ofs;

			copied = kmalloc(vi->inode_isize, GFP_NOFS);
			if (!copied) {
				err = -ENOMEM;
				goto err_out;
			}
			memcpy(copied, dic, gotten);
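			/*
			 * erofs_get_meta_page() returns the page locked, so
			 * release this one before reading the next block.
			 */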
			unlock_page(page);
			put_page(page);

			page = erofs_get_meta_page(sb, blkaddr + 1);
			if (IS_ERR(page)) {
				erofs_err(sb, "failed to get inode payload page (nid: %llu), err %ld",
					  vi->nid, PTR_ERR(page));
				kfree(copied);
				return page;
			}
			*ofs = vi->inode_isize - gotten;
			memcpy((u8 *)copied + gotten, page_address(page), *ofs);
			die = copied;
		}
		vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);

		inode->i_mode = le16_to_cpu(die->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(die->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le32_to_cpu(die->i_uid));
		i_gid_write(inode, le32_to_cpu(die->i_gid));
		set_nlink(inode, le32_to_cpu(die->i_nlink));

		/* extended inode has its own timestamp */
		inode->i_ctime.tv_sec = le64_to_cpu(die->i_ctime);
		inode->i_ctime.tv_nsec = le32_to_cpu(die->i_ctime_nsec);

		inode->i_size = le64_to_cpu(die->i_size);

		/* total blocks for compressed files */
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(die->i_u.compressed_blocks);

		kfree(copied);
		break;
	case EROFS_INODE_LAYOUT_COMPACT:
		vi->inode_isize = sizeof(struct erofs_inode_compact);
		*ofs += vi->inode_isize;
		vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);

		inode->i_mode = le16_to_cpu(dic->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(dic->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le16_to_cpu(dic->i_uid));
		i_gid_write(inode, le16_to_cpu(dic->i_gid));
		set_nlink(inode, le16_to_cpu(dic->i_nlink));

		/* use build time for compact inodes */
		inode->i_ctime.tv_sec = sbi->build_time;
		inode->i_ctime.tv_nsec = sbi->build_time_nsec;

		inode->i_size = le32_to_cpu(dic->i_size);
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(dic->i_u.compressed_blocks);
		break;
	default:
		erofs_err(inode->i_sb,
			  "unsupported on-disk inode version %u of nid %llu",
			  erofs_inode_version(ifmt), vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
	inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
	inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;
	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec;

	if (!nblks)
		/* measure inode.i_blocks like generic filesystems do */
		inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
	else
		inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
	return page;

bogusimode:
	erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
		  inode->i_mode, vi->nid);
	err = -EFSCORRUPTED;
err_out:
	DBG_BUGON(1);
	kfree(copied);
	unlock_page(page);
	put_page(page);
	return ERR_PTR(err);
}

static int erofs_fill_symlink(struct inode *inode, void *data,
			      unsigned int m_pofs)
{
	struct erofs_inode *vi = EROFS_I(inode);
	char *lnk;

	/* if it cannot be handled with the fast symlink scheme */
	if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
	    inode->i_size >= PAGE_SIZE) {
		inode->i_op = &erofs_symlink_iops;
		return 0;
	}

	lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
	if (!lnk)
		return -ENOMEM;

	m_pofs += vi->xattr_isize;
	/* inline symlink data shouldn't cross a page boundary either */
	if (m_pofs + inode->i_size > PAGE_SIZE) {
		kfree(lnk);
		erofs_err(inode->i_sb,
			  "inline data cross block boundary @ nid %llu",
			  vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	memcpy(lnk, data + m_pofs, inode->i_size);
	lnk[inode->i_size] = '\0';

	inode->i_link = lnk;
	inode->i_op = &erofs_fast_symlink_iops;
	return 0;
}

static int erofs_fill_inode(struct inode *inode, int isdir)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct page *page;
	unsigned int ofs;
	int err = 0;

	trace_erofs_fill_inode(inode, isdir);

	/* read inode base data from disk */
	page = erofs_read_inode(inode, &ofs);
	if (IS_ERR(page))
		return PTR_ERR(page);

	/* setup the new inode */
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &erofs_generic_iops;
		inode->i_fop = &generic_ro_fops;
		break;
	case S_IFDIR:
		inode->i_op = &erofs_dir_iops;
		inode->i_fop = &erofs_dir_fops;
		break;
	case S_IFLNK:
		err = erofs_fill_symlink(inode, page_address(page), ofs);
		if (err)
			goto out_unlock;
		inode_nohighmem(inode);
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		inode->i_op = &erofs_generic_iops;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		goto out_unlock;
	default:
		err = -EFSCORRUPTED;
		goto out_unlock;
	}

	if (erofs_inode_is_data_compressed(vi->datalayout)) {
		err = z_erofs_fill_inode(inode);
		goto out_unlock;
	}
	inode->i_mapping->a_ops = &erofs_raw_access_aops;

out_unlock:
	unlock_page(page);
	put_page(page);
	return err;
}

/*
 * erofs nid is 64 bits, but i_ino is 'unsigned long', so we have to do
 * more work on 32-bit platforms to find the right inode.
 */
static int erofs_ilookup_test_actor(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	return EROFS_I(inode)->nid == nid;
}

static int erofs_iget_set_actor(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	inode->i_ino = erofs_inode_hash(nid);
	return 0;
}

static inline struct inode *erofs_iget_locked(struct super_block *sb,
					      erofs_nid_t nid)
{
	const unsigned long hashval = erofs_inode_hash(nid);

	return iget5_locked(sb, hashval, erofs_ilookup_test_actor,
		erofs_iget_set_actor, &nid);
}

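/*
 * erofs_iget_locked() hashes on erofs_inode_hash(nid) but keys equality
 * on the full 64-bit nid, so hash collisions on 32-bit platforms still
 * resolve to the correct inode.
 */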
struct inode *erofs_iget(struct super_block *sb,
			 erofs_nid_t nid,
			 bool isdir)
{
	struct inode *inode = erofs_iget_locked(sb, nid);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		int err;
		struct erofs_inode *vi = EROFS_I(inode);

		vi->nid = nid;

		err = erofs_fill_inode(inode, isdir);
		if (!err)
			unlock_new_inode(inode);
		else {
			iget_failed(inode);
			inode = ERR_PTR(err);
		}
	}
	return inode;
}

int erofs_getattr(struct user_namespace *mnt_userns, const struct path *path,
		  struct kstat *stat, u32 request_mask,
		  unsigned int query_flags)
{
	struct inode *const inode = d_inode(path->dentry);

	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	stat->attributes |= STATX_ATTR_IMMUTABLE;
	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_IMMUTABLE);

	generic_fillattr(&init_user_ns, inode, stat);
	return 0;
}

const struct inode_operations erofs_generic_iops = {
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_acl = erofs_get_acl,
};

const struct inode_operations erofs_symlink_iops = {
	.get_link = page_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_acl = erofs_get_acl,
};

const struct inode_operations erofs_fast_symlink_iops = {
	.get_link = simple_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_acl = erofs_get_acl,
};