v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2017-2018 HUAWEI, Inc.
  4 *             https://www.huawei.com/
  5 * Copyright (C) 2021, Alibaba Cloud
  6 */
  7#include "xattr.h"
  8
  9#include <trace/events/erofs.h>
 10
 11static void *erofs_read_inode(struct erofs_buf *buf,
 12			      struct inode *inode, unsigned int *ofs)
 13{
 14	struct super_block *sb = inode->i_sb;
 15	struct erofs_sb_info *sbi = EROFS_SB(sb);
 16	struct erofs_inode *vi = EROFS_I(inode);
  17	const erofs_off_t inode_loc = erofs_iloc(inode);
 18	erofs_blk_t blkaddr, nblks = 0;
 19	void *kaddr;
 20	struct erofs_inode_compact *dic;
 21	struct erofs_inode_extended *die, *copied = NULL;
 22	union erofs_inode_i_u iu;
 23	unsigned int ifmt;
 24	int err;
 25
 26	blkaddr = erofs_blknr(sb, inode_loc);
  27	*ofs = erofs_blkoff(sb, inode_loc);
 28
 29	kaddr = erofs_read_metabuf(buf, sb, blkaddr, EROFS_KMAP);
 30	if (IS_ERR(kaddr)) {
 31		erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
 32			  vi->nid, PTR_ERR(kaddr));
 33		return kaddr;
 34	}
 35
 36	dic = kaddr + *ofs;
  37	ifmt = le16_to_cpu(dic->i_format);
 38	if (ifmt & ~EROFS_I_ALL) {
 39		erofs_err(sb, "unsupported i_format %u of nid %llu",
 40			  ifmt, vi->nid);
 41		err = -EOPNOTSUPP;
 42		goto err_out;
 43	}
 44
 45	vi->datalayout = erofs_inode_datalayout(ifmt);
 46	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
 47		erofs_err(sb, "unsupported datalayout %u of nid %llu",
 48			  vi->datalayout, vi->nid);
 49		err = -EOPNOTSUPP;
 50		goto err_out;
 51	}
 52
 53	switch (erofs_inode_version(ifmt)) {
 54	case EROFS_INODE_LAYOUT_EXTENDED:
 55		vi->inode_isize = sizeof(struct erofs_inode_extended);
  56		/* check if the extended inode crosses a block boundary */
 57		if (*ofs + vi->inode_isize <= sb->s_blocksize) {
 58			*ofs += vi->inode_isize;
 59			die = (struct erofs_inode_extended *)dic;
 60		} else {
 61			const unsigned int gotten = sb->s_blocksize - *ofs;
 62
 63			copied = kmalloc(vi->inode_isize, GFP_KERNEL);
 64			if (!copied) {
 65				err = -ENOMEM;
 66				goto err_out;
 67			}
 68			memcpy(copied, dic, gotten);
 69			kaddr = erofs_read_metabuf(buf, sb, blkaddr + 1,
 70						   EROFS_KMAP);
 71			if (IS_ERR(kaddr)) {
 72				erofs_err(sb, "failed to get inode payload block (nid: %llu), err %ld",
 73					  vi->nid, PTR_ERR(kaddr));
 74				kfree(copied);
 75				return kaddr;
 76			}
 77			*ofs = vi->inode_isize - gotten;
 78			memcpy((u8 *)copied + gotten, kaddr, *ofs);
 79			die = copied;
 80		}
 81		vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);
 82
 83		inode->i_mode = le16_to_cpu(die->i_mode);
  84		iu = die->i_u;
 85		i_uid_write(inode, le32_to_cpu(die->i_uid));
 86		i_gid_write(inode, le32_to_cpu(die->i_gid));
 87		set_nlink(inode, le32_to_cpu(die->i_nlink));
 88		/* each extended inode has its own timestamp */
 89		inode_set_ctime(inode, le64_to_cpu(die->i_mtime),
  90				le32_to_cpu(die->i_mtime_nsec));
 91
  92		inode->i_size = le64_to_cpu(die->i_size);
 93		kfree(copied);
 94		copied = NULL;
 95		break;
 96	case EROFS_INODE_LAYOUT_COMPACT:
 97		vi->inode_isize = sizeof(struct erofs_inode_compact);
 98		*ofs += vi->inode_isize;
 99		vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);
100
101		inode->i_mode = le16_to_cpu(dic->i_mode);
 102		iu = dic->i_u;
103		i_uid_write(inode, le16_to_cpu(dic->i_uid));
104		i_gid_write(inode, le16_to_cpu(dic->i_gid));
 105		set_nlink(inode, le16_to_cpu(dic->i_nlink));
106		/* use build time for compact inodes */
 107		inode_set_ctime(inode, sbi->build_time, sbi->build_time_nsec);
108
 109		inode->i_size = le32_to_cpu(dic->i_size);
110		break;
111	default:
 112		erofs_err(sb, "unsupported on-disk inode version %u of nid %llu",
113			  erofs_inode_version(ifmt), vi->nid);
114		err = -EOPNOTSUPP;
115		goto err_out;
116	}
117
118	switch (inode->i_mode & S_IFMT) {
119	case S_IFREG:
120	case S_IFDIR:
121	case S_IFLNK:
122		vi->raw_blkaddr = le32_to_cpu(iu.raw_blkaddr);
123		break;
124	case S_IFCHR:
125	case S_IFBLK:
126		inode->i_rdev = new_decode_dev(le32_to_cpu(iu.rdev));
127		break;
128	case S_IFIFO:
129	case S_IFSOCK:
130		inode->i_rdev = 0;
131		break;
132	default:
133		erofs_err(sb, "bogus i_mode (%o) @ nid %llu", inode->i_mode,
134			  vi->nid);
135		err = -EFSCORRUPTED;
136		goto err_out;
137	}
138
139	/* total blocks for compressed files */
140	if (erofs_inode_is_data_compressed(vi->datalayout)) {
141		nblks = le32_to_cpu(iu.compressed_blocks);
142	} else if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
143		/* fill chunked inode summary info */
144		vi->chunkformat = le16_to_cpu(iu.c.format);
145		if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
 146			erofs_err(sb, "unsupported chunk format %x of nid %llu",
147				  vi->chunkformat, vi->nid);
148			err = -EOPNOTSUPP;
149			goto err_out;
150		}
151		vi->chunkbits = sb->s_blocksize_bits +
152			(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
153	}
154	inode_set_mtime_to_ts(inode,
 155			      inode_set_atime_to_ts(inode, inode_get_ctime(inode)));
156
157	inode->i_flags &= ~S_DAX;
158	if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
159	    (vi->datalayout == EROFS_INODE_FLAT_PLAIN ||
160	     vi->datalayout == EROFS_INODE_CHUNK_BASED))
161		inode->i_flags |= S_DAX;
162
163	if (!nblks)
 164		/* measure inode.i_blocks as generic filesystems do */
165		inode->i_blocks = round_up(inode->i_size, sb->s_blocksize) >> 9;
166	else
167		inode->i_blocks = nblks << (sb->s_blocksize_bits - 9);
168	return kaddr;
 169
170err_out:
171	DBG_BUGON(1);
172	kfree(copied);
173	erofs_put_metabuf(buf);
174	return ERR_PTR(err);
175}
176
177static int erofs_fill_symlink(struct inode *inode, void *kaddr,
178			      unsigned int m_pofs)
179{
180	struct erofs_inode *vi = EROFS_I(inode);
181	unsigned int bsz = i_blocksize(inode);
182	char *lnk;
183
184	/* if it cannot be handled with fast symlink scheme */
185	if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
186	    inode->i_size >= bsz || inode->i_size < 0) {
187		inode->i_op = &erofs_symlink_iops;
188		return 0;
189	}
190
191	lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
192	if (!lnk)
193		return -ENOMEM;
194
195	m_pofs += vi->xattr_isize;
196	/* inline symlink data shouldn't cross block boundary */
197	if (m_pofs + inode->i_size > bsz) {
198		kfree(lnk);
199		erofs_err(inode->i_sb,
200			  "inline data cross block boundary @ nid %llu",
201			  vi->nid);
202		DBG_BUGON(1);
203		return -EFSCORRUPTED;
204	}
205	memcpy(lnk, kaddr + m_pofs, inode->i_size);
206	lnk[inode->i_size] = '\0';
207
208	inode->i_link = lnk;
209	inode->i_op = &erofs_fast_symlink_iops;
210	return 0;
211}
212
213static int erofs_fill_inode(struct inode *inode)
214{
215	struct erofs_inode *vi = EROFS_I(inode);
216	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
217	void *kaddr;
218	unsigned int ofs;
219	int err = 0;
220
221	trace_erofs_fill_inode(inode);
222
223	/* read inode base data from disk */
224	kaddr = erofs_read_inode(&buf, inode, &ofs);
225	if (IS_ERR(kaddr))
226		return PTR_ERR(kaddr);
227
228	/* setup the new inode */
229	switch (inode->i_mode & S_IFMT) {
230	case S_IFREG:
231		inode->i_op = &erofs_generic_iops;
232		if (erofs_inode_is_data_compressed(vi->datalayout))
233			inode->i_fop = &generic_ro_fops;
234		else
235			inode->i_fop = &erofs_file_fops;
236		break;
237	case S_IFDIR:
238		inode->i_op = &erofs_dir_iops;
239		inode->i_fop = &erofs_dir_fops;
240		inode_nohighmem(inode);
241		break;
242	case S_IFLNK:
243		err = erofs_fill_symlink(inode, kaddr, ofs);
244		if (err)
245			goto out_unlock;
246		inode_nohighmem(inode);
247		break;
248	case S_IFCHR:
249	case S_IFBLK:
250	case S_IFIFO:
251	case S_IFSOCK:
252		inode->i_op = &erofs_generic_iops;
253		init_special_inode(inode, inode->i_mode, inode->i_rdev);
254		goto out_unlock;
255	default:
256		err = -EFSCORRUPTED;
257		goto out_unlock;
258	}
259
260	if (erofs_inode_is_data_compressed(vi->datalayout)) {
261#ifdef CONFIG_EROFS_FS_ZIP
262		if (!erofs_is_fscache_mode(inode->i_sb)) {
263			DO_ONCE_LITE_IF(inode->i_sb->s_blocksize != PAGE_SIZE,
264				  erofs_info, inode->i_sb,
265				  "EXPERIMENTAL EROFS subpage compressed block support in use. Use at your own risk!");
266			inode->i_mapping->a_ops = &z_erofs_aops;
267			err = 0;
268			goto out_unlock;
269		}
270#endif
271		err = -EOPNOTSUPP;
272		goto out_unlock;
273	}
274	inode->i_mapping->a_ops = &erofs_raw_access_aops;
275	mapping_set_large_folios(inode->i_mapping);
276#ifdef CONFIG_EROFS_FS_ONDEMAND
277	if (erofs_is_fscache_mode(inode->i_sb))
278		inode->i_mapping->a_ops = &erofs_fscache_access_aops;
279#endif
280
281out_unlock:
282	erofs_put_metabuf(&buf);
283	return err;
284}
285
286/*
287 * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
288 * so that it will fit.
289 */
290static ino_t erofs_squash_ino(erofs_nid_t nid)
291{
292	ino_t ino = (ino_t)nid;
293
294	if (sizeof(ino_t) < sizeof(erofs_nid_t))
295		ino ^= nid >> (sizeof(erofs_nid_t) - sizeof(ino_t)) * 8;
296	return ino;
297}
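/*
 * Worked example (illustration only, not part of the file): with a 32-bit
 * ino_t and a 64-bit erofs_nid_t, erofs_squash_ino() above folds the high
 * half of the nid into the low half:
 *
 *	nid = 0x0000000500000001
 *	ino = (ino_t)nid ^ (nid >> 32) = 0x00000001 ^ 0x00000005 = 0x00000004
 *
 * On 64-bit kernels sizeof(ino_t) == sizeof(erofs_nid_t), so the nid is
 * used unchanged.
 */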
298
299static int erofs_iget5_eq(struct inode *inode, void *opaque)
300{
301	return EROFS_I(inode)->nid == *(erofs_nid_t *)opaque;
302}
303
304static int erofs_iget5_set(struct inode *inode, void *opaque)
305{
306	const erofs_nid_t nid = *(erofs_nid_t *)opaque;
307
308	inode->i_ino = erofs_squash_ino(nid);
309	EROFS_I(inode)->nid = nid;
310	return 0;
311}
312
313struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid)
 314{
315	struct inode *inode;
316
317	inode = iget5_locked(sb, erofs_squash_ino(nid), erofs_iget5_eq,
318			     erofs_iget5_set, &nid);
319	if (!inode)
320		return ERR_PTR(-ENOMEM);
321
322	if (inode->i_state & I_NEW) {
 323		int err = erofs_fill_inode(inode);
324
 325		if (err) {
326			iget_failed(inode);
327			return ERR_PTR(err);
328		}
329		unlock_new_inode(inode);
330	}
331	return inode;
332}
333
334int erofs_getattr(struct mnt_idmap *idmap, const struct path *path,
335		  struct kstat *stat, u32 request_mask,
336		  unsigned int query_flags)
337{
338	struct inode *const inode = d_inode(path->dentry);
339
340	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
341		stat->attributes |= STATX_ATTR_COMPRESSED;
342
343	stat->attributes |= STATX_ATTR_IMMUTABLE;
344	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
345				  STATX_ATTR_IMMUTABLE);
346
347	generic_fillattr(idmap, request_mask, inode, stat);
348	return 0;
349}
350
351const struct inode_operations erofs_generic_iops = {
352	.getattr = erofs_getattr,
353	.listxattr = erofs_listxattr,
354	.get_inode_acl = erofs_get_acl,
355	.fiemap = erofs_fiemap,
356};
357
358const struct inode_operations erofs_symlink_iops = {
359	.get_link = page_get_link,
360	.getattr = erofs_getattr,
361	.listxattr = erofs_listxattr,
362	.get_inode_acl = erofs_get_acl,
363};
364
365const struct inode_operations erofs_fast_symlink_iops = {
366	.get_link = simple_get_link,
367	.getattr = erofs_getattr,
368	.listxattr = erofs_listxattr,
369	.get_inode_acl = erofs_get_acl,
370};
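The block accounting at the end of erofs_read_inode() above is plain arithmetic: nblks stays 0 for uncompressed inodes, so i_blocks is the file size rounded up to the filesystem block size and expressed in 512-byte sectors, while compressed inodes convert their on-disk compressed block count to sectors. A minimal userspace sketch follows; round_up_pow2() and erofs_i_blocks() are invented helper names for this illustration, not kernel API.

#include <stdint.h>
#include <stdio.h>

/* mirrors the kernel's round_up() for power-of-two alignments */
static uint64_t round_up_pow2(uint64_t x, uint64_t align)
{
	return (x + align - 1) & ~(align - 1);
}

/* i_blocks as computed in erofs_read_inode(): nblks is the compressed
 * block count for compressed layouts and 0 otherwise */
static uint64_t erofs_i_blocks(uint64_t i_size, unsigned int blkszbits,
			       uint32_t nblks)
{
	if (!nblks)
		return round_up_pow2(i_size, 1ULL << blkszbits) >> 9;
	return (uint64_t)nblks << (blkszbits - 9);
}

int main(void)
{
	/* 5000-byte plain file on a 4KiB-block image: 8192 bytes -> 16 sectors */
	printf("%llu\n", (unsigned long long)erofs_i_blocks(5000, 12, 0));
	/* compressed file occupying 3 on-disk 4KiB blocks -> 24 sectors */
	printf("%llu\n", (unsigned long long)erofs_i_blocks(0, 12, 3));
	return 0;
}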
v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2017-2018 HUAWEI, Inc.
  4 *             https://www.huawei.com/
  5 * Copyright (C) 2021, Alibaba Cloud
  6 */
  7#include "xattr.h"
  8
  9#include <trace/events/erofs.h>
 10
 11static void *erofs_read_inode(struct erofs_buf *buf,
 12			      struct inode *inode, unsigned int *ofs)
 13{
 14	struct super_block *sb = inode->i_sb;
 15	struct erofs_sb_info *sbi = EROFS_SB(sb);
 16	struct erofs_inode *vi = EROFS_I(inode);
 17	const erofs_off_t inode_loc = iloc(sbi, vi->nid);
 18
 19	erofs_blk_t blkaddr, nblks = 0;
 20	void *kaddr;
 21	struct erofs_inode_compact *dic;
  22	struct erofs_inode_extended *die, *copied = NULL;
 23	unsigned int ifmt;
 24	int err;
 25
 26	blkaddr = erofs_blknr(inode_loc);
 27	*ofs = erofs_blkoff(inode_loc);
 28
 29	erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
 30		  __func__, vi->nid, *ofs, blkaddr);
 31
 32	kaddr = erofs_read_metabuf(buf, sb, blkaddr, EROFS_KMAP);
 33	if (IS_ERR(kaddr)) {
 34		erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
 35			  vi->nid, PTR_ERR(kaddr));
 36		return kaddr;
 37	}
 38
 39	dic = kaddr + *ofs;
 40	ifmt = le16_to_cpu(dic->i_format);
 41
 42	if (ifmt & ~EROFS_I_ALL) {
 43		erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu",
 44			  ifmt, vi->nid);
 45		err = -EOPNOTSUPP;
 46		goto err_out;
 47	}
 48
 49	vi->datalayout = erofs_inode_datalayout(ifmt);
 50	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
 51		erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
 52			  vi->datalayout, vi->nid);
 53		err = -EOPNOTSUPP;
 54		goto err_out;
 55	}
 56
 57	switch (erofs_inode_version(ifmt)) {
 58	case EROFS_INODE_LAYOUT_EXTENDED:
 59		vi->inode_isize = sizeof(struct erofs_inode_extended);
  60		/* check if the extended inode crosses a block boundary */
 61		if (*ofs + vi->inode_isize <= EROFS_BLKSIZ) {
 62			*ofs += vi->inode_isize;
 63			die = (struct erofs_inode_extended *)dic;
 64		} else {
 65			const unsigned int gotten = EROFS_BLKSIZ - *ofs;
 66
 67			copied = kmalloc(vi->inode_isize, GFP_NOFS);
 68			if (!copied) {
 69				err = -ENOMEM;
 70				goto err_out;
 71			}
 72			memcpy(copied, dic, gotten);
 73			kaddr = erofs_read_metabuf(buf, sb, blkaddr + 1,
 74						   EROFS_KMAP);
 75			if (IS_ERR(kaddr)) {
 76				erofs_err(sb, "failed to get inode payload block (nid: %llu), err %ld",
 77					  vi->nid, PTR_ERR(kaddr));
 78				kfree(copied);
 79				return kaddr;
 80			}
 81			*ofs = vi->inode_isize - gotten;
 82			memcpy((u8 *)copied + gotten, kaddr, *ofs);
 83			die = copied;
 84		}
 85		vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);
 86
 87		inode->i_mode = le16_to_cpu(die->i_mode);
 88		switch (inode->i_mode & S_IFMT) {
 89		case S_IFREG:
 90		case S_IFDIR:
 91		case S_IFLNK:
 92			vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr);
 93			break;
 94		case S_IFCHR:
 95		case S_IFBLK:
 96			inode->i_rdev =
 97				new_decode_dev(le32_to_cpu(die->i_u.rdev));
 98			break;
 99		case S_IFIFO:
100		case S_IFSOCK:
101			inode->i_rdev = 0;
102			break;
103		default:
104			goto bogusimode;
105		}
106		i_uid_write(inode, le32_to_cpu(die->i_uid));
107		i_gid_write(inode, le32_to_cpu(die->i_gid));
108		set_nlink(inode, le32_to_cpu(die->i_nlink));
109
110		/* extended inode has its own timestamp */
111		inode->i_ctime.tv_sec = le64_to_cpu(die->i_mtime);
112		inode->i_ctime.tv_nsec = le32_to_cpu(die->i_mtime_nsec);
113
114		inode->i_size = le64_to_cpu(die->i_size);
115
116		/* total blocks for compressed files */
117		if (erofs_inode_is_data_compressed(vi->datalayout))
118			nblks = le32_to_cpu(die->i_u.compressed_blocks);
119		else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
120			/* fill chunked inode summary info */
121			vi->chunkformat = le16_to_cpu(die->i_u.c.format);
122		kfree(copied);
123		copied = NULL;
124		break;
125	case EROFS_INODE_LAYOUT_COMPACT:
126		vi->inode_isize = sizeof(struct erofs_inode_compact);
127		*ofs += vi->inode_isize;
128		vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);
129
130		inode->i_mode = le16_to_cpu(dic->i_mode);
131		switch (inode->i_mode & S_IFMT) {
132		case S_IFREG:
133		case S_IFDIR:
134		case S_IFLNK:
135			vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr);
136			break;
137		case S_IFCHR:
138		case S_IFBLK:
139			inode->i_rdev =
140				new_decode_dev(le32_to_cpu(dic->i_u.rdev));
141			break;
142		case S_IFIFO:
143		case S_IFSOCK:
144			inode->i_rdev = 0;
145			break;
146		default:
147			goto bogusimode;
148		}
149		i_uid_write(inode, le16_to_cpu(dic->i_uid));
150		i_gid_write(inode, le16_to_cpu(dic->i_gid));
151		set_nlink(inode, le16_to_cpu(dic->i_nlink));
152
153		/* use build time for compact inodes */
154		inode->i_ctime.tv_sec = sbi->build_time;
155		inode->i_ctime.tv_nsec = sbi->build_time_nsec;
156
157		inode->i_size = le32_to_cpu(dic->i_size);
158		if (erofs_inode_is_data_compressed(vi->datalayout))
159			nblks = le32_to_cpu(dic->i_u.compressed_blocks);
160		else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
161			vi->chunkformat = le16_to_cpu(dic->i_u.c.format);
162		break;
163	default:
164		erofs_err(inode->i_sb,
165			  "unsupported on-disk inode version %u of nid %llu",
166			  erofs_inode_version(ifmt), vi->nid);
167		err = -EOPNOTSUPP;
168		goto err_out;
169	}
170
 171	if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
172		if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
173			erofs_err(inode->i_sb,
174				  "unsupported chunk format %x of nid %llu",
175				  vi->chunkformat, vi->nid);
176			err = -EOPNOTSUPP;
177			goto err_out;
178		}
179		vi->chunkbits = LOG_BLOCK_SIZE +
180			(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
181	}
182	inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
183	inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
184	inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;
185	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec;
186
187	inode->i_flags &= ~S_DAX;
188	if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
 189	    vi->datalayout == EROFS_INODE_FLAT_PLAIN)
 190		inode->i_flags |= S_DAX;
191	if (!nblks)
 192		/* measure inode.i_blocks as generic filesystems do */
193		inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
194	else
195		inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
196	return kaddr;
197
198bogusimode:
199	erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
200		  inode->i_mode, vi->nid);
201	err = -EFSCORRUPTED;
202err_out:
203	DBG_BUGON(1);
204	kfree(copied);
205	erofs_put_metabuf(buf);
206	return ERR_PTR(err);
207}
208
209static int erofs_fill_symlink(struct inode *inode, void *kaddr,
210			      unsigned int m_pofs)
211{
 212	struct erofs_inode *vi = EROFS_I(inode);
213	char *lnk;
214
215	/* if it cannot be handled with fast symlink scheme */
216	if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
217	    inode->i_size >= EROFS_BLKSIZ || inode->i_size < 0) {
218		inode->i_op = &erofs_symlink_iops;
219		return 0;
220	}
221
222	lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
223	if (!lnk)
224		return -ENOMEM;
225
226	m_pofs += vi->xattr_isize;
227	/* inline symlink data shouldn't cross block boundary */
228	if (m_pofs + inode->i_size > EROFS_BLKSIZ) {
229		kfree(lnk);
230		erofs_err(inode->i_sb,
231			  "inline data cross block boundary @ nid %llu",
232			  vi->nid);
233		DBG_BUGON(1);
234		return -EFSCORRUPTED;
235	}
236	memcpy(lnk, kaddr + m_pofs, inode->i_size);
237	lnk[inode->i_size] = '\0';
238
239	inode->i_link = lnk;
240	inode->i_op = &erofs_fast_symlink_iops;
241	return 0;
242}
243
244static int erofs_fill_inode(struct inode *inode)
245{
246	struct erofs_inode *vi = EROFS_I(inode);
247	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
248	void *kaddr;
249	unsigned int ofs;
250	int err = 0;
251
252	trace_erofs_fill_inode(inode);
253
254	/* read inode base data from disk */
255	kaddr = erofs_read_inode(&buf, inode, &ofs);
256	if (IS_ERR(kaddr))
257		return PTR_ERR(kaddr);
258
259	/* setup the new inode */
260	switch (inode->i_mode & S_IFMT) {
261	case S_IFREG:
262		inode->i_op = &erofs_generic_iops;
263		if (erofs_inode_is_data_compressed(vi->datalayout))
264			inode->i_fop = &generic_ro_fops;
265		else
266			inode->i_fop = &erofs_file_fops;
267		break;
268	case S_IFDIR:
269		inode->i_op = &erofs_dir_iops;
270		inode->i_fop = &erofs_dir_fops;
271		inode_nohighmem(inode);
272		break;
273	case S_IFLNK:
274		err = erofs_fill_symlink(inode, kaddr, ofs);
275		if (err)
276			goto out_unlock;
277		inode_nohighmem(inode);
278		break;
279	case S_IFCHR:
280	case S_IFBLK:
281	case S_IFIFO:
282	case S_IFSOCK:
283		inode->i_op = &erofs_generic_iops;
284		init_special_inode(inode, inode->i_mode, inode->i_rdev);
285		goto out_unlock;
286	default:
287		err = -EFSCORRUPTED;
288		goto out_unlock;
289	}
290
291	if (erofs_inode_is_data_compressed(vi->datalayout)) {
292		if (!erofs_is_fscache_mode(inode->i_sb))
293			err = z_erofs_fill_inode(inode);
294		else
 295			err = -EOPNOTSUPP;
296		goto out_unlock;
297	}
298	inode->i_mapping->a_ops = &erofs_raw_access_aops;
299	mapping_set_large_folios(inode->i_mapping);
300#ifdef CONFIG_EROFS_FS_ONDEMAND
301	if (erofs_is_fscache_mode(inode->i_sb))
302		inode->i_mapping->a_ops = &erofs_fscache_access_aops;
303#endif
304
305out_unlock:
306	erofs_put_metabuf(&buf);
307	return err;
308}
309
310/*
311 * erofs nid is 64bits, but i_ino is 'unsigned long', therefore
312 * we should do more for 32-bit platform to find the right inode.
313 */
314static int erofs_ilookup_test_actor(struct inode *inode, void *opaque)
315{
 316	const erofs_nid_t nid = *(erofs_nid_t *)opaque;
317
 318	return EROFS_I(inode)->nid == nid;
319}
320
321static int erofs_iget_set_actor(struct inode *inode, void *opaque)
322{
323	const erofs_nid_t nid = *(erofs_nid_t *)opaque;
324
 325	inode->i_ino = erofs_inode_hash(nid);
326	return 0;
327}
328
329struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid)
330{
331	const unsigned long hashval = erofs_inode_hash(nid);
332	struct inode *inode;
333
334	inode = iget5_locked(sb, hashval, erofs_ilookup_test_actor,
335		erofs_iget_set_actor, &nid);
336	if (!inode)
337		return ERR_PTR(-ENOMEM);
338
339	if (inode->i_state & I_NEW) {
340		int err;
341		struct erofs_inode *vi = EROFS_I(inode);
342
343		vi->nid = nid;
344
345		err = erofs_fill_inode(inode);
346		if (!err) {
347			unlock_new_inode(inode);
348		} else {
349			iget_failed(inode);
350			inode = ERR_PTR(err);
 351		}
352	}
353	return inode;
354}
355
356int erofs_getattr(struct user_namespace *mnt_userns, const struct path *path,
357		  struct kstat *stat, u32 request_mask,
358		  unsigned int query_flags)
359{
360	struct inode *const inode = d_inode(path->dentry);
361
362	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
363		stat->attributes |= STATX_ATTR_COMPRESSED;
364
365	stat->attributes |= STATX_ATTR_IMMUTABLE;
366	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
367				  STATX_ATTR_IMMUTABLE);
368
369	generic_fillattr(mnt_userns, inode, stat);
370	return 0;
371}
372
373const struct inode_operations erofs_generic_iops = {
374	.getattr = erofs_getattr,
375	.listxattr = erofs_listxattr,
376	.get_inode_acl = erofs_get_acl,
377	.fiemap = erofs_fiemap,
378};
379
380const struct inode_operations erofs_symlink_iops = {
381	.get_link = page_get_link,
382	.getattr = erofs_getattr,
383	.listxattr = erofs_listxattr,
384	.get_inode_acl = erofs_get_acl,
385};
386
387const struct inode_operations erofs_fast_symlink_iops = {
388	.get_link = simple_get_link,
389	.getattr = erofs_getattr,
390	.listxattr = erofs_listxattr,
391	.get_inode_acl = erofs_get_acl,
392};
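Both versions share the fast-symlink policy in erofs_fill_symlink(): only a FLAT_INLINE symlink whose target fits in the remainder of the inode's metadata block, after the inline xattr area, is copied into inode->i_link and served by the fast get_link path; non-inline or oversized targets fall back to page_get_link(), and an inline target that would cross the block boundary is treated as corruption. A minimal userspace sketch of that eligibility test follows; erofs_symlink_is_fast() is an invented name for illustration, not a kernel function.

#include <stdbool.h>
#include <stdint.h>

/* Sketch of the fast-symlink eligibility test in erofs_fill_symlink().
 * ofs_after_inode is the offset just past the on-disk inode, as returned
 * through *ofs by erofs_read_inode(). */
static bool erofs_symlink_is_fast(bool flat_inline, int64_t i_size,
				  unsigned int ofs_after_inode,
				  unsigned int xattr_isize, unsigned int blksz)
{
	/* slow path: not inline data, or target too large for one block */
	if (!flat_inline || i_size < 0 || i_size >= blksz)
		return false;
	/* inline target lives right after the inode and its xattr body;
	 * in the kernel, crossing the block boundary here is reported as
	 * -EFSCORRUPTED rather than silently taking the slow path */
	return ofs_after_inode + xattr_isize + (uint64_t)i_size <= blksz;
}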