fs/erofs/inode.c, v6.2
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "xattr.h"

#include <trace/events/erofs.h>

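/*
 * Read the on-disk inode at @inode's nid and decode it into generic VFS
 * fields.  On success the mapped metadata buffer is returned with *ofs
 * advanced past the inode, so callers can keep parsing trailing inline
 * data; on failure an ERR_PTR() is returned instead.
 */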
static void *erofs_read_inode(struct erofs_buf *buf,
			      struct inode *inode, unsigned int *ofs)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_inode *vi = EROFS_I(inode);
	const erofs_off_t inode_loc = iloc(sbi, vi->nid);

	erofs_blk_t blkaddr, nblks = 0;
	void *kaddr;
	struct erofs_inode_compact *dic;
	struct erofs_inode_extended *die, *copied = NULL;
	unsigned int ifmt;
	int err;

	blkaddr = erofs_blknr(inode_loc);
	*ofs = erofs_blkoff(inode_loc);

	erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
		  __func__, vi->nid, *ofs, blkaddr);

	kaddr = erofs_read_metabuf(buf, sb, blkaddr, EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
			  vi->nid, PTR_ERR(kaddr));
		return kaddr;
	}

	dic = kaddr + *ofs;
	ifmt = le16_to_cpu(dic->i_format);

	if (ifmt & ~EROFS_I_ALL) {
		erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu",
			  ifmt, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	vi->datalayout = erofs_inode_datalayout(ifmt);
	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
		erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
			  vi->datalayout, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	switch (erofs_inode_version(ifmt)) {
	case EROFS_INODE_LAYOUT_EXTENDED:
		vi->inode_isize = sizeof(struct erofs_inode_extended);
		/* check if the extended inode crosses a block boundary */
		if (*ofs + vi->inode_isize <= EROFS_BLKSIZ) {
			*ofs += vi->inode_isize;
			die = (struct erofs_inode_extended *)dic;
		} else {
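			/*
			 * The extended inode straddles a block boundary:
			 * stitch both halves together in a heap buffer.
			 */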
			const unsigned int gotten = EROFS_BLKSIZ - *ofs;

			copied = kmalloc(vi->inode_isize, GFP_NOFS);
			if (!copied) {
				err = -ENOMEM;
				goto err_out;
			}
			memcpy(copied, dic, gotten);
			kaddr = erofs_read_metabuf(buf, sb, blkaddr + 1,
						   EROFS_KMAP);
			if (IS_ERR(kaddr)) {
				erofs_err(sb, "failed to get inode payload block (nid: %llu), err %ld",
					  vi->nid, PTR_ERR(kaddr));
				kfree(copied);
				return kaddr;
			}
			*ofs = vi->inode_isize - gotten;
			memcpy((u8 *)copied + gotten, kaddr, *ofs);
			die = copied;
		}
		vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);

		inode->i_mode = le16_to_cpu(die->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(die->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le32_to_cpu(die->i_uid));
		i_gid_write(inode, le32_to_cpu(die->i_gid));
		set_nlink(inode, le32_to_cpu(die->i_nlink));

		/* extended inode has its own timestamp */
		inode->i_ctime.tv_sec = le64_to_cpu(die->i_mtime);
		inode->i_ctime.tv_nsec = le32_to_cpu(die->i_mtime_nsec);

		inode->i_size = le64_to_cpu(die->i_size);

		/* total blocks for compressed files */
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(die->i_u.compressed_blocks);
		else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
			/* fill chunked inode summary info */
			vi->chunkformat = le16_to_cpu(die->i_u.c.format);
		kfree(copied);
		copied = NULL;
		break;
	case EROFS_INODE_LAYOUT_COMPACT:
		vi->inode_isize = sizeof(struct erofs_inode_compact);
		*ofs += vi->inode_isize;
		vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);

		inode->i_mode = le16_to_cpu(dic->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(dic->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le16_to_cpu(dic->i_uid));
		i_gid_write(inode, le16_to_cpu(dic->i_gid));
		set_nlink(inode, le16_to_cpu(dic->i_nlink));

		/* use build time for compact inodes */
		inode->i_ctime.tv_sec = sbi->build_time;
		inode->i_ctime.tv_nsec = sbi->build_time_nsec;

		inode->i_size = le32_to_cpu(dic->i_size);
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(dic->i_u.compressed_blocks);
		else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
			vi->chunkformat = le16_to_cpu(dic->i_u.c.format);
		break;
	default:
		erofs_err(inode->i_sb,
			  "unsupported on-disk inode version %u of nid %llu",
			  erofs_inode_version(ifmt), vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
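		/* the low bits of chunkformat encode log2(chunk size) in blocks */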
		if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
			erofs_err(inode->i_sb,
				  "unsupported chunk format %x of nid %llu",
				  vi->chunkformat, vi->nid);
			err = -EOPNOTSUPP;
			goto err_out;
		}
		vi->chunkbits = LOG_BLOCK_SIZE +
			(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
	}
	inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
	inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
	inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;
	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec;

	inode->i_flags &= ~S_DAX;
	if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
	    vi->datalayout == EROFS_INODE_FLAT_PLAIN)
		inode->i_flags |= S_DAX;
	if (!nblks)
		/* measure inode.i_blocks as generic filesystems */
		inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
	else
		inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
	return kaddr;

bogusimode:
	erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
		  inode->i_mode, vi->nid);
	err = -EFSCORRUPTED;
err_out:
	DBG_BUGON(1);
	kfree(copied);
	erofs_put_metabuf(buf);
	return ERR_PTR(err);
}

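/*
 * Fast symlinks keep the target string inline, right after the inode and
 * its xattrs, so it can be copied once into ->i_link at fill time.
 */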
static int erofs_fill_symlink(struct inode *inode, void *kaddr,
			      unsigned int m_pofs)
{
	struct erofs_inode *vi = EROFS_I(inode);
	char *lnk;

	/* if it cannot be handled with fast symlink scheme */
	if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
	    inode->i_size >= EROFS_BLKSIZ || inode->i_size < 0) {
		inode->i_op = &erofs_symlink_iops;
		return 0;
	}

	lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
	if (!lnk)
		return -ENOMEM;

	m_pofs += vi->xattr_isize;
	/* inline symlink data shouldn't cross block boundary */
	if (m_pofs + inode->i_size > EROFS_BLKSIZ) {
		kfree(lnk);
		erofs_err(inode->i_sb,
			  "inline data cross block boundary @ nid %llu",
			  vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
	memcpy(lnk, kaddr + m_pofs, inode->i_size);
	lnk[inode->i_size] = '\0';

	inode->i_link = lnk;
	inode->i_op = &erofs_fast_symlink_iops;
	return 0;
}

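/* read the on-disk inode, then wire up type-specific inode/file ops */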
static int erofs_fill_inode(struct inode *inode)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	void *kaddr;
	unsigned int ofs;
	int err = 0;

	trace_erofs_fill_inode(inode);

	/* read inode base data from disk */
	kaddr = erofs_read_inode(&buf, inode, &ofs);
	if (IS_ERR(kaddr))
		return PTR_ERR(kaddr);

	/* setup the new inode */
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &erofs_generic_iops;
		if (erofs_inode_is_data_compressed(vi->datalayout))
			inode->i_fop = &generic_ro_fops;
		else
			inode->i_fop = &erofs_file_fops;
		break;
	case S_IFDIR:
		inode->i_op = &erofs_dir_iops;
		inode->i_fop = &erofs_dir_fops;
		inode_nohighmem(inode);
		break;
	case S_IFLNK:
		err = erofs_fill_symlink(inode, kaddr, ofs);
		if (err)
			goto out_unlock;
		inode_nohighmem(inode);
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		inode->i_op = &erofs_generic_iops;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		goto out_unlock;
	default:
		err = -EFSCORRUPTED;
		goto out_unlock;
	}

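	/* compressed inodes need the z_erofs frontend; fscache mode lacks it here */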
	if (erofs_inode_is_data_compressed(vi->datalayout)) {
		if (!erofs_is_fscache_mode(inode->i_sb))
			err = z_erofs_fill_inode(inode);
		else
			err = -EOPNOTSUPP;
		goto out_unlock;
	}
	inode->i_mapping->a_ops = &erofs_raw_access_aops;
	mapping_set_large_folios(inode->i_mapping);
#ifdef CONFIG_EROFS_FS_ONDEMAND
	if (erofs_is_fscache_mode(inode->i_sb))
		inode->i_mapping->a_ops = &erofs_fscache_access_aops;
#endif

out_unlock:
	erofs_put_metabuf(&buf);
	return err;
}

/*
 * The erofs nid is 64 bits wide, but i_ino is an 'unsigned long', so on
 * 32-bit platforms the inode number alone cannot identify an inode and
 * lookups must compare the full nid as well.
 */
static int erofs_ilookup_test_actor(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	return EROFS_I(inode)->nid == nid;
}

static int erofs_iget_set_actor(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	inode->i_ino = erofs_inode_hash(nid);
	return 0;
}

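/* obtain the in-memory inode for @nid, reading it from disk if not cached */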
struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid)
{
	const unsigned long hashval = erofs_inode_hash(nid);
	struct inode *inode;

	inode = iget5_locked(sb, hashval, erofs_ilookup_test_actor,
		erofs_iget_set_actor, &nid);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		int err;
		struct erofs_inode *vi = EROFS_I(inode);

		vi->nid = nid;

		err = erofs_fill_inode(inode);
		if (!err) {
			unlock_new_inode(inode);
		} else {
			iget_failed(inode);
			inode = ERR_PTR(err);
		}
	}
	return inode;
}

int erofs_getattr(struct user_namespace *mnt_userns, const struct path *path,
		  struct kstat *stat, u32 request_mask,
		  unsigned int query_flags)
{
	struct inode *const inode = d_inode(path->dentry);

	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	stat->attributes |= STATX_ATTR_IMMUTABLE;
	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_IMMUTABLE);

	generic_fillattr(mnt_userns, inode, stat);
	return 0;
}

const struct inode_operations erofs_generic_iops = {
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
	.fiemap = erofs_fiemap,
};

const struct inode_operations erofs_symlink_iops = {
	.get_link = page_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
};

const struct inode_operations erofs_fast_symlink_iops = {
	.get_link = simple_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
};
fs/erofs/inode.c, v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "xattr.h"
#include <trace/events/erofs.h>

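/*
 * Fast symlinks: if the inline target fits in the metadata block, duplicate
 * it into ->i_link; otherwise leave i_link NULL so that erofs_fill_inode()
 * falls back to the page-based symlink operations.
 */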
static int erofs_fill_symlink(struct inode *inode, void *kaddr,
			      unsigned int m_pofs)
{
	struct erofs_inode *vi = EROFS_I(inode);
	loff_t off;

	m_pofs += vi->xattr_isize;
	/* check if it cannot be handled with fast symlink scheme */
	if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
	    check_add_overflow(m_pofs, inode->i_size, &off) ||
	    off > i_blocksize(inode))
		return 0;

	inode->i_link = kmemdup_nul(kaddr + m_pofs, inode->i_size, GFP_KERNEL);
	return inode->i_link ? 0 : -ENOMEM;
}

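/* read the on-disk inode and translate it into generic VFS fields */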
static int erofs_read_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_inode *vi = EROFS_I(inode);
	const erofs_off_t inode_loc = erofs_iloc(inode);
	erofs_blk_t blkaddr, nblks = 0;
	void *kaddr;
	struct erofs_inode_compact *dic;
	struct erofs_inode_extended *die, *copied = NULL;
	union erofs_inode_i_u iu;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	unsigned int ifmt, ofs;
	int err = 0;

	blkaddr = erofs_blknr(sb, inode_loc);
	ofs = erofs_blkoff(sb, inode_loc);

	kaddr = erofs_read_metabuf(&buf, sb, erofs_pos(sb, blkaddr), EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
			  vi->nid, PTR_ERR(kaddr));
		return PTR_ERR(kaddr);
	}

	dic = kaddr + ofs;
	ifmt = le16_to_cpu(dic->i_format);
	if (ifmt & ~EROFS_I_ALL) {
		erofs_err(sb, "unsupported i_format %u of nid %llu",
			  ifmt, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	vi->datalayout = erofs_inode_datalayout(ifmt);
	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
		erofs_err(sb, "unsupported datalayout %u of nid %llu",
			  vi->datalayout, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	switch (erofs_inode_version(ifmt)) {
	case EROFS_INODE_LAYOUT_EXTENDED:
		vi->inode_isize = sizeof(struct erofs_inode_extended);
		/* check if the extended inode crosses a block boundary */
		if (ofs + vi->inode_isize <= sb->s_blocksize) {
			ofs += vi->inode_isize;
			die = (struct erofs_inode_extended *)dic;
		} else {
			const unsigned int gotten = sb->s_blocksize - ofs;

			copied = kmalloc(vi->inode_isize, GFP_KERNEL);
			if (!copied) {
				err = -ENOMEM;
				goto err_out;
			}
			memcpy(copied, dic, gotten);
			kaddr = erofs_read_metabuf(&buf, sb, erofs_pos(sb, blkaddr + 1),
						   EROFS_KMAP);
			if (IS_ERR(kaddr)) {
				erofs_err(sb, "failed to get inode payload block (nid: %llu), err %ld",
					  vi->nid, PTR_ERR(kaddr));
				kfree(copied);
				return PTR_ERR(kaddr);
			}
			ofs = vi->inode_isize - gotten;
			memcpy((u8 *)copied + gotten, kaddr, ofs);
			die = copied;
		}
		vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);

		inode->i_mode = le16_to_cpu(die->i_mode);
		iu = die->i_u;
		i_uid_write(inode, le32_to_cpu(die->i_uid));
		i_gid_write(inode, le32_to_cpu(die->i_gid));
		set_nlink(inode, le32_to_cpu(die->i_nlink));
		/* each extended inode has its own timestamp */
		inode_set_ctime(inode, le64_to_cpu(die->i_mtime),
				le32_to_cpu(die->i_mtime_nsec));

		inode->i_size = le64_to_cpu(die->i_size);
		kfree(copied);
		break;
	case EROFS_INODE_LAYOUT_COMPACT:
		vi->inode_isize = sizeof(struct erofs_inode_compact);
		ofs += vi->inode_isize;
		vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);

		inode->i_mode = le16_to_cpu(dic->i_mode);
		iu = dic->i_u;
		i_uid_write(inode, le16_to_cpu(dic->i_uid));
		i_gid_write(inode, le16_to_cpu(dic->i_gid));
		set_nlink(inode, le16_to_cpu(dic->i_nlink));
		/* use build time for compact inodes */
		inode_set_ctime(inode, sbi->build_time, sbi->build_time_nsec);

		inode->i_size = le32_to_cpu(dic->i_size);
		break;
	default:
		erofs_err(sb, "unsupported on-disk inode version %u of nid %llu",
			  erofs_inode_version(ifmt), vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (unlikely(inode->i_size < 0)) {
		erofs_err(sb, "negative i_size @ nid %llu", vi->nid);
		err = -EFSCORRUPTED;
		goto err_out;
	}
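	/* decode the mode-dependent i_u union, shared by both inode layouts */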
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		vi->raw_blkaddr = le32_to_cpu(iu.raw_blkaddr);
		if (S_ISLNK(inode->i_mode)) {
			err = erofs_fill_symlink(inode, kaddr, ofs);
			if (err)
				goto err_out;
		}
		break;
	case S_IFCHR:
	case S_IFBLK:
		inode->i_rdev = new_decode_dev(le32_to_cpu(iu.rdev));
		break;
	case S_IFIFO:
	case S_IFSOCK:
		inode->i_rdev = 0;
		break;
	default:
		erofs_err(sb, "bogus i_mode (%o) @ nid %llu", inode->i_mode,
			  vi->nid);
		err = -EFSCORRUPTED;
		goto err_out;
	}

	/* total blocks for compressed files */
	if (erofs_inode_is_data_compressed(vi->datalayout)) {
		nblks = le32_to_cpu(iu.compressed_blocks);
	} else if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
		/* fill chunked inode summary info */
		vi->chunkformat = le16_to_cpu(iu.c.format);
		if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
			erofs_err(sb, "unsupported chunk format %x of nid %llu",
				  vi->chunkformat, vi->nid);
			err = -EOPNOTSUPP;
			goto err_out;
		}
		vi->chunkbits = sb->s_blocksize_bits +
			(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
	}
	inode_set_mtime_to_ts(inode,
			      inode_set_atime_to_ts(inode, inode_get_ctime(inode)));

	inode->i_flags &= ~S_DAX;
	if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
	    (vi->datalayout == EROFS_INODE_FLAT_PLAIN ||
	     vi->datalayout == EROFS_INODE_CHUNK_BASED))
		inode->i_flags |= S_DAX;

	if (!nblks)
		/* measure inode.i_blocks as generic filesystems */
		inode->i_blocks = round_up(inode->i_size, sb->s_blocksize) >> 9;
	else
		inode->i_blocks = nblks << (sb->s_blocksize_bits - 9);
err_out:
	DBG_BUGON(err);
	erofs_put_metabuf(&buf);
	return err;
}

static int erofs_fill_inode(struct inode *inode)
{
	struct erofs_inode *vi = EROFS_I(inode);
	int err;

	trace_erofs_fill_inode(inode);

	/* read inode base data from disk */
	err = erofs_read_inode(inode);
	if (err)
		return err;

	/* setup the new inode */
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &erofs_generic_iops;
		if (erofs_inode_is_data_compressed(vi->datalayout))
			inode->i_fop = &generic_ro_fops;
		else
			inode->i_fop = &erofs_file_fops;
		break;
	case S_IFDIR:
		inode->i_op = &erofs_dir_iops;
		inode->i_fop = &erofs_dir_fops;
		inode_nohighmem(inode);
		break;
	case S_IFLNK:
		if (inode->i_link)
			inode->i_op = &erofs_fast_symlink_iops;
		else
			inode->i_op = &erofs_symlink_iops;
		inode_nohighmem(inode);
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		inode->i_op = &erofs_generic_iops;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		return 0;
	default:
		return -EFSCORRUPTED;
	}

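	/* pick address_space ops matching the data layout and backing store */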
	mapping_set_large_folios(inode->i_mapping);
	if (erofs_inode_is_data_compressed(vi->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		DO_ONCE_LITE_IF(inode->i_blkbits != PAGE_SHIFT,
			  erofs_info, inode->i_sb,
			  "EXPERIMENTAL EROFS subpage compressed block support in use. Use at your own risk!");
		inode->i_mapping->a_ops = &z_erofs_aops;
#else
		err = -EOPNOTSUPP;
#endif
	} else {
		inode->i_mapping->a_ops = &erofs_aops;
#ifdef CONFIG_EROFS_FS_ONDEMAND
		if (erofs_is_fscache_mode(inode->i_sb))
			inode->i_mapping->a_ops = &erofs_fscache_access_aops;
#endif
#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
		if (erofs_is_fileio_mode(EROFS_SB(inode->i_sb)))
			inode->i_mapping->a_ops = &erofs_fileio_aops;
#endif
	}

	return err;
}

/*
 * ino_t is 32 bits on 32-bit architectures; squash the 64-bit nid value
 * down so that it will fit.
 */
static ino_t erofs_squash_ino(erofs_nid_t nid)
{
	ino_t ino = (ino_t)nid;

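	/* XOR-fold the high nid bits in whenever ino_t is narrower than nid */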
	if (sizeof(ino_t) < sizeof(erofs_nid_t))
		ino ^= nid >> (sizeof(erofs_nid_t) - sizeof(ino_t)) * 8;
	return ino;
}

static int erofs_iget5_eq(struct inode *inode, void *opaque)
{
	return EROFS_I(inode)->nid == *(erofs_nid_t *)opaque;
}

static int erofs_iget5_set(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	inode->i_ino = erofs_squash_ino(nid);
	EROFS_I(inode)->nid = nid;
	return 0;
}

struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid)
{
	struct inode *inode;

	inode = iget5_locked(sb, erofs_squash_ino(nid), erofs_iget5_eq,
			     erofs_iget5_set, &nid);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		int err = erofs_fill_inode(inode);

		if (err) {
			iget_failed(inode);
			return ERR_PTR(err);
		}
		unlock_new_inode(inode);
	}
	return inode;
}

int erofs_getattr(struct mnt_idmap *idmap, const struct path *path,
		  struct kstat *stat, u32 request_mask,
		  unsigned int query_flags)
{
	struct inode *const inode = d_inode(path->dentry);
	struct block_device *bdev = inode->i_sb->s_bdev;
	bool compressed =
		erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout);

	if (compressed)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	stat->attributes |= STATX_ATTR_IMMUTABLE;
	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_IMMUTABLE);

	/*
	 * Return the DIO alignment restrictions if requested.
	 *
	 * In EROFS, STATX_DIOALIGN is only supported in bdev-based mode
	 * and for uncompressed inodes; otherwise we report no DIO support.
	 */
	if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
		stat->result_mask |= STATX_DIOALIGN;
		if (bdev && !compressed) {
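			/* dma_alignment is a mask, so +1 yields the required alignment */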
			stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
			stat->dio_offset_align = bdev_logical_block_size(bdev);
		}
	}
	generic_fillattr(idmap, request_mask, inode, stat);
	return 0;
}

const struct inode_operations erofs_generic_iops = {
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
	.fiemap = erofs_fiemap,
};

const struct inode_operations erofs_symlink_iops = {
	.get_link = page_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
};

const struct inode_operations erofs_fast_symlink_iops = {
	.get_link = simple_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
};