v6.8 (fs/erofs/xattr.c)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2017-2018 HUAWEI, Inc.
  4 *             https://www.huawei.com/
  5 * Copyright (C) 2021-2022, Alibaba Cloud
  6 */
  7#include <linux/security.h>
  8#include <linux/xxhash.h>
  9#include "xattr.h"
 10
 11struct erofs_xattr_iter {
 12	struct super_block *sb;
 13	struct erofs_buf buf;
 14	erofs_off_t pos;
 15	void *kaddr;
 16
 17	char *buffer;
 18	int buffer_size, buffer_ofs;
 19
 20	/* getxattr */
 21	int index, infix_len;
 22	struct qstr name;
 23
 24	/* listxattr */
 25	struct dentry *dentry;
 26};
 27
 28static int erofs_init_inode_xattrs(struct inode *inode)
 29{
 30	struct erofs_inode *const vi = EROFS_I(inode);
 31	struct erofs_xattr_iter it;
 32	unsigned int i;
 33	struct erofs_xattr_ibody_header *ih;
 34	struct super_block *sb = inode->i_sb;
 35	int ret = 0;
 36
 37	/* in the common case, xattrs of this inode have already been initialized. */
 38	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags)) {
 39		/*
 40		 * paired with smp_mb() at the end of the function to ensure
 41		 * fields will only be observed after the bit is set.
 42		 */
 43		smp_mb();
 44		return 0;
 45	}
 46
 47	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE))
 48		return -ERESTARTSYS;
 49
 50	/* someone has initialized xattrs for us? */
 51	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
 52		goto out_unlock;
 53
 54	/*
 55	 * bypass all xattr operations if ->xattr_isize is not greater than
 56	 * sizeof(struct erofs_xattr_ibody_header), in detail:
 57	 * 1) it is not enough to contain erofs_xattr_ibody_header then
 58	 *    ->xattr_isize should be 0 (it means no xattr);
 59	 * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk
 60	 *    undefined right now (maybe use later with some new sb feature).
 61	 */
 62	if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
 63		erofs_err(sb,
 64			  "xattr_isize %d of nid %llu is not supported yet",
 65			  vi->xattr_isize, vi->nid);
 66		ret = -EOPNOTSUPP;
 67		goto out_unlock;
 68	} else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
 69		if (vi->xattr_isize) {
 70			erofs_err(sb, "bogus xattr ibody @ nid %llu", vi->nid);
 71			DBG_BUGON(1);
 72			ret = -EFSCORRUPTED;
 73			goto out_unlock;	/* xattr ondisk layout error */
 74		}
 75		ret = -ENOATTR;
 76		goto out_unlock;
 77	}
 78
 79	it.buf = __EROFS_BUF_INITIALIZER;
 80	erofs_init_metabuf(&it.buf, sb);
 81	it.pos = erofs_iloc(inode) + vi->inode_isize;
 82
 83	/* read in shared xattr array (non-atomic, see kmalloc below) */
 84	it.kaddr = erofs_bread(&it.buf, erofs_blknr(sb, it.pos), EROFS_KMAP);
 85	if (IS_ERR(it.kaddr)) {
 86		ret = PTR_ERR(it.kaddr);
 87		goto out_unlock;
 88	}
 89
 90	ih = it.kaddr + erofs_blkoff(sb, it.pos);
 91	vi->xattr_name_filter = le32_to_cpu(ih->h_name_filter);
 92	vi->xattr_shared_count = ih->h_shared_count;
 93	vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
 94						sizeof(uint), GFP_KERNEL);
 95	if (!vi->xattr_shared_xattrs) {
 96		erofs_put_metabuf(&it.buf);
 97		ret = -ENOMEM;
 98		goto out_unlock;
 99	}
100
101	/* let's skip ibody header */
102	it.pos += sizeof(struct erofs_xattr_ibody_header);
103
104	for (i = 0; i < vi->xattr_shared_count; ++i) {
105		it.kaddr = erofs_bread(&it.buf, erofs_blknr(sb, it.pos),
106				       EROFS_KMAP);
107		if (IS_ERR(it.kaddr)) {
108			kfree(vi->xattr_shared_xattrs);
109			vi->xattr_shared_xattrs = NULL;
110			ret = PTR_ERR(it.kaddr);
111			goto out_unlock;
112		}
113		vi->xattr_shared_xattrs[i] = le32_to_cpu(*(__le32 *)
114				(it.kaddr + erofs_blkoff(sb, it.pos)));
115		it.pos += sizeof(__le32);
116	}
117	erofs_put_metabuf(&it.buf);
118
119	/* paired with smp_mb() at the beginning of the function. */
120	smp_mb();
121	set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);
122
123out_unlock:
124	clear_and_wake_up_bit(EROFS_I_BL_XATTR_BIT, &vi->flags);
125	return ret;
126}
127
128static bool erofs_xattr_user_list(struct dentry *dentry)
129{
130	return test_opt(&EROFS_SB(dentry->d_sb)->opt, XATTR_USER);
131}
132
133static bool erofs_xattr_trusted_list(struct dentry *dentry)
134{
135	return capable(CAP_SYS_ADMIN);
136}
137
138static int erofs_xattr_generic_get(const struct xattr_handler *handler,
139				   struct dentry *unused, struct inode *inode,
140				   const char *name, void *buffer, size_t size)
141{
142	if (handler->flags == EROFS_XATTR_INDEX_USER &&
143	    !test_opt(&EROFS_I_SB(inode)->opt, XATTR_USER))
144		return -EOPNOTSUPP;
145
146	return erofs_getxattr(inode, handler->flags, name, buffer, size);
147}
148
149const struct xattr_handler erofs_xattr_user_handler = {
150	.prefix	= XATTR_USER_PREFIX,
151	.flags	= EROFS_XATTR_INDEX_USER,
152	.list	= erofs_xattr_user_list,
153	.get	= erofs_xattr_generic_get,
154};
155
156const struct xattr_handler erofs_xattr_trusted_handler = {
157	.prefix	= XATTR_TRUSTED_PREFIX,
158	.flags	= EROFS_XATTR_INDEX_TRUSTED,
159	.list	= erofs_xattr_trusted_list,
160	.get	= erofs_xattr_generic_get,
161};
162
163#ifdef CONFIG_EROFS_FS_SECURITY
164const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
165	.prefix	= XATTR_SECURITY_PREFIX,
166	.flags	= EROFS_XATTR_INDEX_SECURITY,
167	.get	= erofs_xattr_generic_get,
168};
169#endif
170
171const struct xattr_handler * const erofs_xattr_handlers[] = {
172	&erofs_xattr_user_handler,
173	&erofs_xattr_trusted_handler,
174#ifdef CONFIG_EROFS_FS_SECURITY
175	&erofs_xattr_security_handler,
176#endif
177	NULL,
178};
179
180static int erofs_xattr_copy_to_buffer(struct erofs_xattr_iter *it,
181				      unsigned int len)
182{
183	unsigned int slice, processed;
184	struct super_block *sb = it->sb;
185	void *src;
186
187	for (processed = 0; processed < len; processed += slice) {
188		it->kaddr = erofs_bread(&it->buf, erofs_blknr(sb, it->pos),
189					EROFS_KMAP);
190		if (IS_ERR(it->kaddr))
191			return PTR_ERR(it->kaddr);
192
193		src = it->kaddr + erofs_blkoff(sb, it->pos);
194		slice = min_t(unsigned int, sb->s_blocksize -
195				erofs_blkoff(sb, it->pos), len - processed);
196		memcpy(it->buffer + it->buffer_ofs, src, slice);
197		it->buffer_ofs += slice;
198		it->pos += slice;
199	}
200	return 0;
201}
202
203static int erofs_listxattr_foreach(struct erofs_xattr_iter *it)
204{
205	struct erofs_xattr_entry entry;
206	unsigned int base_index, name_total, prefix_len, infix_len = 0;
207	const char *prefix, *infix = NULL;
208	int err;
209
210	/* 1. handle xattr entry */
211	entry = *(struct erofs_xattr_entry *)
212			(it->kaddr + erofs_blkoff(it->sb, it->pos));
213	it->pos += sizeof(struct erofs_xattr_entry);
214
215	base_index = entry.e_name_index;
216	if (entry.e_name_index & EROFS_XATTR_LONG_PREFIX) {
217		struct erofs_sb_info *sbi = EROFS_SB(it->sb);
218		struct erofs_xattr_prefix_item *pf = sbi->xattr_prefixes +
219			(entry.e_name_index & EROFS_XATTR_LONG_PREFIX_MASK);
220
221		if (pf >= sbi->xattr_prefixes + sbi->xattr_prefix_count)
222			return 0;
223		infix = pf->prefix->infix;
224		infix_len = pf->infix_len;
225		base_index = pf->prefix->base_index;
226	}
227
228	prefix = erofs_xattr_prefix(base_index, it->dentry);
229	if (!prefix)
230		return 0;
231	prefix_len = strlen(prefix);
232	name_total = prefix_len + infix_len + entry.e_name_len + 1;
233
234	if (!it->buffer) {
235		it->buffer_ofs += name_total;
236		return 0;
237	}
238
239	if (it->buffer_ofs + name_total > it->buffer_size)
240		return -ERANGE;
241
242	memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
243	memcpy(it->buffer + it->buffer_ofs + prefix_len, infix, infix_len);
244	it->buffer_ofs += prefix_len + infix_len;
245
246	/* 2. handle xattr name */
247	err = erofs_xattr_copy_to_buffer(it, entry.e_name_len);
248	if (err)
249		return err;
250
251	it->buffer[it->buffer_ofs++] = '\0';
252	return 0;
253}
254
255static int erofs_getxattr_foreach(struct erofs_xattr_iter *it)
256{
257	struct super_block *sb = it->sb;
258	struct erofs_xattr_entry entry;
259	unsigned int slice, processed, value_sz;
260
261	/* 1. handle xattr entry */
262	entry = *(struct erofs_xattr_entry *)
263			(it->kaddr + erofs_blkoff(sb, it->pos));
264	it->pos += sizeof(struct erofs_xattr_entry);
265	value_sz = le16_to_cpu(entry.e_value_size);
266
267	/* should also match the infix for long name prefixes */
268	if (entry.e_name_index & EROFS_XATTR_LONG_PREFIX) {
269		struct erofs_sb_info *sbi = EROFS_SB(sb);
270		struct erofs_xattr_prefix_item *pf = sbi->xattr_prefixes +
271			(entry.e_name_index & EROFS_XATTR_LONG_PREFIX_MASK);
272
273		if (pf >= sbi->xattr_prefixes + sbi->xattr_prefix_count)
274			return -ENOATTR;
275
276		if (it->index != pf->prefix->base_index ||
277		    it->name.len != entry.e_name_len + pf->infix_len)
278			return -ENOATTR;
279
280		if (memcmp(it->name.name, pf->prefix->infix, pf->infix_len))
281			return -ENOATTR;
282
283		it->infix_len = pf->infix_len;
284	} else {
285		if (it->index != entry.e_name_index ||
286		    it->name.len != entry.e_name_len)
287			return -ENOATTR;
288
289		it->infix_len = 0;
290	}
291
292	/* 2. handle xattr name */
293	for (processed = 0; processed < entry.e_name_len; processed += slice) {
294		it->kaddr = erofs_bread(&it->buf, erofs_blknr(sb, it->pos),
295					EROFS_KMAP);
296		if (IS_ERR(it->kaddr))
297			return PTR_ERR(it->kaddr);
298
299		slice = min_t(unsigned int,
300				sb->s_blocksize - erofs_blkoff(sb, it->pos),
301				entry.e_name_len - processed);
302		if (memcmp(it->name.name + it->infix_len + processed,
303			   it->kaddr + erofs_blkoff(sb, it->pos), slice))
304			return -ENOATTR;
305		it->pos += slice;
306	}
307
308	/* 3. handle xattr value */
309	if (!it->buffer) {
310		it->buffer_ofs = value_sz;
311		return 0;
312	}
313
314	if (it->buffer_size < value_sz)
315		return -ERANGE;
316
317	return erofs_xattr_copy_to_buffer(it, value_sz);
318}
319
320static int erofs_xattr_iter_inline(struct erofs_xattr_iter *it,
321				   struct inode *inode, bool getxattr)
322{
323	struct erofs_inode *const vi = EROFS_I(inode);
324	unsigned int xattr_header_sz, remaining, entry_sz;
325	erofs_off_t next_pos;
326	int ret;
327
328	xattr_header_sz = sizeof(struct erofs_xattr_ibody_header) +
329			  sizeof(u32) * vi->xattr_shared_count;
330	if (xattr_header_sz >= vi->xattr_isize) {
331		DBG_BUGON(xattr_header_sz > vi->xattr_isize);
332		return -ENOATTR;
333	}
334
335	remaining = vi->xattr_isize - xattr_header_sz;
336	it->pos = erofs_iloc(inode) + vi->inode_isize + xattr_header_sz;
337
338	while (remaining) {
339		it->kaddr = erofs_bread(&it->buf, erofs_blknr(it->sb, it->pos),
340					EROFS_KMAP);
341		if (IS_ERR(it->kaddr))
342			return PTR_ERR(it->kaddr);
343
344		entry_sz = erofs_xattr_entry_size(it->kaddr +
345				erofs_blkoff(it->sb, it->pos));
346		/* xattr on-disk corruption: xattr entry beyond xattr_isize */
347		if (remaining < entry_sz) {
348			DBG_BUGON(1);
349			return -EFSCORRUPTED;
350		}
351		remaining -= entry_sz;
352		next_pos = it->pos + entry_sz;
353
354		if (getxattr)
355			ret = erofs_getxattr_foreach(it);
356		else
357			ret = erofs_listxattr_foreach(it);
358		if ((getxattr && ret != -ENOATTR) || (!getxattr && ret))
359			break;
360
361		it->pos = next_pos;
362	}
363	return ret;
364}
365
366static int erofs_xattr_iter_shared(struct erofs_xattr_iter *it,
367				   struct inode *inode, bool getxattr)
368{
369	struct erofs_inode *const vi = EROFS_I(inode);
370	struct super_block *const sb = it->sb;
371	struct erofs_sb_info *sbi = EROFS_SB(sb);
372	unsigned int i;
373	int ret = -ENOATTR;
374
375	for (i = 0; i < vi->xattr_shared_count; ++i) {
376		it->pos = erofs_pos(sb, sbi->xattr_blkaddr) +
377				vi->xattr_shared_xattrs[i] * sizeof(__le32);
378		it->kaddr = erofs_bread(&it->buf, erofs_blknr(sb, it->pos),
379					EROFS_KMAP);
380		if (IS_ERR(it->kaddr))
381			return PTR_ERR(it->kaddr);
382
383		if (getxattr)
384			ret = erofs_getxattr_foreach(it);
385		else
386			ret = erofs_listxattr_foreach(it);
387		if ((getxattr && ret != -ENOATTR) || (!getxattr && ret))
388			break;
389	}
390	return ret;
391}
392
393int erofs_getxattr(struct inode *inode, int index, const char *name,
394		   void *buffer, size_t buffer_size)
395{
396	int ret;
397	unsigned int hashbit;
398	struct erofs_xattr_iter it;
399	struct erofs_inode *vi = EROFS_I(inode);
400	struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
401
402	if (!name)
403		return -EINVAL;
404
405	ret = erofs_init_inode_xattrs(inode);
406	if (ret)
407		return ret;
408
409	/* reserved flag is non-zero if there's any change of on-disk format */
410	if (erofs_sb_has_xattr_filter(sbi) && !sbi->xattr_filter_reserved) {
411		hashbit = xxh32(name, strlen(name),
412				EROFS_XATTR_FILTER_SEED + index);
413		hashbit &= EROFS_XATTR_FILTER_BITS - 1;
414		if (vi->xattr_name_filter & (1U << hashbit))
415			return -ENOATTR;
416	}
417
418	it.index = index;
419	it.name = (struct qstr)QSTR_INIT(name, strlen(name));
420	if (it.name.len > EROFS_NAME_LEN)
421		return -ERANGE;
422
423	it.sb = inode->i_sb;
424	it.buf = __EROFS_BUF_INITIALIZER;
425	erofs_init_metabuf(&it.buf, it.sb);
426	it.buffer = buffer;
427	it.buffer_size = buffer_size;
428	it.buffer_ofs = 0;
429
430	ret = erofs_xattr_iter_inline(&it, inode, true);
431	if (ret == -ENOATTR)
432		ret = erofs_xattr_iter_shared(&it, inode, true);
433	erofs_put_metabuf(&it.buf);
434	return ret ? ret : it.buffer_ofs;
435}
436
437ssize_t erofs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
438{
439	int ret;
440	struct erofs_xattr_iter it;
441	struct inode *inode = d_inode(dentry);
442
443	ret = erofs_init_inode_xattrs(inode);
444	if (ret == -ENOATTR)
445		return 0;
446	if (ret)
447		return ret;
448
449	it.sb = dentry->d_sb;
450	it.buf = __EROFS_BUF_INITIALIZER;
451	erofs_init_metabuf(&it.buf, it.sb);
452	it.dentry = dentry;
453	it.buffer = buffer;
454	it.buffer_size = buffer_size;
455	it.buffer_ofs = 0;
456
457	ret = erofs_xattr_iter_inline(&it, inode, false);
458	if (!ret || ret == -ENOATTR)
459		ret = erofs_xattr_iter_shared(&it, inode, false);
460	if (ret == -ENOATTR)
461		ret = 0;
462	erofs_put_metabuf(&it.buf);
463	return ret ? ret : it.buffer_ofs;
464}
465
466void erofs_xattr_prefixes_cleanup(struct super_block *sb)
467{
468	struct erofs_sb_info *sbi = EROFS_SB(sb);
469	int i;
470
471	if (sbi->xattr_prefixes) {
472		for (i = 0; i < sbi->xattr_prefix_count; i++)
473			kfree(sbi->xattr_prefixes[i].prefix);
474		kfree(sbi->xattr_prefixes);
475		sbi->xattr_prefixes = NULL;
476	}
477}
478
479int erofs_xattr_prefixes_init(struct super_block *sb)
480{
481	struct erofs_sb_info *sbi = EROFS_SB(sb);
482	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
483	erofs_off_t pos = (erofs_off_t)sbi->xattr_prefix_start << 2;
484	struct erofs_xattr_prefix_item *pfs;
485	int ret = 0, i, len;
486
487	if (!sbi->xattr_prefix_count)
488		return 0;
489
490	pfs = kzalloc(sbi->xattr_prefix_count * sizeof(*pfs), GFP_KERNEL);
491	if (!pfs)
492		return -ENOMEM;
493
494	if (sbi->packed_inode)
495		buf.inode = sbi->packed_inode;
496	else
497		erofs_init_metabuf(&buf, sb);
498
499	for (i = 0; i < sbi->xattr_prefix_count; i++) {
500		void *ptr = erofs_read_metadata(sb, &buf, &pos, &len);
501
502		if (IS_ERR(ptr)) {
503			ret = PTR_ERR(ptr);
504			break;
505		} else if (len < sizeof(*pfs->prefix) ||
506			   len > EROFS_NAME_LEN + sizeof(*pfs->prefix)) {
507			kfree(ptr);
508			ret = -EFSCORRUPTED;
509			break;
510		}
511		pfs[i].prefix = ptr;
512		pfs[i].infix_len = len - sizeof(struct erofs_xattr_long_prefix);
513	}
514
515	erofs_put_metabuf(&buf);
516	sbi->xattr_prefixes = pfs;
517	if (ret)
518		erofs_xattr_prefixes_cleanup(sb);
519	return ret;
520}
521
522#ifdef CONFIG_EROFS_FS_POSIX_ACL
523struct posix_acl *erofs_get_acl(struct inode *inode, int type, bool rcu)
524{
525	struct posix_acl *acl;
526	int prefix, rc;
527	char *value = NULL;
528
529	if (rcu)
530		return ERR_PTR(-ECHILD);
531
532	switch (type) {
533	case ACL_TYPE_ACCESS:
534		prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS;
535		break;
536	case ACL_TYPE_DEFAULT:
537		prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT;
538		break;
539	default:
540		return ERR_PTR(-EINVAL);
541	}
542
543	rc = erofs_getxattr(inode, prefix, "", NULL, 0);
544	if (rc > 0) {
545		value = kmalloc(rc, GFP_KERNEL);
546		if (!value)
547			return ERR_PTR(-ENOMEM);
548		rc = erofs_getxattr(inode, prefix, "", value, rc);
549	}
550
551	if (rc == -ENOATTR)
552		acl = NULL;
553	else if (rc < 0)
554		acl = ERR_PTR(rc);
555	else
556		acl = posix_acl_from_xattr(&init_user_ns, value, rc);
557	kfree(value);
558	return acl;
559}
560#endif
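
For reference, a minimal userspace sketch (not part of the kernel source above) of how the v6.8 code is reached: the listxattr(2) and getxattr(2) system calls end up in erofs_listxattr() and erofs_getxattr() for files on an EROFS mount. The path /mnt/erofs/file is purely hypothetical.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/mnt/erofs/file";	/* hypothetical EROFS mount */
	char names[1024], value[256];
	ssize_t total, len;
	char *name;

	/* enumerate xattr names; the kernel side is erofs_listxattr() */
	total = listxattr(path, names, sizeof(names));
	if (total < 0) {
		perror("listxattr");
		return EXIT_FAILURE;
	}

	/* the buffer holds NUL-terminated names back to back, e.g. "user.foo\0" */
	for (name = names; name < names + total; name += strlen(name) + 1) {
		/* fetch each value; the kernel side is erofs_getxattr() */
		len = getxattr(path, name, value, sizeof(value) - 1);
		if (len < 0) {
			perror("getxattr");
			continue;
		}
		value[len] = '\0';
		printf("%s = %s\n", name, value);
	}
	return EXIT_SUCCESS;
}
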
v5.9 (fs/erofs/xattr.c)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2017-2018 HUAWEI, Inc.
  4 *             https://www.huawei.com/
  5 * Created by Gao Xiang <gaoxiang25@huawei.com>
  6 */
  7#include <linux/security.h>
  8#include "xattr.h"
  9
 10struct xattr_iter {
 11	struct super_block *sb;
 12	struct page *page;
 13	void *kaddr;
 14
 15	erofs_blk_t blkaddr;
 16	unsigned int ofs;
 17};
 18
 19static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
 20{
 21	/* the only user of kunmap() is 'init_inode_xattrs' */
 22	if (!atomic)
 23		kunmap(it->page);
 24	else
 25		kunmap_atomic(it->kaddr);
 26
 27	unlock_page(it->page);
 28	put_page(it->page);
 29}
 30
 31static inline void xattr_iter_end_final(struct xattr_iter *it)
 32{
 33	if (!it->page)
 34		return;
 35
 36	xattr_iter_end(it, true);
 37}
 38
 39static int init_inode_xattrs(struct inode *inode)
 40{
 41	struct erofs_inode *const vi = EROFS_I(inode);
 42	struct xattr_iter it;
 43	unsigned int i;
 44	struct erofs_xattr_ibody_header *ih;
 45	struct super_block *sb;
 46	struct erofs_sb_info *sbi;
 47	bool atomic_map;
 48	int ret = 0;
 49
 50	/* in the common case, xattrs of this inode have already been initialized. */
 51	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
 52		return 0;
 53
 54	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE))
 55		return -ERESTARTSYS;
 56
 57	/* someone has initialized xattrs for us? */
 58	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
 59		goto out_unlock;
 60
 61	/*
 62	 * bypass all xattr operations if ->xattr_isize is not greater than
 63	 * sizeof(struct erofs_xattr_ibody_header), in detail:
 64	 * 1) it is not enough to contain erofs_xattr_ibody_header then
 65	 *    ->xattr_isize should be 0 (it means no xattr);
 66	 * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk
 67	 *    undefined right now (maybe use later with some new sb feature).
 68	 */
 69	if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
 70		erofs_err(inode->i_sb,
 71			  "xattr_isize %d of nid %llu is not supported yet",
 72			  vi->xattr_isize, vi->nid);
 73		ret = -EOPNOTSUPP;
 74		goto out_unlock;
 75	} else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
 76		if (vi->xattr_isize) {
 77			erofs_err(inode->i_sb,
 78				  "bogus xattr ibody @ nid %llu", vi->nid);
 79			DBG_BUGON(1);
 80			ret = -EFSCORRUPTED;
 81			goto out_unlock;	/* xattr ondisk layout error */
 82		}
 83		ret = -ENOATTR;
 84		goto out_unlock;
 85	}
 86
 87	sb = inode->i_sb;
 88	sbi = EROFS_SB(sb);
 89	it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
 90	it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
 91
 92	it.page = erofs_get_meta_page(sb, it.blkaddr);
 93	if (IS_ERR(it.page)) {
 94		ret = PTR_ERR(it.page);
 95		goto out_unlock;
 96	}
 97
 98	/* read in shared xattr array (non-atomic, see kmalloc below) */
 99	it.kaddr = kmap(it.page);
100	atomic_map = false;
101
102	ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
103
104	vi->xattr_shared_count = ih->h_shared_count;
105	vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
106						sizeof(uint), GFP_KERNEL);
107	if (!vi->xattr_shared_xattrs) {
108		xattr_iter_end(&it, atomic_map);
109		ret = -ENOMEM;
110		goto out_unlock;
111	}
112
113	/* let's skip ibody header */
114	it.ofs += sizeof(struct erofs_xattr_ibody_header);
115
116	for (i = 0; i < vi->xattr_shared_count; ++i) {
117		if (it.ofs >= EROFS_BLKSIZ) {
118			/* cannot be unaligned */
119			DBG_BUGON(it.ofs != EROFS_BLKSIZ);
120			xattr_iter_end(&it, atomic_map);
121
122			it.page = erofs_get_meta_page(sb, ++it.blkaddr);
123			if (IS_ERR(it.page)) {
124				kfree(vi->xattr_shared_xattrs);
125				vi->xattr_shared_xattrs = NULL;
126				ret = PTR_ERR(it.page);
127				goto out_unlock;
128			}
129
130			it.kaddr = kmap_atomic(it.page);
131			atomic_map = true;
132			it.ofs = 0;
133		}
134		vi->xattr_shared_xattrs[i] =
135			le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
136		it.ofs += sizeof(__le32);
137	}
138	xattr_iter_end(&it, atomic_map);
139
140	set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);
141
142out_unlock:
143	clear_and_wake_up_bit(EROFS_I_BL_XATTR_BIT, &vi->flags);
144	return ret;
145}
146
147/*
148 * the general idea for these return values is
149 * if    0 is returned, go on processing the current xattr;
150 *       1 (> 0) is returned, skip this round to process the next xattr;
151 *    -err (< 0) is returned, an error (maybe ENOXATTR) occurred
152 *                            and need to be handled
153 */
154struct xattr_iter_handlers {
155	int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry);
156	int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf,
157		    unsigned int len);
158	int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz);
159	void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf,
160		      unsigned int len);
161};
162
163static inline int xattr_iter_fixup(struct xattr_iter *it)
164{
165	if (it->ofs < EROFS_BLKSIZ)
166		return 0;
167
168	xattr_iter_end(it, true);
169
170	it->blkaddr += erofs_blknr(it->ofs);
171
172	it->page = erofs_get_meta_page(it->sb, it->blkaddr);
173	if (IS_ERR(it->page)) {
174		int err = PTR_ERR(it->page);
175
176		it->page = NULL;
177		return err;
178	}
179
180	it->kaddr = kmap_atomic(it->page);
181	it->ofs = erofs_blkoff(it->ofs);
182	return 0;
183}
184
185static int inline_xattr_iter_begin(struct xattr_iter *it,
186				   struct inode *inode)
187{
188	struct erofs_inode *const vi = EROFS_I(inode);
189	struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
190	unsigned int xattr_header_sz, inline_xattr_ofs;
191
192	xattr_header_sz = inlinexattr_header_size(inode);
193	if (xattr_header_sz >= vi->xattr_isize) {
194		DBG_BUGON(xattr_header_sz > vi->xattr_isize);
195		return -ENOATTR;
196	}
197
198	inline_xattr_ofs = vi->inode_isize + xattr_header_sz;
199
200	it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
201	it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
202
203	it->page = erofs_get_meta_page(inode->i_sb, it->blkaddr);
204	if (IS_ERR(it->page))
205		return PTR_ERR(it->page);
206
207	it->kaddr = kmap_atomic(it->page);
208	return vi->xattr_isize - xattr_header_sz;
209}
210
211/*
212 * Regardless of success or failure, `xattr_foreach' will end up with
213 * `ofs' pointing to the next xattr item rather than an arbitrary position.
214 */
215static int xattr_foreach(struct xattr_iter *it,
216			 const struct xattr_iter_handlers *op,
217			 unsigned int *tlimit)
218{
219	struct erofs_xattr_entry entry;
220	unsigned int value_sz, processed, slice;
221	int err;
222
223	/* 0. fixup blkaddr, ofs, ipage */
224	err = xattr_iter_fixup(it);
225	if (err)
226		return err;
227
228	/*
229	 * 1. read xattr entry to the memory,
230	 *    since we do EROFS_XATTR_ALIGN
231	 *    therefore entry should be in the page
232	 */
233	entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
234	if (tlimit) {
235		unsigned int entry_sz = erofs_xattr_entry_size(&entry);
236
237		/* xattr on-disk corruption: xattr entry beyond xattr_isize */
238		if (*tlimit < entry_sz) {
239			DBG_BUGON(1);
240			return -EFSCORRUPTED;
241		}
242		*tlimit -= entry_sz;
243	}
244
245	it->ofs += sizeof(struct erofs_xattr_entry);
246	value_sz = le16_to_cpu(entry.e_value_size);
247
248	/* handle entry */
249	err = op->entry(it, &entry);
250	if (err) {
251		it->ofs += entry.e_name_len + value_sz;
252		goto out;
253	}
254
255	/* 2. handle xattr name (ofs will finally be at the end of name) */
256	processed = 0;
257
258	while (processed < entry.e_name_len) {
259		if (it->ofs >= EROFS_BLKSIZ) {
260			DBG_BUGON(it->ofs > EROFS_BLKSIZ);
261
262			err = xattr_iter_fixup(it);
263			if (err)
264				goto out;
265			it->ofs = 0;
266		}
267
268		slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
269			      entry.e_name_len - processed);
270
271		/* handle name */
272		err = op->name(it, processed, it->kaddr + it->ofs, slice);
273		if (err) {
274			it->ofs += entry.e_name_len - processed + value_sz;
275			goto out;
276		}
277
278		it->ofs += slice;
279		processed += slice;
280	}
281
282	/* 3. handle xattr value */
283	processed = 0;
284
285	if (op->alloc_buffer) {
286		err = op->alloc_buffer(it, value_sz);
287		if (err) {
288			it->ofs += value_sz;
289			goto out;
290		}
291	}
292
293	while (processed < value_sz) {
294		if (it->ofs >= EROFS_BLKSIZ) {
295			DBG_BUGON(it->ofs > EROFS_BLKSIZ);
296
297			err = xattr_iter_fixup(it);
298			if (err)
299				goto out;
300			it->ofs = 0;
301		}
302
303		slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
304			      value_sz - processed);
305		op->value(it, processed, it->kaddr + it->ofs, slice);
306		it->ofs += slice;
307		processed += slice;
308	}
309
310out:
311	/* xattrs should be 4-byte aligned (on-disk constraint) */
312	it->ofs = EROFS_XATTR_ALIGN(it->ofs);
313	return err < 0 ? err : 0;
314}
315
316struct getxattr_iter {
317	struct xattr_iter it;
318
319	char *buffer;
320	int buffer_size, index;
321	struct qstr name;
322};
323
324static int xattr_entrymatch(struct xattr_iter *_it,
325			    struct erofs_xattr_entry *entry)
326{
327	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
328
329	return (it->index != entry->e_name_index ||
330		it->name.len != entry->e_name_len) ? -ENOATTR : 0;
331}
332
333static int xattr_namematch(struct xattr_iter *_it,
334			   unsigned int processed, char *buf, unsigned int len)
335{
336	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
337
338	return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
339}
340
341static int xattr_checkbuffer(struct xattr_iter *_it,
342			     unsigned int value_sz)
343{
344	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
345	int err = it->buffer_size < value_sz ? -ERANGE : 0;
346
347	it->buffer_size = value_sz;
348	return !it->buffer ? 1 : err;
349}
350
351static void xattr_copyvalue(struct xattr_iter *_it,
352			    unsigned int processed,
353			    char *buf, unsigned int len)
354{
355	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
356
357	memcpy(it->buffer + processed, buf, len);
358}
359
360static const struct xattr_iter_handlers find_xattr_handlers = {
361	.entry = xattr_entrymatch,
362	.name = xattr_namematch,
363	.alloc_buffer = xattr_checkbuffer,
364	.value = xattr_copyvalue
365};
366
367static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
368{
369	int ret;
370	unsigned int remaining;
371
372	ret = inline_xattr_iter_begin(&it->it, inode);
373	if (ret < 0)
374		return ret;
375
376	remaining = ret;
377	while (remaining) {
378		ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
379		if (ret != -ENOATTR)
380			break;
381	}
382	xattr_iter_end_final(&it->it);
383
384	return ret ? ret : it->buffer_size;
385}
386
387static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
388{
389	struct erofs_inode *const vi = EROFS_I(inode);
390	struct super_block *const sb = inode->i_sb;
391	struct erofs_sb_info *const sbi = EROFS_SB(sb);
392	unsigned int i;
393	int ret = -ENOATTR;
394
395	for (i = 0; i < vi->xattr_shared_count; ++i) {
396		erofs_blk_t blkaddr =
397			xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);
398
399		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
400
401		if (!i || blkaddr != it->it.blkaddr) {
402			if (i)
403				xattr_iter_end(&it->it, true);
404
405			it->it.page = erofs_get_meta_page(sb, blkaddr);
406			if (IS_ERR(it->it.page))
407				return PTR_ERR(it->it.page);
408
409			it->it.kaddr = kmap_atomic(it->it.page);
410			it->it.blkaddr = blkaddr;
411		}
412
413		ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
414		if (ret != -ENOATTR)
415			break;
416	}
417	if (vi->xattr_shared_count)
418		xattr_iter_end_final(&it->it);
419
420	return ret ? ret : it->buffer_size;
421}
422
423static bool erofs_xattr_user_list(struct dentry *dentry)
424{
425	return test_opt(&EROFS_SB(dentry->d_sb)->ctx, XATTR_USER);
426}
427
428static bool erofs_xattr_trusted_list(struct dentry *dentry)
429{
430	return capable(CAP_SYS_ADMIN);
431}
432
433int erofs_getxattr(struct inode *inode, int index,
434		   const char *name,
435		   void *buffer, size_t buffer_size)
436{
437	int ret;
438	struct getxattr_iter it;
439
440	if (!name)
441		return -EINVAL;
442
443	ret = init_inode_xattrs(inode);
444	if (ret)
445		return ret;
446
447	it.index = index;
448
449	it.name.len = strlen(name);
450	if (it.name.len > EROFS_NAME_LEN)
451		return -ERANGE;
452	it.name.name = name;
453
454	it.buffer = buffer;
455	it.buffer_size = buffer_size;
456
457	it.it.sb = inode->i_sb;
458	ret = inline_getxattr(inode, &it);
459	if (ret == -ENOATTR)
460		ret = shared_getxattr(inode, &it);
461	return ret;
462}
463
464static int erofs_xattr_generic_get(const struct xattr_handler *handler,
465				   struct dentry *unused, struct inode *inode,
466				   const char *name, void *buffer, size_t size)
467{
468	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
469
470	switch (handler->flags) {
471	case EROFS_XATTR_INDEX_USER:
472		if (!test_opt(&sbi->ctx, XATTR_USER))
473			return -EOPNOTSUPP;
474		break;
475	case EROFS_XATTR_INDEX_TRUSTED:
476		if (!capable(CAP_SYS_ADMIN))
477			return -EPERM;
478		break;
479	case EROFS_XATTR_INDEX_SECURITY:
480		break;
481	default:
482		return -EINVAL;
483	}
484
485	return erofs_getxattr(inode, handler->flags, name, buffer, size);
486}
487
488const struct xattr_handler erofs_xattr_user_handler = {
489	.prefix	= XATTR_USER_PREFIX,
490	.flags	= EROFS_XATTR_INDEX_USER,
491	.list	= erofs_xattr_user_list,
492	.get	= erofs_xattr_generic_get,
493};
494
495const struct xattr_handler erofs_xattr_trusted_handler = {
496	.prefix	= XATTR_TRUSTED_PREFIX,
497	.flags	= EROFS_XATTR_INDEX_TRUSTED,
498	.list	= erofs_xattr_trusted_list,
499	.get	= erofs_xattr_generic_get,
500};
501
502#ifdef CONFIG_EROFS_FS_SECURITY
503const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
504	.prefix	= XATTR_SECURITY_PREFIX,
505	.flags	= EROFS_XATTR_INDEX_SECURITY,
506	.get	= erofs_xattr_generic_get,
507};
508#endif
509
510const struct xattr_handler *erofs_xattr_handlers[] = {
511	&erofs_xattr_user_handler,
512#ifdef CONFIG_EROFS_FS_POSIX_ACL
513	&posix_acl_access_xattr_handler,
514	&posix_acl_default_xattr_handler,
515#endif
516	&erofs_xattr_trusted_handler,
517#ifdef CONFIG_EROFS_FS_SECURITY
518	&erofs_xattr_security_handler,
519#endif
520	NULL,
521};
522
523struct listxattr_iter {
524	struct xattr_iter it;
525
526	struct dentry *dentry;
527	char *buffer;
528	int buffer_size, buffer_ofs;
529};
530
531static int xattr_entrylist(struct xattr_iter *_it,
532			   struct erofs_xattr_entry *entry)
533{
534	struct listxattr_iter *it =
535		container_of(_it, struct listxattr_iter, it);
536	unsigned int prefix_len;
537	const char *prefix;
538
539	const struct xattr_handler *h =
540		erofs_xattr_handler(entry->e_name_index);
541
542	if (!h || (h->list && !h->list(it->dentry)))
543		return 1;
544
545	prefix = xattr_prefix(h);
546	prefix_len = strlen(prefix);
547
548	if (!it->buffer) {
549		it->buffer_ofs += prefix_len + entry->e_name_len + 1;
550		return 1;
551	}
552
553	if (it->buffer_ofs + prefix_len
554		+ entry->e_name_len + 1 > it->buffer_size)
555		return -ERANGE;
556
557	memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
558	it->buffer_ofs += prefix_len;
559	return 0;
560}
561
562static int xattr_namelist(struct xattr_iter *_it,
563			  unsigned int processed, char *buf, unsigned int len)
564{
565	struct listxattr_iter *it =
566		container_of(_it, struct listxattr_iter, it);
567
568	memcpy(it->buffer + it->buffer_ofs, buf, len);
569	it->buffer_ofs += len;
570	return 0;
571}
572
573static int xattr_skipvalue(struct xattr_iter *_it,
574			   unsigned int value_sz)
575{
576	struct listxattr_iter *it =
577		container_of(_it, struct listxattr_iter, it);
578
579	it->buffer[it->buffer_ofs++] = '\0';
580	return 1;
581}
582
583static const struct xattr_iter_handlers list_xattr_handlers = {
584	.entry = xattr_entrylist,
585	.name = xattr_namelist,
586	.alloc_buffer = xattr_skipvalue,
587	.value = NULL
588};
589
590static int inline_listxattr(struct listxattr_iter *it)
591{
592	int ret;
593	unsigned int remaining;
594
595	ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
596	if (ret < 0)
597		return ret;
598
599	remaining = ret;
600	while (remaining) {
601		ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
602		if (ret)
603			break;
604	}
605	xattr_iter_end_final(&it->it);
606	return ret ? ret : it->buffer_ofs;
607}
608
609static int shared_listxattr(struct listxattr_iter *it)
610{
611	struct inode *const inode = d_inode(it->dentry);
612	struct erofs_inode *const vi = EROFS_I(inode);
613	struct super_block *const sb = inode->i_sb;
614	struct erofs_sb_info *const sbi = EROFS_SB(sb);
615	unsigned int i;
616	int ret = 0;
617
618	for (i = 0; i < vi->xattr_shared_count; ++i) {
619		erofs_blk_t blkaddr =
620			xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);
621
622		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
623		if (!i || blkaddr != it->it.blkaddr) {
624			if (i)
625				xattr_iter_end(&it->it, true);
626
627			it->it.page = erofs_get_meta_page(sb, blkaddr);
628			if (IS_ERR(it->it.page))
629				return PTR_ERR(it->it.page);
630
631			it->it.kaddr = kmap_atomic(it->it.page);
632			it->it.blkaddr = blkaddr;
633		}
634
635		ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
636		if (ret)
637			break;
638	}
639	if (vi->xattr_shared_count)
640		xattr_iter_end_final(&it->it);
641
642	return ret ? ret : it->buffer_ofs;
643}
644
645ssize_t erofs_listxattr(struct dentry *dentry,
646			char *buffer, size_t buffer_size)
647{
648	int ret;
649	struct listxattr_iter it;
650
651	ret = init_inode_xattrs(d_inode(dentry));
652	if (ret == -ENOATTR)
653		return 0;
654	if (ret)
655		return ret;
656
657	it.dentry = dentry;
658	it.buffer = buffer;
659	it.buffer_size = buffer_size;
660	it.buffer_ofs = 0;
661
662	it.it.sb = dentry->d_sb;
663
664	ret = inline_listxattr(&it);
665	if (ret < 0 && ret != -ENOATTR)
666		return ret;
667	return shared_listxattr(&it);
668}
669
670#ifdef CONFIG_EROFS_FS_POSIX_ACL
671struct posix_acl *erofs_get_acl(struct inode *inode, int type)
672{
673	struct posix_acl *acl;
674	int prefix, rc;
675	char *value = NULL;
676
677	switch (type) {
678	case ACL_TYPE_ACCESS:
679		prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS;
680		break;
681	case ACL_TYPE_DEFAULT:
682		prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT;
683		break;
684	default:
685		return ERR_PTR(-EINVAL);
686	}
687
688	rc = erofs_getxattr(inode, prefix, "", NULL, 0);
689	if (rc > 0) {
690		value = kmalloc(rc, GFP_KERNEL);
691		if (!value)
692			return ERR_PTR(-ENOMEM);
693		rc = erofs_getxattr(inode, prefix, "", value, rc);
694	}
695
696	if (rc == -ENOATTR)
697		acl = NULL;
698	else if (rc < 0)
699		acl = ERR_PTR(rc);
700	else
701		acl = posix_acl_from_xattr(&init_user_ns, value, rc);
702	kfree(value);
703	return acl;
704}
705#endif
706
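
To mirror the two-pass pattern used by erofs_get_acl() above (a size query with a NULL buffer, then the actual read), here is a hedged userspace sketch that fetches the raw POSIX ACL xattr; on an EROFS mount the request is ultimately answered by erofs_getxattr(). The path /mnt/erofs/dir is hypothetical, and real tools such as getfacl(1) decode the binary blob rather than just printing its size.

#include <stdio.h>
#include <stdlib.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/mnt/erofs/dir";	/* hypothetical path */
	char *value;
	ssize_t sz, len;

	/* pass 1: ask for the value size only (NULL buffer, zero length) */
	sz = getxattr(path, "system.posix_acl_access", NULL, 0);
	if (sz < 0) {
		perror("getxattr(size query)");
		return EXIT_FAILURE;
	}

	value = malloc(sz);
	if (!value)
		return EXIT_FAILURE;

	/* pass 2: read the binary ACL blob into the sized buffer */
	len = getxattr(path, "system.posix_acl_access", value, sz);
	if (len < 0) {
		perror("getxattr");
		free(value);
		return EXIT_FAILURE;
	}
	printf("read %zd bytes of binary POSIX ACL data\n", len);
	free(value);
	return EXIT_SUCCESS;
}
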