mm/memfd.c

v5.4
  1/*
  2 * memfd_create system call and file sealing support
  3 *
  4 * Code was originally included in shmem.c, and broken out to facilitate
  5 * use by hugetlbfs as well as tmpfs.
  6 *
  7 * This file is released under the GPL.
  8 */
  9
 10#include <linux/fs.h>
 11#include <linux/vfs.h>
 12#include <linux/pagemap.h>
 13#include <linux/file.h>
 14#include <linux/mm.h>
 15#include <linux/sched/signal.h>
 16#include <linux/khugepaged.h>
 17#include <linux/syscalls.h>
 18#include <linux/hugetlb.h>
 19#include <linux/shmem_fs.h>
 20#include <linux/memfd.h>
 21#include <uapi/linux/memfd.h>
 22
 23/*
 24 * We need a tag: a new tag would expand every xa_node by 8 bytes,
 25 * so reuse a tag which we firmly believe is never set or cleared on tmpfs
 26 * or hugetlbfs because they are memory only filesystems.
 27 */
 28#define MEMFD_TAG_PINNED        PAGECACHE_TAG_TOWRITE
 29#define LAST_SCAN               4       /* about 150ms max */
 30
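(The "about 150ms max" figure follows from memfd_wait_for_pins() below: after the initial scan, each retry sleeps (HZ << scan) / 200 jiffies for scan = 1..LAST_SCAN, i.e. 10 + 20 + 40 + 80 = 150 ms of sleeping in total before giving up.)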
 31static void memfd_tag_pins(struct xa_state *xas)
 32{
 33	struct page *page;
 34	unsigned int tagged = 0;
 35
 36	lru_add_drain();
 37
 38	xas_lock_irq(xas);
 39	xas_for_each(xas, page, ULONG_MAX) {
 40		if (xa_is_value(page))
 41			continue;
 42		page = find_subpage(page, xas->xa_index);
 43		if (page_count(page) - page_mapcount(page) > 1)
 44			xas_set_mark(xas, MEMFD_TAG_PINNED);
 45
 46		if (++tagged % XA_CHECK_SCHED)
 47			continue;
 48
 49		xas_pause(xas);
 50		xas_unlock_irq(xas);
 51		cond_resched();
 52		xas_lock_irq(xas);
 53	}
 54	xas_unlock_irq(xas);
 55}
 56
 57/*
 58 * Setting SEAL_WRITE requires us to verify there's no pending writer. However,
 59 * via get_user_pages(), drivers might have some pending I/O without any active
 60 * user-space mappings (e.g., direct-IO, AIO). Therefore, we look at all pages
 61 * and see whether they have an elevated ref-count. If so, we tag them and wait for
 62 * them to be dropped.
 63 * The caller must guarantee that no new user will acquire writable references
 64 * to those pages to avoid races.
 65 */
 66static int memfd_wait_for_pins(struct address_space *mapping)
 67{
 68	XA_STATE(xas, &mapping->i_pages, 0);
 69	struct page *page;
 70	int error, scan;
 71
 72	memfd_tag_pins(&xas);
 73
 74	error = 0;
 75	for (scan = 0; scan <= LAST_SCAN; scan++) {
 76		unsigned int tagged = 0;
 77
 78		if (!xas_marked(&xas, MEMFD_TAG_PINNED))
 79			break;
 80
 81		if (!scan)
 82			lru_add_drain_all();
 83		else if (schedule_timeout_killable((HZ << scan) / 200))
 84			scan = LAST_SCAN;
 85
 86		xas_set(&xas, 0);
 87		xas_lock_irq(&xas);
 88		xas_for_each_marked(&xas, page, ULONG_MAX, MEMFD_TAG_PINNED) {
 89			bool clear = true;
 90			if (xa_is_value(page))
 91				continue;
 92			page = find_subpage(page, xas.xa_index);
 93			if (page_count(page) - page_mapcount(page) != 1) {
 94				/*
 95				 * On the last scan, we clean up all those tags
 96				 * we inserted; but make a note that we still
 97				 * found pages pinned.
 98				 */
 99				if (scan == LAST_SCAN)
100					error = -EBUSY;
101				else
102					clear = false;
103			}
104			if (clear)
105				xas_clear_mark(&xas, MEMFD_TAG_PINNED);
106			if (++tagged % XA_CHECK_SCHED)
107				continue;
108
109			xas_pause(&xas);
110			xas_unlock_irq(&xas);
111			cond_resched();
112			xas_lock_irq(&xas);
113		}
114		xas_unlock_irq(&xas);
115	}
116
117	return error;
118}
119
120static unsigned int *memfd_file_seals_ptr(struct file *file)
121{
122	if (shmem_file(file))
123		return &SHMEM_I(file_inode(file))->seals;
124
125#ifdef CONFIG_HUGETLBFS
126	if (is_file_hugepages(file))
127		return &HUGETLBFS_I(file_inode(file))->seals;
128#endif
129
130	return NULL;
131}
132
133#define F_ALL_SEALS (F_SEAL_SEAL | \
134		     F_SEAL_SHRINK | \
135		     F_SEAL_GROW | \
136		     F_SEAL_WRITE | \
137		     F_SEAL_FUTURE_WRITE)
138
139static int memfd_add_seals(struct file *file, unsigned int seals)
140{
141	struct inode *inode = file_inode(file);
142	unsigned int *file_seals;
143	int error;
144
145	/*
146	 * SEALING
147	 * Sealing allows multiple parties to share a tmpfs or hugetlbfs file
148	 * but restrict access to a specific subset of file operations. Seals
149	 * can only be added, but never removed. This way, mutually untrusted
150	 * parties can share common memory regions with a well-defined policy.
151	 * A malicious peer can thus never perform unwanted operations on a
152	 * shared object.
153	 *
154	 * Seals are only supported on special tmpfs or hugetlbfs files and
155	 * always affect the whole underlying inode. Once a seal is set, it
156	 * may prevent some kinds of access to the file. Currently, the
157	 * following seals are defined:
158	 *   SEAL_SEAL: Prevent further seals from being set on this file
159	 *   SEAL_SHRINK: Prevent the file from shrinking
160	 *   SEAL_GROW: Prevent the file from growing
161	 *   SEAL_WRITE: Prevent write access to the file
162	 *
163	 * As we don't require any trust relationship between two parties, we
164	 * must prevent seals from being removed. Therefore, sealing a file
165	 * only adds a given set of seals to the file, it never touches
166	 * existing seals. Furthermore, the "setting seals"-operation can be
167	 * sealed itself, which basically prevents any further seal from being
168	 * added.
169	 *
170	 * Semantics of sealing are only defined on volatile files. Only
171	 * anonymous tmpfs and hugetlbfs files support sealing. More
172	 * importantly, seals are never written to disk. Therefore, there's
173	 * no plan to support it on other file types.
174	 */
175
176	if (!(file->f_mode & FMODE_WRITE))
177		return -EPERM;
178	if (seals & ~(unsigned int)F_ALL_SEALS)
179		return -EINVAL;
180
181	inode_lock(inode);
182
183	file_seals = memfd_file_seals_ptr(file);
184	if (!file_seals) {
185		error = -EINVAL;
186		goto unlock;
187	}
188
189	if (*file_seals & F_SEAL_SEAL) {
190		error = -EPERM;
191		goto unlock;
192	}
193
194	if ((seals & F_SEAL_WRITE) && !(*file_seals & F_SEAL_WRITE)) {
195		error = mapping_deny_writable(file->f_mapping);
196		if (error)
197			goto unlock;
198
199		error = memfd_wait_for_pins(file->f_mapping);
200		if (error) {
201			mapping_allow_writable(file->f_mapping);
202			goto unlock;
203		}
204	}
205
206	*file_seals |= seals;
207	error = 0;
208
209unlock:
210	inode_unlock(inode);
211	return error;
212}
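From user space, the sealing flow implemented above is driven through memfd_create(2) followed by fcntl(2) with F_ADD_SEALS and F_GET_SEALS. A minimal sketch, assuming a glibc (2.27 or newer) that exposes memfd_create() and the F_SEAL_* constants under _GNU_SOURCE; the name "example" and the 4 KiB size are arbitrary:

#define _GNU_SOURCE
#include <sys/mman.h>   /* memfd_create(), MFD_* flags */
#include <fcntl.h>      /* F_ADD_SEALS, F_GET_SEALS, F_SEAL_* */
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	/* MFD_ALLOW_SEALING creates the file without F_SEAL_SEAL set. */
	int fd = memfd_create("example", MFD_CLOEXEC | MFD_ALLOW_SEALING);
	if (fd < 0 || ftruncate(fd, 4096) < 0) {
		perror("memfd");
		return 1;
	}

	/* ... fill the region, pass the fd to a peer ... */

	/*
	 * Freeze size and contents, then forbid further sealing.  This
	 * fails with EBUSY while pages are still pinned, per
	 * memfd_wait_for_pins() above.
	 */
	if (fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW |
				   F_SEAL_WRITE | F_SEAL_SEAL) < 0)
		perror("F_ADD_SEALS");

	printf("seals: 0x%x\n", fcntl(fd, F_GET_SEALS));
	return 0;
}

Once F_SEAL_SEAL is in place, any later F_ADD_SEALS call fails with EPERM, matching the check at the top of memfd_add_seals().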
213
214static int memfd_get_seals(struct file *file)
215{
216	unsigned int *seals = memfd_file_seals_ptr(file);
217
218	return seals ? *seals : -EINVAL;
219}
220
221long memfd_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
222{
223	long error;
224
225	switch (cmd) {
226	case F_ADD_SEALS:
227		/* disallow upper 32bit */
228		if (arg > UINT_MAX)
229			return -EINVAL;
230
231		error = memfd_add_seals(file, arg);
232		break;
233	case F_GET_SEALS:
234		error = memfd_get_seals(file);
235		break;
236	default:
237		error = -EINVAL;
238		break;
239	}
240
241	return error;
242}
243
244#define MFD_NAME_PREFIX "memfd:"
245#define MFD_NAME_PREFIX_LEN (sizeof(MFD_NAME_PREFIX) - 1)
246#define MFD_NAME_MAX_LEN (NAME_MAX - MFD_NAME_PREFIX_LEN)
247
248#define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING | MFD_HUGETLB)
249
250SYSCALL_DEFINE2(memfd_create,
251		const char __user *, uname,
252		unsigned int, flags)
253{
254	unsigned int *file_seals;
255	struct file *file;
256	int fd, error;
257	char *name;
258	long len;
259
260	if (!(flags & MFD_HUGETLB)) {
261		if (flags & ~(unsigned int)MFD_ALL_FLAGS)
262			return -EINVAL;
263	} else {
264		/* Allow huge page size encoding in flags. */
265		if (flags & ~(unsigned int)(MFD_ALL_FLAGS |
266				(MFD_HUGE_MASK << MFD_HUGE_SHIFT)))
267			return -EINVAL;
268	}
269
270	/* length includes terminating zero */
271	len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1);
272	if (len <= 0)
273		return -EFAULT;
274	if (len > MFD_NAME_MAX_LEN + 1)
275		return -EINVAL;
276
277	name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_KERNEL);
278	if (!name)
279		return -ENOMEM;
280
281	strcpy(name, MFD_NAME_PREFIX);
282	if (copy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, len)) {
283		error = -EFAULT;
284		goto err_name;
285	}
286
287	/* terminating-zero may have changed after strnlen_user() returned */
288	if (name[len + MFD_NAME_PREFIX_LEN - 1]) {
289		error = -EFAULT;
290		goto err_name;
291	}
292
293	fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0);
294	if (fd < 0) {
295		error = fd;
296		goto err_name;
297	}
298
299	if (flags & MFD_HUGETLB) {
300		struct user_struct *user = NULL;
301
302		file = hugetlb_file_setup(name, 0, VM_NORESERVE, &user,
303					HUGETLB_ANONHUGE_INODE,
304					(flags >> MFD_HUGE_SHIFT) &
305					MFD_HUGE_MASK);
306	} else
307		file = shmem_file_setup(name, 0, VM_NORESERVE);
308	if (IS_ERR(file)) {
309		error = PTR_ERR(file);
310		goto err_fd;
311	}
312	file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
313	file->f_flags |= O_LARGEFILE;
314
315	if (flags & MFD_ALLOW_SEALING) {
316		file_seals = memfd_file_seals_ptr(file);
317		*file_seals &= ~F_SEAL_SEAL;
318	}
319
320	fd_install(fd, file);
321	kfree(name);
322	return fd;
323
324err_fd:
325	put_unused_fd(fd);
326err_name:
327	kfree(name);
328	return error;
329}
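For the MFD_HUGETLB branch of the syscall above, the desired huge page size is carried in the upper flag bits and extracted as (flags >> MFD_HUGE_SHIFT) & MFD_HUGE_MASK. A sketch using the raw syscall (to avoid depending on a particular libc wrapper), assuming <linux/memfd.h> provides the MFD_HUGE_2MB encoding and that 2 MB huge pages are available on the system; the name is arbitrary:

#define _GNU_SOURCE
#include <linux/memfd.h>    /* MFD_CLOEXEC, MFD_HUGETLB, MFD_HUGE_2MB */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	/* MFD_HUGE_2MB encodes log2(2 MB) = 21 in the MFD_HUGE_SHIFT bits. */
	int fd = syscall(SYS_memfd_create, "huge-example",
			 MFD_CLOEXEC | MFD_HUGETLB | MFD_HUGE_2MB);
	if (fd < 0) {
		perror("memfd_create(MFD_HUGETLB)");
		return 1;
	}
	/* The hugetlbfs inode starts empty; size it to one 2 MB page. */
	if (ftruncate(fd, 2UL << 20) < 0)
		perror("ftruncate");
	close(fd);
	return 0;
}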
v6.13.7
  1/*
  2 * memfd_create system call and file sealing support
  3 *
  4 * Code was originally included in shmem.c, and broken out to facilitate
  5 * use by hugetlbfs as well as tmpfs.
  6 *
  7 * This file is released under the GPL.
  8 */
  9
 10#include <linux/fs.h>
 11#include <linux/vfs.h>
 12#include <linux/pagemap.h>
 13#include <linux/file.h>
 14#include <linux/mm.h>
 15#include <linux/sched/signal.h>
 16#include <linux/khugepaged.h>
 17#include <linux/syscalls.h>
 18#include <linux/hugetlb.h>
 19#include <linux/shmem_fs.h>
 20#include <linux/memfd.h>
 21#include <linux/pid_namespace.h>
 22#include <uapi/linux/memfd.h>
 23
 24/*
 25 * We need a tag: a new tag would expand every xa_node by 8 bytes,
 26 * so reuse a tag which we firmly believe is never set or cleared on tmpfs
 27 * or hugetlbfs because they are memory only filesystems.
 28 */
 29#define MEMFD_TAG_PINNED        PAGECACHE_TAG_TOWRITE
 30#define LAST_SCAN               4       /* about 150ms max */
 31
 32static bool memfd_folio_has_extra_refs(struct folio *folio)
 33{
 34	return folio_ref_count(folio) - folio_mapcount(folio) !=
 35	       folio_nr_pages(folio);
 36}
 37
 38static void memfd_tag_pins(struct xa_state *xas)
 39{
 40	struct folio *folio;
 41	int latency = 0;
 42
 43	lru_add_drain();
 44
 45	xas_lock_irq(xas);
 46	xas_for_each(xas, folio, ULONG_MAX) {
 47		if (!xa_is_value(folio) && memfd_folio_has_extra_refs(folio))
 48			xas_set_mark(xas, MEMFD_TAG_PINNED);
 49
 50		if (++latency < XA_CHECK_SCHED)
 51			continue;
 52		latency = 0;
 53
 54		xas_pause(xas);
 55		xas_unlock_irq(xas);
 56		cond_resched();
 57		xas_lock_irq(xas);
 58	}
 59	xas_unlock_irq(xas);
 60}
 61
 62/*
 63 * This is a helper function used by memfd_pin_user_pages() in GUP (gup.c).
 64 * It is mainly called to allocate a folio in a memfd when the caller
 65 * (memfd_pin_folios()) cannot find a folio in the page cache at a given
 66 * index in the mapping.
 67 */
 68struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
 69{
 70#ifdef CONFIG_HUGETLB_PAGE
 71	struct folio *folio;
 72	gfp_t gfp_mask;
 73	int err;
 74
 75	if (is_file_hugepages(memfd)) {
 76		/*
 77		 * The folio would most likely be accessed by a DMA driver,
 78		 * therefore, we have zone memory constraints where we can
 79		 * alloc from. Also, the folio will be pinned for an indefinite
 80		 * amount of time, so it is not expected to be migrated away.
 81		 */
 82		struct hstate *h = hstate_file(memfd);
 83
 84		gfp_mask = htlb_alloc_mask(h);
 85		gfp_mask &= ~(__GFP_HIGHMEM | __GFP_MOVABLE);
 86		idx >>= huge_page_order(h);
 87
 88		folio = alloc_hugetlb_folio_reserve(h,
 89						    numa_node_id(),
 90						    NULL,
 91						    gfp_mask);
 92		if (folio) {
 93			err = hugetlb_add_to_page_cache(folio,
 94							memfd->f_mapping,
 95							idx);
 96			if (err) {
 97				folio_put(folio);
 98				return ERR_PTR(err);
 99			}
100			folio_unlock(folio);
101			return folio;
102		}
103		return ERR_PTR(-ENOMEM);
104	}
105#endif
106	return shmem_read_folio(memfd->f_mapping, idx);
107}
108
109/*
110 * Setting SEAL_WRITE requires us to verify there's no pending writer. However,
111 * via get_user_pages(), drivers might have some pending I/O without any active
112 * user-space mappings (e.g., direct-IO, AIO). Therefore, we look at all folios
113 * and see whether they have an elevated ref-count. If so, we tag them and wait for
114 * them to be dropped.
115 * The caller must guarantee that no new user will acquire writable references
116 * to those folios to avoid races.
117 */
118static int memfd_wait_for_pins(struct address_space *mapping)
119{
120	XA_STATE(xas, &mapping->i_pages, 0);
121	struct folio *folio;
122	int error, scan;
123
124	memfd_tag_pins(&xas);
125
126	error = 0;
127	for (scan = 0; scan <= LAST_SCAN; scan++) {
128		int latency = 0;
129
130		if (!xas_marked(&xas, MEMFD_TAG_PINNED))
131			break;
132
133		if (!scan)
134			lru_add_drain_all();
135		else if (schedule_timeout_killable((HZ << scan) / 200))
136			scan = LAST_SCAN;
137
138		xas_set(&xas, 0);
139		xas_lock_irq(&xas);
140		xas_for_each_marked(&xas, folio, ULONG_MAX, MEMFD_TAG_PINNED) {
141			bool clear = true;
142
143			if (!xa_is_value(folio) &&
144			    memfd_folio_has_extra_refs(folio)) {
145				/*
146				 * On the last scan, we clean up all those tags
147				 * we inserted; but make a note that we still
148				 * found folios pinned.
149				 */
150				if (scan == LAST_SCAN)
151					error = -EBUSY;
152				else
153					clear = false;
154			}
155			if (clear)
156				xas_clear_mark(&xas, MEMFD_TAG_PINNED);
157
158			if (++latency < XA_CHECK_SCHED)
159				continue;
160			latency = 0;
161
162			xas_pause(&xas);
163			xas_unlock_irq(&xas);
164			cond_resched();
165			xas_lock_irq(&xas);
166		}
167		xas_unlock_irq(&xas);
168	}
169
170	return error;
171}
172
173unsigned int *memfd_file_seals_ptr(struct file *file)
174{
175	if (shmem_file(file))
176		return &SHMEM_I(file_inode(file))->seals;
177
178#ifdef CONFIG_HUGETLBFS
179	if (is_file_hugepages(file))
180		return &HUGETLBFS_I(file_inode(file))->seals;
181#endif
182
183	return NULL;
184}
185
186#define F_ALL_SEALS (F_SEAL_SEAL | \
187		     F_SEAL_EXEC | \
188		     F_SEAL_SHRINK | \
189		     F_SEAL_GROW | \
190		     F_SEAL_WRITE | \
191		     F_SEAL_FUTURE_WRITE)
192
193static int memfd_add_seals(struct file *file, unsigned int seals)
194{
195	struct inode *inode = file_inode(file);
196	unsigned int *file_seals;
197	int error;
198
199	/*
200	 * SEALING
201	 * Sealing allows multiple parties to share a tmpfs or hugetlbfs file
202	 * but restrict access to a specific subset of file operations. Seals
203	 * can only be added, but never removed. This way, mutually untrusted
204	 * parties can share common memory regions with a well-defined policy.
205	 * A malicious peer can thus never perform unwanted operations on a
206	 * shared object.
207	 *
208	 * Seals are only supported on special tmpfs or hugetlbfs files and
209	 * always affect the whole underlying inode. Once a seal is set, it
210	 * may prevent some kinds of access to the file. Currently, the
211	 * following seals are defined:
212	 *   SEAL_SEAL: Prevent further seals from being set on this file
213	 *   SEAL_SHRINK: Prevent the file from shrinking
214	 *   SEAL_GROW: Prevent the file from growing
215	 *   SEAL_WRITE: Prevent write access to the file
216	 *   SEAL_EXEC: Prevent modification of the exec bits in the file mode
217	 *
218	 * As we don't require any trust relationship between two parties, we
219	 * must prevent seals from being removed. Therefore, sealing a file
220	 * only adds a given set of seals to the file, it never touches
221	 * existing seals. Furthermore, the "setting seals"-operation can be
222	 * sealed itself, which basically prevents any further seal from being
223	 * added.
224	 *
225	 * Semantics of sealing are only defined on volatile files. Only
226	 * anonymous tmpfs and hugetlbfs files support sealing. More
227	 * importantly, seals are never written to disk. Therefore, there's
228	 * no plan to support it on other file types.
229	 */
230
231	if (!(file->f_mode & FMODE_WRITE))
232		return -EPERM;
233	if (seals & ~(unsigned int)F_ALL_SEALS)
234		return -EINVAL;
235
236	inode_lock(inode);
237
238	file_seals = memfd_file_seals_ptr(file);
239	if (!file_seals) {
240		error = -EINVAL;
241		goto unlock;
242	}
243
244	if (*file_seals & F_SEAL_SEAL) {
245		error = -EPERM;
246		goto unlock;
247	}
248
249	if ((seals & F_SEAL_WRITE) && !(*file_seals & F_SEAL_WRITE)) {
250		error = mapping_deny_writable(file->f_mapping);
251		if (error)
252			goto unlock;
253
254		error = memfd_wait_for_pins(file->f_mapping);
255		if (error) {
256			mapping_allow_writable(file->f_mapping);
257			goto unlock;
258		}
259	}
260
261	/*
262	 * SEAL_EXEC implies SEAL_WRITE, making W^X from the start.
263	 */
264	if (seals & F_SEAL_EXEC && inode->i_mode & 0111)
265		seals |= F_SEAL_SHRINK|F_SEAL_GROW|F_SEAL_WRITE|F_SEAL_FUTURE_WRITE;
266
267	*file_seals |= seals;
268	error = 0;
269
270unlock:
271	inode_unlock(inode);
272	return error;
273}
274
275static int memfd_get_seals(struct file *file)
276{
277	unsigned int *seals = memfd_file_seals_ptr(file);
278
279	return seals ? *seals : -EINVAL;
280}
281
282long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg)
283{
284	long error;
285
286	switch (cmd) {
287	case F_ADD_SEALS:
288		error = memfd_add_seals(file, arg);
289		break;
290	case F_GET_SEALS:
291		error = memfd_get_seals(file);
292		break;
293	default:
294		error = -EINVAL;
295		break;
296	}
297
298	return error;
299}
300
301#define MFD_NAME_PREFIX "memfd:"
302#define MFD_NAME_PREFIX_LEN (sizeof(MFD_NAME_PREFIX) - 1)
303#define MFD_NAME_MAX_LEN (NAME_MAX - MFD_NAME_PREFIX_LEN)
304
305#define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING | MFD_HUGETLB | MFD_NOEXEC_SEAL | MFD_EXEC)
306
307static int check_sysctl_memfd_noexec(unsigned int *flags)
308{
309#ifdef CONFIG_SYSCTL
310	struct pid_namespace *ns = task_active_pid_ns(current);
311	int sysctl = pidns_memfd_noexec_scope(ns);
312
313	if (!(*flags & (MFD_EXEC | MFD_NOEXEC_SEAL))) {
314		if (sysctl >= MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL)
315			*flags |= MFD_NOEXEC_SEAL;
316		else
317			*flags |= MFD_EXEC;
318	}
319
320	if (!(*flags & MFD_NOEXEC_SEAL) && sysctl >= MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED) {
321		pr_err_ratelimited(
322			"%s[%d]: memfd_create() requires MFD_NOEXEC_SEAL with vm.memfd_noexec=%d\n",
323			current->comm, task_pid_nr(current), sysctl);
324		return -EACCES;
325	}
326#endif
327	return 0;
328}
329
330SYSCALL_DEFINE2(memfd_create,
331		const char __user *, uname,
332		unsigned int, flags)
333{
334	unsigned int *file_seals;
335	struct file *file;
336	int fd, error;
337	char *name;
338	long len;
339
340	if (!(flags & MFD_HUGETLB)) {
341		if (flags & ~(unsigned int)MFD_ALL_FLAGS)
342			return -EINVAL;
343	} else {
344		/* Allow huge page size encoding in flags. */
345		if (flags & ~(unsigned int)(MFD_ALL_FLAGS |
346				(MFD_HUGE_MASK << MFD_HUGE_SHIFT)))
347			return -EINVAL;
348	}
349
350	/* Invalid if both EXEC and NOEXEC_SEAL are set.*/
351	if ((flags & MFD_EXEC) && (flags & MFD_NOEXEC_SEAL))
352		return -EINVAL;
353
354	error = check_sysctl_memfd_noexec(&flags);
355	if (error < 0)
356		return error;
357
358	/* length includes terminating zero */
359	len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1);
360	if (len <= 0)
361		return -EFAULT;
362	if (len > MFD_NAME_MAX_LEN + 1)
363		return -EINVAL;
364
365	name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_KERNEL);
366	if (!name)
367		return -ENOMEM;
368
369	strcpy(name, MFD_NAME_PREFIX);
370	if (copy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, len)) {
371		error = -EFAULT;
372		goto err_name;
373	}
374
375	/* terminating-zero may have changed after strnlen_user() returned */
376	if (name[len + MFD_NAME_PREFIX_LEN - 1]) {
377		error = -EFAULT;
378		goto err_name;
379	}
380
381	fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0);
382	if (fd < 0) {
383		error = fd;
384		goto err_name;
385	}
386
387	if (flags & MFD_HUGETLB) {
388		file = hugetlb_file_setup(name, 0, VM_NORESERVE,
389					HUGETLB_ANONHUGE_INODE,
390					(flags >> MFD_HUGE_SHIFT) &
391					MFD_HUGE_MASK);
392	} else
393		file = shmem_file_setup(name, 0, VM_NORESERVE);
394	if (IS_ERR(file)) {
395		error = PTR_ERR(file);
396		goto err_fd;
397	}
398	file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
399	file->f_flags |= O_LARGEFILE;
400
401	if (flags & MFD_NOEXEC_SEAL) {
402		struct inode *inode = file_inode(file);
403
404		inode->i_mode &= ~0111;
405		file_seals = memfd_file_seals_ptr(file);
406		if (file_seals) {
407			*file_seals &= ~F_SEAL_SEAL;
408			*file_seals |= F_SEAL_EXEC;
409		}
410	} else if (flags & MFD_ALLOW_SEALING) {
411		/* MFD_EXEC and MFD_ALLOW_SEALING are set */
412		file_seals = memfd_file_seals_ptr(file);
413		if (file_seals)
414			*file_seals &= ~F_SEAL_SEAL;
415	}
416
417	fd_install(fd, file);
418	kfree(name);
419	return fd;
420
421err_fd:
422	put_unused_fd(fd);
423err_name:
424	kfree(name);
425	return error;
426}
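The MFD_NOEXEC_SEAL handling added in this newer version is visible directly from user space. A sketch, assuming a v6.3 or later kernel and a <linux/memfd.h> that defines MFD_NOEXEC_SEAL; the name is arbitrary:

#define _GNU_SOURCE
#include <linux/memfd.h>    /* MFD_CLOEXEC, MFD_NOEXEC_SEAL */
#include <sys/syscall.h>
#include <sys/stat.h>
#include <fcntl.h>          /* F_GET_SEALS */
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	struct stat st;
	int fd = syscall(SYS_memfd_create, "noexec-example",
			 MFD_CLOEXEC | MFD_NOEXEC_SEAL);

	if (fd < 0) {
		perror("memfd_create");
		return 1;
	}
	/*
	 * Per memfd_create() above: the exec bits are cleared from i_mode and
	 * F_SEAL_EXEC is applied, while F_SEAL_SEAL is left clear so further
	 * seals can still be added.
	 */
	fstat(fd, &st);
	printf("mode: %03o seals: 0x%x\n", st.st_mode & 0777,
	       fcntl(fd, F_GET_SEALS));
	return 0;
}

With vm.memfd_noexec raised to MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED in the caller's pid namespace, omitting MFD_NOEXEC_SEAL makes check_sysctl_memfd_noexec() reject the call with EACCES instead.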