fs/fcntl.c at v4.6:
 
  1/*
  2 *  linux/fs/fcntl.c
  3 *
  4 *  Copyright (C) 1991, 1992  Linus Torvalds
  5 */
  6
  7#include <linux/syscalls.h>
  8#include <linux/init.h>
  9#include <linux/mm.h>
 10#include <linux/fs.h>
 11#include <linux/file.h>
 12#include <linux/fdtable.h>
 13#include <linux/capability.h>
 14#include <linux/dnotify.h>
 15#include <linux/slab.h>
 16#include <linux/module.h>
 17#include <linux/pipe_fs_i.h>
 18#include <linux/security.h>
 19#include <linux/ptrace.h>
 20#include <linux/signal.h>
 21#include <linux/rcupdate.h>
 22#include <linux/pid_namespace.h>
 23#include <linux/user_namespace.h>
 24#include <linux/shmem_fs.h>
 25
 26#include <asm/poll.h>
 27#include <asm/siginfo.h>
 28#include <asm/uaccess.h>
 29
 30#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)
 31
 32static int setfl(int fd, struct file * filp, unsigned long arg)
 33{
 34	struct inode * inode = file_inode(filp);
 35	int error = 0;
 36
 37	/*
 38	 * O_APPEND cannot be cleared if the file is marked as append-only
 39	 * and the file is open for write.
 40	 */
 41	if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
 42		return -EPERM;
 43
 44	/* O_NOATIME can only be set by the owner or superuser */
 45	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
 46		if (!inode_owner_or_capable(inode))
 47			return -EPERM;
 48
 49	/* required for strict SunOS emulation */
 50	if (O_NONBLOCK != O_NDELAY)
 51	       if (arg & O_NDELAY)
 52		   arg |= O_NONBLOCK;
 53
 54	/* Pipe packetized mode is controlled by O_DIRECT flag */
 55	if (!S_ISFIFO(filp->f_inode->i_mode) && (arg & O_DIRECT)) {
 56		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
 57			!filp->f_mapping->a_ops->direct_IO)
 58				return -EINVAL;
 59	}
 60
 61	if (filp->f_op->check_flags)
 62		error = filp->f_op->check_flags(arg);
 63	if (error)
 64		return error;
 65
 66	/*
 67	 * ->fasync() is responsible for setting the FASYNC bit.
 68	 */
 69	if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op->fasync) {
 70		error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
 71		if (error < 0)
 72			goto out;
 73		if (error > 0)
 74			error = 0;
 75	}
 76	spin_lock(&filp->f_lock);
 77	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
 78	spin_unlock(&filp->f_lock);
 79
 80 out:
 81	return error;
 82}
 83
 84static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
 85                     int force)
 86{
 87	write_lock_irq(&filp->f_owner.lock);
 88	if (force || !filp->f_owner.pid) {
 89		put_pid(filp->f_owner.pid);
 90		filp->f_owner.pid = get_pid(pid);
 91		filp->f_owner.pid_type = type;
 92
 93		if (pid) {
 94			const struct cred *cred = current_cred();
 95			filp->f_owner.uid = cred->uid;
 96			filp->f_owner.euid = cred->euid;
 97		}
 98	}
 99	write_unlock_irq(&filp->f_owner.lock);
100}
101
102void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
103		int force)
104{
105	security_file_set_fowner(filp);
106	f_modown(filp, pid, type, force);
107}
108EXPORT_SYMBOL(__f_setown);
109
110void f_setown(struct file *filp, unsigned long arg, int force)
111{
112	enum pid_type type;
113	struct pid *pid;
114	int who = arg;
115	type = PIDTYPE_PID;
116	if (who < 0) {
117		type = PIDTYPE_PGID;
118		who = -who;
119	}
120	rcu_read_lock();
121	pid = find_vpid(who);
122	__f_setown(filp, pid, type, force);
123	rcu_read_unlock();
124}
125EXPORT_SYMBOL(f_setown);
126
127void f_delown(struct file *filp)
128{
129	f_modown(filp, NULL, PIDTYPE_PID, 1);
130}
131
132pid_t f_getown(struct file *filp)
133{
134	pid_t pid;
135	read_lock(&filp->f_owner.lock);
136	pid = pid_vnr(filp->f_owner.pid);
137	if (filp->f_owner.pid_type == PIDTYPE_PGID)
138		pid = -pid;
139	read_unlock(&filp->f_owner.lock);
140	return pid;
141}
142
143static int f_setown_ex(struct file *filp, unsigned long arg)
144{
145	struct f_owner_ex __user *owner_p = (void __user *)arg;
146	struct f_owner_ex owner;
147	struct pid *pid;
148	int type;
149	int ret;
150
151	ret = copy_from_user(&owner, owner_p, sizeof(owner));
152	if (ret)
153		return -EFAULT;
154
155	switch (owner.type) {
156	case F_OWNER_TID:
157		type = PIDTYPE_MAX;
158		break;
159
160	case F_OWNER_PID:
161		type = PIDTYPE_PID;
162		break;
163
164	case F_OWNER_PGRP:
165		type = PIDTYPE_PGID;
166		break;
167
168	default:
169		return -EINVAL;
170	}
171
172	rcu_read_lock();
173	pid = find_vpid(owner.pid);
174	if (owner.pid && !pid)
175		ret = -ESRCH;
176	else
177		 __f_setown(filp, pid, type, 1);
178	rcu_read_unlock();
179
180	return ret;
181}
182
183static int f_getown_ex(struct file *filp, unsigned long arg)
184{
185	struct f_owner_ex __user *owner_p = (void __user *)arg;
186	struct f_owner_ex owner;
187	int ret = 0;
188
189	read_lock(&filp->f_owner.lock);
190	owner.pid = pid_vnr(filp->f_owner.pid);
191	switch (filp->f_owner.pid_type) {
192	case PIDTYPE_MAX:
193		owner.type = F_OWNER_TID;
194		break;
195
196	case PIDTYPE_PID:
197		owner.type = F_OWNER_PID;
198		break;
199
200	case PIDTYPE_PGID:
201		owner.type = F_OWNER_PGRP;
202		break;
203
204	default:
205		WARN_ON(1);
206		ret = -EINVAL;
207		break;
208	}
209	read_unlock(&filp->f_owner.lock);
210
211	if (!ret) {
212		ret = copy_to_user(owner_p, &owner, sizeof(owner));
213		if (ret)
214			ret = -EFAULT;
215	}
216	return ret;
217}
218
219#ifdef CONFIG_CHECKPOINT_RESTORE
220static int f_getowner_uids(struct file *filp, unsigned long arg)
221{
222	struct user_namespace *user_ns = current_user_ns();
223	uid_t __user *dst = (void __user *)arg;
224	uid_t src[2];
225	int err;
226
227	read_lock(&filp->f_owner.lock);
228	src[0] = from_kuid(user_ns, filp->f_owner.uid);
229	src[1] = from_kuid(user_ns, filp->f_owner.euid);
230	read_unlock(&filp->f_owner.lock);
231
232	err  = put_user(src[0], &dst[0]);
233	err |= put_user(src[1], &dst[1]);
234
235	return err;
236}
237#else
238static int f_getowner_uids(struct file *filp, unsigned long arg)
239{
240	return -EINVAL;
241}
242#endif
243
244static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
245		struct file *filp)
246{
247	long err = -EINVAL;
248
249	switch (cmd) {
250	case F_DUPFD:
251		err = f_dupfd(arg, filp, 0);
252		break;
253	case F_DUPFD_CLOEXEC:
254		err = f_dupfd(arg, filp, O_CLOEXEC);
255		break;
256	case F_GETFD:
257		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
258		break;
259	case F_SETFD:
260		err = 0;
261		set_close_on_exec(fd, arg & FD_CLOEXEC);
262		break;
263	case F_GETFL:
264		err = filp->f_flags;
265		break;
266	case F_SETFL:
267		err = setfl(fd, filp, arg);
268		break;
269#if BITS_PER_LONG != 32
270	/* 32-bit arches must use fcntl64() */
271	case F_OFD_GETLK:
272#endif
273	case F_GETLK:
274		err = fcntl_getlk(filp, cmd, (struct flock __user *) arg);
275		break;
276#if BITS_PER_LONG != 32
277	/* 32-bit arches must use fcntl64() */
278	case F_OFD_SETLK:
279	case F_OFD_SETLKW:
280#endif
281		/* Fallthrough */
282	case F_SETLK:
283	case F_SETLKW:
284		err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg);
285		break;
286	case F_GETOWN:
287		/*
288		 * XXX If f_owner is a process group, the
289		 * negative return value will get converted
290		 * into an error.  Oops.  If we keep the
291		 * current syscall conventions, the only way
292		 * to fix this will be in libc.
293		 */
294		err = f_getown(filp);
295		force_successful_syscall_return();
296		break;
297	case F_SETOWN:
298		f_setown(filp, arg, 1);
299		err = 0;
300		break;
301	case F_GETOWN_EX:
302		err = f_getown_ex(filp, arg);
303		break;
304	case F_SETOWN_EX:
305		err = f_setown_ex(filp, arg);
306		break;
307	case F_GETOWNER_UIDS:
308		err = f_getowner_uids(filp, arg);
309		break;
310	case F_GETSIG:
311		err = filp->f_owner.signum;
312		break;
313	case F_SETSIG:
314		/* arg == 0 restores default behaviour. */
315		if (!valid_signal(arg)) {
316			break;
317		}
318		err = 0;
319		filp->f_owner.signum = arg;
320		break;
321	case F_GETLEASE:
322		err = fcntl_getlease(filp);
323		break;
324	case F_SETLEASE:
325		err = fcntl_setlease(fd, filp, arg);
326		break;
327	case F_NOTIFY:
328		err = fcntl_dirnotify(fd, filp, arg);
329		break;
330	case F_SETPIPE_SZ:
331	case F_GETPIPE_SZ:
332		err = pipe_fcntl(filp, cmd, arg);
333		break;
334	case F_ADD_SEALS:
335	case F_GET_SEALS:
336		err = shmem_fcntl(filp, cmd, arg);
337		break;
338	default:
339		break;
340	}
341	return err;
342}
343
344static int check_fcntl_cmd(unsigned cmd)
345{
346	switch (cmd) {
347	case F_DUPFD:
348	case F_DUPFD_CLOEXEC:
349	case F_GETFD:
350	case F_SETFD:
351	case F_GETFL:
352		return 1;
353	}
354	return 0;
355}
356
357SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
358{	
359	struct fd f = fdget_raw(fd);
360	long err = -EBADF;
361
362	if (!f.file)
363		goto out;
364
365	if (unlikely(f.file->f_mode & FMODE_PATH)) {
366		if (!check_fcntl_cmd(cmd))
367			goto out1;
368	}
369
370	err = security_file_fcntl(f.file, cmd, arg);
371	if (!err)
372		err = do_fcntl(fd, cmd, arg, f.file);
373
374out1:
375 	fdput(f);
376out:
377	return err;
378}
379
380#if BITS_PER_LONG == 32
381SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
382		unsigned long, arg)
383{	
384	struct fd f = fdget_raw(fd);
385	long err = -EBADF;
386
387	if (!f.file)
388		goto out;
389
390	if (unlikely(f.file->f_mode & FMODE_PATH)) {
391		if (!check_fcntl_cmd(cmd))
392			goto out1;
393	}
394
395	err = security_file_fcntl(f.file, cmd, arg);
396	if (err)
397		goto out1;
398	
399	switch (cmd) {
400	case F_GETLK64:
401	case F_OFD_GETLK:
402		err = fcntl_getlk64(f.file, cmd, (struct flock64 __user *) arg);
403		break;
404	case F_SETLK64:
405	case F_SETLKW64:
406	case F_OFD_SETLK:
407	case F_OFD_SETLKW:
408		err = fcntl_setlk64(fd, f.file, cmd,
409				(struct flock64 __user *) arg);
410		break;
411	default:
412		err = do_fcntl(fd, cmd, arg, f.file);
413		break;
414	}
415out1:
416	fdput(f);
417out:
418	return err;
419}
420#endif
421
422/* Table to convert sigio signal codes into poll band bitmaps */
423
424static const long band_table[NSIGPOLL] = {
425	POLLIN | POLLRDNORM,			/* POLL_IN */
426	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
427	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
428	POLLERR,				/* POLL_ERR */
429	POLLPRI | POLLRDBAND,			/* POLL_PRI */
430	POLLHUP | POLLERR			/* POLL_HUP */
431};
432
433static inline int sigio_perm(struct task_struct *p,
434                             struct fown_struct *fown, int sig)
435{
436	const struct cred *cred;
437	int ret;
438
439	rcu_read_lock();
440	cred = __task_cred(p);
441	ret = ((uid_eq(fown->euid, GLOBAL_ROOT_UID) ||
442		uid_eq(fown->euid, cred->suid) || uid_eq(fown->euid, cred->uid) ||
443		uid_eq(fown->uid,  cred->suid) || uid_eq(fown->uid,  cred->uid)) &&
444	       !security_file_send_sigiotask(p, fown, sig));
445	rcu_read_unlock();
446	return ret;
447}
448
449static void send_sigio_to_task(struct task_struct *p,
450			       struct fown_struct *fown,
451			       int fd, int reason, int group)
452{
453	/*
454	 * F_SETSIG can change ->signum lockless in parallel, make
455	 * sure we read it once and use the same value throughout.
456	 */
457	int signum = ACCESS_ONCE(fown->signum);
458
459	if (!sigio_perm(p, fown, signum))
460		return;
461
462	switch (signum) {
463		siginfo_t si;
464		default:
465			/* Queue a rt signal with the appropriate fd as its
466			   value.  We use SI_SIGIO as the source, not 
467			   SI_KERNEL, since kernel signals always get 
468			   delivered even if we can't queue.  Failure to
469			   queue in this case _should_ be reported; we fall
470			   back to SIGIO in that case. --sct */
471			si.si_signo = signum;
472			si.si_errno = 0;
473		        si.si_code  = reason;
474			/* Make sure we are called with one of the POLL_*
475			   reasons, otherwise we could leak kernel stack into
476			   userspace.  */
477			BUG_ON((reason & __SI_MASK) != __SI_POLL);
478			if (reason - POLL_IN >= NSIGPOLL)
479				si.si_band  = ~0L;
480			else
481				si.si_band = band_table[reason - POLL_IN];
482			si.si_fd    = fd;
483			if (!do_send_sig_info(signum, &si, p, group))
484				break;
485		/* fall-through: fall back on the old plain SIGIO signal */
486		case 0:
487			do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, group);
488	}
489}
490
491void send_sigio(struct fown_struct *fown, int fd, int band)
492{
493	struct task_struct *p;
494	enum pid_type type;
495	struct pid *pid;
496	int group = 1;
497	
498	read_lock(&fown->lock);
499
500	type = fown->pid_type;
501	if (type == PIDTYPE_MAX) {
502		group = 0;
503		type = PIDTYPE_PID;
504	}
505
506	pid = fown->pid;
507	if (!pid)
508		goto out_unlock_fown;
509	
510	read_lock(&tasklist_lock);
511	do_each_pid_task(pid, type, p) {
512		send_sigio_to_task(p, fown, fd, band, group);
513	} while_each_pid_task(pid, type, p);
514	read_unlock(&tasklist_lock);
515 out_unlock_fown:
516	read_unlock(&fown->lock);
517}
518
519static void send_sigurg_to_task(struct task_struct *p,
520				struct fown_struct *fown, int group)
521{
522	if (sigio_perm(p, fown, SIGURG))
523		do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, group);
524}
525
526int send_sigurg(struct fown_struct *fown)
527{
528	struct task_struct *p;
529	enum pid_type type;
530	struct pid *pid;
531	int group = 1;
532	int ret = 0;
533	
534	read_lock(&fown->lock);
535
536	type = fown->pid_type;
537	if (type == PIDTYPE_MAX) {
538		group = 0;
539		type = PIDTYPE_PID;
540	}
541
542	pid = fown->pid;
543	if (!pid)
544		goto out_unlock_fown;
545
546	ret = 1;
547	
548	read_lock(&tasklist_lock);
549	do_each_pid_task(pid, type, p) {
550		send_sigurg_to_task(p, fown, group);
551	} while_each_pid_task(pid, type, p);
552	read_unlock(&tasklist_lock);
553 out_unlock_fown:
554	read_unlock(&fown->lock);
555	return ret;
556}
557
558static DEFINE_SPINLOCK(fasync_lock);
559static struct kmem_cache *fasync_cache __read_mostly;
560
561static void fasync_free_rcu(struct rcu_head *head)
562{
563	kmem_cache_free(fasync_cache,
564			container_of(head, struct fasync_struct, fa_rcu));
565}
566
567/*
568 * Remove a fasync entry. If successfully removed, return
569 * positive and clear the FASYNC flag. If no entry exists,
570 * do nothing and return 0.
571 *
572 * NOTE! It is very important that the FASYNC flag always
573 * match the state "is the filp on a fasync list".
574 *
575 */
576int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
577{
578	struct fasync_struct *fa, **fp;
579	int result = 0;
580
581	spin_lock(&filp->f_lock);
582	spin_lock(&fasync_lock);
583	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
584		if (fa->fa_file != filp)
585			continue;
586
587		spin_lock_irq(&fa->fa_lock);
588		fa->fa_file = NULL;
589		spin_unlock_irq(&fa->fa_lock);
590
591		*fp = fa->fa_next;
592		call_rcu(&fa->fa_rcu, fasync_free_rcu);
593		filp->f_flags &= ~FASYNC;
594		result = 1;
595		break;
596	}
597	spin_unlock(&fasync_lock);
598	spin_unlock(&filp->f_lock);
599	return result;
600}
601
602struct fasync_struct *fasync_alloc(void)
603{
604	return kmem_cache_alloc(fasync_cache, GFP_KERNEL);
605}
606
607/*
608 * NOTE! This can be used only for unused fasync entries:
609 * entries that actually got inserted on the fasync list
610 * need to be released by rcu - see fasync_remove_entry.
611 */
612void fasync_free(struct fasync_struct *new)
613{
614	kmem_cache_free(fasync_cache, new);
615}
616
617/*
618 * Insert a new entry into the fasync list.  Return the pointer to the
619 * old one if we didn't use the new one.
620 *
621 * NOTE! It is very important that the FASYNC flag always
622 * match the state "is the filp on a fasync list".
623 */
624struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new)
625{
626        struct fasync_struct *fa, **fp;
627
628	spin_lock(&filp->f_lock);
629	spin_lock(&fasync_lock);
630	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
631		if (fa->fa_file != filp)
632			continue;
633
634		spin_lock_irq(&fa->fa_lock);
635		fa->fa_fd = fd;
636		spin_unlock_irq(&fa->fa_lock);
637		goto out;
638	}
639
640	spin_lock_init(&new->fa_lock);
641	new->magic = FASYNC_MAGIC;
642	new->fa_file = filp;
643	new->fa_fd = fd;
644	new->fa_next = *fapp;
645	rcu_assign_pointer(*fapp, new);
646	filp->f_flags |= FASYNC;
647
648out:
649	spin_unlock(&fasync_lock);
650	spin_unlock(&filp->f_lock);
651	return fa;
652}
653
654/*
655 * Add a fasync entry. Return negative on error, positive if
 656 * added, and zero if it did nothing but change an existing one.
657 */
658static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
659{
660	struct fasync_struct *new;
661
662	new = fasync_alloc();
663	if (!new)
664		return -ENOMEM;
665
666	/*
667	 * fasync_insert_entry() returns the old (update) entry if
668	 * it existed.
669	 *
670	 * So free the (unused) new entry and return 0 to let the
671	 * caller know that we didn't add any new fasync entries.
672	 */
673	if (fasync_insert_entry(fd, filp, fapp, new)) {
674		fasync_free(new);
675		return 0;
676	}
677
678	return 1;
679}
680
681/*
682 * fasync_helper() is used by almost all character device drivers
683 * to set up the fasync queue, and for regular files by the file
684 * lease code. It returns negative on error, 0 if it did no changes
685 * and positive if it added/deleted the entry.
686 */
687int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
688{
689	if (!on)
690		return fasync_remove_entry(filp, fapp);
691	return fasync_add_entry(fd, filp, fapp);
692}
693
694EXPORT_SYMBOL(fasync_helper);
695
696/*
697 * rcu_read_lock() is held
698 */
699static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
700{
701	while (fa) {
702		struct fown_struct *fown;
703		unsigned long flags;
704
705		if (fa->magic != FASYNC_MAGIC) {
706			printk(KERN_ERR "kill_fasync: bad magic number in "
707			       "fasync_struct!\n");
708			return;
709		}
710		spin_lock_irqsave(&fa->fa_lock, flags);
711		if (fa->fa_file) {
712			fown = &fa->fa_file->f_owner;
713			/* Don't send SIGURG to processes which have not set a
714			   queued signum: SIGURG has its own default signalling
715			   mechanism. */
716			if (!(sig == SIGURG && fown->signum == 0))
717				send_sigio(fown, fa->fa_fd, band);
718		}
719		spin_unlock_irqrestore(&fa->fa_lock, flags);
720		fa = rcu_dereference(fa->fa_next);
721	}
722}
723
724void kill_fasync(struct fasync_struct **fp, int sig, int band)
725{
726	/* First a quick test without locking: usually
727	 * the list is empty.
728	 */
729	if (*fp) {
730		rcu_read_lock();
731		kill_fasync_rcu(rcu_dereference(*fp), sig, band);
732		rcu_read_unlock();
733	}
734}
735EXPORT_SYMBOL(kill_fasync);
736
737static int __init fcntl_init(void)
738{
739	/*
740	 * Please add new bits here to ensure allocation uniqueness.
741	 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
742	 * is defined as O_NONBLOCK on some platforms and not on others.
743	 */
744	BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
745		O_RDONLY	| O_WRONLY	| O_RDWR	|
746		O_CREAT		| O_EXCL	| O_NOCTTY	|
747		O_TRUNC		| O_APPEND	| /* O_NONBLOCK	| */
748		__O_SYNC	| O_DSYNC	| FASYNC	|
749		O_DIRECT	| O_LARGEFILE	| O_DIRECTORY	|
750		O_NOFOLLOW	| O_NOATIME	| O_CLOEXEC	|
751		__FMODE_EXEC	| O_PATH	| __O_TMPFILE	|
752		__FMODE_NONOTIFY
753		));
754
755	fasync_cache = kmem_cache_create("fasync_cache",
756		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
757	return 0;
758}
759
760module_init(fcntl_init)
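
An illustrative userspace sketch (not part of fcntl.c) of the asynchronous-notification path implemented above: F_SETFL toggling O_ASYNC goes through setfl() and ->fasync(), F_SETOWN lands in f_setown(), and F_SETSIG picks the signal that send_sigio() will later queue with si_fd/si_band filled in. It assumes Linux with glibc (_GNU_SOURCE exposes F_SETSIG) and omits error handling.

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void on_io(int sig, siginfo_t *si, void *ctx)
{
	/* With a non-zero F_SETSIG the kernel queues a realtime signal whose
	 * si_fd and si_band identify the descriptor and the poll events.
	 * fprintf() is not async-signal-safe; demonstration only. */
	(void)sig; (void)ctx;
	fprintf(stderr, "fd=%d band=%ld\n", si->si_fd, (long)si->si_band);
}

int main(void)
{
	struct sigaction sa = { 0 };
	int fd = STDIN_FILENO;

	sa.sa_sigaction = on_io;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGRTMIN, &sa, NULL);

	fcntl(fd, F_SETOWN, getpid());                    /* -> f_setown()      */
	fcntl(fd, F_SETSIG, SIGRTMIN);                    /* -> F_SETSIG case   */
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC); /* -> setfl(), FASYNC */

	for (;;)
		pause();
}
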
fs/fcntl.c at v6.2:
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  linux/fs/fcntl.c
   4 *
   5 *  Copyright (C) 1991, 1992  Linus Torvalds
   6 */
   7
   8#include <linux/syscalls.h>
   9#include <linux/init.h>
  10#include <linux/mm.h>
  11#include <linux/sched/task.h>
  12#include <linux/fs.h>
  13#include <linux/file.h>
  14#include <linux/fdtable.h>
  15#include <linux/capability.h>
  16#include <linux/dnotify.h>
  17#include <linux/slab.h>
  18#include <linux/module.h>
  19#include <linux/pipe_fs_i.h>
  20#include <linux/security.h>
  21#include <linux/ptrace.h>
  22#include <linux/signal.h>
  23#include <linux/rcupdate.h>
  24#include <linux/pid_namespace.h>
  25#include <linux/user_namespace.h>
  26#include <linux/memfd.h>
  27#include <linux/compat.h>
  28#include <linux/mount.h>
  29
  30#include <linux/poll.h>
  31#include <asm/siginfo.h>
  32#include <linux/uaccess.h>
  33
  34#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)
  35
  36static int setfl(int fd, struct file * filp, unsigned long arg)
  37{
  38	struct inode * inode = file_inode(filp);
  39	int error = 0;
  40
  41	/*
  42	 * O_APPEND cannot be cleared if the file is marked as append-only
  43	 * and the file is open for write.
  44	 */
  45	if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
  46		return -EPERM;
  47
  48	/* O_NOATIME can only be set by the owner or superuser */
  49	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
  50		if (!inode_owner_or_capable(file_mnt_user_ns(filp), inode))
  51			return -EPERM;
  52
  53	/* required for strict SunOS emulation */
  54	if (O_NONBLOCK != O_NDELAY)
  55	       if (arg & O_NDELAY)
  56		   arg |= O_NONBLOCK;
  57
  58	/* Pipe packetized mode is controlled by O_DIRECT flag */
  59	if (!S_ISFIFO(inode->i_mode) &&
  60	    (arg & O_DIRECT) &&
  61	    !(filp->f_mode & FMODE_CAN_ODIRECT))
  62		return -EINVAL;
  63
  64	if (filp->f_op->check_flags)
  65		error = filp->f_op->check_flags(arg);
  66	if (error)
  67		return error;
  68
  69	/*
  70	 * ->fasync() is responsible for setting the FASYNC bit.
  71	 */
  72	if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op->fasync) {
  73		error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
  74		if (error < 0)
  75			goto out;
  76		if (error > 0)
  77			error = 0;
  78	}
  79	spin_lock(&filp->f_lock);
  80	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
  81	filp->f_iocb_flags = iocb_flags(filp);
  82	spin_unlock(&filp->f_lock);
  83
  84 out:
  85	return error;
  86}
  87
  88static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
  89                     int force)
  90{
  91	write_lock_irq(&filp->f_owner.lock);
  92	if (force || !filp->f_owner.pid) {
  93		put_pid(filp->f_owner.pid);
  94		filp->f_owner.pid = get_pid(pid);
  95		filp->f_owner.pid_type = type;
  96
  97		if (pid) {
  98			const struct cred *cred = current_cred();
  99			filp->f_owner.uid = cred->uid;
 100			filp->f_owner.euid = cred->euid;
 101		}
 102	}
 103	write_unlock_irq(&filp->f_owner.lock);
 104}
 105
 106void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
 107		int force)
 108{
 109	security_file_set_fowner(filp);
 110	f_modown(filp, pid, type, force);
 111}
 112EXPORT_SYMBOL(__f_setown);
 113
 114int f_setown(struct file *filp, unsigned long arg, int force)
 115{
 116	enum pid_type type;
 117	struct pid *pid = NULL;
 118	int who = arg, ret = 0;
 119
 120	type = PIDTYPE_TGID;
 121	if (who < 0) {
 122		/* avoid overflow below */
 123		if (who == INT_MIN)
 124			return -EINVAL;
 125
 126		type = PIDTYPE_PGID;
 127		who = -who;
 128	}
 129
 130	rcu_read_lock();
 131	if (who) {
 132		pid = find_vpid(who);
 133		if (!pid)
 134			ret = -ESRCH;
 135	}
 136
 137	if (!ret)
 138		__f_setown(filp, pid, type, force);
 139	rcu_read_unlock();
 140
 141	return ret;
 142}
 143EXPORT_SYMBOL(f_setown);
 144
 145void f_delown(struct file *filp)
 146{
 147	f_modown(filp, NULL, PIDTYPE_TGID, 1);
 148}
 149
 150pid_t f_getown(struct file *filp)
 151{
 152	pid_t pid = 0;
 153
 154	read_lock_irq(&filp->f_owner.lock);
 155	rcu_read_lock();
 156	if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type)) {
 157		pid = pid_vnr(filp->f_owner.pid);
 158		if (filp->f_owner.pid_type == PIDTYPE_PGID)
 159			pid = -pid;
 160	}
 161	rcu_read_unlock();
 162	read_unlock_irq(&filp->f_owner.lock);
 163	return pid;
 164}
 165
 166static int f_setown_ex(struct file *filp, unsigned long arg)
 167{
 168	struct f_owner_ex __user *owner_p = (void __user *)arg;
 169	struct f_owner_ex owner;
 170	struct pid *pid;
 171	int type;
 172	int ret;
 173
 174	ret = copy_from_user(&owner, owner_p, sizeof(owner));
 175	if (ret)
 176		return -EFAULT;
 177
 178	switch (owner.type) {
 179	case F_OWNER_TID:
 180		type = PIDTYPE_PID;
 181		break;
 182
 183	case F_OWNER_PID:
 184		type = PIDTYPE_TGID;
 185		break;
 186
 187	case F_OWNER_PGRP:
 188		type = PIDTYPE_PGID;
 189		break;
 190
 191	default:
 192		return -EINVAL;
 193	}
 194
 195	rcu_read_lock();
 196	pid = find_vpid(owner.pid);
 197	if (owner.pid && !pid)
 198		ret = -ESRCH;
 199	else
 200		 __f_setown(filp, pid, type, 1);
 201	rcu_read_unlock();
 202
 203	return ret;
 204}
 205
 206static int f_getown_ex(struct file *filp, unsigned long arg)
 207{
 208	struct f_owner_ex __user *owner_p = (void __user *)arg;
 209	struct f_owner_ex owner = {};
 210	int ret = 0;
 211
 212	read_lock_irq(&filp->f_owner.lock);
 213	rcu_read_lock();
 214	if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type))
 215		owner.pid = pid_vnr(filp->f_owner.pid);
 216	rcu_read_unlock();
 217	switch (filp->f_owner.pid_type) {
 218	case PIDTYPE_PID:
 219		owner.type = F_OWNER_TID;
 220		break;
 221
 222	case PIDTYPE_TGID:
 223		owner.type = F_OWNER_PID;
 224		break;
 225
 226	case PIDTYPE_PGID:
 227		owner.type = F_OWNER_PGRP;
 228		break;
 229
 230	default:
 231		WARN_ON(1);
 232		ret = -EINVAL;
 233		break;
 234	}
 235	read_unlock_irq(&filp->f_owner.lock);
 236
 237	if (!ret) {
 238		ret = copy_to_user(owner_p, &owner, sizeof(owner));
 239		if (ret)
 240			ret = -EFAULT;
 241	}
 242	return ret;
 243}
 244
 245#ifdef CONFIG_CHECKPOINT_RESTORE
 246static int f_getowner_uids(struct file *filp, unsigned long arg)
 247{
 248	struct user_namespace *user_ns = current_user_ns();
 249	uid_t __user *dst = (void __user *)arg;
 250	uid_t src[2];
 251	int err;
 252
 253	read_lock_irq(&filp->f_owner.lock);
 254	src[0] = from_kuid(user_ns, filp->f_owner.uid);
 255	src[1] = from_kuid(user_ns, filp->f_owner.euid);
 256	read_unlock_irq(&filp->f_owner.lock);
 257
 258	err  = put_user(src[0], &dst[0]);
 259	err |= put_user(src[1], &dst[1]);
 260
 261	return err;
 262}
 263#else
 264static int f_getowner_uids(struct file *filp, unsigned long arg)
 265{
 266	return -EINVAL;
 267}
 268#endif
 269
 270static bool rw_hint_valid(enum rw_hint hint)
 271{
 272	switch (hint) {
 273	case RWH_WRITE_LIFE_NOT_SET:
 274	case RWH_WRITE_LIFE_NONE:
 275	case RWH_WRITE_LIFE_SHORT:
 276	case RWH_WRITE_LIFE_MEDIUM:
 277	case RWH_WRITE_LIFE_LONG:
 278	case RWH_WRITE_LIFE_EXTREME:
 279		return true;
 280	default:
 281		return false;
 282	}
 283}
 284
 285static long fcntl_rw_hint(struct file *file, unsigned int cmd,
 286			  unsigned long arg)
 287{
 288	struct inode *inode = file_inode(file);
 289	u64 __user *argp = (u64 __user *)arg;
 290	enum rw_hint hint;
 291	u64 h;
 292
 293	switch (cmd) {
 294	case F_GET_RW_HINT:
 295		h = inode->i_write_hint;
 296		if (copy_to_user(argp, &h, sizeof(*argp)))
 297			return -EFAULT;
 298		return 0;
 299	case F_SET_RW_HINT:
 300		if (copy_from_user(&h, argp, sizeof(h)))
 301			return -EFAULT;
 302		hint = (enum rw_hint) h;
 303		if (!rw_hint_valid(hint))
 304			return -EINVAL;
 305
 306		inode_lock(inode);
 307		inode->i_write_hint = hint;
 308		inode_unlock(inode);
 309		return 0;
 310	default:
 311		return -EINVAL;
 312	}
 313}
 314
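/*
 * Illustrative userspace sketch, not part of fcntl.c: driving the
 * F_SET_RW_HINT case handled by fcntl_rw_hint() above. The fallback
 * constants mirror include/uapi/linux/fcntl.h (F_LINUX_SPECIFIC_BASE
 * is 1024); the argument is passed as a pointer to a 64-bit value.
 * mark_short_lived() is a hypothetical helper name.
 */
#if 0	/* example only, never built as part of this file */
#include <fcntl.h>
#include <stdint.h>

#ifndef F_SET_RW_HINT
#define F_SET_RW_HINT		(1024 + 12)
#endif
#ifndef RWH_WRITE_LIFE_SHORT
#define RWH_WRITE_LIFE_SHORT	2
#endif

static int mark_short_lived(int fd)
{
	uint64_t hint = RWH_WRITE_LIFE_SHORT;

	/* reaches fcntl_rw_hint(): copy_from_user() + inode->i_write_hint */
	return fcntl(fd, F_SET_RW_HINT, &hint);
}
#endif
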
 315static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
 316		struct file *filp)
 317{
 318	void __user *argp = (void __user *)arg;
 319	struct flock flock;
 320	long err = -EINVAL;
 321
 322	switch (cmd) {
 323	case F_DUPFD:
 324		err = f_dupfd(arg, filp, 0);
 325		break;
 326	case F_DUPFD_CLOEXEC:
 327		err = f_dupfd(arg, filp, O_CLOEXEC);
 328		break;
 329	case F_GETFD:
 330		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
 331		break;
 332	case F_SETFD:
 333		err = 0;
 334		set_close_on_exec(fd, arg & FD_CLOEXEC);
 335		break;
 336	case F_GETFL:
 337		err = filp->f_flags;
 338		break;
 339	case F_SETFL:
 340		err = setfl(fd, filp, arg);
 341		break;
 342#if BITS_PER_LONG != 32
 343	/* 32-bit arches must use fcntl64() */
 344	case F_OFD_GETLK:
 345#endif
 346	case F_GETLK:
 347		if (copy_from_user(&flock, argp, sizeof(flock)))
 348			return -EFAULT;
 349		err = fcntl_getlk(filp, cmd, &flock);
 350		if (!err && copy_to_user(argp, &flock, sizeof(flock)))
 351			return -EFAULT;
 352		break;
 353#if BITS_PER_LONG != 32
 354	/* 32-bit arches must use fcntl64() */
 355	case F_OFD_SETLK:
 356	case F_OFD_SETLKW:
 357		fallthrough;
 358#endif
 359	case F_SETLK:
 360	case F_SETLKW:
 361		if (copy_from_user(&flock, argp, sizeof(flock)))
 362			return -EFAULT;
 363		err = fcntl_setlk(fd, filp, cmd, &flock);
 364		break;
 365	case F_GETOWN:
 366		/*
 367		 * XXX If f_owner is a process group, the
 368		 * negative return value will get converted
 369		 * into an error.  Oops.  If we keep the
 370		 * current syscall conventions, the only way
 371		 * to fix this will be in libc.
 372		 */
 373		err = f_getown(filp);
 374		force_successful_syscall_return();
 375		break;
 376	case F_SETOWN:
 377		err = f_setown(filp, arg, 1);
 378		break;
 379	case F_GETOWN_EX:
 380		err = f_getown_ex(filp, arg);
 381		break;
 382	case F_SETOWN_EX:
 383		err = f_setown_ex(filp, arg);
 384		break;
 385	case F_GETOWNER_UIDS:
 386		err = f_getowner_uids(filp, arg);
 387		break;
 388	case F_GETSIG:
 389		err = filp->f_owner.signum;
 390		break;
 391	case F_SETSIG:
 392		/* arg == 0 restores default behaviour. */
 393		if (!valid_signal(arg)) {
 394			break;
 395		}
 396		err = 0;
 397		filp->f_owner.signum = arg;
 398		break;
 399	case F_GETLEASE:
 400		err = fcntl_getlease(filp);
 401		break;
 402	case F_SETLEASE:
 403		err = fcntl_setlease(fd, filp, arg);
 404		break;
 405	case F_NOTIFY:
 406		err = fcntl_dirnotify(fd, filp, arg);
 407		break;
 408	case F_SETPIPE_SZ:
 409	case F_GETPIPE_SZ:
 410		err = pipe_fcntl(filp, cmd, arg);
 411		break;
 412	case F_ADD_SEALS:
 413	case F_GET_SEALS:
 414		err = memfd_fcntl(filp, cmd, arg);
 415		break;
 416	case F_GET_RW_HINT:
 417	case F_SET_RW_HINT:
 418		err = fcntl_rw_hint(filp, cmd, arg);
 419		break;
 420	default:
 421		break;
 422	}
 423	return err;
 424}
 425
 426static int check_fcntl_cmd(unsigned cmd)
 427{
 428	switch (cmd) {
 429	case F_DUPFD:
 430	case F_DUPFD_CLOEXEC:
 431	case F_GETFD:
 432	case F_SETFD:
 433	case F_GETFL:
 434		return 1;
 435	}
 436	return 0;
 437}
 438
 439SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
 440{	
 441	struct fd f = fdget_raw(fd);
 442	long err = -EBADF;
 443
 444	if (!f.file)
 445		goto out;
 446
 447	if (unlikely(f.file->f_mode & FMODE_PATH)) {
 448		if (!check_fcntl_cmd(cmd))
 449			goto out1;
 450	}
 451
 452	err = security_file_fcntl(f.file, cmd, arg);
 453	if (!err)
 454		err = do_fcntl(fd, cmd, arg, f.file);
 455
 456out1:
 457 	fdput(f);
 458out:
 459	return err;
 460}
 461
 462#if BITS_PER_LONG == 32
 463SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
 464		unsigned long, arg)
 465{	
 466	void __user *argp = (void __user *)arg;
 467	struct fd f = fdget_raw(fd);
 468	struct flock64 flock;
 469	long err = -EBADF;
 470
 471	if (!f.file)
 472		goto out;
 473
 474	if (unlikely(f.file->f_mode & FMODE_PATH)) {
 475		if (!check_fcntl_cmd(cmd))
 476			goto out1;
 477	}
 478
 479	err = security_file_fcntl(f.file, cmd, arg);
 480	if (err)
 481		goto out1;
 482	
 483	switch (cmd) {
 484	case F_GETLK64:
 485	case F_OFD_GETLK:
 486		err = -EFAULT;
 487		if (copy_from_user(&flock, argp, sizeof(flock)))
 488			break;
 489		err = fcntl_getlk64(f.file, cmd, &flock);
 490		if (!err && copy_to_user(argp, &flock, sizeof(flock)))
 491			err = -EFAULT;
 492		break;
 493	case F_SETLK64:
 494	case F_SETLKW64:
 495	case F_OFD_SETLK:
 496	case F_OFD_SETLKW:
 497		err = -EFAULT;
 498		if (copy_from_user(&flock, argp, sizeof(flock)))
 499			break;
 500		err = fcntl_setlk64(fd, f.file, cmd, &flock);
 501		break;
 502	default:
 503		err = do_fcntl(fd, cmd, arg, f.file);
 504		break;
 505	}
 506out1:
 507	fdput(f);
 508out:
 509	return err;
 510}
 511#endif
 512
 513#ifdef CONFIG_COMPAT
 514/* careful - don't use anywhere else */
 515#define copy_flock_fields(dst, src)		\
 516	(dst)->l_type = (src)->l_type;		\
 517	(dst)->l_whence = (src)->l_whence;	\
 518	(dst)->l_start = (src)->l_start;	\
 519	(dst)->l_len = (src)->l_len;		\
 520	(dst)->l_pid = (src)->l_pid;
 521
 522static int get_compat_flock(struct flock *kfl, const struct compat_flock __user *ufl)
 523{
 524	struct compat_flock fl;
 525
 526	if (copy_from_user(&fl, ufl, sizeof(struct compat_flock)))
 527		return -EFAULT;
 528	copy_flock_fields(kfl, &fl);
 529	return 0;
 530}
 531
 532static int get_compat_flock64(struct flock *kfl, const struct compat_flock64 __user *ufl)
 533{
 534	struct compat_flock64 fl;
 535
 536	if (copy_from_user(&fl, ufl, sizeof(struct compat_flock64)))
 537		return -EFAULT;
 538	copy_flock_fields(kfl, &fl);
 539	return 0;
 540}
 541
 542static int put_compat_flock(const struct flock *kfl, struct compat_flock __user *ufl)
 543{
 544	struct compat_flock fl;
 545
 546	memset(&fl, 0, sizeof(struct compat_flock));
 547	copy_flock_fields(&fl, kfl);
 548	if (copy_to_user(ufl, &fl, sizeof(struct compat_flock)))
 549		return -EFAULT;
 550	return 0;
 551}
 552
 553static int put_compat_flock64(const struct flock *kfl, struct compat_flock64 __user *ufl)
 554{
 555	struct compat_flock64 fl;
 556
 557	BUILD_BUG_ON(sizeof(kfl->l_start) > sizeof(ufl->l_start));
 558	BUILD_BUG_ON(sizeof(kfl->l_len) > sizeof(ufl->l_len));
 559
 560	memset(&fl, 0, sizeof(struct compat_flock64));
 561	copy_flock_fields(&fl, kfl);
 562	if (copy_to_user(ufl, &fl, sizeof(struct compat_flock64)))
 563		return -EFAULT;
 564	return 0;
 565}
 566#undef copy_flock_fields
 567
 568static unsigned int
 569convert_fcntl_cmd(unsigned int cmd)
 570{
 571	switch (cmd) {
 572	case F_GETLK64:
 573		return F_GETLK;
 574	case F_SETLK64:
 575		return F_SETLK;
 576	case F_SETLKW64:
 577		return F_SETLKW;
 578	}
 579
 580	return cmd;
 581}
 582
 583/*
 584 * GETLK was successful and we need to return the data, but it needs to fit in
 585 * the compat structure.
 586 * l_start shouldn't be too big, unless the original start + end is greater than
 587 * COMPAT_OFF_T_MAX, in which case the app was asking for trouble, so we return
 588 * -EOVERFLOW in that case.  l_len could be too big, in which case we just
 589 * truncate it, and only allow the app to see that part of the conflicting lock
 590 * that might make sense to it anyway
 591 */
 592static int fixup_compat_flock(struct flock *flock)
 593{
 594	if (flock->l_start > COMPAT_OFF_T_MAX)
 595		return -EOVERFLOW;
 596	if (flock->l_len > COMPAT_OFF_T_MAX)
 597		flock->l_len = COMPAT_OFF_T_MAX;
 598	return 0;
 599}
 600
 601static long do_compat_fcntl64(unsigned int fd, unsigned int cmd,
 602			     compat_ulong_t arg)
 603{
 604	struct fd f = fdget_raw(fd);
 605	struct flock flock;
 606	long err = -EBADF;
 607
 608	if (!f.file)
 609		return err;
 610
 611	if (unlikely(f.file->f_mode & FMODE_PATH)) {
 612		if (!check_fcntl_cmd(cmd))
 613			goto out_put;
 614	}
 615
 616	err = security_file_fcntl(f.file, cmd, arg);
 617	if (err)
 618		goto out_put;
 619
 620	switch (cmd) {
 621	case F_GETLK:
 622		err = get_compat_flock(&flock, compat_ptr(arg));
 623		if (err)
 624			break;
 625		err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock);
 626		if (err)
 627			break;
 628		err = fixup_compat_flock(&flock);
 629		if (!err)
 630			err = put_compat_flock(&flock, compat_ptr(arg));
 631		break;
 632	case F_GETLK64:
 633	case F_OFD_GETLK:
 634		err = get_compat_flock64(&flock, compat_ptr(arg));
 635		if (err)
 636			break;
 637		err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock);
 638		if (!err)
 639			err = put_compat_flock64(&flock, compat_ptr(arg));
 640		break;
 641	case F_SETLK:
 642	case F_SETLKW:
 643		err = get_compat_flock(&flock, compat_ptr(arg));
 644		if (err)
 645			break;
 646		err = fcntl_setlk(fd, f.file, convert_fcntl_cmd(cmd), &flock);
 647		break;
 648	case F_SETLK64:
 649	case F_SETLKW64:
 650	case F_OFD_SETLK:
 651	case F_OFD_SETLKW:
 652		err = get_compat_flock64(&flock, compat_ptr(arg));
 653		if (err)
 654			break;
 655		err = fcntl_setlk(fd, f.file, convert_fcntl_cmd(cmd), &flock);
 656		break;
 657	default:
 658		err = do_fcntl(fd, cmd, arg, f.file);
 659		break;
 660	}
 661out_put:
 662	fdput(f);
 663	return err;
 664}
 665
 666COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
 667		       compat_ulong_t, arg)
 668{
 669	return do_compat_fcntl64(fd, cmd, arg);
 670}
 671
 672COMPAT_SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd,
 673		       compat_ulong_t, arg)
 674{
 675	switch (cmd) {
 676	case F_GETLK64:
 677	case F_SETLK64:
 678	case F_SETLKW64:
 679	case F_OFD_GETLK:
 680	case F_OFD_SETLK:
 681	case F_OFD_SETLKW:
 682		return -EINVAL;
 683	}
 684	return do_compat_fcntl64(fd, cmd, arg);
 685}
 686#endif
 687
 688/* Table to convert sigio signal codes into poll band bitmaps */
 689
 690static const __poll_t band_table[NSIGPOLL] = {
 691	EPOLLIN | EPOLLRDNORM,			/* POLL_IN */
 692	EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND,	/* POLL_OUT */
 693	EPOLLIN | EPOLLRDNORM | EPOLLMSG,		/* POLL_MSG */
 694	EPOLLERR,				/* POLL_ERR */
 695	EPOLLPRI | EPOLLRDBAND,			/* POLL_PRI */
 696	EPOLLHUP | EPOLLERR			/* POLL_HUP */
 697};
 698
 699static inline int sigio_perm(struct task_struct *p,
 700                             struct fown_struct *fown, int sig)
 701{
 702	const struct cred *cred;
 703	int ret;
 704
 705	rcu_read_lock();
 706	cred = __task_cred(p);
 707	ret = ((uid_eq(fown->euid, GLOBAL_ROOT_UID) ||
 708		uid_eq(fown->euid, cred->suid) || uid_eq(fown->euid, cred->uid) ||
 709		uid_eq(fown->uid,  cred->suid) || uid_eq(fown->uid,  cred->uid)) &&
 710	       !security_file_send_sigiotask(p, fown, sig));
 711	rcu_read_unlock();
 712	return ret;
 713}
 714
 715static void send_sigio_to_task(struct task_struct *p,
 716			       struct fown_struct *fown,
 717			       int fd, int reason, enum pid_type type)
 718{
 719	/*
 720	 * F_SETSIG can change ->signum lockless in parallel, make
 721	 * sure we read it once and use the same value throughout.
 722	 */
 723	int signum = READ_ONCE(fown->signum);
 724
 725	if (!sigio_perm(p, fown, signum))
 726		return;
 727
 728	switch (signum) {
 729		default: {
 730			kernel_siginfo_t si;
 731
 732			/* Queue a rt signal with the appropriate fd as its
 733			   value.  We use SI_SIGIO as the source, not 
 734			   SI_KERNEL, since kernel signals always get 
 735			   delivered even if we can't queue.  Failure to
 736			   queue in this case _should_ be reported; we fall
 737			   back to SIGIO in that case. --sct */
 738			clear_siginfo(&si);
 739			si.si_signo = signum;
 740			si.si_errno = 0;
 741		        si.si_code  = reason;
 742			/*
 743			 * Posix definies POLL_IN and friends to be signal
 744			 * specific si_codes for SIG_POLL.  Linux extended
 745			 * these si_codes to other signals in a way that is
 746			 * ambiguous if other signals also have signal
 747			 * specific si_codes.  In that case use SI_SIGIO instead
 748			 * to remove the ambiguity.
 749			 */
 750			if ((signum != SIGPOLL) && sig_specific_sicodes(signum))
 751				si.si_code = SI_SIGIO;
 752
 753			/* Make sure we are called with one of the POLL_*
 754			   reasons, otherwise we could leak kernel stack into
 755			   userspace.  */
 756			BUG_ON((reason < POLL_IN) || ((reason - POLL_IN) >= NSIGPOLL));
 757			if (reason - POLL_IN >= NSIGPOLL)
 758				si.si_band  = ~0L;
 759			else
 760				si.si_band = mangle_poll(band_table[reason - POLL_IN]);
 761			si.si_fd    = fd;
 762			if (!do_send_sig_info(signum, &si, p, type))
 763				break;
 764		}
 765			fallthrough;	/* fall back on the old plain SIGIO signal */
 766		case 0:
 767			do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, type);
 768	}
 769}
 770
 771void send_sigio(struct fown_struct *fown, int fd, int band)
 772{
 773	struct task_struct *p;
 774	enum pid_type type;
 775	unsigned long flags;
 776	struct pid *pid;
 777	
 778	read_lock_irqsave(&fown->lock, flags);
 779
 780	type = fown->pid_type;
 781	pid = fown->pid;
 782	if (!pid)
 783		goto out_unlock_fown;
 784
 785	if (type <= PIDTYPE_TGID) {
 786		rcu_read_lock();
 787		p = pid_task(pid, PIDTYPE_PID);
 788		if (p)
 789			send_sigio_to_task(p, fown, fd, band, type);
 790		rcu_read_unlock();
 791	} else {
 792		read_lock(&tasklist_lock);
 793		do_each_pid_task(pid, type, p) {
 794			send_sigio_to_task(p, fown, fd, band, type);
 795		} while_each_pid_task(pid, type, p);
 796		read_unlock(&tasklist_lock);
 797	}
 798 out_unlock_fown:
 799	read_unlock_irqrestore(&fown->lock, flags);
 800}
 801
 802static void send_sigurg_to_task(struct task_struct *p,
 803				struct fown_struct *fown, enum pid_type type)
 804{
 805	if (sigio_perm(p, fown, SIGURG))
 806		do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, type);
 807}
 808
 809int send_sigurg(struct fown_struct *fown)
 810{
 811	struct task_struct *p;
 812	enum pid_type type;
 813	struct pid *pid;
 814	unsigned long flags;
 815	int ret = 0;
 816	
 817	read_lock_irqsave(&fown->lock, flags);
 818
 819	type = fown->pid_type;
 820	pid = fown->pid;
 821	if (!pid)
 822		goto out_unlock_fown;
 823
 824	ret = 1;
 825
 826	if (type <= PIDTYPE_TGID) {
 827		rcu_read_lock();
 828		p = pid_task(pid, PIDTYPE_PID);
 829		if (p)
 830			send_sigurg_to_task(p, fown, type);
 831		rcu_read_unlock();
 832	} else {
 833		read_lock(&tasklist_lock);
 834		do_each_pid_task(pid, type, p) {
 835			send_sigurg_to_task(p, fown, type);
 836		} while_each_pid_task(pid, type, p);
 837		read_unlock(&tasklist_lock);
 838	}
 839 out_unlock_fown:
 840	read_unlock_irqrestore(&fown->lock, flags);
 841	return ret;
 842}
 843
 844static DEFINE_SPINLOCK(fasync_lock);
 845static struct kmem_cache *fasync_cache __read_mostly;
 846
 847static void fasync_free_rcu(struct rcu_head *head)
 848{
 849	kmem_cache_free(fasync_cache,
 850			container_of(head, struct fasync_struct, fa_rcu));
 851}
 852
 853/*
 854 * Remove a fasync entry. If successfully removed, return
 855 * positive and clear the FASYNC flag. If no entry exists,
 856 * do nothing and return 0.
 857 *
 858 * NOTE! It is very important that the FASYNC flag always
 859 * match the state "is the filp on a fasync list".
 860 *
 861 */
 862int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
 863{
 864	struct fasync_struct *fa, **fp;
 865	int result = 0;
 866
 867	spin_lock(&filp->f_lock);
 868	spin_lock(&fasync_lock);
 869	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
 870		if (fa->fa_file != filp)
 871			continue;
 872
 873		write_lock_irq(&fa->fa_lock);
 874		fa->fa_file = NULL;
 875		write_unlock_irq(&fa->fa_lock);
 876
 877		*fp = fa->fa_next;
 878		call_rcu(&fa->fa_rcu, fasync_free_rcu);
 879		filp->f_flags &= ~FASYNC;
 880		result = 1;
 881		break;
 882	}
 883	spin_unlock(&fasync_lock);
 884	spin_unlock(&filp->f_lock);
 885	return result;
 886}
 887
 888struct fasync_struct *fasync_alloc(void)
 889{
 890	return kmem_cache_alloc(fasync_cache, GFP_KERNEL);
 891}
 892
 893/*
 894 * NOTE! This can be used only for unused fasync entries:
 895 * entries that actually got inserted on the fasync list
 896 * need to be released by rcu - see fasync_remove_entry.
 897 */
 898void fasync_free(struct fasync_struct *new)
 899{
 900	kmem_cache_free(fasync_cache, new);
 901}
 902
 903/*
 904 * Insert a new entry into the fasync list.  Return the pointer to the
 905 * old one if we didn't use the new one.
 906 *
 907 * NOTE! It is very important that the FASYNC flag always
 908 * match the state "is the filp on a fasync list".
 909 */
 910struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new)
 911{
 912        struct fasync_struct *fa, **fp;
 913
 914	spin_lock(&filp->f_lock);
 915	spin_lock(&fasync_lock);
 916	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
 917		if (fa->fa_file != filp)
 918			continue;
 919
 920		write_lock_irq(&fa->fa_lock);
 921		fa->fa_fd = fd;
 922		write_unlock_irq(&fa->fa_lock);
 923		goto out;
 924	}
 925
 926	rwlock_init(&new->fa_lock);
 927	new->magic = FASYNC_MAGIC;
 928	new->fa_file = filp;
 929	new->fa_fd = fd;
 930	new->fa_next = *fapp;
 931	rcu_assign_pointer(*fapp, new);
 932	filp->f_flags |= FASYNC;
 933
 934out:
 935	spin_unlock(&fasync_lock);
 936	spin_unlock(&filp->f_lock);
 937	return fa;
 938}
 939
 940/*
 941 * Add a fasync entry. Return negative on error, positive if
 942 * added, and zero if it did nothing but change an existing one.
 943 */
 944static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
 945{
 946	struct fasync_struct *new;
 947
 948	new = fasync_alloc();
 949	if (!new)
 950		return -ENOMEM;
 951
 952	/*
 953	 * fasync_insert_entry() returns the old (update) entry if
 954	 * it existed.
 955	 *
 956	 * So free the (unused) new entry and return 0 to let the
 957	 * caller know that we didn't add any new fasync entries.
 958	 */
 959	if (fasync_insert_entry(fd, filp, fapp, new)) {
 960		fasync_free(new);
 961		return 0;
 962	}
 963
 964	return 1;
 965}
 966
 967/*
 968 * fasync_helper() is used by almost all character device drivers
 969 * to set up the fasync queue, and for regular files by the file
 970 * lease code. It returns negative on error, 0 if it did no changes
 971 * and positive if it added/deleted the entry.
 972 */
 973int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
 974{
 975	if (!on)
 976		return fasync_remove_entry(filp, fapp);
 977	return fasync_add_entry(fd, filp, fapp);
 978}
 979
 980EXPORT_SYMBOL(fasync_helper);
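
/*
 * Illustrative sketch, not part of fcntl.c: the usual driver-side pairing of
 * fasync_helper() and kill_fasync() described in the comment above. The
 * mydev_* names and mydev_async_queue are hypothetical; a real driver would
 * also provide .read/.poll and call mydev_data_ready() from its interrupt or
 * completion path.
 */
#if 0	/* example only, never built as part of this file */
static struct fasync_struct *mydev_async_queue;

static int mydev_fasync(int fd, struct file *filp, int on)
{
	/* let fasync_helper() add/remove filp and keep FASYNC in sync */
	return fasync_helper(fd, filp, on, &mydev_async_queue);
}

static int mydev_release(struct inode *inode, struct file *filp)
{
	/* drop any entry this filp still holds on the list */
	mydev_fasync(-1, filp, 0);
	return 0;
}

static void mydev_data_ready(void)
{
	/* new data is readable: notify every registered owner via send_sigio() */
	kill_fasync(&mydev_async_queue, SIGIO, POLL_IN);
}

static const struct file_operations mydev_fops = {
	.owner   = THIS_MODULE,
	.fasync  = mydev_fasync,
	.release = mydev_release,
};
#endif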
 981
 982/*
 983 * rcu_read_lock() is held
 984 */
 985static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
 986{
 987	while (fa) {
 988		struct fown_struct *fown;
 989		unsigned long flags;
 990
 991		if (fa->magic != FASYNC_MAGIC) {
 992			printk(KERN_ERR "kill_fasync: bad magic number in "
 993			       "fasync_struct!\n");
 994			return;
 995		}
 996		read_lock_irqsave(&fa->fa_lock, flags);
 997		if (fa->fa_file) {
 998			fown = &fa->fa_file->f_owner;
 999			/* Don't send SIGURG to processes which have not set a
1000			   queued signum: SIGURG has its own default signalling
1001			   mechanism. */
1002			if (!(sig == SIGURG && fown->signum == 0))
1003				send_sigio(fown, fa->fa_fd, band);
1004		}
1005		read_unlock_irqrestore(&fa->fa_lock, flags);
1006		fa = rcu_dereference(fa->fa_next);
1007	}
1008}
1009
1010void kill_fasync(struct fasync_struct **fp, int sig, int band)
1011{
1012	/* First a quick test without locking: usually
1013	 * the list is empty.
1014	 */
1015	if (*fp) {
1016		rcu_read_lock();
1017		kill_fasync_rcu(rcu_dereference(*fp), sig, band);
1018		rcu_read_unlock();
1019	}
1020}
1021EXPORT_SYMBOL(kill_fasync);
1022
1023static int __init fcntl_init(void)
1024{
1025	/*
1026	 * Please add new bits here to ensure allocation uniqueness.
1027	 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
1028	 * is defined as O_NONBLOCK on some platforms and not on others.
1029	 */
1030	BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ !=
1031		HWEIGHT32(
1032			(VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) |
1033			__FMODE_EXEC | __FMODE_NONOTIFY));
1034
1035	fasync_cache = kmem_cache_create("fasync_cache",
1036					 sizeof(struct fasync_struct), 0,
1037					 SLAB_PANIC | SLAB_ACCOUNT, NULL);
1038	return 0;
1039}
1040
1041module_init(fcntl_init)
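
A closing illustrative sketch (not part of fcntl.c): taking and querying an open file description lock from userspace, which travels through fcntl()/fcntl64()/the compat entry points above into fcntl_setlk() and fcntl_getlk(). It assumes Linux with glibc (_GNU_SOURCE exposes F_OFD_*), uses a hypothetical path /tmp/example.lock, and keeps error handling minimal; note that OFD locks require l_pid == 0.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct flock fl;
	int fd = open("/tmp/example.lock", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return 1;

	memset(&fl, 0, sizeof(fl));
	fl.l_type = F_WRLCK;	/* whole-file write lock */
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 0;		/* 0 means "to end of file" */
	fl.l_pid = 0;		/* must be 0 for OFD locks */

	if (fcntl(fd, F_OFD_SETLK, &fl) == -1) {
		/* non-blocking attempt failed: ask what conflicts */
		fcntl(fd, F_OFD_GETLK, &fl);
		printf("conflict: type=%d start=%lld len=%lld\n",
		       fl.l_type, (long long)fl.l_start, (long long)fl.l_len);
		return 1;
	}
	printf("lock acquired\n");
	return 0;
}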