// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/fcntl.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched/task.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pipe_fs_i.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/memfd.h>
#include <linux/compat.h>
#include <linux/mount.h>

#include <linux/poll.h>
#include <asm/siginfo.h>
#include <linux/uaccess.h>

#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)

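/*
 * F_SETFL: only the bits in SETFL_MASK may be changed on an already
 * open file; everything else in ->f_flags is fixed at open time.
 */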
static int setfl(int fd, struct file * filp, unsigned long arg)
{
	struct inode * inode = file_inode(filp);
	int error = 0;

	/*
	 * O_APPEND cannot be cleared if the file is marked as append-only
	 * and the file is open for write.
	 */
	if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (!inode_owner_or_capable(file_mnt_user_ns(filp), inode))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	/* Pipe packetized mode is controlled by O_DIRECT flag */
	if (!S_ISFIFO(inode->i_mode) &&
	    (arg & O_DIRECT) &&
	    !(filp->f_mode & FMODE_CAN_ODIRECT))
		return -EINVAL;

	if (filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (error)
		return error;

	/*
	 * ->fasync() is responsible for setting the FASYNC bit.
	 */
	if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op->fasync) {
		error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
		if (error < 0)
			goto out;
		if (error > 0)
			error = 0;
	}
	spin_lock(&filp->f_lock);
	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
	filp->f_iocb_flags = iocb_flags(filp);
	spin_unlock(&filp->f_lock);

 out:
	return error;
}

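/*
 * Install @pid as the owner that receives SIGIO/SIGURG for this file.
 * Unless @force, an already-set owner is left untouched.  The caller's
 * uid/euid are recorded so sigio_perm() can check them at delivery time.
 */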
static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
		     int force)
{
	write_lock_irq(&filp->f_owner.lock);
	if (force || !filp->f_owner.pid) {
		put_pid(filp->f_owner.pid);
		filp->f_owner.pid = get_pid(pid);
		filp->f_owner.pid_type = type;

		if (pid) {
			const struct cred *cred = current_cred();
			filp->f_owner.uid = cred->uid;
			filp->f_owner.euid = cred->euid;
		}
	}
	write_unlock_irq(&filp->f_owner.lock);
}

void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
		int force)
{
	security_file_set_fowner(filp);
	f_modown(filp, pid, type, force);
}
EXPORT_SYMBOL(__f_setown);

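/*
 * F_SETOWN: a positive argument names a thread group (process), a
 * negative one names a process group, mirroring the pid convention
 * used by kill(2).
 */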
int f_setown(struct file *filp, unsigned long arg, int force)
{
	enum pid_type type;
	struct pid *pid = NULL;
	int who = arg, ret = 0;

	type = PIDTYPE_TGID;
	if (who < 0) {
		/* avoid overflow below */
		if (who == INT_MIN)
			return -EINVAL;

		type = PIDTYPE_PGID;
		who = -who;
	}

	rcu_read_lock();
	if (who) {
		pid = find_vpid(who);
		if (!pid)
			ret = -ESRCH;
	}

	if (!ret)
		__f_setown(filp, pid, type, force);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(f_setown);

void f_delown(struct file *filp)
{
	f_modown(filp, NULL, PIDTYPE_TGID, 1);
}

pid_t f_getown(struct file *filp)
{
	pid_t pid = 0;

	read_lock_irq(&filp->f_owner.lock);
	rcu_read_lock();
	if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type)) {
		pid = pid_vnr(filp->f_owner.pid);
		if (filp->f_owner.pid_type == PIDTYPE_PGID)
			pid = -pid;
	}
	rcu_read_unlock();
	read_unlock_irq(&filp->f_owner.lock);
	return pid;
}

static int f_setown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner;
	struct pid *pid;
	int type;
	int ret;

	ret = copy_from_user(&owner, owner_p, sizeof(owner));
	if (ret)
		return -EFAULT;

	switch (owner.type) {
	case F_OWNER_TID:
		type = PIDTYPE_PID;
		break;

	case F_OWNER_PID:
		type = PIDTYPE_TGID;
		break;

	case F_OWNER_PGRP:
		type = PIDTYPE_PGID;
		break;

	default:
		return -EINVAL;
	}

	rcu_read_lock();
	pid = find_vpid(owner.pid);
	if (owner.pid && !pid)
		ret = -ESRCH;
	else
		__f_setown(filp, pid, type, 1);
	rcu_read_unlock();

	return ret;
}

static int f_getown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner = {};
	int ret = 0;

	read_lock_irq(&filp->f_owner.lock);
	rcu_read_lock();
	if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type))
		owner.pid = pid_vnr(filp->f_owner.pid);
	rcu_read_unlock();
	switch (filp->f_owner.pid_type) {
	case PIDTYPE_PID:
		owner.type = F_OWNER_TID;
		break;

	case PIDTYPE_TGID:
		owner.type = F_OWNER_PID;
		break;

	case PIDTYPE_PGID:
		owner.type = F_OWNER_PGRP;
		break;

	default:
		WARN_ON(1);
		ret = -EINVAL;
		break;
	}
	read_unlock_irq(&filp->f_owner.lock);

	if (!ret) {
		ret = copy_to_user(owner_p, &owner, sizeof(owner));
		if (ret)
			ret = -EFAULT;
	}
	return ret;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
	struct user_namespace *user_ns = current_user_ns();
	uid_t __user *dst = (void __user *)arg;
	uid_t src[2];
	int err;

	read_lock_irq(&filp->f_owner.lock);
	src[0] = from_kuid(user_ns, filp->f_owner.uid);
	src[1] = from_kuid(user_ns, filp->f_owner.euid);
	read_unlock_irq(&filp->f_owner.lock);

	err  = put_user(src[0], &dst[0]);
	err |= put_user(src[1], &dst[1]);

	return err;
}
#else
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
	return -EINVAL;
}
#endif

static bool rw_hint_valid(enum rw_hint hint)
{
	switch (hint) {
	case RWH_WRITE_LIFE_NOT_SET:
	case RWH_WRITE_LIFE_NONE:
	case RWH_WRITE_LIFE_SHORT:
	case RWH_WRITE_LIFE_MEDIUM:
	case RWH_WRITE_LIFE_LONG:
	case RWH_WRITE_LIFE_EXTREME:
		return true;
	default:
		return false;
	}
}

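/*
 * F_GET_RW_HINT / F_SET_RW_HINT: read or update the per-inode write
 * lifetime hint that filesystems and block drivers may use for data
 * placement.
 */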
static long fcntl_rw_hint(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct inode *inode = file_inode(file);
	u64 __user *argp = (u64 __user *)arg;
	enum rw_hint hint;
	u64 h;

	switch (cmd) {
	case F_GET_RW_HINT:
		h = inode->i_write_hint;
		if (copy_to_user(argp, &h, sizeof(*argp)))
			return -EFAULT;
		return 0;
	case F_SET_RW_HINT:
		if (copy_from_user(&h, argp, sizeof(h)))
			return -EFAULT;
		hint = (enum rw_hint) h;
		if (!rw_hint_valid(hint))
			return -EINVAL;

		inode_lock(inode);
		inode->i_write_hint = hint;
		inode_unlock(inode);
		return 0;
	default:
		return -EINVAL;
	}
}

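/*
 * Common command dispatcher, shared by fcntl(), fcntl64() and the
 * compat entry points for everything that needs no argument
 * translation.
 */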
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
		struct file *filp)
{
	void __user *argp = (void __user *)arg;
	struct flock flock;
	long err = -EINVAL;

	switch (cmd) {
	case F_DUPFD:
		err = f_dupfd(arg, filp, 0);
		break;
	case F_DUPFD_CLOEXEC:
		err = f_dupfd(arg, filp, O_CLOEXEC);
		break;
	case F_GETFD:
		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, arg & FD_CLOEXEC);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		err = setfl(fd, filp, arg);
		break;
#if BITS_PER_LONG != 32
	/* 32-bit arches must use fcntl64() */
	case F_OFD_GETLK:
#endif
	case F_GETLK:
		if (copy_from_user(&flock, argp, sizeof(flock)))
			return -EFAULT;
		err = fcntl_getlk(filp, cmd, &flock);
		if (!err && copy_to_user(argp, &flock, sizeof(flock)))
			return -EFAULT;
		break;
#if BITS_PER_LONG != 32
	/* 32-bit arches must use fcntl64() */
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		fallthrough;
#endif
	case F_SETLK:
	case F_SETLKW:
		if (copy_from_user(&flock, argp, sizeof(flock)))
			return -EFAULT;
		err = fcntl_setlk(fd, filp, cmd, &flock);
		break;
	case F_GETOWN:
		/*
		 * XXX If f_owner is a process group, the
		 * negative return value will get converted
		 * into an error.  Oops.  If we keep the
		 * current syscall conventions, the only way
		 * to fix this will be in libc.
		 */
		err = f_getown(filp);
		force_successful_syscall_return();
		break;
	case F_SETOWN:
		err = f_setown(filp, arg, 1);
		break;
	case F_GETOWN_EX:
		err = f_getown_ex(filp, arg);
		break;
	case F_SETOWN_EX:
		err = f_setown_ex(filp, arg);
		break;
	case F_GETOWNER_UIDS:
		err = f_getowner_uids(filp, arg);
		break;
	case F_GETSIG:
		err = filp->f_owner.signum;
		break;
	case F_SETSIG:
		/* arg == 0 restores default behaviour. */
		if (!valid_signal(arg)) {
			break;
		}
		err = 0;
		filp->f_owner.signum = arg;
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, arg);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, arg);
		break;
	case F_SETPIPE_SZ:
	case F_GETPIPE_SZ:
		err = pipe_fcntl(filp, cmd, arg);
		break;
	case F_ADD_SEALS:
	case F_GET_SEALS:
		err = memfd_fcntl(filp, cmd, arg);
		break;
	case F_GET_RW_HINT:
	case F_SET_RW_HINT:
		err = fcntl_rw_hint(filp, cmd, arg);
		break;
	default:
		break;
	}
	return err;
}

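/*
 * Commands that are still allowed on O_PATH file descriptors: only
 * operations on the descriptor itself, never on the underlying file.
 */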
static int check_fcntl_cmd(unsigned cmd)
{
	switch (cmd) {
	case F_DUPFD:
	case F_DUPFD_CLOEXEC:
	case F_GETFD:
	case F_SETFD:
	case F_GETFL:
		return 1;
	}
	return 0;
}

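/*
 * The fcntl() system call: look up the file, enforce the O_PATH
 * restriction and the LSM hook, then hand off to do_fcntl().
 */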
SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
	struct fd f = fdget_raw(fd);
	long err = -EBADF;

	if (!f.file)
		goto out;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out1;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (!err)
		err = do_fcntl(fd, cmd, arg, f.file);

out1:
	fdput(f);
out:
	return err;
}

#if BITS_PER_LONG == 32
SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
		unsigned long, arg)
{
	void __user *argp = (void __user *)arg;
	struct fd f = fdget_raw(fd);
	struct flock64 flock;
	long err = -EBADF;

	if (!f.file)
		goto out;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out1;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (err)
		goto out1;

	switch (cmd) {
	case F_GETLK64:
	case F_OFD_GETLK:
		err = -EFAULT;
		if (copy_from_user(&flock, argp, sizeof(flock)))
			break;
		err = fcntl_getlk64(f.file, cmd, &flock);
		if (!err && copy_to_user(argp, &flock, sizeof(flock)))
			err = -EFAULT;
		break;
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		err = -EFAULT;
		if (copy_from_user(&flock, argp, sizeof(flock)))
			break;
		err = fcntl_setlk64(fd, f.file, cmd, &flock);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, f.file);
		break;
	}
out1:
	fdput(f);
out:
	return err;
}
#endif

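/*
 * Compat handling: 32-bit userspace on a 64-bit kernel uses different
 * struct flock/flock64 layouts, so the record-locking commands are
 * translated here before reaching the generic lock code.
 */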
#ifdef CONFIG_COMPAT
/* careful - don't use anywhere else */
#define copy_flock_fields(dst, src)		\
	(dst)->l_type = (src)->l_type;		\
	(dst)->l_whence = (src)->l_whence;	\
	(dst)->l_start = (src)->l_start;	\
	(dst)->l_len = (src)->l_len;		\
	(dst)->l_pid = (src)->l_pid;

static int get_compat_flock(struct flock *kfl, const struct compat_flock __user *ufl)
{
	struct compat_flock fl;

	if (copy_from_user(&fl, ufl, sizeof(struct compat_flock)))
		return -EFAULT;
	copy_flock_fields(kfl, &fl);
	return 0;
}

static int get_compat_flock64(struct flock *kfl, const struct compat_flock64 __user *ufl)
{
	struct compat_flock64 fl;

	if (copy_from_user(&fl, ufl, sizeof(struct compat_flock64)))
		return -EFAULT;
	copy_flock_fields(kfl, &fl);
	return 0;
}

static int put_compat_flock(const struct flock *kfl, struct compat_flock __user *ufl)
{
	struct compat_flock fl;

	memset(&fl, 0, sizeof(struct compat_flock));
	copy_flock_fields(&fl, kfl);
	if (copy_to_user(ufl, &fl, sizeof(struct compat_flock)))
		return -EFAULT;
	return 0;
}

static int put_compat_flock64(const struct flock *kfl, struct compat_flock64 __user *ufl)
{
	struct compat_flock64 fl;

	BUILD_BUG_ON(sizeof(kfl->l_start) > sizeof(ufl->l_start));
	BUILD_BUG_ON(sizeof(kfl->l_len) > sizeof(ufl->l_len));

	memset(&fl, 0, sizeof(struct compat_flock64));
	copy_flock_fields(&fl, kfl);
	if (copy_to_user(ufl, &fl, sizeof(struct compat_flock64)))
		return -EFAULT;
	return 0;
}
#undef copy_flock_fields

static unsigned int
convert_fcntl_cmd(unsigned int cmd)
{
	switch (cmd) {
	case F_GETLK64:
		return F_GETLK;
	case F_SETLK64:
		return F_SETLK;
	case F_SETLKW64:
		return F_SETLKW;
	}

	return cmd;
}

/*
 * GETLK was successful and we need to return the data, but it needs to fit in
 * the compat structure.
 * l_start shouldn't be too big, unless the original start + end is greater than
 * COMPAT_OFF_T_MAX, in which case the app was asking for trouble, so we return
 * -EOVERFLOW in that case.  l_len could be too big, in which case we just
 * truncate it, and only allow the app to see that part of the conflicting lock
 * that might make sense to it anyway
 */
static int fixup_compat_flock(struct flock *flock)
{
	if (flock->l_start > COMPAT_OFF_T_MAX)
		return -EOVERFLOW;
	if (flock->l_len > COMPAT_OFF_T_MAX)
		flock->l_len = COMPAT_OFF_T_MAX;
	return 0;
}

static long do_compat_fcntl64(unsigned int fd, unsigned int cmd,
			      compat_ulong_t arg)
{
	struct fd f = fdget_raw(fd);
	struct flock flock;
	long err = -EBADF;

	if (!f.file)
		return err;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out_put;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (err)
		goto out_put;

	switch (cmd) {
	case F_GETLK:
		err = get_compat_flock(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock);
		if (err)
			break;
		err = fixup_compat_flock(&flock);
		if (!err)
			err = put_compat_flock(&flock, compat_ptr(arg));
		break;
	case F_GETLK64:
	case F_OFD_GETLK:
		err = get_compat_flock64(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock);
		if (!err)
			err = put_compat_flock64(&flock, compat_ptr(arg));
		break;
	case F_SETLK:
	case F_SETLKW:
		err = get_compat_flock(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_setlk(fd, f.file, convert_fcntl_cmd(cmd), &flock);
		break;
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		err = get_compat_flock64(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_setlk(fd, f.file, convert_fcntl_cmd(cmd), &flock);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, f.file);
		break;
	}
out_put:
	fdput(f);
	return err;
}

COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
		       compat_ulong_t, arg)
{
	return do_compat_fcntl64(fd, cmd, arg);
}

COMPAT_SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd,
		       compat_ulong_t, arg)
{
	switch (cmd) {
	case F_GETLK64:
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_GETLK:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		return -EINVAL;
	}
	return do_compat_fcntl64(fd, cmd, arg);
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static const __poll_t band_table[NSIGPOLL] = {
	EPOLLIN | EPOLLRDNORM,			/* POLL_IN */
	EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND,	/* POLL_OUT */
	EPOLLIN | EPOLLRDNORM | EPOLLMSG,	/* POLL_MSG */
	EPOLLERR,				/* POLL_ERR */
	EPOLLPRI | EPOLLRDBAND,			/* POLL_PRI */
	EPOLLHUP | EPOLLERR			/* POLL_HUP */
};

static inline int sigio_perm(struct task_struct *p,
			     struct fown_struct *fown, int sig)
{
	const struct cred *cred;
	int ret;

	rcu_read_lock();
	cred = __task_cred(p);
	ret = ((uid_eq(fown->euid, GLOBAL_ROOT_UID) ||
		uid_eq(fown->euid, cred->suid) || uid_eq(fown->euid, cred->uid) ||
		uid_eq(fown->uid,  cred->suid) || uid_eq(fown->uid,  cred->uid)) &&
	       !security_file_send_sigiotask(p, fown, sig));
	rcu_read_unlock();
	return ret;
}

static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd, int reason, enum pid_type type)
{
	/*
	 * F_SETSIG can change ->signum lockless in parallel, make
	 * sure we read it once and use the same value throughout.
	 */
	int signum = READ_ONCE(fown->signum);

	if (!sigio_perm(p, fown, signum))
		return;

	switch (signum) {
	default: {
		kernel_siginfo_t si;

		/* Queue a rt signal with the appropriate fd as its
		   value.  We use SI_SIGIO as the source, not
		   SI_KERNEL, since kernel signals always get
		   delivered even if we can't queue.  Failure to
		   queue in this case _should_ be reported; we fall
		   back to SIGIO in that case. --sct */
		clear_siginfo(&si);
		si.si_signo = signum;
		si.si_errno = 0;
		si.si_code  = reason;
		/*
		 * Posix defines POLL_IN and friends to be signal
		 * specific si_codes for SIG_POLL.  Linux extended
		 * these si_codes to other signals in a way that is
		 * ambiguous if other signals also have signal
		 * specific si_codes.  In that case use SI_SIGIO instead
		 * to remove the ambiguity.
		 */
		if ((signum != SIGPOLL) && sig_specific_sicodes(signum))
			si.si_code = SI_SIGIO;

		/* Make sure we are called with one of the POLL_*
		   reasons, otherwise we could leak kernel stack into
		   userspace.  */
		BUG_ON((reason < POLL_IN) || ((reason - POLL_IN) >= NSIGPOLL));
		if (reason - POLL_IN >= NSIGPOLL)
			si.si_band  = ~0L;
		else
			si.si_band = mangle_poll(band_table[reason - POLL_IN]);
		si.si_fd    = fd;
		if (!do_send_sig_info(signum, &si, p, type))
			break;
	}
		fallthrough;	/* fall back on the old plain SIGIO signal */
	case 0:
		do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, type);
	}
}

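/*
 * Deliver SIGIO (or the signal chosen with F_SETSIG) to the owner
 * recorded in @fown.  A single-task owner is resolved under RCU;
 * a process-group owner walks the tasklist.
 */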
void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	enum pid_type type;
	unsigned long flags;
	struct pid *pid;

	read_lock_irqsave(&fown->lock, flags);

	type = fown->pid_type;
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	if (type <= PIDTYPE_TGID) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			send_sigio_to_task(p, fown, fd, band, type);
		rcu_read_unlock();
	} else {
		read_lock(&tasklist_lock);
		do_each_pid_task(pid, type, p) {
			send_sigio_to_task(p, fown, fd, band, type);
		} while_each_pid_task(pid, type, p);
		read_unlock(&tasklist_lock);
	}
 out_unlock_fown:
	read_unlock_irqrestore(&fown->lock, flags);
}

static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown, enum pid_type type)
{
	if (sigio_perm(p, fown, SIGURG))
		do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, type);
}

int send_sigurg(struct fown_struct *fown)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	unsigned long flags;
	int ret = 0;

	read_lock_irqsave(&fown->lock, flags);

	type = fown->pid_type;
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	if (type <= PIDTYPE_TGID) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			send_sigurg_to_task(p, fown, type);
		rcu_read_unlock();
	} else {
		read_lock(&tasklist_lock);
		do_each_pid_task(pid, type, p) {
			send_sigurg_to_task(p, fown, type);
		} while_each_pid_task(pid, type, p);
		read_unlock(&tasklist_lock);
	}
 out_unlock_fown:
	read_unlock_irqrestore(&fown->lock, flags);
	return ret;
}

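/*
 * FASYNC support: each file keeps a singly-linked list of fasync_struct
 * entries.  Insertion and removal happen under f_lock and fasync_lock;
 * kill_fasync() walks the list under RCU.
 */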
static DEFINE_SPINLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;

static void fasync_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(fasync_cache,
			container_of(head, struct fasync_struct, fa_rcu));
}

/*
 * Remove a fasync entry. If successfully removed, return
 * positive and clear the FASYNC flag. If no entry exists,
 * do nothing and return 0.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 *
 */
int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	int result = 0;

	spin_lock(&filp->f_lock);
	spin_lock(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		write_lock_irq(&fa->fa_lock);
		fa->fa_file = NULL;
		write_unlock_irq(&fa->fa_lock);

		*fp = fa->fa_next;
		call_rcu(&fa->fa_rcu, fasync_free_rcu);
		filp->f_flags &= ~FASYNC;
		result = 1;
		break;
	}
	spin_unlock(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return result;
}

struct fasync_struct *fasync_alloc(void)
{
	return kmem_cache_alloc(fasync_cache, GFP_KERNEL);
}

/*
 * NOTE! This can be used only for unused fasync entries:
 * entries that actually got inserted on the fasync list
 * need to be released by rcu - see fasync_remove_entry.
 */
void fasync_free(struct fasync_struct *new)
{
	kmem_cache_free(fasync_cache, new);
}

/*
 * Insert a new entry into the fasync list.  Return the pointer to the
 * old one if we didn't use the new one.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new)
{
	struct fasync_struct *fa, **fp;

	spin_lock(&filp->f_lock);
	spin_lock(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		write_lock_irq(&fa->fa_lock);
		fa->fa_fd = fd;
		write_unlock_irq(&fa->fa_lock);
		goto out;
	}

	rwlock_init(&new->fa_lock);
	new->magic = FASYNC_MAGIC;
	new->fa_file = filp;
	new->fa_fd = fd;
	new->fa_next = *fapp;
	rcu_assign_pointer(*fapp, new);
	filp->f_flags |= FASYNC;

out:
	spin_unlock(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return fa;
}

/*
 * Add a fasync entry. Return negative on error, positive if
 * added, and zero if did nothing but change an existing one.
 */
static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *new;

	new = fasync_alloc();
	if (!new)
		return -ENOMEM;

	/*
	 * fasync_insert_entry() returns the old (update) entry if
	 * it existed.
	 *
	 * So free the (unused) new entry and return 0 to let the
	 * caller know that we didn't add any new fasync entries.
	 */
	if (fasync_insert_entry(fd, filp, fapp, new)) {
		fasync_free(new);
		return 0;
	}

	return 1;
}

/*
 * fasync_helper() is used by almost all character device drivers
 * to set up the fasync queue, and for regular files by the file
 * lease code. It returns negative on error, 0 if it did no changes
 * and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
	if (!on)
		return fasync_remove_entry(filp, fapp);
	return fasync_add_entry(fd, filp, fapp);
}

EXPORT_SYMBOL(fasync_helper);

/*
 * rcu_read_lock() is held
 */
static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct *fown;
		unsigned long flags;

		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		read_lock_irqsave(&fa->fa_lock, flags);
		if (fa->fa_file) {
			fown = &fa->fa_file->f_owner;
			/* Don't send SIGURG to processes which have not set a
			   queued signum: SIGURG has its own default signalling
			   mechanism. */
			if (!(sig == SIGURG && fown->signum == 0))
				send_sigio(fown, fa->fa_fd, band);
		}
		read_unlock_irqrestore(&fa->fa_lock, flags);
		fa = rcu_dereference(fa->fa_next);
	}
}

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		rcu_read_lock();
		kill_fasync_rcu(rcu_dereference(*fp), sig, band);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(kill_fasync);

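/*
 * Boot-time setup: sanity-check the open-flag bit allocation and
 * create the slab cache used for fasync_struct entries.
 */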
static int __init fcntl_init(void)
{
	/*
	 * Please add new bits here to ensure allocation uniqueness.
	 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
	 * is defined as O_NONBLOCK on some platforms and not on others.
	 */
	BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ !=
		HWEIGHT32(
			(VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) |
			__FMODE_EXEC | __FMODE_NONOTIFY));

	fasync_cache = kmem_cache_create("fasync_cache",
					 sizeof(struct fasync_struct), 0,
					 SLAB_PANIC | SLAB_ACCOUNT, NULL);
	return 0;
}

module_init(fcntl_init)