/*
 * linux/fs/locks.c
 *
 * Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 * Doug Evans (dje@spiff.uucp), August 07, 1992
 *
 * Deadlock detection added.
 * FIXME: one thing isn't handled yet:
 *	- mandatory locks (requires lots of changes elsewhere)
 * Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
 *
 * Miscellaneous edits, and a total rewrite of posix_lock_file() code.
 * Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
 *
 * Converted file_lock_table to a linked list from an array, which eliminates
 * the limits on how many active file locks are open.
 * Chad Page (pageone@netcom.com), November 27, 1994
 *
 * Removed dependency on file descriptors. dup()'ed file descriptors now
 * get the same locks as the original file descriptors, and a close() on
 * any file descriptor removes ALL the locks on the file for the current
 * process. Since locks still depend on the process id, locks are inherited
 * after an exec() but not after a fork(). This agrees with POSIX, and both
 * BSD and SVR4 practice.
 * Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
 *
 * Scrapped free list which is redundant now that we allocate locks
 * dynamically with kmalloc()/kfree().
 * Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
 *
 * Implemented two lock personalities - FL_FLOCK and FL_POSIX.
 *
 * FL_POSIX locks are created with calls to fcntl() and lockf() through the
 * fcntl() system call. They have the semantics described above.
 *
 * FL_FLOCK locks are created with calls to flock(), through the flock()
 * system call, which is new. Old C libraries implement flock() via fcntl()
 * and will continue to use the old, broken implementation.
 *
 * FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
 * with a file pointer (filp). As a result they can be shared by a parent
 * process and its children after a fork(). They are removed when the last
 * file descriptor referring to the file pointer is closed (unless explicitly
 * unlocked).
 *
 * FL_FLOCK locks never deadlock, an existing lock is always removed before
 * upgrading from shared to exclusive (or vice versa). When this happens
 * any processes blocked by the current lock are woken up and allowed to
 * run before the new lock is applied.
 * Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
 *
 * Removed some race conditions in flock_lock_file(), marked other possible
 * races. Just grep for FIXME to see them.
 * Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
 *
 * Addressed Dmitry's concerns. Deadlock checking no longer recursive.
 * Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
 * once we've checked for blocking and deadlocking.
 * Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
 *
 * Initial implementation of mandatory locks. SunOS turned out to be
 * a rotten model, so I implemented the "obvious" semantics.
 * See 'Documentation/mandatory.txt' for details.
 * Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
 *
 * Don't allow mandatory locks on mmap()'ed files. Added simple functions to
 * check if a file has mandatory locks, used by mmap(), open() and creat() to
 * see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
 * Manual, Section 2.
 * Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
 *
 * Tidied up block list handling. Added '/proc/locks' interface.
 * Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
 *
 * Fixed deadlock condition for pathological code that mixes calls to
 * flock() and fcntl().
 * Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
 *
 * Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
 * for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
 * guarantee sensible behaviour in the case where file system modules might
 * be compiled with different options than the kernel itself.
 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 * Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
 * (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 * Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
 * locks. Changed process synchronisation to avoid dereferencing locks that
 * have already been freed.
 * Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
 *
 * Made the block list a circular list to minimise searching in the list.
 * Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
 *
 * Made mandatory locking a mount option. Default is not to allow mandatory
 * locking.
 * Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 *
 * Some adaptations for NFS support.
 * Olaf Kirch (okir@monad.swb.de), Dec 1996,
 *
 * Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 * Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 *
 * Use slab allocator instead of kmalloc/kfree.
 * Use generic list implementation from <linux/list.h>.
 * Sped up posix_locks_deadlock by only considering blocked locks.
 * Matthew Wilcox <willy@debian.org>, March, 2000.
 *
 * Leases and LOCK_MAND
 * Matthew Wilcox <willy@debian.org>, June, 2000.
 * Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
 */

#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>

#include <asm/uaccess.h>

#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)

int leases_enable = 1;
int lease_break_time = 45;

#define for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)

static LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list);
static DEFINE_SPINLOCK(file_lock_lock);

/*
 * Protects the two list heads above, plus the inode->i_flock list
 */
void lock_flocks(void)
{
	spin_lock(&file_lock_lock);
}
EXPORT_SYMBOL_GPL(lock_flocks);

void unlock_flocks(void)
{
	spin_unlock(&file_lock_lock);
}
EXPORT_SYMBOL_GPL(unlock_flocks);
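
/*
 * Typical usage of this lock, as a sketch (illustrative, not a quote
 * from elsewhere in the tree): any walk or modification of
 * file_lock_list, blocked_list or an inode's i_flock chain is
 * bracketed by the pair above:
 *
 *	lock_flocks();
 *	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next)
 *		...;
 *	unlock_flocks();
 */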

static struct kmem_cache *filelock_cache __read_mostly;

static void locks_init_lock_heads(struct file_lock *fl)
{
	INIT_LIST_HEAD(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_block);
	init_waitqueue_head(&fl->fl_wait);
}

/* Allocate an empty lock structure. */
struct file_lock *locks_alloc_lock(void)
{
	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);

	if (fl)
		locks_init_lock_heads(fl);

	return fl;
}
EXPORT_SYMBOL_GPL(locks_alloc_lock);

void locks_release_private(struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}
	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_release_private)
			fl->fl_lmops->lm_release_private(fl);
		fl->fl_lmops = NULL;
	}
}
EXPORT_SYMBOL_GPL(locks_release_private);

/* Free a lock which is not in use. */
void locks_free_lock(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_block));
	BUG_ON(!list_empty(&fl->fl_link));

	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);

void locks_init_lock(struct file_lock *fl)
{
	memset(fl, 0, sizeof(struct file_lock));
	locks_init_lock_heads(fl);
}

EXPORT_SYMBOL(locks_init_lock);

static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
		new->fl_ops = fl->fl_ops;
	}
	if (fl->fl_lmops)
		new->fl_lmops = fl->fl_lmops;
}

/*
 * Initialize a new lock from an existing file_lock structure.
 */
void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_ops = NULL;
	new->fl_lmops = NULL;
}
EXPORT_SYMBOL(__locks_copy_lock);

void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	locks_release_private(new);

	__locks_copy_lock(new, fl);
	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;
	new->fl_lmops = fl->fl_lmops;

	locks_copy_private(new, fl);
}

EXPORT_SYMBOL(locks_copy_lock);

static inline int flock_translate_cmd(int cmd)
{
	if (cmd & LOCK_MAND)
		return cmd & (LOCK_MAND | LOCK_RW);
	switch (cmd) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}

/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static int flock_make_lock(struct file *filp, struct file_lock **lock,
		unsigned int cmd)
{
	struct file_lock *fl;
	int type = flock_translate_cmd(cmd);
	if (type < 0)
		return type;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;

	fl->fl_file = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;

	*lock = fl;
	return 0;
}

static int assign_type(struct file_lock *fl, int type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	off_t start, end;

	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(filp->f_path.dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		end = start + l->l_len - 1;
		fl->fl_end = end;
	} else if (l->l_len < 0) {
		end = start - 1;
		fl->fl_end = end;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
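
/*
 * A worked example of the conversion above (values are illustrative).
 * With l_whence = SEEK_SET, l_start = 100, l_len = -10 (the POSIX-2001
 * negative-length case), start is first 100, then end = 99 and
 * start = 90, so the lock covers bytes [90, 99]. With l_len = 0 the
 * lock runs from l_start all the way to OFFSET_MAX (end of file).
 */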

#if BITS_PER_LONG == 32
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	loff_t start;

	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(filp->f_path.dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		fl->fl_end = start + l->l_len - 1;
	} else if (l->l_len < 0) {
		fl->fl_end = start - 1;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
#endif

/* default lease lock manager operations */
static void lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
}

static void lease_release_private_callback(struct file_lock *fl)
{
	if (!fl->fl_file)
		return;

	f_delown(fl->fl_file);
	fl->fl_file->f_owner.signum = 0;
}

static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_release_private = lease_release_private_callback,
	.lm_change = lease_modify,
};

/*
 * Initialize a lease, use the default lock manager operations
 */
static int lease_init(struct file *filp, int type, struct file_lock *fl)
{
	if (assign_type(fl, type) != 0)
		return -EINVAL;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}

/* Allocate a file_lock initialised to this type of lease */
static struct file_lock *lease_alloc(struct file *filp, int type)
{
	struct file_lock *fl = locks_alloc_lock();
	int error = -ENOMEM;

	if (fl == NULL)
		return ERR_PTR(error);

	error = lease_init(filp, type, fl);
	if (error) {
		locks_free_lock(fl);
		return ERR_PTR(error);
	}
	return fl;
}

/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}
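
/*
 * For example: [0, 9] and [5, 14] overlap, [0, 9] and [10, 19] do not
 * (adjacency is handled separately when locks are merged), and a lock
 * with fl_end == OFFSET_MAX overlaps everything at or after fl_start.
 */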

/*
 * Check whether two locks have the same owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
		return fl2->fl_lmops == fl1->fl_lmops &&
			fl1->fl_lmops->lm_compare_owner(fl1, fl2);
	return fl1->fl_owner == fl2->fl_owner;
}

/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	list_del_init(&waiter->fl_block);
	list_del_init(&waiter->fl_link);
	waiter->fl_next = NULL;
}
/*
 * Locked wrapper: remove waiter from its blocker's block list while
 * holding the file_lock_lock.
 */
static void locks_delete_block(struct file_lock *waiter)
{
	lock_flocks();
	__locks_delete_block(waiter);
	unlock_flocks();
}

/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter)
{
	BUG_ON(!list_empty(&waiter->fl_block));
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	waiter->fl_next = blocker;
	if (IS_POSIX(blocker))
		list_add(&waiter->fl_link, &blocked_list);
}

/* Wake up processes blocked waiting for blocker.
 * If told to wait then schedule the processes until the block list
 * is empty, otherwise empty the block list ourselves.
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_block,
					  struct file_lock, fl_block);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
}

/* Insert file lock fl into an inode's lock list at the position indicated
 * by pos. At the same time add the lock to the global file lock list.
 */
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
{
	list_add(&fl->fl_link, &file_lock_list);

	fl->fl_nspid = get_pid(task_tgid(current));

	/* insert into file's list */
	fl->fl_next = *pos;
	*pos = fl;
}

/*
 * Delete a lock and then free it.
 * Wake up processes that are blocked waiting for this lock,
 * notify the FS that the lock has been cleared and
 * finally free the lock.
 */
static void locks_delete_lock(struct file_lock **thisfl_p)
{
	struct file_lock *fl = *thisfl_p;

	*thisfl_p = fl->fl_next;
	fl->fl_next = NULL;
	list_del_init(&fl->fl_link);

	fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
	if (fl->fl_fasync != NULL) {
		printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
		fl->fl_fasync = NULL;
	}

	if (fl->fl_nspid) {
		put_pid(fl->fl_nspid);
		fl->fl_nspid = NULL;
	}

	locks_wake_up_blocks(fl);
	locks_free_lock(fl);
}

/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (sys_fl->fl_type == F_WRLCK)
		return 1;
	if (caller_fl->fl_type == F_WRLCK)
		return 1;
	return 0;
}
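
/*
 * In table form: two read locks never conflict; any pairing that
 * involves a write lock does.
 *
 *	caller \ sys	F_RDLCK		F_WRLCK
 *	F_RDLCK		no conflict	conflict
 *	F_WRLCK		conflict	conflict
 */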

/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling the locks_conflict().
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
		return (0);

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}

/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling the locks_conflict().
 */
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
		return (0);
	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}

void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;

	lock_flocks();
	for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
		if (!IS_POSIX(cfl))
			continue;
		if (posix_locks_conflict(fl, cfl))
			break;
	}
	if (cfl) {
		__locks_copy_lock(fl, cfl);
		if (cfl->fl_nspid)
			fl->fl_pid = pid_vnr(cfl->fl_nspid);
	} else
		fl->fl_type = F_UNLCK;
	unlock_flocks();
	return;
}
EXPORT_SYMBOL(posix_test_lock);

/*
 * Deadlock detection:
 *
 * We attempt to detect deadlocks that are due purely to posix file
 * locks.
 *
 * We assume that a task can be waiting for at most one lock at a time.
 * So for any acquired lock, the process holding that lock may be
 * waiting on at most one other lock. That lock in turn may be held by
 * someone waiting for at most one other lock. Given a requested lock
 * caller_fl which is about to wait for a conflicting lock block_fl, we
 * follow this chain of waiters to ensure we are not about to create a
 * cycle.
 *
 * Since we do this before we ever put a process to sleep on a lock, we
 * are ensured that there is never a cycle; that is what guarantees that
 * the while() loop in posix_locks_deadlock() eventually completes.
 *
 * Note: the above assumption may not be true when handling lock
 * requests from a broken NFS client. It may also fail in the presence
 * of tasks (such as posix threads) sharing the same open file table.
 *
 * To handle those cases, we just bail out after a few iterations.
 */

#define MAX_DEADLK_ITERATIONS 10

/* Find a lock that the owner of the given block_fl is blocking on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	list_for_each_entry(fl, &blocked_list, fl_link) {
		if (posix_same_owner(fl, block_fl))
			return fl->fl_next;
	}
	return NULL;
}

static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}
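
/*
 * The classic two-task cycle this catches (owners and ranges are
 * illustrative): task A holds a lock on [0, 9] and requests [10, 19],
 * while task B holds [10, 19] and is already blocked waiting on [0, 9].
 * Following the chain from A's blocker leads to B's pending request,
 * whose blocker is owned by A itself, so A's request fails with
 * -EDEADLK instead of sleeping forever.
 */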

/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * after any leases, but before any posix locks.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
static int flock_lock_file(struct file *filp, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock **before;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int error = 0;
	int found = 0;

	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	lock_flocks();
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (filp != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;
		found = 1;
		locks_delete_lock(before);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

	/*
	 * If a higher-priority process was blocked on the old file lock,
	 * give it the opportunity to lock the file.
	 */
	if (found) {
		unlock_flocks();
		cond_resched();
		lock_flocks();
	}

find_conflict:
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_insert_lock(before, new_fl);
	new_fl = NULL;
	error = 0;

out:
	unlock_flocks();
	if (new_fl)
		locks_free_lock(new_fl);
	return error;
}

static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
	struct file_lock *fl;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int error, added = 0;
	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 *
	 * In some cases we can be sure that no new locks will be needed.
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

	lock_flocks();
	if (request->fl_type != F_UNLCK) {
		for_each_lock(inode, before) {
			fl = *before;
			if (!IS_POSIX(fl))
				continue;
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				__locks_copy_lock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			error = -EDEADLK;
			if (posix_locks_deadlock(request, fl))
				goto out;
			error = FILE_LOCK_DEFERRED;
			locks_insert_block(fl, request);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/*
	 * Find the first old lock with the same owner as the new lock.
	 */

	before = &inode->i_flock;

	/* First skip locks owned by other processes. */
	while ((fl = *before) && (!IS_POSIX(fl) ||
				  !posix_same_owner(request, fl))) {
		before = &fl->fl_next;
	}

	/* Process locks with this owner. */
	while ((fl = *before) && posix_same_owner(request, fl)) {
		/* Detect adjacent or overlapping regions (if same lock type)
		 */
		if (request->fl_type == fl->fl_type) {
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1". If end
			 * is OFFSET_MAX, end + 1 will become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				goto next_lock;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock(before);
				continue;
			}
			request = fl;
			added = 1;
		} else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				goto next_lock;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = 1;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock(before);
					continue;
				}
				/* Replace the old lock with the new one.
				 * Wake up anybody waiting for the old one,
				 * as the change in lock type might satisfy
				 * their needs.
				 */
				locks_wake_up_blocks(fl);
				fl->fl_start = request->fl_start;
				fl->fl_end = request->fl_end;
				fl->fl_type = request->fl_type;
				locks_release_private(fl);
				locks_copy_private(fl, request);
				request = fl;
				added = 1;
			}
		}
		/* Go on to next lock.
		 */
	next_lock:
		before = &fl->fl_next;
	}

	/*
	 * The above code only modifies existing locks in case of
	 * merging or replacing. If new lock(s) need to be inserted
	 * all modifications are done below this point, so it's still
	 * safe to bail out.
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_insert_lock(before, new_fl);
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock(before, left);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
out:
	unlock_flocks();
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	return error;
}

/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
		    struct file_lock *conflock)
{
	return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);

/**
 * posix_lock_file_wait - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 */
int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = posix_lock_file(filp, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}
EXPORT_SYMBOL(posix_lock_file_wait);

/**
 * locks_mandatory_locked - Check for an active lock
 * @inode: the file to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from locks_verify_locked() only.
 */
int locks_mandatory_locked(struct inode *inode)
{
	fl_owner_t owner = current->files;
	struct file_lock *fl;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
	lock_flocks();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!IS_POSIX(fl))
			continue;
		if (fl->fl_owner != owner)
			break;
	}
	unlock_flocks();
	return fl ? -EAGAIN : 0;
}

/**
 * locks_mandatory_area - Check for a conflicting lock
 * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
 *		for shared
 * @inode: the file to check
 * @filp: how the file was opened (if it was)
 * @offset: start of area to check
 * @count: length of area to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from rw_verify_area() and
 * locks_verify_truncate().
 */
int locks_mandatory_area(int read_write, struct inode *inode,
			 struct file *filp, loff_t offset,
			 size_t count)
{
	struct file_lock fl;
	int error;

	locks_init_lock(&fl);
	fl.fl_owner = current->files;
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		fl.fl_flags |= FL_SLEEP;
	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
	fl.fl_start = offset;
	fl.fl_end = offset + count - 1;

	for (;;) {
		error = __posix_lock_file(inode, &fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
		if (!error) {
			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
			if (__mandatory_lock(inode))
				continue;
		}

		locks_delete_block(&fl);
		break;
	}

	return error;
}

EXPORT_SYMBOL(locks_mandatory_area);

/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock **before, int arg)
{
	struct file_lock *fl = *before;
	int error = assign_type(fl, arg);

	if (error)
		return error;
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK)
		locks_delete_lock(before);
	return 0;
}

EXPORT_SYMBOL(lease_modify);

static void time_out_leases(struct inode *inode)
{
	struct file_lock **before;
	struct file_lock *fl;

	before = &inode->i_flock;
	while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
		if ((fl->fl_break_time == 0)
				|| time_before(jiffies, fl->fl_break_time)) {
			before = &fl->fl_next;
			continue;
		}
		lease_modify(before, fl->fl_type & ~F_INPROGRESS);
		if (fl == *before)	/* lease_modify may have freed fl */
			before = &fl->fl_next;
	}
}

/**
 * __break_lease - revoke all outstanding leases on file
 * @inode: the inode of the file to return
 * @mode: the open mode (read or write)
 *
 * break_lease (inlined for speed) has checked there already is at least
 * some kind of lock (maybe a lease) on this file. Leases are broken on
 * a call to open() or truncate(). This function can sleep unless you
 * specified %O_NONBLOCK to your open().
 */
int __break_lease(struct inode *inode, unsigned int mode)
{
	int error = 0, future;
	struct file_lock *new_fl, *flock;
	struct file_lock *fl;
	unsigned long break_time;
	int i_have_this_lease = 0;
	int want_write = (mode & O_ACCMODE) != O_RDONLY;

	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);

	lock_flocks();

	time_out_leases(inode);

	flock = inode->i_flock;
	if ((flock == NULL) || !IS_LEASE(flock))
		goto out;

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
		if (fl->fl_owner == current->files)
			i_have_this_lease = 1;

	if (want_write) {
		/* If we want write access, we have to revoke any lease. */
		future = F_UNLCK | F_INPROGRESS;
	} else if (flock->fl_type & F_INPROGRESS) {
		/* If the lease is already being broken, we just leave it */
		future = flock->fl_type;
	} else if (flock->fl_type & F_WRLCK) {
		/* Downgrade the exclusive lease to a read-only lease. */
		future = F_RDLCK | F_INPROGRESS;
	} else {
		/* the existing lease was read-only, so we can read too. */
		goto out;
	}

	if (IS_ERR(new_fl) && !i_have_this_lease
			&& ((mode & O_NONBLOCK) == 0)) {
		error = PTR_ERR(new_fl);
		goto out;
	}

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* so that 0 means no break time */
	}

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
		if (fl->fl_type != future) {
			fl->fl_type = future;
			fl->fl_break_time = break_time;
			/* lease must have lmops break callback */
			fl->fl_lmops->lm_break(fl);
		}
	}

	if (i_have_this_lease || (mode & O_NONBLOCK)) {
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	break_time = flock->fl_break_time;
	if (break_time != 0) {
		break_time -= jiffies;
		if (break_time == 0)
			break_time++;
	}
	locks_insert_block(flock, new_fl);
	unlock_flocks();
	error = wait_event_interruptible_timeout(new_fl->fl_wait,
						 !new_fl->fl_next, break_time);
	lock_flocks();
	__locks_delete_block(new_fl);
	if (error >= 0) {
		if (error == 0)
			time_out_leases(inode);
		/* Wait for the next lease that has not been broken yet */
		for (flock = inode->i_flock; flock && IS_LEASE(flock);
		     flock = flock->fl_next) {
			if (flock->fl_type & F_INPROGRESS)
				goto restart;
		}
		error = 0;
	}

out:
	unlock_flocks();
	if (!IS_ERR(new_fl))
		locks_free_lock(new_fl);
	return error;
}

EXPORT_SYMBOL(__break_lease);

/**
 * lease_get_mtime - get the last modified time of an inode
 * @inode: the inode
 * @time: pointer to a timespec which will contain the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases. The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
	struct file_lock *flock = inode->i_flock;
	if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
		*time = current_fs_time(inode->i_sb);
	else
		*time = inode->i_mtime;
}

EXPORT_SYMBOL(lease_get_mtime);

/**
 * fcntl_getlease - Enquire what lease is currently active
 * @filp: the file
 *
 * The value returned by this function will be one of
 * (if no lease break is pending):
 *
 * %F_RDLCK to indicate a shared lease is held.
 *
 * %F_WRLCK to indicate an exclusive lease is held.
 *
 * %F_UNLCK to indicate no lease is held.
 *
 * (if a lease break is pending):
 *
 * %F_RDLCK to indicate an exclusive lease needs to be
 *		changed to a shared lease (or removed).
 *
 * %F_UNLCK to indicate the lease needs to be removed.
 *
 * XXX: sfr & willy disagree over whether F_INPROGRESS
 * should be returned to userspace.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	int type = F_UNLCK;

	lock_flocks();
	time_out_leases(filp->f_path.dentry->d_inode);
	for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
	     fl = fl->fl_next) {
		if (fl->fl_file == filp) {
			type = fl->fl_type & ~F_INPROGRESS;
			break;
		}
	}
	unlock_flocks();
	return type;
}

/**
 * generic_setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @flp: input - file_lock to use, output - file_lock inserted
 *
 * The (input) flp->fl_lmops->lm_break function is required
 * by break_lease().
 *
 * Called with file_lock_lock held.
 */
int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
{
	struct file_lock *fl, **before, **my_before = NULL, *lease;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int error, rdlease_count = 0, wrlease_count = 0;

	lease = *flp;

	error = -EACCES;
	if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
		goto out;
	error = -EINVAL;
	if (!S_ISREG(inode->i_mode))
		goto out;
	error = security_file_lock(filp, arg);
	if (error)
		goto out;

	time_out_leases(inode);

	BUG_ON(!(*flp)->fl_lmops->lm_break);

	if (arg != F_UNLCK) {
		error = -EAGAIN;
		if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
			goto out;
		if ((arg == F_WRLCK)
		    && ((dentry->d_count > 1)
			|| (atomic_read(&inode->i_count) > 1)))
			goto out;
	}

	/*
	 * At this point, we know that if there is an exclusive
	 * lease on this file, then we hold it on this filp
	 * (otherwise our open of this file would have blocked).
	 * And if we are trying to acquire an exclusive lease,
	 * then the file is not open by anyone (including us)
	 * except for this filp.
	 */
	for (before = &inode->i_flock;
	     ((fl = *before) != NULL) && IS_LEASE(fl);
	     before = &fl->fl_next) {
		if (fl->fl_file == filp)
			my_before = before;
		else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
			/*
			 * Someone is in the process of opening this
			 * file for writing so we may not take an
			 * exclusive lease on it.
			 */
			wrlease_count++;
		else
			rdlease_count++;
	}

	error = -EAGAIN;
	if ((arg == F_RDLCK && (wrlease_count > 0)) ||
	    (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
		goto out;

	if (my_before != NULL) {
		error = lease->fl_lmops->lm_change(my_before, arg);
		if (!error)
			*flp = *my_before;
		goto out;
	}

	if (arg == F_UNLCK)
		goto out;

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_insert_lock(before, lease);
	return 0;

out:
	return error;
}
EXPORT_SYMBOL(generic_setlease);

static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
{
	if (filp->f_op && filp->f_op->setlease)
		return filp->f_op->setlease(filp, arg, lease);
	else
		return generic_setlease(filp, arg, lease);
}

/**
 * vfs_setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @lease: file_lock to use
 *
 * Call this to establish a lease on the file.
 * The (*lease)->fl_lmops->lm_break operation must be set; if not,
 * break_lease will oops!
 *
 * This will call the filesystem's setlease file method, if
 * defined. Note that there is no getlease method; instead, the
 * filesystem setlease method should call back to setlease() to
 * add a lease to the inode's lease list, where fcntl_getlease() can
 * find it. Since fcntl_getlease() only reports whether the current
 * task holds a lease, a cluster filesystem need only do this for
 * leases held by processes on this node.
 *
 * There is also no break_lease method; filesystems that
 * handle their own leases should break leases themselves from the
 * filesystem's open, create, and (on truncate) setattr methods.
 *
 * Warning: the only current setlease methods exist only to disable
 * leases in certain cases. More vfs changes may be required to
 * allow a full filesystem lease implementation.
 */
int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
{
	int error;

	lock_flocks();
	error = __vfs_setlease(filp, arg, lease);
	unlock_flocks();

	return error;
}
EXPORT_SYMBOL_GPL(vfs_setlease);
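
/*
 * A minimal sketch of the kind of ->setlease method the warning above
 * alludes to (illustrative only; the name is made up, not a real
 * in-tree filesystem): a filesystem that cannot support leases simply
 * refuses them, so vfs_setlease() never falls through to
 * generic_setlease().
 *
 *	static int example_fs_setlease(struct file *filp, long arg,
 *				       struct file_lock **flp)
 *	{
 *		return -EINVAL;
 *	}
 */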

static int do_fcntl_delete_lease(struct file *filp)
{
	struct file_lock fl, *flp = &fl;

	lease_init(filp, F_UNLCK, flp);

	return vfs_setlease(filp, F_UNLCK, &flp);
}

static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock *fl, *ret;
	struct fasync_struct *new;
	int error;

	fl = lease_alloc(filp, arg);
	if (IS_ERR(fl))
		return PTR_ERR(fl);

	new = fasync_alloc();
	if (!new) {
		locks_free_lock(fl);
		return -ENOMEM;
	}
	ret = fl;
	lock_flocks();
	error = __vfs_setlease(filp, arg, &ret);
	if (error) {
		unlock_flocks();
		locks_free_lock(fl);
		goto out_free_fasync;
	}
	if (ret != fl)
		locks_free_lock(fl);

	/*
	 * fasync_insert_entry() returns the old entry if any.
	 * If there was no old entry, then it used 'new' and
	 * inserted it into the fasync list. Clear new so that
	 * we don't release it here.
	 */
	if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
		new = NULL;

	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
	unlock_flocks();

out_free_fasync:
	if (new)
		fasync_free(new);
	return error;
}

/**
 * fcntl_setlease - sets a lease on an open file
 * @fd: open file descriptor
 * @filp: file pointer
 * @arg: type of lease to obtain
 *
 * Call this fcntl to establish a lease on the file.
 * Note that you also need to call %F_SETSIG to
 * receive a signal when the lease is broken.
 */
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
	if (arg == F_UNLCK)
		return do_fcntl_delete_lease(filp);
	return do_fcntl_add_lease(fd, filp, arg);
}
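
/*
 * Seen from userspace the sequence looks roughly like this (a sketch,
 * not part of the kernel source):
 *
 *	int fd = open("somefile", O_RDONLY);
 *	fcntl(fd, F_SETSIG, SIGRTMIN);	   (signal to send on lease break)
 *	fcntl(fd, F_SETLEASE, F_RDLCK);	   (take a read lease)
 *	...
 *	fcntl(fd, F_SETLEASE, F_UNLCK);	   (give it back)
 */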

/**
 * flock_lock_file_wait - Apply a FLOCK-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a FLOCK style lock to a file.
 */
int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = flock_lock_file(filp, fl);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}

EXPORT_SYMBOL(flock_lock_file_wait);

/**
 * sys_flock: - flock() system call.
 * @fd: the file descriptor to lock.
 * @cmd: the type of lock to apply.
 *
 * Apply a %FL_FLOCK style lock to an open file descriptor.
 * The @cmd can be one of
 *
 * %LOCK_SH -- a shared lock.
 *
 * %LOCK_EX -- an exclusive lock.
 *
 * %LOCK_UN -- remove an existing lock.
 *
 * %LOCK_MAND -- a `mandatory' flock. This exists to emulate Windows Share Modes.
 *
 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
 * processes read and write access respectively.
 */
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
	struct file *filp;
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	if (!unlock && !(cmd & LOCK_MAND) &&
	    !(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
		goto out_putf;

	error = flock_make_lock(filp, &lock, cmd);
	if (error)
		goto out_putf;
	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(filp, lock->fl_type);
	if (error)
		goto out_free;

	if (filp->f_op && filp->f_op->flock)
		error = filp->f_op->flock(filp,
					  (can_sleep) ? F_SETLKW : F_SETLK,
					  lock);
	else
		error = flock_lock_file_wait(filp, lock);

 out_free:
	locks_free_lock(lock);

 out_putf:
	fput(filp);
 out:
	return error;
}
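
/*
 * Typical userspace usage (illustrative only):
 *
 *	int fd = open("somefile", O_RDWR);
 *	if (flock(fd, LOCK_EX) == 0) {		(blocks until granted)
 *		...				(exclusive access here)
 *		flock(fd, LOCK_UN);
 *	}
 *	if (flock(fd, LOCK_SH | LOCK_NB) < 0 && errno == EWOULDBLOCK)
 *		...				(someone holds LOCK_EX)
 */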

/**
 * vfs_test_lock - test file byte range lock
 * @filp: The file to test lock for
 * @fl: The lock to test; also used to hold result
 *
 * Returns -ERRNO on failure. Indicates presence of conflicting lock by
 * setting conf->fl_type to something other than F_UNLCK.
 */
int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op && filp->f_op->lock)
		return filp->f_op->lock(filp, F_GETLK, fl);
	posix_test_lock(filp, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_test_lock);

static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
#if BITS_PER_LONG == 32
	/*
	 * Make sure we can represent the posix lock via
	 * legacy 32bit flock.
	 */
	if (fl->fl_start > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
#endif
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
	return 0;
}
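
/*
 * The l_len encoding mirrors flock_to_posix_lock(): a lock covering
 * bytes [90, 99] is reported as l_start = 90, l_len = 10, while a lock
 * that extends to OFFSET_MAX (end of file) is reported with l_len = 0.
 */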

#if BITS_PER_LONG == 32
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
}
#endif

/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk(struct file *filp, struct flock __user *l)
{
	struct file_lock file_lock;
	struct flock flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK) {
		error = posix_lock_to_flock(&flock, &file_lock);
		if (error)
			goto out;
	}
	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;
out:
	return error;
}
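
/*
 * F_GETLK from userspace, for illustration (a sketch, not kernel code):
 * fill in the lock you would like to take and read back who, if
 * anyone, would block it.
 *
 *	struct flock fl = {
 *		.l_type = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start = 0,
 *		.l_len = 0,		(0 means "to end of file")
 *	};
 *	fcntl(fd, F_GETLK, &fl);
 *	if (fl.l_type == F_UNLCK)
 *		...			(nothing conflicts)
 *	else
 *		...			(fl.l_pid holds a conflicting lock)
 */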

/**
 * vfs_lock_file - file byte range lock
 * @filp: The file to apply the lock to
 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
 * @fl: The lock to be applied
 * @conf: Place to return a copy of the conflicting lock, if found.
 *
 * A caller that doesn't care about the conflicting lock may pass NULL
 * as the final argument.
 *
 * If the filesystem defines a private ->lock() method, then @conf will
 * be left unchanged; so a caller that cares should initialize it to
 * some acceptable default.
 *
 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
 * locks, the ->lock() interface may return asynchronously, before the lock has
 * been granted or denied by the underlying filesystem, if (and only if)
 * lm_grant is set. Callers expecting ->lock() to return asynchronously
 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
 * the request is for a blocking lock. When ->lock() does return asynchronously,
 * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
 * request completes.
 * If the request is for a non-blocking lock the file system should return
 * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
 * with the result. If the request timed out the callback routine will return a
 * nonzero return code and the file system should release the lock. The file
 * system is also responsible for keeping a corresponding posix lock when it
 * grants a lock so the VFS can find out which locks are locally held and do
 * the correct lock cleanup when required.
 * The underlying filesystem must not drop the kernel lock or call
 * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
 * return code.
 */
int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
{
	if (filp->f_op && filp->f_op->lock)
		return filp->f_op->lock(filp, cmd, fl);
	else
		return posix_lock_file(filp, fl, conf);
}
EXPORT_SYMBOL_GPL(vfs_lock_file);

static int do_lock_file_wait(struct file *filp, unsigned int cmd,
			     struct file_lock *fl)
{
	int error;

	error = security_file_lock(filp, fl->fl_type);
	if (error)
		return error;

	for (;;) {
		error = vfs_lock_file(filp, cmd, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}

	return error;
}

/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock __user *l)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct flock flock;
	struct inode *inode;
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/*
	 * This might block, so we do it before checking the inode.
	 */
	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;

	inode = filp->f_path.dentry->d_inode;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

again:
	error = flock_to_posix_lock(filp, file_lock, &flock);
	if (error)
		goto out;
	if (cmd == F_SETLKW) {
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = -EBADF;
	switch (flock.l_type) {
	case F_RDLCK:
		if (!(filp->f_mode & FMODE_READ))
			goto out;
		break;
	case F_WRLCK:
		if (!(filp->f_mode & FMODE_WRITE))
			goto out;
		break;
	case F_UNLCK:
		break;
	default:
		error = -EINVAL;
		goto out;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by
	 * releasing the lock that was just acquired.
	 */
	/*
	 * we need that spin_lock here - it prevents reordering between
	 * update of inode->i_flock and check for it done in close().
	 * rcu_read_lock() wouldn't do.
	 */
	spin_lock(&current->files->file_lock);
	f = fcheck(fd);
	spin_unlock(&current->files->file_lock);
	if (!error && f != filp && flock.l_type != F_UNLCK) {
		flock.l_type = F_UNLCK;
		goto again;
	}

out:
	locks_free_lock(file_lock);
	return error;
}
1862
1863#if BITS_PER_LONG == 32
1864/* Report the first existing lock that would conflict with l.
1865 * This implements the F_GETLK command of fcntl().
1866 */
1867int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
1868{
1869 struct file_lock file_lock;
1870 struct flock64 flock;
1871 int error;
1872
1873 error = -EFAULT;
1874 if (copy_from_user(&flock, l, sizeof(flock)))
1875 goto out;
1876 error = -EINVAL;
1877 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1878 goto out;
1879
1880 error = flock64_to_posix_lock(filp, &file_lock, &flock);
1881 if (error)
1882 goto out;
1883
1884 error = vfs_test_lock(filp, &file_lock);
1885 if (error)
1886 goto out;
1887
1888 flock.l_type = file_lock.fl_type;
1889 if (file_lock.fl_type != F_UNLCK)
1890 posix_lock_to_flock64(&flock, &file_lock);
1891
1892 error = -EFAULT;
1893 if (!copy_to_user(l, &flock, sizeof(flock)))
1894 error = 0;
1895
1896out:
1897 return error;
1898}
1899
1900/* Apply the lock described by l to an open file descriptor.
1901 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
1902 */
1903int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
1904 struct flock64 __user *l)
1905{
1906 struct file_lock *file_lock = locks_alloc_lock();
1907 struct flock64 flock;
1908 struct inode *inode;
1909 struct file *f;
1910 int error;
1911
1912 if (file_lock == NULL)
1913 return -ENOLCK;
1914
1915 /*
1916 * This might block, so we do it before checking the inode.
1917 */
1918 error = -EFAULT;
1919 if (copy_from_user(&flock, l, sizeof(flock)))
1920 goto out;
1921
1922 inode = filp->f_path.dentry->d_inode;
1923
1924 /* Don't allow mandatory locks on files that may be memory mapped
1925 * and shared.
1926 */
1927 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1928 error = -EAGAIN;
1929 goto out;
1930 }
1931
1932again:
1933 error = flock64_to_posix_lock(filp, file_lock, &flock);
1934 if (error)
1935 goto out;
1936 if (cmd == F_SETLKW64) {
1937 file_lock->fl_flags |= FL_SLEEP;
1938 }
1939
1940 error = -EBADF;
1941 switch (flock.l_type) {
1942 case F_RDLCK:
1943 if (!(filp->f_mode & FMODE_READ))
1944 goto out;
1945 break;
1946 case F_WRLCK:
1947 if (!(filp->f_mode & FMODE_WRITE))
1948 goto out;
1949 break;
1950 case F_UNLCK:
1951 break;
1952 default:
1953 error = -EINVAL;
1954 goto out;
1955 }
1956
1957 error = do_lock_file_wait(filp, cmd, file_lock);
1958
1959 /*
1960 * Attempt to detect a close/fcntl race and recover by
1961 * releasing the lock that was just acquired.
1962 */
1963 spin_lock(¤t->files->file_lock);
1964 f = fcheck(fd);
1965 spin_unlock(¤t->files->file_lock);
1966 if (!error && f != filp && flock.l_type != F_UNLCK) {
1967 flock.l_type = F_UNLCK;
1968 goto again;
1969 }
1970
1971out:
1972 locks_free_lock(file_lock);
1973 return error;
1974}
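/*
 * Companion sketch for the set side (again userspace, assuming fd is
 * open for writing): a blocking whole-file write lock, which reaches
 * fcntl_setlk64() as F_SETLKW64 on the same 32-bit builds:
 *
 *	struct flock wl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,
 *	};
 *	if (fcntl(fd, F_SETLKW, &wl) == -1)
 *		perror("fcntl");	// e.g. EDEADLK or EINTR
 */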
1975#endif /* BITS_PER_LONG == 32 */
1976
1977/*
1978 * This function is called when the file is being removed
1979 * from the task's fd array. POSIX locks belonging to this task
1980 * are deleted at this time.
1981 */
1982void locks_remove_posix(struct file *filp, fl_owner_t owner)
1983{
1984 struct file_lock lock;
1985
1986 /*
1987 * If there are no locks held on this file, we don't need to call
1988 * posix_lock_file(). Another process could be setting a lock on this
1989 * file at the same time, but we wouldn't remove that lock anyway.
1990 */
1991 if (!filp->f_path.dentry->d_inode->i_flock)
1992 return;
1993
1994 lock.fl_type = F_UNLCK;
1995 lock.fl_flags = FL_POSIX | FL_CLOSE;
1996 lock.fl_start = 0;
1997 lock.fl_end = OFFSET_MAX;
1998 lock.fl_owner = owner;
1999 lock.fl_pid = current->tgid;
2000 lock.fl_file = filp;
2001 lock.fl_ops = NULL;
2002 lock.fl_lmops = NULL;
2003
2004 vfs_lock_file(filp, F_SETLK, &lock, NULL);
2005
2006 if (lock.fl_ops && lock.fl_ops->fl_release_private)
2007 lock.fl_ops->fl_release_private(&lock);
2008}
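/*
 * For orientation (a sketch, not an exhaustive list of callers):
 * fs/open.c:filp_close() calls locks_remove_posix(filp, id) with the
 * file table as the owner, which is what makes the F_UNLCK request
 * built above match every POSIX lock this task holds on the file.
 */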
2009
2010EXPORT_SYMBOL(locks_remove_posix);
2011
2012/*
2013 * This function is called on the last close of an open file.
2014 */
2015void locks_remove_flock(struct file *filp)
2016{
2017 struct inode * inode = filp->f_path.dentry->d_inode;
2018 struct file_lock *fl;
2019 struct file_lock **before;
2020
2021 if (!inode->i_flock)
2022 return;
2023
2024 if (filp->f_op && filp->f_op->flock) {
2025 struct file_lock fl = {
2026 .fl_pid = current->tgid,
2027 .fl_file = filp,
2028 .fl_flags = FL_FLOCK,
2029 .fl_type = F_UNLCK,
2030 .fl_end = OFFSET_MAX,
2031 };
2032 filp->f_op->flock(filp, F_SETLKW, &fl);
2033 if (fl.fl_ops && fl.fl_ops->fl_release_private)
2034 fl.fl_ops->fl_release_private(&fl);
2035 }
2036
2037 lock_flocks();
2038 before = &inode->i_flock;
2039
2040 while ((fl = *before) != NULL) {
2041 if (fl->fl_file == filp) {
2042 if (IS_FLOCK(fl)) {
2043 locks_delete_lock(before);
2044 continue;
2045 }
2046 if (IS_LEASE(fl)) {
2047 lease_modify(before, F_UNLCK);
2048 continue;
2049 }
2050 /* What? */
2051 BUG();
2052 }
2053 before = &fl->fl_next;
2054 }
2055 unlock_flocks();
2056}
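/*
 * For orientation (a sketch): __fput() in fs/file_table.c calls
 * locks_remove_flock() when the last reference to the struct file
 * goes away; FLOCK locks and leases are keyed on the filp, so this
 * is the point where they are torn down.
 */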
2057
2058/**
2059 * posix_unblock_lock - stop waiting for a file lock
2060 * @filp: how the file was opened
2061 * @waiter: the lock which was waiting
2062 *
2063 * lockd needs to block waiting for locks.
2064 */
2065int
2066posix_unblock_lock(struct file *filp, struct file_lock *waiter)
2067{
2068 int status = 0;
2069
2070 lock_flocks();
2071 if (waiter->fl_next)
2072 __locks_delete_block(waiter);
2073 else
2074 status = -ENOENT;
2075 unlock_flocks();
2076 return status;
2077}
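/*
 * Typical use (a sketch): when an NLM CANCEL or a signalled wait ends
 * a blocking request, lockd calls posix_unblock_lock() to pull the
 * waiter off the blocker's list; -ENOENT means the waiter was no
 * longer blocked (fl_next was already NULL).
 */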
2078
2079EXPORT_SYMBOL(posix_unblock_lock);
2080
2081/**
2082 * vfs_cancel_lock - file byte range unblock lock
2083 * @filp: The file to apply the unblock to
2084 * @fl: The lock to be unblocked
2085 *
2086 * Used by lock managers to cancel blocked requests
2087 */
2088int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2089{
2090 if (filp->f_op && filp->f_op->lock)
2091 return filp->f_op->lock(filp, F_CANCELLK, fl);
2092 return 0;
2093}
2094
2095EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2096
2097#ifdef CONFIG_PROC_FS
2098#include <linux/proc_fs.h>
2099#include <linux/seq_file.h>
2100
2101static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2102 loff_t id, char *pfx)
2103{
2104 struct inode *inode = NULL;
2105 unsigned int fl_pid;
2106
2107 if (fl->fl_nspid)
2108 fl_pid = pid_vnr(fl->fl_nspid);
2109 else
2110 fl_pid = fl->fl_pid;
2111
2112 if (fl->fl_file != NULL)
2113 inode = fl->fl_file->f_path.dentry->d_inode;
2114
2115 seq_printf(f, "%lld:%s ", id, pfx);
2116 if (IS_POSIX(fl)) {
2117 seq_printf(f, "%6s %s ",
2118 (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
2119 (inode == NULL) ? "*NOINODE*" :
2120 mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
2121 } else if (IS_FLOCK(fl)) {
2122 if (fl->fl_type & LOCK_MAND) {
2123 seq_printf(f, "FLOCK MSNFS ");
2124 } else {
2125 seq_printf(f, "FLOCK ADVISORY ");
2126 }
2127 } else if (IS_LEASE(fl)) {
2128 seq_printf(f, "LEASE ");
2129 if (fl->fl_type & F_INPROGRESS)
2130 seq_printf(f, "BREAKING ");
2131 else if (fl->fl_file)
2132 seq_printf(f, "ACTIVE ");
2133 else
2134 seq_printf(f, "BREAKER ");
2135 } else {
2136 seq_printf(f, "UNKNOWN UNKNOWN ");
2137 }
2138 if (fl->fl_type & LOCK_MAND) {
2139 seq_printf(f, "%s ",
2140 (fl->fl_type & LOCK_READ)
2141 ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ "
2142 : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2143 } else {
2144 seq_printf(f, "%s ",
2145 (fl->fl_type & F_INPROGRESS)
2146 ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
2147 : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
2148 }
2149 if (inode) {
2150#ifdef WE_CAN_BREAK_LSLK_NOW
2151 seq_printf(f, "%d %s:%ld ", fl_pid,
2152 inode->i_sb->s_id, inode->i_ino);
2153#else
2154 /* userspace relies on this representation of dev_t ;-( */
2155 seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
2156 MAJOR(inode->i_sb->s_dev),
2157 MINOR(inode->i_sb->s_dev), inode->i_ino);
2158#endif
2159 } else {
2160 seq_printf(f, "%d <none>:0 ", fl_pid);
2161 }
2162 if (IS_POSIX(fl)) {
2163 if (fl->fl_end == OFFSET_MAX)
2164 seq_printf(f, "%Ld EOF\n", fl->fl_start);
2165 else
2166 seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2167 } else {
2168 seq_printf(f, "0 EOF\n");
2169 }
2170}
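/*
 * Example of the resulting /proc/locks lines (illustrative values
 * only; spacing follows the format strings above):
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:81921 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 1235 08:01:81922 0 EOF
 *	2: -> FLOCK  ADVISORY  WRITE 1236 08:01:81922 0 EOF
 *	3: LEASE  ACTIVE    READ  1237 08:01:81923 0 EOF
 *
 * The "->" entries are emitted by locks_show() below for waiters
 * blocked on the preceding lock.
 */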
2171
2172static int locks_show(struct seq_file *f, void *v)
2173{
2174 struct file_lock *fl, *bfl;
2175
2176 fl = list_entry(v, struct file_lock, fl_link);
2177
2178 lock_get_status(f, fl, *((loff_t *)f->private), "");
2179
2180 list_for_each_entry(bfl, &fl->fl_block, fl_block)
2181 lock_get_status(f, bfl, *((loff_t *)f->private), " ->");
2182
2183 return 0;
2184}
2185
2186static void *locks_start(struct seq_file *f, loff_t *pos)
2187{
2188 loff_t *p = f->private;
2189
2190 lock_flocks();
2191 *p = (*pos + 1);
2192 return seq_list_start(&file_lock_list, *pos);
2193}
2194
2195static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2196{
2197 loff_t *p = f->private;
2198 ++*p;
2199 return seq_list_next(v, &file_lock_list, pos);
2200}
2201
2202static void locks_stop(struct seq_file *f, void *v)
2203{
2204 unlock_flocks();
2205}
2206
2207static const struct seq_operations locks_seq_operations = {
2208 .start = locks_start,
2209 .next = locks_next,
2210 .stop = locks_stop,
2211 .show = locks_show,
2212};
2213
2214static int locks_open(struct inode *inode, struct file *filp)
2215{
2216 return seq_open_private(filp, &locks_seq_operations, sizeof(loff_t));
2217}
2218
2219static const struct file_operations proc_locks_operations = {
2220 .open = locks_open,
2221 .read = seq_read,
2222 .llseek = seq_lseek,
2223 .release = seq_release_private,
2224};
2225
2226static int __init proc_locks_init(void)
2227{
2228 proc_create("locks", 0, NULL, &proc_locks_operations);
2229 return 0;
2230}
2231module_init(proc_locks_init);
2232#endif
2233
2234/**
2235 * lock_may_read - checks that the region is free of locks
2236 * @inode: the inode that is being read
2237 * @start: the first byte to read
2238 * @len: the number of bytes to read
2239 *
2240 * Emulates Windows locking requirements. Whole-file
2241 * mandatory locks (share modes) can prohibit a read and
2242 * byte-range POSIX locks can prohibit a read if they overlap.
2243 *
2244 * N.B. this function is only ever called
2245 * from knfsd and ownership of locks is never checked.
2246 */
2247int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
2248{
2249 struct file_lock *fl;
2250 int result = 1;
2251 lock_flocks();
2252 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2253 if (IS_POSIX(fl)) {
2254 if (fl->fl_type == F_RDLCK)
2255 continue;
2256 if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2257 continue;
2258 } else if (IS_FLOCK(fl)) {
2259 if (!(fl->fl_type & LOCK_MAND))
2260 continue;
2261 if (fl->fl_type & LOCK_READ)
2262 continue;
2263 } else
2264 continue;
2265 result = 0;
2266 break;
2267 }
2268 unlock_flocks();
2269 return result;
2270}
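/*
 * Caller-side sketch (hypothetical shape; knfsd's actual wrappers
 * differ):
 *
 *	if (!lock_may_read(inode, offset, count))
 *		return -EACCES;	// region covered by a conflicting lock
 *
 * lock_may_write() below is the mirror image for writes, except that
 * overlapping read locks conflict as well.
 */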
2271
2272EXPORT_SYMBOL(lock_may_read);
2273
2274/**
2275 * lock_may_write - checks that the region is free of locks
2276 * @inode: the inode that is being written
2277 * @start: the first byte to write
2278 * @len: the number of bytes to write
2279 *
2280 * Emulates Windows locking requirements. Whole-file
2281 * mandatory locks (share modes) can prohibit a write and
2282 * byte-range POSIX locks can prohibit a write if they overlap.
2283 *
2284 * N.B. this function is only ever called
2285 * from knfsd and ownership of locks is never checked.
2286 */
2287int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
2288{
2289 struct file_lock *fl;
2290 int result = 1;
2291 lock_flocks();
2292 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2293 if (IS_POSIX(fl)) {
2294 if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2295 continue;
2296 } else if (IS_FLOCK(fl)) {
2297 if (!(fl->fl_type & LOCK_MAND))
2298 continue;
2299 if (fl->fl_type & LOCK_WRITE)
2300 continue;
2301 } else
2302 continue;
2303 result = 0;
2304 break;
2305 }
2306 unlock_flocks();
2307 return result;
2308}
2309
2310EXPORT_SYMBOL(lock_may_write);
2311
2312static int __init filelock_init(void)
2313{
2314 filelock_cache = kmem_cache_create("file_lock_cache",
2315 sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2316
2317 return 0;
2318}
2319
2320core_initcall(filelock_init);
1/*
2 * linux/fs/locks.c
3 *
4 * Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
5 * Doug Evans (dje@spiff.uucp), August 07, 1992
6 *
7 * Deadlock detection added.
8 * FIXME: one thing isn't handled yet:
9 * - mandatory locks (requires lots of changes elsewhere)
10 * Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
11 *
12 * Miscellaneous edits, and a total rewrite of posix_lock_file() code.
13 * Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
14 *
15 * Converted file_lock_table to a linked list from an array, which eliminates
16 * the limits on how many active file locks are open.
17 * Chad Page (pageone@netcom.com), November 27, 1994
18 *
19 * Removed dependency on file descriptors. dup()'ed file descriptors now
20 * get the same locks as the original file descriptors, and a close() on
21 * any file descriptor removes ALL the locks on the file for the current
22 * process. Since locks still depend on the process id, locks are inherited
23 * after an exec() but not after a fork(). This agrees with POSIX, and both
24 * BSD and SVR4 practice.
25 * Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
26 *
27 * Scrapped free list which is redundant now that we allocate locks
28 * dynamically with kmalloc()/kfree().
29 * Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
30 *
31 * Implemented two lock personalities - FL_FLOCK and FL_POSIX.
32 *
33 * FL_POSIX locks are created with calls to fcntl() and lockf() through the
34 * fcntl() system call. They have the semantics described above.
35 *
36 * FL_FLOCK locks are created with calls to flock(), through the flock()
37 * system call, which is new. Old C libraries implement flock() via fcntl()
38 * and will continue to use the old, broken implementation.
39 *
40 * FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
41 * with a file pointer (filp). As a result they can be shared by a parent
42 * process and its children after a fork(). They are removed when the last
43 * file descriptor referring to the file pointer is closed (unless explicitly
44 * unlocked).
45 *
46 * FL_FLOCK locks never deadlock, an existing lock is always removed before
47 * upgrading from shared to exclusive (or vice versa). When this happens
48 * any processes blocked by the current lock are woken up and allowed to
49 * run before the new lock is applied.
50 * Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
51 *
52 * Removed some race conditions in flock_lock_file(), marked other possible
53 * races. Just grep for FIXME to see them.
54 * Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
55 *
56 * Addressed Dmitry's concerns. Deadlock checking no longer recursive.
57 * Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
58 * once we've checked for blocking and deadlocking.
59 * Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
60 *
61 * Initial implementation of mandatory locks. SunOS turned out to be
62 * a rotten model, so I implemented the "obvious" semantics.
63 * See 'Documentation/filesystems/mandatory-locking.txt' for details.
64 * Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
65 *
66 * Don't allow mandatory locks on mmap()'ed files. Added simple functions to
67 * check if a file has mandatory locks, used by mmap(), open() and creat() to
68 * see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
69 * Manual, Section 2.
70 * Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
71 *
72 * Tidied up block list handling. Added '/proc/locks' interface.
73 * Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
74 *
75 * Fixed deadlock condition for pathological code that mixes calls to
76 * flock() and fcntl().
77 * Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
78 *
79 * Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
80 * for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
81 * guarantee sensible behaviour in the case where file system modules might
82 * be compiled with different options than the kernel itself.
83 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
84 *
85 * Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
86 * (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
87 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
88 *
89 * Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
90 * locks. Changed process synchronisation to avoid dereferencing locks that
91 * have already been freed.
92 * Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
93 *
94 * Made the block list a circular list to minimise searching in the list.
95 * Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
96 *
97 * Made mandatory locking a mount option. Default is not to allow mandatory
98 * locking.
99 * Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
100 *
101 * Some adaptations for NFS support.
102 * Olaf Kirch (okir@monad.swb.de), Dec 1996,
103 *
104 * Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
105 * Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
106 *
107 * Use slab allocator instead of kmalloc/kfree.
108 * Use generic list implementation from <linux/list.h>.
109 * Sped up posix_locks_deadlock by only considering blocked locks.
110 * Matthew Wilcox <willy@debian.org>, March, 2000.
111 *
112 * Leases and LOCK_MAND
113 * Matthew Wilcox <willy@debian.org>, June, 2000.
114 * Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
115 */
116
117#include <linux/capability.h>
118#include <linux/file.h>
119#include <linux/fdtable.h>
120#include <linux/fs.h>
121#include <linux/init.h>
122#include <linux/module.h>
123#include <linux/security.h>
124#include <linux/slab.h>
125#include <linux/syscalls.h>
126#include <linux/time.h>
127#include <linux/rcupdate.h>
128#include <linux/pid_namespace.h>
129
130#include <asm/uaccess.h>
131
132#define IS_POSIX(fl) (fl->fl_flags & FL_POSIX)
133#define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK)
134#define IS_LEASE(fl) (fl->fl_flags & FL_LEASE)
135
136static bool lease_breaking(struct file_lock *fl)
137{
138 return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
139}
140
141static int target_leasetype(struct file_lock *fl)
142{
143 if (fl->fl_flags & FL_UNLOCK_PENDING)
144 return F_UNLCK;
145 if (fl->fl_flags & FL_DOWNGRADE_PENDING)
146 return F_RDLCK;
147 return fl->fl_type;
148}
149
150int leases_enable = 1;
151int lease_break_time = 45;
152
153#define for_each_lock(inode, lockp) \
154 for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
155
156static LIST_HEAD(file_lock_list);
157static LIST_HEAD(blocked_list);
158static DEFINE_SPINLOCK(file_lock_lock);
159
160/*
161 * Protects the two list heads above, plus the inode->i_flock list
162 */
163void lock_flocks(void)
164{
165 spin_lock(&file_lock_lock);
166}
167EXPORT_SYMBOL_GPL(lock_flocks);
168
169void unlock_flocks(void)
170{
171 spin_unlock(&file_lock_lock);
172}
173EXPORT_SYMBOL_GPL(unlock_flocks);
174
175static struct kmem_cache *filelock_cache __read_mostly;
176
177static void locks_init_lock_heads(struct file_lock *fl)
178{
179 INIT_LIST_HEAD(&fl->fl_link);
180 INIT_LIST_HEAD(&fl->fl_block);
181 init_waitqueue_head(&fl->fl_wait);
182}
183
184/* Allocate an empty lock structure. */
185struct file_lock *locks_alloc_lock(void)
186{
187 struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
188
189 if (fl)
190 locks_init_lock_heads(fl);
191
192 return fl;
193}
194EXPORT_SYMBOL_GPL(locks_alloc_lock);
195
196void locks_release_private(struct file_lock *fl)
197{
198 if (fl->fl_ops) {
199 if (fl->fl_ops->fl_release_private)
200 fl->fl_ops->fl_release_private(fl);
201 fl->fl_ops = NULL;
202 }
203 if (fl->fl_lmops) {
204 if (fl->fl_lmops->lm_release_private)
205 fl->fl_lmops->lm_release_private(fl);
206 fl->fl_lmops = NULL;
207 }
208
209}
210EXPORT_SYMBOL_GPL(locks_release_private);
211
212/* Free a lock which is not in use. */
213void locks_free_lock(struct file_lock *fl)
214{
215 BUG_ON(waitqueue_active(&fl->fl_wait));
216 BUG_ON(!list_empty(&fl->fl_block));
217 BUG_ON(!list_empty(&fl->fl_link));
218
219 locks_release_private(fl);
220 kmem_cache_free(filelock_cache, fl);
221}
222EXPORT_SYMBOL(locks_free_lock);
223
224void locks_init_lock(struct file_lock *fl)
225{
226 memset(fl, 0, sizeof(struct file_lock));
227 locks_init_lock_heads(fl);
228}
229
230EXPORT_SYMBOL(locks_init_lock);
231
232static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
233{
234 if (fl->fl_ops) {
235 if (fl->fl_ops->fl_copy_lock)
236 fl->fl_ops->fl_copy_lock(new, fl);
237 new->fl_ops = fl->fl_ops;
238 }
239 if (fl->fl_lmops)
240 new->fl_lmops = fl->fl_lmops;
241}
242
243/*
244 * Initialize a new lock from an existing file_lock structure.
245 */
246void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
247{
248 new->fl_owner = fl->fl_owner;
249 new->fl_pid = fl->fl_pid;
250 new->fl_file = NULL;
251 new->fl_flags = fl->fl_flags;
252 new->fl_type = fl->fl_type;
253 new->fl_start = fl->fl_start;
254 new->fl_end = fl->fl_end;
255 new->fl_ops = NULL;
256 new->fl_lmops = NULL;
257}
258EXPORT_SYMBOL(__locks_copy_lock);
259
260void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
261{
262 locks_release_private(new);
263
264 __locks_copy_lock(new, fl);
265 new->fl_file = fl->fl_file;
266 new->fl_ops = fl->fl_ops;
267 new->fl_lmops = fl->fl_lmops;
268
269 locks_copy_private(new, fl);
270}
271
272EXPORT_SYMBOL(locks_copy_lock);
273
274static inline int flock_translate_cmd(int cmd) {
275 if (cmd & LOCK_MAND)
276 return cmd & (LOCK_MAND | LOCK_RW);
277 switch (cmd) {
278 case LOCK_SH:
279 return F_RDLCK;
280 case LOCK_EX:
281 return F_WRLCK;
282 case LOCK_UN:
283 return F_UNLCK;
284 }
285 return -EINVAL;
286}
287
288/* Fill in a file_lock structure with an appropriate FLOCK lock. */
289static int flock_make_lock(struct file *filp, struct file_lock **lock,
290 unsigned int cmd)
291{
292 struct file_lock *fl;
293 int type = flock_translate_cmd(cmd);
294 if (type < 0)
295 return type;
296
297 fl = locks_alloc_lock();
298 if (fl == NULL)
299 return -ENOMEM;
300
301 fl->fl_file = filp;
302 fl->fl_pid = current->tgid;
303 fl->fl_flags = FL_FLOCK;
304 fl->fl_type = type;
305 fl->fl_end = OFFSET_MAX;
306
307 *lock = fl;
308 return 0;
309}
310
311static int assign_type(struct file_lock *fl, long type)
312{
313 switch (type) {
314 case F_RDLCK:
315 case F_WRLCK:
316 case F_UNLCK:
317 fl->fl_type = type;
318 break;
319 default:
320 return -EINVAL;
321 }
322 return 0;
323}
324
325/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
326 * style lock.
327 */
328static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
329 struct flock *l)
330{
331 off_t start, end;
332
333 switch (l->l_whence) {
334 case SEEK_SET:
335 start = 0;
336 break;
337 case SEEK_CUR:
338 start = filp->f_pos;
339 break;
340 case SEEK_END:
341 start = i_size_read(filp->f_path.dentry->d_inode);
342 break;
343 default:
344 return -EINVAL;
345 }
346
347 /* POSIX-1996 leaves the case l->l_len < 0 undefined;
348 POSIX-2001 defines it. */
349 start += l->l_start;
350 if (start < 0)
351 return -EINVAL;
352 fl->fl_end = OFFSET_MAX;
353 if (l->l_len > 0) {
354 end = start + l->l_len - 1;
355 fl->fl_end = end;
356 } else if (l->l_len < 0) {
357 end = start - 1;
358 fl->fl_end = end;
359 start += l->l_len;
360 if (start < 0)
361 return -EINVAL;
362 }
363 fl->fl_start = start; /* we record the absolute position */
364 if (fl->fl_end < fl->fl_start)
365 return -EOVERFLOW;
366
367 fl->fl_owner = current->files;
368 fl->fl_pid = current->tgid;
369 fl->fl_file = filp;
370 fl->fl_flags = FL_POSIX;
371 fl->fl_ops = NULL;
372 fl->fl_lmops = NULL;
373
374 return assign_type(fl, l->l_type);
375}
376
377#if BITS_PER_LONG == 32
378static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
379 struct flock64 *l)
380{
381 loff_t start;
382
383 switch (l->l_whence) {
384 case SEEK_SET:
385 start = 0;
386 break;
387 case SEEK_CUR:
388 start = filp->f_pos;
389 break;
390 case SEEK_END:
391 start = i_size_read(filp->f_path.dentry->d_inode);
392 break;
393 default:
394 return -EINVAL;
395 }
396
397 start += l->l_start;
398 if (start < 0)
399 return -EINVAL;
400 fl->fl_end = OFFSET_MAX;
401 if (l->l_len > 0) {
402 fl->fl_end = start + l->l_len - 1;
403 } else if (l->l_len < 0) {
404 fl->fl_end = start - 1;
405 start += l->l_len;
406 if (start < 0)
407 return -EINVAL;
408 }
409 fl->fl_start = start; /* we record the absolute position */
410 if (fl->fl_end < fl->fl_start)
411 return -EOVERFLOW;
412
413 fl->fl_owner = current->files;
414 fl->fl_pid = current->tgid;
415 fl->fl_file = filp;
416 fl->fl_flags = FL_POSIX;
417 fl->fl_ops = NULL;
418 fl->fl_lmops = NULL;
419
420 return assign_type(fl, l->l_type);
421}
422#endif
423
424/* default lease lock manager operations */
425static void lease_break_callback(struct file_lock *fl)
426{
427 kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
428}
429
430static void lease_release_private_callback(struct file_lock *fl)
431{
432 if (!fl->fl_file)
433 return;
434
435 f_delown(fl->fl_file);
436 fl->fl_file->f_owner.signum = 0;
437}
438
439static const struct lock_manager_operations lease_manager_ops = {
440 .lm_break = lease_break_callback,
441 .lm_release_private = lease_release_private_callback,
442 .lm_change = lease_modify,
443};
444
445/*
446 * Initialize a lease, use the default lock manager operations
447 */
448static int lease_init(struct file *filp, long type, struct file_lock *fl)
449 {
450 if (assign_type(fl, type) != 0)
451 return -EINVAL;
452
453 fl->fl_owner = current->files;
454 fl->fl_pid = current->tgid;
455
456 fl->fl_file = filp;
457 fl->fl_flags = FL_LEASE;
458 fl->fl_start = 0;
459 fl->fl_end = OFFSET_MAX;
460 fl->fl_ops = NULL;
461 fl->fl_lmops = &lease_manager_ops;
462 return 0;
463}
464
465/* Allocate a file_lock initialised to this type of lease */
466static struct file_lock *lease_alloc(struct file *filp, long type)
467{
468 struct file_lock *fl = locks_alloc_lock();
469 int error = -ENOMEM;
470
471 if (fl == NULL)
472 return ERR_PTR(error);
473
474 error = lease_init(filp, type, fl);
475 if (error) {
476 locks_free_lock(fl);
477 return ERR_PTR(error);
478 }
479 return fl;
480}
481
482/* Check if two locks overlap each other.
483 */
484static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
485{
486 return ((fl1->fl_end >= fl2->fl_start) &&
487 (fl2->fl_end >= fl1->fl_start));
488}
489
490/*
491 * Check whether two locks have the same owner.
492 */
493static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
494{
495 if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
496 return fl2->fl_lmops == fl1->fl_lmops &&
497 fl1->fl_lmops->lm_compare_owner(fl1, fl2);
498 return fl1->fl_owner == fl2->fl_owner;
499}
500
501/* Remove waiter from blocker's block list.
502 * When blocker ends up pointing to itself then the list is empty.
503 */
504static void __locks_delete_block(struct file_lock *waiter)
505{
506 list_del_init(&waiter->fl_block);
507 list_del_init(&waiter->fl_link);
508 waiter->fl_next = NULL;
509}
510
511/*
512 */
513void locks_delete_block(struct file_lock *waiter)
514{
515 lock_flocks();
516 __locks_delete_block(waiter);
517 unlock_flocks();
518}
519EXPORT_SYMBOL(locks_delete_block);
520
521/* Insert waiter into blocker's block list.
522 * We use a circular list so that processes can be easily woken up in
523 * the order they blocked. The documentation doesn't require this but
524 * it seems like the reasonable thing to do.
525 */
526static void locks_insert_block(struct file_lock *blocker,
527 struct file_lock *waiter)
528{
529 BUG_ON(!list_empty(&waiter->fl_block));
530 list_add_tail(&waiter->fl_block, &blocker->fl_block);
531 waiter->fl_next = blocker;
532 if (IS_POSIX(blocker))
533 list_add(&waiter->fl_link, &blocked_list);
534}
535
536/* Wake up processes blocked waiting for blocker.
537 * If told to wait then schedule the processes until the block list
538 * is empty, otherwise empty the block list ourselves.
539 */
540static void locks_wake_up_blocks(struct file_lock *blocker)
541{
542 while (!list_empty(&blocker->fl_block)) {
543 struct file_lock *waiter;
544
545 waiter = list_first_entry(&blocker->fl_block,
546 struct file_lock, fl_block);
547 __locks_delete_block(waiter);
548 if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
549 waiter->fl_lmops->lm_notify(waiter);
550 else
551 wake_up(&waiter->fl_wait);
552 }
553}
554
555/* Insert file lock fl into an inode's lock list at the position indicated
556 * by pos. At the same time add the lock to the global file lock list.
557 */
558static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
559{
560 list_add(&fl->fl_link, &file_lock_list);
561
562 fl->fl_nspid = get_pid(task_tgid(current));
563
564 /* insert into file's list */
565 fl->fl_next = *pos;
566 *pos = fl;
567}
568
569/*
570 * Delete a lock and then free it.
571 * Wake up processes that are blocked waiting for this lock,
572 * notify the FS that the lock has been cleared and
573 * finally free the lock.
574 */
575static void locks_delete_lock(struct file_lock **thisfl_p)
576{
577 struct file_lock *fl = *thisfl_p;
578
579 *thisfl_p = fl->fl_next;
580 fl->fl_next = NULL;
581 list_del_init(&fl->fl_link);
582
583 fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
584 if (fl->fl_fasync != NULL) {
585 printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
586 fl->fl_fasync = NULL;
587 }
588
589 if (fl->fl_nspid) {
590 put_pid(fl->fl_nspid);
591 fl->fl_nspid = NULL;
592 }
593
594 locks_wake_up_blocks(fl);
595 locks_free_lock(fl);
596}
597
598/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
599 * checks for shared/exclusive status of overlapping locks.
600 */
601static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
602{
603 if (sys_fl->fl_type == F_WRLCK)
604 return 1;
605 if (caller_fl->fl_type == F_WRLCK)
606 return 1;
607 return 0;
608}
609
610/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
611 * checking before calling the locks_conflict().
612 */
613static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
614{
615 /* POSIX locks owned by the same process do not conflict with
616 * each other.
617 */
618 if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
619 return (0);
620
621 /* Check whether they overlap */
622 if (!locks_overlap(caller_fl, sys_fl))
623 return 0;
624
625 return (locks_conflict(caller_fl, sys_fl));
626}
627
628/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
629 * checking before calling the locks_conflict().
630 */
631static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
632{
633 /* FLOCK locks referring to the same filp do not conflict with
634 * each other.
635 */
636 if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
637 return (0);
638 if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
639 return 0;
640
641 return (locks_conflict(caller_fl, sys_fl));
642}
643
644void
645posix_test_lock(struct file *filp, struct file_lock *fl)
646{
647 struct file_lock *cfl;
648
649 lock_flocks();
650 for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
651 if (!IS_POSIX(cfl))
652 continue;
653 if (posix_locks_conflict(fl, cfl))
654 break;
655 }
656 if (cfl) {
657 __locks_copy_lock(fl, cfl);
658 if (cfl->fl_nspid)
659 fl->fl_pid = pid_vnr(cfl->fl_nspid);
660 } else
661 fl->fl_type = F_UNLCK;
662 unlock_flocks();
663 return;
664}
665EXPORT_SYMBOL(posix_test_lock);
666
667/*
668 * Deadlock detection:
669 *
670 * We attempt to detect deadlocks that are due purely to posix file
671 * locks.
672 *
673 * We assume that a task can be waiting for at most one lock at a time.
674 * So for any acquired lock, the process holding that lock may be
675 * waiting on at most one other lock. That lock in turns may be held by
676 * someone waiting for at most one other lock. Given a requested lock
677 * caller_fl which is about to wait for a conflicting lock block_fl, we
678 * follow this chain of waiters to ensure we are not about to create a
679 * cycle.
680 *
681 * Since we do this before we ever put a process to sleep on a lock, we
682 * are ensured that there is never a cycle; that is what guarantees that
683 * the while() loop in posix_locks_deadlock() eventually completes.
684 *
685 * Note: the above assumption may not be true when handling lock
686 * requests from a broken NFS client. It may also fail in the presence
687 * of tasks (such as posix threads) sharing the same open file table.
688 *
689 * To handle those cases, we just bail out after a few iterations.
690 */
691
692#define MAX_DEADLK_ITERATIONS 10
693
694/* Find a lock that the owner of the given block_fl is blocking on. */
695static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
696{
697 struct file_lock *fl;
698
699 list_for_each_entry(fl, &blocked_list, fl_link) {
700 if (posix_same_owner(fl, block_fl))
701 return fl->fl_next;
702 }
703 return NULL;
704}
705
706static int posix_locks_deadlock(struct file_lock *caller_fl,
707 struct file_lock *block_fl)
708{
709 int i = 0;
710
711 while ((block_fl = what_owner_is_waiting_for(block_fl))) {
712 if (i++ > MAX_DEADLK_ITERATIONS)
713 return 0;
714 if (posix_same_owner(caller_fl, block_fl))
715 return 1;
716 }
717 return 0;
718}
719
720/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
721 * after any leases, but before any posix locks.
722 *
723 * Note that if called with an FL_EXISTS argument, the caller may determine
724 * whether or not a lock was successfully freed by testing the return
725 * value for -ENOENT.
726 */
727static int flock_lock_file(struct file *filp, struct file_lock *request)
728{
729 struct file_lock *new_fl = NULL;
730 struct file_lock **before;
731 struct inode * inode = filp->f_path.dentry->d_inode;
732 int error = 0;
733 int found = 0;
734
735 if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
736 new_fl = locks_alloc_lock();
737 if (!new_fl)
738 return -ENOMEM;
739 }
740
741 lock_flocks();
742 if (request->fl_flags & FL_ACCESS)
743 goto find_conflict;
744
745 for_each_lock(inode, before) {
746 struct file_lock *fl = *before;
747 if (IS_POSIX(fl))
748 break;
749 if (IS_LEASE(fl))
750 continue;
751 if (filp != fl->fl_file)
752 continue;
753 if (request->fl_type == fl->fl_type)
754 goto out;
755 found = 1;
756 locks_delete_lock(before);
757 break;
758 }
759
760 if (request->fl_type == F_UNLCK) {
761 if ((request->fl_flags & FL_EXISTS) && !found)
762 error = -ENOENT;
763 goto out;
764 }
765
766 /*
767 * If a higher-priority process was blocked on the old file lock,
768 * give it the opportunity to lock the file.
769 */
770 if (found) {
771 unlock_flocks();
772 cond_resched();
773 lock_flocks();
774 }
775
776find_conflict:
777 for_each_lock(inode, before) {
778 struct file_lock *fl = *before;
779 if (IS_POSIX(fl))
780 break;
781 if (IS_LEASE(fl))
782 continue;
783 if (!flock_locks_conflict(request, fl))
784 continue;
785 error = -EAGAIN;
786 if (!(request->fl_flags & FL_SLEEP))
787 goto out;
788 error = FILE_LOCK_DEFERRED;
789 locks_insert_block(fl, request);
790 goto out;
791 }
792 if (request->fl_flags & FL_ACCESS)
793 goto out;
794 locks_copy_lock(new_fl, request);
795 locks_insert_lock(before, new_fl);
796 new_fl = NULL;
797 error = 0;
798
799out:
800 unlock_flocks();
801 if (new_fl)
802 locks_free_lock(new_fl);
803 return error;
804}
805
806static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
807{
808 struct file_lock *fl;
809 struct file_lock *new_fl = NULL;
810 struct file_lock *new_fl2 = NULL;
811 struct file_lock *left = NULL;
812 struct file_lock *right = NULL;
813 struct file_lock **before;
814 int error, added = 0;
815
816 /*
817 * We may need two file_lock structures for this operation,
818 * so we get them in advance to avoid races.
819 *
820 * In some cases we can be sure, that no new locks will be needed
821 */
822 if (!(request->fl_flags & FL_ACCESS) &&
823 (request->fl_type != F_UNLCK ||
824 request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
825 new_fl = locks_alloc_lock();
826 new_fl2 = locks_alloc_lock();
827 }
828
829 lock_flocks();
830 if (request->fl_type != F_UNLCK) {
831 for_each_lock(inode, before) {
832 fl = *before;
833 if (!IS_POSIX(fl))
834 continue;
835 if (!posix_locks_conflict(request, fl))
836 continue;
837 if (conflock)
838 __locks_copy_lock(conflock, fl);
839 error = -EAGAIN;
840 if (!(request->fl_flags & FL_SLEEP))
841 goto out;
842 error = -EDEADLK;
843 if (posix_locks_deadlock(request, fl))
844 goto out;
845 error = FILE_LOCK_DEFERRED;
846 locks_insert_block(fl, request);
847 goto out;
848 }
849 }
850
851 /* If we're just looking for a conflict, we're done. */
852 error = 0;
853 if (request->fl_flags & FL_ACCESS)
854 goto out;
855
856 /*
857 * Find the first old lock with the same owner as the new lock.
858 */
859
860 before = &inode->i_flock;
861
862 /* First skip locks owned by other processes. */
863 while ((fl = *before) && (!IS_POSIX(fl) ||
864 !posix_same_owner(request, fl))) {
865 before = &fl->fl_next;
866 }
867
868 /* Process locks with this owner. */
869 while ((fl = *before) && posix_same_owner(request, fl)) {
870 /* Detect adjacent or overlapping regions (if same lock type)
871 */
872 if (request->fl_type == fl->fl_type) {
873 /* In all comparisons of start vs end, use
874 * "start - 1" rather than "end + 1". If end
875 * is OFFSET_MAX, end + 1 will become negative.
876 */
877 if (fl->fl_end < request->fl_start - 1)
878 goto next_lock;
879 /* If the next lock in the list has entirely bigger
880 * addresses than the new one, insert the lock here.
881 */
882 if (fl->fl_start - 1 > request->fl_end)
883 break;
884
885 /* If we come here, the new and old lock are of the
886 * same type and adjacent or overlapping. Make one
887 * lock yielding from the lower start address of both
888 * locks to the higher end address.
889 */
890 if (fl->fl_start > request->fl_start)
891 fl->fl_start = request->fl_start;
892 else
893 request->fl_start = fl->fl_start;
894 if (fl->fl_end < request->fl_end)
895 fl->fl_end = request->fl_end;
896 else
897 request->fl_end = fl->fl_end;
898 if (added) {
899 locks_delete_lock(before);
900 continue;
901 }
902 request = fl;
903 added = 1;
904 }
905 else {
906 /* Processing for different lock types is a bit
907 * more complex.
908 */
909 if (fl->fl_end < request->fl_start)
910 goto next_lock;
911 if (fl->fl_start > request->fl_end)
912 break;
913 if (request->fl_type == F_UNLCK)
914 added = 1;
915 if (fl->fl_start < request->fl_start)
916 left = fl;
917 /* If the next lock in the list has a higher end
918 * address than the new one, insert the new one here.
919 */
920 if (fl->fl_end > request->fl_end) {
921 right = fl;
922 break;
923 }
924 if (fl->fl_start >= request->fl_start) {
925 /* The new lock completely replaces an old
926 * one (This may happen several times).
927 */
928 if (added) {
929 locks_delete_lock(before);
930 continue;
931 }
932 /* Replace the old lock with the new one.
933 * Wake up anybody waiting for the old one,
934 * as the change in lock type might satisfy
935 * their needs.
936 */
937 locks_wake_up_blocks(fl);
938 fl->fl_start = request->fl_start;
939 fl->fl_end = request->fl_end;
940 fl->fl_type = request->fl_type;
941 locks_release_private(fl);
942 locks_copy_private(fl, request);
943 request = fl;
944 added = 1;
945 }
946 }
947 /* Go on to next lock.
948 */
949 next_lock:
950 before = &fl->fl_next;
951 }
952
953 /*
954 * The above code only modifies existing locks in case of
955 * merging or replacing. If new lock(s) need to be inserted
956 * all modifications are done bellow this, so it's safe yet to
957 * bail out.
958 */
959 error = -ENOLCK; /* "no luck" */
960 if (right && left == right && !new_fl2)
961 goto out;
962
963 error = 0;
964 if (!added) {
965 if (request->fl_type == F_UNLCK) {
966 if (request->fl_flags & FL_EXISTS)
967 error = -ENOENT;
968 goto out;
969 }
970
971 if (!new_fl) {
972 error = -ENOLCK;
973 goto out;
974 }
975 locks_copy_lock(new_fl, request);
976 locks_insert_lock(before, new_fl);
977 new_fl = NULL;
978 }
979 if (right) {
980 if (left == right) {
981 /* The new lock breaks the old one in two pieces,
982 * so we have to use the second new lock.
983 */
984 left = new_fl2;
985 new_fl2 = NULL;
986 locks_copy_lock(left, right);
987 locks_insert_lock(before, left);
988 }
989 right->fl_start = request->fl_end + 1;
990 locks_wake_up_blocks(right);
991 }
992 if (left) {
993 left->fl_end = request->fl_start - 1;
994 locks_wake_up_blocks(left);
995 }
996 out:
997 unlock_flocks();
998 /*
999 * Free any unused locks.
1000 */
1001 if (new_fl)
1002 locks_free_lock(new_fl);
1003 if (new_fl2)
1004 locks_free_lock(new_fl2);
1005 return error;
1006}
1007
1008/**
1009 * posix_lock_file - Apply a POSIX-style lock to a file
1010 * @filp: The file to apply the lock to
1011 * @fl: The lock to be applied
1012 * @conflock: Place to return a copy of the conflicting lock, if found.
1013 *
1014 * Add a POSIX style lock to a file.
1015 * We merge adjacent & overlapping locks whenever possible.
1016 * POSIX locks are sorted by owner task, then by starting address
1017 *
1018 * Note that if called with an FL_EXISTS argument, the caller may determine
1019 * whether or not a lock was successfully freed by testing the return
1020 * value for -ENOENT.
1021 */
1022int posix_lock_file(struct file *filp, struct file_lock *fl,
1023 struct file_lock *conflock)
1024{
1025 return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
1026}
1027EXPORT_SYMBOL(posix_lock_file);
1028
1029/**
1030 * posix_lock_file_wait - Apply a POSIX-style lock to a file
1031 * @filp: The file to apply the lock to
1032 * @fl: The lock to be applied
1033 *
1034 * Add a POSIX style lock to a file.
1035 * We merge adjacent & overlapping locks whenever possible.
1036 * POSIX locks are sorted by owner task, then by starting address
1037 */
1038int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1039{
1040 int error;
1041 might_sleep ();
1042 for (;;) {
1043 error = posix_lock_file(filp, fl, NULL);
1044 if (error != FILE_LOCK_DEFERRED)
1045 break;
1046 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1047 if (!error)
1048 continue;
1049
1050 locks_delete_block(fl);
1051 break;
1052 }
1053 return error;
1054}
1055EXPORT_SYMBOL(posix_lock_file_wait);
1056
1057/**
1058 * locks_mandatory_locked - Check for an active lock
1059 * @inode: the file to check
1060 *
1061 * Searches the inode's list of locks to find any POSIX locks which conflict.
1062 * This function is called from locks_verify_locked() only.
1063 */
1064int locks_mandatory_locked(struct inode *inode)
1065{
1066 fl_owner_t owner = current->files;
1067 struct file_lock *fl;
1068
1069 /*
1070 * Search the lock list for this inode for any POSIX locks.
1071 */
1072 lock_flocks();
1073 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
1074 if (!IS_POSIX(fl))
1075 continue;
1076 if (fl->fl_owner != owner)
1077 break;
1078 }
1079 unlock_flocks();
1080 return fl ? -EAGAIN : 0;
1081}
1082
1083/**
1084 * locks_mandatory_area - Check for a conflicting lock
1085 * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
1086 * for shared
1087 * @inode: the file to check
1088 * @filp: how the file was opened (if it was)
1089 * @offset: start of area to check
1090 * @count: length of area to check
1091 *
1092 * Searches the inode's list of locks to find any POSIX locks which conflict.
1093 * This function is called from rw_verify_area() and
1094 * locks_verify_truncate().
1095 */
1096int locks_mandatory_area(int read_write, struct inode *inode,
1097 struct file *filp, loff_t offset,
1098 size_t count)
1099{
1100 struct file_lock fl;
1101 int error;
1102
1103 locks_init_lock(&fl);
1104 fl.fl_owner = current->files;
1105 fl.fl_pid = current->tgid;
1106 fl.fl_file = filp;
1107 fl.fl_flags = FL_POSIX | FL_ACCESS;
1108 if (filp && !(filp->f_flags & O_NONBLOCK))
1109 fl.fl_flags |= FL_SLEEP;
1110 fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
1111 fl.fl_start = offset;
1112 fl.fl_end = offset + count - 1;
1113
1114 for (;;) {
1115 error = __posix_lock_file(inode, &fl, NULL);
1116 if (error != FILE_LOCK_DEFERRED)
1117 break;
1118 error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
1119 if (!error) {
1120 /*
1121 * If we've been sleeping someone might have
1122 * changed the permissions behind our back.
1123 */
1124 if (__mandatory_lock(inode))
1125 continue;
1126 }
1127
1128 locks_delete_block(&fl);
1129 break;
1130 }
1131
1132 return error;
1133}
1134
1135EXPORT_SYMBOL(locks_mandatory_area);
1136
1137static void lease_clear_pending(struct file_lock *fl, int arg)
1138{
1139 switch (arg) {
1140 case F_UNLCK:
1141 fl->fl_flags &= ~FL_UNLOCK_PENDING;
1142 /* fall through: */
1143 case F_RDLCK:
1144 fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
1145 }
1146}
1147
1148/* We already had a lease on this file; just change its type */
1149int lease_modify(struct file_lock **before, int arg)
1150{
1151 struct file_lock *fl = *before;
1152 int error = assign_type(fl, arg);
1153
1154 if (error)
1155 return error;
1156 lease_clear_pending(fl, arg);
1157 locks_wake_up_blocks(fl);
1158 if (arg == F_UNLCK)
1159 locks_delete_lock(before);
1160 return 0;
1161}
1162
1163EXPORT_SYMBOL(lease_modify);
1164
1165static bool past_time(unsigned long then)
1166{
1167 if (!then)
1168 /* 0 is a special value meaning "this never expires": */
1169 return false;
1170 return time_after(jiffies, then);
1171}
1172
1173static void time_out_leases(struct inode *inode)
1174{
1175 struct file_lock **before;
1176 struct file_lock *fl;
1177
1178 before = &inode->i_flock;
1179 while ((fl = *before) && IS_LEASE(fl) && lease_breaking(fl)) {
1180 if (past_time(fl->fl_downgrade_time))
1181 lease_modify(before, F_RDLCK);
1182 if (past_time(fl->fl_break_time))
1183 lease_modify(before, F_UNLCK);
1184 if (fl == *before) /* lease_modify may have freed fl */
1185 before = &fl->fl_next;
1186 }
1187}
1188
1189/**
1190 * __break_lease - revoke all outstanding leases on file
1191 * @inode: the inode of the file to return
1192 * @mode: the open mode (read or write)
1193 *
1194 * break_lease (inlined for speed) has checked there already is at least
1195 * some kind of lock (maybe a lease) on this file. Leases are broken on
1196 * a call to open() or truncate(). This function can sleep unless you
1197 * specified %O_NONBLOCK to your open().
1198 */
1199int __break_lease(struct inode *inode, unsigned int mode)
1200{
1201 int error = 0;
1202 struct file_lock *new_fl, *flock;
1203 struct file_lock *fl;
1204 unsigned long break_time;
1205 int i_have_this_lease = 0;
1206 int want_write = (mode & O_ACCMODE) != O_RDONLY;
1207
1208 new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1209 if (IS_ERR(new_fl))
1210 return PTR_ERR(new_fl);
1211
1212 lock_flocks();
1213
1214 time_out_leases(inode);
1215
1216 flock = inode->i_flock;
1217 if ((flock == NULL) || !IS_LEASE(flock))
1218 goto out;
1219
1220 if (!locks_conflict(flock, new_fl))
1221 goto out;
1222
1223 for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
1224 if (fl->fl_owner == current->files)
1225 i_have_this_lease = 1;
1226
1227 break_time = 0;
1228 if (lease_break_time > 0) {
1229 break_time = jiffies + lease_break_time * HZ;
1230 if (break_time == 0)
1231 break_time++; /* so that 0 means no break time */
1232 }
1233
1234 for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
1235 if (want_write) {
1236 if (fl->fl_flags & FL_UNLOCK_PENDING)
1237 continue;
1238 fl->fl_flags |= FL_UNLOCK_PENDING;
1239 fl->fl_break_time = break_time;
1240 } else {
1241 if (lease_breaking(flock))
1242 continue;
1243 fl->fl_flags |= FL_DOWNGRADE_PENDING;
1244 fl->fl_downgrade_time = break_time;
1245 }
1246 fl->fl_lmops->lm_break(fl);
1247 }
1248
1249 if (i_have_this_lease || (mode & O_NONBLOCK)) {
1250 error = -EWOULDBLOCK;
1251 goto out;
1252 }
1253
1254restart:
1255 break_time = flock->fl_break_time;
1256 if (break_time != 0) {
1257 break_time -= jiffies;
1258 if (break_time == 0)
1259 break_time++;
1260 }
1261 locks_insert_block(flock, new_fl);
1262 unlock_flocks();
1263 error = wait_event_interruptible_timeout(new_fl->fl_wait,
1264 !new_fl->fl_next, break_time);
1265 lock_flocks();
1266 __locks_delete_block(new_fl);
1267 if (error >= 0) {
1268 if (error == 0)
1269 time_out_leases(inode);
1270 /*
1271 * Wait for the next conflicting lease that has not been
1272 * broken yet
1273 */
1274 for (flock = inode->i_flock; flock && IS_LEASE(flock);
1275 flock = flock->fl_next) {
1276 if (locks_conflict(new_fl, flock))
1277 goto restart;
1278 }
1279 error = 0;
1280 }
1281
1282out:
1283 unlock_flocks();
1284 locks_free_lock(new_fl);
1285 return error;
1286}
1287
1288EXPORT_SYMBOL(__break_lease);
1289
1290/**
1291 * lease_get_mtime - get the last modified time of an inode
1292 * @inode: the inode
1293 * @time: pointer to a timespec which will contain the last modified time
1294 *
1295 * This is to force NFS clients to flush their caches for files with
1296 * exclusive leases. The justification is that if someone has an
1297 * exclusive lease, then they could be modifying it.
1298 */
1299void lease_get_mtime(struct inode *inode, struct timespec *time)
1300{
1301 struct file_lock *flock = inode->i_flock;
1302 if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
1303 *time = current_fs_time(inode->i_sb);
1304 else
1305 *time = inode->i_mtime;
1306}
1307
1308EXPORT_SYMBOL(lease_get_mtime);
1309
1310/**
1311 * fcntl_getlease - Enquire what lease is currently active
1312 * @filp: the file
1313 *
1314 * The value returned by this function will be one of
1315 * (if no lease break is pending):
1316 *
1317 * %F_RDLCK to indicate a shared lease is held.
1318 *
1319 * %F_WRLCK to indicate an exclusive lease is held.
1320 *
1321 * %F_UNLCK to indicate no lease is held.
1322 *
1323 * (if a lease break is pending):
1324 *
1325 * %F_RDLCK to indicate an exclusive lease needs to be
1326 * changed to a shared lease (or removed).
1327 *
1328 * %F_UNLCK to indicate the lease needs to be removed.
1329 *
1330 * XXX: sfr & willy disagree over whether F_INPROGRESS
1331 * should be returned to userspace.
1332 */
1333int fcntl_getlease(struct file *filp)
1334{
1335 struct file_lock *fl;
1336 int type = F_UNLCK;
1337
1338 lock_flocks();
1339 time_out_leases(filp->f_path.dentry->d_inode);
1340 for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
1341 fl = fl->fl_next) {
1342 if (fl->fl_file == filp) {
1343 type = target_leasetype(fl);
1344 break;
1345 }
1346 }
1347 unlock_flocks();
1348 return type;
1349}
1350
1351int generic_add_lease(struct file *filp, long arg, struct file_lock **flp)
1352{
1353 struct file_lock *fl, **before, **my_before = NULL, *lease;
1354 struct dentry *dentry = filp->f_path.dentry;
1355 struct inode *inode = dentry->d_inode;
1356 int error;
1357
1358 lease = *flp;
1359
1360 error = -EAGAIN;
1361 if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
1362 goto out;
1363 if ((arg == F_WRLCK)
1364 && ((dentry->d_count > 1)
1365 || (atomic_read(&inode->i_count) > 1)))
1366 goto out;
1367
1368 /*
1369 * At this point, we know that if there is an exclusive
1370 * lease on this file, then we hold it on this filp
1371 * (otherwise our open of this file would have blocked).
1372 * And if we are trying to acquire an exclusive lease,
1373 * then the file is not open by anyone (including us)
1374 * except for this filp.
1375 */
1376 error = -EAGAIN;
1377 for (before = &inode->i_flock;
1378 ((fl = *before) != NULL) && IS_LEASE(fl);
1379 before = &fl->fl_next) {
1380 if (fl->fl_file == filp) {
1381 my_before = before;
1382 continue;
1383 }
1384 /*
1385 * No exclusive leases if someone else has a lease on
1386 * this file:
1387 */
1388 if (arg == F_WRLCK)
1389 goto out;
1390 /*
1391 * Modifying our existing lease is OK, but no getting a
1392 * new lease if someone else is opening for write:
1393 */
1394 if (fl->fl_flags & FL_UNLOCK_PENDING)
1395 goto out;
1396 }
1397
1398 if (my_before != NULL) {
1399 error = lease->fl_lmops->lm_change(my_before, arg);
1400 if (!error)
1401 *flp = *my_before;
1402 goto out;
1403 }
1404
1405 error = -EINVAL;
1406 if (!leases_enable)
1407 goto out;
1408
1409 locks_insert_lock(before, lease);
1410 return 0;
1411
1412out:
1413 return error;
1414}
1415
1416int generic_delete_lease(struct file *filp, struct file_lock **flp)
1417{
1418 struct file_lock *fl, **before;
1419 struct dentry *dentry = filp->f_path.dentry;
1420 struct inode *inode = dentry->d_inode;
1421
1422 for (before = &inode->i_flock;
1423 ((fl = *before) != NULL) && IS_LEASE(fl);
1424 before = &fl->fl_next) {
1425 if (fl->fl_file != filp)
1426 continue;
1427 return (*flp)->fl_lmops->lm_change(before, F_UNLCK);
1428 }
1429 return -EAGAIN;
1430}
1431
1432/**
1433 * generic_setlease - sets a lease on an open file
1434 * @filp: file pointer
1435 * @arg: type of lease to obtain
1436 * @flp: input - file_lock to use, output - file_lock inserted
1437 *
1438 * The (input) flp->fl_lmops->lm_break function is required
1439 * by break_lease().
1440 *
1441 * Called with file_lock_lock held.
1442 */
1443int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
1444{
1445 struct dentry *dentry = filp->f_path.dentry;
1446 struct inode *inode = dentry->d_inode;
1447 int error;
1448
1449 if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
1450 return -EACCES;
1451 if (!S_ISREG(inode->i_mode))
1452 return -EINVAL;
1453 error = security_file_lock(filp, arg);
1454 if (error)
1455 return error;
1456
1457 time_out_leases(inode);
1458
1459 BUG_ON(!(*flp)->fl_lmops->lm_break);
1460
1461 switch (arg) {
1462 case F_UNLCK:
1463 return generic_delete_lease(filp, flp);
1464 case F_RDLCK:
1465 case F_WRLCK:
1466 return generic_add_lease(filp, arg, flp);
1467 default:
1468 return -EINVAL;
1469 }
1470}
1471EXPORT_SYMBOL(generic_setlease);
1472
1473static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1474{
1475 if (filp->f_op && filp->f_op->setlease)
1476 return filp->f_op->setlease(filp, arg, lease);
1477 else
1478 return generic_setlease(filp, arg, lease);
1479}
1480
1481/**
1482 * vfs_setlease - sets a lease on an open file
1483 * @filp: file pointer
1484 * @arg: type of lease to obtain
1485 * @lease: file_lock to use
1486 *
1487 * Call this to establish a lease on the file.
1488 * The (*lease)->fl_lmops->lm_break operation must be set; if not,
1489 * break_lease will oops!
1490 *
1491 * This will call the filesystem's setlease file method, if
1492 * defined. Note that there is no getlease method; instead, the
1493 * filesystem setlease method should call back to setlease() to
1494 * add a lease to the inode's lease list, where fcntl_getlease() can
1495 * find it. Since fcntl_getlease() only reports whether the current
1496 * task holds a lease, a cluster filesystem need only do this for
1497 * leases held by processes on this node.
1498 *
1499 * There is also no break_lease method; filesystems that
1500 * handle their own leases should break leases themselves from the
1501 * filesystem's open, create, and (on truncate) setattr methods.
1502 *
1503 * Warning: the only current setlease methods exist only to disable
1504 * leases in certain cases. More vfs changes may be required to
1505 * allow a full filesystem lease implementation.
1506 */
1507
1508int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1509{
1510 int error;
1511
1512 lock_flocks();
1513 error = __vfs_setlease(filp, arg, lease);
1514 unlock_flocks();
1515
1516 return error;
1517}
1518EXPORT_SYMBOL_GPL(vfs_setlease);
1519
1520static int do_fcntl_delete_lease(struct file *filp)
1521{
1522 struct file_lock fl, *flp = &fl;
1523
1524 lease_init(filp, F_UNLCK, flp);
1525
1526 return vfs_setlease(filp, F_UNLCK, &flp);
1527}
1528
1529static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1530{
1531 struct file_lock *fl, *ret;
1532 struct fasync_struct *new;
1533 int error;
1534
1535 fl = lease_alloc(filp, arg);
1536 if (IS_ERR(fl))
1537 return PTR_ERR(fl);
1538
1539 new = fasync_alloc();
1540 if (!new) {
1541 locks_free_lock(fl);
1542 return -ENOMEM;
1543 }
1544 ret = fl;
1545 lock_flocks();
1546 error = __vfs_setlease(filp, arg, &ret);
1547 if (error) {
1548 unlock_flocks();
1549 locks_free_lock(fl);
1550 goto out_free_fasync;
1551 }
1552 if (ret != fl)
1553 locks_free_lock(fl);
1554
1555 /*
1556 * fasync_insert_entry() returns the old entry if any.
1557 * If there was no old entry, then it used 'new' and
1558 * inserted it into the fasync list. Clear new so that
1559 * we don't release it here.
1560 */
1561 if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
1562 new = NULL;
1563
1564 error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
1565 unlock_flocks();
1566
1567out_free_fasync:
1568 if (new)
1569 fasync_free(new);
1570 return error;
1571}
1572
1573/**
1574 * fcntl_setlease - sets a lease on an open file
1575 * @fd: open file descriptor
1576 * @filp: file pointer
1577 * @arg: type of lease to obtain
1578 *
1579 * Call this fcntl to establish a lease on the file.
1580 * Note that you also need to call %F_SETSIG to
1581 * receive a signal when the lease is broken.
1582 */
1583int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1584{
1585 if (arg == F_UNLCK)
1586 return do_fcntl_delete_lease(filp);
1587 return do_fcntl_add_lease(fd, filp, arg);
1588}
1589
1590/**
1591 * flock_lock_file_wait - Apply a FLOCK-style lock to a file
1592 * @filp: The file to apply the lock to
1593 * @fl: The lock to be applied
1594 *
1595 * Add a FLOCK style lock to a file.
1596 */
1597int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
1598{
1599 int error;
1600 might_sleep();
1601 for (;;) {
1602 error = flock_lock_file(filp, fl);
1603 if (error != FILE_LOCK_DEFERRED)
1604 break;
1605 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1606 if (!error)
1607 continue;
1608
1609 locks_delete_block(fl);
1610 break;
1611 }
1612 return error;
1613}
1614
1615EXPORT_SYMBOL(flock_lock_file_wait);

/**
 * sys_flock - flock() system call.
 * @fd: the file descriptor to lock.
 * @cmd: the type of lock to apply.
 *
 * Apply a %FL_FLOCK style lock to an open file descriptor.
 * The @cmd can be one of:
 *
 * %LOCK_SH -- a shared lock.
 *
 * %LOCK_EX -- an exclusive lock.
 *
 * %LOCK_UN -- remove an existing lock.
 *
 * %LOCK_MAND -- a 'mandatory' flock. This exists to emulate Windows Share Modes.
 *
 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
 * processes read and write access respectively.
 */
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
	struct file *filp;
	int fput_needed;
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	filp = fget_light(fd, &fput_needed);
	if (!filp)
		goto out;

	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	if (!unlock && !(cmd & LOCK_MAND) &&
	    !(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
		goto out_putf;

	error = flock_make_lock(filp, &lock, cmd);
	if (error)
		goto out_putf;
	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(filp, lock->fl_type);
	if (error)
		goto out_free;

	if (filp->f_op && filp->f_op->flock)
		error = filp->f_op->flock(filp,
					  (can_sleep) ? F_SETLKW : F_SETLK,
					  lock);
	else
		error = flock_lock_file_wait(filp, lock);

 out_free:
	locks_free_lock(lock);

 out_putf:
	fput_light(filp, fput_needed);
 out:
	return error;
}
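
/*
 * Illustrative userspace sketch (not kernel code): serialize access among
 * cooperating processes with flock(). The path is an arbitrary example;
 * LOCK_NB makes the request non-blocking, failing with EWOULDBLOCK when
 * the lock is held by someone else.
 *
 *	int fd = open("/some/lockfile", O_RDONLY);
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1)
 *		perror("flock");
 *	...
 *	flock(fd, LOCK_UN);
 */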

/**
 * vfs_test_lock - test file byte range lock
 * @filp: The file to test lock for
 * @fl: The lock to test; also used to hold result
 *
 * Returns -ERRNO on failure. Indicates presence of conflicting lock by
 * setting fl->fl_type to something other than F_UNLCK.
 */
int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op && filp->f_op->lock)
		return filp->f_op->lock(filp, F_GETLK, fl);
	posix_test_lock(filp, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_test_lock);

static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
#if BITS_PER_LONG == 32
	/*
	 * Make sure we can represent the posix lock via
	 * legacy 32bit flock.
	 */
	if (fl->fl_start > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
#endif
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
	return 0;
}

#if BITS_PER_LONG == 32
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
}
#endif

/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk(struct file *filp, struct flock __user *l)
{
	struct file_lock file_lock;
	struct flock flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK) {
		error = posix_lock_to_flock(&flock, &file_lock);
		if (error)
			goto out;
	}
	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;
out:
	return error;
}
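
/*
 * Illustrative userspace sketch (not kernel code): given an open
 * descriptor fd, probe the whole file for a conflicting lock with
 * F_GETLK. On return, l_type is F_UNLCK if the region is free;
 * otherwise the struct describes the first conflict found.
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,
 *	};
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *		printf("conflict: pid %d\n", (int)fl.l_pid);
 */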

/**
 * vfs_lock_file - file byte range lock
 * @filp: The file to apply the lock to
 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
 * @fl: The lock to be applied
 * @conf: Place to return a copy of the conflicting lock, if found.
 *
 * A caller that doesn't care about the conflicting lock may pass NULL
 * as the final argument.
 *
 * If the filesystem defines a private ->lock() method, then @conf will
 * be left unchanged; so a caller that cares should initialize it to
 * some acceptable default.
 *
 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
 * locks, the ->lock() interface may return asynchronously, before the lock has
 * been granted or denied by the underlying filesystem, if (and only if)
 * lm_grant is set. Callers expecting ->lock() to return asynchronously
 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
 * the request is for a blocking lock. When ->lock() does return asynchronously,
 * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
 * request completes.
 * If the request is for a non-blocking lock, the filesystem should return
 * FILE_LOCK_DEFERRED, then attempt to acquire the lock and call the callback
 * routine with the result. If the request timed out, the callback routine
 * will return a nonzero return code and the filesystem should release the
 * lock. The filesystem is also responsible for keeping a corresponding posix
 * lock when it grants a lock, so the VFS can find out which locks are locally
 * held and do the correct lock cleanup when required.
 * The underlying filesystem must not drop the kernel lock or call
 * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
 * return code.
 */
int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
{
	if (filp->f_op && filp->f_op->lock)
		return filp->f_op->lock(filp, cmd, fl);
	else
		return posix_lock_file(filp, fl, conf);
}
EXPORT_SYMBOL_GPL(vfs_lock_file);
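
/*
 * Illustrative sketch (hypothetical filesystem, not part of this file) of
 * the asynchronous ->lock() convention described above. myfs_lock and
 * myfs_queue_lock_request are made-up names; a real implementation would
 * queue the request and invoke fl->fl_lmops->lm_grant() once the result
 * is known, falling back to synchronous local locking for callers that
 * cannot handle deferral.
 *
 *	static int myfs_lock(struct file *filp, int cmd, struct file_lock *fl)
 *	{
 *		if (!fl->fl_lmops || !fl->fl_lmops->lm_grant)
 *			return posix_lock_file(filp, fl, NULL);
 *		myfs_queue_lock_request(filp, cmd, fl);
 *		return FILE_LOCK_DEFERRED;
 *	}
 */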

static int do_lock_file_wait(struct file *filp, unsigned int cmd,
			     struct file_lock *fl)
{
	int error;

	error = security_file_lock(filp, fl->fl_type);
	if (error)
		return error;

	for (;;) {
		error = vfs_lock_file(filp, cmd, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}

	return error;
}

/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
		struct flock __user *l)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct flock flock;
	struct inode *inode;
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/*
	 * This might block, so we do it before checking the inode.
	 */
	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;

	inode = filp->f_path.dentry->d_inode;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

again:
	error = flock_to_posix_lock(filp, file_lock, &flock);
	if (error)
		goto out;
	if (cmd == F_SETLKW) {
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = -EBADF;
	switch (flock.l_type) {
	case F_RDLCK:
		if (!(filp->f_mode & FMODE_READ))
			goto out;
		break;
	case F_WRLCK:
		if (!(filp->f_mode & FMODE_WRITE))
			goto out;
		break;
	case F_UNLCK:
		break;
	default:
		error = -EINVAL;
		goto out;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by
	 * releasing the lock that was just acquired.
	 */
	/*
	 * we need that spin_lock here - it prevents reordering between
	 * update of inode->i_flock and check for it done in close().
	 * rcu_read_lock() wouldn't do.
	 */
	spin_lock(&current->files->file_lock);
	f = fcheck(fd);
	spin_unlock(&current->files->file_lock);
	if (!error && f != filp && flock.l_type != F_UNLCK) {
		flock.l_type = F_UNLCK;
		goto again;
	}

out:
	locks_free_lock(file_lock);
	return error;
}
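
/*
 * Illustrative userspace sketch (not kernel code): take a write lock on
 * the first 100 bytes of an open descriptor fd, blocking until it can be
 * granted, then release it. The byte range is an arbitrary example.
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 100,
 *	};
 *	if (fcntl(fd, F_SETLKW, &fl) == -1)
 *		perror("F_SETLKW");
 *	fl.l_type = F_UNLCK;
 *	fcntl(fd, F_SETLK, &fl);
 */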

#if BITS_PER_LONG == 32
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
{
	struct file_lock file_lock;
	struct flock64 flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock64_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK)
		posix_lock_to_flock64(&flock, &file_lock);

	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;

out:
	return error;
}

/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
		  struct flock64 __user *l)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct flock64 flock;
	struct inode *inode;
	struct file *f;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/*
	 * This might block, so we do it before checking the inode.
	 */
	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;

	inode = filp->f_path.dentry->d_inode;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
		error = -EAGAIN;
		goto out;
	}

again:
	error = flock64_to_posix_lock(filp, file_lock, &flock);
	if (error)
		goto out;
	if (cmd == F_SETLKW64) {
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = -EBADF;
	switch (flock.l_type) {
	case F_RDLCK:
		if (!(filp->f_mode & FMODE_READ))
			goto out;
		break;
	case F_WRLCK:
		if (!(filp->f_mode & FMODE_WRITE))
			goto out;
		break;
	case F_UNLCK:
		break;
	default:
		error = -EINVAL;
		goto out;
	}

	error = do_lock_file_wait(filp, cmd, file_lock);

	/*
	 * Attempt to detect a close/fcntl race and recover by
	 * releasing the lock that was just acquired.
	 */
	spin_lock(&current->files->file_lock);
	f = fcheck(fd);
	spin_unlock(&current->files->file_lock);
	if (!error && f != filp && flock.l_type != F_UNLCK) {
		flock.l_type = F_UNLCK;
		goto again;
	}

out:
	locks_free_lock(file_lock);
	return error;
}
#endif /* BITS_PER_LONG == 32 */

/*
 * This function is called when the file is being removed
 * from the task's fd array. POSIX locks belonging to this task
 * are deleted at this time.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	struct file_lock lock;

	/*
	 * If there are no locks held on this file, we don't need to call
	 * posix_lock_file(). Another process could be setting a lock on this
	 * file at the same time, but we wouldn't remove that lock anyway.
	 */
	if (!filp->f_path.dentry->d_inode->i_flock)
		return;

	lock.fl_type = F_UNLCK;
	lock.fl_flags = FL_POSIX | FL_CLOSE;
	lock.fl_start = 0;
	lock.fl_end = OFFSET_MAX;
	lock.fl_owner = owner;
	lock.fl_pid = current->tgid;
	lock.fl_file = filp;
	lock.fl_ops = NULL;
	lock.fl_lmops = NULL;

	vfs_lock_file(filp, F_SETLK, &lock, NULL);

	if (lock.fl_ops && lock.fl_ops->fl_release_private)
		lock.fl_ops->fl_release_private(&lock);
}

EXPORT_SYMBOL(locks_remove_posix);

/*
 * This function is called on the last close of an open file.
 */
void locks_remove_flock(struct file *filp)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct file_lock *fl;
	struct file_lock **before;

	if (!inode->i_flock)
		return;

	if (filp->f_op && filp->f_op->flock) {
		struct file_lock fl = {
			.fl_pid = current->tgid,
			.fl_file = filp,
			.fl_flags = FL_FLOCK,
			.fl_type = F_UNLCK,
			.fl_end = OFFSET_MAX,
		};
		filp->f_op->flock(filp, F_SETLKW, &fl);
		if (fl.fl_ops && fl.fl_ops->fl_release_private)
			fl.fl_ops->fl_release_private(&fl);
	}

	lock_flocks();
	before = &inode->i_flock;

	while ((fl = *before) != NULL) {
		if (fl->fl_file == filp) {
			if (IS_FLOCK(fl)) {
				locks_delete_lock(before);
				continue;
			}
			if (IS_LEASE(fl)) {
				lease_modify(before, F_UNLCK);
				continue;
			}
			/* What? */
			BUG();
		}
		before = &fl->fl_next;
	}
	unlock_flocks();
}

/**
 * posix_unblock_lock - stop waiting for a file lock
 * @filp: how the file was opened
 * @waiter: the lock which was waiting
 *
 * lockd needs to block waiting for locks.
 */
int
posix_unblock_lock(struct file *filp, struct file_lock *waiter)
{
	int status = 0;

	lock_flocks();
	if (waiter->fl_next)
		__locks_delete_block(waiter);
	else
		status = -ENOENT;
	unlock_flocks();
	return status;
}

EXPORT_SYMBOL(posix_unblock_lock);

/**
 * vfs_cancel_lock - file byte range unblock lock
 * @filp: The file to apply the unblock to
 * @fl: The lock to be unblocked
 *
 * Used by lock managers to cancel blocked requests
 */
int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op && filp->f_op->lock)
		return filp->f_op->lock(filp, F_CANCELLK, fl);
	return 0;
}

EXPORT_SYMBOL_GPL(vfs_cancel_lock);

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static void lock_get_status(struct seq_file *f, struct file_lock *fl,
			    loff_t id, char *pfx)
{
	struct inode *inode = NULL;
	unsigned int fl_pid;

	if (fl->fl_nspid)
		fl_pid = pid_vnr(fl->fl_nspid);
	else
		fl_pid = fl->fl_pid;

	if (fl->fl_file != NULL)
		inode = fl->fl_file->f_path.dentry->d_inode;

	seq_printf(f, "%lld:%s ", id, pfx);
	if (IS_POSIX(fl)) {
		seq_printf(f, "%6s %s ",
			   (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
			   (inode == NULL) ? "*NOINODE*" :
			   mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
	} else if (IS_FLOCK(fl)) {
		if (fl->fl_type & LOCK_MAND) {
			seq_printf(f, "FLOCK MSNFS ");
		} else {
			seq_printf(f, "FLOCK ADVISORY ");
		}
	} else if (IS_LEASE(fl)) {
		seq_printf(f, "LEASE ");
		if (lease_breaking(fl))
			seq_printf(f, "BREAKING ");
		else if (fl->fl_file)
			seq_printf(f, "ACTIVE ");
		else
			seq_printf(f, "BREAKER ");
	} else {
		seq_printf(f, "UNKNOWN UNKNOWN ");
	}
	if (fl->fl_type & LOCK_MAND) {
		seq_printf(f, "%s ",
			   (fl->fl_type & LOCK_READ)
			   ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ "
			   : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
	} else {
		seq_printf(f, "%s ",
			   (lease_breaking(fl))
			   ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
			   : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
	}
	if (inode) {
#ifdef WE_CAN_BREAK_LSLK_NOW
		seq_printf(f, "%d %s:%ld ", fl_pid,
			   inode->i_sb->s_id, inode->i_ino);
#else
		/* userspace relies on this representation of dev_t ;-( */
		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
			   MAJOR(inode->i_sb->s_dev),
			   MINOR(inode->i_sb->s_dev), inode->i_ino);
#endif
	} else {
		seq_printf(f, "%d <none>:0 ", fl_pid);
	}
	if (IS_POSIX(fl)) {
		if (fl->fl_end == OFFSET_MAX)
			seq_printf(f, "%Ld EOF\n", fl->fl_start);
		else
			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
	} else {
		seq_printf(f, "0 EOF\n");
	}
}
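
/*
 * For reference, a /proc/locks line produced by the code above looks
 * roughly like this (all values are made-up examples):
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:54321 0 EOF
 *
 * i.e. ordinal, lock class, advisory/mandatory, access type, pid, the
 * major:minor:inode triple userspace depends on, and the byte range
 * ("EOF" when the lock runs to the end of the file).
 */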

static int locks_show(struct seq_file *f, void *v)
{
	struct file_lock *fl, *bfl;

	fl = list_entry(v, struct file_lock, fl_link);

	lock_get_status(f, fl, *((loff_t *)f->private), "");

	list_for_each_entry(bfl, &fl->fl_block, fl_block)
		lock_get_status(f, bfl, *((loff_t *)f->private), " ->");

	return 0;
}

static void *locks_start(struct seq_file *f, loff_t *pos)
{
	loff_t *p = f->private;

	lock_flocks();
	*p = (*pos + 1);
	return seq_list_start(&file_lock_list, *pos);
}

static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
{
	loff_t *p = f->private;
	++*p;
	return seq_list_next(v, &file_lock_list, pos);
}

static void locks_stop(struct seq_file *f, void *v)
{
	unlock_flocks();
}

static const struct seq_operations locks_seq_operations = {
	.start	= locks_start,
	.next	= locks_next,
	.stop	= locks_stop,
	.show	= locks_show,
};

static int locks_open(struct inode *inode, struct file *filp)
{
	return seq_open_private(filp, &locks_seq_operations, sizeof(loff_t));
}

static const struct file_operations proc_locks_operations = {
	.open		= locks_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int __init proc_locks_init(void)
{
	proc_create("locks", 0, NULL, &proc_locks_operations);
	return 0;
}
module_init(proc_locks_init);
#endif

/**
 * lock_may_read - checks that the region is free of locks
 * @inode: the inode that is being read
 * @start: the first byte to read
 * @len: the number of bytes to read
 *
 * Emulates Windows locking requirements. Whole-file
 * mandatory locks (share modes) can prohibit a read and
 * byte-range POSIX locks can prohibit a read if they overlap.
 *
 * N.B. this function is only ever called
 * from knfsd and ownership of locks is never checked.
 */
int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
{
	struct file_lock *fl;
	int result = 1;
	lock_flocks();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (IS_POSIX(fl)) {
			if (fl->fl_type == F_RDLCK)
				continue;
			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
				continue;
		} else if (IS_FLOCK(fl)) {
			if (!(fl->fl_type & LOCK_MAND))
				continue;
			if (fl->fl_type & LOCK_READ)
				continue;
		} else
			continue;
		result = 0;
		break;
	}
	unlock_flocks();
	return result;
}

EXPORT_SYMBOL(lock_may_read);

/**
 * lock_may_write - checks that the region is free of locks
 * @inode: the inode that is being written
 * @start: the first byte to write
 * @len: the number of bytes to write
 *
 * Emulates Windows locking requirements. Whole-file
 * mandatory locks (share modes) can prohibit a write and
 * byte-range POSIX locks can prohibit a write if they overlap.
 *
 * N.B. this function is only ever called
 * from knfsd and ownership of locks is never checked.
 */
int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
{
	struct file_lock *fl;
	int result = 1;
	lock_flocks();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (IS_POSIX(fl)) {
			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
				continue;
		} else if (IS_FLOCK(fl)) {
			if (!(fl->fl_type & LOCK_MAND))
				continue;
			if (fl->fl_type & LOCK_WRITE)
				continue;
		} else
			continue;
		result = 0;
		break;
	}
	unlock_flocks();
	return result;
}

EXPORT_SYMBOL(lock_may_write);

static int __init filelock_init(void)
{
	filelock_cache = kmem_cache_create("file_lock_cache",
			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);

	return 0;
}

core_initcall(filelock_init);