   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/fs/locks.c
   4 *
   5 * We implement four types of file locks: BSD locks, posix locks, open
   6 * file description locks, and leases.  For details about BSD locks,
   7 * see the flock(2) man page; for details about the other three, see
   8 * fcntl(2).
   9 *
  10 *
  11 * Locking conflicts and dependencies:
  12 * If multiple threads attempt to lock the same byte (or flock the same file)
   13 * only one can be granted the lock, and the others must wait their turn.
   14 * The first lock has been "applied" or "granted", the others are "waiting"
   15 * and are "blocked" by the "applied" lock.
  16 *
  17 * Waiting and applied locks are all kept in trees whose properties are:
  18 *
  19 *	- the root of a tree may be an applied or waiting lock.
  20 *	- every other node in the tree is a waiting lock that
  21 *	  conflicts with every ancestor of that node.
  22 *
  23 * Every such tree begins life as a waiting singleton which obviously
  24 * satisfies the above properties.
  25 *
  26 * The only ways we modify trees preserve these properties:
  27 *
  28 *	1. We may add a new leaf node, but only after first verifying that it
  29 *	   conflicts with all of its ancestors.
  30 *	2. We may remove the root of a tree, creating a new singleton
  31 *	   tree from the root and N new trees rooted in the immediate
  32 *	   children.
  33 *	3. If the root of a tree is not currently an applied lock, we may
  34 *	   apply it (if possible).
  35 *	4. We may upgrade the root of the tree (either extend its range,
  36 *	   or upgrade its entire range from read to write).
  37 *
  38 * When an applied lock is modified in a way that reduces or downgrades any
  39 * part of its range, we remove all its children (2 above).  This particularly
  40 * happens when a lock is unlocked.
  41 *
  42 * For each of those child trees we "wake up" the thread which is
  43 * waiting for the lock so it can continue handling as follows: if the
   44 * root of the tree can be applied, we apply it (3).  If it cannot, it must
   45 * conflict with some applied lock.  We remove (wake up) all of its children
   46 * (2), and add it as a new leaf to the tree rooted in the applied
  47 * lock (1).  We then repeat the process recursively with those
  48 * children.
  49 *
  50 */
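/*
 * A hypothetical walk-through of the rules above: suppose an applied write
 * lock A covers bytes 0-9 and waiters B, C and D arrive in turn for the
 * same range.  Each conflicts with A and with every earlier waiter, so they
 * form the chain A <- B <- C <- D.  When A is unlocked, rule 2 makes B (with
 * C and D still beneath it) the root of a new tree; B can then be applied
 * (rule 3), and C and D remain blocked, now rooted under the applied B.
 */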
  51
  52#include <linux/capability.h>
  53#include <linux/file.h>
  54#include <linux/fdtable.h>
  55#include <linux/filelock.h>
  56#include <linux/fs.h>
  57#include <linux/init.h>
  58#include <linux/security.h>
  59#include <linux/slab.h>
  60#include <linux/syscalls.h>
  61#include <linux/time.h>
  62#include <linux/rcupdate.h>
  63#include <linux/pid_namespace.h>
  64#include <linux/hashtable.h>
  65#include <linux/percpu.h>
  66#include <linux/sysctl.h>
  67
  68#define CREATE_TRACE_POINTS
  69#include <trace/events/filelock.h>
  70
  71#include <linux/uaccess.h>
  72
  73#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
  74#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
  75#define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
  76#define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)
  77#define IS_REMOTELCK(fl)	(fl->fl_pid <= 0)
  78
  79static bool lease_breaking(struct file_lock *fl)
  80{
  81	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
  82}
  83
  84static int target_leasetype(struct file_lock *fl)
  85{
  86	if (fl->fl_flags & FL_UNLOCK_PENDING)
  87		return F_UNLCK;
  88	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
  89		return F_RDLCK;
  90	return fl->fl_type;
  91}
  92
  93static int leases_enable = 1;
  94static int lease_break_time = 45;
  95
  96#ifdef CONFIG_SYSCTL
  97static struct ctl_table locks_sysctls[] = {
  98	{
  99		.procname	= "leases-enable",
 100		.data		= &leases_enable,
 101		.maxlen		= sizeof(int),
 102		.mode		= 0644,
 103		.proc_handler	= proc_dointvec,
 104	},
 105#ifdef CONFIG_MMU
 106	{
 107		.procname	= "lease-break-time",
 108		.data		= &lease_break_time,
 109		.maxlen		= sizeof(int),
 110		.mode		= 0644,
 111		.proc_handler	= proc_dointvec,
 112	},
 113#endif /* CONFIG_MMU */
 114};
 115
 116static int __init init_fs_locks_sysctls(void)
 117{
 118	register_sysctl_init("fs", locks_sysctls);
 119	return 0;
 120}
 121early_initcall(init_fs_locks_sysctls);
 122#endif /* CONFIG_SYSCTL */
 123
 124/*
 125 * The global file_lock_list is only used for displaying /proc/locks, so we
 126 * keep a list on each CPU, with each list protected by its own spinlock.
 127 * Global serialization is done using file_rwsem.
 128 *
 129 * Note that alterations to the list also require that the relevant flc_lock is
 130 * held.
 131 */
 132struct file_lock_list_struct {
 133	spinlock_t		lock;
 134	struct hlist_head	hlist;
 135};
 136static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
 137DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);
 138
 139
 140/*
 141 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
 142 * It is protected by blocked_lock_lock.
 143 *
 144 * We hash locks by lockowner in order to optimize searching for the lock a
 145 * particular lockowner is waiting on.
 146 *
 147 * FIXME: make this value scale via some heuristic? We generally will want more
 148 * buckets when we have more lockowners holding locks, but that's a little
 149 * difficult to determine without knowing what the workload will look like.
 150 */
 151#define BLOCKED_HASH_BITS	7
 152static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
 153
 154/*
 155 * This lock protects the blocked_hash. Generally, if you're accessing it, you
 156 * want to be holding this lock.
 157 *
 158 * In addition, it also protects the fl->fl_blocked_requests list, and the
 159 * fl->fl_blocker pointer for file_lock structures that are acting as lock
 160 * requests (in contrast to those that are acting as records of acquired locks).
 161 *
 162 * Note that when we acquire this lock in order to change the above fields,
 163 * we often hold the flc_lock as well. In certain cases, when reading the fields
 164 * protected by this lock, we can skip acquiring it iff we already hold the
 165 * flc_lock.
 166 */
 167static DEFINE_SPINLOCK(blocked_lock_lock);
 168
 169static struct kmem_cache *flctx_cache __ro_after_init;
 170static struct kmem_cache *filelock_cache __ro_after_init;
 171
 172static struct file_lock_context *
 173locks_get_lock_context(struct inode *inode, int type)
 174{
 175	struct file_lock_context *ctx;
 176
 177	/* paired with cmpxchg() below */
 178	ctx = locks_inode_context(inode);
 179	if (likely(ctx) || type == F_UNLCK)
 180		goto out;
 181
 182	ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
 183	if (!ctx)
 184		goto out;
 185
 186	spin_lock_init(&ctx->flc_lock);
 187	INIT_LIST_HEAD(&ctx->flc_flock);
 188	INIT_LIST_HEAD(&ctx->flc_posix);
 189	INIT_LIST_HEAD(&ctx->flc_lease);
 190
 191	/*
 192	 * Assign the pointer if it's not already assigned. If it is, then
 193	 * free the context we just allocated.
 194	 */
 195	if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
 196		kmem_cache_free(flctx_cache, ctx);
 197		ctx = locks_inode_context(inode);
 198	}
 199out:
 200	trace_locks_get_lock_context(inode, type, ctx);
 201	return ctx;
 202}
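/*
 * The cmpxchg() above implements a common lock-free "allocate, then try to
 * publish" pattern: when two tasks race to create the context, exactly one
 * pointer wins, and the loser frees its allocation and adopts the winner's.
 * A minimal sketch of the pattern, with hypothetical names:
 *
 *	struct ctx *c = alloc_ctx();
 *	if (cmpxchg(&obj->ctx, NULL, c)) {	// non-NULL: we lost the race
 *		free_ctx(c);			// drop ours...
 *		c = READ_ONCE(obj->ctx);	// ...and use the winner's
 *	}
 */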
 203
 204static void
 205locks_dump_ctx_list(struct list_head *list, char *list_type)
 206{
 207	struct file_lock *fl;
 208
 209	list_for_each_entry(fl, list, fl_list) {
 210		pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
 211	}
 212}
 213
 214static void
 215locks_check_ctx_lists(struct inode *inode)
 216{
 217	struct file_lock_context *ctx = inode->i_flctx;
 218
 219	if (unlikely(!list_empty(&ctx->flc_flock) ||
 220		     !list_empty(&ctx->flc_posix) ||
 221		     !list_empty(&ctx->flc_lease))) {
 222		pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
 223			MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
 224			inode->i_ino);
 225		locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
 226		locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
 227		locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
 228	}
 229}
 230
 231static void
 232locks_check_ctx_file_list(struct file *filp, struct list_head *list,
 233				char *list_type)
 234{
 235	struct file_lock *fl;
 236	struct inode *inode = file_inode(filp);
 237
 238	list_for_each_entry(fl, list, fl_list)
 239		if (fl->fl_file == filp)
 240			pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
 241				" fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
 242				list_type, MAJOR(inode->i_sb->s_dev),
 243				MINOR(inode->i_sb->s_dev), inode->i_ino,
 244				fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
 245}
 246
 247void
 248locks_free_lock_context(struct inode *inode)
 249{
 250	struct file_lock_context *ctx = locks_inode_context(inode);
 251
 252	if (unlikely(ctx)) {
 253		locks_check_ctx_lists(inode);
 254		kmem_cache_free(flctx_cache, ctx);
 255	}
 256}
 257
 258static void locks_init_lock_heads(struct file_lock *fl)
 259{
 260	INIT_HLIST_NODE(&fl->fl_link);
 261	INIT_LIST_HEAD(&fl->fl_list);
 262	INIT_LIST_HEAD(&fl->fl_blocked_requests);
 263	INIT_LIST_HEAD(&fl->fl_blocked_member);
 264	init_waitqueue_head(&fl->fl_wait);
 265}
 266
 267/* Allocate an empty lock structure. */
 268struct file_lock *locks_alloc_lock(void)
 269{
 270	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
 271
 272	if (fl)
 273		locks_init_lock_heads(fl);
 274
 275	return fl;
 276}
 277EXPORT_SYMBOL_GPL(locks_alloc_lock);
 278
 279void locks_release_private(struct file_lock *fl)
 280{
 281	BUG_ON(waitqueue_active(&fl->fl_wait));
 282	BUG_ON(!list_empty(&fl->fl_list));
 283	BUG_ON(!list_empty(&fl->fl_blocked_requests));
 284	BUG_ON(!list_empty(&fl->fl_blocked_member));
 285	BUG_ON(!hlist_unhashed(&fl->fl_link));
 286
 287	if (fl->fl_ops) {
 288		if (fl->fl_ops->fl_release_private)
 289			fl->fl_ops->fl_release_private(fl);
 290		fl->fl_ops = NULL;
 291	}
 292
 293	if (fl->fl_lmops) {
 294		if (fl->fl_lmops->lm_put_owner) {
 295			fl->fl_lmops->lm_put_owner(fl->fl_owner);
 296			fl->fl_owner = NULL;
 297		}
 298		fl->fl_lmops = NULL;
 299	}
 300}
 301EXPORT_SYMBOL_GPL(locks_release_private);
 302
 303/**
 304 * locks_owner_has_blockers - Check for blocking lock requests
 305 * @flctx: file lock context
 306 * @owner: lock owner
 307 *
 308 * Return values:
 309 *   %true: @owner has at least one blocker
 310 *   %false: @owner has no blockers
 311 */
 312bool locks_owner_has_blockers(struct file_lock_context *flctx,
 313		fl_owner_t owner)
 314{
 315	struct file_lock *fl;
 316
 317	spin_lock(&flctx->flc_lock);
 318	list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
 319		if (fl->fl_owner != owner)
 320			continue;
 321		if (!list_empty(&fl->fl_blocked_requests)) {
 322			spin_unlock(&flctx->flc_lock);
 323			return true;
 324		}
 325	}
 326	spin_unlock(&flctx->flc_lock);
 327	return false;
 328}
 329EXPORT_SYMBOL_GPL(locks_owner_has_blockers);
 330
 331/* Free a lock which is not in use. */
 332void locks_free_lock(struct file_lock *fl)
 333{
 334	locks_release_private(fl);
 335	kmem_cache_free(filelock_cache, fl);
 336}
 337EXPORT_SYMBOL(locks_free_lock);
 338
 339static void
 340locks_dispose_list(struct list_head *dispose)
 341{
 342	struct file_lock *fl;
 343
 344	while (!list_empty(dispose)) {
 345		fl = list_first_entry(dispose, struct file_lock, fl_list);
 346		list_del_init(&fl->fl_list);
 347		locks_free_lock(fl);
 348	}
 349}
 350
 351void locks_init_lock(struct file_lock *fl)
 352{
 353	memset(fl, 0, sizeof(struct file_lock));
 354	locks_init_lock_heads(fl);
 355}
 356EXPORT_SYMBOL(locks_init_lock);
 357
 358/*
 359 * Initialize a new lock from an existing file_lock structure.
 360 */
 361void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
 362{
 363	new->fl_owner = fl->fl_owner;
 364	new->fl_pid = fl->fl_pid;
 365	new->fl_file = NULL;
 366	new->fl_flags = fl->fl_flags;
 367	new->fl_type = fl->fl_type;
 368	new->fl_start = fl->fl_start;
 369	new->fl_end = fl->fl_end;
 370	new->fl_lmops = fl->fl_lmops;
 371	new->fl_ops = NULL;
 372
 373	if (fl->fl_lmops) {
 374		if (fl->fl_lmops->lm_get_owner)
 375			fl->fl_lmops->lm_get_owner(fl->fl_owner);
 376	}
 377}
 378EXPORT_SYMBOL(locks_copy_conflock);
 379
 380void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
 381{
 382	/* "new" must be a freshly-initialized lock */
 383	WARN_ON_ONCE(new->fl_ops);
 384
 385	locks_copy_conflock(new, fl);
 386
 387	new->fl_file = fl->fl_file;
 388	new->fl_ops = fl->fl_ops;
 389
 390	if (fl->fl_ops) {
 391		if (fl->fl_ops->fl_copy_lock)
 392			fl->fl_ops->fl_copy_lock(new, fl);
 393	}
 394}
 395EXPORT_SYMBOL(locks_copy_lock);
 396
 397static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
 398{
 399	struct file_lock *f;
 400
 401	/*
 402	 * As ctx->flc_lock is held, new requests cannot be added to
 403	 * ->fl_blocked_requests, so we don't need a lock to check if it
 404	 * is empty.
 405	 */
 406	if (list_empty(&fl->fl_blocked_requests))
 407		return;
 408	spin_lock(&blocked_lock_lock);
 409	list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests);
 410	list_for_each_entry(f, &new->fl_blocked_requests, fl_blocked_member)
 411		f->fl_blocker = new;
 412	spin_unlock(&blocked_lock_lock);
 413}
 414
 415static inline int flock_translate_cmd(int cmd) {
 416	switch (cmd) {
 417	case LOCK_SH:
 418		return F_RDLCK;
 419	case LOCK_EX:
 420		return F_WRLCK;
 421	case LOCK_UN:
 422		return F_UNLCK;
 423	}
 424	return -EINVAL;
 425}
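/*
 * These cmd values come straight from flock(2).  A minimal userspace sketch
 * of the interface this serves (illustrative only):
 *
 *	#include <sys/file.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/tmp/demo", O_RDWR | O_CREAT, 0644);
 *	flock(fd, LOCK_EX);	// whole-file lock, maps to F_WRLCK here
 *	// ... exclusive work ...
 *	flock(fd, LOCK_UN);	// maps to F_UNLCK
 */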
 426
 427/* Fill in a file_lock structure with an appropriate FLOCK lock. */
 428static void flock_make_lock(struct file *filp, struct file_lock *fl, int type)
 429{
 430	locks_init_lock(fl);
 431
 432	fl->fl_file = filp;
 433	fl->fl_owner = filp;
 434	fl->fl_pid = current->tgid;
 435	fl->fl_flags = FL_FLOCK;
 436	fl->fl_type = type;
 437	fl->fl_end = OFFSET_MAX;
 438}
 439
 440static int assign_type(struct file_lock *fl, int type)
 441{
 442	switch (type) {
 443	case F_RDLCK:
 444	case F_WRLCK:
 445	case F_UNLCK:
 446		fl->fl_type = type;
 447		break;
 448	default:
 449		return -EINVAL;
 450	}
 451	return 0;
 452}
 453
 454static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
 455				 struct flock64 *l)
 456{
 457	switch (l->l_whence) {
 458	case SEEK_SET:
 459		fl->fl_start = 0;
 460		break;
 461	case SEEK_CUR:
 462		fl->fl_start = filp->f_pos;
 463		break;
 464	case SEEK_END:
 465		fl->fl_start = i_size_read(file_inode(filp));
 466		break;
 467	default:
 468		return -EINVAL;
 469	}
 470	if (l->l_start > OFFSET_MAX - fl->fl_start)
 471		return -EOVERFLOW;
 472	fl->fl_start += l->l_start;
 473	if (fl->fl_start < 0)
 474		return -EINVAL;
 475
 476	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
 477	   POSIX-2001 defines it. */
 478	if (l->l_len > 0) {
 479		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
 480			return -EOVERFLOW;
 481		fl->fl_end = fl->fl_start + (l->l_len - 1);
 482
 483	} else if (l->l_len < 0) {
 484		if (fl->fl_start + l->l_len < 0)
 485			return -EINVAL;
 486		fl->fl_end = fl->fl_start - 1;
 487		fl->fl_start += l->l_len;
 488	} else
 489		fl->fl_end = OFFSET_MAX;
 490
 491	fl->fl_owner = current->files;
 492	fl->fl_pid = current->tgid;
 493	fl->fl_file = filp;
 494	fl->fl_flags = FL_POSIX;
 495	fl->fl_ops = NULL;
 496	fl->fl_lmops = NULL;
 497
 498	return assign_type(fl, l->l_type);
 499}
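/*
 * Worked example of the l_len handling above, with hypothetical values:
 * given l_whence = SEEK_SET, l_start = 100 and l_len = -10, the l_len < 0
 * branch yields fl_end = 100 - 1 = 99 and fl_start = 100 - 10 = 90, i.e.
 * the ten bytes [90, 99] just before the given offset.  With l_len = 10
 * the lock covers [100, 109], and l_len = 0 means "to end of file"
 * (fl_end = OFFSET_MAX).
 */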
 500
 501/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 502 * style lock.
 503 */
 504static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
 505			       struct flock *l)
 506{
 507	struct flock64 ll = {
 508		.l_type = l->l_type,
 509		.l_whence = l->l_whence,
 510		.l_start = l->l_start,
 511		.l_len = l->l_len,
 512	};
 513
 514	return flock64_to_posix_lock(filp, fl, &ll);
 515}
 516
 517/* default lease lock manager operations */
 518static bool
 519lease_break_callback(struct file_lock *fl)
 520{
 521	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
 522	return false;
 523}
 524
 525static void
 526lease_setup(struct file_lock *fl, void **priv)
 527{
 528	struct file *filp = fl->fl_file;
 529	struct fasync_struct *fa = *priv;
 530
 531	/*
 532	 * fasync_insert_entry() returns the old entry if any. If there was no
 533	 * old entry, then it used "priv" and inserted it into the fasync list.
 534	 * Clear the pointer to indicate that it shouldn't be freed.
 535	 */
 536	if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
 537		*priv = NULL;
 538
 539	__f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
 540}
 541
 542static const struct lock_manager_operations lease_manager_ops = {
 543	.lm_break = lease_break_callback,
 544	.lm_change = lease_modify,
 545	.lm_setup = lease_setup,
 546};
 547
 548/*
 549 * Initialize a lease, use the default lock manager operations
 550 */
 551static int lease_init(struct file *filp, int type, struct file_lock *fl)
 552{
 553	if (assign_type(fl, type) != 0)
 554		return -EINVAL;
 555
 556	fl->fl_owner = filp;
 557	fl->fl_pid = current->tgid;
 558
 559	fl->fl_file = filp;
 560	fl->fl_flags = FL_LEASE;
 561	fl->fl_start = 0;
 562	fl->fl_end = OFFSET_MAX;
 563	fl->fl_ops = NULL;
 564	fl->fl_lmops = &lease_manager_ops;
 565	return 0;
 566}
 567
 568/* Allocate a file_lock initialised to this type of lease */
 569static struct file_lock *lease_alloc(struct file *filp, int type)
 570{
 571	struct file_lock *fl = locks_alloc_lock();
 572	int error = -ENOMEM;
 573
 574	if (fl == NULL)
 575		return ERR_PTR(error);
 576
 577	error = lease_init(filp, type, fl);
 578	if (error) {
 579		locks_free_lock(fl);
 580		return ERR_PTR(error);
 581	}
 582	return fl;
 583}
 584
 585/* Check if two locks overlap each other.
 586 */
 587static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
 588{
 589	return ((fl1->fl_end >= fl2->fl_start) &&
 590		(fl2->fl_end >= fl1->fl_start));
 591}
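/*
 * Since fl_start and fl_end are both inclusive, ranges that merely touch
 * at a shared byte do overlap: [0, 10] and [10, 19] conflict (10 >= 10 and
 * 19 >= 0), while [0, 9] and [10, 19] do not (9 >= 10 fails).
 */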
 592
 593/*
 594 * Check whether two locks have the same owner.
 595 */
 596static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
 597{
 598	return fl1->fl_owner == fl2->fl_owner;
 599}
 600
 601/* Must be called with the flc_lock held! */
 602static void locks_insert_global_locks(struct file_lock *fl)
 603{
 604	struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);
 605
 606	percpu_rwsem_assert_held(&file_rwsem);
 607
 608	spin_lock(&fll->lock);
 609	fl->fl_link_cpu = smp_processor_id();
 610	hlist_add_head(&fl->fl_link, &fll->hlist);
 611	spin_unlock(&fll->lock);
 612}
 613
 614/* Must be called with the flc_lock held! */
 615static void locks_delete_global_locks(struct file_lock *fl)
 616{
 617	struct file_lock_list_struct *fll;
 618
 619	percpu_rwsem_assert_held(&file_rwsem);
 620
 621	/*
 622	 * Avoid taking lock if already unhashed. This is safe since this check
 623	 * is done while holding the flc_lock, and new insertions into the list
 624	 * also require that it be held.
 625	 */
 626	if (hlist_unhashed(&fl->fl_link))
 627		return;
 628
 629	fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
 630	spin_lock(&fll->lock);
 631	hlist_del_init(&fl->fl_link);
 632	spin_unlock(&fll->lock);
 633}
 634
 635static unsigned long
 636posix_owner_key(struct file_lock *fl)
 637{
 638	return (unsigned long)fl->fl_owner;
 639}
 640
 641static void locks_insert_global_blocked(struct file_lock *waiter)
 642{
 643	lockdep_assert_held(&blocked_lock_lock);
 644
 645	hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
 646}
 647
 648static void locks_delete_global_blocked(struct file_lock *waiter)
 649{
 650	lockdep_assert_held(&blocked_lock_lock);
 651
 652	hash_del(&waiter->fl_link);
 653}
 654
 655/* Remove waiter from blocker's block list.
 656 * When blocker ends up pointing to itself then the list is empty.
 657 *
 658 * Must be called with blocked_lock_lock held.
 659 */
 660static void __locks_delete_block(struct file_lock *waiter)
 661{
 662	locks_delete_global_blocked(waiter);
 663	list_del_init(&waiter->fl_blocked_member);
 664}
 665
 666static void __locks_wake_up_blocks(struct file_lock *blocker)
 667{
 668	while (!list_empty(&blocker->fl_blocked_requests)) {
 669		struct file_lock *waiter;
 670
 671		waiter = list_first_entry(&blocker->fl_blocked_requests,
 672					  struct file_lock, fl_blocked_member);
 673		__locks_delete_block(waiter);
 674		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
 675			waiter->fl_lmops->lm_notify(waiter);
 676		else
 677			wake_up(&waiter->fl_wait);
 678
 679		/*
 680		 * The setting of fl_blocker to NULL marks the "done"
 681		 * point in deleting a block. Paired with acquire at the top
 682		 * of locks_delete_block().
 683		 */
 684		smp_store_release(&waiter->fl_blocker, NULL);
 685	}
 686}
 687
 688/**
 689 *	locks_delete_block - stop waiting for a file lock
 690 *	@waiter: the lock which was waiting
 691 *
 692 *	lockd/nfsd need to disconnect the lock while working on it.
 693 */
 694int locks_delete_block(struct file_lock *waiter)
 695{
 696	int status = -ENOENT;
 697
 698	/*
 699	 * If fl_blocker is NULL, it won't be set again as this thread "owns"
 700	 * the lock and is the only one that might try to claim the lock.
 701	 *
 702	 * We use acquire/release to manage fl_blocker so that we can
 703	 * optimize away taking the blocked_lock_lock in many cases.
 704	 *
 705	 * The smp_load_acquire guarantees two things:
 706	 *
 707	 * 1/ that fl_blocked_requests can be tested locklessly. If something
 708	 * was recently added to that list it must have been in a locked region
 709	 * *before* the locked region when fl_blocker was set to NULL.
 710	 *
 711	 * 2/ that no other thread is accessing 'waiter', so it is safe to free
 712	 * it.  __locks_wake_up_blocks is careful not to touch waiter after
 713	 * fl_blocker is released.
 714	 *
 715	 * If a lockless check of fl_blocker shows it to be NULL, we know that
 716	 * no new locks can be inserted into its fl_blocked_requests list, and
 717	 * can avoid doing anything further if the list is empty.
 718	 */
 719	if (!smp_load_acquire(&waiter->fl_blocker) &&
 720	    list_empty(&waiter->fl_blocked_requests))
 721		return status;
 722
 723	spin_lock(&blocked_lock_lock);
 724	if (waiter->fl_blocker)
 725		status = 0;
 726	__locks_wake_up_blocks(waiter);
 727	__locks_delete_block(waiter);
 728
 729	/*
 730	 * The setting of fl_blocker to NULL marks the "done" point in deleting
 731	 * a block. Paired with acquire at the top of this function.
 732	 */
 733	smp_store_release(&waiter->fl_blocker, NULL);
 734	spin_unlock(&blocked_lock_lock);
 735	return status;
 736}
 737EXPORT_SYMBOL(locks_delete_block);
 738
 739/* Insert waiter into blocker's block list.
 740 * We use a circular list so that processes can be easily woken up in
 741 * the order they blocked. The documentation doesn't require this but
 742 * it seems like the reasonable thing to do.
 743 *
 744 * Must be called with both the flc_lock and blocked_lock_lock held. The
 745 * fl_blocked_requests list itself is protected by the blocked_lock_lock,
 746 * but by ensuring that the flc_lock is also held on insertions we can avoid
 747 * taking the blocked_lock_lock in some cases when we see that the
 748 * fl_blocked_requests list is empty.
 749 *
 750 * Rather than just adding to the list, we check for conflicts with any existing
 751 * waiters, and add beneath any waiter that blocks the new waiter.
 752 * Thus wakeups don't happen until needed.
 753 */
 754static void __locks_insert_block(struct file_lock *blocker,
 755				 struct file_lock *waiter,
 756				 bool conflict(struct file_lock *,
 757					       struct file_lock *))
 758{
 759	struct file_lock *fl;
 760	BUG_ON(!list_empty(&waiter->fl_blocked_member));
 761
 762new_blocker:
 763	list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member)
 764		if (conflict(fl, waiter)) {
  765			blocker = fl;
 766			goto new_blocker;
 767		}
 768	waiter->fl_blocker = blocker;
 769	list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests);
 770	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
 771		locks_insert_global_blocked(waiter);
 772
  773	/* The requests in waiter->fl_blocked_requests are known to conflict with
 774	 * waiter, but might not conflict with blocker, or the requests
 775	 * and lock which block it.  So they all need to be woken.
 776	 */
 777	__locks_wake_up_blocks(waiter);
 778}
 779
 780/* Must be called with flc_lock held. */
 781static void locks_insert_block(struct file_lock *blocker,
 782			       struct file_lock *waiter,
 783			       bool conflict(struct file_lock *,
 784					     struct file_lock *))
 785{
 786	spin_lock(&blocked_lock_lock);
 787	__locks_insert_block(blocker, waiter, conflict);
 788	spin_unlock(&blocked_lock_lock);
 789}
 790
 791/*
 792 * Wake up processes blocked waiting for blocker.
 793 *
 794 * Must be called with the inode->flc_lock held!
 795 */
 796static void locks_wake_up_blocks(struct file_lock *blocker)
 797{
 798	/*
 799	 * Avoid taking global lock if list is empty. This is safe since new
 800	 * blocked requests are only added to the list under the flc_lock, and
 801	 * the flc_lock is always held here. Note that removal from the
 802	 * fl_blocked_requests list does not require the flc_lock, so we must
 803	 * recheck list_empty() after acquiring the blocked_lock_lock.
 804	 */
 805	if (list_empty(&blocker->fl_blocked_requests))
 806		return;
 807
 808	spin_lock(&blocked_lock_lock);
 809	__locks_wake_up_blocks(blocker);
 810	spin_unlock(&blocked_lock_lock);
 811}
 812
 813static void
 814locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
 815{
 816	list_add_tail(&fl->fl_list, before);
 817	locks_insert_global_locks(fl);
 818}
 819
 820static void
 821locks_unlink_lock_ctx(struct file_lock *fl)
 822{
 823	locks_delete_global_locks(fl);
 824	list_del_init(&fl->fl_list);
 825	locks_wake_up_blocks(fl);
 826}
 827
 828static void
 829locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
 830{
 831	locks_unlink_lock_ctx(fl);
 832	if (dispose)
 833		list_add(&fl->fl_list, dispose);
 834	else
 835		locks_free_lock(fl);
 836}
 837
 838/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 839 * checks for shared/exclusive status of overlapping locks.
 840 */
 841static bool locks_conflict(struct file_lock *caller_fl,
 842			   struct file_lock *sys_fl)
 843{
 844	if (sys_fl->fl_type == F_WRLCK)
 845		return true;
 846	if (caller_fl->fl_type == F_WRLCK)
 847		return true;
 848	return false;
 849}
 850
 851/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
  852 * checking before calling locks_conflict().
 853 */
 854static bool posix_locks_conflict(struct file_lock *caller_fl,
 855				 struct file_lock *sys_fl)
 856{
 857	/* POSIX locks owned by the same process do not conflict with
 858	 * each other.
 859	 */
 860	if (posix_same_owner(caller_fl, sys_fl))
 861		return false;
 862
 863	/* Check whether they overlap */
 864	if (!locks_overlap(caller_fl, sys_fl))
 865		return false;
 866
 867	return locks_conflict(caller_fl, sys_fl);
 868}
 869
 870/* Determine if lock sys_fl blocks lock caller_fl. Used on xx_GETLK
 871 * path so checks for additional GETLK-specific things like F_UNLCK.
 872 */
 873static bool posix_test_locks_conflict(struct file_lock *caller_fl,
 874				      struct file_lock *sys_fl)
 875{
  876	/* F_UNLCK checks for any locks held by the same owner. */
 877	if (caller_fl->fl_type == F_UNLCK) {
 878		if (!posix_same_owner(caller_fl, sys_fl))
 879			return false;
 880		return locks_overlap(caller_fl, sys_fl);
 881	}
 882	return posix_locks_conflict(caller_fl, sys_fl);
 883}
 884
 885/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
  886 * checking before calling locks_conflict().
 887 */
 888static bool flock_locks_conflict(struct file_lock *caller_fl,
 889				 struct file_lock *sys_fl)
 890{
 891	/* FLOCK locks referring to the same filp do not conflict with
 892	 * each other.
 893	 */
 894	if (caller_fl->fl_file == sys_fl->fl_file)
 895		return false;
 896
 897	return locks_conflict(caller_fl, sys_fl);
 898}
 899
 900void
 901posix_test_lock(struct file *filp, struct file_lock *fl)
 902{
 903	struct file_lock *cfl;
 904	struct file_lock_context *ctx;
 905	struct inode *inode = file_inode(filp);
 906	void *owner;
 907	void (*func)(void);
 908
 909	ctx = locks_inode_context(inode);
 910	if (!ctx || list_empty_careful(&ctx->flc_posix)) {
 911		fl->fl_type = F_UNLCK;
 912		return;
 913	}
 914
 915retry:
 916	spin_lock(&ctx->flc_lock);
 917	list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
 918		if (!posix_test_locks_conflict(fl, cfl))
 919			continue;
 920		if (cfl->fl_lmops && cfl->fl_lmops->lm_lock_expirable
 921			&& (*cfl->fl_lmops->lm_lock_expirable)(cfl)) {
 922			owner = cfl->fl_lmops->lm_mod_owner;
 923			func = cfl->fl_lmops->lm_expire_lock;
 924			__module_get(owner);
 925			spin_unlock(&ctx->flc_lock);
 926			(*func)();
 927			module_put(owner);
 928			goto retry;
 929		}
 930		locks_copy_conflock(fl, cfl);
 931		goto out;
 932	}
 933	fl->fl_type = F_UNLCK;
 934out:
 935	spin_unlock(&ctx->flc_lock);
 936	return;
 937}
 938EXPORT_SYMBOL(posix_test_lock);
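/*
 * posix_test_lock() is what backs the fcntl(F_GETLK) path.  An illustrative
 * userspace counterpart (hypothetical fd):
 *
 *	struct flock fl = {
 *		.l_type = F_WRLCK,	// "could I take a write lock?"
 *		.l_whence = SEEK_SET,
 *		.l_start = 0,
 *		.l_len = 0,		// length 0 means the whole file
 *	};
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type == F_UNLCK)
 *		;	// no conflict; otherwise fl describes one conflicting lock
 */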
 939
 940/*
 941 * Deadlock detection:
 942 *
 943 * We attempt to detect deadlocks that are due purely to posix file
 944 * locks.
 945 *
 946 * We assume that a task can be waiting for at most one lock at a time.
 947 * So for any acquired lock, the process holding that lock may be
   948 * waiting on at most one other lock.  That lock in turn may be held by
 949 * someone waiting for at most one other lock.  Given a requested lock
 950 * caller_fl which is about to wait for a conflicting lock block_fl, we
 951 * follow this chain of waiters to ensure we are not about to create a
 952 * cycle.
 953 *
 954 * Since we do this before we ever put a process to sleep on a lock, we
 955 * are ensured that there is never a cycle; that is what guarantees that
 956 * the while() loop in posix_locks_deadlock() eventually completes.
 957 *
 958 * Note: the above assumption may not be true when handling lock
 959 * requests from a broken NFS client. It may also fail in the presence
 960 * of tasks (such as posix threads) sharing the same open file table.
 961 * To handle those cases, we just bail out after a few iterations.
 962 *
 963 * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
 964 * Because the owner is not even nominally tied to a thread of
 965 * execution, the deadlock detection below can't reasonably work well. Just
 966 * skip it for those.
 967 *
 968 * In principle, we could do a more limited deadlock detection on FL_OFDLCK
 969 * locks that just checks for the case where two tasks are attempting to
 970 * upgrade from read to write locks on the same inode.
 971 */
 972
 973#define MAX_DEADLK_ITERATIONS 10
 974
 975/* Find a lock that the owner of the given block_fl is blocking on. */
 976static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
 977{
 978	struct file_lock *fl;
 979
 980	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
 981		if (posix_same_owner(fl, block_fl)) {
 982			while (fl->fl_blocker)
 983				fl = fl->fl_blocker;
 984			return fl;
 985		}
 986	}
 987	return NULL;
 988}
 989
 990/* Must be called with the blocked_lock_lock held! */
 991static int posix_locks_deadlock(struct file_lock *caller_fl,
 992				struct file_lock *block_fl)
 993{
 994	int i = 0;
 995
 996	lockdep_assert_held(&blocked_lock_lock);
 997
 998	/*
 999	 * This deadlock detector can't reasonably detect deadlocks with
1000	 * FL_OFDLCK locks, since they aren't owned by a process, per-se.
1001	 */
1002	if (IS_OFDLCK(caller_fl))
1003		return 0;
1004
1005	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
1006		if (i++ > MAX_DEADLK_ITERATIONS)
1007			return 0;
1008		if (posix_same_owner(caller_fl, block_fl))
1009			return 1;
1010	}
1011	return 0;
1012}
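/*
 * A concrete (hypothetical) cycle the walk above catches: task 1 holds a
 * lock on byte 0 and is blocked waiting for byte 1; task 2 holds byte 1 and
 * now requests byte 0.  Following what_owner_is_waiting_for() from task 2's
 * blocker leads back to task 2's own owner, so posix_locks_deadlock()
 * returns 1 and the request fails with -EDEADLK instead of sleeping forever.
 */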
1013
1014/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
1015 * after any leases, but before any posix locks.
1016 *
1017 * Note that if called with an FL_EXISTS argument, the caller may determine
1018 * whether or not a lock was successfully freed by testing the return
1019 * value for -ENOENT.
1020 */
1021static int flock_lock_inode(struct inode *inode, struct file_lock *request)
1022{
1023	struct file_lock *new_fl = NULL;
1024	struct file_lock *fl;
1025	struct file_lock_context *ctx;
1026	int error = 0;
1027	bool found = false;
1028	LIST_HEAD(dispose);
1029
1030	ctx = locks_get_lock_context(inode, request->fl_type);
1031	if (!ctx) {
1032		if (request->fl_type != F_UNLCK)
1033			return -ENOMEM;
1034		return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
1035	}
1036
1037	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
1038		new_fl = locks_alloc_lock();
1039		if (!new_fl)
1040			return -ENOMEM;
1041	}
1042
1043	percpu_down_read(&file_rwsem);
1044	spin_lock(&ctx->flc_lock);
1045	if (request->fl_flags & FL_ACCESS)
1046		goto find_conflict;
1047
1048	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
1049		if (request->fl_file != fl->fl_file)
1050			continue;
1051		if (request->fl_type == fl->fl_type)
1052			goto out;
1053		found = true;
1054		locks_delete_lock_ctx(fl, &dispose);
1055		break;
1056	}
1057
1058	if (request->fl_type == F_UNLCK) {
1059		if ((request->fl_flags & FL_EXISTS) && !found)
1060			error = -ENOENT;
1061		goto out;
1062	}
1063
1064find_conflict:
1065	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
1066		if (!flock_locks_conflict(request, fl))
1067			continue;
1068		error = -EAGAIN;
1069		if (!(request->fl_flags & FL_SLEEP))
1070			goto out;
1071		error = FILE_LOCK_DEFERRED;
1072		locks_insert_block(fl, request, flock_locks_conflict);
1073		goto out;
1074	}
1075	if (request->fl_flags & FL_ACCESS)
1076		goto out;
1077	locks_copy_lock(new_fl, request);
1078	locks_move_blocks(new_fl, request);
1079	locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
1080	new_fl = NULL;
1081	error = 0;
1082
1083out:
1084	spin_unlock(&ctx->flc_lock);
1085	percpu_up_read(&file_rwsem);
1086	if (new_fl)
1087		locks_free_lock(new_fl);
1088	locks_dispose_list(&dispose);
1089	trace_flock_lock_inode(inode, request, error);
1090	return error;
1091}
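/*
 * Note the conversion semantics implied above: changing an existing flock
 * lock (say LOCK_SH to LOCK_EX) deletes the old lock before the new one is
 * granted, so the switch is not atomic.  A hypothetical caller must be
 * prepared for flock(fd, LOCK_EX) to block, or to fail with EWOULDBLOCK
 * under LOCK_NB, even though it already held LOCK_SH on fd.
 */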
1092
1093static int posix_lock_inode(struct inode *inode, struct file_lock *request,
1094			    struct file_lock *conflock)
1095{
1096	struct file_lock *fl, *tmp;
1097	struct file_lock *new_fl = NULL;
1098	struct file_lock *new_fl2 = NULL;
1099	struct file_lock *left = NULL;
1100	struct file_lock *right = NULL;
1101	struct file_lock_context *ctx;
1102	int error;
1103	bool added = false;
1104	LIST_HEAD(dispose);
1105	void *owner;
1106	void (*func)(void);
1107
1108	ctx = locks_get_lock_context(inode, request->fl_type);
1109	if (!ctx)
1110		return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;
1111
1112	/*
1113	 * We may need two file_lock structures for this operation,
1114	 * so we get them in advance to avoid races.
1115	 *
 1116	 * In some cases we can be sure that no new locks will be needed
1117	 */
1118	if (!(request->fl_flags & FL_ACCESS) &&
1119	    (request->fl_type != F_UNLCK ||
1120	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
1121		new_fl = locks_alloc_lock();
1122		new_fl2 = locks_alloc_lock();
1123	}
1124
1125retry:
1126	percpu_down_read(&file_rwsem);
1127	spin_lock(&ctx->flc_lock);
1128	/*
1129	 * New lock request. Walk all POSIX locks and look for conflicts. If
1130	 * there are any, either return error or put the request on the
1131	 * blocker's list of waiters and the global blocked_hash.
1132	 */
1133	if (request->fl_type != F_UNLCK) {
1134		list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1135			if (!posix_locks_conflict(request, fl))
1136				continue;
1137			if (fl->fl_lmops && fl->fl_lmops->lm_lock_expirable
1138				&& (*fl->fl_lmops->lm_lock_expirable)(fl)) {
1139				owner = fl->fl_lmops->lm_mod_owner;
1140				func = fl->fl_lmops->lm_expire_lock;
1141				__module_get(owner);
1142				spin_unlock(&ctx->flc_lock);
1143				percpu_up_read(&file_rwsem);
1144				(*func)();
1145				module_put(owner);
1146				goto retry;
1147			}
1148			if (conflock)
1149				locks_copy_conflock(conflock, fl);
1150			error = -EAGAIN;
1151			if (!(request->fl_flags & FL_SLEEP))
1152				goto out;
1153			/*
1154			 * Deadlock detection and insertion into the blocked
1155			 * locks list must be done while holding the same lock!
1156			 */
1157			error = -EDEADLK;
1158			spin_lock(&blocked_lock_lock);
1159			/*
1160			 * Ensure that we don't find any locks blocked on this
1161			 * request during deadlock detection.
1162			 */
1163			__locks_wake_up_blocks(request);
1164			if (likely(!posix_locks_deadlock(request, fl))) {
1165				error = FILE_LOCK_DEFERRED;
1166				__locks_insert_block(fl, request,
1167						     posix_locks_conflict);
1168			}
1169			spin_unlock(&blocked_lock_lock);
1170			goto out;
1171		}
1172	}
1173
1174	/* If we're just looking for a conflict, we're done. */
1175	error = 0;
1176	if (request->fl_flags & FL_ACCESS)
1177		goto out;
1178
1179	/* Find the first old lock with the same owner as the new lock */
1180	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1181		if (posix_same_owner(request, fl))
1182			break;
1183	}
1184
1185	/* Process locks with this owner. */
1186	list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
1187		if (!posix_same_owner(request, fl))
1188			break;
1189
1190		/* Detect adjacent or overlapping regions (if same lock type) */
1191		if (request->fl_type == fl->fl_type) {
1192			/* In all comparisons of start vs end, use
1193			 * "start - 1" rather than "end + 1". If end
1194			 * is OFFSET_MAX, end + 1 will become negative.
1195			 */
1196			if (fl->fl_end < request->fl_start - 1)
1197				continue;
1198			/* If the next lock in the list has entirely bigger
1199			 * addresses than the new one, insert the lock here.
1200			 */
1201			if (fl->fl_start - 1 > request->fl_end)
1202				break;
1203
1204			/* If we come here, the new and old lock are of the
1205			 * same type and adjacent or overlapping. Make one
1206			 * lock yielding from the lower start address of both
1207			 * locks to the higher end address.
1208			 */
1209			if (fl->fl_start > request->fl_start)
1210				fl->fl_start = request->fl_start;
1211			else
1212				request->fl_start = fl->fl_start;
1213			if (fl->fl_end < request->fl_end)
1214				fl->fl_end = request->fl_end;
1215			else
1216				request->fl_end = fl->fl_end;
1217			if (added) {
1218				locks_delete_lock_ctx(fl, &dispose);
1219				continue;
1220			}
1221			request = fl;
1222			added = true;
1223		} else {
1224			/* Processing for different lock types is a bit
1225			 * more complex.
1226			 */
1227			if (fl->fl_end < request->fl_start)
1228				continue;
1229			if (fl->fl_start > request->fl_end)
1230				break;
1231			if (request->fl_type == F_UNLCK)
1232				added = true;
1233			if (fl->fl_start < request->fl_start)
1234				left = fl;
1235			/* If the next lock in the list has a higher end
1236			 * address than the new one, insert the new one here.
1237			 */
1238			if (fl->fl_end > request->fl_end) {
1239				right = fl;
1240				break;
1241			}
1242			if (fl->fl_start >= request->fl_start) {
1243				/* The new lock completely replaces an old
1244				 * one (This may happen several times).
1245				 */
1246				if (added) {
1247					locks_delete_lock_ctx(fl, &dispose);
1248					continue;
1249				}
1250				/*
1251				 * Replace the old lock with new_fl, and
1252				 * remove the old one. It's safe to do the
1253				 * insert here since we know that we won't be
1254				 * using new_fl later, and that the lock is
1255				 * just replacing an existing lock.
1256				 */
1257				error = -ENOLCK;
1258				if (!new_fl)
1259					goto out;
1260				locks_copy_lock(new_fl, request);
1261				locks_move_blocks(new_fl, request);
1262				request = new_fl;
1263				new_fl = NULL;
1264				locks_insert_lock_ctx(request, &fl->fl_list);
1265				locks_delete_lock_ctx(fl, &dispose);
1266				added = true;
1267			}
1268		}
1269	}
1270
1271	/*
1272	 * The above code only modifies existing locks in case of merging or
 1273	 * replacing. If new lock(s) need to be inserted, all modifications are
 1274	 * done below this, so it is still safe to bail out.
1275	 */
1276	error = -ENOLCK; /* "no luck" */
1277	if (right && left == right && !new_fl2)
1278		goto out;
1279
1280	error = 0;
1281	if (!added) {
1282		if (request->fl_type == F_UNLCK) {
1283			if (request->fl_flags & FL_EXISTS)
1284				error = -ENOENT;
1285			goto out;
1286		}
1287
1288		if (!new_fl) {
1289			error = -ENOLCK;
1290			goto out;
1291		}
1292		locks_copy_lock(new_fl, request);
1293		locks_move_blocks(new_fl, request);
1294		locks_insert_lock_ctx(new_fl, &fl->fl_list);
1295		fl = new_fl;
1296		new_fl = NULL;
1297	}
1298	if (right) {
1299		if (left == right) {
1300			/* The new lock breaks the old one in two pieces,
1301			 * so we have to use the second new lock.
1302			 */
1303			left = new_fl2;
1304			new_fl2 = NULL;
1305			locks_copy_lock(left, right);
1306			locks_insert_lock_ctx(left, &fl->fl_list);
1307		}
1308		right->fl_start = request->fl_end + 1;
1309		locks_wake_up_blocks(right);
1310	}
1311	if (left) {
1312		left->fl_end = request->fl_start - 1;
1313		locks_wake_up_blocks(left);
1314	}
1315 out:
1316	spin_unlock(&ctx->flc_lock);
1317	percpu_up_read(&file_rwsem);
1318	trace_posix_lock_inode(inode, request, error);
1319	/*
1320	 * Free any unused locks.
1321	 */
1322	if (new_fl)
1323		locks_free_lock(new_fl);
1324	if (new_fl2)
1325		locks_free_lock(new_fl2);
1326	locks_dispose_list(&dispose);
1327
1328	return error;
1329}
1330
1331/**
1332 * posix_lock_file - Apply a POSIX-style lock to a file
1333 * @filp: The file to apply the lock to
1334 * @fl: The lock to be applied
1335 * @conflock: Place to return a copy of the conflicting lock, if found.
1336 *
1337 * Add a POSIX style lock to a file.
1338 * We merge adjacent & overlapping locks whenever possible.
 1339 * POSIX locks are sorted by owner task, then by starting address.
1340 *
1341 * Note that if called with an FL_EXISTS argument, the caller may determine
1342 * whether or not a lock was successfully freed by testing the return
1343 * value for -ENOENT.
1344 */
1345int posix_lock_file(struct file *filp, struct file_lock *fl,
1346			struct file_lock *conflock)
1347{
1348	return posix_lock_inode(file_inode(filp), fl, conflock);
1349}
1350EXPORT_SYMBOL(posix_lock_file);
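/*
 * For most filesystems this is the function behind fcntl(F_SETLK/F_SETLKW).
 * An illustrative userspace counterpart (hypothetical fd):
 *
 *	struct flock fl = {
 *		.l_type = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start = 0,
 *		.l_len = 100,		// lock bytes [0, 99]
 *	};
 *	fcntl(fd, F_SETLKW, &fl);	// wait until the range can be locked
 *	fl.l_type = F_UNLCK;
 *	fcntl(fd, F_SETLK, &fl);	// unlocking may split or merge regions
 */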
1351
1352/**
1353 * posix_lock_inode_wait - Apply a POSIX-style lock to a file
1354 * @inode: inode of file to which lock request should be applied
1355 * @fl: The lock to be applied
1356 *
1357 * Apply a POSIX style lock request to an inode.
1358 */
1359static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1360{
1361	int error;
 1362	might_sleep();
1363	for (;;) {
1364		error = posix_lock_inode(inode, fl, NULL);
1365		if (error != FILE_LOCK_DEFERRED)
1366			break;
1367		error = wait_event_interruptible(fl->fl_wait,
1368					list_empty(&fl->fl_blocked_member));
1369		if (error)
1370			break;
1371	}
1372	locks_delete_block(fl);
1373	return error;
1374}
1375
1376static void lease_clear_pending(struct file_lock *fl, int arg)
1377{
1378	switch (arg) {
1379	case F_UNLCK:
1380		fl->fl_flags &= ~FL_UNLOCK_PENDING;
1381		fallthrough;
1382	case F_RDLCK:
1383		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
1384	}
1385}
1386
1387/* We already had a lease on this file; just change its type */
1388int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
1389{
1390	int error = assign_type(fl, arg);
1391
1392	if (error)
1393		return error;
1394	lease_clear_pending(fl, arg);
1395	locks_wake_up_blocks(fl);
1396	if (arg == F_UNLCK) {
1397		struct file *filp = fl->fl_file;
1398
1399		f_delown(filp);
1400		filp->f_owner.signum = 0;
1401		fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
1402		if (fl->fl_fasync != NULL) {
1403			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
1404			fl->fl_fasync = NULL;
1405		}
1406		locks_delete_lock_ctx(fl, dispose);
1407	}
1408	return 0;
1409}
1410EXPORT_SYMBOL(lease_modify);
1411
1412static bool past_time(unsigned long then)
1413{
1414	if (!then)
1415		/* 0 is a special value meaning "this never expires": */
1416		return false;
1417	return time_after(jiffies, then);
1418}
1419
1420static void time_out_leases(struct inode *inode, struct list_head *dispose)
1421{
1422	struct file_lock_context *ctx = inode->i_flctx;
1423	struct file_lock *fl, *tmp;
1424
1425	lockdep_assert_held(&ctx->flc_lock);
1426
1427	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1428		trace_time_out_leases(inode, fl);
1429		if (past_time(fl->fl_downgrade_time))
1430			lease_modify(fl, F_RDLCK, dispose);
1431		if (past_time(fl->fl_break_time))
1432			lease_modify(fl, F_UNLCK, dispose);
1433	}
1434}
1435
1436static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
1437{
1438	bool rc;
1439
1440	if (lease->fl_lmops->lm_breaker_owns_lease
1441			&& lease->fl_lmops->lm_breaker_owns_lease(lease))
1442		return false;
1443	if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) {
1444		rc = false;
1445		goto trace;
1446	}
1447	if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE)) {
1448		rc = false;
1449		goto trace;
1450	}
1451
1452	rc = locks_conflict(breaker, lease);
1453trace:
1454	trace_leases_conflict(rc, lease, breaker);
1455	return rc;
1456}
1457
1458static bool
1459any_leases_conflict(struct inode *inode, struct file_lock *breaker)
1460{
1461	struct file_lock_context *ctx = inode->i_flctx;
1462	struct file_lock *fl;
1463
1464	lockdep_assert_held(&ctx->flc_lock);
1465
1466	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1467		if (leases_conflict(fl, breaker))
1468			return true;
1469	}
1470	return false;
1471}
1472
1473/**
1474 *	__break_lease	-	revoke all outstanding leases on file
1475 *	@inode: the inode of the file to return
1476 *	@mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
1477 *	    break all leases
1478 *	@type: FL_LEASE: break leases and delegations; FL_DELEG: break
1479 *	    only delegations
1480 *
1481 *	break_lease (inlined for speed) has checked there already is at least
1482 *	some kind of lock (maybe a lease) on this file.  Leases are broken on
1483 *	a call to open() or truncate().  This function can sleep unless you
1484 *	specified %O_NONBLOCK to your open().
1485 */
1486int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1487{
1488	int error = 0;
1489	struct file_lock_context *ctx;
1490	struct file_lock *new_fl, *fl, *tmp;
1491	unsigned long break_time;
1492	int want_write = (mode & O_ACCMODE) != O_RDONLY;
1493	LIST_HEAD(dispose);
1494
1495	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1496	if (IS_ERR(new_fl))
1497		return PTR_ERR(new_fl);
1498	new_fl->fl_flags = type;
1499
1500	/* typically we will check that ctx is non-NULL before calling */
1501	ctx = locks_inode_context(inode);
1502	if (!ctx) {
1503		WARN_ON_ONCE(1);
1504		goto free_lock;
1505	}
1506
1507	percpu_down_read(&file_rwsem);
1508	spin_lock(&ctx->flc_lock);
1509
1510	time_out_leases(inode, &dispose);
1511
1512	if (!any_leases_conflict(inode, new_fl))
1513		goto out;
1514
1515	break_time = 0;
1516	if (lease_break_time > 0) {
1517		break_time = jiffies + lease_break_time * HZ;
1518		if (break_time == 0)
1519			break_time++;	/* so that 0 means no break time */
1520	}
1521
1522	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1523		if (!leases_conflict(fl, new_fl))
1524			continue;
1525		if (want_write) {
1526			if (fl->fl_flags & FL_UNLOCK_PENDING)
1527				continue;
1528			fl->fl_flags |= FL_UNLOCK_PENDING;
1529			fl->fl_break_time = break_time;
1530		} else {
1531			if (lease_breaking(fl))
1532				continue;
1533			fl->fl_flags |= FL_DOWNGRADE_PENDING;
1534			fl->fl_downgrade_time = break_time;
1535		}
1536		if (fl->fl_lmops->lm_break(fl))
1537			locks_delete_lock_ctx(fl, &dispose);
1538	}
1539
1540	if (list_empty(&ctx->flc_lease))
1541		goto out;
1542
1543	if (mode & O_NONBLOCK) {
1544		trace_break_lease_noblock(inode, new_fl);
1545		error = -EWOULDBLOCK;
1546		goto out;
1547	}
1548
1549restart:
1550	fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
1551	break_time = fl->fl_break_time;
1552	if (break_time != 0)
1553		break_time -= jiffies;
1554	if (break_time == 0)
1555		break_time++;
1556	locks_insert_block(fl, new_fl, leases_conflict);
1557	trace_break_lease_block(inode, new_fl);
1558	spin_unlock(&ctx->flc_lock);
1559	percpu_up_read(&file_rwsem);
1560
1561	locks_dispose_list(&dispose);
1562	error = wait_event_interruptible_timeout(new_fl->fl_wait,
1563					list_empty(&new_fl->fl_blocked_member),
1564					break_time);
1565
1566	percpu_down_read(&file_rwsem);
1567	spin_lock(&ctx->flc_lock);
1568	trace_break_lease_unblock(inode, new_fl);
1569	locks_delete_block(new_fl);
1570	if (error >= 0) {
1571		/*
1572		 * Wait for the next conflicting lease that has not been
1573		 * broken yet
1574		 */
1575		if (error == 0)
1576			time_out_leases(inode, &dispose);
1577		if (any_leases_conflict(inode, new_fl))
1578			goto restart;
1579		error = 0;
1580	}
1581out:
1582	spin_unlock(&ctx->flc_lock);
1583	percpu_up_read(&file_rwsem);
1584	locks_dispose_list(&dispose);
1585free_lock:
1586	locks_free_lock(new_fl);
1587	return error;
1588}
1589EXPORT_SYMBOL(__break_lease);
1590
1591/**
1592 *	lease_get_mtime - update modified time of an inode with exclusive lease
1593 *	@inode: the inode
1594 *      @time:  pointer to a timespec which contains the last modified time
1595 *
1596 * This is to force NFS clients to flush their caches for files with
1597 * exclusive leases.  The justification is that if someone has an
1598 * exclusive lease, then they could be modifying it.
1599 */
1600void lease_get_mtime(struct inode *inode, struct timespec64 *time)
1601{
1602	bool has_lease = false;
1603	struct file_lock_context *ctx;
1604	struct file_lock *fl;
1605
1606	ctx = locks_inode_context(inode);
1607	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1608		spin_lock(&ctx->flc_lock);
1609		fl = list_first_entry_or_null(&ctx->flc_lease,
1610					      struct file_lock, fl_list);
1611		if (fl && (fl->fl_type == F_WRLCK))
1612			has_lease = true;
1613		spin_unlock(&ctx->flc_lock);
1614	}
1615
1616	if (has_lease)
1617		*time = current_time(inode);
1618}
1619EXPORT_SYMBOL(lease_get_mtime);
1620
1621/**
1622 *	fcntl_getlease - Enquire what lease is currently active
1623 *	@filp: the file
1624 *
1625 *	The value returned by this function will be one of
1626 *	(if no lease break is pending):
1627 *
1628 *	%F_RDLCK to indicate a shared lease is held.
1629 *
1630 *	%F_WRLCK to indicate an exclusive lease is held.
1631 *
1632 *	%F_UNLCK to indicate no lease is held.
1633 *
1634 *	(if a lease break is pending):
1635 *
1636 *	%F_RDLCK to indicate an exclusive lease needs to be
1637 *		changed to a shared lease (or removed).
1638 *
1639 *	%F_UNLCK to indicate the lease needs to be removed.
1640 *
1641 *	XXX: sfr & willy disagree over whether F_INPROGRESS
1642 *	should be returned to userspace.
1643 */
1644int fcntl_getlease(struct file *filp)
1645{
1646	struct file_lock *fl;
1647	struct inode *inode = file_inode(filp);
1648	struct file_lock_context *ctx;
1649	int type = F_UNLCK;
1650	LIST_HEAD(dispose);
1651
1652	ctx = locks_inode_context(inode);
1653	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1654		percpu_down_read(&file_rwsem);
1655		spin_lock(&ctx->flc_lock);
1656		time_out_leases(inode, &dispose);
1657		list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1658			if (fl->fl_file != filp)
1659				continue;
1660			type = target_leasetype(fl);
1661			break;
1662		}
1663		spin_unlock(&ctx->flc_lock);
1664		percpu_up_read(&file_rwsem);
1665
1666		locks_dispose_list(&dispose);
1667	}
1668	return type;
1669}
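/*
 * Userspace reaches fcntl_getlease() via fcntl(fd, F_GETLEASE).  An
 * illustrative check (hypothetical fd):
 *
 *	switch (fcntl(fd, F_GETLEASE)) {
 *	case F_RDLCK:	// shared lease (or exclusive one being downgraded)
 *		break;
 *	case F_WRLCK:	// exclusive lease, no break pending
 *		break;
 *	case F_UNLCK:	// no lease, or a lease that must be removed
 *		break;
 *	}
 */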
1670
1671/**
1672 * check_conflicting_open - see if the given file points to an inode that has
1673 *			    an existing open that would conflict with the
1674 *			    desired lease.
1675 * @filp:	file to check
1676 * @arg:	type of lease that we're trying to acquire
1677 * @flags:	current lock flags
1678 *
1679 * Check to see if there's an existing open fd on this file that would
1680 * conflict with the lease we're trying to set.
1681 */
1682static int
1683check_conflicting_open(struct file *filp, const int arg, int flags)
1684{
1685	struct inode *inode = file_inode(filp);
1686	int self_wcount = 0, self_rcount = 0;
1687
1688	if (flags & FL_LAYOUT)
1689		return 0;
1690	if (flags & FL_DELEG)
1691		/* We leave these checks to the caller */
1692		return 0;
1693
1694	if (arg == F_RDLCK)
1695		return inode_is_open_for_write(inode) ? -EAGAIN : 0;
1696	else if (arg != F_WRLCK)
1697		return 0;
1698
1699	/*
1700	 * Make sure that only read/write count is from lease requestor.
1701	 * Note that this will result in denying write leases when i_writecount
1702	 * is negative, which is what we want.  (We shouldn't grant write leases
1703	 * on files open for execution.)
1704	 */
1705	if (filp->f_mode & FMODE_WRITE)
1706		self_wcount = 1;
1707	else if (filp->f_mode & FMODE_READ)
1708		self_rcount = 1;
1709
1710	if (atomic_read(&inode->i_writecount) != self_wcount ||
1711	    atomic_read(&inode->i_readcount) != self_rcount)
1712		return -EAGAIN;
1713
1714	return 0;
1715}
1716
1717static int
1718generic_add_lease(struct file *filp, int arg, struct file_lock **flp, void **priv)
1719{
1720	struct file_lock *fl, *my_fl = NULL, *lease;
1721	struct inode *inode = file_inode(filp);
1722	struct file_lock_context *ctx;
1723	bool is_deleg = (*flp)->fl_flags & FL_DELEG;
1724	int error;
1725	LIST_HEAD(dispose);
1726
1727	lease = *flp;
1728	trace_generic_add_lease(inode, lease);
1729
1730	/* Note that arg is never F_UNLCK here */
1731	ctx = locks_get_lock_context(inode, arg);
1732	if (!ctx)
1733		return -ENOMEM;
1734
1735	/*
1736	 * In the delegation case we need mutual exclusion with
1737	 * a number of operations that take the i_mutex.  We trylock
1738	 * because delegations are an optional optimization, and if
 1739	 * there's some chance of a conflict we'd rather not
 1740	 * bother; maybe that's a sign this just isn't a good file to
1741	 * hand out a delegation on.
1742	 */
1743	if (is_deleg && !inode_trylock(inode))
1744		return -EAGAIN;
1745
1746	percpu_down_read(&file_rwsem);
1747	spin_lock(&ctx->flc_lock);
1748	time_out_leases(inode, &dispose);
1749	error = check_conflicting_open(filp, arg, lease->fl_flags);
1750	if (error)
1751		goto out;
1752
1753	/*
1754	 * At this point, we know that if there is an exclusive
1755	 * lease on this file, then we hold it on this filp
1756	 * (otherwise our open of this file would have blocked).
1757	 * And if we are trying to acquire an exclusive lease,
1758	 * then the file is not open by anyone (including us)
1759	 * except for this filp.
1760	 */
1761	error = -EAGAIN;
1762	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1763		if (fl->fl_file == filp &&
1764		    fl->fl_owner == lease->fl_owner) {
1765			my_fl = fl;
1766			continue;
1767		}
1768
1769		/*
1770		 * No exclusive leases if someone else has a lease on
1771		 * this file:
1772		 */
1773		if (arg == F_WRLCK)
1774			goto out;
1775		/*
1776		 * Modifying our existing lease is OK, but no getting a
1777		 * new lease if someone else is opening for write:
1778		 */
1779		if (fl->fl_flags & FL_UNLOCK_PENDING)
1780			goto out;
1781	}
1782
1783	if (my_fl != NULL) {
1784		lease = my_fl;
1785		error = lease->fl_lmops->lm_change(lease, arg, &dispose);
1786		if (error)
1787			goto out;
1788		goto out_setup;
1789	}
1790
1791	error = -EINVAL;
1792	if (!leases_enable)
1793		goto out;
1794
1795	locks_insert_lock_ctx(lease, &ctx->flc_lease);
1796	/*
1797	 * The check in break_lease() is lockless. It's possible for another
1798	 * open to race in after we did the earlier check for a conflicting
1799	 * open but before the lease was inserted. Check again for a
1800	 * conflicting open and cancel the lease if there is one.
1801	 *
1802	 * We also add a barrier here to ensure that the insertion of the lock
1803	 * precedes these checks.
1804	 */
1805	smp_mb();
1806	error = check_conflicting_open(filp, arg, lease->fl_flags);
1807	if (error) {
1808		locks_unlink_lock_ctx(lease);
1809		goto out;
1810	}
1811
1812out_setup:
1813	if (lease->fl_lmops->lm_setup)
1814		lease->fl_lmops->lm_setup(lease, priv);
1815out:
1816	spin_unlock(&ctx->flc_lock);
1817	percpu_up_read(&file_rwsem);
1818	locks_dispose_list(&dispose);
1819	if (is_deleg)
1820		inode_unlock(inode);
1821	if (!error && !my_fl)
1822		*flp = NULL;
1823	return error;
1824}
1825
1826static int generic_delete_lease(struct file *filp, void *owner)
1827{
1828	int error = -EAGAIN;
1829	struct file_lock *fl, *victim = NULL;
1830	struct inode *inode = file_inode(filp);
1831	struct file_lock_context *ctx;
1832	LIST_HEAD(dispose);
1833
1834	ctx = locks_inode_context(inode);
1835	if (!ctx) {
1836		trace_generic_delete_lease(inode, NULL);
1837		return error;
1838	}
1839
1840	percpu_down_read(&file_rwsem);
1841	spin_lock(&ctx->flc_lock);
1842	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1843		if (fl->fl_file == filp &&
1844		    fl->fl_owner == owner) {
1845			victim = fl;
1846			break;
1847		}
1848	}
1849	trace_generic_delete_lease(inode, victim);
1850	if (victim)
1851		error = victim->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
1852	spin_unlock(&ctx->flc_lock);
1853	percpu_up_read(&file_rwsem);
1854	locks_dispose_list(&dispose);
1855	return error;
1856}
1857
1858/**
1859 *	generic_setlease	-	sets a lease on an open file
1860 *	@filp:	file pointer
1861 *	@arg:	type of lease to obtain
1862 *	@flp:	input - file_lock to use, output - file_lock inserted
1863 *	@priv:	private data for lm_setup (may be NULL if lm_setup
1864 *		doesn't require it)
1865 *
1866 *	The (input) flp->fl_lmops->lm_break function is required
1867 *	by break_lease().
1868 */
1869int generic_setlease(struct file *filp, int arg, struct file_lock **flp,
1870			void **priv)
1871{
1872	struct inode *inode = file_inode(filp);
1873	vfsuid_t vfsuid = i_uid_into_vfsuid(file_mnt_idmap(filp), inode);
1874	int error;
1875
1876	if ((!vfsuid_eq_kuid(vfsuid, current_fsuid())) && !capable(CAP_LEASE))
1877		return -EACCES;
1878	if (!S_ISREG(inode->i_mode))
1879		return -EINVAL;
1880	error = security_file_lock(filp, arg);
1881	if (error)
1882		return error;
1883
1884	switch (arg) {
1885	case F_UNLCK:
1886		return generic_delete_lease(filp, *priv);
1887	case F_RDLCK:
1888	case F_WRLCK:
1889		if (!(*flp)->fl_lmops->lm_break) {
1890			WARN_ON_ONCE(1);
1891			return -ENOLCK;
1892		}
1893
1894		return generic_add_lease(filp, arg, flp, priv);
1895	default:
1896		return -EINVAL;
1897	}
1898}
1899EXPORT_SYMBOL(generic_setlease);
1900
1901/*
1902 * Kernel subsystems can register to be notified on any attempt to set
1903 * a new lease with the lease_notifier_chain. This is used by (e.g.) nfsd
1904 * to close files that it may have cached when there is an attempt to set a
1905 * conflicting lease.
1906 */
1907static struct srcu_notifier_head lease_notifier_chain;
1908
1909static inline void
1910lease_notifier_chain_init(void)
1911{
1912	srcu_init_notifier_head(&lease_notifier_chain);
1913}
1914
1915static inline void
1916setlease_notifier(int arg, struct file_lock *lease)
1917{
1918	if (arg != F_UNLCK)
1919		srcu_notifier_call_chain(&lease_notifier_chain, arg, lease);
1920}
1921
1922int lease_register_notifier(struct notifier_block *nb)
1923{
1924	return srcu_notifier_chain_register(&lease_notifier_chain, nb);
1925}
1926EXPORT_SYMBOL_GPL(lease_register_notifier);
1927
1928void lease_unregister_notifier(struct notifier_block *nb)
1929{
1930	srcu_notifier_chain_unregister(&lease_notifier_chain, nb);
1931}
1932EXPORT_SYMBOL_GPL(lease_unregister_notifier);
1933
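/*
 * Illustrative sketch (not part of this file): how a subsystem might
 * watch for new leases. The names "my_lease_cb" and "my_nb" are
 * hypothetical. The notifier is invoked with the requested lease type
 * in @arg and the struct file_lock being set in @data.
 *
 *	static int my_lease_cb(struct notifier_block *nb,
 *			       unsigned long arg, void *data)
 *	{
 *		struct file_lock *lease = data;
 *
 *		// e.g. flush any cached state that conflicts with the lease
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_lease_cb,
 *	};
 *
 *	err = lease_register_notifier(&my_nb);
 *	...
 *	lease_unregister_notifier(&my_nb);
 */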
1934/**
1935 * vfs_setlease        -       sets a lease on an open file
1936 * @filp:	file pointer
1937 * @arg:	type of lease to obtain
1938 * @lease:	file_lock to use when adding a lease
1939 * @priv:	private info for lm_setup when adding a lease (may be
1940 *		NULL if lm_setup doesn't require it)
1941 *
1942 * Call this to establish a lease on the file. The "lease" argument is not
1943 * used for F_UNLCK requests and may be NULL. For commands that set or alter
1944 * an existing lease, the ``(*lease)->fl_lmops->lm_break`` operation must be
1945 * set; if not, this function will return -ENOLCK (and generate a scary-looking
1946 * stack trace).
1947 *
1948 * The "priv" pointer is passed directly to the lm_setup function as-is. It
1949 * may be NULL if the lm_setup operation doesn't require it.
1950 */
1951int
1952vfs_setlease(struct file *filp, int arg, struct file_lock **lease, void **priv)
1953{
1954	if (lease)
1955		setlease_notifier(arg, *lease);
1956	if (filp->f_op->setlease)
1957		return filp->f_op->setlease(filp, arg, lease, priv);
1958	else
1959		return generic_setlease(filp, arg, lease, priv);
1960}
1961EXPORT_SYMBOL_GPL(vfs_setlease);
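/*
 * Illustrative usage sketch (an assumption, not code from this file),
 * following the same pattern as do_fcntl_add_lease() below: the caller
 * allocates a lease and a private cookie, then frees whichever of the
 * two vfs_setlease() did not consume.
 *
 *	struct file_lock *fl = lease_alloc(filp, F_RDLCK);
 *	struct fasync_struct *new = fasync_alloc();
 *	...
 *	error = vfs_setlease(filp, F_RDLCK, &fl, (void **)&new);
 *	if (fl)
 *		locks_free_lock(fl);	// lease was not inserted
 *	if (new)
 *		fasync_free(new);	// cookie was not used by lm_setup
 */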
1962
1963static int do_fcntl_add_lease(unsigned int fd, struct file *filp, int arg)
1964{
1965	struct file_lock *fl;
1966	struct fasync_struct *new;
1967	int error;
1968
1969	fl = lease_alloc(filp, arg);
1970	if (IS_ERR(fl))
1971		return PTR_ERR(fl);
1972
1973	new = fasync_alloc();
1974	if (!new) {
1975		locks_free_lock(fl);
1976		return -ENOMEM;
1977	}
1978	new->fa_fd = fd;
1979
1980	error = vfs_setlease(filp, arg, &fl, (void **)&new);
1981	if (fl)
1982		locks_free_lock(fl);
1983	if (new)
1984		fasync_free(new);
1985	return error;
1986}
1987
1988/**
1989 *	fcntl_setlease	-	sets a lease on an open file
1990 *	@fd: open file descriptor
1991 *	@filp: file pointer
1992 *	@arg: type of lease to obtain
1993 *
1994 *	Call this fcntl to establish a lease on the file.
1995 *	Note that you also need to call %F_SETSIG to
1996 *	receive a signal when the lease is broken.
1997 */
1998int fcntl_setlease(unsigned int fd, struct file *filp, int arg)
1999{
2000	if (arg == F_UNLCK)
2001		return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
2002	return do_fcntl_add_lease(fd, filp, arg);
2003}
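/*
 * Userspace view (illustrative, not part of the kernel): taking and
 * dropping a read lease, with a signal delivered on lease break.
 *
 *	fcntl(fd, F_SETSIG, SIGIO);		// signal to send on break
 *	if (fcntl(fd, F_SETLEASE, F_RDLCK) == -1)
 *		perror("F_SETLEASE");
 *	...
 *	fcntl(fd, F_SETLEASE, F_UNLCK);		// release the lease
 */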
2004
2005/**
2006 * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
2007 * @inode: inode of the file to apply to
2008 * @fl: The lock to be applied
2009 *
2010 * Apply a FLOCK style lock request to an inode.
2011 */
2012static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2013{
2014	int error;
2015	might_sleep();
2016	for (;;) {
2017		error = flock_lock_inode(inode, fl);
2018		if (error != FILE_LOCK_DEFERRED)
2019			break;
2020		error = wait_event_interruptible(fl->fl_wait,
2021				list_empty(&fl->fl_blocked_member));
2022		if (error)
2023			break;
2024	}
2025	locks_delete_block(fl);
2026	return error;
2027}
2028
2029/**
2030 * locks_lock_inode_wait - Apply a lock to an inode
2031 * @inode: inode of the file to apply to
2032 * @fl: The lock to be applied
2033 *
2034 * Apply a POSIX or FLOCK style lock request to an inode.
2035 */
2036int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2037{
2038	int res = 0;
2039	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
2040		case FL_POSIX:
2041			res = posix_lock_inode_wait(inode, fl);
2042			break;
2043		case FL_FLOCK:
2044			res = flock_lock_inode_wait(inode, fl);
2045			break;
2046		default:
2047			BUG();
2048	}
2049	return res;
2050}
2051EXPORT_SYMBOL(locks_lock_inode_wait);
2052
2053/**
2054 *	sys_flock: - flock() system call.
2055 *	@fd: the file descriptor to lock.
2056 *	@cmd: the type of lock to apply.
2057 *
2058 *	Apply a %FL_FLOCK style lock to an open file descriptor.
2059 *	The @cmd can be one of:
2060 *
2061 *	- %LOCK_SH -- a shared lock.
2062 *	- %LOCK_EX -- an exclusive lock.
2063 *	- %LOCK_UN -- remove an existing lock.
2064 *	- %LOCK_MAND -- a 'mandatory' flock. (DEPRECATED)
2065 *
2066 *	%LOCK_MAND support has been removed from the kernel.
2067 */
2068SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
2069{
2070	int can_sleep, error, type;
2071	struct file_lock fl;
2072	struct fd f;
2073
2074	/*
2075	 * LOCK_MAND locks were broken for a long time in that they never
2076	 * conflicted with one another and didn't prevent any sort of open,
2077	 * read or write activity.
2078	 *
2079	 * Just ignore these requests now, to preserve legacy behavior, but
2080	 * throw a warning to let people know that they don't actually work.
2081	 */
2082	if (cmd & LOCK_MAND) {
2083		pr_warn_once("%s(%d): Attempt to set a LOCK_MAND lock via flock(2). This support has been removed and the request ignored.\n", current->comm, current->pid);
2084		return 0;
2085	}
2086
2087	type = flock_translate_cmd(cmd & ~LOCK_NB);
2088	if (type < 0)
2089		return type;
2090
2091	error = -EBADF;
2092	f = fdget(fd);
2093	if (!f.file)
2094		return error;
2095
2096	if (type != F_UNLCK && !(f.file->f_mode & (FMODE_READ | FMODE_WRITE)))
2097		goto out_putf;
2098
2099	flock_make_lock(f.file, &fl, type);
2100
2101	error = security_file_lock(f.file, fl.fl_type);
2102	if (error)
2103		goto out_putf;
2104
2105	can_sleep = !(cmd & LOCK_NB);
2106	if (can_sleep)
2107		fl.fl_flags |= FL_SLEEP;
2108
2109	if (f.file->f_op->flock)
2110		error = f.file->f_op->flock(f.file,
2111					    (can_sleep) ? F_SETLKW : F_SETLK,
2112					    &fl);
2113	else
2114		error = locks_lock_file_wait(f.file, &fl);
2115
2116	locks_release_private(&fl);
2117 out_putf:
2118	fdput(f);
2119
2120	return error;
2121}
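/*
 * Userspace view of the syscall above (illustrative;
 * handle_contention() is a hypothetical helper):
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1 && errno == EWOULDBLOCK)
 *		handle_contention();	// another fd holds the lock
 *	...
 *	flock(fd, LOCK_UN);
 */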
2122
2123/**
2124 * vfs_test_lock - test file byte range lock
2125 * @filp: The file to test lock for
2126 * @fl: The lock to test; also used to hold result
2127 *
2128 * Returns -ERRNO on failure.  Indicates the presence of a conflicting lock
2129 * by setting fl->fl_type to something other than F_UNLCK.
2130 */
2131int vfs_test_lock(struct file *filp, struct file_lock *fl)
2132{
2133	WARN_ON_ONCE(filp != fl->fl_file);
2134	if (filp->f_op->lock)
2135		return filp->f_op->lock(filp, F_GETLK, fl);
2136	posix_test_lock(filp, fl);
2137	return 0;
2138}
2139EXPORT_SYMBOL_GPL(vfs_test_lock);
2140
2141/**
2142 * locks_translate_pid - translate a file_lock's fl_pid number into a namespace
2143 * @fl: The file_lock whose fl_pid should be translated
2144 * @ns: The namespace into which the pid should be translated
2145 *
2146 * Used to translate a fl_pid into a namespace virtual pid number
2147 */
2148static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
2149{
2150	pid_t vnr;
2151	struct pid *pid;
2152
2153	if (IS_OFDLCK(fl))
2154		return -1;
2155	if (IS_REMOTELCK(fl))
2156		return fl->fl_pid;
2157	/*
2158	 * If the flock owner process is dead and its pid has already been
2159	 * freed, the translation below won't work, but we still want to show
2160	 * the flock owner's pid number in the init pidns.
2161	 */
2162	if (ns == &init_pid_ns)
2163		return (pid_t)fl->fl_pid;
2164
2165	rcu_read_lock();
2166	pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
2167	vnr = pid_nr_ns(pid, ns);
2168	rcu_read_unlock();
2169	return vnr;
2170}
2171
2172static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
2173{
2174	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2175#if BITS_PER_LONG == 32
2176	/*
2177	 * Make sure we can represent the posix lock via
2178	 * legacy 32bit flock.
2179	 */
2180	if (fl->fl_start > OFFT_OFFSET_MAX)
2181		return -EOVERFLOW;
2182	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
2183		return -EOVERFLOW;
2184#endif
2185	flock->l_start = fl->fl_start;
2186	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2187		fl->fl_end - fl->fl_start + 1;
2188	flock->l_whence = 0;
2189	flock->l_type = fl->fl_type;
2190	return 0;
2191}
2192
2193#if BITS_PER_LONG == 32
2194static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
2195{
2196	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2197	flock->l_start = fl->fl_start;
2198	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2199		fl->fl_end - fl->fl_start + 1;
2200	flock->l_whence = 0;
2201	flock->l_type = fl->fl_type;
2202}
2203#endif
2204
2205/* Report the first existing lock that would conflict with l.
2206 * This implements the F_GETLK command of fcntl().
2207 */
2208int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
2209{
2210	struct file_lock *fl;
2211	int error;
2212
2213	fl = locks_alloc_lock();
2214	if (fl == NULL)
2215		return -ENOMEM;
2216	error = -EINVAL;
2217	if (cmd != F_OFD_GETLK && flock->l_type != F_RDLCK
2218			&& flock->l_type != F_WRLCK)
2219		goto out;
2220
2221	error = flock_to_posix_lock(filp, fl, flock);
2222	if (error)
2223		goto out;
2224
2225	if (cmd == F_OFD_GETLK) {
2226		error = -EINVAL;
2227		if (flock->l_pid != 0)
2228			goto out;
2229
2230		fl->fl_flags |= FL_OFDLCK;
2231		fl->fl_owner = filp;
2232	}
2233
2234	error = vfs_test_lock(filp, fl);
2235	if (error)
2236		goto out;
2237
2238	flock->l_type = fl->fl_type;
2239	if (fl->fl_type != F_UNLCK) {
2240		error = posix_lock_to_flock(flock, fl);
2241		if (error)
2242			goto out;
2243	}
2244out:
2245	locks_free_lock(fl);
2246	return error;
2247}
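/*
 * Userspace view (illustrative): probing for a conflicting lock. On
 * return, l_type is F_UNLCK if the range could be locked; otherwise the
 * struct describes the first conflicting lock.
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,		// zero means "to EOF"
 *	};
 *
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *		printf("conflicting lock held by pid %ld\n", (long)fl.l_pid);
 */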
2248
2249/**
2250 * vfs_lock_file - file byte range lock
2251 * @filp: The file to apply the lock to
2252 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
2253 * @fl: The lock to be applied
2254 * @conf: Place to return a copy of the conflicting lock, if found.
2255 *
2256 * A caller that doesn't care about the conflicting lock may pass NULL
2257 * as the final argument.
2258 *
2259 * If the filesystem defines a private ->lock() method, then @conf will
2260 * be left unchanged; so a caller that cares should initialize it to
2261 * some acceptable default.
2262 *
2263 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
2264 * locks, the ->lock() interface may return asynchronously, before the lock has
2265 * been granted or denied by the underlying filesystem, if (and only if)
2266 * lm_grant is set. Additionally, the EXPORT_OP_ASYNC_LOCK flag must be set
2267 * in the export_operations flags.
2268 *
2269 * Callers expecting ->lock() to return asynchronously will only use F_SETLK,
2270 * not F_SETLKW; they will set FL_SLEEP if (and only if) the request is for a
2271 * blocking lock. When ->lock() does return asynchronously, it must return
2272 * FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock request completes.
2274 * If the request is for a non-blocking lock, the file system should return
2275 * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
2276 * with the result. If the request timed out, the callback routine will return
2277 * a nonzero return code and the file system should release the lock. The file
2278 * system is also responsible for keeping a corresponding posix lock when it
2279 * grants a lock, so the VFS can find out which locks are locally held and do
2280 * the correct lock cleanup when required.
2280 * The underlying filesystem must not drop the kernel lock or call
2281 * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
2282 * return code.
2283 */
2284int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
2285{
2286	WARN_ON_ONCE(filp != fl->fl_file);
2287	if (filp->f_op->lock)
2288		return filp->f_op->lock(filp, cmd, fl);
2289	else
2290		return posix_lock_file(filp, fl, conf);
2291}
2292EXPORT_SYMBOL_GPL(vfs_lock_file);
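/*
 * Illustrative sketch of the asynchronous ->lock() contract described
 * above (hypothetical filesystem code, not from this file):
 *
 *	static int myfs_lock(struct file *filp, int cmd, struct file_lock *fl)
 *	{
 *		myfs_queue_lock_request(filp, fl);	// hypothetical helper
 *		return FILE_LOCK_DEFERRED;		// reply arrives later
 *	}
 *
 * When the reply arrives, the filesystem completes the request by
 * calling fl->fl_lmops->lm_grant(fl, result).
 */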
2293
2294static int do_lock_file_wait(struct file *filp, unsigned int cmd,
2295			     struct file_lock *fl)
2296{
2297	int error;
2298
2299	error = security_file_lock(filp, fl->fl_type);
2300	if (error)
2301		return error;
2302
2303	for (;;) {
2304		error = vfs_lock_file(filp, cmd, fl, NULL);
2305		if (error != FILE_LOCK_DEFERRED)
2306			break;
2307		error = wait_event_interruptible(fl->fl_wait,
2308					list_empty(&fl->fl_blocked_member));
2309		if (error)
2310			break;
2311	}
2312	locks_delete_block(fl);
2313
2314	return error;
2315}
2316
2317/* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
2318static int
2319check_fmode_for_setlk(struct file_lock *fl)
2320{
2321	switch (fl->fl_type) {
2322	case F_RDLCK:
2323		if (!(fl->fl_file->f_mode & FMODE_READ))
2324			return -EBADF;
2325		break;
2326	case F_WRLCK:
2327		if (!(fl->fl_file->f_mode & FMODE_WRITE))
2328			return -EBADF;
2329	}
2330	return 0;
2331}
2332
2333/* Apply the lock described by l to an open file descriptor.
2334 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2335 */
2336int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
2337		struct flock *flock)
2338{
2339	struct file_lock *file_lock = locks_alloc_lock();
2340	struct inode *inode = file_inode(filp);
2341	struct file *f;
2342	int error;
2343
2344	if (file_lock == NULL)
2345		return -ENOLCK;
2346
2347	error = flock_to_posix_lock(filp, file_lock, flock);
2348	if (error)
2349		goto out;
2350
2351	error = check_fmode_for_setlk(file_lock);
2352	if (error)
2353		goto out;
2354
2355	/*
2356	 * If the cmd is requesting file-private locks, then set the
2357	 * FL_OFDLCK flag and override the owner.
2358	 */
2359	switch (cmd) {
2360	case F_OFD_SETLK:
2361		error = -EINVAL;
2362		if (flock->l_pid != 0)
2363			goto out;
2364
2365		cmd = F_SETLK;
2366		file_lock->fl_flags |= FL_OFDLCK;
2367		file_lock->fl_owner = filp;
2368		break;
2369	case F_OFD_SETLKW:
2370		error = -EINVAL;
2371		if (flock->l_pid != 0)
2372			goto out;
2373
2374		cmd = F_SETLKW;
2375		file_lock->fl_flags |= FL_OFDLCK;
2376		file_lock->fl_owner = filp;
2377		fallthrough;
2378	case F_SETLKW:
2379		file_lock->fl_flags |= FL_SLEEP;
2380	}
2381
2382	error = do_lock_file_wait(filp, cmd, file_lock);
2383
2384	/*
2385	 * Attempt to detect a close/fcntl race and recover by releasing the
2386	 * lock that was just acquired. There is no need to do that when we're
2387	 * unlocking though, or for OFD locks.
2388	 */
2389	if (!error && file_lock->fl_type != F_UNLCK &&
2390	    !(file_lock->fl_flags & FL_OFDLCK)) {
2391		struct files_struct *files = current->files;
2392		/*
2393		 * We need that spin_lock here - it prevents reordering between
2394		 * update of i_flctx->flc_posix and check for it done in
2395		 * close(). rcu_read_lock() wouldn't do.
2396		 */
2397		spin_lock(&files->file_lock);
2398		f = files_lookup_fd_locked(files, fd);
2399		spin_unlock(&files->file_lock);
2400		if (f != filp) {
2401			file_lock->fl_type = F_UNLCK;
2402			error = do_lock_file_wait(filp, cmd, file_lock);
2403			WARN_ON_ONCE(error);
2404			error = -EBADF;
2405		}
2406	}
2407out:
2408	trace_fcntl_setlk(inode, file_lock, error);
2409	locks_free_lock(file_lock);
2410	return error;
2411}
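/*
 * Userspace view (illustrative): open file description (OFD) locks are
 * requested with l_pid set to zero and are owned by the open file
 * description rather than the process.
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,
 *		.l_pid    = 0,		// required for F_OFD_* commands
 *	};
 *
 *	if (fcntl(fd, F_OFD_SETLK, &fl) == -1)
 *		perror("F_OFD_SETLK");
 */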
2412
2413#if BITS_PER_LONG == 32
2414/* Report the first existing lock that would conflict with l.
2415 * This implements the F_GETLK command of fcntl().
2416 */
2417int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
2418{
2419	struct file_lock *fl;
2420	int error;
2421
2422	fl = locks_alloc_lock();
2423	if (fl == NULL)
2424		return -ENOMEM;
2425
2426	error = -EINVAL;
2427	if (cmd != F_OFD_GETLK && flock->l_type != F_RDLCK
2428			&& flock->l_type != F_WRLCK)
2429		goto out;
2430
2431	error = flock64_to_posix_lock(filp, fl, flock);
2432	if (error)
2433		goto out;
2434
2435	if (cmd == F_OFD_GETLK) {
2436		error = -EINVAL;
2437		if (flock->l_pid != 0)
2438			goto out;
2439
2440		fl->fl_flags |= FL_OFDLCK;
2441		fl->fl_owner = filp;
2442	}
2443
2444	error = vfs_test_lock(filp, fl);
2445	if (error)
2446		goto out;
2447
2448	flock->l_type = fl->fl_type;
2449	if (fl->fl_type != F_UNLCK)
2450		posix_lock_to_flock64(flock, fl);
2451
2452out:
2453	locks_free_lock(fl);
2454	return error;
2455}
2456
2457/* Apply the lock described by l to an open file descriptor.
2458 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2459 */
2460int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
2461		struct flock64 *flock)
2462{
2463	struct file_lock *file_lock = locks_alloc_lock();
2464	struct file *f;
2465	int error;
2466
2467	if (file_lock == NULL)
2468		return -ENOLCK;
2469
2470	error = flock64_to_posix_lock(filp, file_lock, flock);
2471	if (error)
2472		goto out;
2473
2474	error = check_fmode_for_setlk(file_lock);
2475	if (error)
2476		goto out;
2477
2478	/*
2479	 * If the cmd is requesting file-private locks, then set the
2480	 * FL_OFDLCK flag and override the owner.
2481	 */
2482	switch (cmd) {
2483	case F_OFD_SETLK:
2484		error = -EINVAL;
2485		if (flock->l_pid != 0)
2486			goto out;
2487
2488		cmd = F_SETLK64;
2489		file_lock->fl_flags |= FL_OFDLCK;
2490		file_lock->fl_owner = filp;
2491		break;
2492	case F_OFD_SETLKW:
2493		error = -EINVAL;
2494		if (flock->l_pid != 0)
2495			goto out;
2496
2497		cmd = F_SETLKW64;
2498		file_lock->fl_flags |= FL_OFDLCK;
2499		file_lock->fl_owner = filp;
2500		fallthrough;
2501	case F_SETLKW64:
2502		file_lock->fl_flags |= FL_SLEEP;
2503	}
2504
2505	error = do_lock_file_wait(filp, cmd, file_lock);
2506
2507	/*
2508	 * Attempt to detect a close/fcntl race and recover by releasing the
2509	 * lock that was just acquired. There is no need to do that when we're
2510	 * unlocking though, or for OFD locks.
2511	 */
2512	if (!error && file_lock->fl_type != F_UNLCK &&
2513	    !(file_lock->fl_flags & FL_OFDLCK)) {
2514		struct files_struct *files = current->files;
2515		/*
2516		 * We need that spin_lock here - it prevents reordering between
2517		 * update of i_flctx->flc_posix and check for it done in
2518		 * close(). rcu_read_lock() wouldn't do.
2519		 */
2520		spin_lock(&files->file_lock);
2521		f = files_lookup_fd_locked(files, fd);
2522		spin_unlock(&files->file_lock);
2523		if (f != filp) {
2524			file_lock->fl_type = F_UNLCK;
2525			error = do_lock_file_wait(filp, cmd, file_lock);
2526			WARN_ON_ONCE(error);
2527			error = -EBADF;
2528		}
2529	}
2530out:
2531	locks_free_lock(file_lock);
2532	return error;
2533}
2534#endif /* BITS_PER_LONG == 32 */
2535
2536/*
2537 * This function is called when the file is being removed
2538 * from the task's fd array.  POSIX locks belonging to this task
2539 * are deleted at this time.
2540 */
2541void locks_remove_posix(struct file *filp, fl_owner_t owner)
2542{
2543	int error;
2544	struct inode *inode = file_inode(filp);
2545	struct file_lock lock;
2546	struct file_lock_context *ctx;
2547
2548	/*
2549	 * If there are no locks held on this file, we don't need to call
2550	 * posix_lock_file().  Another process could be setting a lock on this
2551	 * file at the same time, but we wouldn't remove that lock anyway.
2552	 */
2553	ctx = locks_inode_context(inode);
2554	if (!ctx || list_empty(&ctx->flc_posix))
2555		return;
2556
2557	locks_init_lock(&lock);
2558	lock.fl_type = F_UNLCK;
2559	lock.fl_flags = FL_POSIX | FL_CLOSE;
2560	lock.fl_start = 0;
2561	lock.fl_end = OFFSET_MAX;
2562	lock.fl_owner = owner;
2563	lock.fl_pid = current->tgid;
2564	lock.fl_file = filp;
2565	lock.fl_ops = NULL;
2566	lock.fl_lmops = NULL;
2567
2568	error = vfs_lock_file(filp, F_SETLK, &lock, NULL);
2569
2570	if (lock.fl_ops && lock.fl_ops->fl_release_private)
2571		lock.fl_ops->fl_release_private(&lock);
2572	trace_locks_remove_posix(inode, &lock, error);
2573}
2574EXPORT_SYMBOL(locks_remove_posix);
2575
2576/* The i_flctx must be valid when calling into here */
2577static void
2578locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
2579{
2580	struct file_lock fl;
2581	struct inode *inode = file_inode(filp);
2582
2583	if (list_empty(&flctx->flc_flock))
2584		return;
2585
2586	flock_make_lock(filp, &fl, F_UNLCK);
2587	fl.fl_flags |= FL_CLOSE;
2588
2589	if (filp->f_op->flock)
2590		filp->f_op->flock(filp, F_SETLKW, &fl);
2591	else
2592		flock_lock_inode(inode, &fl);
2593
2594	if (fl.fl_ops && fl.fl_ops->fl_release_private)
2595		fl.fl_ops->fl_release_private(&fl);
2596}
2597
2598/* The i_flctx must be valid when calling into here */
2599static void
2600locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
2601{
2602	struct file_lock *fl, *tmp;
2603	LIST_HEAD(dispose);
2604
2605	if (list_empty(&ctx->flc_lease))
2606		return;
2607
2608	percpu_down_read(&file_rwsem);
2609	spin_lock(&ctx->flc_lock);
2610	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
2611		if (filp == fl->fl_file)
2612			lease_modify(fl, F_UNLCK, &dispose);
2613	spin_unlock(&ctx->flc_lock);
2614	percpu_up_read(&file_rwsem);
2615
2616	locks_dispose_list(&dispose);
2617}
2618
2619/*
2620 * This function is called on the last close of an open file.
2621 */
2622void locks_remove_file(struct file *filp)
2623{
2624	struct file_lock_context *ctx;
2625
2626	ctx = locks_inode_context(file_inode(filp));
2627	if (!ctx)
2628		return;
2629
2630	/* remove any OFD locks */
2631	locks_remove_posix(filp, filp);
2632
2633	/* remove flock locks */
2634	locks_remove_flock(filp, ctx);
2635
2636	/* remove any leases */
2637	locks_remove_lease(filp, ctx);
2638
2639	spin_lock(&ctx->flc_lock);
2640	locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX");
2641	locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK");
2642	locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE");
2643	spin_unlock(&ctx->flc_lock);
2644}
2645
2646/**
2647 * vfs_cancel_lock - file byte range unblock lock
2648 * @filp: The file to apply the unblock to
2649 * @fl: The lock to be unblocked
2650 *
2651 * Used by lock managers to cancel blocked requests
2652 */
2653int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2654{
2655	WARN_ON_ONCE(filp != fl->fl_file);
2656	if (filp->f_op->lock)
2657		return filp->f_op->lock(filp, F_CANCELLK, fl);
2658	return 0;
2659}
2660EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2661
2662/**
2663 * vfs_inode_has_locks - are any file locks held on @inode?
2664 * @inode: inode to check for locks
2665 *
2666 * Return true if there are any FL_POSIX or FL_FLOCK locks currently
2667 * set on @inode.
2668 */
2669bool vfs_inode_has_locks(struct inode *inode)
2670{
2671	struct file_lock_context *ctx;
2672	bool ret;
2673
2674	ctx = locks_inode_context(inode);
2675	if (!ctx)
2676		return false;
2677
2678	spin_lock(&ctx->flc_lock);
2679	ret = !list_empty(&ctx->flc_posix) || !list_empty(&ctx->flc_flock);
2680	spin_unlock(&ctx->flc_lock);
2681	return ret;
2682}
2683EXPORT_SYMBOL_GPL(vfs_inode_has_locks);
2684
2685#ifdef CONFIG_PROC_FS
2686#include <linux/proc_fs.h>
2687#include <linux/seq_file.h>
2688
2689struct locks_iterator {
2690	int	li_cpu;
2691	loff_t	li_pos;
2692};
2693
2694static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2695			    loff_t id, char *pfx, int repeat)
2696{
2697	struct inode *inode = NULL;
2698	unsigned int fl_pid;
2699	struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
2700	int type;
2701
2702	fl_pid = locks_translate_pid(fl, proc_pidns);
2703	/*
2704	 * If the lock owner is dead (and its pid has been freed) or is not
2705	 * visible in the current pidns, zero is shown as the pid value. Check
2706	 * the lock info from init_pid_ns to get the saved lock pid value.
2707	 */
2708
2709	if (fl->fl_file != NULL)
2710		inode = file_inode(fl->fl_file);
2711
2712	seq_printf(f, "%lld: ", id);
2713
2714	if (repeat)
2715		seq_printf(f, "%*s", repeat - 1 + (int)strlen(pfx), pfx);
2716
2717	if (IS_POSIX(fl)) {
2718		if (fl->fl_flags & FL_ACCESS)
2719			seq_puts(f, "ACCESS");
2720		else if (IS_OFDLCK(fl))
2721			seq_puts(f, "OFDLCK");
2722		else
2723			seq_puts(f, "POSIX ");
2724
2725		seq_printf(f, " %s ",
2726			     (inode == NULL) ? "*NOINODE*" : "ADVISORY ");
2727	} else if (IS_FLOCK(fl)) {
2728		seq_puts(f, "FLOCK  ADVISORY  ");
2729	} else if (IS_LEASE(fl)) {
2730		if (fl->fl_flags & FL_DELEG)
2731			seq_puts(f, "DELEG  ");
2732		else
2733			seq_puts(f, "LEASE  ");
2734
2735		if (lease_breaking(fl))
2736			seq_puts(f, "BREAKING  ");
2737		else if (fl->fl_file)
2738			seq_puts(f, "ACTIVE    ");
2739		else
2740			seq_puts(f, "BREAKER   ");
2741	} else {
2742		seq_puts(f, "UNKNOWN UNKNOWN  ");
2743	}
2744	type = IS_LEASE(fl) ? target_leasetype(fl) : fl->fl_type;
2745
2746	seq_printf(f, "%s ", (type == F_WRLCK) ? "WRITE" :
2747			     (type == F_RDLCK) ? "READ" : "UNLCK");
2748	if (inode) {
2749		/* userspace relies on this representation of dev_t */
2750		seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
2751				MAJOR(inode->i_sb->s_dev),
2752				MINOR(inode->i_sb->s_dev), inode->i_ino);
2753	} else {
2754		seq_printf(f, "%d <none>:0 ", fl_pid);
2755	}
2756	if (IS_POSIX(fl)) {
2757		if (fl->fl_end == OFFSET_MAX)
2758			seq_printf(f, "%Ld EOF\n", fl->fl_start);
2759		else
2760			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2761	} else {
2762		seq_puts(f, "0 EOF\n");
2763	}
2764}
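/*
 * Example of the /proc/locks lines produced above (illustrative values):
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:131090 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 1235 08:01:131091 0 EOF
 *	2: -> FLOCK  ADVISORY  WRITE 1236 08:01:131091 0 EOF
 *
 * The "->" prefix marks a request that is blocked by the lock above it.
 */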
2765
2766static struct file_lock *get_next_blocked_member(struct file_lock *node)
2767{
2768	struct file_lock *tmp;
2769
2770	/* NULL node or root node */
2771	if (node == NULL || node->fl_blocker == NULL)
2772		return NULL;
2773
2774	/* Next member in the linked list could be itself */
2775	tmp = list_next_entry(node, fl_blocked_member);
2776	if (list_entry_is_head(tmp, &node->fl_blocker->fl_blocked_requests, fl_blocked_member)
2777		|| tmp == node) {
2778		return NULL;
2779	}
2780
2781	return tmp;
2782}
2783
2784static int locks_show(struct seq_file *f, void *v)
2785{
2786	struct locks_iterator *iter = f->private;
2787	struct file_lock *cur, *tmp;
2788	struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
2789	int level = 0;
2790
2791	cur = hlist_entry(v, struct file_lock, fl_link);
2792
2793	if (locks_translate_pid(cur, proc_pidns) == 0)
2794		return 0;
2795
2796	/* View this crossed linked list as a binary tree: the first member of fl_blocked_requests
2797	 * is the left child of the current node, the next sibling in fl_blocked_member is the
2798	 * right child, and we can also get the parent of the current node from fl_blocker, so
2799	 * the problem becomes traversal of a binary tree.
2800	 */
2801	while (cur != NULL) {
2802		if (level)
2803			lock_get_status(f, cur, iter->li_pos, "-> ", level);
2804		else
2805			lock_get_status(f, cur, iter->li_pos, "", level);
2806
2807		if (!list_empty(&cur->fl_blocked_requests)) {
2808			/* Turn left */
2809			cur = list_first_entry_or_null(&cur->fl_blocked_requests,
2810				struct file_lock, fl_blocked_member);
2811			level++;
2812		} else {
2813			/* Turn right */
2814			tmp = get_next_blocked_member(cur);
2815			/* Fall back to parent node */
2816			while (tmp == NULL && cur->fl_blocker != NULL) {
2817				cur = cur->fl_blocker;
2818				level--;
2819				tmp = get_next_blocked_member(cur);
2820			}
2821			cur = tmp;
2822		}
2823	}
2824
2825	return 0;
2826}
2827
2828static void __show_fd_locks(struct seq_file *f,
2829			struct list_head *head, int *id,
2830			struct file *filp, struct files_struct *files)
2831{
2832	struct file_lock *fl;
2833
2834	list_for_each_entry(fl, head, fl_list) {
2835
2836		if (filp != fl->fl_file)
2837			continue;
2838		if (fl->fl_owner != files &&
2839		    fl->fl_owner != filp)
2840			continue;
2841
2842		(*id)++;
2843		seq_puts(f, "lock:\t");
2844		lock_get_status(f, fl, *id, "", 0);
2845	}
2846}
2847
2848void show_fd_locks(struct seq_file *f,
2849		  struct file *filp, struct files_struct *files)
2850{
2851	struct inode *inode = file_inode(filp);
2852	struct file_lock_context *ctx;
2853	int id = 0;
2854
2855	ctx = locks_inode_context(inode);
2856	if (!ctx)
2857		return;
2858
2859	spin_lock(&ctx->flc_lock);
2860	__show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
2861	__show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
2862	__show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
2863	spin_unlock(&ctx->flc_lock);
2864}
2865
2866static void *locks_start(struct seq_file *f, loff_t *pos)
2867	__acquires(&blocked_lock_lock)
2868{
2869	struct locks_iterator *iter = f->private;
2870
2871	iter->li_pos = *pos + 1;
2872	percpu_down_write(&file_rwsem);
2873	spin_lock(&blocked_lock_lock);
2874	return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
2875}
2876
2877static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2878{
2879	struct locks_iterator *iter = f->private;
2880
2881	++iter->li_pos;
2882	return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
2883}
2884
2885static void locks_stop(struct seq_file *f, void *v)
2886	__releases(&blocked_lock_lock)
2887{
2888	spin_unlock(&blocked_lock_lock);
2889	percpu_up_write(&file_rwsem);
2890}
2891
2892static const struct seq_operations locks_seq_operations = {
2893	.start	= locks_start,
2894	.next	= locks_next,
2895	.stop	= locks_stop,
2896	.show	= locks_show,
2897};
2898
2899static int __init proc_locks_init(void)
2900{
2901	proc_create_seq_private("locks", 0, NULL, &locks_seq_operations,
2902			sizeof(struct locks_iterator), NULL);
2903	return 0;
2904}
2905fs_initcall(proc_locks_init);
2906#endif
2907
2908static int __init filelock_init(void)
2909{
2910	int i;
2911
2912	flctx_cache = kmem_cache_create("file_lock_ctx",
2913			sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);
2914
2915	filelock_cache = kmem_cache_create("file_lock_cache",
2916			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2917
2918	for_each_possible_cpu(i) {
2919		struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);
2920
2921		spin_lock_init(&fll->lock);
2922		INIT_HLIST_HEAD(&fll->hlist);
2923	}
2924
2925	lease_notifier_chain_init();
2926	return 0;
2927}
2928core_initcall(filelock_init);
v6.13.7
  51#include <linux/capability.h>
  52#include <linux/file.h>
  53#include <linux/fdtable.h>
  54#include <linux/filelock.h>
  55#include <linux/fs.h>
  56#include <linux/init.h>
  57#include <linux/security.h>
  58#include <linux/slab.h>
  59#include <linux/syscalls.h>
  60#include <linux/time.h>
  61#include <linux/rcupdate.h>
  62#include <linux/pid_namespace.h>
  63#include <linux/hashtable.h>
  64#include <linux/percpu.h>
  65#include <linux/sysctl.h>
  66
  67#define CREATE_TRACE_POINTS
  68#include <trace/events/filelock.h>
  69
  70#include <linux/uaccess.h>
  71
  72static struct file_lock *file_lock(struct file_lock_core *flc)
  73{
  74	return container_of(flc, struct file_lock, c);
  75}
  76
  77static struct file_lease *file_lease(struct file_lock_core *flc)
  78{
  79	return container_of(flc, struct file_lease, c);
  80}
  81
  82static bool lease_breaking(struct file_lease *fl)
  83{
  84	return fl->c.flc_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
  85}
  86
  87static int target_leasetype(struct file_lease *fl)
  88{
  89	if (fl->c.flc_flags & FL_UNLOCK_PENDING)
  90		return F_UNLCK;
  91	if (fl->c.flc_flags & FL_DOWNGRADE_PENDING)
  92		return F_RDLCK;
  93	return fl->c.flc_type;
  94}
  95
  96static int leases_enable = 1;
  97static int lease_break_time = 45;
  98
  99#ifdef CONFIG_SYSCTL
 100static struct ctl_table locks_sysctls[] = {
 101	{
 102		.procname	= "leases-enable",
 103		.data		= &leases_enable,
 104		.maxlen		= sizeof(int),
 105		.mode		= 0644,
 106		.proc_handler	= proc_dointvec,
 107	},
 108#ifdef CONFIG_MMU
 109	{
 110		.procname	= "lease-break-time",
 111		.data		= &lease_break_time,
 112		.maxlen		= sizeof(int),
 113		.mode		= 0644,
 114		.proc_handler	= proc_dointvec,
 115	},
 116#endif /* CONFIG_MMU */
 117};
 118
 119static int __init init_fs_locks_sysctls(void)
 120{
 121	register_sysctl_init("fs", locks_sysctls);
 122	return 0;
 123}
 124early_initcall(init_fs_locks_sysctls);
 125#endif /* CONFIG_SYSCTL */
 126
 127/*
 128 * The global file_lock_list is only used for displaying /proc/locks, so we
 129 * keep a list on each CPU, with each list protected by its own spinlock.
 130 * Global serialization is done using file_rwsem.
 131 *
 132 * Note that alterations to the list also require that the relevant flc_lock is
 133 * held.
 134 */
 135struct file_lock_list_struct {
 136	spinlock_t		lock;
 137	struct hlist_head	hlist;
 138};
 139static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
 140DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);
 141
 142
 143/*
 144 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
 145 * It is protected by blocked_lock_lock.
 146 *
 147 * We hash locks by lockowner in order to optimize searching for the lock a
 148 * particular lockowner is waiting on.
 149 *
 150 * FIXME: make this value scale via some heuristic? We generally will want more
 151 * buckets when we have more lockowners holding locks, but that's a little
 152 * difficult to determine without knowing what the workload will look like.
 153 */
 154#define BLOCKED_HASH_BITS	7
 155static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
 156
 157/*
 158 * This lock protects the blocked_hash. Generally, if you're accessing it, you
 159 * want to be holding this lock.
 160 *
 161 * In addition, it also protects the fl->fl_blocked_requests list, and the
 162 * fl->fl_blocker pointer for file_lock structures that are acting as lock
 163 * requests (in contrast to those that are acting as records of acquired locks).
 164 *
 165 * Note that when we acquire this lock in order to change the above fields,
 166 * we often hold the flc_lock as well. In certain cases, when reading the fields
 167 * protected by this lock, we can skip acquiring it iff we already hold the
 168 * flc_lock.
 169 */
 170static DEFINE_SPINLOCK(blocked_lock_lock);
 171
 172static struct kmem_cache *flctx_cache __ro_after_init;
 173static struct kmem_cache *filelock_cache __ro_after_init;
 174static struct kmem_cache *filelease_cache __ro_after_init;
 175
 176static struct file_lock_context *
 177locks_get_lock_context(struct inode *inode, int type)
 178{
 179	struct file_lock_context *ctx;
 180
 181	/* paired with cmpxchg() below */
 182	ctx = locks_inode_context(inode);
 183	if (likely(ctx) || type == F_UNLCK)
 184		goto out;
 185
 186	ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
 187	if (!ctx)
 188		goto out;
 189
 190	spin_lock_init(&ctx->flc_lock);
 191	INIT_LIST_HEAD(&ctx->flc_flock);
 192	INIT_LIST_HEAD(&ctx->flc_posix);
 193	INIT_LIST_HEAD(&ctx->flc_lease);
 194
 195	/*
 196	 * Assign the pointer if it's not already assigned. If it is, then
 197	 * free the context we just allocated.
 198	 */
 199	if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
 200		kmem_cache_free(flctx_cache, ctx);
 201		ctx = locks_inode_context(inode);
 202	}
 203out:
 204	trace_locks_get_lock_context(inode, type, ctx);
 205	return ctx;
 206}
 207
 208static void
 209locks_dump_ctx_list(struct list_head *list, char *list_type)
 210{
 211	struct file_lock_core *flc;
 212
 213	list_for_each_entry(flc, list, flc_list)
 214		pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
 215			list_type, flc->flc_owner, flc->flc_flags,
 216			flc->flc_type, flc->flc_pid);
 217}
 218
 219static void
 220locks_check_ctx_lists(struct inode *inode)
 221{
 222	struct file_lock_context *ctx = inode->i_flctx;
 223
 224	if (unlikely(!list_empty(&ctx->flc_flock) ||
 225		     !list_empty(&ctx->flc_posix) ||
 226		     !list_empty(&ctx->flc_lease))) {
 227		pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
 228			MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
 229			inode->i_ino);
 230		locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
 231		locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
 232		locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
 233	}
 234}
 235
 236static void
 237locks_check_ctx_file_list(struct file *filp, struct list_head *list, char *list_type)
 238{
 239	struct file_lock_core *flc;
 240	struct inode *inode = file_inode(filp);
 241
 242	list_for_each_entry(flc, list, flc_list)
 243		if (flc->flc_file == filp)
 244			pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
 245				" fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
 246				list_type, MAJOR(inode->i_sb->s_dev),
 247				MINOR(inode->i_sb->s_dev), inode->i_ino,
 248				flc->flc_owner, flc->flc_flags,
 249				flc->flc_type, flc->flc_pid);
 250}
 251
 252void
 253locks_free_lock_context(struct inode *inode)
 254{
 255	struct file_lock_context *ctx = locks_inode_context(inode);
 256
 257	if (unlikely(ctx)) {
 258		locks_check_ctx_lists(inode);
 259		kmem_cache_free(flctx_cache, ctx);
 260	}
 261}
 262
 263static void locks_init_lock_heads(struct file_lock_core *flc)
 264{
 265	INIT_HLIST_NODE(&flc->flc_link);
 266	INIT_LIST_HEAD(&flc->flc_list);
 267	INIT_LIST_HEAD(&flc->flc_blocked_requests);
 268	INIT_LIST_HEAD(&flc->flc_blocked_member);
 269	init_waitqueue_head(&flc->flc_wait);
 270}
 271
 272/* Allocate an empty lock structure. */
 273struct file_lock *locks_alloc_lock(void)
 274{
 275	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
 276
 277	if (fl)
 278		locks_init_lock_heads(&fl->c);
 279
 280	return fl;
 281}
 282EXPORT_SYMBOL_GPL(locks_alloc_lock);
 283
 284/* Allocate an empty lock structure. */
 285struct file_lease *locks_alloc_lease(void)
 286{
 287	struct file_lease *fl = kmem_cache_zalloc(filelease_cache, GFP_KERNEL);
 288
 289	if (fl)
 290		locks_init_lock_heads(&fl->c);
 291
 292	return fl;
 293}
 294EXPORT_SYMBOL_GPL(locks_alloc_lease);
 295
 296void locks_release_private(struct file_lock *fl)
 297{
 298	struct file_lock_core *flc = &fl->c;
 299
 300	BUG_ON(waitqueue_active(&flc->flc_wait));
 301	BUG_ON(!list_empty(&flc->flc_list));
 302	BUG_ON(!list_empty(&flc->flc_blocked_requests));
 303	BUG_ON(!list_empty(&flc->flc_blocked_member));
 304	BUG_ON(!hlist_unhashed(&flc->flc_link));
 305
 306	if (fl->fl_ops) {
 307		if (fl->fl_ops->fl_release_private)
 308			fl->fl_ops->fl_release_private(fl);
 309		fl->fl_ops = NULL;
 310	}
 311
 312	if (fl->fl_lmops) {
 313		if (fl->fl_lmops->lm_put_owner) {
 314			fl->fl_lmops->lm_put_owner(flc->flc_owner);
 315			flc->flc_owner = NULL;
 316		}
 317		fl->fl_lmops = NULL;
 318	}
 319}
 320EXPORT_SYMBOL_GPL(locks_release_private);
 321
 322/**
 323 * locks_owner_has_blockers - Check for blocking lock requests
 324 * @flctx: file lock context
 325 * @owner: lock owner
 326 *
 327 * Return values:
 328 *   %true: @owner has at least one blocker
 329 *   %false: @owner has no blockers
 330 */
 331bool locks_owner_has_blockers(struct file_lock_context *flctx, fl_owner_t owner)
 332{
 333	struct file_lock_core *flc;
 334
 335	spin_lock(&flctx->flc_lock);
 336	list_for_each_entry(flc, &flctx->flc_posix, flc_list) {
 337		if (flc->flc_owner != owner)
 338			continue;
 339		if (!list_empty(&flc->flc_blocked_requests)) {
 340			spin_unlock(&flctx->flc_lock);
 341			return true;
 342		}
 343	}
 344	spin_unlock(&flctx->flc_lock);
 345	return false;
 346}
 347EXPORT_SYMBOL_GPL(locks_owner_has_blockers);
 348
 349/* Free a lock which is not in use. */
 350void locks_free_lock(struct file_lock *fl)
 351{
 352	locks_release_private(fl);
 353	kmem_cache_free(filelock_cache, fl);
 354}
 355EXPORT_SYMBOL(locks_free_lock);
 356
 357/* Free a lease which is not in use. */
 358void locks_free_lease(struct file_lease *fl)
 359{
 360	kmem_cache_free(filelease_cache, fl);
 361}
 362EXPORT_SYMBOL(locks_free_lease);
 363
 364static void
 365locks_dispose_list(struct list_head *dispose)
 366{
 367	struct file_lock_core *flc;
 368
 369	while (!list_empty(dispose)) {
 370		flc = list_first_entry(dispose, struct file_lock_core, flc_list);
 371		list_del_init(&flc->flc_list);
 372		if (flc->flc_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
 373			locks_free_lease(file_lease(flc));
 374		else
 375			locks_free_lock(file_lock(flc));
 376	}
 377}
 378
 379void locks_init_lock(struct file_lock *fl)
 380{
 381	memset(fl, 0, sizeof(struct file_lock));
 382	locks_init_lock_heads(&fl->c);
 383}
 384EXPORT_SYMBOL(locks_init_lock);
 385
 386void locks_init_lease(struct file_lease *fl)
 387{
 388	memset(fl, 0, sizeof(*fl));
 389	locks_init_lock_heads(&fl->c);
 390}
 391EXPORT_SYMBOL(locks_init_lease);
 392
 393/*
 394 * Initialize a new lock from an existing file_lock structure.
 395 */
 396void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
 397{
 398	new->c.flc_owner = fl->c.flc_owner;
 399	new->c.flc_pid = fl->c.flc_pid;
 400	new->c.flc_file = NULL;
 401	new->c.flc_flags = fl->c.flc_flags;
 402	new->c.flc_type = fl->c.flc_type;
 403	new->fl_start = fl->fl_start;
 404	new->fl_end = fl->fl_end;
 405	new->fl_lmops = fl->fl_lmops;
 406	new->fl_ops = NULL;
 407
 408	if (fl->fl_lmops) {
 409		if (fl->fl_lmops->lm_get_owner)
 410			fl->fl_lmops->lm_get_owner(fl->c.flc_owner);
 411	}
 412}
 413EXPORT_SYMBOL(locks_copy_conflock);
 414
 415void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
 416{
 417	/* "new" must be a freshly-initialized lock */
 418	WARN_ON_ONCE(new->fl_ops);
 419
 420	locks_copy_conflock(new, fl);
 421
 422	new->c.flc_file = fl->c.flc_file;
 423	new->fl_ops = fl->fl_ops;
 424
 425	if (fl->fl_ops) {
 426		if (fl->fl_ops->fl_copy_lock)
 427			fl->fl_ops->fl_copy_lock(new, fl);
 428	}
 429}
 430EXPORT_SYMBOL(locks_copy_lock);
 431
 432static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
 433{
 434	struct file_lock *f;
 435
 436	/*
 437	 * As ctx->flc_lock is held, new requests cannot be added to
 438	 * ->flc_blocked_requests, so we don't need a lock to check if it
 439	 * is empty.
 440	 */
 441	if (list_empty(&fl->c.flc_blocked_requests))
 442		return;
 443	spin_lock(&blocked_lock_lock);
 444	list_splice_init(&fl->c.flc_blocked_requests,
 445			 &new->c.flc_blocked_requests);
 446	list_for_each_entry(f, &new->c.flc_blocked_requests,
 447			    c.flc_blocked_member)
 448		f->c.flc_blocker = &new->c;
 449	spin_unlock(&blocked_lock_lock);
 450}
 451
 452static inline int flock_translate_cmd(int cmd) {
 453	switch (cmd) {
 454	case LOCK_SH:
 455		return F_RDLCK;
 456	case LOCK_EX:
 457		return F_WRLCK;
 458	case LOCK_UN:
 459		return F_UNLCK;
 460	}
 461	return -EINVAL;
 462}
 463
 464/* Fill in a file_lock structure with an appropriate FLOCK lock. */
 465static void flock_make_lock(struct file *filp, struct file_lock *fl, int type)
 466{
 467	locks_init_lock(fl);
 468
 469	fl->c.flc_file = filp;
 470	fl->c.flc_owner = filp;
 471	fl->c.flc_pid = current->tgid;
 472	fl->c.flc_flags = FL_FLOCK;
 473	fl->c.flc_type = type;
 474	fl->fl_end = OFFSET_MAX;
 475}
 476
 477static int assign_type(struct file_lock_core *flc, int type)
 478{
 479	switch (type) {
 480	case F_RDLCK:
 481	case F_WRLCK:
 482	case F_UNLCK:
 483		flc->flc_type = type;
 484		break;
 485	default:
 486		return -EINVAL;
 487	}
 488	return 0;
 489}
 490
 491static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
 492				 struct flock64 *l)
 493{
 494	switch (l->l_whence) {
 495	case SEEK_SET:
 496		fl->fl_start = 0;
 497		break;
 498	case SEEK_CUR:
 499		fl->fl_start = filp->f_pos;
 500		break;
 501	case SEEK_END:
 502		fl->fl_start = i_size_read(file_inode(filp));
 503		break;
 504	default:
 505		return -EINVAL;
 506	}
 507	if (l->l_start > OFFSET_MAX - fl->fl_start)
 508		return -EOVERFLOW;
 509	fl->fl_start += l->l_start;
 510	if (fl->fl_start < 0)
 511		return -EINVAL;
 512
 513	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
 514	   POSIX-2001 defines it. */
 515	if (l->l_len > 0) {
 516		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
 517			return -EOVERFLOW;
 518		fl->fl_end = fl->fl_start + (l->l_len - 1);
 519
 520	} else if (l->l_len < 0) {
 521		if (fl->fl_start + l->l_len < 0)
 522			return -EINVAL;
 523		fl->fl_end = fl->fl_start - 1;
 524		fl->fl_start += l->l_len;
 525	} else
 526		fl->fl_end = OFFSET_MAX;
 527
 528	fl->c.flc_owner = current->files;
 529	fl->c.flc_pid = current->tgid;
 530	fl->c.flc_file = filp;
 531	fl->c.flc_flags = FL_POSIX;
 532	fl->fl_ops = NULL;
 533	fl->fl_lmops = NULL;
 534
 535	return assign_type(&fl->c, l->l_type);
 536}
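/*
 * Worked example (illustrative): with l_whence = SEEK_SET, l_start = 100
 * and l_len = -50, the code above yields the range [50, 99]:
 * fl_end = 100 - 1 = 99, then fl_start = 100 + (-50) = 50.
 * l_len == 0 extends the lock to OFFSET_MAX ("to EOF").
 */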
 537
 538/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 539 * style lock.
 540 */
 541static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
 542			       struct flock *l)
 543{
 544	struct flock64 ll = {
 545		.l_type = l->l_type,
 546		.l_whence = l->l_whence,
 547		.l_start = l->l_start,
 548		.l_len = l->l_len,
 549	};
 550
 551	return flock64_to_posix_lock(filp, fl, &ll);
 552}
 553
 554/* default lease lock manager operations */
 555static bool
 556lease_break_callback(struct file_lease *fl)
 557{
 558	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
 559	return false;
 560}
 561
 562static void
 563lease_setup(struct file_lease *fl, void **priv)
 564{
 565	struct file *filp = fl->c.flc_file;
 566	struct fasync_struct *fa = *priv;
 567
 568	/*
 569	 * fasync_insert_entry() returns the old entry if any. If there was no
 570	 * old entry, then it used "priv" and inserted it into the fasync list.
 571	 * Clear the pointer to indicate that it shouldn't be freed.
 572	 */
 573	if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
 574		*priv = NULL;
 575
 576	__f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
 577}
 578
 579static const struct lease_manager_operations lease_manager_ops = {
 580	.lm_break = lease_break_callback,
 581	.lm_change = lease_modify,
 582	.lm_setup = lease_setup,
 583};
 584
 585/*
 586 * Initialize a lease, use the default lock manager operations
 587 */
 588static int lease_init(struct file *filp, int type, struct file_lease *fl)
 589{
 590	if (assign_type(&fl->c, type) != 0)
 591		return -EINVAL;
 592
 593	fl->c.flc_owner = filp;
 594	fl->c.flc_pid = current->tgid;
 595
 596	fl->c.flc_file = filp;
 597	fl->c.flc_flags = FL_LEASE;
 598	fl->fl_lmops = &lease_manager_ops;
 599	return 0;
 600}
 601
 602/* Allocate a file_lock initialised to this type of lease */
 603static struct file_lease *lease_alloc(struct file *filp, int type)
 604{
 605	struct file_lease *fl = locks_alloc_lease();
 606	int error = -ENOMEM;
 607
 608	if (fl == NULL)
 609		return ERR_PTR(error);
 610
 611	error = lease_init(filp, type, fl);
 612	if (error) {
 613		locks_free_lease(fl);
 614		return ERR_PTR(error);
 615	}
 616	return fl;
 617}
 618
 619/* Check if two locks overlap each other.
 620 */
 621static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
 622{
 623	return ((fl1->fl_end >= fl2->fl_start) &&
 624		(fl2->fl_end >= fl1->fl_start));
 625}
 626
 627/*
 628 * Check whether two locks have the same owner.
 629 */
 630static int posix_same_owner(struct file_lock_core *fl1, struct file_lock_core *fl2)
 631{
 632	return fl1->flc_owner == fl2->flc_owner;
 633}
 634
 635/* Must be called with the flc_lock held! */
 636static void locks_insert_global_locks(struct file_lock_core *flc)
 637{
 638	struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);
 639
 640	percpu_rwsem_assert_held(&file_rwsem);
 641
 642	spin_lock(&fll->lock);
 643	flc->flc_link_cpu = smp_processor_id();
 644	hlist_add_head(&flc->flc_link, &fll->hlist);
 645	spin_unlock(&fll->lock);
 646}
 647
 648/* Must be called with the flc_lock held! */
 649static void locks_delete_global_locks(struct file_lock_core *flc)
 650{
 651	struct file_lock_list_struct *fll;
 652
 653	percpu_rwsem_assert_held(&file_rwsem);
 654
 655	/*
 656	 * Avoid taking lock if already unhashed. This is safe since this check
 657	 * is done while holding the flc_lock, and new insertions into the list
 658	 * also require that it be held.
 659	 */
 660	if (hlist_unhashed(&flc->flc_link))
 661		return;
 662
 663	fll = per_cpu_ptr(&file_lock_list, flc->flc_link_cpu);
 664	spin_lock(&fll->lock);
 665	hlist_del_init(&flc->flc_link);
 666	spin_unlock(&fll->lock);
 667}
 668
 669static unsigned long
 670posix_owner_key(struct file_lock_core *flc)
 671{
 672	return (unsigned long) flc->flc_owner;
 673}
 674
 675static void locks_insert_global_blocked(struct file_lock_core *waiter)
 676{
 677	lockdep_assert_held(&blocked_lock_lock);
 678
 679	hash_add(blocked_hash, &waiter->flc_link, posix_owner_key(waiter));
 680}
 681
 682static void locks_delete_global_blocked(struct file_lock_core *waiter)
 683{
 684	lockdep_assert_held(&blocked_lock_lock);
 685
 686	hash_del(&waiter->flc_link);
 687}
 688
 689/* Remove waiter from blocker's block list.
 690 * When blocker ends up pointing to itself then the list is empty.
 691 *
 692 * Must be called with blocked_lock_lock held.
 693 */
 694static void __locks_unlink_block(struct file_lock_core *waiter)
 695{
 696	locks_delete_global_blocked(waiter);
 697	list_del_init(&waiter->flc_blocked_member);
 698}
 699
 700static void __locks_wake_up_blocks(struct file_lock_core *blocker)
 701{
 702	while (!list_empty(&blocker->flc_blocked_requests)) {
 703		struct file_lock_core *waiter;
 704		struct file_lock *fl;
 705
 706		waiter = list_first_entry(&blocker->flc_blocked_requests,
 707					  struct file_lock_core, flc_blocked_member);
 708
 709		fl = file_lock(waiter);
 710		__locks_unlink_block(waiter);
 711		if ((waiter->flc_flags & (FL_POSIX | FL_FLOCK)) &&
 712		    fl->fl_lmops && fl->fl_lmops->lm_notify)
 713			fl->fl_lmops->lm_notify(fl);
 714		else
 715			locks_wake_up(fl);
 716
 717		/*
 718		 * The setting of flc_blocker to NULL marks the "done"
 719		 * point in deleting a block. Paired with acquire at the top
 720		 * of locks_delete_block().
 721		 */
 722		smp_store_release(&waiter->flc_blocker, NULL);
 723	}
 724}
 725
 726static int __locks_delete_block(struct file_lock_core *waiter)
 727{
 728	int status = -ENOENT;
 729
 730	/*
 731	 * If fl_blocker is NULL, it won't be set again as this thread "owns"
 732	 * the lock and is the only one that might try to claim the lock.
 733	 *
 734	 * We use acquire/release to manage fl_blocker so that we can
 735	 * optimize away taking the blocked_lock_lock in many cases.
 736	 *
 737	 * The smp_load_acquire guarantees two things:
 738	 *
 739	 * 1/ that fl_blocked_requests can be tested locklessly. If something
 740	 * was recently added to that list it must have been in a locked region
 741	 * *before* the locked region when fl_blocker was set to NULL.
 742	 *
 743	 * 2/ that no other thread is accessing 'waiter', so it is safe to free
 744	 * it.  __locks_wake_up_blocks is careful not to touch waiter after
 745	 * fl_blocker is released.
 746	 *
 747	 * If a lockless check of fl_blocker shows it to be NULL, we know that
 748	 * no new locks can be inserted into its fl_blocked_requests list, and
 749	 * can avoid doing anything further if the list is empty.
 750	 */
 751	if (!smp_load_acquire(&waiter->flc_blocker) &&
 752	    list_empty(&waiter->flc_blocked_requests))
 753		return status;
 754
 755	spin_lock(&blocked_lock_lock);
 756	if (waiter->flc_blocker)
 757		status = 0;
 758	__locks_wake_up_blocks(waiter);
 759	__locks_unlink_block(waiter);
 760
 761	/*
 762	 * The setting of fl_blocker to NULL marks the "done" point in deleting
 763	 * a block. Paired with acquire at the top of this function.
 764	 */
 765	smp_store_release(&waiter->flc_blocker, NULL);
 766	spin_unlock(&blocked_lock_lock);
 767	return status;
 768}
 769
 770/**
 771 *	locks_delete_block - stop waiting for a file lock
 772 *	@waiter: the lock which was waiting
 773 *
 774 *	lockd/nfsd need to disconnect the lock while working on it.
 775 */
 776int locks_delete_block(struct file_lock *waiter)
 777{
 778	return __locks_delete_block(&waiter->c);
 779}
 780EXPORT_SYMBOL(locks_delete_block);
 781
 782/* Insert waiter into blocker's block list.
 783 * We use a circular list so that processes can be easily woken up in
 784 * the order they blocked. The documentation doesn't require this but
 785 * it seems like the reasonable thing to do.
 786 *
 787 * Must be called with both the flc_lock and blocked_lock_lock held. The
 788 * fl_blocked_requests list itself is protected by the blocked_lock_lock,
 789 * but by ensuring that the flc_lock is also held on insertions we can avoid
 790 * taking the blocked_lock_lock in some cases when we see that the
 791 * fl_blocked_requests list is empty.
 792 *
 793 * Rather than just adding to the list, we check for conflicts with any existing
 794 * waiters, and add beneath any waiter that blocks the new waiter.
 795 * Thus wakeups don't happen until needed.
 796 */
 797static void __locks_insert_block(struct file_lock_core *blocker,
 798				 struct file_lock_core *waiter,
 799				 bool conflict(struct file_lock_core *,
 800					       struct file_lock_core *))
 801{
 802	struct file_lock_core *flc;
 803
 804	BUG_ON(!list_empty(&waiter->flc_blocked_member));
 805new_blocker:
 806	list_for_each_entry(flc, &blocker->flc_blocked_requests, flc_blocked_member)
 807		if (conflict(flc, waiter)) {
 808			blocker = flc;
 809			goto new_blocker;
 810		}
 811	waiter->flc_blocker = blocker;
 812	list_add_tail(&waiter->flc_blocked_member,
 813		      &blocker->flc_blocked_requests);
 814
 815	if ((blocker->flc_flags & (FL_POSIX|FL_OFDLCK)) == FL_POSIX)
 816		locks_insert_global_blocked(waiter);
 817
 818	/* The requests in waiter->flc_blocked_requests are known to conflict
 819	 * with waiter, but might not conflict with blocker, or with the
 820	 * requests and locks which block it.  So they all need to be woken.
 821	 */
 822	__locks_wake_up_blocks(waiter);
 823}
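/*
 * Worked example (illustrative only, not from the original source):
 * suppose lock A is applied, waiter B conflicts with A, and a new
 * waiter C conflicts with B. The loop above walks A's waiters, finds
 * the conflict with B, and restarts the scan from B, so C is inserted
 * beneath B rather than beside it:
 *
 *	A (applied)
 *	`-- B (waiting on A)
 *	    `-- C (waiting on B)
 *
 * When A is released, only B is woken; C is woken only once B itself
 * is applied, released, or re-queued behind a different blocker. That
 * is what keeps a release from waking the whole tree at once.
 */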
 824
 825/* Must be called with flc_lock held. */
 826static void locks_insert_block(struct file_lock_core *blocker,
 827			       struct file_lock_core *waiter,
 828			       bool conflict(struct file_lock_core *,
 829					     struct file_lock_core *))
 830{
 831	spin_lock(&blocked_lock_lock);
 832	__locks_insert_block(blocker, waiter, conflict);
 833	spin_unlock(&blocked_lock_lock);
 834}
 835
 836/*
 837 * Wake up processes blocked waiting for blocker.
 838 *
 839 * Must be called with the inode->flc_lock held!
 840 */
 841static void locks_wake_up_blocks(struct file_lock_core *blocker)
 842{
 843	/*
 844	 * Avoid taking global lock if list is empty. This is safe since new
 845	 * blocked requests are only added to the list under the flc_lock, and
 846	 * the flc_lock is always held here. Note that removal from the
 847	 * fl_blocked_requests list does not require the flc_lock, so we must
 848	 * recheck list_empty() after acquiring the blocked_lock_lock.
 849	 */
 850	if (list_empty(&blocker->flc_blocked_requests))
 851		return;
 852
 853	spin_lock(&blocked_lock_lock);
 854	__locks_wake_up_blocks(blocker);
 855	spin_unlock(&blocked_lock_lock);
 856}
 857
 858static void
 859locks_insert_lock_ctx(struct file_lock_core *fl, struct list_head *before)
 860{
 861	list_add_tail(&fl->flc_list, before);
 862	locks_insert_global_locks(fl);
 863}
 864
 865static void
 866locks_unlink_lock_ctx(struct file_lock_core *fl)
 867{
 868	locks_delete_global_locks(fl);
 869	list_del_init(&fl->flc_list);
 870	locks_wake_up_blocks(fl);
 871}
 872
 873static void
 874locks_delete_lock_ctx(struct file_lock_core *fl, struct list_head *dispose)
 875{
 876	locks_unlink_lock_ctx(fl);
 877	if (dispose)
 878		list_add(&fl->flc_list, dispose);
 879	else
 880		locks_free_lock(file_lock(fl));
 881}
 882
 883/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 884 * checks for shared/exclusive status of overlapping locks.
 885 */
 886static bool locks_conflict(struct file_lock_core *caller_flc,
 887			   struct file_lock_core *sys_flc)
 888{
 889	if (sys_flc->flc_type == F_WRLCK)
 890		return true;
 891	if (caller_flc->flc_type == F_WRLCK)
 892		return true;
 893	return false;
 894}
 895
 896/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 897 * checking before calling the locks_conflict().
 898 */
 899static bool posix_locks_conflict(struct file_lock_core *caller_flc,
 900				 struct file_lock_core *sys_flc)
 901{
 902	struct file_lock *caller_fl = file_lock(caller_flc);
 903	struct file_lock *sys_fl = file_lock(sys_flc);
 904
 905	/* POSIX locks owned by the same process do not conflict with
 906	 * each other.
 907	 */
 908	if (posix_same_owner(caller_flc, sys_flc))
 909		return false;
 910
 911	/* Check whether they overlap */
 912	if (!locks_overlap(caller_fl, sys_fl))
 913		return false;
 914
 915	return locks_conflict(caller_flc, sys_flc);
 916}
 917
 918/* Determine if lock sys_fl blocks lock caller_fl. Used on xx_GETLK
 919 * path so checks for additional GETLK-specific things like F_UNLCK.
 920 */
 921static bool posix_test_locks_conflict(struct file_lock *caller_fl,
 922				      struct file_lock *sys_fl)
 923{
 924	struct file_lock_core *caller = &caller_fl->c;
 925	struct file_lock_core *sys = &sys_fl->c;
 926
 927	/* F_UNLCK checks any locks on the same fd. */
 928	if (lock_is_unlock(caller_fl)) {
 929		if (!posix_same_owner(caller, sys))
 930			return false;
 931		return locks_overlap(caller_fl, sys_fl);
 932	}
 933	return posix_locks_conflict(caller, sys);
 934}
 935
 936/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 937 * checking before calling the locks_conflict().
 938 */
 939static bool flock_locks_conflict(struct file_lock_core *caller_flc,
 940				 struct file_lock_core *sys_flc)
 941{
 942	/* FLOCK locks referring to the same filp do not conflict with
 943	 * each other.
 944	 */
 945	if (caller_flc->flc_file == sys_flc->flc_file)
 946		return false;
 947
 948	return locks_conflict(caller_flc, sys_flc);
 949}
 950
 951void
 952posix_test_lock(struct file *filp, struct file_lock *fl)
 953{
 954	struct file_lock *cfl;
 955	struct file_lock_context *ctx;
 956	struct inode *inode = file_inode(filp);
 957	void *owner;
 958	void (*func)(void);
 959
 960	ctx = locks_inode_context(inode);
 961	if (!ctx || list_empty_careful(&ctx->flc_posix)) {
 962		fl->c.flc_type = F_UNLCK;
 963		return;
 964	}
 965
 966retry:
 967	spin_lock(&ctx->flc_lock);
 968	list_for_each_entry(cfl, &ctx->flc_posix, c.flc_list) {
 969		if (!posix_test_locks_conflict(fl, cfl))
 970			continue;
 971		if (cfl->fl_lmops && cfl->fl_lmops->lm_lock_expirable
 972			&& (*cfl->fl_lmops->lm_lock_expirable)(cfl)) {
 973			owner = cfl->fl_lmops->lm_mod_owner;
 974			func = cfl->fl_lmops->lm_expire_lock;
 975			__module_get(owner);
 976			spin_unlock(&ctx->flc_lock);
 977			(*func)();
 978			module_put(owner);
 979			goto retry;
 980		}
 981		locks_copy_conflock(fl, cfl);
 982		goto out;
 983	}
 984	fl->c.flc_type = F_UNLCK;
 985out:
 986	spin_unlock(&ctx->flc_lock);
 987	return;
 988}
 989EXPORT_SYMBOL(posix_test_lock);
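/*
 * Example (userspace, illustrative only; not part of the kernel build):
 * F_GETLK reaches posix_test_lock() via vfs_test_lock() on filesystems
 * without a ->lock() method. A minimal probe, assuming a hypothetical
 * "/tmp/f" exists and with error handling elided:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/tmp/f", O_RDWR);
 *		struct flock fl = {
 *			.l_type = F_WRLCK,	// would a write lock succeed?
 *			.l_whence = SEEK_SET,
 *			.l_start = 0,
 *			.l_len = 0,		// zero length == to EOF
 *		};
 *
 *		fcntl(fd, F_GETLK, &fl);
 *		if (fl.l_type == F_UNLCK)
 *			printf("no conflicting lock\n");
 *		else
 *			printf("conflicting lock held by pid %d\n",
 *			       (int)fl.l_pid);
 *		return 0;
 *	}
 */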
 990
 991/*
 992 * Deadlock detection:
 993 *
 994 * We attempt to detect deadlocks that are due purely to posix file
 995 * locks.
 996 *
 997 * We assume that a task can be waiting for at most one lock at a time.
 998 * So for any acquired lock, the process holding that lock may be
 999 * waiting on at most one other lock.  That lock in turn may be held by
1000 * someone waiting for at most one other lock.  Given a requested lock
1001 * caller_fl which is about to wait for a conflicting lock block_fl, we
1002 * follow this chain of waiters to ensure we are not about to create a
1003 * cycle.
1004 *
1005 * Since we do this before we ever put a process to sleep on a lock, we
1006 * are ensured that there is never a cycle; that is what guarantees that
1007 * the while() loop in posix_locks_deadlock() eventually completes.
1008 *
1009 * Note: the above assumption may not be true when handling lock
1010 * requests from a broken NFS client. It may also fail in the presence
1011 * of tasks (such as posix threads) sharing the same open file table.
1012 * To handle those cases, we just bail out after a few iterations.
1013 *
1014 * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
1015 * Because the owner is not even nominally tied to a thread of
1016 * execution, the deadlock detection below can't reasonably work well. Just
1017 * skip it for those.
1018 *
1019 * In principle, we could do a more limited deadlock detection on FL_OFDLCK
1020 * locks that just checks for the case where two tasks are attempting to
1021 * upgrade from read to write locks on the same inode.
1022 */
1023
1024#define MAX_DEADLK_ITERATIONS 10
1025
1026/* Find a lock that the owner of the given @blocker is blocking on. */
1027static struct file_lock_core *what_owner_is_waiting_for(struct file_lock_core *blocker)
1028{
1029	struct file_lock_core *flc;
1030
1031	hash_for_each_possible(blocked_hash, flc, flc_link, posix_owner_key(blocker)) {
1032		if (posix_same_owner(flc, blocker)) {
1033			while (flc->flc_blocker)
1034				flc = flc->flc_blocker;
1035			return flc;
1036		}
1037	}
1038	return NULL;
1039}
1040
1041/* Must be called with the blocked_lock_lock held! */
1042static bool posix_locks_deadlock(struct file_lock *caller_fl,
1043				 struct file_lock *block_fl)
1044{
1045	struct file_lock_core *caller = &caller_fl->c;
1046	struct file_lock_core *blocker = &block_fl->c;
1047	int i = 0;
1048
1049	lockdep_assert_held(&blocked_lock_lock);
1050
1051	/*
1052	 * This deadlock detector can't reasonably detect deadlocks with
1053	 * FL_OFDLCK locks, since they aren't owned by a process, per-se.
1054	 */
1055	if (caller->flc_flags & FL_OFDLCK)
1056		return false;
1057
1058	while ((blocker = what_owner_is_waiting_for(blocker))) {
1059		if (i++ > MAX_DEADLK_ITERATIONS)
1060			return false;
1061		if (posix_same_owner(caller, blocker))
1062			return true;
1063	}
1064	return false;
1065}
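/*
 * Worked example (illustrative only, not from the original source): the
 * classic cycle this detector breaks. Two processes lock two byte
 * ranges of the same file in opposite orders with blocking F_SETLKW:
 *
 *	process A			process B
 *	---------			---------
 *	lock byte 0  (granted)
 *					lock byte 1  (granted)
 *	lock byte 1  (blocks on B)
 *					lock byte 0  -> fails with EDEADLK
 *
 * B's request would wait on A, which already waits on B; the loop above
 * follows that chain via what_owner_is_waiting_for() and returns true,
 * so B gets -EDEADLK instead of sleeping forever.
 */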
1066
1067/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
1068 * after any leases, but before any posix locks.
1069 *
1070 * Note that if called with an FL_EXISTS argument, the caller may determine
1071 * whether or not a lock was successfully freed by testing the return
1072 * value for -ENOENT.
1073 */
1074static int flock_lock_inode(struct inode *inode, struct file_lock *request)
1075{
1076	struct file_lock *new_fl = NULL;
1077	struct file_lock *fl;
1078	struct file_lock_context *ctx;
1079	int error = 0;
1080	bool found = false;
1081	LIST_HEAD(dispose);
1082
1083	ctx = locks_get_lock_context(inode, request->c.flc_type);
1084	if (!ctx) {
1085		if (request->c.flc_type != F_UNLCK)
1086			return -ENOMEM;
1087		return (request->c.flc_flags & FL_EXISTS) ? -ENOENT : 0;
1088	}
1089
1090	if (!(request->c.flc_flags & FL_ACCESS) && (request->c.flc_type != F_UNLCK)) {
1091		new_fl = locks_alloc_lock();
1092		if (!new_fl)
1093			return -ENOMEM;
1094	}
1095
1096	percpu_down_read(&file_rwsem);
1097	spin_lock(&ctx->flc_lock);
1098	if (request->c.flc_flags & FL_ACCESS)
1099		goto find_conflict;
1100
1101	list_for_each_entry(fl, &ctx->flc_flock, c.flc_list) {
1102		if (request->c.flc_file != fl->c.flc_file)
1103			continue;
1104		if (request->c.flc_type == fl->c.flc_type)
1105			goto out;
1106		found = true;
1107		locks_delete_lock_ctx(&fl->c, &dispose);
1108		break;
1109	}
1110
1111	if (lock_is_unlock(request)) {
1112		if ((request->c.flc_flags & FL_EXISTS) && !found)
1113			error = -ENOENT;
1114		goto out;
1115	}
1116
1117find_conflict:
1118	list_for_each_entry(fl, &ctx->flc_flock, c.flc_list) {
1119		if (!flock_locks_conflict(&request->c, &fl->c))
1120			continue;
1121		error = -EAGAIN;
1122		if (!(request->c.flc_flags & FL_SLEEP))
1123			goto out;
1124		error = FILE_LOCK_DEFERRED;
1125		locks_insert_block(&fl->c, &request->c, flock_locks_conflict);
1126		goto out;
1127	}
1128	if (request->c.flc_flags & FL_ACCESS)
1129		goto out;
1130	locks_copy_lock(new_fl, request);
1131	locks_move_blocks(new_fl, request);
1132	locks_insert_lock_ctx(&new_fl->c, &ctx->flc_flock);
1133	new_fl = NULL;
1134	error = 0;
1135
1136out:
1137	spin_unlock(&ctx->flc_lock);
1138	percpu_up_read(&file_rwsem);
1139	if (new_fl)
1140		locks_free_lock(new_fl);
1141	locks_dispose_list(&dispose);
1142	trace_flock_lock_inode(inode, request, error);
1143	return error;
1144}
1145
1146static int posix_lock_inode(struct inode *inode, struct file_lock *request,
1147			    struct file_lock *conflock)
1148{
1149	struct file_lock *fl, *tmp;
1150	struct file_lock *new_fl = NULL;
1151	struct file_lock *new_fl2 = NULL;
1152	struct file_lock *left = NULL;
1153	struct file_lock *right = NULL;
1154	struct file_lock_context *ctx;
1155	int error;
1156	bool added = false;
1157	LIST_HEAD(dispose);
1158	void *owner;
1159	void (*func)(void);
1160
1161	ctx = locks_get_lock_context(inode, request->c.flc_type);
1162	if (!ctx)
1163		return lock_is_unlock(request) ? 0 : -ENOMEM;
1164
1165	/*
1166	 * We may need two file_lock structures for this operation,
1167	 * so we get them in advance to avoid races.
1168	 *
1169	 * In some cases we can be sure that no new locks will be needed.
1170	 */
1171	if (!(request->c.flc_flags & FL_ACCESS) &&
1172	    (request->c.flc_type != F_UNLCK ||
1173	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
1174		new_fl = locks_alloc_lock();
1175		new_fl2 = locks_alloc_lock();
1176	}
1177
1178retry:
1179	percpu_down_read(&file_rwsem);
1180	spin_lock(&ctx->flc_lock);
1181	/*
1182	 * New lock request. Walk all POSIX locks and look for conflicts. If
1183	 * there are any, either return error or put the request on the
1184	 * blocker's list of waiters and the global blocked_hash.
1185	 */
1186	if (request->c.flc_type != F_UNLCK) {
1187		list_for_each_entry(fl, &ctx->flc_posix, c.flc_list) {
1188			if (!posix_locks_conflict(&request->c, &fl->c))
1189				continue;
1190			if (fl->fl_lmops && fl->fl_lmops->lm_lock_expirable
1191				&& (*fl->fl_lmops->lm_lock_expirable)(fl)) {
1192				owner = fl->fl_lmops->lm_mod_owner;
1193				func = fl->fl_lmops->lm_expire_lock;
1194				__module_get(owner);
1195				spin_unlock(&ctx->flc_lock);
1196				percpu_up_read(&file_rwsem);
1197				(*func)();
1198				module_put(owner);
1199				goto retry;
1200			}
1201			if (conflock)
1202				locks_copy_conflock(conflock, fl);
1203			error = -EAGAIN;
1204			if (!(request->c.flc_flags & FL_SLEEP))
1205				goto out;
1206			/*
1207			 * Deadlock detection and insertion into the blocked
1208			 * locks list must be done while holding the same lock!
1209			 */
1210			error = -EDEADLK;
1211			spin_lock(&blocked_lock_lock);
1212			/*
1213			 * Ensure that we don't find any locks blocked on this
1214			 * request during deadlock detection.
1215			 */
1216			__locks_wake_up_blocks(&request->c);
1217			if (likely(!posix_locks_deadlock(request, fl))) {
1218				error = FILE_LOCK_DEFERRED;
1219				__locks_insert_block(&fl->c, &request->c,
1220						     posix_locks_conflict);
1221			}
1222			spin_unlock(&blocked_lock_lock);
1223			goto out;
1224		}
1225	}
1226
1227	/* If we're just looking for a conflict, we're done. */
1228	error = 0;
1229	if (request->c.flc_flags & FL_ACCESS)
1230		goto out;
1231
1232	/* Find the first old lock with the same owner as the new lock */
1233	list_for_each_entry(fl, &ctx->flc_posix, c.flc_list) {
1234		if (posix_same_owner(&request->c, &fl->c))
1235			break;
1236	}
1237
1238	/* Process locks with this owner. */
1239	list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, c.flc_list) {
1240		if (!posix_same_owner(&request->c, &fl->c))
1241			break;
1242
1243		/* Detect adjacent or overlapping regions (if same lock type) */
1244		if (request->c.flc_type == fl->c.flc_type) {
1245			/* In all comparisons of start vs end, use
1246			 * "start - 1" rather than "end + 1". If end
1247			 * is OFFSET_MAX, end + 1 will become negative.
1248			 */
1249			if (fl->fl_end < request->fl_start - 1)
1250				continue;
1251			/* If the next lock in the list has entirely bigger
1252			 * addresses than the new one, insert the lock here.
1253			 */
1254			if (fl->fl_start - 1 > request->fl_end)
1255				break;
1256
1257			/* If we come here, the new and old lock are of the
1258			 * same type and adjacent or overlapping. Make one
1259			 * lock yielding from the lower start address of both
1260			 * locks to the higher end address.
1261			 */
1262			if (fl->fl_start > request->fl_start)
1263				fl->fl_start = request->fl_start;
1264			else
1265				request->fl_start = fl->fl_start;
1266			if (fl->fl_end < request->fl_end)
1267				fl->fl_end = request->fl_end;
1268			else
1269				request->fl_end = fl->fl_end;
1270			if (added) {
1271				locks_delete_lock_ctx(&fl->c, &dispose);
1272				continue;
1273			}
1274			request = fl;
1275			added = true;
1276		} else {
1277			/* Processing for different lock types is a bit
1278			 * more complex.
1279			 */
1280			if (fl->fl_end < request->fl_start)
1281				continue;
1282			if (fl->fl_start > request->fl_end)
1283				break;
1284			if (lock_is_unlock(request))
1285				added = true;
1286			if (fl->fl_start < request->fl_start)
1287				left = fl;
1288			/* If the next lock in the list has a higher end
1289			 * address than the new one, insert the new one here.
1290			 */
1291			if (fl->fl_end > request->fl_end) {
1292				right = fl;
1293				break;
1294			}
1295			if (fl->fl_start >= request->fl_start) {
1296				/* The new lock completely replaces an old
1297				 * one (This may happen several times).
1298				 */
1299				if (added) {
1300					locks_delete_lock_ctx(&fl->c, &dispose);
1301					continue;
1302				}
1303				/*
1304				 * Replace the old lock with new_fl, and
1305				 * remove the old one. It's safe to do the
1306				 * insert here since we know that we won't be
1307				 * using new_fl later, and that the lock is
1308				 * just replacing an existing lock.
1309				 */
1310				error = -ENOLCK;
1311				if (!new_fl)
1312					goto out;
1313				locks_copy_lock(new_fl, request);
1314				locks_move_blocks(new_fl, request);
1315				request = new_fl;
1316				new_fl = NULL;
1317				locks_insert_lock_ctx(&request->c,
1318						      &fl->c.flc_list);
1319				locks_delete_lock_ctx(&fl->c, &dispose);
1320				added = true;
1321			}
1322		}
1323	}
1324
1325	/*
1326	 * The above code only modifies existing locks in case of merging or
1327	 * replacing. If new lock(s) need to be inserted, all modifications are
1328	 * done below this point, so it's still safe to bail out.
1329	 */
1330	error = -ENOLCK; /* "no luck" */
1331	if (right && left == right && !new_fl2)
1332		goto out;
1333
1334	error = 0;
1335	if (!added) {
1336		if (lock_is_unlock(request)) {
1337			if (request->c.flc_flags & FL_EXISTS)
1338				error = -ENOENT;
1339			goto out;
1340		}
1341
1342		if (!new_fl) {
1343			error = -ENOLCK;
1344			goto out;
1345		}
1346		locks_copy_lock(new_fl, request);
1347		locks_move_blocks(new_fl, request);
1348		locks_insert_lock_ctx(&new_fl->c, &fl->c.flc_list);
1349		fl = new_fl;
1350		new_fl = NULL;
1351	}
1352	if (right) {
1353		if (left == right) {
1354			/* The new lock breaks the old one in two pieces,
1355			 * so we have to use the second new lock.
1356			 */
1357			left = new_fl2;
1358			new_fl2 = NULL;
1359			locks_copy_lock(left, right);
1360			locks_insert_lock_ctx(&left->c, &fl->c.flc_list);
1361		}
1362		right->fl_start = request->fl_end + 1;
1363		locks_wake_up_blocks(&right->c);
1364	}
1365	if (left) {
1366		left->fl_end = request->fl_start - 1;
1367		locks_wake_up_blocks(&left->c);
1368	}
1369 out:
1370	trace_posix_lock_inode(inode, request, error);
1371	spin_unlock(&ctx->flc_lock);
1372	percpu_up_read(&file_rwsem);
1373	/*
1374	 * Free any unused locks.
1375	 */
1376	if (new_fl)
1377		locks_free_lock(new_fl);
1378	if (new_fl2)
1379		locks_free_lock(new_fl2);
1380	locks_dispose_list(&dispose);
1381
1382	return error;
1383}
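/*
 * Worked example (illustrative only, not from the original source):
 * splitting is why new_fl2 may be needed above. If an owner holds a
 * write lock on bytes 0..99 and then unlocks bytes 40..59, one lock
 * must become two:
 *
 *	before:  [0 .......................... 99]  F_WRLCK
 *	unlock:          [40 ...... 59]
 *	after:   [0 .. 39]            [60 .... 99]  F_WRLCK x2
 *
 * The old lock structure is trimmed to become the right-hand piece and
 * the left-hand piece consumes new_fl2, which is why the request fails
 * early with -ENOLCK when left == right but no second structure could
 * be allocated.
 */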
1384
1385/**
1386 * posix_lock_file - Apply a POSIX-style lock to a file
1387 * @filp: The file to apply the lock to
1388 * @fl: The lock to be applied
1389 * @conflock: Place to return a copy of the conflicting lock, if found.
1390 *
1391 * Add a POSIX style lock to a file.
1392 * We merge adjacent & overlapping locks whenever possible.
1393 * POSIX locks are sorted by owner task, then by starting address.
1394 *
1395 * Note that if called with an FL_EXISTS argument, the caller may determine
1396 * whether or not a lock was successfully freed by testing the return
1397 * value for -ENOENT.
1398 */
1399int posix_lock_file(struct file *filp, struct file_lock *fl,
1400			struct file_lock *conflock)
1401{
1402	return posix_lock_inode(file_inode(filp), fl, conflock);
1403}
1404EXPORT_SYMBOL(posix_lock_file);
1405
1406/**
1407 * posix_lock_inode_wait - Apply a POSIX-style lock to a file
1408 * @inode: inode of file to which lock request should be applied
1409 * @fl: The lock to be applied
1410 *
1411 * Apply a POSIX style lock request to an inode.
1412 */
1413static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1414{
1415	int error;
1416	might_sleep();
1417	for (;;) {
1418		error = posix_lock_inode(inode, fl, NULL);
1419		if (error != FILE_LOCK_DEFERRED)
1420			break;
1421		error = wait_event_interruptible(fl->c.flc_wait,
1422						 list_empty(&fl->c.flc_blocked_member));
1423		if (error)
1424			break;
1425	}
1426	locks_delete_block(fl);
1427	return error;
1428}
1429
1430static void lease_clear_pending(struct file_lease *fl, int arg)
1431{
1432	switch (arg) {
1433	case F_UNLCK:
1434		fl->c.flc_flags &= ~FL_UNLOCK_PENDING;
1435		fallthrough;
1436	case F_RDLCK:
1437		fl->c.flc_flags &= ~FL_DOWNGRADE_PENDING;
1438	}
1439}
1440
1441/* We already had a lease on this file; just change its type */
1442int lease_modify(struct file_lease *fl, int arg, struct list_head *dispose)
1443{
1444	int error = assign_type(&fl->c, arg);
1445
1446	if (error)
1447		return error;
1448	lease_clear_pending(fl, arg);
1449	locks_wake_up_blocks(&fl->c);
1450	if (arg == F_UNLCK) {
1451		struct file *filp = fl->c.flc_file;
1452
1453		f_delown(filp);
1454		file_f_owner(filp)->signum = 0;
1455		fasync_helper(0, fl->c.flc_file, 0, &fl->fl_fasync);
1456		if (fl->fl_fasync != NULL) {
1457			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
1458			fl->fl_fasync = NULL;
1459		}
1460		locks_delete_lock_ctx(&fl->c, dispose);
1461	}
1462	return 0;
1463}
1464EXPORT_SYMBOL(lease_modify);
1465
1466static bool past_time(unsigned long then)
1467{
1468	if (!then)
1469		/* 0 is a special value meaning "this never expires": */
1470		return false;
1471	return time_after(jiffies, then);
1472}
1473
1474static void time_out_leases(struct inode *inode, struct list_head *dispose)
1475{
1476	struct file_lock_context *ctx = inode->i_flctx;
1477	struct file_lease *fl, *tmp;
1478
1479	lockdep_assert_held(&ctx->flc_lock);
1480
1481	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, c.flc_list) {
1482		trace_time_out_leases(inode, fl);
1483		if (past_time(fl->fl_downgrade_time))
1484			lease_modify(fl, F_RDLCK, dispose);
1485		if (past_time(fl->fl_break_time))
1486			lease_modify(fl, F_UNLCK, dispose);
1487	}
1488}
1489
1490static bool leases_conflict(struct file_lock_core *lc, struct file_lock_core *bc)
1491{
1492	bool rc;
1493	struct file_lease *lease = file_lease(lc);
1494	struct file_lease *breaker = file_lease(bc);
1495
1496	if (lease->fl_lmops->lm_breaker_owns_lease
1497			&& lease->fl_lmops->lm_breaker_owns_lease(lease))
1498		return false;
1499	if ((bc->flc_flags & FL_LAYOUT) != (lc->flc_flags & FL_LAYOUT)) {
1500		rc = false;
1501		goto trace;
1502	}
1503	if ((bc->flc_flags & FL_DELEG) && (lc->flc_flags & FL_LEASE)) {
1504		rc = false;
1505		goto trace;
1506	}
1507
1508	rc = locks_conflict(bc, lc);
1509trace:
1510	trace_leases_conflict(rc, lease, breaker);
1511	return rc;
1512}
1513
1514static bool
1515any_leases_conflict(struct inode *inode, struct file_lease *breaker)
1516{
1517	struct file_lock_context *ctx = inode->i_flctx;
1518	struct file_lock_core *flc;
1519
1520	lockdep_assert_held(&ctx->flc_lock);
1521
1522	list_for_each_entry(flc, &ctx->flc_lease, flc_list) {
1523		if (leases_conflict(flc, &breaker->c))
1524			return true;
1525	}
1526	return false;
1527}
1528
1529/**
1530 *	__break_lease	-	revoke all outstanding leases on file
1531 *	@inode: the inode of the file to return
1532 *	@mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
1533 *	    break all leases
1534 *	@type: FL_LEASE: break leases and delegations; FL_DELEG: break
1535 *	    only delegations
1536 *
1537 *	break_lease (inlined for speed) has checked there already is at least
1538 *	some kind of lock (maybe a lease) on this file.  Leases are broken on
1539 *	a call to open() or truncate().  This function can sleep unless you
1540 *	specified %O_NONBLOCK to your open().
1541 */
1542int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1543{
1544	int error = 0;
1545	struct file_lock_context *ctx;
1546	struct file_lease *new_fl, *fl, *tmp;
1547	unsigned long break_time;
1548	int want_write = (mode & O_ACCMODE) != O_RDONLY;
1549	LIST_HEAD(dispose);
1550
1551	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1552	if (IS_ERR(new_fl))
1553		return PTR_ERR(new_fl);
1554	new_fl->c.flc_flags = type;
1555
1556	/* typically we will check that ctx is non-NULL before calling */
1557	ctx = locks_inode_context(inode);
1558	if (!ctx) {
1559		WARN_ON_ONCE(1);
1560		goto free_lock;
1561	}
1562
1563	percpu_down_read(&file_rwsem);
1564	spin_lock(&ctx->flc_lock);
1565
1566	time_out_leases(inode, &dispose);
1567
1568	if (!any_leases_conflict(inode, new_fl))
1569		goto out;
1570
1571	break_time = 0;
1572	if (lease_break_time > 0) {
1573		break_time = jiffies + lease_break_time * HZ;
1574		if (break_time == 0)
1575			break_time++;	/* so that 0 means no break time */
1576	}
1577
1578	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, c.flc_list) {
1579		if (!leases_conflict(&fl->c, &new_fl->c))
1580			continue;
1581		if (want_write) {
1582			if (fl->c.flc_flags & FL_UNLOCK_PENDING)
1583				continue;
1584			fl->c.flc_flags |= FL_UNLOCK_PENDING;
1585			fl->fl_break_time = break_time;
1586		} else {
1587			if (lease_breaking(fl))
1588				continue;
1589			fl->c.flc_flags |= FL_DOWNGRADE_PENDING;
1590			fl->fl_downgrade_time = break_time;
1591		}
1592		if (fl->fl_lmops->lm_break(fl))
1593			locks_delete_lock_ctx(&fl->c, &dispose);
1594	}
1595
1596	if (list_empty(&ctx->flc_lease))
1597		goto out;
1598
1599	if (mode & O_NONBLOCK) {
1600		trace_break_lease_noblock(inode, new_fl);
1601		error = -EWOULDBLOCK;
1602		goto out;
1603	}
1604
1605restart:
1606	fl = list_first_entry(&ctx->flc_lease, struct file_lease, c.flc_list);
1607	break_time = fl->fl_break_time;
1608	if (break_time != 0)
1609		break_time -= jiffies;
1610	if (break_time == 0)
1611		break_time++;
1612	locks_insert_block(&fl->c, &new_fl->c, leases_conflict);
1613	trace_break_lease_block(inode, new_fl);
1614	spin_unlock(&ctx->flc_lock);
1615	percpu_up_read(&file_rwsem);
1616
1617	locks_dispose_list(&dispose);
1618	error = wait_event_interruptible_timeout(new_fl->c.flc_wait,
1619						 list_empty(&new_fl->c.flc_blocked_member),
1620						 break_time);
1621
1622	percpu_down_read(&file_rwsem);
1623	spin_lock(&ctx->flc_lock);
1624	trace_break_lease_unblock(inode, new_fl);
1625	__locks_delete_block(&new_fl->c);
1626	if (error >= 0) {
1627		/*
1628		 * Wait for the next conflicting lease that has not been
1629		 * broken yet
1630		 */
1631		if (error == 0)
1632			time_out_leases(inode, &dispose);
1633		if (any_leases_conflict(inode, new_fl))
1634			goto restart;
1635		error = 0;
1636	}
1637out:
1638	spin_unlock(&ctx->flc_lock);
1639	percpu_up_read(&file_rwsem);
1640	locks_dispose_list(&dispose);
1641free_lock:
1642	locks_free_lease(new_fl);
1643	return error;
1644}
1645EXPORT_SYMBOL(__break_lease);
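/*
 * Example (illustrative only, not from the original source): if another
 * process holds a write lease on a file, open(path, O_RDWR | O_NONBLOCK)
 * fails with EWOULDBLOCK via the error path above, while a plain
 * open(path, O_RDWR) sleeps in the wait_event_interruptible_timeout()
 * loop until the holder releases the lease or the lease-break-time
 * sysctl (in seconds) expires.
 */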
1646
1647/**
1648 *	lease_get_mtime - update modified time of an inode with exclusive lease
1649 *	@inode: the inode
1650 *      @time:  pointer to a timespec which contains the last modified time
1651 *
1652 * This is to force NFS clients to flush their caches for files with
1653 * exclusive leases.  The justification is that if someone has an
1654 * exclusive lease, then they could be modifying it.
1655 */
1656void lease_get_mtime(struct inode *inode, struct timespec64 *time)
1657{
1658	bool has_lease = false;
1659	struct file_lock_context *ctx;
1660	struct file_lock_core *flc;
1661
1662	ctx = locks_inode_context(inode);
1663	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1664		spin_lock(&ctx->flc_lock);
1665		flc = list_first_entry_or_null(&ctx->flc_lease,
1666					       struct file_lock_core, flc_list);
1667		if (flc && flc->flc_type == F_WRLCK)
1668			has_lease = true;
1669		spin_unlock(&ctx->flc_lock);
1670	}
1671
1672	if (has_lease)
1673		*time = current_time(inode);
1674}
1675EXPORT_SYMBOL(lease_get_mtime);
1676
1677/**
1678 *	fcntl_getlease - Enquire what lease is currently active
1679 *	@filp: the file
1680 *
1681 *	The value returned by this function will be one of
1682 *	(if no lease break is pending):
1683 *
1684 *	%F_RDLCK to indicate a shared lease is held.
1685 *
1686 *	%F_WRLCK to indicate an exclusive lease is held.
1687 *
1688 *	%F_UNLCK to indicate no lease is held.
1689 *
1690 *	(if a lease break is pending):
1691 *
1692 *	%F_RDLCK to indicate an exclusive lease needs to be
1693 *		changed to a shared lease (or removed).
1694 *
1695 *	%F_UNLCK to indicate the lease needs to be removed.
1696 *
1697 *	XXX: sfr & willy disagree over whether F_INPROGRESS
1698 *	should be returned to userspace.
1699 */
1700int fcntl_getlease(struct file *filp)
1701{
1702	struct file_lease *fl;
1703	struct inode *inode = file_inode(filp);
1704	struct file_lock_context *ctx;
1705	int type = F_UNLCK;
1706	LIST_HEAD(dispose);
1707
1708	ctx = locks_inode_context(inode);
1709	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1710		percpu_down_read(&file_rwsem);
1711		spin_lock(&ctx->flc_lock);
1712		time_out_leases(inode, &dispose);
1713		list_for_each_entry(fl, &ctx->flc_lease, c.flc_list) {
1714			if (fl->c.flc_file != filp)
1715				continue;
1716			type = target_leasetype(fl);
1717			break;
1718		}
1719		spin_unlock(&ctx->flc_lock);
1720		percpu_up_read(&file_rwsem);
1721
1722		locks_dispose_list(&dispose);
1723	}
1724	return type;
1725}
1726
1727/**
1728 * check_conflicting_open - see if the given file points to an inode that has
1729 *			    an existing open that would conflict with the
1730 *			    desired lease.
1731 * @filp:	file to check
1732 * @arg:	type of lease that we're trying to acquire
1733 * @flags:	current lock flags
1734 *
1735 * Check to see if there's an existing open fd on this file that would
1736 * conflict with the lease we're trying to set.
1737 */
1738static int
1739check_conflicting_open(struct file *filp, const int arg, int flags)
1740{
1741	struct inode *inode = file_inode(filp);
1742	int self_wcount = 0, self_rcount = 0;
1743
1744	if (flags & FL_LAYOUT)
1745		return 0;
1746	if (flags & FL_DELEG)
1747		/* We leave these checks to the caller */
1748		return 0;
1749
1750	if (arg == F_RDLCK)
1751		return inode_is_open_for_write(inode) ? -EAGAIN : 0;
1752	else if (arg != F_WRLCK)
1753		return 0;
1754
1755	/*
1756	 * Make sure that the only read/write counts are from the lease requestor.
1757	 * Note that this will result in denying write leases when i_writecount
1758	 * is negative, which is what we want.  (We shouldn't grant write leases
1759	 * on files open for execution.)
1760	 */
1761	if (filp->f_mode & FMODE_WRITE)
1762		self_wcount = 1;
1763	else if (filp->f_mode & FMODE_READ)
1764		self_rcount = 1;
1765
1766	if (atomic_read(&inode->i_writecount) != self_wcount ||
1767	    atomic_read(&inode->i_readcount) != self_rcount)
1768		return -EAGAIN;
1769
1770	return 0;
1771}
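/*
 * Worked example (illustrative only, not from the original source): a
 * process requesting an F_WRLCK lease on a file it has open for write
 * contributes exactly one to i_writecount, so self_wcount == 1 matches
 * and the lease can be granted. Any other open of the inode, or a file
 * currently being executed (which holds i_writecount negative), leaves
 * the counters different from the requestor's own contribution and the
 * request fails with -EAGAIN.
 */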
1772
1773static int
1774generic_add_lease(struct file *filp, int arg, struct file_lease **flp, void **priv)
1775{
1776	struct file_lease *fl, *my_fl = NULL, *lease;
1777	struct inode *inode = file_inode(filp);
1778	struct file_lock_context *ctx;
1779	bool is_deleg = (*flp)->c.flc_flags & FL_DELEG;
1780	int error;
1781	LIST_HEAD(dispose);
1782
1783	lease = *flp;
1784	trace_generic_add_lease(inode, lease);
1785
1786	error = file_f_owner_allocate(filp);
1787	if (error)
1788		return error;
1789
1790	/* Note that arg is never F_UNLCK here */
1791	ctx = locks_get_lock_context(inode, arg);
1792	if (!ctx)
1793		return -ENOMEM;
1794
1795	/*
1796	 * In the delegation case we need mutual exclusion with
1797	 * a number of operations that take the i_mutex.  We trylock
1798	 * because delegations are an optional optimization, and if
1799	 * there's some chance of a conflict--we'd rather not
1800	 * bother, maybe that's a sign this just isn't a good file to
1801	 * hand out a delegation on.
1802	 */
1803	if (is_deleg && !inode_trylock(inode))
1804		return -EAGAIN;
1805
1806	percpu_down_read(&file_rwsem);
1807	spin_lock(&ctx->flc_lock);
1808	time_out_leases(inode, &dispose);
1809	error = check_conflicting_open(filp, arg, lease->c.flc_flags);
1810	if (error)
1811		goto out;
1812
1813	/*
1814	 * At this point, we know that if there is an exclusive
1815	 * lease on this file, then we hold it on this filp
1816	 * (otherwise our open of this file would have blocked).
1817	 * And if we are trying to acquire an exclusive lease,
1818	 * then the file is not open by anyone (including us)
1819	 * except for this filp.
1820	 */
1821	error = -EAGAIN;
1822	list_for_each_entry(fl, &ctx->flc_lease, c.flc_list) {
1823		if (fl->c.flc_file == filp &&
1824		    fl->c.flc_owner == lease->c.flc_owner) {
1825			my_fl = fl;
1826			continue;
1827		}
1828
1829		/*
1830		 * No exclusive leases if someone else has a lease on
1831		 * this file:
1832		 */
1833		if (arg == F_WRLCK)
1834			goto out;
1835		/*
1836		 * Modifying our existing lease is OK, but no getting a
1837		 * new lease if someone else is opening for write:
1838		 */
1839		if (fl->c.flc_flags & FL_UNLOCK_PENDING)
1840			goto out;
1841	}
1842
1843	if (my_fl != NULL) {
1844		lease = my_fl;
1845		error = lease->fl_lmops->lm_change(lease, arg, &dispose);
1846		if (error)
1847			goto out;
1848		goto out_setup;
1849	}
1850
1851	error = -EINVAL;
1852	if (!leases_enable)
1853		goto out;
1854
1855	locks_insert_lock_ctx(&lease->c, &ctx->flc_lease);
1856	/*
1857	 * The check in break_lease() is lockless. It's possible for another
1858	 * open to race in after we did the earlier check for a conflicting
1859	 * open but before the lease was inserted. Check again for a
1860	 * conflicting open and cancel the lease if there is one.
1861	 *
1862	 * We also add a barrier here to ensure that the insertion of the lock
1863	 * precedes these checks.
1864	 */
1865	smp_mb();
1866	error = check_conflicting_open(filp, arg, lease->c.flc_flags);
1867	if (error) {
1868		locks_unlink_lock_ctx(&lease->c);
1869		goto out;
1870	}
1871
1872out_setup:
1873	if (lease->fl_lmops->lm_setup)
1874		lease->fl_lmops->lm_setup(lease, priv);
1875out:
1876	spin_unlock(&ctx->flc_lock);
1877	percpu_up_read(&file_rwsem);
1878	locks_dispose_list(&dispose);
1879	if (is_deleg)
1880		inode_unlock(inode);
1881	if (!error && !my_fl)
1882		*flp = NULL;
1883	return error;
1884}
1885
1886static int generic_delete_lease(struct file *filp, void *owner)
1887{
1888	int error = -EAGAIN;
1889	struct file_lease *fl, *victim = NULL;
1890	struct inode *inode = file_inode(filp);
1891	struct file_lock_context *ctx;
1892	LIST_HEAD(dispose);
1893
1894	ctx = locks_inode_context(inode);
1895	if (!ctx) {
1896		trace_generic_delete_lease(inode, NULL);
1897		return error;
1898	}
1899
1900	percpu_down_read(&file_rwsem);
1901	spin_lock(&ctx->flc_lock);
1902	list_for_each_entry(fl, &ctx->flc_lease, c.flc_list) {
1903		if (fl->c.flc_file == filp &&
1904		    fl->c.flc_owner == owner) {
1905			victim = fl;
1906			break;
1907		}
1908	}
1909	trace_generic_delete_lease(inode, victim);
1910	if (victim)
1911		error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
1912	spin_unlock(&ctx->flc_lock);
1913	percpu_up_read(&file_rwsem);
1914	locks_dispose_list(&dispose);
1915	return error;
1916}
1917
1918/**
1919 *	generic_setlease	-	sets a lease on an open file
1920 *	@filp:	file pointer
1921 *	@arg:	type of lease to obtain
1922 *	@flp:	input - file_lock to use, output - file_lock inserted
1923 *	@priv:	private data for lm_setup (may be NULL if lm_setup
1924 *		doesn't require it)
1925 *
1926 *	The (input) flp->fl_lmops->lm_break function is required
1927 *	by break_lease().
1928 */
1929int generic_setlease(struct file *filp, int arg, struct file_lease **flp,
1930			void **priv)
1931{
1932	switch (arg) {
1933	case F_UNLCK:
1934		return generic_delete_lease(filp, *priv);
1935	case F_RDLCK:
1936	case F_WRLCK:
1937		if (!(*flp)->fl_lmops->lm_break) {
1938			WARN_ON_ONCE(1);
1939			return -ENOLCK;
1940		}
1941
1942		return generic_add_lease(filp, arg, flp, priv);
1943	default:
1944		return -EINVAL;
1945	}
1946}
1947EXPORT_SYMBOL(generic_setlease);
1948
1949/*
1950 * Kernel subsystems can register to be notified on any attempt to set
1951 * a new lease with the lease_notifier_chain. This is used by (e.g.) nfsd
1952 * to close files that it may have cached when there is an attempt to set a
1953 * conflicting lease.
1954 */
1955static struct srcu_notifier_head lease_notifier_chain;
1956
1957static inline void
1958lease_notifier_chain_init(void)
1959{
1960	srcu_init_notifier_head(&lease_notifier_chain);
1961}
1962
1963static inline void
1964setlease_notifier(int arg, struct file_lease *lease)
1965{
1966	if (arg != F_UNLCK)
1967		srcu_notifier_call_chain(&lease_notifier_chain, arg, lease);
1968}
1969
1970int lease_register_notifier(struct notifier_block *nb)
1971{
1972	return srcu_notifier_chain_register(&lease_notifier_chain, nb);
1973}
1974EXPORT_SYMBOL_GPL(lease_register_notifier);
1975
1976void lease_unregister_notifier(struct notifier_block *nb)
1977{
1978	srcu_notifier_chain_unregister(&lease_notifier_chain, nb);
1979}
1980EXPORT_SYMBOL_GPL(lease_unregister_notifier);
1981
1982
1983int
1984kernel_setlease(struct file *filp, int arg, struct file_lease **lease, void **priv)
1985{
1986	if (lease)
1987		setlease_notifier(arg, *lease);
1988	if (filp->f_op->setlease)
1989		return filp->f_op->setlease(filp, arg, lease, priv);
1990	else
1991		return generic_setlease(filp, arg, lease, priv);
1992}
1993EXPORT_SYMBOL_GPL(kernel_setlease);
1994
1995/**
1996 * vfs_setlease        -       sets a lease on an open file
1997 * @filp:	file pointer
1998 * @arg:	type of lease to obtain
1999 * @lease:	file_lock to use when adding a lease
2000 * @priv:	private info for lm_setup when adding a lease (may be
2001 *		NULL if lm_setup doesn't require it)
2002 *
2003 * Call this to establish a lease on the file. The "lease" argument is not
2004 * used for F_UNLCK requests and may be NULL. For commands that set or alter
2005 * an existing lease, the ``(*lease)->fl_lmops->lm_break`` operation must be
2006 * set; if not, this function will return -ENOLCK (and generate a scary-looking
2007 * stack trace).
2008 *
2009 * The "priv" pointer is passed directly to the lm_setup function as-is. It
2010 * may be NULL if the lm_setup operation doesn't require it.
2011 */
2012int
2013vfs_setlease(struct file *filp, int arg, struct file_lease **lease, void **priv)
2014{
2015	struct inode *inode = file_inode(filp);
2016	vfsuid_t vfsuid = i_uid_into_vfsuid(file_mnt_idmap(filp), inode);
2017	int error;
2018
2019	if ((!vfsuid_eq_kuid(vfsuid, current_fsuid())) && !capable(CAP_LEASE))
2020		return -EACCES;
2021	if (!S_ISREG(inode->i_mode))
2022		return -EINVAL;
2023	error = security_file_lock(filp, arg);
2024	if (error)
2025		return error;
2026	return kernel_setlease(filp, arg, lease, priv);
2027}
2028EXPORT_SYMBOL_GPL(vfs_setlease);
2029
2030static int do_fcntl_add_lease(unsigned int fd, struct file *filp, int arg)
2031{
2032	struct file_lease *fl;
2033	struct fasync_struct *new;
2034	int error;
2035
2036	fl = lease_alloc(filp, arg);
2037	if (IS_ERR(fl))
2038		return PTR_ERR(fl);
2039
2040	new = fasync_alloc();
2041	if (!new) {
2042		locks_free_lease(fl);
2043		return -ENOMEM;
2044	}
2045	new->fa_fd = fd;
2046
2047	error = vfs_setlease(filp, arg, &fl, (void **)&new);
2048	if (fl)
2049		locks_free_lease(fl);
2050	if (new)
2051		fasync_free(new);
2052	return error;
2053}
2054
2055/**
2056 *	fcntl_setlease	-	sets a lease on an open file
2057 *	@fd: open file descriptor
2058 *	@filp: file pointer
2059 *	@arg: type of lease to obtain
2060 *
2061 *	Call this fcntl to establish a lease on the file.
2062 *	Note that you also need to call %F_SETSIG to
2063 *	receive a signal when the lease is broken.
2064 */
2065int fcntl_setlease(unsigned int fd, struct file *filp, int arg)
2066{
2067	if (arg == F_UNLCK)
2068		return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
2069	return do_fcntl_add_lease(fd, filp, arg);
2070}
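/*
 * Example (userspace, illustrative only; not part of the kernel build):
 * taking a read lease via the path above and learning about a break
 * through the default SIGIO. Assumes the caller owns a hypothetical
 * "/tmp/f" (or has CAP_LEASE); error handling elided:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static void on_break(int sig)
 *	{
 *		(void)sig;	// SIGIO: our lease is being broken
 *	}
 *
 *	int main(void)
 *	{
 *		int fd = open("/tmp/f", O_RDONLY);
 *
 *		signal(SIGIO, on_break);
 *		fcntl(fd, F_SETLEASE, F_RDLCK);	// take a read lease
 *		pause();		// returns when a conflict arrives
 *		fcntl(fd, F_SETLEASE, F_UNLCK);	// surrender the lease
 *		return 0;
 *	}
 */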
2071
2072/**
2073 * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
2074 * @inode: inode of the file to apply to
2075 * @fl: The lock to be applied
2076 *
2077 * Apply a FLOCK style lock request to an inode.
2078 */
2079static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2080{
2081	int error;
2082	might_sleep();
2083	for (;;) {
2084		error = flock_lock_inode(inode, fl);
2085		if (error != FILE_LOCK_DEFERRED)
2086			break;
2087		error = wait_event_interruptible(fl->c.flc_wait,
2088						 list_empty(&fl->c.flc_blocked_member));
2089		if (error)
2090			break;
2091	}
2092	locks_delete_block(fl);
2093	return error;
2094}
2095
2096/**
2097 * locks_lock_inode_wait - Apply a lock to an inode
2098 * @inode: inode of the file to apply to
2099 * @fl: The lock to be applied
2100 *
2101 * Apply a POSIX or FLOCK style lock request to an inode.
2102 */
2103int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2104{
2105	int res = 0;
2106	switch (fl->c.flc_flags & (FL_POSIX|FL_FLOCK)) {
2107		case FL_POSIX:
2108			res = posix_lock_inode_wait(inode, fl);
2109			break;
2110		case FL_FLOCK:
2111			res = flock_lock_inode_wait(inode, fl);
2112			break;
2113		default:
2114			BUG();
2115	}
2116	return res;
2117}
2118EXPORT_SYMBOL(locks_lock_inode_wait);
2119
2120/**
2121 *	sys_flock - flock() system call.
2122 *	@fd: the file descriptor to lock.
2123 *	@cmd: the type of lock to apply.
2124 *
2125 *	Apply a %FL_FLOCK style lock to an open file descriptor.
2126 *	The @cmd can be one of:
2127 *
2128 *	- %LOCK_SH -- a shared lock.
2129 *	- %LOCK_EX -- an exclusive lock.
2130 *	- %LOCK_UN -- remove an existing lock.
2131 *	- %LOCK_MAND -- a 'mandatory' flock. (DEPRECATED)
2132 *
2133 *	%LOCK_MAND support has been removed from the kernel.
2134 */
2135SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
2136{
2137	int can_sleep, error, type;
2138	struct file_lock fl;
2139
2140	/*
2141	 * LOCK_MAND locks were broken for a long time in that they never
2142	 * conflicted with one another and didn't prevent any sort of open,
2143	 * read or write activity.
2144	 *
2145	 * Just ignore these requests now, to preserve legacy behavior, but
2146	 * throw a warning to let people know that they don't actually work.
2147	 */
2148	if (cmd & LOCK_MAND) {
2149		pr_warn_once("%s(%d): Attempt to set a LOCK_MAND lock via flock(2). This support has been removed and the request ignored.\n", current->comm, current->pid);
2150		return 0;
2151	}
2152
2153	type = flock_translate_cmd(cmd & ~LOCK_NB);
2154	if (type < 0)
2155		return type;
2156
2157	CLASS(fd, f)(fd);
2158	if (fd_empty(f))
2159		return -EBADF;
2160
2161	if (type != F_UNLCK && !(fd_file(f)->f_mode & (FMODE_READ | FMODE_WRITE)))
2162		return -EBADF;
2163
2164	flock_make_lock(fd_file(f), &fl, type);
2165
2166	error = security_file_lock(fd_file(f), fl.c.flc_type);
2167	if (error)
2168		return error;
2169
2170	can_sleep = !(cmd & LOCK_NB);
2171	if (can_sleep)
2172		fl.c.flc_flags |= FL_SLEEP;
2173
2174	if (fd_file(f)->f_op->flock)
2175		error = fd_file(f)->f_op->flock(fd_file(f),
2176					    (can_sleep) ? F_SETLKW : F_SETLK,
2177					    &fl);
2178	else
2179		error = locks_lock_file_wait(fd_file(f), &fl);
2180
2181	locks_release_private(&fl);
2182	return error;
2183}
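/*
 * Example (userspace, illustrative only; not part of the kernel build):
 * typical flock(2) usage against the syscall above, assuming a
 * hypothetical "/tmp/f" exists and with error handling elided:
 *
 *	#include <sys/file.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/tmp/f", O_RDONLY);
 *
 *		if (flock(fd, LOCK_EX | LOCK_NB) == -1)
 *			return 1;	// EWOULDBLOCK: someone holds it
 *		// ... critical section ...
 *		flock(fd, LOCK_UN);
 *		close(fd);
 *		return 0;
 *	}
 *
 * Note that the lock belongs to the open file description, so it is
 * shared across dup()ed descriptors and inherited across fork().
 */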
2184
2185/**
2186 * vfs_test_lock - test file byte range lock
2187 * @filp: The file to test lock for
2188 * @fl: The lock to test; also used to hold result
2189 *
2190 * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
2191 * setting fl->c.flc_type to something other than F_UNLCK.
2192 */
2193int vfs_test_lock(struct file *filp, struct file_lock *fl)
2194{
2195	WARN_ON_ONCE(filp != fl->c.flc_file);
2196	if (filp->f_op->lock)
2197		return filp->f_op->lock(filp, F_GETLK, fl);
2198	posix_test_lock(filp, fl);
2199	return 0;
2200}
2201EXPORT_SYMBOL_GPL(vfs_test_lock);
2202
2203/**
2204 * locks_translate_pid - translate a file_lock's fl_pid number into a namespace
2205 * @fl: The file_lock whose fl_pid should be translated
2206 * @ns: The namespace into which the pid should be translated
2207 *
2208 * Used to translate a fl_pid into a namespace virtual pid number
2209 */
2210static pid_t locks_translate_pid(struct file_lock_core *fl, struct pid_namespace *ns)
2211{
2212	pid_t vnr;
2213	struct pid *pid;
2214
2215	if (fl->flc_flags & FL_OFDLCK)
2216		return -1;
2217
2218	/* Remote locks report a negative pid value */
2219	if (fl->flc_pid <= 0)
2220		return fl->flc_pid;
2221
2222	/*
2223	 * If the flock owner process is dead and its pid has been already
2224	 * freed, the translation below won't work, but we still want to show
2225	 * flock owner pid number in init pidns.
2226	 */
2227	if (ns == &init_pid_ns)
2228		return (pid_t) fl->flc_pid;
2229
2230	rcu_read_lock();
2231	pid = find_pid_ns(fl->flc_pid, &init_pid_ns);
2232	vnr = pid_nr_ns(pid, ns);
2233	rcu_read_unlock();
2234	return vnr;
2235}
2236
2237static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
2238{
2239	flock->l_pid = locks_translate_pid(&fl->c, task_active_pid_ns(current));
2240#if BITS_PER_LONG == 32
2241	/*
2242	 * Make sure we can represent the posix lock via
2243	 * legacy 32bit flock.
2244	 */
2245	if (fl->fl_start > OFFT_OFFSET_MAX)
2246		return -EOVERFLOW;
2247	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
2248		return -EOVERFLOW;
2249#endif
2250	flock->l_start = fl->fl_start;
2251	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2252		fl->fl_end - fl->fl_start + 1;
2253	flock->l_whence = 0;
2254	flock->l_type = fl->c.flc_type;
2255	return 0;
2256}
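/*
 * Worked example (illustrative only, not from the original source): an
 * internal lock on bytes 100..199 is reported back as l_start = 100,
 * l_len = 100 (end - start + 1), while a lock running to OFFSET_MAX is
 * reported with l_len = 0, which in struct flock means "to the end of
 * the file, however far that grows".
 */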
2257
2258#if BITS_PER_LONG == 32
2259static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
2260{
2261	flock->l_pid = locks_translate_pid(&fl->c, task_active_pid_ns(current));
2262	flock->l_start = fl->fl_start;
2263	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2264		fl->fl_end - fl->fl_start + 1;
2265	flock->l_whence = 0;
2266	flock->l_type = fl->c.flc_type;
2267}
2268#endif
2269
2270/* Report the first existing lock that would conflict with @flock.
2271 * This implements the F_GETLK command of fcntl().
2272 */
2273int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
2274{
2275	struct file_lock *fl;
2276	int error;
2277
2278	fl = locks_alloc_lock();
2279	if (fl == NULL)
2280		return -ENOMEM;
2281	error = -EINVAL;
2282	if (cmd != F_OFD_GETLK && flock->l_type != F_RDLCK
2283			&& flock->l_type != F_WRLCK)
2284		goto out;
2285
2286	error = flock_to_posix_lock(filp, fl, flock);
2287	if (error)
2288		goto out;
2289
2290	if (cmd == F_OFD_GETLK) {
2291		error = -EINVAL;
2292		if (flock->l_pid != 0)
2293			goto out;
2294
2295		fl->c.flc_flags |= FL_OFDLCK;
2296		fl->c.flc_owner = filp;
2297	}
2298
2299	error = vfs_test_lock(filp, fl);
2300	if (error)
2301		goto out;
2302
2303	flock->l_type = fl->c.flc_type;
2304	if (fl->c.flc_type != F_UNLCK) {
2305		error = posix_lock_to_flock(flock, fl);
2306		if (error)
2307			goto out;
2308	}
2309out:
2310	locks_free_lock(fl);
2311	return error;
2312}
2313
2314/**
2315 * vfs_lock_file - file byte range lock
2316 * @filp: The file to apply the lock to
2317 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
2318 * @fl: The lock to be applied
2319 * @conf: Place to return a copy of the conflicting lock, if found.
2320 *
2321 * A caller that doesn't care about the conflicting lock may pass NULL
2322 * as the final argument.
2323 *
2324 * If the filesystem defines a private ->lock() method, then @conf will
2325 * be left unchanged; so a caller that cares should initialize it to
2326 * some acceptable default.
2327 *
2328 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
2329 * locks, the ->lock() interface may return asynchronously, before the lock has
2330 * been granted or denied by the underlying filesystem, if (and only if)
2331 * lm_grant is set. Additionally EXPORT_OP_ASYNC_LOCK in export_operations
2332 * flags need to be set.
2333 *
2334 * Callers expecting ->lock() to return asynchronously will only use F_SETLK,
2335 * not F_SETLKW; they will set FL_SLEEP if (and only if) the request is for a
2336 * blocking lock. When ->lock() does return asynchronously, it must return
2337 * FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock request completes.
2338 * If the request is for a non-blocking lock, the file system should return
2339 * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
2340 * with the result. If the request times out, the callback routine will return
2341 * a nonzero return code and the file system should release the lock. The file
2342 * system is also responsible for keeping a corresponding posix lock when it
2343 * grants a lock, so the VFS can find out which locks are locally held and do
2344 * the correct lock cleanup when required.
2345 * The underlying filesystem must not drop the kernel lock or call
2346 * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
2347 * return code.
2348 */
2349int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
2350{
2351	WARN_ON_ONCE(filp != fl->c.flc_file);
2352	if (filp->f_op->lock)
2353		return filp->f_op->lock(filp, cmd, fl);
2354	else
2355		return posix_lock_file(filp, fl, conf);
2356}
2357EXPORT_SYMBOL_GPL(vfs_lock_file);
2358
2359static int do_lock_file_wait(struct file *filp, unsigned int cmd,
2360			     struct file_lock *fl)
2361{
2362	int error;
2363
2364	error = security_file_lock(filp, fl->c.flc_type);
2365	if (error)
2366		return error;
2367
2368	for (;;) {
2369		error = vfs_lock_file(filp, cmd, fl, NULL);
2370		if (error != FILE_LOCK_DEFERRED)
2371			break;
2372		error = wait_event_interruptible(fl->c.flc_wait,
2373						 list_empty(&fl->c.flc_blocked_member));
2374		if (error)
2375			break;
2376	}
2377	locks_delete_block(fl);
2378
2379	return error;
2380}
2381
2382/* Ensure that fl->c.flc_file has compatible f_mode for F_SETLK calls */
2383static int
2384check_fmode_for_setlk(struct file_lock *fl)
2385{
2386	switch (fl->c.flc_type) {
2387	case F_RDLCK:
2388		if (!(fl->c.flc_file->f_mode & FMODE_READ))
2389			return -EBADF;
2390		break;
2391	case F_WRLCK:
2392		if (!(fl->c.flc_file->f_mode & FMODE_WRITE))
2393			return -EBADF;
2394	}
2395	return 0;
2396}
2397
2398/* Apply the lock described by @flock to an open file descriptor.
2399 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2400 */
2401int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
2402		struct flock *flock)
2403{
2404	struct file_lock *file_lock = locks_alloc_lock();
2405	struct inode *inode = file_inode(filp);
2406	struct file *f;
2407	int error;
2408
2409	if (file_lock == NULL)
2410		return -ENOLCK;
2411
2412	error = flock_to_posix_lock(filp, file_lock, flock);
2413	if (error)
2414		goto out;
2415
2416	error = check_fmode_for_setlk(file_lock);
2417	if (error)
2418		goto out;
2419
2420	/*
2421	 * If the cmd is requesting file-private locks, then set the
2422	 * FL_OFDLCK flag and override the owner.
2423	 */
2424	switch (cmd) {
2425	case F_OFD_SETLK:
2426		error = -EINVAL;
2427		if (flock->l_pid != 0)
2428			goto out;
2429
2430		cmd = F_SETLK;
2431		file_lock->c.flc_flags |= FL_OFDLCK;
2432		file_lock->c.flc_owner = filp;
2433		break;
2434	case F_OFD_SETLKW:
2435		error = -EINVAL;
2436		if (flock->l_pid != 0)
2437			goto out;
2438
2439		cmd = F_SETLKW;
2440		file_lock->c.flc_flags |= FL_OFDLCK;
2441		file_lock->c.flc_owner = filp;
2442		fallthrough;
2443	case F_SETLKW:
2444		file_lock->c.flc_flags |= FL_SLEEP;
2445	}
2446
2447	error = do_lock_file_wait(filp, cmd, file_lock);
2448
2449	/*
2450	 * Detect close/fcntl races and recover by zapping all POSIX locks
2451	 * associated with this file and our files_struct, just like on
2452	 * filp_flush(). There is no need to do that when we're
2453	 * unlocking though, or for OFD locks.
2454	 */
2455	if (!error && file_lock->c.flc_type != F_UNLCK &&
2456	    !(file_lock->c.flc_flags & FL_OFDLCK)) {
2457		struct files_struct *files = current->files;
2458		/*
2459		 * We need that spin_lock here - it prevents reordering between
2460		 * update of i_flctx->flc_posix and check for it done in
2461		 * close(). rcu_read_lock() wouldn't do.
2462		 */
2463		spin_lock(&files->file_lock);
2464		f = files_lookup_fd_locked(files, fd);
2465		spin_unlock(&files->file_lock);
2466		if (f != filp) {
2467			locks_remove_posix(filp, files);
2468			error = -EBADF;
2469		}
2470	}
2471out:
2472	trace_fcntl_setlk(inode, file_lock, error);
2473	locks_free_lock(file_lock);
2474	return error;
2475}
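/*
 * Example (userspace, illustrative only; not part of the kernel build):
 * an OFD ("open file description") lock set through the path above.
 * l_pid must be zero for the F_OFD_* commands, and the lock is owned by
 * the open file description rather than the process:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/tmp/f", O_RDWR);
 *		struct flock fl = {
 *			.l_type = F_WRLCK,
 *			.l_whence = SEEK_SET,
 *			.l_start = 0,
 *			.l_len = 1,
 *			.l_pid = 0,	// required for F_OFD_SETLK
 *		};
 *
 *		fcntl(fd, F_OFD_SETLK, &fl);
 *		// unlike classic POSIX locks, this lock survives this
 *		// process closing some other fd for the same file
 *		return 0;
 *	}
 */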
2476
2477#if BITS_PER_LONG == 32
2478/* Report the first existing lock that would conflict with @flock.
2479 * This implements the F_GETLK command of fcntl().
2480 */
2481int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
2482{
2483	struct file_lock *fl;
2484	int error;
2485
2486	fl = locks_alloc_lock();
2487	if (fl == NULL)
2488		return -ENOMEM;
2489
2490	error = -EINVAL;
2491	if (cmd != F_OFD_GETLK && flock->l_type != F_RDLCK
2492			&& flock->l_type != F_WRLCK)
2493		goto out;
2494
2495	error = flock64_to_posix_lock(filp, fl, flock);
2496	if (error)
2497		goto out;
2498
2499	if (cmd == F_OFD_GETLK) {
2500		error = -EINVAL;
2501		if (flock->l_pid != 0)
2502			goto out;
2503
2504		fl->c.flc_flags |= FL_OFDLCK;
2505		fl->c.flc_owner = filp;
2506	}
2507
2508	error = vfs_test_lock(filp, fl);
2509	if (error)
2510		goto out;
2511
2512	flock->l_type = fl->c.flc_type;
2513	if (fl->c.flc_type != F_UNLCK)
2514		posix_lock_to_flock64(flock, fl);
2515
2516out:
2517	locks_free_lock(fl);
2518	return error;
2519}
2520
2521/* Apply the lock described by @flock to an open file descriptor.
2522 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
2523 */
2524int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
2525		struct flock64 *flock)
2526{
2527	struct file_lock *file_lock = locks_alloc_lock();
2528	struct file *f;
2529	int error;
2530
2531	if (file_lock == NULL)
2532		return -ENOLCK;
2533
2534	error = flock64_to_posix_lock(filp, file_lock, flock);
2535	if (error)
2536		goto out;
2537
2538	error = check_fmode_for_setlk(file_lock);
2539	if (error)
2540		goto out;
2541
2542	/*
2543	 * If the cmd is requesting file-private locks, then set the
2544	 * FL_OFDLCK flag and override the owner.
2545	 */
2546	switch (cmd) {
2547	case F_OFD_SETLK:
2548		error = -EINVAL;
2549		if (flock->l_pid != 0)
2550			goto out;
2551
2552		cmd = F_SETLK64;
2553		file_lock->c.flc_flags |= FL_OFDLCK;
2554		file_lock->c.flc_owner = filp;
2555		break;
2556	case F_OFD_SETLKW:
2557		error = -EINVAL;
2558		if (flock->l_pid != 0)
2559			goto out;
2560
2561		cmd = F_SETLKW64;
2562		file_lock->c.flc_flags |= FL_OFDLCK;
2563		file_lock->c.flc_owner = filp;
2564		fallthrough;
2565	case F_SETLKW64:
2566		file_lock->c.flc_flags |= FL_SLEEP;
2567	}
2568
2569	error = do_lock_file_wait(filp, cmd, file_lock);
2570
2571	/*
2572	 * Detect close/fcntl races and recover by zapping all POSIX locks
2573	 * associated with this file and our files_struct, just like on
2574	 * filp_flush(). There is no need to do that when we're
2575	 * unlocking though, or for OFD locks.
2576	 */
2577	if (!error && file_lock->c.flc_type != F_UNLCK &&
2578	    !(file_lock->c.flc_flags & FL_OFDLCK)) {
2579		struct files_struct *files = current->files;
2580		/*
2581		 * We need that spin_lock here - it prevents reordering between
2582		 * update of i_flctx->flc_posix and check for it done in
2583		 * close(). rcu_read_lock() wouldn't do.
2584		 */
2585		spin_lock(&files->file_lock);
2586		f = files_lookup_fd_locked(files, fd);
2587		spin_unlock(&files->file_lock);
2588		if (f != filp) {
2589			locks_remove_posix(filp, files);
2590			error = -EBADF;
2591		}
2592	}
2593out:
2594	locks_free_lock(file_lock);
2595	return error;
2596}
2597#endif /* BITS_PER_LONG == 32 */
2598
2599/*
2600 * This function is called when the file is being removed
2601 * from the task's fd array.  POSIX locks belonging to this task
2602 * are deleted at this time.
2603 */
2604void locks_remove_posix(struct file *filp, fl_owner_t owner)
2605{
2606	int error;
2607	struct inode *inode = file_inode(filp);
2608	struct file_lock lock;
2609	struct file_lock_context *ctx;
2610
2611	/*
2612	 * If there are no locks held on this file, we don't need to call
2613	 * posix_lock_file().  Another process could be setting a lock on this
2614	 * file at the same time, but we wouldn't remove that lock anyway.
2615	 */
2616	ctx = locks_inode_context(inode);
2617	if (!ctx || list_empty(&ctx->flc_posix))
2618		return;
2619
2620	locks_init_lock(&lock);
2621	lock.c.flc_type = F_UNLCK;
2622	lock.c.flc_flags = FL_POSIX | FL_CLOSE;
2623	lock.fl_start = 0;
2624	lock.fl_end = OFFSET_MAX;
2625	lock.c.flc_owner = owner;
2626	lock.c.flc_pid = current->tgid;
2627	lock.c.flc_file = filp;
2628	lock.fl_ops = NULL;
2629	lock.fl_lmops = NULL;
2630
2631	error = vfs_lock_file(filp, F_SETLK, &lock, NULL);
2632
2633	if (lock.fl_ops && lock.fl_ops->fl_release_private)
2634		lock.fl_ops->fl_release_private(&lock);
2635	trace_locks_remove_posix(inode, &lock, error);
2636}
2637EXPORT_SYMBOL(locks_remove_posix);
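
/*
 * Illustrative userspace consequence (not kernel code; the path is
 * hypothetical): because traditional POSIX locks are owned by the
 * process, *any* close of a descriptor for the file drops them - even
 * the close of a different descriptor than the one used to lock.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd1 = open("/tmp/f", O_RDWR);
 *	int fd2 = open("/tmp/f", O_RDWR);
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *
 *	fcntl(fd1, F_SETLK, &fl);	// lock the whole file via fd1
 *	close(fd2);			// the lock taken via fd1 is gone
 */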
2638
2639/* The i_flctx must be valid when calling into here */
2640static void
2641locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
2642{
2643	struct file_lock fl;
2644	struct inode *inode = file_inode(filp);
2645
2646	if (list_empty(&flctx->flc_flock))
2647		return;
2648
2649	flock_make_lock(filp, &fl, F_UNLCK);
2650	fl.c.flc_flags |= FL_CLOSE;
2651
2652	if (filp->f_op->flock)
2653		filp->f_op->flock(filp, F_SETLKW, &fl);
2654	else
2655		flock_lock_inode(inode, &fl);
2656
2657	if (fl.fl_ops && fl.fl_ops->fl_release_private)
2658		fl.fl_ops->fl_release_private(&fl);
2659}
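
/*
 * Illustrative userspace usage (not kernel code; the path is
 * hypothetical): flock(2) locks are tied to the open file description,
 * so unlike the POSIX case above they survive the close of unrelated
 * descriptors and are released here only with the last close of the
 * locking description.
 *
 *	#include <fcntl.h>
 *	#include <sys/file.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/tmp/f", O_RDWR);
 *	flock(fd, LOCK_EX);		// whole-file exclusive lock
 *	// ... critical section ...
 *	close(fd);			// last close releases the lock
 */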
2660
2661/* The i_flctx must be valid when calling into here */
2662static void
2663locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
2664{
2665	struct file_lease *fl, *tmp;
2666	LIST_HEAD(dispose);
2667
2668	if (list_empty(&ctx->flc_lease))
2669		return;
2670
2671	percpu_down_read(&file_rwsem);
2672	spin_lock(&ctx->flc_lock);
2673	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, c.flc_list)
2674		if (filp == fl->c.flc_file)
2675			lease_modify(fl, F_UNLCK, &dispose);
2676	spin_unlock(&ctx->flc_lock);
2677	percpu_up_read(&file_rwsem);
2678
2679	locks_dispose_list(&dispose);
2680}
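
/*
 * Illustrative userspace usage (not kernel code; the path is
 * hypothetical): leases are requested with F_SETLEASE.  When another
 * task opens the file in a conflicting mode, the holder is signalled
 * (SIGIO by default) and must give the lease up within the lease-break
 * timeout; the function above forcibly unlocks whatever is still held
 * at final-close time.
 *
 *	#define _GNU_SOURCE		// for F_SETLEASE with glibc
 *	#include <fcntl.h>
 *
 *	int fd = open("/tmp/f", O_RDONLY);	// read lease needs O_RDONLY
 *	fcntl(fd, F_SETLEASE, F_RDLCK);		// take a read lease
 *	// ... serve cached data ...
 *	fcntl(fd, F_SETLEASE, F_UNLCK);		// release it explicitly
 */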
2681
2682/*
2683 * This function is called on the last close of an open file.
2684 */
2685void locks_remove_file(struct file *filp)
2686{
2687	struct file_lock_context *ctx;
2688
2689	ctx = locks_inode_context(file_inode(filp));
2690	if (!ctx)
2691		return;
2692
2693	/* remove any OFD locks */
2694	locks_remove_posix(filp, filp);
2695
2696	/* remove flock locks */
2697	locks_remove_flock(filp, ctx);
2698
2699	/* remove any leases */
2700	locks_remove_lease(filp, ctx);
2701
2702	spin_lock(&ctx->flc_lock);
2703	locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX");
2704	locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK");
2705	locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE");
2706	spin_unlock(&ctx->flc_lock);
2707}
2708
2709/**
2710 * vfs_cancel_lock - cancel a blocked byte-range lock request
2711 * @filp: The file the request was made against
2712 * @fl: The blocked lock request to cancel
2713 *
2714 * Used by lock managers to cancel blocked requests.
2715 */
2716int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2717{
2718	WARN_ON_ONCE(filp != fl->c.flc_file);
2719	if (filp->f_op->lock)
2720		return filp->f_op->lock(filp, F_CANCELLK, fl);
2721	return 0;
2722}
2723EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2724
2725/**
2726 * vfs_inode_has_locks - are any file locks held on @inode?
2727 * @inode: inode to check for locks
2728 *
2729 * Return true if there are any FL_POSIX or FL_FLOCK locks currently
2730 * set on @inode.
2731 */
2732bool vfs_inode_has_locks(struct inode *inode)
2733{
2734	struct file_lock_context *ctx;
2735	bool ret;
2736
2737	ctx = locks_inode_context(inode);
2738	if (!ctx)
2739		return false;
2740
2741	spin_lock(&ctx->flc_lock);
2742	ret = !list_empty(&ctx->flc_posix) || !list_empty(&ctx->flc_flock);
2743	spin_unlock(&ctx->flc_lock);
2744	return ret;
2745}
2746EXPORT_SYMBOL_GPL(vfs_inode_has_locks);
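
/*
 * A minimal in-kernel usage sketch (hypothetical caller, not taken
 * from this file): a filesystem that wants to refuse an operation
 * while any byte-range or flock lock is outstanding on an inode could
 * do:
 *
 *	if (vfs_inode_has_locks(inode))
 *		return -EBUSY;
 *
 * The answer is only a snapshot: a new lock can be applied as soon as
 * flc_lock is dropped, so callers must tolerate that race.
 */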
2747
2748#ifdef CONFIG_PROC_FS
2749#include <linux/proc_fs.h>
2750#include <linux/seq_file.h>
2751
2752struct locks_iterator {
2753	int	li_cpu;
2754	loff_t	li_pos;
2755};
2756
2757static void lock_get_status(struct seq_file *f, struct file_lock_core *flc,
2758			    loff_t id, char *pfx, int repeat)
2759{
2760	struct inode *inode = NULL;
2761	unsigned int pid;
2762	struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
2763	int type = flc->flc_type;
2764	struct file_lock *fl = file_lock(flc);
2765
2766	pid = locks_translate_pid(flc, proc_pidns);
2767
2768	/*
2769	 * If the lock owner is dead (and its pid has been freed) or not
2770	 * visible in the current pidns, zero is shown as the pid value.
2771	 * Check the lock info from init_pid_ns to get the saved lock pid.
2772	 */
2773	if (flc->flc_file != NULL)
2774		inode = file_inode(flc->flc_file);
2775
2776	seq_printf(f, "%lld: ", id);
2777
2778	if (repeat)
2779		seq_printf(f, "%*s", repeat - 1 + (int)strlen(pfx), pfx);
2780
2781	if (flc->flc_flags & FL_POSIX) {
2782		if (flc->flc_flags & FL_ACCESS)
2783			seq_puts(f, "ACCESS");
2784		else if (flc->flc_flags & FL_OFDLCK)
2785			seq_puts(f, "OFDLCK");
2786		else
2787			seq_puts(f, "POSIX ");
2788
2789		seq_printf(f, " %s ",
2790			     (inode == NULL) ? "*NOINODE*" : "ADVISORY ");
2791	} else if (flc->flc_flags & FL_FLOCK) {
2792		seq_puts(f, "FLOCK  ADVISORY  ");
2793	} else if (flc->flc_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT)) {
2794		struct file_lease *lease = file_lease(flc);
2795
2796		type = target_leasetype(lease);
2797
2798		if (flc->flc_flags & FL_DELEG)
2799			seq_puts(f, "DELEG  ");
2800		else
2801			seq_puts(f, "LEASE  ");
2802
2803		if (lease_breaking(lease))
2804			seq_puts(f, "BREAKING  ");
2805		else if (flc->flc_file)
2806			seq_puts(f, "ACTIVE    ");
2807		else
2808			seq_puts(f, "BREAKER   ");
2809	} else {
2810		seq_puts(f, "UNKNOWN UNKNOWN  ");
2811	}
2812
2813	seq_printf(f, "%s ", (type == F_WRLCK) ? "WRITE" :
2814			     (type == F_RDLCK) ? "READ" : "UNLCK");
2815	if (inode) {
2816		/* userspace relies on this representation of dev_t */
2817		seq_printf(f, "%d %02x:%02x:%lu ", pid,
2818				MAJOR(inode->i_sb->s_dev),
2819				MINOR(inode->i_sb->s_dev), inode->i_ino);
2820	} else {
2821		seq_printf(f, "%d <none>:0 ", pid);
2822	}
2823	if (flc->flc_flags & FL_POSIX) {
2824		if (fl->fl_end == OFFSET_MAX)
2825			seq_printf(f, "%Ld EOF\n", fl->fl_start);
2826		else
2827			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2828	} else {
2829		seq_puts(f, "0 EOF\n");
2830	}
2831}
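
/*
 * For illustration, lock_get_status() produces /proc/locks lines of
 * the following shape (all values made up):
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:5308 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 601 08:01:7808 0 EOF
 *	3: LEASE  ACTIVE    READ 587 08:01:7365 0 EOF
 *
 * i.e. id, lock class, ADVISORY, access mode, pid, major:minor:inode
 * and the byte range (flock locks and leases always cover the whole
 * file, hence "0 EOF").
 */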
2832
2833static struct file_lock_core *get_next_blocked_member(struct file_lock_core *node)
2834{
2835	struct file_lock_core *tmp;
2836
2837	/* NULL node or root node */
2838	if (node == NULL || node->flc_blocker == NULL)
2839		return NULL;
2840
2841	/* The next member in the list may wrap back to the node itself */
2842	tmp = list_next_entry(node, flc_blocked_member);
2843	if (list_entry_is_head(tmp, &node->flc_blocker->flc_blocked_requests,
2844			       flc_blocked_member)
2845		|| tmp == node) {
2846		return NULL;
2847	}
2848
2849	return tmp;
2850}
2851
2852static int locks_show(struct seq_file *f, void *v)
2853{
2854	struct locks_iterator *iter = f->private;
2855	struct file_lock_core *cur, *tmp;
2856	struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
2857	int level = 0;
2858
2859	cur = hlist_entry(v, struct file_lock_core, flc_link);
2860
2861	if (locks_translate_pid(cur, proc_pidns) == 0)
2862		return 0;
2863
2864	/* View this cross-linked list as a binary tree: the first member of
2865	 * flc_blocked_requests is the left child of the current node, the next
2866	 * sibling in flc_blocked_member is the right child, and the parent is
2867	 * reachable through flc_blocker - so this reduces to a binary tree traversal.
2868	 */
2869	while (cur != NULL) {
2870		if (level)
2871			lock_get_status(f, cur, iter->li_pos, "-> ", level);
2872		else
2873			lock_get_status(f, cur, iter->li_pos, "", level);
2874
2875		if (!list_empty(&cur->flc_blocked_requests)) {
2876			/* Turn left */
2877			cur = list_first_entry_or_null(&cur->flc_blocked_requests,
2878						       struct file_lock_core,
2879						       flc_blocked_member);
2880			level++;
2881		} else {
2882			/* Turn right */
2883			tmp = get_next_blocked_member(cur);
2884			/* Fall back to parent node */
2885			while (tmp == NULL && cur->flc_blocker != NULL) {
2886				cur = cur->flc_blocker;
2887				level--;
2888				tmp = get_next_blocked_member(cur);
2889			}
2890			cur = tmp;
2891		}
2892	}
2893
2894	return 0;
2895}
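
/*
 * For illustration, locks_show() prints a blocked waiter indented
 * beneath the lock that blocks it, reusing the blocker's id with a
 * "-> " prefix plus one extra leading space per additional tree level
 * (values made up):
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:5308 0 EOF
 *	1: -> POSIX  ADVISORY  WRITE 1235 08:01:5308 0 EOF
 *	1:  -> POSIX  ADVISORY  WRITE 1236 08:01:5308 0 EOF
 */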
2896
2897static void __show_fd_locks(struct seq_file *f,
2898			struct list_head *head, int *id,
2899			struct file *filp, struct files_struct *files)
2900{
2901	struct file_lock_core *fl;
2902
2903	list_for_each_entry(fl, head, flc_list) {
2904
2905		if (filp != fl->flc_file)
2906			continue;
2907		if (fl->flc_owner != files && fl->flc_owner != filp)
2908			continue;
2909
2910		(*id)++;
2911		seq_puts(f, "lock:\t");
2912		lock_get_status(f, fl, *id, "", 0);
2913	}
2914}
2915
2916void show_fd_locks(struct seq_file *f,
2917		  struct file *filp, struct files_struct *files)
2918{
2919	struct inode *inode = file_inode(filp);
2920	struct file_lock_context *ctx;
2921	int id = 0;
2922
2923	ctx = locks_inode_context(inode);
2924	if (!ctx)
2925		return;
2926
2927	spin_lock(&ctx->flc_lock);
2928	__show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
2929	__show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
2930	__show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
2931	spin_unlock(&ctx->flc_lock);
2932}
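
/*
 * For illustration, show_fd_locks() backs the "lock:" lines seen in
 * /proc/<pid>/fdinfo/<fd>, e.g. (values made up):
 *
 *	lock:	1: POSIX  ADVISORY  WRITE 1234 08:01:5308 0 EOF
 */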
2933
2934static void *locks_start(struct seq_file *f, loff_t *pos)
2935	__acquires(&blocked_lock_lock)
2936{
2937	struct locks_iterator *iter = f->private;
2938
2939	iter->li_pos = *pos + 1;
2940	percpu_down_write(&file_rwsem);
2941	spin_lock(&blocked_lock_lock);
2942	return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
2943}
2944
2945static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2946{
2947	struct locks_iterator *iter = f->private;
2948
2949	++iter->li_pos;
2950	return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
2951}
2952
2953static void locks_stop(struct seq_file *f, void *v)
2954	__releases(&blocked_lock_lock)
2955{
2956	spin_unlock(&blocked_lock_lock);
2957	percpu_up_write(&file_rwsem);
2958}
2959
2960static const struct seq_operations locks_seq_operations = {
2961	.start	= locks_start,
2962	.next	= locks_next,
2963	.stop	= locks_stop,
2964	.show	= locks_show,
2965};
2966
2967static int __init proc_locks_init(void)
2968{
2969	proc_create_seq_private("locks", 0, NULL, &locks_seq_operations,
2970			sizeof(struct locks_iterator), NULL);
2971	return 0;
2972}
2973fs_initcall(proc_locks_init);
2974#endif
2975
2976static int __init filelock_init(void)
2977{
2978	int i;
2979
2980	flctx_cache = kmem_cache_create("file_lock_ctx",
2981			sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);
2982
2983	filelock_cache = kmem_cache_create("file_lock_cache",
2984			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2985
2986	filelease_cache = kmem_cache_create("file_lease_cache",
2987			sizeof(struct file_lease), 0, SLAB_PANIC, NULL);
2988
2989	for_each_possible_cpu(i) {
2990		struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);
2991
2992		spin_lock_init(&fll->lock);
2993		INIT_HLIST_HEAD(&fll->hlist);
2994	}
2995
2996	lease_notifier_chain_init();
2997	return 0;
2998}
2999core_initcall(filelock_init);