Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/fs/lockd/svclock.c
4 *
5 * Handling of server-side locks, mostly of the blocked variety.
6 * This is the ugliest part of lockd because we tread on very thin ice.
7 * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
8 * IMNSHO introducing the grant callback into the NLM protocol was one
9 * of the worst ideas Sun ever had. Except maybe for the idea of doing
10 * NFS file locking at all.
11 *
12 * I'm trying hard to avoid race conditions by protecting most accesses
13 * to a file's list of blocked locks through a semaphore. The global
14 * list of blocked locks is not protected in this fashion however.
15 * Therefore, some functions (such as the RPC callback for the async grant
16 * call) move blocked locks towards the head of the list *while some other
17 * process might be traversing it*. This should not be a problem in
18 * practice, because this will only cause functions traversing the list
19 * to visit some blocks twice.
20 *
21 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
22 */
23
24#include <linux/types.h>
25#include <linux/slab.h>
26#include <linux/errno.h>
27#include <linux/kernel.h>
28#include <linux/sched.h>
29#include <linux/sunrpc/clnt.h>
30#include <linux/sunrpc/svc_xprt.h>
31#include <linux/lockd/nlm.h>
32#include <linux/lockd/lockd.h>
33#include <linux/kthread.h>
34
35#define NLMDBG_FACILITY NLMDBG_SVCLOCK
36
37#ifdef CONFIG_LOCKD_V4
38#define nlm_deadlock nlm4_deadlock
39#else
40#define nlm_deadlock nlm_lck_denied
41#endif
42
43static void nlmsvc_release_block(struct nlm_block *block);
44static void nlmsvc_insert_block(struct nlm_block *block, unsigned long);
45static void nlmsvc_remove_block(struct nlm_block *block);
46
47static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
48static void nlmsvc_freegrantargs(struct nlm_rqst *call);
49static const struct rpc_call_ops nlmsvc_grant_ops;
50
51/*
52 * The list of blocked locks to retry
53 */
54static LIST_HEAD(nlm_blocked);
55static DEFINE_SPINLOCK(nlm_blocked_lock);
56
57#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
58static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
59{
60 /*
61 * We can get away with a static buffer because this is only called
62 * from lockd, which is single-threaded.
63 */
64 static char buf[2*NLM_MAXCOOKIELEN+1];
65 unsigned int i, len = sizeof(buf);
66 char *p = buf;
67
68 len--; /* allow for trailing \0 */
69 if (len < 3)
70 return "???";
71 for (i = 0 ; i < cookie->len ; i++) {
72 if (len < 2) {
73 strcpy(p-3, "...");
74 break;
75 }
76 sprintf(p, "%02x", cookie->data[i]);
77 p += 2;
78 len -= 2;
79 }
80 *p = '\0';
81
82 return buf;
83}
84#endif
85
86/*
87 * Insert a blocked lock into the global list
88 */
89static void
90nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when)
91{
92 struct nlm_block *b;
93 struct list_head *pos;
94
95 dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
96 if (list_empty(&block->b_list)) {
97 kref_get(&block->b_count);
98 } else {
99 list_del_init(&block->b_list);
100 }
101
102 pos = &nlm_blocked;
103 if (when != NLM_NEVER) {
104 if ((when += jiffies) == NLM_NEVER)
105 when ++;
106 list_for_each(pos, &nlm_blocked) {
107 b = list_entry(pos, struct nlm_block, b_list);
108 if (time_after(b->b_when,when) || b->b_when == NLM_NEVER)
109 break;
110 }
111 /* On normal exit from the loop, pos == &nlm_blocked,
112 * so we will be adding to the end of the list - good
113 */
114 }
115
116 list_add_tail(&block->b_list, pos);
117 block->b_when = when;
118}
119
120static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
121{
122 spin_lock(&nlm_blocked_lock);
123 nlmsvc_insert_block_locked(block, when);
124 spin_unlock(&nlm_blocked_lock);
125}
126
127/*
128 * Remove a block from the global list
129 */
130static inline void
131nlmsvc_remove_block(struct nlm_block *block)
132{
133 if (!list_empty(&block->b_list)) {
134 spin_lock(&nlm_blocked_lock);
135 list_del_init(&block->b_list);
136 spin_unlock(&nlm_blocked_lock);
137 nlmsvc_release_block(block);
138 }
139}
140
141/*
142 * Find a block for a given lock
143 */
144static struct nlm_block *
145nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
146{
147 struct nlm_block *block;
148 struct file_lock *fl;
149
150 dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
151 file, lock->fl.fl_pid,
152 (long long)lock->fl.fl_start,
153 (long long)lock->fl.fl_end, lock->fl.fl_type);
154 list_for_each_entry(block, &nlm_blocked, b_list) {
155 fl = &block->b_call->a_args.lock.fl;
156 dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
157 block->b_file, fl->fl_pid,
158 (long long)fl->fl_start,
159 (long long)fl->fl_end, fl->fl_type,
160 nlmdbg_cookie2a(&block->b_call->a_args.cookie));
161 if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
162 kref_get(&block->b_count);
163 return block;
164 }
165 }
166
167 return NULL;
168}
169
170static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
171{
172 if (a->len != b->len)
173 return 0;
174 if (memcmp(a->data, b->data, a->len))
175 return 0;
176 return 1;
177}
178
179/*
180 * Find a block with a given NLM cookie.
181 */
182static inline struct nlm_block *
183nlmsvc_find_block(struct nlm_cookie *cookie)
184{
185 struct nlm_block *block;
186
187 list_for_each_entry(block, &nlm_blocked, b_list) {
188 if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie))
189 goto found;
190 }
191
192 return NULL;
193
194found:
195 dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
196 kref_get(&block->b_count);
197 return block;
198}
199
200/*
201 * Create a block and initialize it.
202 *
203 * Note: we explicitly set the cookie of the grant reply to that of
204 * the blocked lock request. The spec explicitly mentions that the client
205 * should _not_ rely on the callback containing the same cookie as the
206 * request, but (as I found out later) that's because some implementations
207 * do just this. Never mind the standards comittees, they support our
208 * logging industries.
209 *
210 * 10 years later: I hope we can safely ignore these old and broken
211 * clients by now. Let's fix this so we can uniquely identify an incoming
212 * GRANTED_RES message by cookie, without having to rely on the client's IP
213 * address. --okir
214 */
215static struct nlm_block *
216nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
217 struct nlm_file *file, struct nlm_lock *lock,
218 struct nlm_cookie *cookie)
219{
220 struct nlm_block *block;
221 struct nlm_rqst *call = NULL;
222
223 call = nlm_alloc_call(host);
224 if (call == NULL)
225 return NULL;
226
227 /* Allocate memory for block, and initialize arguments */
228 block = kzalloc(sizeof(*block), GFP_KERNEL);
229 if (block == NULL)
230 goto failed;
231 kref_init(&block->b_count);
232 INIT_LIST_HEAD(&block->b_list);
233 INIT_LIST_HEAD(&block->b_flist);
234
235 if (!nlmsvc_setgrantargs(call, lock))
236 goto failed_free;
237
238 /* Set notifier function for VFS, and init args */
239 call->a_args.lock.fl.fl_flags |= FL_SLEEP;
240 call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
241 nlmclnt_next_cookie(&call->a_args.cookie);
242
243 dprintk("lockd: created block %p...\n", block);
244
245 /* Create and initialize the block */
246 block->b_daemon = rqstp->rq_server;
247 block->b_host = host;
248 block->b_file = file;
249 file->f_count++;
250
251 /* Add to file's list of blocks */
252 list_add(&block->b_flist, &file->f_blocks);
253
254 /* Set up RPC arguments for callback */
255 block->b_call = call;
256 call->a_flags = RPC_TASK_ASYNC;
257 call->a_block = block;
258
259 return block;
260
261failed_free:
262 kfree(block);
263failed:
264 nlmsvc_release_call(call);
265 return NULL;
266}
267
268/*
269 * Delete a block.
270 * It is the caller's responsibility to check whether the file
271 * can be closed hereafter.
272 */
273static int nlmsvc_unlink_block(struct nlm_block *block)
274{
275 int status;
276 dprintk("lockd: unlinking block %p...\n", block);
277
278 /* Remove block from list */
279 status = posix_unblock_lock(&block->b_call->a_args.lock.fl);
280 nlmsvc_remove_block(block);
281 return status;
282}
283
284static void nlmsvc_free_block(struct kref *kref)
285{
286 struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
287 struct nlm_file *file = block->b_file;
288
289 dprintk("lockd: freeing block %p...\n", block);
290
291 /* Remove block from file's list of blocks */
292 list_del_init(&block->b_flist);
293 mutex_unlock(&file->f_mutex);
294
295 nlmsvc_freegrantargs(block->b_call);
296 nlmsvc_release_call(block->b_call);
297 nlm_release_file(block->b_file);
298 kfree(block);
299}
300
301static void nlmsvc_release_block(struct nlm_block *block)
302{
303 if (block != NULL)
304 kref_put_mutex(&block->b_count, nlmsvc_free_block, &block->b_file->f_mutex);
305}
306
307/*
308 * Loop over all blocks and delete blocks held by
309 * a matching host.
310 */
311void nlmsvc_traverse_blocks(struct nlm_host *host,
312 struct nlm_file *file,
313 nlm_host_match_fn_t match)
314{
315 struct nlm_block *block, *next;
316
317restart:
318 mutex_lock(&file->f_mutex);
319 list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
320 if (!match(block->b_host, host))
321 continue;
322 /* Do not destroy blocks that are not on
323 * the global retry list - why? */
324 if (list_empty(&block->b_list))
325 continue;
326 kref_get(&block->b_count);
327 mutex_unlock(&file->f_mutex);
328 nlmsvc_unlink_block(block);
329 nlmsvc_release_block(block);
330 goto restart;
331 }
332 mutex_unlock(&file->f_mutex);
333}
334
335/*
336 * Initialize arguments for GRANTED call. The nlm_rqst structure
337 * has been cleared already.
338 */
339static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
340{
341 locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
342 memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
343 call->a_args.lock.caller = utsname()->nodename;
344 call->a_args.lock.oh.len = lock->oh.len;
345
346 /* set default data area */
347 call->a_args.lock.oh.data = call->a_owner;
348 call->a_args.lock.svid = lock->fl.fl_pid;
349
350 if (lock->oh.len > NLMCLNT_OHSIZE) {
351 void *data = kmalloc(lock->oh.len, GFP_KERNEL);
352 if (!data)
353 return 0;
354 call->a_args.lock.oh.data = (u8 *) data;
355 }
356
357 memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
358 return 1;
359}
360
361static void nlmsvc_freegrantargs(struct nlm_rqst *call)
362{
363 if (call->a_args.lock.oh.data != call->a_owner)
364 kfree(call->a_args.lock.oh.data);
365
366 locks_release_private(&call->a_args.lock.fl);
367}
368
369/*
370 * Deferred lock request handling for non-blocking lock
371 */
372static __be32
373nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
374{
375 __be32 status = nlm_lck_denied_nolocks;
376
377 block->b_flags |= B_QUEUED;
378
379 nlmsvc_insert_block(block, NLM_TIMEOUT);
380
381 block->b_cache_req = &rqstp->rq_chandle;
382 if (rqstp->rq_chandle.defer) {
383 block->b_deferred_req =
384 rqstp->rq_chandle.defer(block->b_cache_req);
385 if (block->b_deferred_req != NULL)
386 status = nlm_drop_reply;
387 }
388 dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
389 block, block->b_flags, ntohl(status));
390
391 return status;
392}
393
394/*
395 * Attempt to establish a lock, and if it can't be granted, block it
396 * if required.
397 */
398__be32
399nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
400 struct nlm_host *host, struct nlm_lock *lock, int wait,
401 struct nlm_cookie *cookie, int reclaim)
402{
403 struct nlm_block *block = NULL;
404 int error;
405 __be32 ret;
406
407 dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
408 file_inode(file->f_file)->i_sb->s_id,
409 file_inode(file->f_file)->i_ino,
410 lock->fl.fl_type, lock->fl.fl_pid,
411 (long long)lock->fl.fl_start,
412 (long long)lock->fl.fl_end,
413 wait);
414
415 /* Lock file against concurrent access */
416 mutex_lock(&file->f_mutex);
417 /* Get existing block (in case client is busy-waiting)
418 * or create new block
419 */
420 block = nlmsvc_lookup_block(file, lock);
421 if (block == NULL) {
422 block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
423 ret = nlm_lck_denied_nolocks;
424 if (block == NULL)
425 goto out;
426 lock = &block->b_call->a_args.lock;
427 } else
428 lock->fl.fl_flags &= ~FL_SLEEP;
429
430 if (block->b_flags & B_QUEUED) {
431 dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n",
432 block, block->b_flags);
433 if (block->b_granted) {
434 nlmsvc_unlink_block(block);
435 ret = nlm_granted;
436 goto out;
437 }
438 if (block->b_flags & B_TIMED_OUT) {
439 nlmsvc_unlink_block(block);
440 ret = nlm_lck_denied;
441 goto out;
442 }
443 ret = nlm_drop_reply;
444 goto out;
445 }
446
447 if (locks_in_grace(SVC_NET(rqstp)) && !reclaim) {
448 ret = nlm_lck_denied_grace_period;
449 goto out;
450 }
451 if (reclaim && !locks_in_grace(SVC_NET(rqstp))) {
452 ret = nlm_lck_denied_grace_period;
453 goto out;
454 }
455
456 if (!wait)
457 lock->fl.fl_flags &= ~FL_SLEEP;
458 error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
459 lock->fl.fl_flags &= ~FL_SLEEP;
460
461 dprintk("lockd: vfs_lock_file returned %d\n", error);
462 switch (error) {
463 case 0:
464 ret = nlm_granted;
465 goto out;
466 case -EAGAIN:
467 /*
468 * If this is a blocking request for an
469 * already pending lock request then we need
470 * to put it back on lockd's block list
471 */
472 if (wait)
473 break;
474 ret = nlm_lck_denied;
475 goto out;
476 case FILE_LOCK_DEFERRED:
477 if (wait)
478 break;
479 /* Filesystem lock operation is in progress
480 Add it to the queue waiting for callback */
481 ret = nlmsvc_defer_lock_rqst(rqstp, block);
482 goto out;
483 case -EDEADLK:
484 ret = nlm_deadlock;
485 goto out;
486 default: /* includes ENOLCK */
487 ret = nlm_lck_denied_nolocks;
488 goto out;
489 }
490
491 ret = nlm_lck_blocked;
492
493 /* Append to list of blocked */
494 nlmsvc_insert_block(block, NLM_NEVER);
495out:
496 mutex_unlock(&file->f_mutex);
497 nlmsvc_release_block(block);
498 dprintk("lockd: nlmsvc_lock returned %u\n", ret);
499 return ret;
500}
501
502/*
503 * Test for presence of a conflicting lock.
504 */
505__be32
506nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
507 struct nlm_host *host, struct nlm_lock *lock,
508 struct nlm_lock *conflock, struct nlm_cookie *cookie)
509{
510 int error;
511 __be32 ret;
512
513 dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
514 file_inode(file->f_file)->i_sb->s_id,
515 file_inode(file->f_file)->i_ino,
516 lock->fl.fl_type,
517 (long long)lock->fl.fl_start,
518 (long long)lock->fl.fl_end);
519
520 if (locks_in_grace(SVC_NET(rqstp))) {
521 ret = nlm_lck_denied_grace_period;
522 goto out;
523 }
524
525 error = vfs_test_lock(file->f_file, &lock->fl);
526 if (error) {
527 /* We can't currently deal with deferred test requests */
528 if (error == FILE_LOCK_DEFERRED)
529 WARN_ON_ONCE(1);
530
531 ret = nlm_lck_denied_nolocks;
532 goto out;
533 }
534
535 if (lock->fl.fl_type == F_UNLCK) {
536 ret = nlm_granted;
537 goto out;
538 }
539
540 dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
541 lock->fl.fl_type, (long long)lock->fl.fl_start,
542 (long long)lock->fl.fl_end);
543 conflock->caller = "somehost"; /* FIXME */
544 conflock->len = strlen(conflock->caller);
545 conflock->oh.len = 0; /* don't return OH info */
546 conflock->svid = lock->fl.fl_pid;
547 conflock->fl.fl_type = lock->fl.fl_type;
548 conflock->fl.fl_start = lock->fl.fl_start;
549 conflock->fl.fl_end = lock->fl.fl_end;
550 locks_release_private(&lock->fl);
551 ret = nlm_lck_denied;
552out:
553 return ret;
554}
555
556/*
557 * Remove a lock.
558 * This implies a CANCEL call: We send a GRANT_MSG, the client replies
559 * with a GRANT_RES call which gets lost, and calls UNLOCK immediately
560 * afterwards. In this case the block will still be there, and hence
561 * must be removed.
562 */
563__be32
564nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
565{
566 int error;
567
568 dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
569 file_inode(file->f_file)->i_sb->s_id,
570 file_inode(file->f_file)->i_ino,
571 lock->fl.fl_pid,
572 (long long)lock->fl.fl_start,
573 (long long)lock->fl.fl_end);
574
575 /* First, cancel any lock that might be there */
576 nlmsvc_cancel_blocked(net, file, lock);
577
578 lock->fl.fl_type = F_UNLCK;
579 error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
580
581 return (error < 0)? nlm_lck_denied_nolocks : nlm_granted;
582}
583
584/*
585 * Cancel a previously blocked request.
586 *
587 * A cancel request always overrides any grant that may currently
588 * be in progress.
589 * The calling procedure must check whether the file can be closed.
590 */
591__be32
592nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
593{
594 struct nlm_block *block;
595 int status = 0;
596
597 dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
598 file_inode(file->f_file)->i_sb->s_id,
599 file_inode(file->f_file)->i_ino,
600 lock->fl.fl_pid,
601 (long long)lock->fl.fl_start,
602 (long long)lock->fl.fl_end);
603
604 if (locks_in_grace(net))
605 return nlm_lck_denied_grace_period;
606
607 mutex_lock(&file->f_mutex);
608 block = nlmsvc_lookup_block(file, lock);
609 mutex_unlock(&file->f_mutex);
610 if (block != NULL) {
611 vfs_cancel_lock(block->b_file->f_file,
612 &block->b_call->a_args.lock.fl);
613 status = nlmsvc_unlink_block(block);
614 nlmsvc_release_block(block);
615 }
616 return status ? nlm_lck_denied : nlm_granted;
617}
618
619/*
620 * This is a callback from the filesystem for VFS file lock requests.
621 * It will be used if lm_grant is defined and the filesystem can not
622 * respond to the request immediately.
623 * For SETLK or SETLKW request it will get the local posix lock.
624 * In all cases it will move the block to the head of nlm_blocked q where
625 * nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit the
626 * deferred rpc for GETLK and SETLK.
627 */
628static void
629nlmsvc_update_deferred_block(struct nlm_block *block, int result)
630{
631 block->b_flags |= B_GOT_CALLBACK;
632 if (result == 0)
633 block->b_granted = 1;
634 else
635 block->b_flags |= B_TIMED_OUT;
636}
637
638static int nlmsvc_grant_deferred(struct file_lock *fl, int result)
639{
640 struct nlm_block *block;
641 int rc = -ENOENT;
642
643 spin_lock(&nlm_blocked_lock);
644 list_for_each_entry(block, &nlm_blocked, b_list) {
645 if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
646 dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n",
647 block, block->b_flags);
648 if (block->b_flags & B_QUEUED) {
649 if (block->b_flags & B_TIMED_OUT) {
650 rc = -ENOLCK;
651 break;
652 }
653 nlmsvc_update_deferred_block(block, result);
654 } else if (result == 0)
655 block->b_granted = 1;
656
657 nlmsvc_insert_block_locked(block, 0);
658 svc_wake_up(block->b_daemon);
659 rc = 0;
660 break;
661 }
662 }
663 spin_unlock(&nlm_blocked_lock);
664 if (rc == -ENOENT)
665 printk(KERN_WARNING "lockd: grant for unknown block\n");
666 return rc;
667}
668
669/*
670 * Unblock a blocked lock request. This is a callback invoked from the
671 * VFS layer when a lock on which we blocked is removed.
672 *
673 * This function doesn't grant the blocked lock instantly, but rather moves
674 * the block to the head of nlm_blocked where it can be picked up by lockd.
675 */
676static void
677nlmsvc_notify_blocked(struct file_lock *fl)
678{
679 struct nlm_block *block;
680
681 dprintk("lockd: VFS unblock notification for block %p\n", fl);
682 spin_lock(&nlm_blocked_lock);
683 list_for_each_entry(block, &nlm_blocked, b_list) {
684 if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
685 nlmsvc_insert_block_locked(block, 0);
686 spin_unlock(&nlm_blocked_lock);
687 svc_wake_up(block->b_daemon);
688 return;
689 }
690 }
691 spin_unlock(&nlm_blocked_lock);
692 printk(KERN_WARNING "lockd: notification for unknown block!\n");
693}
694
695static int nlmsvc_same_owner(struct file_lock *fl1, struct file_lock *fl2)
696{
697 return fl1->fl_owner == fl2->fl_owner && fl1->fl_pid == fl2->fl_pid;
698}
699
700/*
701 * Since NLM uses two "keys" for tracking locks, we need to hash them down
702 * to one for the blocked_hash. Here, we're just xor'ing the host address
703 * with the pid in order to create a key value for picking a hash bucket.
704 */
705static unsigned long
706nlmsvc_owner_key(struct file_lock *fl)
707{
708 return (unsigned long)fl->fl_owner ^ (unsigned long)fl->fl_pid;
709}
710
711const struct lock_manager_operations nlmsvc_lock_operations = {
712 .lm_compare_owner = nlmsvc_same_owner,
713 .lm_owner_key = nlmsvc_owner_key,
714 .lm_notify = nlmsvc_notify_blocked,
715 .lm_grant = nlmsvc_grant_deferred,
716};
717
718/*
719 * Try to claim a lock that was previously blocked.
720 *
721 * Note that we use both the RPC_GRANTED_MSG call _and_ an async
722 * RPC thread when notifying the client. This seems like overkill...
723 * Here's why:
724 * - we don't want to use a synchronous RPC thread, otherwise
725 * we might find ourselves hanging on a dead portmapper.
726 * - Some lockd implementations (e.g. HP) don't react to
727 * RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
728 */
729static void
730nlmsvc_grant_blocked(struct nlm_block *block)
731{
732 struct nlm_file *file = block->b_file;
733 struct nlm_lock *lock = &block->b_call->a_args.lock;
734 int error;
735 loff_t fl_start, fl_end;
736
737 dprintk("lockd: grant blocked lock %p\n", block);
738
739 kref_get(&block->b_count);
740
741 /* Unlink block request from list */
742 nlmsvc_unlink_block(block);
743
744 /* If b_granted is true this means we've been here before.
745 * Just retry the grant callback, possibly refreshing the RPC
746 * binding */
747 if (block->b_granted) {
748 nlm_rebind_host(block->b_host);
749 goto callback;
750 }
751
752 /* Try the lock operation again */
753 /* vfs_lock_file() can mangle fl_start and fl_end, but we need
754 * them unchanged for the GRANT_MSG
755 */
756 lock->fl.fl_flags |= FL_SLEEP;
757 fl_start = lock->fl.fl_start;
758 fl_end = lock->fl.fl_end;
759 error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
760 lock->fl.fl_flags &= ~FL_SLEEP;
761 lock->fl.fl_start = fl_start;
762 lock->fl.fl_end = fl_end;
763
764 switch (error) {
765 case 0:
766 break;
767 case FILE_LOCK_DEFERRED:
768 dprintk("lockd: lock still blocked error %d\n", error);
769 nlmsvc_insert_block(block, NLM_NEVER);
770 nlmsvc_release_block(block);
771 return;
772 default:
773 printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
774 -error, __func__);
775 nlmsvc_insert_block(block, 10 * HZ);
776 nlmsvc_release_block(block);
777 return;
778 }
779
780callback:
781 /* Lock was granted by VFS. */
782 dprintk("lockd: GRANTing blocked lock.\n");
783 block->b_granted = 1;
784
785 /* keep block on the list, but don't reattempt until the RPC
786 * completes or the submission fails
787 */
788 nlmsvc_insert_block(block, NLM_NEVER);
789
790 /* Call the client -- use a soft RPC task since nlmsvc_retry_blocked
791 * will queue up a new one if this one times out
792 */
793 error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
794 &nlmsvc_grant_ops);
795
796 /* RPC submission failed, wait a bit and retry */
797 if (error < 0)
798 nlmsvc_insert_block(block, 10 * HZ);
799}
800
801/*
802 * This is the callback from the RPC layer when the NLM_GRANTED_MSG
803 * RPC call has succeeded or timed out.
804 * Like all RPC callbacks, it is invoked by the rpciod process, so it
805 * better not sleep. Therefore, we put the blocked lock on the nlm_blocked
806 * chain once more in order to have it removed by lockd itself (which can
807 * then sleep on the file semaphore without disrupting e.g. the nfs client).
808 */
809static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
810{
811 struct nlm_rqst *call = data;
812 struct nlm_block *block = call->a_block;
813 unsigned long timeout;
814
815 dprintk("lockd: GRANT_MSG RPC callback\n");
816
817 spin_lock(&nlm_blocked_lock);
818 /* if the block is not on a list at this point then it has
819 * been invalidated. Don't try to requeue it.
820 *
821 * FIXME: it's possible that the block is removed from the list
822 * after this check but before the nlmsvc_insert_block. In that
823 * case it will be added back. Perhaps we need better locking
824 * for nlm_blocked?
825 */
826 if (list_empty(&block->b_list))
827 goto out;
828
829 /* Technically, we should down the file semaphore here. Since we
830 * move the block towards the head of the queue only, no harm
831 * can be done, though. */
832 if (task->tk_status < 0) {
833 /* RPC error: Re-insert for retransmission */
834 timeout = 10 * HZ;
835 } else {
836 /* Call was successful, now wait for client callback */
837 timeout = 60 * HZ;
838 }
839 nlmsvc_insert_block_locked(block, timeout);
840 svc_wake_up(block->b_daemon);
841out:
842 spin_unlock(&nlm_blocked_lock);
843}
844
845/*
846 * FIXME: nlmsvc_release_block() grabs a mutex. This is not allowed for an
847 * .rpc_release rpc_call_op
848 */
849static void nlmsvc_grant_release(void *data)
850{
851 struct nlm_rqst *call = data;
852 nlmsvc_release_block(call->a_block);
853}
854
855static const struct rpc_call_ops nlmsvc_grant_ops = {
856 .rpc_call_done = nlmsvc_grant_callback,
857 .rpc_release = nlmsvc_grant_release,
858};
859
860/*
861 * We received a GRANT_RES callback. Try to find the corresponding
862 * block.
863 */
864void
865nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
866{
867 struct nlm_block *block;
868
869 dprintk("grant_reply: looking for cookie %x, s=%d \n",
870 *(unsigned int *)(cookie->data), status);
871 if (!(block = nlmsvc_find_block(cookie)))
872 return;
873
874 if (status == nlm_lck_denied_grace_period) {
875 /* Try again in a couple of seconds */
876 nlmsvc_insert_block(block, 10 * HZ);
877 } else {
878 /*
879 * Lock is now held by client, or has been rejected.
880 * In both cases, the block should be removed.
881 */
882 nlmsvc_unlink_block(block);
883 }
884 nlmsvc_release_block(block);
885}
886
887/* Helper function to handle retry of a deferred block.
888 * If it is a blocking lock, call grant_blocked.
889 * For a non-blocking lock or test lock, revisit the request.
890 */
891static void
892retry_deferred_block(struct nlm_block *block)
893{
894 if (!(block->b_flags & B_GOT_CALLBACK))
895 block->b_flags |= B_TIMED_OUT;
896 nlmsvc_insert_block(block, NLM_TIMEOUT);
897 dprintk("revisit block %p flags %d\n", block, block->b_flags);
898 if (block->b_deferred_req) {
899 block->b_deferred_req->revisit(block->b_deferred_req, 0);
900 block->b_deferred_req = NULL;
901 }
902}
903
904/*
905 * Retry all blocked locks that have been notified. This is where lockd
906 * picks up locks that can be granted, or grant notifications that must
907 * be retransmitted.
908 */
909unsigned long
910nlmsvc_retry_blocked(void)
911{
912 unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
913 struct nlm_block *block;
914
915 spin_lock(&nlm_blocked_lock);
916 while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
917 block = list_entry(nlm_blocked.next, struct nlm_block, b_list);
918
919 if (block->b_when == NLM_NEVER)
920 break;
921 if (time_after(block->b_when, jiffies)) {
922 timeout = block->b_when - jiffies;
923 break;
924 }
925 spin_unlock(&nlm_blocked_lock);
926
927 dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
928 block, block->b_when);
929 if (block->b_flags & B_QUEUED) {
930 dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n",
931 block, block->b_granted, block->b_flags);
932 retry_deferred_block(block);
933 } else
934 nlmsvc_grant_blocked(block);
935 spin_lock(&nlm_blocked_lock);
936 }
937 spin_unlock(&nlm_blocked_lock);
938
939 return timeout;
940}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/fs/lockd/svclock.c
4 *
5 * Handling of server-side locks, mostly of the blocked variety.
6 * This is the ugliest part of lockd because we tread on very thin ice.
7 * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
8 * IMNSHO introducing the grant callback into the NLM protocol was one
9 * of the worst ideas Sun ever had. Except maybe for the idea of doing
10 * NFS file locking at all.
11 *
12 * I'm trying hard to avoid race conditions by protecting most accesses
13 * to a file's list of blocked locks through a semaphore. The global
14 * list of blocked locks is not protected in this fashion however.
15 * Therefore, some functions (such as the RPC callback for the async grant
16 * call) move blocked locks towards the head of the list *while some other
17 * process might be traversing it*. This should not be a problem in
18 * practice, because this will only cause functions traversing the list
19 * to visit some blocks twice.
20 *
21 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
22 */
23
24#include <linux/types.h>
25#include <linux/slab.h>
26#include <linux/errno.h>
27#include <linux/kernel.h>
28#include <linux/sched.h>
29#include <linux/sunrpc/clnt.h>
30#include <linux/sunrpc/svc_xprt.h>
31#include <linux/lockd/nlm.h>
32#include <linux/lockd/lockd.h>
33#include <linux/kthread.h>
34
35#define NLMDBG_FACILITY NLMDBG_SVCLOCK
36
37#ifdef CONFIG_LOCKD_V4
38#define nlm_deadlock nlm4_deadlock
39#else
40#define nlm_deadlock nlm_lck_denied
41#endif
42
43static void nlmsvc_release_block(struct nlm_block *block);
44static void nlmsvc_insert_block(struct nlm_block *block, unsigned long);
45static void nlmsvc_remove_block(struct nlm_block *block);
46
47static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
48static void nlmsvc_freegrantargs(struct nlm_rqst *call);
49static const struct rpc_call_ops nlmsvc_grant_ops;
50
51/*
52 * The list of blocked locks to retry
53 */
54static LIST_HEAD(nlm_blocked);
55static DEFINE_SPINLOCK(nlm_blocked_lock);
56
57#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
58static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
59{
60 /*
61 * We can get away with a static buffer because this is only called
62 * from lockd, which is single-threaded.
63 */
64 static char buf[2*NLM_MAXCOOKIELEN+1];
65 unsigned int i, len = sizeof(buf);
66 char *p = buf;
67
68 len--; /* allow for trailing \0 */
69 if (len < 3)
70 return "???";
71 for (i = 0 ; i < cookie->len ; i++) {
72 if (len < 2) {
73 strcpy(p-3, "...");
74 break;
75 }
76 sprintf(p, "%02x", cookie->data[i]);
77 p += 2;
78 len -= 2;
79 }
80 *p = '\0';
81
82 return buf;
83}
84#endif
85
86/*
87 * Insert a blocked lock into the global list
88 */
89static void
90nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when)
91{
92 struct nlm_block *b;
93 struct list_head *pos;
94
95 dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
96 if (list_empty(&block->b_list)) {
97 kref_get(&block->b_count);
98 } else {
99 list_del_init(&block->b_list);
100 }
101
102 pos = &nlm_blocked;
103 if (when != NLM_NEVER) {
104 if ((when += jiffies) == NLM_NEVER)
105 when ++;
106 list_for_each(pos, &nlm_blocked) {
107 b = list_entry(pos, struct nlm_block, b_list);
108 if (time_after(b->b_when,when) || b->b_when == NLM_NEVER)
109 break;
110 }
111 /* On normal exit from the loop, pos == &nlm_blocked,
112 * so we will be adding to the end of the list - good
113 */
114 }
115
116 list_add_tail(&block->b_list, pos);
117 block->b_when = when;
118}
119
120static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
121{
122 spin_lock(&nlm_blocked_lock);
123 nlmsvc_insert_block_locked(block, when);
124 spin_unlock(&nlm_blocked_lock);
125}
126
127/*
128 * Remove a block from the global list
129 */
130static inline void
131nlmsvc_remove_block(struct nlm_block *block)
132{
133 if (!list_empty(&block->b_list)) {
134 spin_lock(&nlm_blocked_lock);
135 list_del_init(&block->b_list);
136 spin_unlock(&nlm_blocked_lock);
137 nlmsvc_release_block(block);
138 }
139}
140
141/*
142 * Find a block for a given lock
143 */
144static struct nlm_block *
145nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
146{
147 struct nlm_block *block;
148 struct file_lock *fl;
149
150 dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
151 file, lock->fl.fl_pid,
152 (long long)lock->fl.fl_start,
153 (long long)lock->fl.fl_end, lock->fl.fl_type);
154 list_for_each_entry(block, &nlm_blocked, b_list) {
155 fl = &block->b_call->a_args.lock.fl;
156 dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
157 block->b_file, fl->fl_pid,
158 (long long)fl->fl_start,
159 (long long)fl->fl_end, fl->fl_type,
160 nlmdbg_cookie2a(&block->b_call->a_args.cookie));
161 if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
162 kref_get(&block->b_count);
163 return block;
164 }
165 }
166
167 return NULL;
168}
169
170static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
171{
172 if (a->len != b->len)
173 return 0;
174 if (memcmp(a->data, b->data, a->len))
175 return 0;
176 return 1;
177}
178
179/*
180 * Find a block with a given NLM cookie.
181 */
182static inline struct nlm_block *
183nlmsvc_find_block(struct nlm_cookie *cookie)
184{
185 struct nlm_block *block;
186
187 list_for_each_entry(block, &nlm_blocked, b_list) {
188 if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie))
189 goto found;
190 }
191
192 return NULL;
193
194found:
195 dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
196 kref_get(&block->b_count);
197 return block;
198}
199
200/*
201 * Create a block and initialize it.
202 *
203 * Note: we explicitly set the cookie of the grant reply to that of
204 * the blocked lock request. The spec explicitly mentions that the client
205 * should _not_ rely on the callback containing the same cookie as the
206 * request, but (as I found out later) that's because some implementations
207 * do just this. Never mind the standards comittees, they support our
208 * logging industries.
209 *
210 * 10 years later: I hope we can safely ignore these old and broken
211 * clients by now. Let's fix this so we can uniquely identify an incoming
212 * GRANTED_RES message by cookie, without having to rely on the client's IP
213 * address. --okir
214 */
215static struct nlm_block *
216nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
217 struct nlm_file *file, struct nlm_lock *lock,
218 struct nlm_cookie *cookie)
219{
220 struct nlm_block *block;
221 struct nlm_rqst *call = NULL;
222
223 call = nlm_alloc_call(host);
224 if (call == NULL)
225 return NULL;
226
227 /* Allocate memory for block, and initialize arguments */
228 block = kzalloc(sizeof(*block), GFP_KERNEL);
229 if (block == NULL)
230 goto failed;
231 kref_init(&block->b_count);
232 INIT_LIST_HEAD(&block->b_list);
233 INIT_LIST_HEAD(&block->b_flist);
234
235 if (!nlmsvc_setgrantargs(call, lock))
236 goto failed_free;
237
238 /* Set notifier function for VFS, and init args */
239 call->a_args.lock.fl.fl_flags |= FL_SLEEP;
240 call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
241 nlmclnt_next_cookie(&call->a_args.cookie);
242
243 dprintk("lockd: created block %p...\n", block);
244
245 /* Create and initialize the block */
246 block->b_daemon = rqstp->rq_server;
247 block->b_host = host;
248 block->b_file = file;
249 file->f_count++;
250
251 /* Add to file's list of blocks */
252 list_add(&block->b_flist, &file->f_blocks);
253
254 /* Set up RPC arguments for callback */
255 block->b_call = call;
256 call->a_flags = RPC_TASK_ASYNC;
257 call->a_block = block;
258
259 return block;
260
261failed_free:
262 kfree(block);
263failed:
264 nlmsvc_release_call(call);
265 return NULL;
266}
267
268/*
269 * Delete a block.
270 * It is the caller's responsibility to check whether the file
271 * can be closed hereafter.
272 */
273static int nlmsvc_unlink_block(struct nlm_block *block)
274{
275 int status;
276 dprintk("lockd: unlinking block %p...\n", block);
277
278 /* Remove block from list */
279 status = locks_delete_block(&block->b_call->a_args.lock.fl);
280 nlmsvc_remove_block(block);
281 return status;
282}
283
284static void nlmsvc_free_block(struct kref *kref)
285{
286 struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
287 struct nlm_file *file = block->b_file;
288
289 dprintk("lockd: freeing block %p...\n", block);
290
291 /* Remove block from file's list of blocks */
292 list_del_init(&block->b_flist);
293 mutex_unlock(&file->f_mutex);
294
295 nlmsvc_freegrantargs(block->b_call);
296 nlmsvc_release_call(block->b_call);
297 nlm_release_file(block->b_file);
298 kfree(block);
299}
300
301static void nlmsvc_release_block(struct nlm_block *block)
302{
303 if (block != NULL)
304 kref_put_mutex(&block->b_count, nlmsvc_free_block, &block->b_file->f_mutex);
305}
306
307/*
308 * Loop over all blocks and delete blocks held by
309 * a matching host.
310 */
311void nlmsvc_traverse_blocks(struct nlm_host *host,
312 struct nlm_file *file,
313 nlm_host_match_fn_t match)
314{
315 struct nlm_block *block, *next;
316
317restart:
318 mutex_lock(&file->f_mutex);
319 list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
320 if (!match(block->b_host, host))
321 continue;
322 /* Do not destroy blocks that are not on
323 * the global retry list - why? */
324 if (list_empty(&block->b_list))
325 continue;
326 kref_get(&block->b_count);
327 mutex_unlock(&file->f_mutex);
328 nlmsvc_unlink_block(block);
329 nlmsvc_release_block(block);
330 goto restart;
331 }
332 mutex_unlock(&file->f_mutex);
333}
334
335static struct nlm_lockowner *
336nlmsvc_get_lockowner(struct nlm_lockowner *lockowner)
337{
338 refcount_inc(&lockowner->count);
339 return lockowner;
340}
341
342static void nlmsvc_put_lockowner(struct nlm_lockowner *lockowner)
343{
344 if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
345 return;
346 list_del(&lockowner->list);
347 spin_unlock(&lockowner->host->h_lock);
348 nlmsvc_release_host(lockowner->host);
349 kfree(lockowner);
350}
351
352static struct nlm_lockowner *__nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
353{
354 struct nlm_lockowner *lockowner;
355 list_for_each_entry(lockowner, &host->h_lockowners, list) {
356 if (lockowner->pid != pid)
357 continue;
358 return nlmsvc_get_lockowner(lockowner);
359 }
360 return NULL;
361}
362
363static struct nlm_lockowner *nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
364{
365 struct nlm_lockowner *res, *new = NULL;
366
367 spin_lock(&host->h_lock);
368 res = __nlmsvc_find_lockowner(host, pid);
369
370 if (res == NULL) {
371 spin_unlock(&host->h_lock);
372 new = kmalloc(sizeof(*res), GFP_KERNEL);
373 spin_lock(&host->h_lock);
374 res = __nlmsvc_find_lockowner(host, pid);
375 if (res == NULL && new != NULL) {
376 res = new;
377 /* fs/locks.c will manage the refcount through lock_ops */
378 refcount_set(&new->count, 1);
379 new->pid = pid;
380 new->host = nlm_get_host(host);
381 list_add(&new->list, &host->h_lockowners);
382 new = NULL;
383 }
384 }
385
386 spin_unlock(&host->h_lock);
387 kfree(new);
388 return res;
389}
390
391void
392nlmsvc_release_lockowner(struct nlm_lock *lock)
393{
394 if (lock->fl.fl_owner)
395 nlmsvc_put_lockowner(lock->fl.fl_owner);
396}
397
398static void nlmsvc_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
399{
400 struct nlm_lockowner *nlm_lo = (struct nlm_lockowner *)fl->fl_owner;
401 new->fl_owner = nlmsvc_get_lockowner(nlm_lo);
402}
403
404static void nlmsvc_locks_release_private(struct file_lock *fl)
405{
406 nlmsvc_put_lockowner((struct nlm_lockowner *)fl->fl_owner);
407}
408
409static const struct file_lock_operations nlmsvc_lock_ops = {
410 .fl_copy_lock = nlmsvc_locks_copy_lock,
411 .fl_release_private = nlmsvc_locks_release_private,
412};
413
414void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host,
415 pid_t pid)
416{
417 fl->fl_owner = nlmsvc_find_lockowner(host, pid);
418 if (fl->fl_owner != NULL)
419 fl->fl_ops = &nlmsvc_lock_ops;
420}
421
422/*
423 * Initialize arguments for GRANTED call. The nlm_rqst structure
424 * has been cleared already.
425 */
426static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
427{
428 locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
429 memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
430 call->a_args.lock.caller = utsname()->nodename;
431 call->a_args.lock.oh.len = lock->oh.len;
432
433 /* set default data area */
434 call->a_args.lock.oh.data = call->a_owner;
435 call->a_args.lock.svid = ((struct nlm_lockowner *)lock->fl.fl_owner)->pid;
436
437 if (lock->oh.len > NLMCLNT_OHSIZE) {
438 void *data = kmalloc(lock->oh.len, GFP_KERNEL);
439 if (!data)
440 return 0;
441 call->a_args.lock.oh.data = (u8 *) data;
442 }
443
444 memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
445 return 1;
446}
447
448static void nlmsvc_freegrantargs(struct nlm_rqst *call)
449{
450 if (call->a_args.lock.oh.data != call->a_owner)
451 kfree(call->a_args.lock.oh.data);
452
453 locks_release_private(&call->a_args.lock.fl);
454}
455
456/*
457 * Deferred lock request handling for non-blocking lock
458 */
459static __be32
460nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
461{
462 __be32 status = nlm_lck_denied_nolocks;
463
464 block->b_flags |= B_QUEUED;
465
466 nlmsvc_insert_block(block, NLM_TIMEOUT);
467
468 block->b_cache_req = &rqstp->rq_chandle;
469 if (rqstp->rq_chandle.defer) {
470 block->b_deferred_req =
471 rqstp->rq_chandle.defer(block->b_cache_req);
472 if (block->b_deferred_req != NULL)
473 status = nlm_drop_reply;
474 }
475 dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
476 block, block->b_flags, ntohl(status));
477
478 return status;
479}
480
481/*
482 * Attempt to establish a lock, and if it can't be granted, block it
483 * if required.
484 */
485__be32
486nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
487 struct nlm_host *host, struct nlm_lock *lock, int wait,
488 struct nlm_cookie *cookie, int reclaim)
489{
490 struct nlm_block *block = NULL;
491 int error;
492 __be32 ret;
493
494 dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
495 locks_inode(file->f_file)->i_sb->s_id,
496 locks_inode(file->f_file)->i_ino,
497 lock->fl.fl_type, lock->fl.fl_pid,
498 (long long)lock->fl.fl_start,
499 (long long)lock->fl.fl_end,
500 wait);
501
502 /* Lock file against concurrent access */
503 mutex_lock(&file->f_mutex);
504 /* Get existing block (in case client is busy-waiting)
505 * or create new block
506 */
507 block = nlmsvc_lookup_block(file, lock);
508 if (block == NULL) {
509 block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
510 ret = nlm_lck_denied_nolocks;
511 if (block == NULL)
512 goto out;
513 lock = &block->b_call->a_args.lock;
514 } else
515 lock->fl.fl_flags &= ~FL_SLEEP;
516
517 if (block->b_flags & B_QUEUED) {
518 dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n",
519 block, block->b_flags);
520 if (block->b_granted) {
521 nlmsvc_unlink_block(block);
522 ret = nlm_granted;
523 goto out;
524 }
525 if (block->b_flags & B_TIMED_OUT) {
526 nlmsvc_unlink_block(block);
527 ret = nlm_lck_denied;
528 goto out;
529 }
530 ret = nlm_drop_reply;
531 goto out;
532 }
533
534 if (locks_in_grace(SVC_NET(rqstp)) && !reclaim) {
535 ret = nlm_lck_denied_grace_period;
536 goto out;
537 }
538 if (reclaim && !locks_in_grace(SVC_NET(rqstp))) {
539 ret = nlm_lck_denied_grace_period;
540 goto out;
541 }
542
543 if (!wait)
544 lock->fl.fl_flags &= ~FL_SLEEP;
545 error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
546 lock->fl.fl_flags &= ~FL_SLEEP;
547
548 dprintk("lockd: vfs_lock_file returned %d\n", error);
549 switch (error) {
550 case 0:
551 ret = nlm_granted;
552 goto out;
553 case -EAGAIN:
554 /*
555 * If this is a blocking request for an
556 * already pending lock request then we need
557 * to put it back on lockd's block list
558 */
559 if (wait)
560 break;
561 ret = nlm_lck_denied;
562 goto out;
563 case FILE_LOCK_DEFERRED:
564 if (wait)
565 break;
566 /* Filesystem lock operation is in progress
567 Add it to the queue waiting for callback */
568 ret = nlmsvc_defer_lock_rqst(rqstp, block);
569 goto out;
570 case -EDEADLK:
571 ret = nlm_deadlock;
572 goto out;
573 default: /* includes ENOLCK */
574 ret = nlm_lck_denied_nolocks;
575 goto out;
576 }
577
578 ret = nlm_lck_blocked;
579
580 /* Append to list of blocked */
581 nlmsvc_insert_block(block, NLM_NEVER);
582out:
583 mutex_unlock(&file->f_mutex);
584 nlmsvc_release_block(block);
585 dprintk("lockd: nlmsvc_lock returned %u\n", ret);
586 return ret;
587}
588
589/*
590 * Test for presence of a conflicting lock.
591 */
592__be32
593nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
594 struct nlm_host *host, struct nlm_lock *lock,
595 struct nlm_lock *conflock, struct nlm_cookie *cookie)
596{
597 int error;
598 __be32 ret;
599 struct nlm_lockowner *test_owner;
600
601 dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
602 locks_inode(file->f_file)->i_sb->s_id,
603 locks_inode(file->f_file)->i_ino,
604 lock->fl.fl_type,
605 (long long)lock->fl.fl_start,
606 (long long)lock->fl.fl_end);
607
608 if (locks_in_grace(SVC_NET(rqstp))) {
609 ret = nlm_lck_denied_grace_period;
610 goto out;
611 }
612
613 /* If there's a conflicting lock, remember to clean up the test lock */
614 test_owner = (struct nlm_lockowner *)lock->fl.fl_owner;
615
616 error = vfs_test_lock(file->f_file, &lock->fl);
617 if (error) {
618 /* We can't currently deal with deferred test requests */
619 if (error == FILE_LOCK_DEFERRED)
620 WARN_ON_ONCE(1);
621
622 ret = nlm_lck_denied_nolocks;
623 goto out;
624 }
625
626 if (lock->fl.fl_type == F_UNLCK) {
627 ret = nlm_granted;
628 goto out;
629 }
630
631 dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
632 lock->fl.fl_type, (long long)lock->fl.fl_start,
633 (long long)lock->fl.fl_end);
634 conflock->caller = "somehost"; /* FIXME */
635 conflock->len = strlen(conflock->caller);
636 conflock->oh.len = 0; /* don't return OH info */
637 conflock->svid = ((struct nlm_lockowner *)lock->fl.fl_owner)->pid;
638 conflock->fl.fl_type = lock->fl.fl_type;
639 conflock->fl.fl_start = lock->fl.fl_start;
640 conflock->fl.fl_end = lock->fl.fl_end;
641 locks_release_private(&lock->fl);
642
643 /* Clean up the test lock */
644 lock->fl.fl_owner = NULL;
645 nlmsvc_put_lockowner(test_owner);
646
647 ret = nlm_lck_denied;
648out:
649 return ret;
650}
651
652/*
653 * Remove a lock.
654 * This implies a CANCEL call: We send a GRANT_MSG, the client replies
655 * with a GRANT_RES call which gets lost, and calls UNLOCK immediately
656 * afterwards. In this case the block will still be there, and hence
657 * must be removed.
658 */
659__be32
660nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
661{
662 int error;
663
664 dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
665 locks_inode(file->f_file)->i_sb->s_id,
666 locks_inode(file->f_file)->i_ino,
667 lock->fl.fl_pid,
668 (long long)lock->fl.fl_start,
669 (long long)lock->fl.fl_end);
670
671 /* First, cancel any lock that might be there */
672 nlmsvc_cancel_blocked(net, file, lock);
673
674 lock->fl.fl_type = F_UNLCK;
675 error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
676
677 return (error < 0)? nlm_lck_denied_nolocks : nlm_granted;
678}
679
680/*
681 * Cancel a previously blocked request.
682 *
683 * A cancel request always overrides any grant that may currently
684 * be in progress.
685 * The calling procedure must check whether the file can be closed.
686 */
687__be32
688nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
689{
690 struct nlm_block *block;
691 int status = 0;
692
693 dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
694 locks_inode(file->f_file)->i_sb->s_id,
695 locks_inode(file->f_file)->i_ino,
696 lock->fl.fl_pid,
697 (long long)lock->fl.fl_start,
698 (long long)lock->fl.fl_end);
699
700 if (locks_in_grace(net))
701 return nlm_lck_denied_grace_period;
702
703 mutex_lock(&file->f_mutex);
704 block = nlmsvc_lookup_block(file, lock);
705 mutex_unlock(&file->f_mutex);
706 if (block != NULL) {
707 vfs_cancel_lock(block->b_file->f_file,
708 &block->b_call->a_args.lock.fl);
709 status = nlmsvc_unlink_block(block);
710 nlmsvc_release_block(block);
711 }
712 return status ? nlm_lck_denied : nlm_granted;
713}
714
715/*
716 * This is a callback from the filesystem for VFS file lock requests.
717 * It will be used if lm_grant is defined and the filesystem can not
718 * respond to the request immediately.
719 * For SETLK or SETLKW request it will get the local posix lock.
720 * In all cases it will move the block to the head of nlm_blocked q where
721 * nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit the
722 * deferred rpc for GETLK and SETLK.
723 */
724static void
725nlmsvc_update_deferred_block(struct nlm_block *block, int result)
726{
727 block->b_flags |= B_GOT_CALLBACK;
728 if (result == 0)
729 block->b_granted = 1;
730 else
731 block->b_flags |= B_TIMED_OUT;
732}
733
734static int nlmsvc_grant_deferred(struct file_lock *fl, int result)
735{
736 struct nlm_block *block;
737 int rc = -ENOENT;
738
739 spin_lock(&nlm_blocked_lock);
740 list_for_each_entry(block, &nlm_blocked, b_list) {
741 if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
742 dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n",
743 block, block->b_flags);
744 if (block->b_flags & B_QUEUED) {
745 if (block->b_flags & B_TIMED_OUT) {
746 rc = -ENOLCK;
747 break;
748 }
749 nlmsvc_update_deferred_block(block, result);
750 } else if (result == 0)
751 block->b_granted = 1;
752
753 nlmsvc_insert_block_locked(block, 0);
754 svc_wake_up(block->b_daemon);
755 rc = 0;
756 break;
757 }
758 }
759 spin_unlock(&nlm_blocked_lock);
760 if (rc == -ENOENT)
761 printk(KERN_WARNING "lockd: grant for unknown block\n");
762 return rc;
763}
764
765/*
766 * Unblock a blocked lock request. This is a callback invoked from the
767 * VFS layer when a lock on which we blocked is removed.
768 *
769 * This function doesn't grant the blocked lock instantly, but rather moves
770 * the block to the head of nlm_blocked where it can be picked up by lockd.
771 */
772static void
773nlmsvc_notify_blocked(struct file_lock *fl)
774{
775 struct nlm_block *block;
776
777 dprintk("lockd: VFS unblock notification for block %p\n", fl);
778 spin_lock(&nlm_blocked_lock);
779 list_for_each_entry(block, &nlm_blocked, b_list) {
780 if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
781 nlmsvc_insert_block_locked(block, 0);
782 spin_unlock(&nlm_blocked_lock);
783 svc_wake_up(block->b_daemon);
784 return;
785 }
786 }
787 spin_unlock(&nlm_blocked_lock);
788 printk(KERN_WARNING "lockd: notification for unknown block!\n");
789}
790
791const struct lock_manager_operations nlmsvc_lock_operations = {
792 .lm_notify = nlmsvc_notify_blocked,
793 .lm_grant = nlmsvc_grant_deferred,
794};
795
796/*
797 * Try to claim a lock that was previously blocked.
798 *
799 * Note that we use both the RPC_GRANTED_MSG call _and_ an async
800 * RPC thread when notifying the client. This seems like overkill...
801 * Here's why:
802 * - we don't want to use a synchronous RPC thread, otherwise
803 * we might find ourselves hanging on a dead portmapper.
804 * - Some lockd implementations (e.g. HP) don't react to
805 * RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
806 */
807static void
808nlmsvc_grant_blocked(struct nlm_block *block)
809{
810 struct nlm_file *file = block->b_file;
811 struct nlm_lock *lock = &block->b_call->a_args.lock;
812 int error;
813 loff_t fl_start, fl_end;
814
815 dprintk("lockd: grant blocked lock %p\n", block);
816
817 kref_get(&block->b_count);
818
819 /* Unlink block request from list */
820 nlmsvc_unlink_block(block);
821
822 /* If b_granted is true this means we've been here before.
823 * Just retry the grant callback, possibly refreshing the RPC
824 * binding */
825 if (block->b_granted) {
826 nlm_rebind_host(block->b_host);
827 goto callback;
828 }
829
830 /* Try the lock operation again */
831 /* vfs_lock_file() can mangle fl_start and fl_end, but we need
832 * them unchanged for the GRANT_MSG
833 */
834 lock->fl.fl_flags |= FL_SLEEP;
835 fl_start = lock->fl.fl_start;
836 fl_end = lock->fl.fl_end;
837 error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
838 lock->fl.fl_flags &= ~FL_SLEEP;
839 lock->fl.fl_start = fl_start;
840 lock->fl.fl_end = fl_end;
841
842 switch (error) {
843 case 0:
844 break;
845 case FILE_LOCK_DEFERRED:
846 dprintk("lockd: lock still blocked error %d\n", error);
847 nlmsvc_insert_block(block, NLM_NEVER);
848 nlmsvc_release_block(block);
849 return;
850 default:
851 printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
852 -error, __func__);
853 nlmsvc_insert_block(block, 10 * HZ);
854 nlmsvc_release_block(block);
855 return;
856 }
857
858callback:
859 /* Lock was granted by VFS. */
860 dprintk("lockd: GRANTing blocked lock.\n");
861 block->b_granted = 1;
862
863 /* keep block on the list, but don't reattempt until the RPC
864 * completes or the submission fails
865 */
866 nlmsvc_insert_block(block, NLM_NEVER);
867
868 /* Call the client -- use a soft RPC task since nlmsvc_retry_blocked
869 * will queue up a new one if this one times out
870 */
871 error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
872 &nlmsvc_grant_ops);
873
874 /* RPC submission failed, wait a bit and retry */
875 if (error < 0)
876 nlmsvc_insert_block(block, 10 * HZ);
877}
878
879/*
880 * This is the callback from the RPC layer when the NLM_GRANTED_MSG
881 * RPC call has succeeded or timed out.
882 * Like all RPC callbacks, it is invoked by the rpciod process, so it
883 * better not sleep. Therefore, we put the blocked lock on the nlm_blocked
884 * chain once more in order to have it removed by lockd itself (which can
885 * then sleep on the file semaphore without disrupting e.g. the nfs client).
886 */
887static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
888{
889 struct nlm_rqst *call = data;
890 struct nlm_block *block = call->a_block;
891 unsigned long timeout;
892
893 dprintk("lockd: GRANT_MSG RPC callback\n");
894
895 spin_lock(&nlm_blocked_lock);
896 /* if the block is not on a list at this point then it has
897 * been invalidated. Don't try to requeue it.
898 *
899 * FIXME: it's possible that the block is removed from the list
900 * after this check but before the nlmsvc_insert_block. In that
901 * case it will be added back. Perhaps we need better locking
902 * for nlm_blocked?
903 */
904 if (list_empty(&block->b_list))
905 goto out;
906
907 /* Technically, we should down the file semaphore here. Since we
908 * move the block towards the head of the queue only, no harm
909 * can be done, though. */
910 if (task->tk_status < 0) {
911 /* RPC error: Re-insert for retransmission */
912 timeout = 10 * HZ;
913 } else {
914 /* Call was successful, now wait for client callback */
915 timeout = 60 * HZ;
916 }
917 nlmsvc_insert_block_locked(block, timeout);
918 svc_wake_up(block->b_daemon);
919out:
920 spin_unlock(&nlm_blocked_lock);
921}
922
923/*
924 * FIXME: nlmsvc_release_block() grabs a mutex. This is not allowed for an
925 * .rpc_release rpc_call_op
926 */
927static void nlmsvc_grant_release(void *data)
928{
929 struct nlm_rqst *call = data;
930 nlmsvc_release_block(call->a_block);
931}
932
933static const struct rpc_call_ops nlmsvc_grant_ops = {
934 .rpc_call_done = nlmsvc_grant_callback,
935 .rpc_release = nlmsvc_grant_release,
936};
937
938/*
939 * We received a GRANT_RES callback. Try to find the corresponding
940 * block.
941 */
942void
943nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
944{
945 struct nlm_block *block;
946
947 dprintk("grant_reply: looking for cookie %x, s=%d \n",
948 *(unsigned int *)(cookie->data), status);
949 if (!(block = nlmsvc_find_block(cookie)))
950 return;
951
952 if (status == nlm_lck_denied_grace_period) {
953 /* Try again in a couple of seconds */
954 nlmsvc_insert_block(block, 10 * HZ);
955 } else {
956 /*
957 * Lock is now held by client, or has been rejected.
958 * In both cases, the block should be removed.
959 */
960 nlmsvc_unlink_block(block);
961 }
962 nlmsvc_release_block(block);
963}
964
965/* Helper function to handle retry of a deferred block.
966 * If it is a blocking lock, call grant_blocked.
967 * For a non-blocking lock or test lock, revisit the request.
968 */
969static void
970retry_deferred_block(struct nlm_block *block)
971{
972 if (!(block->b_flags & B_GOT_CALLBACK))
973 block->b_flags |= B_TIMED_OUT;
974 nlmsvc_insert_block(block, NLM_TIMEOUT);
975 dprintk("revisit block %p flags %d\n", block, block->b_flags);
976 if (block->b_deferred_req) {
977 block->b_deferred_req->revisit(block->b_deferred_req, 0);
978 block->b_deferred_req = NULL;
979 }
980}
981
982/*
983 * Retry all blocked locks that have been notified. This is where lockd
984 * picks up locks that can be granted, or grant notifications that must
985 * be retransmitted.
986 */
987unsigned long
988nlmsvc_retry_blocked(void)
989{
990 unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
991 struct nlm_block *block;
992
993 spin_lock(&nlm_blocked_lock);
994 while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
995 block = list_entry(nlm_blocked.next, struct nlm_block, b_list);
996
997 if (block->b_when == NLM_NEVER)
998 break;
999 if (time_after(block->b_when, jiffies)) {
1000 timeout = block->b_when - jiffies;
1001 break;
1002 }
1003 spin_unlock(&nlm_blocked_lock);
1004
1005 dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
1006 block, block->b_when);
1007 if (block->b_flags & B_QUEUED) {
1008 dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n",
1009 block, block->b_granted, block->b_flags);
1010 retry_deferred_block(block);
1011 } else
1012 nlmsvc_grant_blocked(block);
1013 spin_lock(&nlm_blocked_lock);
1014 }
1015 spin_unlock(&nlm_blocked_lock);
1016
1017 return timeout;
1018}