// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/lockd/svclock.c
 *
 * Handling of server-side locks, mostly of the blocked variety.
 * This is the ugliest part of lockd because we tread on very thin ice.
 * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
 * IMNSHO introducing the grant callback into the NLM protocol was one
 * of the worst ideas Sun ever had. Except maybe for the idea of doing
 * NFS file locking at all.
 *
 * I'm trying hard to avoid race conditions by protecting most accesses
 * to a file's list of blocked locks through a semaphore. The global
 * list of blocked locks is not protected in this fashion however.
 * Therefore, some functions (such as the RPC callback for the async grant
 * call) move blocked locks towards the head of the list *while some other
 * process might be traversing it*. This should not be a problem in
 * practice, because this will only cause functions traversing the list
 * to visit some blocks twice.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/nlm.h>
#include <linux/lockd/lockd.h>

#define NLMDBG_FACILITY		NLMDBG_SVCLOCK

#ifdef CONFIG_LOCKD_V4
#define nlm_deadlock	nlm4_deadlock
#else
#define nlm_deadlock	nlm_lck_denied
#endif

static void nlmsvc_release_block(struct nlm_block *block);
static void nlmsvc_insert_block(struct nlm_block *block, unsigned long);
static void nlmsvc_remove_block(struct nlm_block *block);

static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
static void nlmsvc_freegrantargs(struct nlm_rqst *call);
static const struct rpc_call_ops nlmsvc_grant_ops;

/*
 * The list of blocked locks to retry
 */
static LIST_HEAD(nlm_blocked);
static DEFINE_SPINLOCK(nlm_blocked_lock);

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
{
	/*
	 * We can get away with a static buffer because this is only called
	 * from lockd, which is single-threaded.
	 */
	static char buf[2*NLM_MAXCOOKIELEN+1];
	unsigned int i, len = sizeof(buf);
	char *p = buf;

	len--;	/* allow for trailing \0 */
	if (len < 3)
		return "???";
	for (i = 0 ; i < cookie->len ; i++) {
		if (len < 2) {
			strcpy(p-3, "...");
			break;
		}
		sprintf(p, "%02x", cookie->data[i]);
		p += 2;
		len -= 2;
	}
	*p = '\0';

	return buf;
}
#endif

/*
 * Insert a blocked lock into the global list
 */
static void
nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when)
{
	struct nlm_block *b;
	struct list_head *pos;

	dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
	if (list_empty(&block->b_list)) {
		kref_get(&block->b_count);
	} else {
		list_del_init(&block->b_list);
	}

	pos = &nlm_blocked;
	if (when != NLM_NEVER) {
		if ((when += jiffies) == NLM_NEVER)
			when++;
		list_for_each(pos, &nlm_blocked) {
			b = list_entry(pos, struct nlm_block, b_list);
			if (time_after(b->b_when, when) || b->b_when == NLM_NEVER)
				break;
		}
		/* On normal exit from the loop, pos == &nlm_blocked,
		 * so we will be adding to the end of the list - good
		 */
	}

	list_add_tail(&block->b_list, pos);
	block->b_when = when;
}

static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
{
	spin_lock(&nlm_blocked_lock);
	nlmsvc_insert_block_locked(block, when);
	spin_unlock(&nlm_blocked_lock);
}
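
/*
 * Note: the two helpers above keep nlm_blocked sorted by b_when, with
 * NLM_NEVER entries collecting at the tail. nlmsvc_retry_blocked() relies
 * on this ordering when deciding how long lockd may sleep before the next
 * retry is due.
 */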

/*
 * Remove a block from the global list
 */
static inline void
nlmsvc_remove_block(struct nlm_block *block)
{
	spin_lock(&nlm_blocked_lock);
	if (!list_empty(&block->b_list)) {
		list_del_init(&block->b_list);
		spin_unlock(&nlm_blocked_lock);
		nlmsvc_release_block(block);
		return;
	}
	spin_unlock(&nlm_blocked_lock);
}

/*
 * Find a block for a given lock
 */
static struct nlm_block *
nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block *block;
	struct file_lock *fl;

	dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
		file, lock->fl.c.flc_pid,
		(long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end,
		lock->fl.c.flc_type);
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		fl = &block->b_call->a_args.lock.fl;
		dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
			block->b_file, fl->c.flc_pid,
			(long long)fl->fl_start,
			(long long)fl->fl_end, fl->c.flc_type,
			nlmdbg_cookie2a(&block->b_call->a_args.cookie));
		if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
			kref_get(&block->b_count);
			spin_unlock(&nlm_blocked_lock);
			return block;
		}
	}
	spin_unlock(&nlm_blocked_lock);

	return NULL;
}

static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
{
	if (a->len != b->len)
		return 0;
	if (memcmp(a->data, b->data, a->len))
		return 0;
	return 1;
}

/*
 * Find a block with a given NLM cookie.
 */
static inline struct nlm_block *
nlmsvc_find_block(struct nlm_cookie *cookie)
{
	struct nlm_block *block;

	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_cookie_match(&block->b_call->a_args.cookie, cookie))
			goto found;
	}
	spin_unlock(&nlm_blocked_lock);

	return NULL;

found:
	dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
	kref_get(&block->b_count);
	spin_unlock(&nlm_blocked_lock);
	return block;
}

/*
 * Create a block and initialize it.
 *
 * Note: we explicitly set the cookie of the grant reply to that of
 * the blocked lock request. The spec explicitly mentions that the client
 * should _not_ rely on the callback containing the same cookie as the
 * request, but (as I found out later) that's because some implementations
 * do just this. Never mind the standards committees, they support our
 * logging industries.
 *
 * 10 years later: I hope we can safely ignore these old and broken
 * clients by now. Let's fix this so we can uniquely identify an incoming
 * GRANTED_RES message by cookie, without having to rely on the client's IP
 * address. --okir
 */
static struct nlm_block *
nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
		    struct nlm_file *file, struct nlm_lock *lock,
		    struct nlm_cookie *cookie)
{
	struct nlm_block *block;
	struct nlm_rqst *call = NULL;

	call = nlm_alloc_call(host);
	if (call == NULL)
		return NULL;

	/* Allocate memory for block, and initialize arguments */
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		goto failed;
	kref_init(&block->b_count);
	INIT_LIST_HEAD(&block->b_list);
	INIT_LIST_HEAD(&block->b_flist);

	if (!nlmsvc_setgrantargs(call, lock))
		goto failed_free;

	/* Set notifier function for VFS, and init args */
	call->a_args.lock.fl.c.flc_flags |= FL_SLEEP;
	call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
	nlmclnt_next_cookie(&call->a_args.cookie);

	dprintk("lockd: created block %p...\n", block);

	/* Create and initialize the block */
	block->b_daemon = rqstp->rq_server;
	block->b_host = host;
	block->b_file = file;
	file->f_count++;

	/* Add to file's list of blocks */
	list_add(&block->b_flist, &file->f_blocks);

	/* Set up RPC arguments for callback */
	block->b_call = call;
	call->a_flags = RPC_TASK_ASYNC;
	call->a_block = block;

	return block;

failed_free:
	kfree(block);
failed:
	nlmsvc_release_call(call);
	return NULL;
}

/*
 * Delete a block.
 * It is the caller's responsibility to check whether the file
 * can be closed hereafter.
 */
static int nlmsvc_unlink_block(struct nlm_block *block)
{
	int status;
	dprintk("lockd: unlinking block %p...\n", block);

	/* Remove block from list */
	status = locks_delete_block(&block->b_call->a_args.lock.fl);
	nlmsvc_remove_block(block);
	return status;
}

static void nlmsvc_free_block(struct kref *kref)
{
	struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
	struct nlm_file *file = block->b_file;

	dprintk("lockd: freeing block %p...\n", block);

	/* Remove block from file's list of blocks */
	list_del_init(&block->b_flist);
	mutex_unlock(&file->f_mutex);

	nlmsvc_freegrantargs(block->b_call);
	nlmsvc_release_call(block->b_call);
	nlm_release_file(block->b_file);
	kfree(block);
}

static void nlmsvc_release_block(struct nlm_block *block)
{
	if (block != NULL)
		kref_put_mutex(&block->b_count, nlmsvc_free_block, &block->b_file->f_mutex);
}

/*
 * Loop over all blocks and delete blocks held by
 * a matching host.
 */
void nlmsvc_traverse_blocks(struct nlm_host *host,
			struct nlm_file *file,
			nlm_host_match_fn_t match)
{
	struct nlm_block *block, *next;

restart:
	mutex_lock(&file->f_mutex);
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
		if (!match(block->b_host, host))
			continue;
		/* Do not destroy blocks that are not on
		 * the global retry list - why? */
		if (list_empty(&block->b_list))
			continue;
		kref_get(&block->b_count);
		spin_unlock(&nlm_blocked_lock);
		mutex_unlock(&file->f_mutex);
		nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
		goto restart;
	}
	spin_unlock(&nlm_blocked_lock);
	mutex_unlock(&file->f_mutex);
}

static struct nlm_lockowner *
nlmsvc_get_lockowner(struct nlm_lockowner *lockowner)
{
	refcount_inc(&lockowner->count);
	return lockowner;
}

void nlmsvc_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlmsvc_release_host(lockowner->host);
	kfree(lockowner);
}

static struct nlm_lockowner *__nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid != pid)
			continue;
		return nlmsvc_get_lockowner(lockowner);
	}
	return NULL;
}

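/*
 * Look up (or create) the lockowner for a given host/pid pair. Note the
 * pattern below: the host spinlock is dropped around the GFP_KERNEL
 * allocation, so the lookup is repeated after reacquiring the lock and the
 * freshly allocated owner is used only if nobody raced in and added one
 * first; otherwise the allocation is simply freed.
 */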
static struct nlm_lockowner *nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlmsvc_find_lockowner(host, pid);

	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*res), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlmsvc_find_lockowner(host, pid);
		if (res == NULL && new != NULL) {
			res = new;
			/* fs/locks.c will manage the refcount through lock_ops */
			refcount_set(&new->count, 1);
			new->pid = pid;
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}

	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}

void
nlmsvc_release_lockowner(struct nlm_lock *lock)
{
	if (lock->fl.c.flc_owner)
		nlmsvc_put_lockowner(lock->fl.c.flc_owner);
}

void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host,
						pid_t pid)
{
	fl->c.flc_owner = nlmsvc_find_lockowner(host, pid);
}

/*
 * Initialize arguments for GRANTED call. The nlm_rqst structure
 * has been cleared already.
 */
static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
	call->a_args.lock.caller = utsname()->nodename;
	call->a_args.lock.oh.len = lock->oh.len;

	/* set default data area */
	call->a_args.lock.oh.data = call->a_owner;
	call->a_args.lock.svid = ((struct nlm_lockowner *) lock->fl.c.flc_owner)->pid;

	if (lock->oh.len > NLMCLNT_OHSIZE) {
		void *data = kmalloc(lock->oh.len, GFP_KERNEL);
		if (!data)
			return 0;
		call->a_args.lock.oh.data = (u8 *) data;
	}

	memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
	return 1;
}

static void nlmsvc_freegrantargs(struct nlm_rqst *call)
{
	if (call->a_args.lock.oh.data != call->a_owner)
		kfree(call->a_args.lock.oh.data);

	locks_release_private(&call->a_args.lock.fl);
}

/*
 * Deferred lock request handling for non-blocking lock
 */
static __be32
nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
{
	__be32 status = nlm_lck_denied_nolocks;

	block->b_flags |= B_QUEUED;

	nlmsvc_insert_block(block, NLM_TIMEOUT);

	block->b_cache_req = &rqstp->rq_chandle;
	if (rqstp->rq_chandle.defer) {
		block->b_deferred_req =
			rqstp->rq_chandle.defer(block->b_cache_req);
		if (block->b_deferred_req != NULL)
			status = nlm_drop_reply;
	}
	dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
		block, block->b_flags, ntohl(status));

	return status;
}
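
/*
 * Once a block is marked B_QUEUED it is owned by the deferral machinery:
 * either the filesystem reports a result through lm_grant
 * (nlmsvc_grant_deferred), or the NLM_TIMEOUT expiry makes
 * nlmsvc_retry_blocked() revisit the deferred request via
 * retry_deferred_block().
 */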

/*
 * Attempt to establish a lock, and if it can't be granted, block it
 * if required.
 */
__be32
nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
	    struct nlm_host *host, struct nlm_lock *lock, int wait,
	    struct nlm_cookie *cookie, int reclaim)
{
	struct inode *inode __maybe_unused = nlmsvc_file_inode(file);
	struct nlm_block *block = NULL;
	int error;
	int mode;
	int async_block = 0;
	__be32 ret;

	dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
		inode->i_sb->s_id, inode->i_ino,
		lock->fl.c.flc_type,
		lock->fl.c.flc_pid,
		(long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end,
		wait);

	if (!locks_can_async_lock(nlmsvc_file_file(file)->f_op)) {
		async_block = wait;
		wait = 0;
	}

	/* Lock file against concurrent access */
	mutex_lock(&file->f_mutex);
	/* Get existing block (in case client is busy-waiting)
	 * or create new block
	 */
	block = nlmsvc_lookup_block(file, lock);
	if (block == NULL) {
		block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
		ret = nlm_lck_denied_nolocks;
		if (block == NULL)
			goto out;
		lock = &block->b_call->a_args.lock;
	} else
		lock->fl.c.flc_flags &= ~FL_SLEEP;

	if (block->b_flags & B_QUEUED) {
		dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n",
			block, block->b_flags);
		if (block->b_granted) {
			nlmsvc_unlink_block(block);
			ret = nlm_granted;
			goto out;
		}
		if (block->b_flags & B_TIMED_OUT) {
			nlmsvc_unlink_block(block);
			ret = nlm_lck_denied;
			goto out;
		}
		ret = nlm_drop_reply;
		goto out;
	}

	if (locks_in_grace(SVC_NET(rqstp)) && !reclaim) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}
	if (reclaim && !locks_in_grace(SVC_NET(rqstp))) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}

	spin_lock(&nlm_blocked_lock);
	/*
	 * If this is a lock request for an already pending
	 * lock request we return nlm_lck_blocked without calling
	 * vfs_lock_file() again. Otherwise we have two pending
	 * requests on the underlying ->lock() implementation but
	 * only one nlm_block to be granted by lm_grant().
	 */
	if (locks_can_async_lock(nlmsvc_file_file(file)->f_op) &&
	    !list_empty(&block->b_list)) {
		spin_unlock(&nlm_blocked_lock);
		ret = nlm_lck_blocked;
		goto out;
	}

	/* Append to list of blocked */
	nlmsvc_insert_block_locked(block, NLM_NEVER);
	spin_unlock(&nlm_blocked_lock);

	if (!wait)
		lock->fl.c.flc_flags &= ~FL_SLEEP;
	mode = lock_to_openmode(&lock->fl);
	error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL);
	lock->fl.c.flc_flags &= ~FL_SLEEP;

	dprintk("lockd: vfs_lock_file returned %d\n", error);
	switch (error) {
	case 0:
		nlmsvc_remove_block(block);
		ret = nlm_granted;
		goto out;
	case -EAGAIN:
		if (!wait)
			nlmsvc_remove_block(block);
		ret = async_block ? nlm_lck_blocked : nlm_lck_denied;
		goto out;
	case FILE_LOCK_DEFERRED:
		if (wait)
			break;
		/* Filesystem lock operation is in progress.
		 * Add it to the queue waiting for callback */
		ret = nlmsvc_defer_lock_rqst(rqstp, block);
		goto out;
	case -EDEADLK:
		nlmsvc_remove_block(block);
		ret = nlm_deadlock;
		goto out;
	default:	/* includes ENOLCK */
		nlmsvc_remove_block(block);
		ret = nlm_lck_denied_nolocks;
		goto out;
	}

	ret = nlm_lck_blocked;
out:
	mutex_unlock(&file->f_mutex);
	nlmsvc_release_block(block);
	dprintk("lockd: nlmsvc_lock returned %u\n", ret);
	return ret;
}
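
/*
 * Typical caller (sketch only, modelled on the lockd service procedures in
 * svcproc.c/svc4proc.c; the argument names below are illustrative):
 *
 *	resp->status = nlmsvc_lock(rqstp, file, host, &argp->lock,
 *				   argp->block, &argp->cookie,
 *				   argp->reclaim);
 *
 * i.e. "wait" comes from the client's blocking flag and "reclaim" is set
 * for lock requests replayed during the grace period.
 */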

/*
 * Test for presence of a conflicting lock.
 */
__be32
nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
		struct nlm_host *host, struct nlm_lock *lock,
		struct nlm_lock *conflock)
{
	int error;
	int mode;
	__be32 ret;

	dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
		nlmsvc_file_inode(file)->i_sb->s_id,
		nlmsvc_file_inode(file)->i_ino,
		lock->fl.c.flc_type,
		(long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end);

	if (locks_in_grace(SVC_NET(rqstp))) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}

	mode = lock_to_openmode(&lock->fl);
	error = vfs_test_lock(file->f_file[mode], &lock->fl);
	if (error) {
		/* We can't currently deal with deferred test requests */
		if (error == FILE_LOCK_DEFERRED)
			WARN_ON_ONCE(1);

		ret = nlm_lck_denied_nolocks;
		goto out;
	}

	if (lock->fl.c.flc_type == F_UNLCK) {
		ret = nlm_granted;
		goto out;
	}

	dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
		lock->fl.c.flc_type, (long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end);
	conflock->caller = "somehost";	/* FIXME */
	conflock->len = strlen(conflock->caller);
	conflock->oh.len = 0;		/* don't return OH info */
	conflock->svid = lock->fl.c.flc_pid;
	conflock->fl.c.flc_type = lock->fl.c.flc_type;
	conflock->fl.fl_start = lock->fl.fl_start;
	conflock->fl.fl_end = lock->fl.fl_end;
	locks_release_private(&lock->fl);

	ret = nlm_lck_denied;
out:
	return ret;
}

/*
 * Remove a lock.
 * This implies a CANCEL call: We send a GRANT_MSG, the client replies
 * with a GRANT_RES call which gets lost, and calls UNLOCK immediately
 * afterwards. In this case the block will still be there, and hence
 * must be removed.
 */
__be32
nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
	int error = 0;

	dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
		nlmsvc_file_inode(file)->i_sb->s_id,
		nlmsvc_file_inode(file)->i_ino,
		lock->fl.c.flc_pid,
		(long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end);

	/* First, cancel any lock that might be there */
	nlmsvc_cancel_blocked(net, file, lock);

	lock->fl.c.flc_type = F_UNLCK;
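	/* The original lock may have been taken through either the read-
	 * or the write-open file, so apply the unlock to both if present. */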
	lock->fl.c.flc_file = file->f_file[O_RDONLY];
	if (lock->fl.c.flc_file)
		error = vfs_lock_file(lock->fl.c.flc_file, F_SETLK,
				      &lock->fl, NULL);
	lock->fl.c.flc_file = file->f_file[O_WRONLY];
	if (lock->fl.c.flc_file)
		error |= vfs_lock_file(lock->fl.c.flc_file, F_SETLK,
				       &lock->fl, NULL);

	return (error < 0) ? nlm_lck_denied_nolocks : nlm_granted;
}

/*
 * Cancel a previously blocked request.
 *
 * A cancel request always overrides any grant that may currently
 * be in progress.
 * The calling procedure must check whether the file can be closed.
 */
__be32
nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block *block;
	int status = 0;
	int mode;

	dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
		nlmsvc_file_inode(file)->i_sb->s_id,
		nlmsvc_file_inode(file)->i_ino,
		lock->fl.c.flc_pid,
		(long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end);

	if (locks_in_grace(net))
		return nlm_lck_denied_grace_period;

	mutex_lock(&file->f_mutex);
	block = nlmsvc_lookup_block(file, lock);
	mutex_unlock(&file->f_mutex);
	if (block != NULL) {
		struct file_lock *fl = &block->b_call->a_args.lock.fl;

		mode = lock_to_openmode(fl);
		vfs_cancel_lock(block->b_file->f_file[mode], fl);
		status = nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
	}
	return status ? nlm_lck_denied : nlm_granted;
}

/*
 * This is a callback from the filesystem for VFS file lock requests.
 * It will be used if lm_grant is defined and the filesystem can not
 * respond to the request immediately.
 * For SETLK or SETLKW request it will get the local posix lock.
 * In all cases it will move the block to the head of nlm_blocked q where
 * nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit the
 * deferred rpc for GETLK and SETLK.
 */
static void
nlmsvc_update_deferred_block(struct nlm_block *block, int result)
{
	block->b_flags |= B_GOT_CALLBACK;
	if (result == 0)
		block->b_granted = 1;
	else
		block->b_flags |= B_TIMED_OUT;
}

static int nlmsvc_grant_deferred(struct file_lock *fl, int result)
{
	struct nlm_block *block;
	int rc = -ENOENT;

	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n",
				block, block->b_flags);
			if (block->b_flags & B_QUEUED) {
				if (block->b_flags & B_TIMED_OUT) {
					rc = -ENOLCK;
					break;
				}
				nlmsvc_update_deferred_block(block, result);
			} else if (result == 0)
				block->b_granted = 1;

			nlmsvc_insert_block_locked(block, 0);
			svc_wake_up(block->b_daemon);
			rc = 0;
			break;
		}
	}
	spin_unlock(&nlm_blocked_lock);
	if (rc == -ENOENT)
		printk(KERN_WARNING "lockd: grant for unknown block\n");
	return rc;
}

/*
 * Unblock a blocked lock request. This is a callback invoked from the
 * VFS layer when a lock on which we blocked is removed.
 *
 * This function doesn't grant the blocked lock instantly, but rather moves
 * the block to the head of nlm_blocked where it can be picked up by lockd.
 */
static void
nlmsvc_notify_blocked(struct file_lock *fl)
{
	struct nlm_block *block;

	dprintk("lockd: VFS unblock notification for block %p\n", fl);
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			nlmsvc_insert_block_locked(block, 0);
			spin_unlock(&nlm_blocked_lock);
			svc_wake_up(block->b_daemon);
			return;
		}
	}
	spin_unlock(&nlm_blocked_lock);
	printk(KERN_WARNING "lockd: notification for unknown block!\n");
}

static fl_owner_t nlmsvc_get_owner(fl_owner_t owner)
{
	return nlmsvc_get_lockowner(owner);
}

static void nlmsvc_put_owner(fl_owner_t owner)
{
	nlmsvc_put_lockowner(owner);
}

const struct lock_manager_operations nlmsvc_lock_operations = {
	.lm_notify = nlmsvc_notify_blocked,
	.lm_grant = nlmsvc_grant_deferred,
	.lm_get_owner = nlmsvc_get_owner,
	.lm_put_owner = nlmsvc_put_owner,
};
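
/*
 * These operations are attached to every blocked lock via fl_lmops in
 * nlmsvc_create_block(), so the VFS lock code (or a filesystem providing
 * its own ->lock() implementation) can notify lockd when a blocked lock
 * becomes grantable or when a deferred request completes.
 */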

/*
 * Try to claim a lock that was previously blocked.
 *
 * Note that we use both the RPC_GRANTED_MSG call _and_ an async
 * RPC thread when notifying the client. This seems like overkill...
 * Here's why:
 *  - we don't want to use a synchronous RPC thread, otherwise
 *    we might find ourselves hanging on a dead portmapper.
 *  - Some lockd implementations (e.g. HP) don't react to
 *    RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
 */
static void
nlmsvc_grant_blocked(struct nlm_block *block)
{
	struct nlm_file *file = block->b_file;
	struct nlm_lock *lock = &block->b_call->a_args.lock;
	int mode;
	int error;
	loff_t fl_start, fl_end;

	dprintk("lockd: grant blocked lock %p\n", block);

	kref_get(&block->b_count);

	/* Unlink block request from list */
	nlmsvc_unlink_block(block);

	/* If b_granted is true this means we've been here before.
	 * Just retry the grant callback, possibly refreshing the RPC
	 * binding */
	if (block->b_granted) {
		nlm_rebind_host(block->b_host);
		goto callback;
	}

	/* Try the lock operation again */
	/* vfs_lock_file() can mangle fl_start and fl_end, but we need
	 * them unchanged for the GRANT_MSG
	 */
	lock->fl.c.flc_flags |= FL_SLEEP;
	fl_start = lock->fl.fl_start;
	fl_end = lock->fl.fl_end;
	mode = lock_to_openmode(&lock->fl);
	error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL);
	lock->fl.c.flc_flags &= ~FL_SLEEP;
	lock->fl.fl_start = fl_start;
	lock->fl.fl_end = fl_end;

	switch (error) {
	case 0:
		break;
	case FILE_LOCK_DEFERRED:
		dprintk("lockd: lock still blocked error %d\n", error);
		nlmsvc_insert_block(block, NLM_NEVER);
		nlmsvc_release_block(block);
		return;
	default:
		printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
		       -error, __func__);
		nlmsvc_insert_block(block, 10 * HZ);
		nlmsvc_release_block(block);
		return;
	}

callback:
	/* Lock was granted by VFS. */
	dprintk("lockd: GRANTing blocked lock.\n");
	block->b_granted = 1;

	/* keep block on the list, but don't reattempt until the RPC
	 * completes or the submission fails
	 */
	nlmsvc_insert_block(block, NLM_NEVER);

	/* Call the client -- use a soft RPC task since nlmsvc_retry_blocked
	 * will queue up a new one if this one times out
	 */
	error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
			       &nlmsvc_grant_ops);

	/* RPC submission failed, wait a bit and retry */
	if (error < 0)
		nlmsvc_insert_block(block, 10 * HZ);
}

/*
 * This is the callback from the RPC layer when the NLM_GRANTED_MSG
 * RPC call has succeeded or timed out.
 * Like all RPC callbacks, it is invoked by the rpciod process, so it
 * better not sleep. Therefore, we put the blocked lock on the nlm_blocked
 * chain once more in order to have it removed by lockd itself (which can
 * then sleep on the file semaphore without disrupting e.g. the nfs client).
 */
static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst *call = data;
	struct nlm_block *block = call->a_block;
	unsigned long timeout;

	dprintk("lockd: GRANT_MSG RPC callback\n");

	spin_lock(&nlm_blocked_lock);
	/* if the block is not on a list at this point then it has
	 * been invalidated. Don't try to requeue it.
	 *
	 * FIXME: it's possible that the block is removed from the list
	 * after this check but before the nlmsvc_insert_block. In that
	 * case it will be added back. Perhaps we need better locking
	 * for nlm_blocked?
	 */
	if (list_empty(&block->b_list))
		goto out;

	/* Technically, we should down the file semaphore here. Since we
	 * move the block towards the head of the queue only, no harm
	 * can be done, though. */
	if (task->tk_status < 0) {
		/* RPC error: Re-insert for retransmission */
		timeout = 10 * HZ;
	} else {
		/* Call was successful, now wait for client callback */
		timeout = 60 * HZ;
	}
	nlmsvc_insert_block_locked(block, timeout);
	svc_wake_up(block->b_daemon);
out:
	spin_unlock(&nlm_blocked_lock);
}

/*
 * FIXME: nlmsvc_release_block() grabs a mutex. This is not allowed for an
 * .rpc_release rpc_call_op
 */
static void nlmsvc_grant_release(void *data)
{
	struct nlm_rqst *call = data;
	nlmsvc_release_block(call->a_block);
}

static const struct rpc_call_ops nlmsvc_grant_ops = {
	.rpc_call_done = nlmsvc_grant_callback,
	.rpc_release = nlmsvc_grant_release,
};

/*
 * We received a GRANT_RES callback. Try to find the corresponding
 * block.
 */
void
nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
{
	struct nlm_block *block;
	struct file_lock *fl;
	int error;

	dprintk("grant_reply: looking for cookie %x, s=%d \n",
		*(unsigned int *)(cookie->data), status);
	if (!(block = nlmsvc_find_block(cookie)))
		return;

	switch (status) {
	case nlm_lck_denied_grace_period:
		/* Try again in a couple of seconds */
		nlmsvc_insert_block(block, 10 * HZ);
		break;
	case nlm_lck_denied:
		/* Client doesn't want it, just unlock it */
		nlmsvc_unlink_block(block);
		fl = &block->b_call->a_args.lock.fl;
		fl->c.flc_type = F_UNLCK;
		error = vfs_lock_file(fl->c.flc_file, F_SETLK, fl, NULL);
		if (error)
			pr_warn("lockd: unable to unlock lock rejected by client!\n");
		break;
	default:
		/*
		 * Either it was accepted or the status makes no sense;
		 * just unlink it either way.
		 */
		nlmsvc_unlink_block(block);
	}
	nlmsvc_release_block(block);
}

/* Helper function to handle retry of a deferred block.
 * If it is a blocking lock, call grant_blocked.
 * For a non-blocking lock or test lock, revisit the request.
 */
static void
retry_deferred_block(struct nlm_block *block)
{
	if (!(block->b_flags & B_GOT_CALLBACK))
		block->b_flags |= B_TIMED_OUT;
	nlmsvc_insert_block(block, NLM_TIMEOUT);
	dprintk("revisit block %p flags %d\n", block, block->b_flags);
	if (block->b_deferred_req) {
		block->b_deferred_req->revisit(block->b_deferred_req, 0);
		block->b_deferred_req = NULL;
	}
}

/*
 * Retry all blocked locks that have been notified. This is where lockd
 * picks up locks that can be granted, or grant notifications that must
 * be retransmitted.
 */
void
nlmsvc_retry_blocked(struct svc_rqst *rqstp)
{
	unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
	struct nlm_block *block;

	spin_lock(&nlm_blocked_lock);
	while (!list_empty(&nlm_blocked) && !svc_thread_should_stop(rqstp)) {
		block = list_entry(nlm_blocked.next, struct nlm_block, b_list);

		if (block->b_when == NLM_NEVER)
			break;
		if (time_after(block->b_when, jiffies)) {
			timeout = block->b_when - jiffies;
			break;
		}
		spin_unlock(&nlm_blocked_lock);

		dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
			block, block->b_when);
		if (block->b_flags & B_QUEUED) {
			dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n",
				block, block->b_granted, block->b_flags);
			retry_deferred_block(block);
		} else
			nlmsvc_grant_blocked(block);
		spin_lock(&nlm_blocked_lock);
	}
	spin_unlock(&nlm_blocked_lock);

	if (timeout < MAX_SCHEDULE_TIMEOUT)
		mod_timer(&nlmsvc_retry, jiffies + timeout);
}