// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/lockd/svclock.c
 *
 * Handling of server-side locks, mostly of the blocked variety.
 * This is the ugliest part of lockd because we tread on very thin ice.
 * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
 * IMNSHO introducing the grant callback into the NLM protocol was one
 * of the worst ideas Sun ever had. Except maybe for the idea of doing
 * NFS file locking at all.
 *
 * I'm trying hard to avoid race conditions by protecting most accesses
 * to a file's list of blocked locks through a semaphore. The global
 * list of blocked locks is not protected in this fashion however.
 * Therefore, some functions (such as the RPC callback for the async grant
 * call) move blocked locks towards the head of the list *while some other
 * process might be traversing it*. This should not be a problem in
 * practice, because this will only cause functions traversing the list
 * to visit some blocks twice.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/nlm.h>
#include <linux/lockd/lockd.h>
#include <linux/kthread.h>

#define NLMDBG_FACILITY		NLMDBG_SVCLOCK

#ifdef CONFIG_LOCKD_V4
#define nlm_deadlock	nlm4_deadlock
#else
#define nlm_deadlock	nlm_lck_denied
#endif

static void nlmsvc_release_block(struct nlm_block *block);
static void nlmsvc_insert_block(struct nlm_block *block, unsigned long);
static void nlmsvc_remove_block(struct nlm_block *block);

static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
static void nlmsvc_freegrantargs(struct nlm_rqst *call);
static const struct rpc_call_ops nlmsvc_grant_ops;

/*
 * The list of blocked locks to retry
 */
static LIST_HEAD(nlm_blocked);
static DEFINE_SPINLOCK(nlm_blocked_lock);

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
{
	/*
	 * We can get away with a static buffer because this is only called
	 * from lockd, which is single-threaded.
	 */
	static char buf[2*NLM_MAXCOOKIELEN+1];
	unsigned int i, len = sizeof(buf);
	char *p = buf;

	len--;	/* allow for trailing \0 */
	if (len < 3)
		return "???";
	for (i = 0 ; i < cookie->len ; i++) {
		if (len < 2) {
			strcpy(p-3, "...");
			break;
		}
		sprintf(p, "%02x", cookie->data[i]);
		p += 2;
		len -= 2;
	}
	*p = '\0';

	return buf;
}
#endif

/*
 * Insert a blocked lock into the global list
 */
static void
nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when)
{
	struct nlm_block *b;
	struct list_head *pos;

	dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
	if (list_empty(&block->b_list)) {
		kref_get(&block->b_count);
	} else {
		list_del_init(&block->b_list);
	}

	pos = &nlm_blocked;
	if (when != NLM_NEVER) {
		if ((when += jiffies) == NLM_NEVER)
			when++;
		list_for_each(pos, &nlm_blocked) {
			b = list_entry(pos, struct nlm_block, b_list);
			if (time_after(b->b_when, when) || b->b_when == NLM_NEVER)
				break;
		}
		/* On normal exit from the loop, pos == &nlm_blocked,
		 * so we will be adding to the end of the list - good
		 */
	}

	list_add_tail(&block->b_list, pos);
	block->b_when = when;
}

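/*
 * Insert a block into the global list, taking nlm_blocked_lock.
 */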
static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
{
	spin_lock(&nlm_blocked_lock);
	nlmsvc_insert_block_locked(block, when);
	spin_unlock(&nlm_blocked_lock);
}

/*
 * Remove a block from the global list
 */
static inline void
nlmsvc_remove_block(struct nlm_block *block)
{
	if (!list_empty(&block->b_list)) {
		spin_lock(&nlm_blocked_lock);
		list_del_init(&block->b_list);
		spin_unlock(&nlm_blocked_lock);
		nlmsvc_release_block(block);
	}
}

/*
 * Find a block for a given lock
 */
static struct nlm_block *
nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block *block;
	struct file_lock *fl;

	dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
			file, lock->fl.fl_pid,
			(long long)lock->fl.fl_start,
			(long long)lock->fl.fl_end, lock->fl.fl_type);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		fl = &block->b_call->a_args.lock.fl;
		dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
				block->b_file, fl->fl_pid,
				(long long)fl->fl_start,
				(long long)fl->fl_end, fl->fl_type,
				nlmdbg_cookie2a(&block->b_call->a_args.cookie));
		if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
			kref_get(&block->b_count);
			return block;
		}
	}

	return NULL;
}

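/*
 * Compare two NLM cookies for equality.
 */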
static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
{
	if (a->len != b->len)
		return 0;
	if (memcmp(a->data, b->data, a->len))
		return 0;
	return 1;
}

/*
 * Find a block with a given NLM cookie.
 */
static inline struct nlm_block *
nlmsvc_find_block(struct nlm_cookie *cookie)
{
	struct nlm_block *block;

	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_cookie_match(&block->b_call->a_args.cookie, cookie))
			goto found;
	}

	return NULL;

found:
	dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
	kref_get(&block->b_count);
	return block;
}

/*
 * Create a block and initialize it.
 *
 * Note: we explicitly set the cookie of the grant reply to that of
 * the blocked lock request. The spec explicitly mentions that the client
 * should _not_ rely on the callback containing the same cookie as the
 * request, but (as I found out later) that's because some implementations
 * do just this. Never mind the standards committees, they support our
 * logging industries.
 *
 * 10 years later: I hope we can safely ignore these old and broken
 * clients by now. Let's fix this so we can uniquely identify an incoming
 * GRANTED_RES message by cookie, without having to rely on the client's IP
 * address. --okir
 */
static struct nlm_block *
nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
		    struct nlm_file *file, struct nlm_lock *lock,
		    struct nlm_cookie *cookie)
{
	struct nlm_block *block;
	struct nlm_rqst *call = NULL;

	call = nlm_alloc_call(host);
	if (call == NULL)
		return NULL;

	/* Allocate memory for block, and initialize arguments */
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		goto failed;
	kref_init(&block->b_count);
	INIT_LIST_HEAD(&block->b_list);
	INIT_LIST_HEAD(&block->b_flist);

	if (!nlmsvc_setgrantargs(call, lock))
		goto failed_free;

	/* Set notifier function for VFS, and init args */
	call->a_args.lock.fl.fl_flags |= FL_SLEEP;
	call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
	nlmclnt_next_cookie(&call->a_args.cookie);

	dprintk("lockd: created block %p...\n", block);

	/* Create and initialize the block */
	block->b_daemon = rqstp->rq_server;
	block->b_host	= host;
	block->b_file	= file;
	file->f_count++;

	/* Add to file's list of blocks */
	list_add(&block->b_flist, &file->f_blocks);

	/* Set up RPC arguments for callback */
	block->b_call = call;
	call->a_flags = RPC_TASK_ASYNC;
	call->a_block = block;

	return block;

failed_free:
	kfree(block);
failed:
	nlmsvc_release_call(call);
	return NULL;
}

/*
 * Delete a block.
 * It is the caller's responsibility to check whether the file
 * can be closed hereafter.
 */
static int nlmsvc_unlink_block(struct nlm_block *block)
{
	int status;

	dprintk("lockd: unlinking block %p...\n", block);

	/* Remove block from list */
	status = locks_delete_block(&block->b_call->a_args.lock.fl);
	nlmsvc_remove_block(block);
	return status;
}

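/*
 * kref release function: unlink the block from its file and free it.
 * Called via kref_put_mutex() with the file's f_mutex held.
 */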
static void nlmsvc_free_block(struct kref *kref)
{
	struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
	struct nlm_file *file = block->b_file;

	dprintk("lockd: freeing block %p...\n", block);

	/* Remove block from file's list of blocks */
	list_del_init(&block->b_flist);
	mutex_unlock(&file->f_mutex);

	nlmsvc_freegrantargs(block->b_call);
	nlmsvc_release_call(block->b_call);
	nlm_release_file(block->b_file);
	kfree(block);
}

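/*
 * Drop a reference to a block, freeing it when the last one goes away.
 */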
static void nlmsvc_release_block(struct nlm_block *block)
{
	if (block != NULL)
		kref_put_mutex(&block->b_count, nlmsvc_free_block, &block->b_file->f_mutex);
}

/*
 * Loop over all blocks and delete blocks held by
 * a matching host.
 */
void nlmsvc_traverse_blocks(struct nlm_host *host,
			struct nlm_file *file,
			nlm_host_match_fn_t match)
{
	struct nlm_block *block, *next;

restart:
	mutex_lock(&file->f_mutex);
	list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
		if (!match(block->b_host, host))
			continue;
		/* Do not destroy blocks that are not on
		 * the global retry list - why? */
		if (list_empty(&block->b_list))
			continue;
		kref_get(&block->b_count);
		mutex_unlock(&file->f_mutex);
		nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
		goto restart;
	}
	mutex_unlock(&file->f_mutex);
}

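/*
 * Take a reference to a lockowner.
 */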
static struct nlm_lockowner *
nlmsvc_get_lockowner(struct nlm_lockowner *lockowner)
{
	refcount_inc(&lockowner->count);
	return lockowner;
}

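/*
 * Drop a reference to a lockowner. The last put unhashes it from the
 * host's h_lockowners list and frees it.
 */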
static void nlmsvc_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlmsvc_release_host(lockowner->host);
	kfree(lockowner);
}

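/*
 * Look up a lockowner by pid. The caller must hold host->h_lock.
 */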
static struct nlm_lockowner *__nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
{
	struct nlm_lockowner *lockowner;

	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid != pid)
			continue;
		return nlmsvc_get_lockowner(lockowner);
	}
	return NULL;
}

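/*
 * Find or create the lockowner for a given pid. h_lock is dropped
 * while allocating, so we search again before inserting the new entry
 * in case another task raced with us.
 */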
static struct nlm_lockowner *nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlmsvc_find_lockowner(host, pid);

	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*res), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlmsvc_find_lockowner(host, pid);
		if (res == NULL && new != NULL) {
			res = new;
			/* fs/locks.c will manage the refcount through lock_ops */
			refcount_set(&new->count, 1);
			new->pid = pid;
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}

	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}

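/*
 * Release the lockowner reference held by an nlm_lock.
 */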
void
nlmsvc_release_lockowner(struct nlm_lock *lock)
{
	if (lock->fl.fl_owner)
		nlmsvc_put_lockowner(lock->fl.fl_owner);
}

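/*
 * file_lock_operations for locks held on behalf of NLM clients:
 * copying a lock takes a lockowner reference, and releasing the
 * lock's private data drops it again.
 */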
static void nlmsvc_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	struct nlm_lockowner *nlm_lo = (struct nlm_lockowner *)fl->fl_owner;

	new->fl_owner = nlmsvc_get_lockowner(nlm_lo);
}

static void nlmsvc_locks_release_private(struct file_lock *fl)
{
	nlmsvc_put_lockowner((struct nlm_lockowner *)fl->fl_owner);
}

static const struct file_lock_operations nlmsvc_lock_ops = {
	.fl_copy_lock = nlmsvc_locks_copy_lock,
	.fl_release_private = nlmsvc_locks_release_private,
};

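/*
 * Attach a lockowner and our fl_ops to a file_lock, so that fs/locks.c
 * manages the lockowner refcount for us from here on.
 */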
void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host,
						pid_t pid)
{
	fl->fl_owner = nlmsvc_find_lockowner(host, pid);
	if (fl->fl_owner != NULL)
		fl->fl_ops = &nlmsvc_lock_ops;
}

/*
 * Initialize arguments for GRANTED call. The nlm_rqst structure
 * has been cleared already.
 */
static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
	call->a_args.lock.caller = utsname()->nodename;
	call->a_args.lock.oh.len = lock->oh.len;

	/* set default data area */
	call->a_args.lock.oh.data = call->a_owner;
	call->a_args.lock.svid = ((struct nlm_lockowner *)lock->fl.fl_owner)->pid;

	if (lock->oh.len > NLMCLNT_OHSIZE) {
		void *data = kmalloc(lock->oh.len, GFP_KERNEL);
		if (!data)
			return 0;
		call->a_args.lock.oh.data = (u8 *) data;
	}

	memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
	return 1;
}

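/*
 * Undo nlmsvc_setgrantargs(): free a separately allocated owner handle
 * and release the private lock state.
 */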
static void nlmsvc_freegrantargs(struct nlm_rqst *call)
{
	if (call->a_args.lock.oh.data != call->a_owner)
		kfree(call->a_args.lock.oh.data);

	locks_release_private(&call->a_args.lock.fl);
}

/*
 * Deferred lock request handling for non-blocking lock
 */
static __be32
nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
{
	__be32 status = nlm_lck_denied_nolocks;

	block->b_flags |= B_QUEUED;

	nlmsvc_insert_block(block, NLM_TIMEOUT);

	block->b_cache_req = &rqstp->rq_chandle;
	if (rqstp->rq_chandle.defer) {
		block->b_deferred_req =
			rqstp->rq_chandle.defer(block->b_cache_req);
		if (block->b_deferred_req != NULL)
			status = nlm_drop_reply;
	}
	dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
		block, block->b_flags, ntohl(status));

	return status;
}

/*
 * Attempt to establish a lock, and if it can't be granted, block it
 * if required.
 */
__be32
nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
	    struct nlm_host *host, struct nlm_lock *lock, int wait,
	    struct nlm_cookie *cookie, int reclaim)
{
	struct nlm_block *block = NULL;
	int error;
	__be32 ret;

	dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
		locks_inode(file->f_file)->i_sb->s_id,
		locks_inode(file->f_file)->i_ino,
		lock->fl.fl_type, lock->fl.fl_pid,
		(long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end,
		wait);

	/* Lock file against concurrent access */
	mutex_lock(&file->f_mutex);
	/* Get existing block (in case client is busy-waiting)
	 * or create new block
	 */
	block = nlmsvc_lookup_block(file, lock);
	if (block == NULL) {
		block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
		ret = nlm_lck_denied_nolocks;
		if (block == NULL)
			goto out;
		lock = &block->b_call->a_args.lock;
	} else
		lock->fl.fl_flags &= ~FL_SLEEP;

	if (block->b_flags & B_QUEUED) {
		dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n",
							block, block->b_flags);
		if (block->b_granted) {
			nlmsvc_unlink_block(block);
			ret = nlm_granted;
			goto out;
		}
		if (block->b_flags & B_TIMED_OUT) {
			nlmsvc_unlink_block(block);
			ret = nlm_lck_denied;
			goto out;
		}
		ret = nlm_drop_reply;
		goto out;
	}

	if (locks_in_grace(SVC_NET(rqstp)) && !reclaim) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}
	if (reclaim && !locks_in_grace(SVC_NET(rqstp))) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}

	if (!wait)
		lock->fl.fl_flags &= ~FL_SLEEP;
	error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
	lock->fl.fl_flags &= ~FL_SLEEP;

	dprintk("lockd: vfs_lock_file returned %d\n", error);
	switch (error) {
		case 0:
			ret = nlm_granted;
			goto out;
		case -EAGAIN:
			/*
			 * If this is a blocking request for an
			 * already pending lock request then we need
			 * to put it back on lockd's block list
			 */
			if (wait)
				break;
			ret = nlm_lck_denied;
			goto out;
		case FILE_LOCK_DEFERRED:
			if (wait)
				break;
			/* Filesystem lock operation is in progress
			   Add it to the queue waiting for callback */
			ret = nlmsvc_defer_lock_rqst(rqstp, block);
			goto out;
		case -EDEADLK:
			ret = nlm_deadlock;
			goto out;
		default:			/* includes ENOLCK */
			ret = nlm_lck_denied_nolocks;
			goto out;
	}

	ret = nlm_lck_blocked;

	/* Append to list of blocked */
	nlmsvc_insert_block(block, NLM_NEVER);
out:
	mutex_unlock(&file->f_mutex);
	nlmsvc_release_block(block);
	dprintk("lockd: nlmsvc_lock returned %u\n", ret);
	return ret;
}

/*
 * Test for presence of a conflicting lock.
 */
__be32
nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
		struct nlm_host *host, struct nlm_lock *lock,
		struct nlm_lock *conflock, struct nlm_cookie *cookie)
{
	int error;
	__be32 ret;
	struct nlm_lockowner *test_owner;

	dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
		locks_inode(file->f_file)->i_sb->s_id,
		locks_inode(file->f_file)->i_ino,
		lock->fl.fl_type,
		(long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end);

	if (locks_in_grace(SVC_NET(rqstp))) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}

	/* If there's a conflicting lock, remember to clean up the test lock */
	test_owner = (struct nlm_lockowner *)lock->fl.fl_owner;

	error = vfs_test_lock(file->f_file, &lock->fl);
	if (error) {
		/* We can't currently deal with deferred test requests */
		if (error == FILE_LOCK_DEFERRED)
			WARN_ON_ONCE(1);

		ret = nlm_lck_denied_nolocks;
		goto out;
	}

	if (lock->fl.fl_type == F_UNLCK) {
		ret = nlm_granted;
		goto out;
	}

	dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
		lock->fl.fl_type, (long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end);
	conflock->caller = "somehost";	/* FIXME */
	conflock->len = strlen(conflock->caller);
	conflock->oh.len = 0;		/* don't return OH info */
	conflock->svid = ((struct nlm_lockowner *)lock->fl.fl_owner)->pid;
	conflock->fl.fl_type = lock->fl.fl_type;
	conflock->fl.fl_start = lock->fl.fl_start;
	conflock->fl.fl_end = lock->fl.fl_end;
	locks_release_private(&lock->fl);

	/* Clean up the test lock */
	lock->fl.fl_owner = NULL;
	nlmsvc_put_lockowner(test_owner);

	ret = nlm_lck_denied;
out:
	return ret;
}

/*
 * Remove a lock.
 * This implies a CANCEL call: We send a GRANT_MSG, the client replies
 * with a GRANT_RES call which gets lost, and calls UNLOCK immediately
 * afterwards. In this case the block will still be there, and hence
 * must be removed.
 */
__be32
nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
	int error;

	dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
		locks_inode(file->f_file)->i_sb->s_id,
		locks_inode(file->f_file)->i_ino,
		lock->fl.fl_pid,
		(long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end);

	/* First, cancel any lock that might be there */
	nlmsvc_cancel_blocked(net, file, lock);

	lock->fl.fl_type = F_UNLCK;
	error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);

	return (error < 0) ? nlm_lck_denied_nolocks : nlm_granted;
}

/*
 * Cancel a previously blocked request.
 *
 * A cancel request always overrides any grant that may currently
 * be in progress.
 * The calling procedure must check whether the file can be closed.
 */
__be32
nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block *block;
	int status = 0;

	dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
		locks_inode(file->f_file)->i_sb->s_id,
		locks_inode(file->f_file)->i_ino,
		lock->fl.fl_pid,
		(long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end);

	if (locks_in_grace(net))
		return nlm_lck_denied_grace_period;

	mutex_lock(&file->f_mutex);
	block = nlmsvc_lookup_block(file, lock);
	mutex_unlock(&file->f_mutex);
	if (block != NULL) {
		vfs_cancel_lock(block->b_file->f_file,
				&block->b_call->a_args.lock.fl);
		status = nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
	}
	return status ? nlm_lck_denied : nlm_granted;
}

/*
 * This is a callback from the filesystem for VFS file lock requests.
 * It will be used if lm_grant is defined and the filesystem can not
 * respond to the request immediately.
 * For SETLK or SETLKW request it will get the local posix lock.
 * In all cases it will move the block to the head of nlm_blocked q where
 * nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit the
 * deferred rpc for GETLK and SETLK.
 */
static void
nlmsvc_update_deferred_block(struct nlm_block *block, int result)
{
	block->b_flags |= B_GOT_CALLBACK;
	if (result == 0)
		block->b_granted = 1;
	else
		block->b_flags |= B_TIMED_OUT;
}

static int nlmsvc_grant_deferred(struct file_lock *fl, int result)
{
	struct nlm_block *block;
	int rc = -ENOENT;

	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n",
							block, block->b_flags);
			if (block->b_flags & B_QUEUED) {
				if (block->b_flags & B_TIMED_OUT) {
					rc = -ENOLCK;
					break;
				}
				nlmsvc_update_deferred_block(block, result);
			} else if (result == 0)
				block->b_granted = 1;

			nlmsvc_insert_block_locked(block, 0);
			svc_wake_up(block->b_daemon);
			rc = 0;
			break;
		}
	}
	spin_unlock(&nlm_blocked_lock);
	if (rc == -ENOENT)
		printk(KERN_WARNING "lockd: grant for unknown block\n");
	return rc;
}

/*
 * Unblock a blocked lock request. This is a callback invoked from the
 * VFS layer when a lock on which we blocked is removed.
 *
 * This function doesn't grant the blocked lock instantly, but rather moves
 * the block to the head of nlm_blocked where it can be picked up by lockd.
 */
static void
nlmsvc_notify_blocked(struct file_lock *fl)
{
	struct nlm_block *block;

	dprintk("lockd: VFS unblock notification for block %p\n", fl);
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			nlmsvc_insert_block_locked(block, 0);
			spin_unlock(&nlm_blocked_lock);
			svc_wake_up(block->b_daemon);
			return;
		}
	}
	spin_unlock(&nlm_blocked_lock);
	printk(KERN_WARNING "lockd: notification for unknown block!\n");
}

const struct lock_manager_operations nlmsvc_lock_operations = {
	.lm_notify = nlmsvc_notify_blocked,
	.lm_grant = nlmsvc_grant_deferred,
};

/*
 * Try to claim a lock that was previously blocked.
 *
 * Note that we use both the RPC_GRANTED_MSG call _and_ an async
 * RPC thread when notifying the client. This seems like overkill...
 * Here's why:
 *  - we don't want to use a synchronous RPC thread, otherwise
 *    we might find ourselves hanging on a dead portmapper.
 *  - Some lockd implementations (e.g. HP) don't react to
 *    RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
 */
static void
nlmsvc_grant_blocked(struct nlm_block *block)
{
	struct nlm_file *file = block->b_file;
	struct nlm_lock *lock = &block->b_call->a_args.lock;
	int error;
	loff_t fl_start, fl_end;

	dprintk("lockd: grant blocked lock %p\n", block);

	kref_get(&block->b_count);

	/* Unlink block request from list */
	nlmsvc_unlink_block(block);

	/* If b_granted is true this means we've been here before.
	 * Just retry the grant callback, possibly refreshing the RPC
	 * binding */
	if (block->b_granted) {
		nlm_rebind_host(block->b_host);
		goto callback;
	}

	/* Try the lock operation again */
	/* vfs_lock_file() can mangle fl_start and fl_end, but we need
	 * them unchanged for the GRANT_MSG
	 */
	lock->fl.fl_flags |= FL_SLEEP;
	fl_start = lock->fl.fl_start;
	fl_end = lock->fl.fl_end;
	error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
	lock->fl.fl_flags &= ~FL_SLEEP;
	lock->fl.fl_start = fl_start;
	lock->fl.fl_end = fl_end;

	switch (error) {
	case 0:
		break;
	case FILE_LOCK_DEFERRED:
		dprintk("lockd: lock still blocked error %d\n", error);
		nlmsvc_insert_block(block, NLM_NEVER);
		nlmsvc_release_block(block);
		return;
	default:
		printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
				-error, __func__);
		nlmsvc_insert_block(block, 10 * HZ);
		nlmsvc_release_block(block);
		return;
	}

callback:
	/* Lock was granted by VFS. */
	dprintk("lockd: GRANTing blocked lock.\n");
	block->b_granted = 1;

	/* keep block on the list, but don't reattempt until the RPC
	 * completes or the submission fails
	 */
	nlmsvc_insert_block(block, NLM_NEVER);

	/* Call the client -- use a soft RPC task since nlmsvc_retry_blocked
	 * will queue up a new one if this one times out
	 */
	error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
				&nlmsvc_grant_ops);

	/* RPC submission failed, wait a bit and retry */
	if (error < 0)
		nlmsvc_insert_block(block, 10 * HZ);
}

/*
 * This is the callback from the RPC layer when the NLM_GRANTED_MSG
 * RPC call has succeeded or timed out.
 * Like all RPC callbacks, it is invoked by the rpciod process, so it
 * better not sleep. Therefore, we put the blocked lock on the nlm_blocked
 * chain once more in order to have it removed by lockd itself (which can
 * then sleep on the file semaphore without disrupting e.g. the nfs client).
 */
static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst *call = data;
	struct nlm_block *block = call->a_block;
	unsigned long timeout;

	dprintk("lockd: GRANT_MSG RPC callback\n");

	spin_lock(&nlm_blocked_lock);
	/* if the block is not on a list at this point then it has
	 * been invalidated. Don't try to requeue it.
	 *
	 * FIXME: it's possible that the block is removed from the list
	 * after this check but before the nlmsvc_insert_block. In that
	 * case it will be added back. Perhaps we need better locking
	 * for nlm_blocked?
	 */
	if (list_empty(&block->b_list))
		goto out;

	/* Technically, we should down the file semaphore here. Since we
	 * move the block towards the head of the queue only, no harm
	 * can be done, though. */
	if (task->tk_status < 0) {
		/* RPC error: Re-insert for retransmission */
		timeout = 10 * HZ;
	} else {
		/* Call was successful, now wait for client callback */
		timeout = 60 * HZ;
	}
	nlmsvc_insert_block_locked(block, timeout);
	svc_wake_up(block->b_daemon);
out:
	spin_unlock(&nlm_blocked_lock);
}

/*
 * FIXME: nlmsvc_release_block() grabs a mutex. This is not allowed for an
 * .rpc_release rpc_call_op
 */
static void nlmsvc_grant_release(void *data)
{
	struct nlm_rqst *call = data;

	nlmsvc_release_block(call->a_block);
}

static const struct rpc_call_ops nlmsvc_grant_ops = {
	.rpc_call_done = nlmsvc_grant_callback,
	.rpc_release = nlmsvc_grant_release,
};

/*
 * We received a GRANT_RES callback. Try to find the corresponding
 * block.
 */
void
nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
{
	struct nlm_block *block;

	dprintk("grant_reply: looking for cookie %x, s=%d \n",
		*(unsigned int *)(cookie->data), status);
	if (!(block = nlmsvc_find_block(cookie)))
		return;

	if (status == nlm_lck_denied_grace_period) {
		/* Try again in a couple of seconds */
		nlmsvc_insert_block(block, 10 * HZ);
	} else {
		/*
		 * Lock is now held by client, or has been rejected.
		 * In both cases, the block should be removed.
		 */
		nlmsvc_unlink_block(block);
	}
	nlmsvc_release_block(block);
}

/* Helper function to handle retry of a deferred block.
 * If it is a blocking lock, call grant_blocked.
 * For a non-blocking lock or test lock, revisit the request.
 */
static void
retry_deferred_block(struct nlm_block *block)
{
	if (!(block->b_flags & B_GOT_CALLBACK))
		block->b_flags |= B_TIMED_OUT;
	nlmsvc_insert_block(block, NLM_TIMEOUT);
	dprintk("revisit block %p flags %d\n", block, block->b_flags);
	if (block->b_deferred_req) {
		block->b_deferred_req->revisit(block->b_deferred_req, 0);
		block->b_deferred_req = NULL;
	}
}

/*
 * Retry all blocked locks that have been notified. This is where lockd
 * picks up locks that can be granted, or grant notifications that must
 * be retransmitted.
 */
unsigned long
nlmsvc_retry_blocked(void)
{
	unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
	struct nlm_block *block;

	spin_lock(&nlm_blocked_lock);
	while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
		block = list_entry(nlm_blocked.next, struct nlm_block, b_list);

		if (block->b_when == NLM_NEVER)
			break;
		if (time_after(block->b_when, jiffies)) {
			timeout = block->b_when - jiffies;
			break;
		}
		spin_unlock(&nlm_blocked_lock);

		dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
			block, block->b_when);
		if (block->b_flags & B_QUEUED) {
			dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n",
				block, block->b_granted, block->b_flags);
			retry_deferred_block(block);
		} else
			nlmsvc_grant_blocked(block);
		spin_lock(&nlm_blocked_lock);
	}
	spin_unlock(&nlm_blocked_lock);

	return timeout;
}