v6.2
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/lockd/clntproc.c
 *
 * RPC procedures for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>

#define NLMDBG_FACILITY		NLMDBG_CLIENT
#define NLMCLNT_GRACE_WAIT	(5*HZ)
#define NLMCLNT_POLL_TIMEOUT	(30*HZ)
#define NLMCLNT_MAX_RETRIES	3

static int	nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static int	nlm_stat_to_errno(__be32 stat);
static void	nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
static int	nlmclnt_cancel(struct nlm_host *, int , struct file_lock *);

static const struct rpc_call_ops nlmclnt_unlock_ops;
static const struct rpc_call_ops nlmclnt_cancel_ops;

/*
 * Cookie counter for NLM requests
 */
static atomic_t	nlm_cookie = ATOMIC_INIT(0x1234);

void nlmclnt_next_cookie(struct nlm_cookie *c)
{
	u32	cookie = atomic_inc_return(&nlm_cookie);

	memcpy(c->data, &cookie, 4);
	c->len=4;
}
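
/*
 * Editor's illustration (not part of the kernel source): the cookie is
 * just the next value of a global 32-bit counter, copied into the opaque
 * cookie buffer in host byte order; the server echoes it back so replies
 * can be matched to requests. A minimal stand-alone sketch of the same
 * construction, using hypothetical demo_* names:
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_cookie {
	unsigned char data[8];	/* opaque on-the-wire bytes */
	unsigned int len;
};

static uint32_t demo_counter = 0x1234;	/* mirrors ATOMIC_INIT(0x1234) */

static void demo_next_cookie(struct demo_cookie *c)
{
	uint32_t cookie = ++demo_counter;	/* atomic_inc_return() stand-in */

	memcpy(c->data, &cookie, 4);
	c->len = 4;
}

int main(void)
{
	struct demo_cookie c;

	demo_next_cookie(&c);
	printf("cookie len=%u first byte=%#x\n", c.len, c.data[0]);
	return 0;
}
#endif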

static struct nlm_lockowner *
nlmclnt_get_lockowner(struct nlm_lockowner *lockowner)
{
	refcount_inc(&lockowner->count);
	return lockowner;
}

static void nlmclnt_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlmclnt_release_host(lockowner->host);
	kfree(lockowner);
}

static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid == pid)
			return -EBUSY;
	}
	return 0;
}

static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
{
	uint32_t res;
	do {
		res = host->h_pidcount++;
	} while (nlm_pidbusy(host, res) < 0);
	return res;
}

static struct nlm_lockowner *__nlmclnt_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->owner != owner)
			continue;
		return nlmclnt_get_lockowner(lockowner);
	}
	return NULL;
}

static struct nlm_lockowner *nlmclnt_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlmclnt_find_lockowner(host, owner);
	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlmclnt_find_lockowner(host, owner);
		if (res == NULL && new != NULL) {
			res = new;
			refcount_set(&new->count, 1);
			new->owner = owner;
			new->pid = __nlm_alloc_pid(host);
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}
	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}
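
/*
 * Editor's note (not part of the kernel source): nlmclnt_find_lockowner()
 * uses the classic "drop the lock, allocate, retake the lock, re-check"
 * pattern: kmalloc(GFP_KERNEL) may sleep, so it cannot run under the
 * spinlock, and after reacquiring the lock a concurrent caller may have
 * inserted the same owner already. A generic userspace sketch of the same
 * pattern with pthreads (hypothetical demo_* names):
 */
#if 0
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_entry { int key; struct demo_entry *next; } *demo_list;

static struct demo_entry *demo_find_locked(int key)
{
	struct demo_entry *e;

	for (e = demo_list; e != NULL; e = e->next)
		if (e->key == key)
			return e;
	return NULL;
}

static struct demo_entry *demo_find_or_create(int key)
{
	struct demo_entry *res, *new = NULL;

	pthread_mutex_lock(&demo_lock);
	res = demo_find_locked(key);
	if (res == NULL) {
		pthread_mutex_unlock(&demo_lock);
		new = malloc(sizeof(*new));	/* may block: no lock held */
		pthread_mutex_lock(&demo_lock);
		res = demo_find_locked(key);	/* re-check after the gap */
		if (res == NULL && new != NULL) {
			new->key = key;
			new->next = demo_list;
			demo_list = new;
			res = new;
			new = NULL;
		}
	}
	pthread_mutex_unlock(&demo_lock);
	free(new);	/* lost the race (or never allocated): discard */
	return res;
}
#endif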

/*
 * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
 */
static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_args	*argp = &req->a_args;
	struct nlm_lock	*lock = &argp->lock;
	char *nodename = req->a_host->h_rpcclnt->cl_nodename;

	nlmclnt_next_cookie(&argp->cookie);
	memcpy(&lock->fh, NFS_FH(locks_inode(fl->fl_file)), sizeof(struct nfs_fh));
	lock->caller  = nodename;
	lock->oh.data = req->a_owner;
	lock->oh.len  = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
				(unsigned int)fl->fl_u.nfs_fl.owner->pid,
				nodename);
	lock->svid = fl->fl_u.nfs_fl.owner->pid;
	lock->fl.fl_start = fl->fl_start;
	lock->fl.fl_end = fl->fl_end;
	lock->fl.fl_type = fl->fl_type;
}

static void nlmclnt_release_lockargs(struct nlm_rqst *req)
{
	WARN_ON_ONCE(req->a_args.lock.fl.fl_ops != NULL);
}

/**
 * nlmclnt_proc - Perform a single client-side lock request
 * @host: address of a valid nlm_host context representing the NLM server
 * @cmd: fcntl-style file lock operation to perform
 * @fl: address of arguments for the lock operation
 * @data: address of data to be sent to callback operations
 *
 */
int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *data)
{
	struct nlm_rqst		*call;
	int			status;
	const struct nlmclnt_operations *nlmclnt_ops = host->h_nlmclnt_ops;

	call = nlm_alloc_call(host);
	if (call == NULL)
		return -ENOMEM;

	if (nlmclnt_ops && nlmclnt_ops->nlmclnt_alloc_call)
		nlmclnt_ops->nlmclnt_alloc_call(data);

	nlmclnt_locks_init_private(fl, host);
	if (!fl->fl_u.nfs_fl.owner) {
		/* lockowner allocation has failed */
		nlmclnt_release_call(call);
		return -ENOMEM;
	}
	/* Set up the argument struct */
	nlmclnt_setlockargs(call, fl);
	call->a_callback_data = data;

	if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
		if (fl->fl_type != F_UNLCK) {
			call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
			status = nlmclnt_lock(call, fl);
		} else
			status = nlmclnt_unlock(call, fl);
	} else if (IS_GETLK(cmd))
		status = nlmclnt_test(call, fl);
	else
		status = -EINVAL;
	fl->fl_ops->fl_release_private(fl);
	fl->fl_ops = NULL;

	dprintk("lockd: clnt proc returns %d\n", status);
	return status;
}
EXPORT_SYMBOL_GPL(nlmclnt_proc);
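
/*
 * Editor's note (not part of the kernel source): nlmclnt_proc() is the
 * entry point the NFSv2/v3 client uses for fcntl()-style lock requests on
 * NFS files. A hedged sketch of a typical caller, loosely modeled on the
 * NFSv3 client (exact names and fields vary by kernel version):
 */
#if 0
static int demo_nfs3_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(filp);

	/* the nlm_host was set up at mount time; no callback data here */
	return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl, NULL);
}
#endif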

/*
 * Allocate an NLM RPC call struct
 */
struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
{
	struct nlm_rqst	*call;

	for(;;) {
		call = kzalloc(sizeof(*call), GFP_KERNEL);
		if (call != NULL) {
			refcount_set(&call->a_count, 1);
			locks_init_lock(&call->a_args.lock.fl);
			locks_init_lock(&call->a_res.lock.fl);
			call->a_host = nlm_get_host(host);
			return call;
		}
		if (signalled())
			break;
		printk("nlm_alloc_call: failed, waiting for memory\n");
		schedule_timeout_interruptible(5*HZ);
	}
	return NULL;
}

void nlmclnt_release_call(struct nlm_rqst *call)
{
	const struct nlmclnt_operations *nlmclnt_ops = call->a_host->h_nlmclnt_ops;

	if (!refcount_dec_and_test(&call->a_count))
		return;
	if (nlmclnt_ops && nlmclnt_ops->nlmclnt_release_call)
		nlmclnt_ops->nlmclnt_release_call(call->a_callback_data);
	nlmclnt_release_host(call->a_host);
	nlmclnt_release_lockargs(call);
	kfree(call);
}

static void nlmclnt_rpc_release(void *data)
{
	nlmclnt_release_call(data);
}

static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
	DEFINE_WAIT(wait);
	int status = -EINTR;

	prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
	if (!signalled ()) {
		schedule_timeout(NLMCLNT_GRACE_WAIT);
		try_to_freeze();
		if (!signalled ())
			status = 0;
	}
	finish_wait(queue, &wait);
	return status;
}

/*
 * Generic NLM call
 */
static int
nlmclnt_call(const struct cred *cred, struct nlm_rqst *req, u32 proc)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct nlm_args	*argp = &req->a_args;
	struct nlm_res	*resp = &req->a_res;
	struct rpc_message msg = {
		.rpc_argp	= argp,
		.rpc_resp	= resp,
		.rpc_cred	= cred,
	};
	int		status;

	dprintk("lockd: call procedure %d on %s\n",
			(int)proc, host->h_name);

	do {
		if (host->h_reclaiming && !argp->reclaim)
			goto in_grace_period;

		/* If we have no RPC client yet, create one. */
		if ((clnt = nlm_bind_host(host)) == NULL)
			return -ENOLCK;
		msg.rpc_proc = &clnt->cl_procinfo[proc];

		/* Perform the RPC call. If an error occurs, try again */
		if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
			dprintk("lockd: rpc_call returned error %d\n", -status);
			switch (status) {
			case -EPROTONOSUPPORT:
				status = -EINVAL;
				break;
			case -ECONNREFUSED:
			case -ETIMEDOUT:
			case -ENOTCONN:
				nlm_rebind_host(host);
				status = -EAGAIN;
				break;
			case -ERESTARTSYS:
				return signalled () ? -EINTR : status;
			default:
				break;
			}
			break;
		} else
		if (resp->status == nlm_lck_denied_grace_period) {
			dprintk("lockd: server in grace period\n");
			if (argp->reclaim) {
				printk(KERN_WARNING
				     "lockd: spurious grace period reject?!\n");
				return -ENOLCK;
			}
		} else {
			if (!argp->reclaim) {
				/* We appear to be out of the grace period */
				wake_up_all(&host->h_gracewait);
			}
			dprintk("lockd: server returns status %d\n",
				ntohl(resp->status));
			return 0;	/* Okay, call complete */
		}

in_grace_period:
		/*
		 * The server has rebooted and appears to be in the grace
		 * period during which locks are only allowed to be
		 * reclaimed.
		 * We can only back off and try again later.
		 */
		status = nlm_wait_on_grace(&host->h_gracewait);
	} while (status == 0);

	return status;
}

/*
 * Generic NLM call, async version.
 */
static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct rpc_task_setup task_setup_data = {
		.rpc_message = msg,
		.callback_ops = tk_ops,
		.callback_data = req,
		.flags = RPC_TASK_ASYNC,
	};

	dprintk("lockd: call procedure %d on %s (async)\n",
			(int)proc, host->h_name);

	/* If we have no RPC client yet, create one. */
	clnt = nlm_bind_host(host);
	if (clnt == NULL)
		goto out_err;
	msg->rpc_proc = &clnt->cl_procinfo[proc];
	task_setup_data.rpc_client = clnt;

        /* bootstrap and kick off the async RPC call */
	return rpc_run_task(&task_setup_data);
out_err:
	tk_ops->rpc_release(req);
	return ERR_PTR(-ENOLCK);
}

static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
	struct rpc_task *task;

	task = __nlm_async_call(req, proc, msg, tk_ops);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

/*
 * NLM asynchronous call.
 */
int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_args,
		.rpc_resp	= &req->a_res,
	};
	return nlm_do_async_call(req, proc, &msg, tk_ops);
}

int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_res,
	};
	return nlm_do_async_call(req, proc, &msg, tk_ops);
}

/*
 * NLM client asynchronous call.
 *
 * Note that although the calls are asynchronous, and are therefore
 *      guaranteed to complete, we still always attempt to wait for
 *      completion in order to be able to correctly track the lock
 *      state.
 */
static int nlmclnt_async_call(const struct cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_args,
		.rpc_resp	= &req->a_res,
		.rpc_cred	= cred,
	};
	struct rpc_task *task;
	int err;

	task = __nlm_async_call(req, proc, &msg, tk_ops);
	if (IS_ERR(task))
		return PTR_ERR(task);
	err = rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return err;
}
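
/*
 * Editor's note (not part of the kernel source): the wrappers above differ
 * only in task lifetime. rpc_run_task() returns a referenced rpc_task;
 * nlm_do_async_call() drops that reference at once, leaving completion
 * handling entirely to the rpc_call_ops (fire-and-forget), while
 * nlmclnt_async_call() waits for the task to complete so the caller can
 * inspect req->a_res inline. An illustrative sketch of the two styles
 * (demo_* names are hypothetical):
 */
#if 0
static void demo_two_async_styles(const struct cred *cred,
				  struct nlm_rqst *req,
				  const struct rpc_call_ops *demo_ops)
{
	/* fire-and-forget: the result arrives later via demo_ops callbacks */
	nlm_async_call(req, NLMPROC_GRANTED_MSG, demo_ops);

	/* async task, but wait for completion and read the result inline */
	if (nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, demo_ops) == 0)
		dprintk("unlock result: %d\n", ntohl(req->a_res.status));
}
#endif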

/*
 * TEST for the presence of a conflicting lock
 */
static int
nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
	int	status;

	status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST);
	if (status < 0)
		goto out;

	switch (req->a_res.status) {
		case nlm_granted:
			fl->fl_type = F_UNLCK;
			break;
		case nlm_lck_denied:
			/*
			 * Report the conflicting lock back to the application.
			 */
			fl->fl_start = req->a_res.lock.fl.fl_start;
			fl->fl_end = req->a_res.lock.fl.fl_end;
			fl->fl_type = req->a_res.lock.fl.fl_type;
			fl->fl_pid = -req->a_res.lock.fl.fl_pid;
			break;
		default:
			status = nlm_stat_to_errno(req->a_res.status);
	}
out:
	nlmclnt_release_call(req);
	return status;
}
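
/*
 * Editor's note (not part of the kernel source): the negated fl_pid above
 * flags the conflicting owner as remote, so the value is not a meaningful
 * local pid. From userspace, an F_GETLK on an NFS file that reaches this
 * path behaves like any other F_GETLK (illustrative sketch):
 */
#if 0
#include <fcntl.h>
#include <stdio.h>

static void demo_probe_lock(int fd)
{
	struct flock fl = {
		.l_type   = F_WRLCK,	/* would a write lock succeed? */
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,		/* whole file */
	};

	if (fcntl(fd, F_GETLK, &fl) == -1) {
		perror("F_GETLK");
		return;
	}
	if (fl.l_type == F_UNLCK)
		printf("no conflicting lock\n");
	else
		printf("conflict: type=%d start=%lld len=%lld pid=%ld\n",
		       fl.l_type, (long long)fl.l_start,
		       (long long)fl.l_len, (long)fl.l_pid);
}
#endif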

static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
	new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state;
	new->fl_u.nfs_fl.owner = nlmclnt_get_lockowner(fl->fl_u.nfs_fl.owner);
	list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted);
	spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
}

static void nlmclnt_locks_release_private(struct file_lock *fl)
{
	spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
	list_del(&fl->fl_u.nfs_fl.list);
	spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
	nlmclnt_put_lockowner(fl->fl_u.nfs_fl.owner);
}

static const struct file_lock_operations nlmclnt_lock_ops = {
	.fl_copy_lock = nlmclnt_locks_copy_lock,
	.fl_release_private = nlmclnt_locks_release_private,
};

static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{
	fl->fl_u.nfs_fl.state = 0;
	fl->fl_u.nfs_fl.owner = nlmclnt_find_lockowner(host, fl->fl_owner);
	INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list);
	fl->fl_ops = &nlmclnt_lock_ops;
}

static int do_vfs_lock(struct file_lock *fl)
{
	return locks_lock_file_wait(fl->fl_file, fl);
}

/*
 * LOCK: Try to create a lock
 *
 *			Programmer Harassment Alert
 *
 * When given a blocking lock request in a sync RPC call, the HPUX lockd
 * will faithfully return LCK_BLOCKED but never cares to notify us when
 * the lock could be granted. This way, our local process could hang
 * around forever waiting for the callback.
 *
 *  Solution A:	Implement busy-waiting
 *  Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
 *
 * For now I am implementing solution A, because I hate the idea of
 * re-implementing lockd for a third time in two months. The async
 * calls shouldn't be too hard to do, however.
 *
 * This is one of the lovely things about standards in the NFS area:
 * they're so soft and squishy you can't really blame HP for doing this.
 */
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
	const struct cred *cred = nfs_file_cred(fl->fl_file);
	struct nlm_host	*host = req->a_host;
	struct nlm_res	*resp = &req->a_res;
	struct nlm_wait *block = NULL;
	unsigned char fl_flags = fl->fl_flags;
	unsigned char fl_type;
	int status = -ENOLCK;

	if (nsm_monitor(host) < 0)
		goto out;
	req->a_args.state = nsm_local_state;

	fl->fl_flags |= FL_ACCESS;
	status = do_vfs_lock(fl);
	fl->fl_flags = fl_flags;
	if (status < 0)
		goto out;

	block = nlmclnt_prepare_block(host, fl);
again:
	/*
	 * Initialise resp->status to a valid non-zero value,
	 * since 0 == nlm_lck_granted
	 */
	resp->status = nlm_lck_blocked;
	for(;;) {
		/* Reboot protection */
		fl->fl_u.nfs_fl.state = host->h_state;
		status = nlmclnt_call(cred, req, NLMPROC_LOCK);
		if (status < 0)
			break;
		/* Did a reclaimer thread notify us of a server reboot? */
		if (resp->status ==  nlm_lck_denied_grace_period)
			continue;
		if (resp->status != nlm_lck_blocked)
			break;
		/* Wait on an NLM blocking lock */
		status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
		if (status < 0)
			break;
		if (resp->status != nlm_lck_blocked)
			break;
	}

	/* if we were interrupted while blocking, then cancel the lock request
	 * and exit
	 */
	if (resp->status == nlm_lck_blocked) {
		if (!req->a_args.block)
			goto out_unlock;
		if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
			goto out_unblock;
	}

	if (resp->status == nlm_granted) {
		down_read(&host->h_rwsem);
		/* Check whether or not the server has rebooted */
		if (fl->fl_u.nfs_fl.state != host->h_state) {
			up_read(&host->h_rwsem);
			goto again;
		}
		/* Ensure the resulting lock will get added to granted list */
		fl->fl_flags |= FL_SLEEP;
		if (do_vfs_lock(fl) < 0)
			printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
		up_read(&host->h_rwsem);
		fl->fl_flags = fl_flags;
		status = 0;
	}
	if (status < 0)
		goto out_unlock;
	/*
	 * EAGAIN doesn't make sense for sleeping locks, and in some
	 * cases NLM_LCK_DENIED is returned for a permanent error.  So
	 * turn it into an ENOLCK.
	 */
	if (resp->status == nlm_lck_denied && (fl_flags & FL_SLEEP))
		status = -ENOLCK;
	else
		status = nlm_stat_to_errno(resp->status);
out_unblock:
	nlmclnt_finish_block(block);
out:
	nlmclnt_release_call(req);
	return status;
out_unlock:
	/* Fatal error: ensure that we remove the lock altogether */
	dprintk("lockd: lock attempt ended in fatal error.\n"
		"       Attempting to unlock.\n");
	nlmclnt_finish_block(block);
	fl_type = fl->fl_type;
	fl->fl_type = F_UNLCK;
	down_read(&host->h_rwsem);
	do_vfs_lock(fl);
	up_read(&host->h_rwsem);
	fl->fl_type = fl_type;
	fl->fl_flags = fl_flags;
	nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
	return status;
}

/*
 * RECLAIM: Try to reclaim a lock
 */
int
nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl,
		struct nlm_rqst *req)
{
	int		status;

	memset(req, 0, sizeof(*req));
	locks_init_lock(&req->a_args.lock.fl);
	locks_init_lock(&req->a_res.lock.fl);
	req->a_host  = host;

	/* Set up the argument struct */
	nlmclnt_setlockargs(req, fl);
	req->a_args.reclaim = 1;

	status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK);
	if (status >= 0 && req->a_res.status == nlm_granted)
		return 0;

	printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
				"(errno %d, status %d)\n", fl->fl_pid,
				status, ntohl(req->a_res.status));

	/*
	 * FIXME: This is a serious failure. We can
	 *
	 *  a.	Ignore the problem
	 *  b.	Send the owning process some signal (Linux doesn't have
	 *	SIGLOST, though...)
	 *  c.	Retry the operation
	 *
	 * Until someone comes up with a simple implementation
	 * for b or c, I'll choose option a.
	 */

	return -ENOLCK;
}

/*
 * UNLOCK: remove an existing lock
 */
static int
nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_host	*host = req->a_host;
	struct nlm_res	*resp = &req->a_res;
	int status;
	unsigned char fl_flags = fl->fl_flags;

	/*
	 * Note: the server is supposed to either grant us the unlock
	 * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
	 * case, we want to unlock.
	 */
	fl->fl_flags |= FL_EXISTS;
	down_read(&host->h_rwsem);
	status = do_vfs_lock(fl);
	up_read(&host->h_rwsem);
	fl->fl_flags = fl_flags;
	if (status == -ENOENT) {
		status = 0;
		goto out;
	}

	refcount_inc(&req->a_count);
	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
			NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
	if (status < 0)
		goto out;

	if (resp->status == nlm_granted)
		goto out;

	if (resp->status != nlm_lck_denied_nolocks)
		printk("lockd: unexpected unlock status: %d\n",
			ntohl(resp->status));
	/* What to do now? I'm out of my depth... */
	status = -ENOLCK;
out:
	nlmclnt_release_call(req);
	return status;
}

static void nlmclnt_unlock_prepare(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = data;
	const struct nlmclnt_operations *nlmclnt_ops = req->a_host->h_nlmclnt_ops;
	bool defer_call = false;

	if (nlmclnt_ops && nlmclnt_ops->nlmclnt_unlock_prepare)
		defer_call = nlmclnt_ops->nlmclnt_unlock_prepare(task, req->a_callback_data);

	if (!defer_call)
		rpc_call_start(task);
}

static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = data;
	u32 status = ntohl(req->a_res.status);

	if (RPC_SIGNALLED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
		switch (task->tk_status) {
		case -EACCES:
		case -EIO:
			goto die;
		default:
			goto retry_rebind;
		}
	}
	if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
		rpc_delay(task, NLMCLNT_GRACE_WAIT);
		goto retry_unlock;
	}
	if (status != NLM_LCK_GRANTED)
		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
	return;
 retry_rebind:
	nlm_rebind_host(req->a_host);
 retry_unlock:
	rpc_restart_call(task);
}

static const struct rpc_call_ops nlmclnt_unlock_ops = {
	.rpc_call_prepare = nlmclnt_unlock_prepare,
	.rpc_call_done = nlmclnt_unlock_callback,
	.rpc_release = nlmclnt_rpc_release,
};

/*
 * Cancel a blocked lock request.
 * We always use an async RPC call for this in order not to hang a
 * process that has been Ctrl-C'ed.
 */
static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl)
{
	struct nlm_rqst	*req;
	int status;

	dprintk("lockd: blocking lock attempt was interrupted by a signal.\n"
		"       Attempting to cancel lock.\n");

	req = nlm_alloc_call(host);
	if (!req)
		return -ENOMEM;
	req->a_flags = RPC_TASK_ASYNC;

	nlmclnt_setlockargs(req, fl);
	req->a_args.block = block;

	refcount_inc(&req->a_count);
	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
			NLMPROC_CANCEL, &nlmclnt_cancel_ops);
	if (status == 0 && req->a_res.status == nlm_lck_denied)
		status = -ENOLCK;
	nlmclnt_release_call(req);
	return status;
}

static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = data;
	u32 status = ntohl(req->a_res.status);

	if (RPC_SIGNALLED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: CANCEL call error %d, retrying.\n",
					task->tk_status);
		goto retry_cancel;
	}

	switch (status) {
	case NLM_LCK_GRANTED:
	case NLM_LCK_DENIED_GRACE_PERIOD:
	case NLM_LCK_DENIED:
		/* Everything's good */
		break;
	case NLM_LCK_DENIED_NOLOCKS:
		dprintk("lockd: CANCEL failed (server has no locks)\n");
		goto retry_cancel;
	default:
		printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
			status);
	}

die:
	return;

retry_cancel:
	/* Don't ever retry more than 3 times */
	if (req->a_retries++ >= NLMCLNT_MAX_RETRIES)
		goto die;
	nlm_rebind_host(req->a_host);
	rpc_restart_call(task);
	rpc_delay(task, 30 * HZ);
}

static const struct rpc_call_ops nlmclnt_cancel_ops = {
	.rpc_call_done = nlmclnt_cancel_callback,
	.rpc_release = nlmclnt_rpc_release,
};

/*
 * Convert an NLM status code to a generic kernel errno
 */
static int
nlm_stat_to_errno(__be32 status)
{
	switch(ntohl(status)) {
	case NLM_LCK_GRANTED:
		return 0;
	case NLM_LCK_DENIED:
		return -EAGAIN;
	case NLM_LCK_DENIED_NOLOCKS:
	case NLM_LCK_DENIED_GRACE_PERIOD:
		return -ENOLCK;
	case NLM_LCK_BLOCKED:
		printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
		return -ENOLCK;
#ifdef CONFIG_LOCKD_V4
	case NLM_DEADLCK:
		return -EDEADLK;
	case NLM_ROFS:
		return -EROFS;
	case NLM_STALE_FH:
		return -ESTALE;
	case NLM_FBIG:
		return -EOVERFLOW;
	case NLM_FAILED:
		return -ENOLCK;
#endif
	}
	printk(KERN_NOTICE "lockd: unexpected server status %d\n",
		 ntohl(status));
	return -ENOLCK;
}
v6.8
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/lockd/clntproc.c
 *
 * RPC procedures for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>

#include "trace.h"

#define NLMDBG_FACILITY		NLMDBG_CLIENT
#define NLMCLNT_GRACE_WAIT	(5*HZ)
#define NLMCLNT_POLL_TIMEOUT	(30*HZ)
#define NLMCLNT_MAX_RETRIES	3

static int	nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static int	nlm_stat_to_errno(__be32 stat);
static void	nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
static int	nlmclnt_cancel(struct nlm_host *, int , struct file_lock *);

static const struct rpc_call_ops nlmclnt_unlock_ops;
static const struct rpc_call_ops nlmclnt_cancel_ops;

/*
 * Cookie counter for NLM requests
 */
static atomic_t	nlm_cookie = ATOMIC_INIT(0x1234);

void nlmclnt_next_cookie(struct nlm_cookie *c)
{
	u32	cookie = atomic_inc_return(&nlm_cookie);

	memcpy(c->data, &cookie, 4);
	c->len=4;
}

static struct nlm_lockowner *
nlmclnt_get_lockowner(struct nlm_lockowner *lockowner)
{
	refcount_inc(&lockowner->count);
	return lockowner;
}

static void nlmclnt_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlmclnt_release_host(lockowner->host);
	kfree(lockowner);
}

static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid == pid)
			return -EBUSY;
	}
	return 0;
}

static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
{
	uint32_t res;
	do {
		res = host->h_pidcount++;
	} while (nlm_pidbusy(host, res) < 0);
	return res;
}

static struct nlm_lockowner *__nlmclnt_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->owner != owner)
			continue;
		return nlmclnt_get_lockowner(lockowner);
	}
	return NULL;
}

static struct nlm_lockowner *nlmclnt_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlmclnt_find_lockowner(host, owner);
	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlmclnt_find_lockowner(host, owner);
		if (res == NULL && new != NULL) {
			res = new;
			refcount_set(&new->count, 1);
			new->owner = owner;
			new->pid = __nlm_alloc_pid(host);
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}
	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}

/*
 * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
 */
static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_args	*argp = &req->a_args;
	struct nlm_lock	*lock = &argp->lock;
	char *nodename = req->a_host->h_rpcclnt->cl_nodename;

	nlmclnt_next_cookie(&argp->cookie);
	memcpy(&lock->fh, NFS_FH(file_inode(fl->fl_file)), sizeof(struct nfs_fh));
	lock->caller  = nodename;
	lock->oh.data = req->a_owner;
	lock->oh.len  = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
				(unsigned int)fl->fl_u.nfs_fl.owner->pid,
				nodename);
	lock->svid = fl->fl_u.nfs_fl.owner->pid;
	lock->fl.fl_start = fl->fl_start;
	lock->fl.fl_end = fl->fl_end;
	lock->fl.fl_type = fl->fl_type;
}

static void nlmclnt_release_lockargs(struct nlm_rqst *req)
{
	WARN_ON_ONCE(req->a_args.lock.fl.fl_ops != NULL);
}

/**
 * nlmclnt_proc - Perform a single client-side lock request
 * @host: address of a valid nlm_host context representing the NLM server
 * @cmd: fcntl-style file lock operation to perform
 * @fl: address of arguments for the lock operation
 * @data: address of data to be sent to callback operations
 *
 */
int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *data)
{
	struct nlm_rqst		*call;
	int			status;
	const struct nlmclnt_operations *nlmclnt_ops = host->h_nlmclnt_ops;

	call = nlm_alloc_call(host);
	if (call == NULL)
		return -ENOMEM;

	if (nlmclnt_ops && nlmclnt_ops->nlmclnt_alloc_call)
		nlmclnt_ops->nlmclnt_alloc_call(data);

	nlmclnt_locks_init_private(fl, host);
	if (!fl->fl_u.nfs_fl.owner) {
		/* lockowner allocation has failed */
		nlmclnt_release_call(call);
		return -ENOMEM;
	}
	/* Set up the argument struct */
	nlmclnt_setlockargs(call, fl);
	call->a_callback_data = data;

	if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
		if (fl->fl_type != F_UNLCK) {
			call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
			status = nlmclnt_lock(call, fl);
		} else
			status = nlmclnt_unlock(call, fl);
	} else if (IS_GETLK(cmd))
		status = nlmclnt_test(call, fl);
	else
		status = -EINVAL;
	fl->fl_ops->fl_release_private(fl);
	fl->fl_ops = NULL;

	dprintk("lockd: clnt proc returns %d\n", status);
	return status;
}
EXPORT_SYMBOL_GPL(nlmclnt_proc);

/*
 * Allocate an NLM RPC call struct
 */
struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
{
	struct nlm_rqst	*call;

	for(;;) {
		call = kzalloc(sizeof(*call), GFP_KERNEL);
		if (call != NULL) {
			refcount_set(&call->a_count, 1);
			locks_init_lock(&call->a_args.lock.fl);
			locks_init_lock(&call->a_res.lock.fl);
			call->a_host = nlm_get_host(host);
			return call;
		}
		if (signalled())
			break;
		printk("nlm_alloc_call: failed, waiting for memory\n");
		schedule_timeout_interruptible(5*HZ);
	}
	return NULL;
}

void nlmclnt_release_call(struct nlm_rqst *call)
{
	const struct nlmclnt_operations *nlmclnt_ops = call->a_host->h_nlmclnt_ops;

	if (!refcount_dec_and_test(&call->a_count))
		return;
	if (nlmclnt_ops && nlmclnt_ops->nlmclnt_release_call)
		nlmclnt_ops->nlmclnt_release_call(call->a_callback_data);
	nlmclnt_release_host(call->a_host);
	nlmclnt_release_lockargs(call);
	kfree(call);
}

static void nlmclnt_rpc_release(void *data)
{
	nlmclnt_release_call(data);
}

static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
	DEFINE_WAIT(wait);
	int status = -EINTR;

	prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
	if (!signalled ()) {
		schedule_timeout(NLMCLNT_GRACE_WAIT);
		try_to_freeze();
		if (!signalled ())
			status = 0;
	}
	finish_wait(queue, &wait);
	return status;
}

/*
 * Generic NLM call
 */
static int
nlmclnt_call(const struct cred *cred, struct nlm_rqst *req, u32 proc)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct nlm_args	*argp = &req->a_args;
	struct nlm_res	*resp = &req->a_res;
	struct rpc_message msg = {
		.rpc_argp	= argp,
		.rpc_resp	= resp,
		.rpc_cred	= cred,
	};
	int		status;

	dprintk("lockd: call procedure %d on %s\n",
			(int)proc, host->h_name);

	do {
		if (host->h_reclaiming && !argp->reclaim)
			goto in_grace_period;

		/* If we have no RPC client yet, create one. */
		if ((clnt = nlm_bind_host(host)) == NULL)
			return -ENOLCK;
		msg.rpc_proc = &clnt->cl_procinfo[proc];

		/* Perform the RPC call. If an error occurs, try again */
		if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
			dprintk("lockd: rpc_call returned error %d\n", -status);
			switch (status) {
			case -EPROTONOSUPPORT:
				status = -EINVAL;
				break;
			case -ECONNREFUSED:
			case -ETIMEDOUT:
			case -ENOTCONN:
				nlm_rebind_host(host);
				status = -EAGAIN;
				break;
			case -ERESTARTSYS:
				return signalled () ? -EINTR : status;
			default:
				break;
			}
			break;
		} else
		if (resp->status == nlm_lck_denied_grace_period) {
			dprintk("lockd: server in grace period\n");
			if (argp->reclaim) {
				printk(KERN_WARNING
				     "lockd: spurious grace period reject?!\n");
				return -ENOLCK;
			}
		} else {
			if (!argp->reclaim) {
				/* We appear to be out of the grace period */
				wake_up_all(&host->h_gracewait);
			}
			dprintk("lockd: server returns status %d\n",
				ntohl(resp->status));
			return 0;	/* Okay, call complete */
		}

in_grace_period:
		/*
		 * The server has rebooted and appears to be in the grace
		 * period during which locks are only allowed to be
		 * reclaimed.
		 * We can only back off and try again later.
		 */
		status = nlm_wait_on_grace(&host->h_gracewait);
	} while (status == 0);

	return status;
}

/*
 * Generic NLM call, async version.
 */
static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct rpc_task_setup task_setup_data = {
		.rpc_message = msg,
		.callback_ops = tk_ops,
		.callback_data = req,
		.flags = RPC_TASK_ASYNC,
	};

	dprintk("lockd: call procedure %d on %s (async)\n",
			(int)proc, host->h_name);

	/* If we have no RPC client yet, create one. */
	clnt = nlm_bind_host(host);
	if (clnt == NULL)
		goto out_err;
	msg->rpc_proc = &clnt->cl_procinfo[proc];
	task_setup_data.rpc_client = clnt;

        /* bootstrap and kick off the async RPC call */
	return rpc_run_task(&task_setup_data);
out_err:
	tk_ops->rpc_release(req);
	return ERR_PTR(-ENOLCK);
}

static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
	struct rpc_task *task;

	task = __nlm_async_call(req, proc, msg, tk_ops);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

/*
 * NLM asynchronous call.
 */
int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_args,
		.rpc_resp	= &req->a_res,
	};
	return nlm_do_async_call(req, proc, &msg, tk_ops);
}

int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_res,
	};
	return nlm_do_async_call(req, proc, &msg, tk_ops);
}

/*
 * NLM client asynchronous call.
 *
 * Note that although the calls are asynchronous, and are therefore
 *      guaranteed to complete, we still always attempt to wait for
 *      completion in order to be able to correctly track the lock
 *      state.
 */
static int nlmclnt_async_call(const struct cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_args,
		.rpc_resp	= &req->a_res,
		.rpc_cred	= cred,
	};
	struct rpc_task *task;
	int err;

	task = __nlm_async_call(req, proc, &msg, tk_ops);
	if (IS_ERR(task))
		return PTR_ERR(task);
	err = rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return err;
}

/*
 * TEST for the presence of a conflicting lock
 */
static int
nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
	int	status;

	status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST);
	if (status < 0)
		goto out;

	switch (req->a_res.status) {
		case nlm_granted:
			fl->fl_type = F_UNLCK;
			break;
		case nlm_lck_denied:
			/*
			 * Report the conflicting lock back to the application.
			 */
			fl->fl_start = req->a_res.lock.fl.fl_start;
			fl->fl_end = req->a_res.lock.fl.fl_end;
			fl->fl_type = req->a_res.lock.fl.fl_type;
			fl->fl_pid = -req->a_res.lock.fl.fl_pid;
			break;
		default:
			status = nlm_stat_to_errno(req->a_res.status);
	}
out:
	trace_nlmclnt_test(&req->a_args.lock,
			   (const struct sockaddr *)&req->a_host->h_addr,
			   req->a_host->h_addrlen, req->a_res.status);
	nlmclnt_release_call(req);
	return status;
}

static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
	new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state;
	new->fl_u.nfs_fl.owner = nlmclnt_get_lockowner(fl->fl_u.nfs_fl.owner);
	list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted);
	spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
}

static void nlmclnt_locks_release_private(struct file_lock *fl)
{
	spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
	list_del(&fl->fl_u.nfs_fl.list);
	spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
	nlmclnt_put_lockowner(fl->fl_u.nfs_fl.owner);
}

static const struct file_lock_operations nlmclnt_lock_ops = {
	.fl_copy_lock = nlmclnt_locks_copy_lock,
	.fl_release_private = nlmclnt_locks_release_private,
};

static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{
	fl->fl_u.nfs_fl.state = 0;
	fl->fl_u.nfs_fl.owner = nlmclnt_find_lockowner(host, fl->fl_owner);
	INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list);
	fl->fl_ops = &nlmclnt_lock_ops;
}

static int do_vfs_lock(struct file_lock *fl)
{
	return locks_lock_file_wait(fl->fl_file, fl);
}

/*
 * LOCK: Try to create a lock
 *
 *			Programmer Harassment Alert
 *
 * When given a blocking lock request in a sync RPC call, the HPUX lockd
 * will faithfully return LCK_BLOCKED but never cares to notify us when
 * the lock could be granted. This way, our local process could hang
 * around forever waiting for the callback.
 *
 *  Solution A:	Implement busy-waiting
 *  Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
 *
 * For now I am implementing solution A, because I hate the idea of
 * re-implementing lockd for a third time in two months. The async
 * calls shouldn't be too hard to do, however.
 *
 * This is one of the lovely things about standards in the NFS area:
 * they're so soft and squishy you can't really blame HP for doing this.
 */
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
	const struct cred *cred = nfs_file_cred(fl->fl_file);
	struct nlm_host	*host = req->a_host;
	struct nlm_res	*resp = &req->a_res;
	struct nlm_wait block;
	unsigned char fl_flags = fl->fl_flags;
	unsigned char fl_type;
	__be32 b_status;
	int status = -ENOLCK;

	if (nsm_monitor(host) < 0)
		goto out;
	req->a_args.state = nsm_local_state;

	fl->fl_flags |= FL_ACCESS;
	status = do_vfs_lock(fl);
	fl->fl_flags = fl_flags;
	if (status < 0)
		goto out;

	nlmclnt_prepare_block(&block, host, fl);
again:
	/*
	 * Initialise resp->status to a valid non-zero value,
	 * since 0 == nlm_lck_granted
	 */
	resp->status = nlm_lck_blocked;

	/*
	 * A GRANTED callback can come at any time -- even before the reply
	 * to the LOCK request arrives, so we queue the wait before
	 * requesting the lock.
	 */
	nlmclnt_queue_block(&block);
	for (;;) {
		/* Reboot protection */
		fl->fl_u.nfs_fl.state = host->h_state;
		status = nlmclnt_call(cred, req, NLMPROC_LOCK);
		if (status < 0)
			break;
		/* Did a reclaimer thread notify us of a server reboot? */
		if (resp->status == nlm_lck_denied_grace_period)
			continue;
		if (resp->status != nlm_lck_blocked)
			break;
		/* Wait on an NLM blocking lock */
		status = nlmclnt_wait(&block, req, NLMCLNT_POLL_TIMEOUT);
		if (status < 0)
			break;
		if (block.b_status != nlm_lck_blocked)
			break;
	}
	b_status = nlmclnt_dequeue_block(&block);
	if (resp->status == nlm_lck_blocked)
		resp->status = b_status;

	/* if we were interrupted while blocking, then cancel the lock request
	 * and exit
	 */
	if (resp->status == nlm_lck_blocked) {
		if (!req->a_args.block)
			goto out_unlock;
		if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
			goto out;
	}

	if (resp->status == nlm_granted) {
		down_read(&host->h_rwsem);
		/* Check whether or not the server has rebooted */
		if (fl->fl_u.nfs_fl.state != host->h_state) {
			up_read(&host->h_rwsem);
			goto again;
		}
		/* Ensure the resulting lock will get added to granted list */
		fl->fl_flags |= FL_SLEEP;
		if (do_vfs_lock(fl) < 0)
			printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
		up_read(&host->h_rwsem);
		fl->fl_flags = fl_flags;
		status = 0;
	}
	if (status < 0)
		goto out_unlock;
	/*
	 * EAGAIN doesn't make sense for sleeping locks, and in some
	 * cases NLM_LCK_DENIED is returned for a permanent error.  So
	 * turn it into an ENOLCK.
	 */
	if (resp->status == nlm_lck_denied && (fl_flags & FL_SLEEP))
		status = -ENOLCK;
	else
		status = nlm_stat_to_errno(resp->status);
out:
	trace_nlmclnt_lock(&req->a_args.lock,
			   (const struct sockaddr *)&req->a_host->h_addr,
			   req->a_host->h_addrlen, req->a_res.status);
	nlmclnt_release_call(req);
	return status;
out_unlock:
	/* Fatal error: ensure that we remove the lock altogether */
	trace_nlmclnt_lock(&req->a_args.lock,
			   (const struct sockaddr *)&req->a_host->h_addr,
			   req->a_host->h_addrlen, req->a_res.status);
	dprintk("lockd: lock attempt ended in fatal error.\n"
		"       Attempting to unlock.\n");
	fl_type = fl->fl_type;
	fl->fl_type = F_UNLCK;
	down_read(&host->h_rwsem);
	do_vfs_lock(fl);
	up_read(&host->h_rwsem);
	fl->fl_type = fl_type;
	fl->fl_flags = fl_flags;
	nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
	return status;
}
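
/*
 * Editor's note (not part of the kernel source): compared with the v6.2
 * version above, this nlmclnt_lock() keeps the nlm_wait on the stack and
 * queues it *before* the LOCK request goes out, so a GRANTED callback that
 * arrives ahead of (or instead of) the LOCK reply is recorded in
 * block.b_status rather than lost; after the loop, b_status is folded into
 * resp->status. The general "publish the waiter before triggering the
 * event" shape, as a generic sketch (demo_* names are hypothetical):
 */
#if 0
struct demo_waiter;
void demo_queue_waiter(struct demo_waiter *w);
void demo_dequeue_waiter(struct demo_waiter *w);
int demo_send_request(void);
int demo_wait_for_grant(struct demo_waiter *w);

static int demo_request_and_wait(struct demo_waiter *w)
{
	demo_queue_waiter(w);		/* visible to the notifier first */
	if (demo_send_request() < 0) {
		demo_dequeue_waiter(w);
		return -EIO;
	}
	return demo_wait_for_grant(w);	/* cannot miss an early wakeup */
}
#endif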

/*
 * RECLAIM: Try to reclaim a lock
 */
int
nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl,
		struct nlm_rqst *req)
{
	int		status;

	memset(req, 0, sizeof(*req));
	locks_init_lock(&req->a_args.lock.fl);
	locks_init_lock(&req->a_res.lock.fl);
	req->a_host  = host;

	/* Set up the argument struct */
	nlmclnt_setlockargs(req, fl);
	req->a_args.reclaim = 1;

	status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK);
	if (status >= 0 && req->a_res.status == nlm_granted)
		return 0;

	printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
				"(errno %d, status %d)\n", fl->fl_pid,
				status, ntohl(req->a_res.status));

	/*
	 * FIXME: This is a serious failure. We can
	 *
	 *  a.	Ignore the problem
	 *  b.	Send the owning process some signal (Linux doesn't have
	 *	SIGLOST, though...)
	 *  c.	Retry the operation
	 *
	 * Until someone comes up with a simple implementation
	 * for b or c, I'll choose option a.
	 */

	return -ENOLCK;
}

/*
 * UNLOCK: remove an existing lock
 */
static int
nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_host	*host = req->a_host;
	struct nlm_res	*resp = &req->a_res;
	int status;
	unsigned char fl_flags = fl->fl_flags;

	/*
	 * Note: the server is supposed to either grant us the unlock
	 * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
	 * case, we want to unlock.
	 */
	fl->fl_flags |= FL_EXISTS;
	down_read(&host->h_rwsem);
	status = do_vfs_lock(fl);
	up_read(&host->h_rwsem);
	fl->fl_flags = fl_flags;
	if (status == -ENOENT) {
		status = 0;
		goto out;
	}

	refcount_inc(&req->a_count);
	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
			NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
	if (status < 0)
		goto out;

	if (resp->status == nlm_granted)
		goto out;

	if (resp->status != nlm_lck_denied_nolocks)
		printk("lockd: unexpected unlock status: %d\n",
			ntohl(resp->status));
	/* What to do now? I'm out of my depth... */
	status = -ENOLCK;
out:
	trace_nlmclnt_unlock(&req->a_args.lock,
			     (const struct sockaddr *)&req->a_host->h_addr,
			     req->a_host->h_addrlen, req->a_res.status);
	nlmclnt_release_call(req);
	return status;
}

static void nlmclnt_unlock_prepare(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = data;
	const struct nlmclnt_operations *nlmclnt_ops = req->a_host->h_nlmclnt_ops;
	bool defer_call = false;

	if (nlmclnt_ops && nlmclnt_ops->nlmclnt_unlock_prepare)
		defer_call = nlmclnt_ops->nlmclnt_unlock_prepare(task, req->a_callback_data);

	if (!defer_call)
		rpc_call_start(task);
}

static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = data;
	u32 status = ntohl(req->a_res.status);

	if (RPC_SIGNALLED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
		switch (task->tk_status) {
		case -EACCES:
		case -EIO:
			goto die;
		default:
			goto retry_rebind;
		}
	}
	if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
		rpc_delay(task, NLMCLNT_GRACE_WAIT);
		goto retry_unlock;
	}
	if (status != NLM_LCK_GRANTED)
		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
	return;
 retry_rebind:
	nlm_rebind_host(req->a_host);
 retry_unlock:
	rpc_restart_call(task);
}

static const struct rpc_call_ops nlmclnt_unlock_ops = {
	.rpc_call_prepare = nlmclnt_unlock_prepare,
	.rpc_call_done = nlmclnt_unlock_callback,
	.rpc_release = nlmclnt_rpc_release,
};

/*
 * Cancel a blocked lock request.
 * We always use an async RPC call for this in order not to hang a
 * process that has been Ctrl-C'ed.
 */
static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl)
{
	struct nlm_rqst	*req;
	int status;

	dprintk("lockd: blocking lock attempt was interrupted by a signal.\n"
		"       Attempting to cancel lock.\n");

	req = nlm_alloc_call(host);
	if (!req)
		return -ENOMEM;
	req->a_flags = RPC_TASK_ASYNC;

	nlmclnt_setlockargs(req, fl);
	req->a_args.block = block;

	refcount_inc(&req->a_count);
	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
			NLMPROC_CANCEL, &nlmclnt_cancel_ops);
	if (status == 0 && req->a_res.status == nlm_lck_denied)
		status = -ENOLCK;
	nlmclnt_release_call(req);
	return status;
}

static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = data;
	u32 status = ntohl(req->a_res.status);

	if (RPC_SIGNALLED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: CANCEL call error %d, retrying.\n",
					task->tk_status);
		goto retry_cancel;
	}

	switch (status) {
	case NLM_LCK_GRANTED:
	case NLM_LCK_DENIED_GRACE_PERIOD:
	case NLM_LCK_DENIED:
		/* Everything's good */
		break;
	case NLM_LCK_DENIED_NOLOCKS:
		dprintk("lockd: CANCEL failed (server has no locks)\n");
		goto retry_cancel;
	default:
		printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
			status);
	}

die:
	return;

retry_cancel:
	/* Don't ever retry more than 3 times */
	if (req->a_retries++ >= NLMCLNT_MAX_RETRIES)
		goto die;
	nlm_rebind_host(req->a_host);
	rpc_restart_call(task);
	rpc_delay(task, 30 * HZ);
}

static const struct rpc_call_ops nlmclnt_cancel_ops = {
	.rpc_call_done = nlmclnt_cancel_callback,
	.rpc_release = nlmclnt_rpc_release,
};

/*
 * Convert an NLM status code to a generic kernel errno
 */
static int
nlm_stat_to_errno(__be32 status)
{
	switch(ntohl(status)) {
	case NLM_LCK_GRANTED:
		return 0;
	case NLM_LCK_DENIED:
		return -EAGAIN;
	case NLM_LCK_DENIED_NOLOCKS:
	case NLM_LCK_DENIED_GRACE_PERIOD:
		return -ENOLCK;
	case NLM_LCK_BLOCKED:
		printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
		return -ENOLCK;
#ifdef CONFIG_LOCKD_V4
	case NLM_DEADLCK:
		return -EDEADLK;
	case NLM_ROFS:
		return -EROFS;
	case NLM_STALE_FH:
		return -ESTALE;
	case NLM_FBIG:
		return -EOVERFLOW;
	case NLM_FAILED:
		return -ENOLCK;
#endif
	}
	printk(KERN_NOTICE "lockd: unexpected server status %d\n",
		 ntohl(status));
	return -ENOLCK;
}