fs/lockd/clntproc.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * linux/fs/lockd/clntproc.c
  4 *
  5 * RPC procedures for the client side NLM implementation
  6 *
  7 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
  8 */
  9
 10#include <linux/module.h>
 11#include <linux/slab.h>
 12#include <linux/types.h>
 13#include <linux/errno.h>
 14#include <linux/fs.h>
 15#include <linux/filelock.h>
 16#include <linux/nfs_fs.h>
 17#include <linux/utsname.h>
 18#include <linux/freezer.h>
 19#include <linux/sunrpc/clnt.h>
 20#include <linux/sunrpc/svc.h>
 21#include <linux/lockd/lockd.h>
 22
 23#include "trace.h"
 24
 25#define NLMDBG_FACILITY		NLMDBG_CLIENT
 26#define NLMCLNT_GRACE_WAIT	(5*HZ)
 27#define NLMCLNT_POLL_TIMEOUT	(30*HZ)
 28#define NLMCLNT_MAX_RETRIES	3
 29
 30static int	nlmclnt_test(struct nlm_rqst *, struct file_lock *);
 31static int	nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
 32static int	nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
 33static int	nlm_stat_to_errno(__be32 stat);
 34static void	nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
 35static int	nlmclnt_cancel(struct nlm_host *, int , struct file_lock *);
 36
 37static const struct rpc_call_ops nlmclnt_unlock_ops;
 38static const struct rpc_call_ops nlmclnt_cancel_ops;
 39
 40/*
 41 * Cookie counter for NLM requests
 42 */
 43static atomic_t	nlm_cookie = ATOMIC_INIT(0x1234);
 44
 45void nlmclnt_next_cookie(struct nlm_cookie *c)
 46{
 47	u32	cookie = atomic_inc_return(&nlm_cookie);
 48
 49	memcpy(c->data, &cookie, 4);
 50	c->len=4;
 51}
 52
 53static struct nlm_lockowner *
 54nlmclnt_get_lockowner(struct nlm_lockowner *lockowner)
 55{
 56	refcount_inc(&lockowner->count);
 57	return lockowner;
 58}
 59
 60static void nlmclnt_put_lockowner(struct nlm_lockowner *lockowner)
 61{
 62	if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
 63		return;
 64	list_del(&lockowner->list);
 65	spin_unlock(&lockowner->host->h_lock);
 66	nlmclnt_release_host(lockowner->host);
 67	kfree(lockowner);
 68}
 69
 70static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
 71{
 72	struct nlm_lockowner *lockowner;
 73	list_for_each_entry(lockowner, &host->h_lockowners, list) {
 74		if (lockowner->pid == pid)
 75			return -EBUSY;
 76	}
 77	return 0;
 78}
 79
 80static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
 81{
 82	uint32_t res;
 83	do {
 84		res = host->h_pidcount++;
 85	} while (nlm_pidbusy(host, res) < 0);
 86	return res;
 87}
 88
 89static struct nlm_lockowner *__nlmclnt_find_lockowner(struct nlm_host *host, fl_owner_t owner)
 90{
 91	struct nlm_lockowner *lockowner;
 92	list_for_each_entry(lockowner, &host->h_lockowners, list) {
 93		if (lockowner->owner != owner)
 94			continue;
 95		return nlmclnt_get_lockowner(lockowner);
 96	}
 97	return NULL;
 98}
 99
100static struct nlm_lockowner *nlmclnt_find_lockowner(struct nlm_host *host, fl_owner_t owner)
101{
102	struct nlm_lockowner *res, *new = NULL;
103
104	spin_lock(&host->h_lock);
105	res = __nlmclnt_find_lockowner(host, owner);
106	if (res == NULL) {
107		spin_unlock(&host->h_lock);
108		new = kmalloc(sizeof(*new), GFP_KERNEL);
109		spin_lock(&host->h_lock);
110		res = __nlmclnt_find_lockowner(host, owner);
111		if (res == NULL && new != NULL) {
112			res = new;
113			refcount_set(&new->count, 1);
114			new->owner = owner;
115			new->pid = __nlm_alloc_pid(host);
116			new->host = nlm_get_host(host);
117			list_add(&new->list, &host->h_lockowners);
118			new = NULL;
119		}
120	}
121	spin_unlock(&host->h_lock);
122	kfree(new);
123	return res;
124}
125
126/*
127 * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
128 */
129static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
130{
131	struct nlm_args	*argp = &req->a_args;
132	struct nlm_lock	*lock = &argp->lock;
133	char *nodename = req->a_host->h_rpcclnt->cl_nodename;
134
135	nlmclnt_next_cookie(&argp->cookie);
136	memcpy(&lock->fh, NFS_FH(file_inode(fl->c.flc_file)),
137	       sizeof(struct nfs_fh));
138	lock->caller  = nodename;
139	lock->oh.data = req->a_owner;
140	lock->oh.len  = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
141				(unsigned int)fl->fl_u.nfs_fl.owner->pid,
142				nodename);
143	lock->svid = fl->fl_u.nfs_fl.owner->pid;
144	lock->fl.fl_start = fl->fl_start;
145	lock->fl.fl_end = fl->fl_end;
146	lock->fl.c.flc_type = fl->c.flc_type;
147}
148
149static void nlmclnt_release_lockargs(struct nlm_rqst *req)
150{
151	WARN_ON_ONCE(req->a_args.lock.fl.fl_ops != NULL);
152}
153
154/**
155 * nlmclnt_proc - Perform a single client-side lock request
156 * @host: address of a valid nlm_host context representing the NLM server
157 * @cmd: fcntl-style file lock operation to perform
158 * @fl: address of arguments for the lock operation
159 * @data: address of data to be sent to callback operations
160 *
161 */
162int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *data)
163{
164	struct nlm_rqst		*call;
165	int			status;
166	const struct nlmclnt_operations *nlmclnt_ops = host->h_nlmclnt_ops;
167
168	call = nlm_alloc_call(host);
169	if (call == NULL)
170		return -ENOMEM;
171
172	if (nlmclnt_ops && nlmclnt_ops->nlmclnt_alloc_call)
173		nlmclnt_ops->nlmclnt_alloc_call(data);
174
175	nlmclnt_locks_init_private(fl, host);
176	if (!fl->fl_u.nfs_fl.owner) {
177		/* lockowner allocation has failed */
178		nlmclnt_release_call(call);
179		return -ENOMEM;
180	}
181	/* Set up the argument struct */
182	nlmclnt_setlockargs(call, fl);
183	call->a_callback_data = data;
184
185	if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
186		if (fl->c.flc_type != F_UNLCK) {
187			call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
188			status = nlmclnt_lock(call, fl);
189		} else
190			status = nlmclnt_unlock(call, fl);
191	} else if (IS_GETLK(cmd))
192		status = nlmclnt_test(call, fl);
193	else
194		status = -EINVAL;
195	fl->fl_ops->fl_release_private(fl);
196	fl->fl_ops = NULL;
197
198	dprintk("lockd: clnt proc returns %d\n", status);
199	return status;
200}
201EXPORT_SYMBOL_GPL(nlmclnt_proc);
202
203/*
204 * Allocate an NLM RPC call struct
205 */
206struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
207{
208	struct nlm_rqst	*call;
209
210	for(;;) {
211		call = kzalloc(sizeof(*call), GFP_KERNEL);
212		if (call != NULL) {
213			refcount_set(&call->a_count, 1);
214			locks_init_lock(&call->a_args.lock.fl);
215			locks_init_lock(&call->a_res.lock.fl);
216			call->a_host = nlm_get_host(host);
217			return call;
218		}
219		if (signalled())
220			break;
221		printk("nlm_alloc_call: failed, waiting for memory\n");
222		schedule_timeout_interruptible(5*HZ);
223	}
224	return NULL;
225}
226
227void nlmclnt_release_call(struct nlm_rqst *call)
228{
229	const struct nlmclnt_operations *nlmclnt_ops = call->a_host->h_nlmclnt_ops;
230
231	if (!refcount_dec_and_test(&call->a_count))
232		return;
233	if (nlmclnt_ops && nlmclnt_ops->nlmclnt_release_call)
234		nlmclnt_ops->nlmclnt_release_call(call->a_callback_data);
235	nlmclnt_release_host(call->a_host);
236	nlmclnt_release_lockargs(call);
237	kfree(call);
238}
239
240static void nlmclnt_rpc_release(void *data)
241{
242	nlmclnt_release_call(data);
243}
244
245static int nlm_wait_on_grace(wait_queue_head_t *queue)
246{
247	DEFINE_WAIT(wait);
248	int status = -EINTR;
249
250	prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
251	if (!signalled ()) {
252		schedule_timeout(NLMCLNT_GRACE_WAIT);
253		try_to_freeze();
254		if (!signalled ())
255			status = 0;
256	}
257	finish_wait(queue, &wait);
258	return status;
259}
260
261/*
262 * Generic NLM call
263 */
264static int
265nlmclnt_call(const struct cred *cred, struct nlm_rqst *req, u32 proc)
266{
267	struct nlm_host	*host = req->a_host;
268	struct rpc_clnt	*clnt;
269	struct nlm_args	*argp = &req->a_args;
270	struct nlm_res	*resp = &req->a_res;
271	struct rpc_message msg = {
272		.rpc_argp	= argp,
273		.rpc_resp	= resp,
274		.rpc_cred	= cred,
275	};
276	int		status;
277
278	dprintk("lockd: call procedure %d on %s\n",
279			(int)proc, host->h_name);
280
281	do {
282		if (host->h_reclaiming && !argp->reclaim)
283			goto in_grace_period;
284
285		/* If we have no RPC client yet, create one. */
286		if ((clnt = nlm_bind_host(host)) == NULL)
287			return -ENOLCK;
288		msg.rpc_proc = &clnt->cl_procinfo[proc];
289
290		/* Perform the RPC call. If an error occurs, try again */
291		if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
292			dprintk("lockd: rpc_call returned error %d\n", -status);
293			switch (status) {
294			case -EPROTONOSUPPORT:
295				status = -EINVAL;
296				break;
297			case -ECONNREFUSED:
298			case -ETIMEDOUT:
299			case -ENOTCONN:
300				nlm_rebind_host(host);
301				status = -EAGAIN;
302				break;
303			case -ERESTARTSYS:
304				return signalled () ? -EINTR : status;
305			default:
306				break;
307			}
308			break;
309		} else
310		if (resp->status == nlm_lck_denied_grace_period) {
311			dprintk("lockd: server in grace period\n");
312			if (argp->reclaim) {
313				printk(KERN_WARNING
314				     "lockd: spurious grace period reject?!\n");
315				return -ENOLCK;
316			}
317		} else {
318			if (!argp->reclaim) {
319				/* We appear to be out of the grace period */
320				wake_up_all(&host->h_gracewait);
321			}
322			dprintk("lockd: server returns status %d\n",
323				ntohl(resp->status));
324			return 0;	/* Okay, call complete */
325		}
326
327in_grace_period:
328		/*
329		 * The server has rebooted and appears to be in the grace
330		 * period during which locks are only allowed to be
331		 * reclaimed.
332		 * We can only back off and try again later.
333		 */
334		status = nlm_wait_on_grace(&host->h_gracewait);
335	} while (status == 0);
336
337	return status;
338}
339
340/*
341 * Generic NLM call, async version.
342 */
343static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
344{
345	struct nlm_host	*host = req->a_host;
346	struct rpc_clnt	*clnt;
347	struct rpc_task_setup task_setup_data = {
348		.rpc_message = msg,
349		.callback_ops = tk_ops,
350		.callback_data = req,
351		.flags = RPC_TASK_ASYNC,
352	};
353
354	dprintk("lockd: call procedure %d on %s (async)\n",
355			(int)proc, host->h_name);
356
357	/* If we have no RPC client yet, create one. */
358	clnt = nlm_bind_host(host);
359	if (clnt == NULL)
360		goto out_err;
361	msg->rpc_proc = &clnt->cl_procinfo[proc];
362	task_setup_data.rpc_client = clnt;
363
364        /* bootstrap and kick off the async RPC call */
365	return rpc_run_task(&task_setup_data);
366out_err:
367	tk_ops->rpc_release(req);
368	return ERR_PTR(-ENOLCK);
369}
370
371static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
372{
373	struct rpc_task *task;
374
375	task = __nlm_async_call(req, proc, msg, tk_ops);
376	if (IS_ERR(task))
377		return PTR_ERR(task);
378	rpc_put_task(task);
379	return 0;
380}
381
382/*
383 * NLM asynchronous call.
384 */
385int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
386{
387	struct rpc_message msg = {
388		.rpc_argp	= &req->a_args,
389		.rpc_resp	= &req->a_res,
390	};
391	return nlm_do_async_call(req, proc, &msg, tk_ops);
392}
393
394int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
395{
396	struct rpc_message msg = {
397		.rpc_argp	= &req->a_res,
398	};
399	return nlm_do_async_call(req, proc, &msg, tk_ops);
400}
401
402/*
403 * NLM client asynchronous call.
404 *
405 * Note that although the calls are asynchronous, and are therefore
406 *      guaranteed to complete, we still always attempt to wait for
407 *      completion in order to be able to correctly track the lock
408 *      state.
409 */
410static int nlmclnt_async_call(const struct cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
411{
412	struct rpc_message msg = {
413		.rpc_argp	= &req->a_args,
414		.rpc_resp	= &req->a_res,
415		.rpc_cred	= cred,
416	};
417	struct rpc_task *task;
418	int err;
419
420	task = __nlm_async_call(req, proc, &msg, tk_ops);
421	if (IS_ERR(task))
422		return PTR_ERR(task);
423	err = rpc_wait_for_completion_task(task);
424	rpc_put_task(task);
425	return err;
426}
427
428/*
429 * TEST for the presence of a conflicting lock
430 */
431static int
432nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
433{
434	int	status;
435
436	status = nlmclnt_call(nfs_file_cred(fl->c.flc_file), req,
437			      NLMPROC_TEST);
438	if (status < 0)
439		goto out;
440
441	switch (req->a_res.status) {
442		case nlm_granted:
443			fl->c.flc_type = F_UNLCK;
444			break;
445		case nlm_lck_denied:
446			/*
447			 * Report the conflicting lock back to the application.
448			 */
449			fl->fl_start = req->a_res.lock.fl.fl_start;
450			fl->fl_end = req->a_res.lock.fl.fl_end;
451			fl->c.flc_type = req->a_res.lock.fl.c.flc_type;
452			fl->c.flc_pid = -req->a_res.lock.fl.c.flc_pid;
453			break;
454		default:
455			status = nlm_stat_to_errno(req->a_res.status);
456	}
457out:
458	trace_nlmclnt_test(&req->a_args.lock,
459			   (const struct sockaddr *)&req->a_host->h_addr,
460			   req->a_host->h_addrlen, req->a_res.status);
461	nlmclnt_release_call(req);
462	return status;
463}
464
465static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
466{
467	spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
468	new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state;
469	new->fl_u.nfs_fl.owner = nlmclnt_get_lockowner(fl->fl_u.nfs_fl.owner);
470	list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted);
471	spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
472}
473
474static void nlmclnt_locks_release_private(struct file_lock *fl)
475{
476	spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
477	list_del(&fl->fl_u.nfs_fl.list);
478	spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
479	nlmclnt_put_lockowner(fl->fl_u.nfs_fl.owner);
480}
481
482static const struct file_lock_operations nlmclnt_lock_ops = {
483	.fl_copy_lock = nlmclnt_locks_copy_lock,
484	.fl_release_private = nlmclnt_locks_release_private,
485};
486
487static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
488{
489	fl->fl_u.nfs_fl.state = 0;
490	fl->fl_u.nfs_fl.owner = nlmclnt_find_lockowner(host,
491						       fl->c.flc_owner);
492	INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list);
493	fl->fl_ops = &nlmclnt_lock_ops;
494}
495
496static int do_vfs_lock(struct file_lock *fl)
497{
498	return locks_lock_file_wait(fl->c.flc_file, fl);
499}
500
501/*
502 * LOCK: Try to create a lock
503 *
504 *			Programmer Harassment Alert
505 *
506 * When given a blocking lock request in a sync RPC call, the HPUX lockd
507 * will faithfully return LCK_BLOCKED but never cares to notify us when
508 * the lock could be granted. This way, our local process could hang
509 * around forever waiting for the callback.
510 *
511 *  Solution A:	Implement busy-waiting
512 *  Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
513 *
514 * For now I am implementing solution A, because I hate the idea of
515 * re-implementing lockd for a third time in two months. The async
516 * calls shouldn't be too hard to do, however.
517 *
518 * This is one of the lovely things about standards in the NFS area:
519 * they're so soft and squishy you can't really blame HP for doing this.
520 */
521static int
522nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
523{
524	const struct cred *cred = nfs_file_cred(fl->c.flc_file);
525	struct nlm_host	*host = req->a_host;
526	struct nlm_res	*resp = &req->a_res;
527	struct nlm_wait block;
528	unsigned char flags = fl->c.flc_flags;
529	unsigned char type;
530	__be32 b_status;
531	int status = -ENOLCK;
532
533	if (nsm_monitor(host) < 0)
534		goto out;
535	req->a_args.state = nsm_local_state;
536
537	fl->c.flc_flags |= FL_ACCESS;
538	status = do_vfs_lock(fl);
539	fl->c.flc_flags = flags;
540	if (status < 0)
541		goto out;
542
543	nlmclnt_prepare_block(&block, host, fl);
544again:
545	/*
546	 * Initialise resp->status to a valid non-zero value,
547	 * since 0 == nlm_lck_granted
548	 */
549	resp->status = nlm_lck_blocked;
550
551	/*
552	 * A GRANTED callback can come at any time -- even before the reply
553	 * to the LOCK request arrives, so we queue the wait before
554	 * requesting the lock.
555	 */
556	nlmclnt_queue_block(&block);
557	for (;;) {
558		/* Reboot protection */
559		fl->fl_u.nfs_fl.state = host->h_state;
560		status = nlmclnt_call(cred, req, NLMPROC_LOCK);
561		if (status < 0)
562			break;
563		/* Did a reclaimer thread notify us of a server reboot? */
564		if (resp->status == nlm_lck_denied_grace_period)
565			continue;
566		if (resp->status != nlm_lck_blocked)
567			break;
568		/* Wait on an NLM blocking lock */
569		status = nlmclnt_wait(&block, req, NLMCLNT_POLL_TIMEOUT);
570		if (status < 0)
571			break;
572		if (block.b_status != nlm_lck_blocked)
573			break;
574	}
575	b_status = nlmclnt_dequeue_block(&block);
576	if (resp->status == nlm_lck_blocked)
577		resp->status = b_status;
578
579	/* if we were interrupted while blocking, then cancel the lock request
580	 * and exit
581	 */
582	if (resp->status == nlm_lck_blocked) {
583		if (!req->a_args.block)
584			goto out_unlock;
585		if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
586			goto out;
587	}
588
589	if (resp->status == nlm_granted) {
590		down_read(&host->h_rwsem);
591		/* Check whether or not the server has rebooted */
592		if (fl->fl_u.nfs_fl.state != host->h_state) {
593			up_read(&host->h_rwsem);
594			goto again;
595		}
596		/* Ensure the resulting lock will get added to granted list */
597		fl->c.flc_flags |= FL_SLEEP;
598		if (do_vfs_lock(fl) < 0)
599			printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
600		up_read(&host->h_rwsem);
601		fl->c.flc_flags = flags;
602		status = 0;
603	}
604	if (status < 0)
605		goto out_unlock;
606	/*
607	 * EAGAIN doesn't make sense for sleeping locks, and in some
608	 * cases NLM_LCK_DENIED is returned for a permanent error.  So
609	 * turn it into an ENOLCK.
610	 */
611	if (resp->status == nlm_lck_denied && (flags & FL_SLEEP))
612		status = -ENOLCK;
613	else
614		status = nlm_stat_to_errno(resp->status);
615out:
616	trace_nlmclnt_lock(&req->a_args.lock,
617			   (const struct sockaddr *)&req->a_host->h_addr,
618			   req->a_host->h_addrlen, req->a_res.status);
619	nlmclnt_release_call(req);
620	return status;
621out_unlock:
622	/* Fatal error: ensure that we remove the lock altogether */
623	trace_nlmclnt_lock(&req->a_args.lock,
624			   (const struct sockaddr *)&req->a_host->h_addr,
625			   req->a_host->h_addrlen, req->a_res.status);
626	dprintk("lockd: lock attempt ended in fatal error.\n"
627		"       Attempting to unlock.\n");
628	type = fl->c.flc_type;
629	fl->c.flc_type = F_UNLCK;
630	down_read(&host->h_rwsem);
631	do_vfs_lock(fl);
632	up_read(&host->h_rwsem);
633	fl->c.flc_type = type;
634	fl->c.flc_flags = flags;
635	nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
636	return status;
637}
638
639/*
640 * RECLAIM: Try to reclaim a lock
641 */
642int
643nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl,
644		struct nlm_rqst *req)
645{
646	int		status;
647
648	memset(req, 0, sizeof(*req));
649	locks_init_lock(&req->a_args.lock.fl);
650	locks_init_lock(&req->a_res.lock.fl);
651	req->a_host  = host;
652
653	/* Set up the argument struct */
654	nlmclnt_setlockargs(req, fl);
655	req->a_args.reclaim = 1;
656
657	status = nlmclnt_call(nfs_file_cred(fl->c.flc_file), req,
658			      NLMPROC_LOCK);
659	if (status >= 0 && req->a_res.status == nlm_granted)
660		return 0;
661
662	printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
663				"(errno %d, status %d)\n",
664				fl->c.flc_pid,
665				status, ntohl(req->a_res.status));
666
667	/*
668	 * FIXME: This is a serious failure. We can
669	 *
670	 *  a.	Ignore the problem
671	 *  b.	Send the owning process some signal (Linux doesn't have
672	 *	SIGLOST, though...)
673	 *  c.	Retry the operation
674	 *
675	 * Until someone comes up with a simple implementation
676	 * for b or c, I'll choose option a.
677	 */
678
679	return -ENOLCK;
680}
681
682/*
683 * UNLOCK: remove an existing lock
684 */
685static int
686nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
687{
688	struct nlm_host	*host = req->a_host;
689	struct nlm_res	*resp = &req->a_res;
690	int status;
691	unsigned char flags = fl->c.flc_flags;
692
693	/*
694	 * Note: the server is supposed to either grant us the unlock
695	 * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
696	 * case, we want to unlock.
697	 */
698	fl->c.flc_flags |= FL_EXISTS;
699	down_read(&host->h_rwsem);
700	status = do_vfs_lock(fl);
701	up_read(&host->h_rwsem);
702	fl->c.flc_flags = flags;
703	if (status == -ENOENT) {
704		status = 0;
705		goto out;
706	}
707
708	refcount_inc(&req->a_count);
709	status = nlmclnt_async_call(nfs_file_cred(fl->c.flc_file), req,
710				    NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
711	if (status < 0)
712		goto out;
713
714	if (resp->status == nlm_granted)
715		goto out;
716
717	if (resp->status != nlm_lck_denied_nolocks)
718		printk("lockd: unexpected unlock status: %d\n",
719			ntohl(resp->status));
720	/* What to do now? I'm out of my depth... */
721	status = -ENOLCK;
722out:
723	trace_nlmclnt_unlock(&req->a_args.lock,
724			     (const struct sockaddr *)&req->a_host->h_addr,
725			     req->a_host->h_addrlen, req->a_res.status);
726	nlmclnt_release_call(req);
727	return status;
728}
729
730static void nlmclnt_unlock_prepare(struct rpc_task *task, void *data)
731{
732	struct nlm_rqst	*req = data;
733	const struct nlmclnt_operations *nlmclnt_ops = req->a_host->h_nlmclnt_ops;
734	bool defer_call = false;
735
736	if (nlmclnt_ops && nlmclnt_ops->nlmclnt_unlock_prepare)
737		defer_call = nlmclnt_ops->nlmclnt_unlock_prepare(task, req->a_callback_data);
738
739	if (!defer_call)
740		rpc_call_start(task);
741}
742
743static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
744{
745	struct nlm_rqst	*req = data;
746	u32 status = ntohl(req->a_res.status);
747
748	if (RPC_SIGNALLED(task))
749		goto die;
750
751	if (task->tk_status < 0) {
752		dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
753		switch (task->tk_status) {
754		case -EACCES:
755		case -EIO:
756			goto die;
757		default:
758			goto retry_rebind;
759		}
760	}
761	if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
762		rpc_delay(task, NLMCLNT_GRACE_WAIT);
763		goto retry_unlock;
764	}
765	if (status != NLM_LCK_GRANTED)
766		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
767die:
768	return;
769 retry_rebind:
770	nlm_rebind_host(req->a_host);
771 retry_unlock:
772	rpc_restart_call(task);
773}
774
775static const struct rpc_call_ops nlmclnt_unlock_ops = {
776	.rpc_call_prepare = nlmclnt_unlock_prepare,
777	.rpc_call_done = nlmclnt_unlock_callback,
778	.rpc_release = nlmclnt_rpc_release,
779};
780
781/*
782 * Cancel a blocked lock request.
783 * We always use an async RPC call for this in order not to hang a
784 * process that has been Ctrl-C'ed.
785 */
786static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl)
787{
788	struct nlm_rqst	*req;
789	int status;
790
791	dprintk("lockd: blocking lock attempt was interrupted by a signal.\n"
792		"       Attempting to cancel lock.\n");
793
794	req = nlm_alloc_call(host);
795	if (!req)
796		return -ENOMEM;
797	req->a_flags = RPC_TASK_ASYNC;
798
799	nlmclnt_setlockargs(req, fl);
800	req->a_args.block = block;
801
802	refcount_inc(&req->a_count);
803	status = nlmclnt_async_call(nfs_file_cred(fl->c.flc_file), req,
804				    NLMPROC_CANCEL, &nlmclnt_cancel_ops);
805	if (status == 0 && req->a_res.status == nlm_lck_denied)
806		status = -ENOLCK;
807	nlmclnt_release_call(req);
808	return status;
809}
810
811static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
812{
813	struct nlm_rqst	*req = data;
814	u32 status = ntohl(req->a_res.status);
815
816	if (RPC_SIGNALLED(task))
817		goto die;
818
819	if (task->tk_status < 0) {
820		dprintk("lockd: CANCEL call error %d, retrying.\n",
821					task->tk_status);
822		goto retry_cancel;
823	}
824
825	switch (status) {
826	case NLM_LCK_GRANTED:
827	case NLM_LCK_DENIED_GRACE_PERIOD:
828	case NLM_LCK_DENIED:
829		/* Everything's good */
830		break;
831	case NLM_LCK_DENIED_NOLOCKS:
832		dprintk("lockd: CANCEL failed (server has no locks)\n");
833		goto retry_cancel;
834	default:
835		printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
836			status);
837	}
838
839die:
840	return;
841
842retry_cancel:
843	/* Don't ever retry more than 3 times */
844	if (req->a_retries++ >= NLMCLNT_MAX_RETRIES)
845		goto die;
846	nlm_rebind_host(req->a_host);
847	rpc_restart_call(task);
848	rpc_delay(task, 30 * HZ);
849}
850
851static const struct rpc_call_ops nlmclnt_cancel_ops = {
852	.rpc_call_done = nlmclnt_cancel_callback,
853	.rpc_release = nlmclnt_rpc_release,
854};
855
856/*
857 * Convert an NLM status code to a generic kernel errno
858 */
859static int
860nlm_stat_to_errno(__be32 status)
861{
862	switch(ntohl(status)) {
863	case NLM_LCK_GRANTED:
864		return 0;
865	case NLM_LCK_DENIED:
866		return -EAGAIN;
867	case NLM_LCK_DENIED_NOLOCKS:
868	case NLM_LCK_DENIED_GRACE_PERIOD:
869		return -ENOLCK;
870	case NLM_LCK_BLOCKED:
871		printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
872		return -ENOLCK;
873#ifdef CONFIG_LOCKD_V4
874	case NLM_DEADLCK:
875		return -EDEADLK;
876	case NLM_ROFS:
877		return -EROFS;
878	case NLM_STALE_FH:
879		return -ESTALE;
880	case NLM_FBIG:
881		return -EOVERFLOW;
882	case NLM_FAILED:
883		return -ENOLCK;
884#endif
885	}
886	printk(KERN_NOTICE "lockd: unexpected server status %d\n",
887		 ntohl(status));
888	return -ENOLCK;
889}
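
The procedures above are reached through ordinary POSIX advisory locking on an NFS mount: nlmclnt_proc() dispatches F_GETLK to nlmclnt_test(), and F_SETLK/F_SETLKW to nlmclnt_lock() or nlmclnt_unlock(). A minimal userspace sketch of that trigger path is below; the mount point and file name are illustrative assumptions, and NFSv3 is assumed, since that is what routes byte-range locking through lockd.

/* Illustrative only: exercises the NLM client path via fcntl().
 * Assumes /mnt/nfs is an NFSv3 mount with lockd/statd running.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct flock fl = {
		.l_type   = F_WRLCK,	/* request a write lock */
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,		/* 0 = lock to end of file */
	};
	int fd = open("/mnt/nfs/testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* F_SETLKW blocks until granted: nlmclnt_proc() -> nlmclnt_lock()
	 * with a_args.block = 1, i.e. an NLM LOCK request that may wait
	 * on a GRANTED callback from the server.
	 */
	if (fcntl(fd, F_SETLKW, &fl) < 0)
		perror("fcntl(F_SETLKW)");

	/* F_UNLCK maps to nlmclnt_unlock(): the local VFS lock is dropped
	 * first, then an NLM UNLOCK is sent to the server.
	 */
	fl.l_type = F_UNLCK;
	if (fcntl(fd, F_SETLK, &fl) < 0)
		perror("fcntl(F_UNLCK)");

	close(fd);
	return 0;
}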
fs/lockd/clntproc.c (v3.1)
  1/*
  2 * linux/fs/lockd/clntproc.c
  3 *
  4 * RPC procedures for the client side NLM implementation
  5 *
  6 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
  7 */
  8
  9#include <linux/module.h>
 10#include <linux/slab.h>
 11#include <linux/types.h>
 12#include <linux/errno.h>
 13#include <linux/fs.h>
 14#include <linux/nfs_fs.h>
 15#include <linux/utsname.h>
 16#include <linux/freezer.h>
 17#include <linux/sunrpc/clnt.h>
 18#include <linux/sunrpc/svc.h>
 19#include <linux/lockd/lockd.h>
 20
 21#define NLMDBG_FACILITY		NLMDBG_CLIENT
 22#define NLMCLNT_GRACE_WAIT	(5*HZ)
 23#define NLMCLNT_POLL_TIMEOUT	(30*HZ)
 24#define NLMCLNT_MAX_RETRIES	3
 25
 26static int	nlmclnt_test(struct nlm_rqst *, struct file_lock *);
 27static int	nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
 28static int	nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
 29static int	nlm_stat_to_errno(__be32 stat);
 30static void	nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
 31static int	nlmclnt_cancel(struct nlm_host *, int , struct file_lock *);
 32
 33static const struct rpc_call_ops nlmclnt_unlock_ops;
 34static const struct rpc_call_ops nlmclnt_cancel_ops;
 35
 36/*
 37 * Cookie counter for NLM requests
 38 */
 39static atomic_t	nlm_cookie = ATOMIC_INIT(0x1234);
 40
 41void nlmclnt_next_cookie(struct nlm_cookie *c)
 42{
 43	u32	cookie = atomic_inc_return(&nlm_cookie);
 44
 45	memcpy(c->data, &cookie, 4);
 46	c->len=4;
 47}
 48
 49static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner)
 50{
 51	atomic_inc(&lockowner->count);
 52	return lockowner;
 53}
 54
 55static void nlm_put_lockowner(struct nlm_lockowner *lockowner)
 56{
 57	if (!atomic_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
 58		return;
 59	list_del(&lockowner->list);
 60	spin_unlock(&lockowner->host->h_lock);
 61	nlmclnt_release_host(lockowner->host);
 62	kfree(lockowner);
 63}
 64
 65static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
 66{
 67	struct nlm_lockowner *lockowner;
 68	list_for_each_entry(lockowner, &host->h_lockowners, list) {
 69		if (lockowner->pid == pid)
 70			return -EBUSY;
 71	}
 72	return 0;
 73}
 74
 75static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
 76{
 77	uint32_t res;
 78	do {
 79		res = host->h_pidcount++;
 80	} while (nlm_pidbusy(host, res) < 0);
 81	return res;
 82}
 83
 84static struct nlm_lockowner *__nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
 85{
 86	struct nlm_lockowner *lockowner;
 87	list_for_each_entry(lockowner, &host->h_lockowners, list) {
 88		if (lockowner->owner != owner)
 89			continue;
 90		return nlm_get_lockowner(lockowner);
 91	}
 92	return NULL;
 93}
 94
 95static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
 96{
 97	struct nlm_lockowner *res, *new = NULL;
 98
 99	spin_lock(&host->h_lock);
100	res = __nlm_find_lockowner(host, owner);
101	if (res == NULL) {
102		spin_unlock(&host->h_lock);
103		new = kmalloc(sizeof(*new), GFP_KERNEL);
104		spin_lock(&host->h_lock);
105		res = __nlm_find_lockowner(host, owner);
106		if (res == NULL && new != NULL) {
107			res = new;
108			atomic_set(&new->count, 1);
109			new->owner = owner;
110			new->pid = __nlm_alloc_pid(host);
111			new->host = nlm_get_host(host);
112			list_add(&new->list, &host->h_lockowners);
113			new = NULL;
114		}
115	}
116	spin_unlock(&host->h_lock);
117	kfree(new);
118	return res;
119}
120
121/*
122 * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
123 */
124static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
125{
126	struct nlm_args	*argp = &req->a_args;
127	struct nlm_lock	*lock = &argp->lock;
128
129	nlmclnt_next_cookie(&argp->cookie);
130	memcpy(&lock->fh, NFS_FH(fl->fl_file->f_path.dentry->d_inode), sizeof(struct nfs_fh));
131	lock->caller  = utsname()->nodename;
132	lock->oh.data = req->a_owner;
133	lock->oh.len  = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
134				(unsigned int)fl->fl_u.nfs_fl.owner->pid,
135				utsname()->nodename);
136	lock->svid = fl->fl_u.nfs_fl.owner->pid;
137	lock->fl.fl_start = fl->fl_start;
138	lock->fl.fl_end = fl->fl_end;
139	lock->fl.fl_type = fl->fl_type;
140}
141
142static void nlmclnt_release_lockargs(struct nlm_rqst *req)
143{
144	BUG_ON(req->a_args.lock.fl.fl_ops != NULL);
145}
146
147/**
148 * nlmclnt_proc - Perform a single client-side lock request
149 * @host: address of a valid nlm_host context representing the NLM server
150 * @cmd: fcntl-style file lock operation to perform
151 * @fl: address of arguments for the lock operation
152 *
153 */
154int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl)
155{
156	struct nlm_rqst		*call;
157	int			status;
158
159	nlm_get_host(host);
160	call = nlm_alloc_call(host);
161	if (call == NULL)
162		return -ENOMEM;
163
164	nlmclnt_locks_init_private(fl, host);
165	/* Set up the argument struct */
166	nlmclnt_setlockargs(call, fl);
167
168	if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
169		if (fl->fl_type != F_UNLCK) {
170			call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
171			status = nlmclnt_lock(call, fl);
172		} else
173			status = nlmclnt_unlock(call, fl);
174	} else if (IS_GETLK(cmd))
175		status = nlmclnt_test(call, fl);
176	else
177		status = -EINVAL;
178	fl->fl_ops->fl_release_private(fl);
179	fl->fl_ops = NULL;
180
181	dprintk("lockd: clnt proc returns %d\n", status);
182	return status;
183}
184EXPORT_SYMBOL_GPL(nlmclnt_proc);
185
186/*
187 * Allocate an NLM RPC call struct
188 *
189 * Note: the caller must hold a reference to host. In case of failure,
190 * this reference will be released.
191 */
192struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
193{
194	struct nlm_rqst	*call;
195
196	for(;;) {
197		call = kzalloc(sizeof(*call), GFP_KERNEL);
198		if (call != NULL) {
199			atomic_set(&call->a_count, 1);
200			locks_init_lock(&call->a_args.lock.fl);
201			locks_init_lock(&call->a_res.lock.fl);
202			call->a_host = host;
203			return call;
204		}
205		if (signalled())
206			break;
207		printk("nlm_alloc_call: failed, waiting for memory\n");
208		schedule_timeout_interruptible(5*HZ);
209	}
210	nlmclnt_release_host(host);
211	return NULL;
212}
213
214void nlmclnt_release_call(struct nlm_rqst *call)
215{
216	if (!atomic_dec_and_test(&call->a_count))
217		return;
218	nlmclnt_release_host(call->a_host);
219	nlmclnt_release_lockargs(call);
220	kfree(call);
221}
222
223static void nlmclnt_rpc_release(void *data)
224{
225	nlmclnt_release_call(data);
226}
227
228static int nlm_wait_on_grace(wait_queue_head_t *queue)
229{
230	DEFINE_WAIT(wait);
231	int status = -EINTR;
232
233	prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
234	if (!signalled ()) {
235		schedule_timeout(NLMCLNT_GRACE_WAIT);
236		try_to_freeze();
237		if (!signalled ())
238			status = 0;
239	}
240	finish_wait(queue, &wait);
241	return status;
242}
243
244/*
245 * Generic NLM call
246 */
247static int
248nlmclnt_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc)
249{
250	struct nlm_host	*host = req->a_host;
251	struct rpc_clnt	*clnt;
252	struct nlm_args	*argp = &req->a_args;
253	struct nlm_res	*resp = &req->a_res;
254	struct rpc_message msg = {
255		.rpc_argp	= argp,
256		.rpc_resp	= resp,
257		.rpc_cred	= cred,
258	};
259	int		status;
260
261	dprintk("lockd: call procedure %d on %s\n",
262			(int)proc, host->h_name);
263
264	do {
265		if (host->h_reclaiming && !argp->reclaim)
266			goto in_grace_period;
267
268		/* If we have no RPC client yet, create one. */
269		if ((clnt = nlm_bind_host(host)) == NULL)
270			return -ENOLCK;
271		msg.rpc_proc = &clnt->cl_procinfo[proc];
272
273		/* Perform the RPC call. If an error occurs, try again */
274		if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
275			dprintk("lockd: rpc_call returned error %d\n", -status);
276			switch (status) {
277			case -EPROTONOSUPPORT:
278				status = -EINVAL;
279				break;
280			case -ECONNREFUSED:
281			case -ETIMEDOUT:
282			case -ENOTCONN:
283				nlm_rebind_host(host);
284				status = -EAGAIN;
285				break;
286			case -ERESTARTSYS:
287				return signalled () ? -EINTR : status;
288			default:
289				break;
290			}
291			break;
292		} else
293		if (resp->status == nlm_lck_denied_grace_period) {
294			dprintk("lockd: server in grace period\n");
295			if (argp->reclaim) {
296				printk(KERN_WARNING
297				     "lockd: spurious grace period reject?!\n");
298				return -ENOLCK;
299			}
300		} else {
301			if (!argp->reclaim) {
302				/* We appear to be out of the grace period */
303				wake_up_all(&host->h_gracewait);
304			}
305			dprintk("lockd: server returns status %d\n",
306				ntohl(resp->status));
307			return 0;	/* Okay, call complete */
308		}
309
310in_grace_period:
311		/*
312		 * The server has rebooted and appears to be in the grace
313		 * period during which locks are only allowed to be
314		 * reclaimed.
315		 * We can only back off and try again later.
316		 */
317		status = nlm_wait_on_grace(&host->h_gracewait);
318	} while (status == 0);
319
320	return status;
321}
322
323/*
324 * Generic NLM call, async version.
325 */
326static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
327{
328	struct nlm_host	*host = req->a_host;
329	struct rpc_clnt	*clnt;
330	struct rpc_task_setup task_setup_data = {
331		.rpc_message = msg,
332		.callback_ops = tk_ops,
333		.callback_data = req,
334		.flags = RPC_TASK_ASYNC,
335	};
336
337	dprintk("lockd: call procedure %d on %s (async)\n",
338			(int)proc, host->h_name);
339
340	/* If we have no RPC client yet, create one. */
341	clnt = nlm_bind_host(host);
342	if (clnt == NULL)
343		goto out_err;
344	msg->rpc_proc = &clnt->cl_procinfo[proc];
345	task_setup_data.rpc_client = clnt;
346
347        /* bootstrap and kick off the async RPC call */
348	return rpc_run_task(&task_setup_data);
349out_err:
350	tk_ops->rpc_release(req);
351	return ERR_PTR(-ENOLCK);
352}
353
354static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
355{
356	struct rpc_task *task;
357
358	task = __nlm_async_call(req, proc, msg, tk_ops);
359	if (IS_ERR(task))
360		return PTR_ERR(task);
361	rpc_put_task(task);
362	return 0;
363}
364
365/*
366 * NLM asynchronous call.
367 */
368int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
369{
370	struct rpc_message msg = {
371		.rpc_argp	= &req->a_args,
372		.rpc_resp	= &req->a_res,
373	};
374	return nlm_do_async_call(req, proc, &msg, tk_ops);
375}
376
377int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
378{
379	struct rpc_message msg = {
380		.rpc_argp	= &req->a_res,
381	};
382	return nlm_do_async_call(req, proc, &msg, tk_ops);
383}
384
385/*
386 * NLM client asynchronous call.
387 *
388 * Note that although the calls are asynchronous, and are therefore
389 *      guaranteed to complete, we still always attempt to wait for
390 *      completion in order to be able to correctly track the lock
391 *      state.
392 */
393static int nlmclnt_async_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
394{
395	struct rpc_message msg = {
396		.rpc_argp	= &req->a_args,
397		.rpc_resp	= &req->a_res,
398		.rpc_cred	= cred,
399	};
400	struct rpc_task *task;
401	int err;
402
403	task = __nlm_async_call(req, proc, &msg, tk_ops);
404	if (IS_ERR(task))
405		return PTR_ERR(task);
406	err = rpc_wait_for_completion_task(task);
407	rpc_put_task(task);
408	return err;
409}
410
411/*
412 * TEST for the presence of a conflicting lock
413 */
414static int
415nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
416{
417	int	status;
418
419	status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST);
420	if (status < 0)
421		goto out;
422
423	switch (req->a_res.status) {
424		case nlm_granted:
425			fl->fl_type = F_UNLCK;
426			break;
427		case nlm_lck_denied:
428			/*
429			 * Report the conflicting lock back to the application.
430			 */
431			fl->fl_start = req->a_res.lock.fl.fl_start;
432			fl->fl_end = req->a_res.lock.fl.fl_end;
433			fl->fl_type = req->a_res.lock.fl.fl_type;
434			fl->fl_pid = 0;
435			break;
436		default:
437			status = nlm_stat_to_errno(req->a_res.status);
438	}
439out:
440	nlmclnt_release_call(req);
441	return status;
442}
443
444static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
445{
446	spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
447	new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state;
448	new->fl_u.nfs_fl.owner = nlm_get_lockowner(fl->fl_u.nfs_fl.owner);
449	list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted);
450	spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
451}
452
453static void nlmclnt_locks_release_private(struct file_lock *fl)
454{
455	spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
456	list_del(&fl->fl_u.nfs_fl.list);
457	spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
458	nlm_put_lockowner(fl->fl_u.nfs_fl.owner);
459}
460
461static const struct file_lock_operations nlmclnt_lock_ops = {
462	.fl_copy_lock = nlmclnt_locks_copy_lock,
463	.fl_release_private = nlmclnt_locks_release_private,
464};
465
466static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
467{
468	BUG_ON(fl->fl_ops != NULL);
469	fl->fl_u.nfs_fl.state = 0;
470	fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner);
471	INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list);
472	fl->fl_ops = &nlmclnt_lock_ops;
473}
474
475static int do_vfs_lock(struct file_lock *fl)
476{
477	int res = 0;
478	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
479		case FL_POSIX:
480			res = posix_lock_file_wait(fl->fl_file, fl);
481			break;
482		case FL_FLOCK:
483			res = flock_lock_file_wait(fl->fl_file, fl);
484			break;
485		default:
486			BUG();
487	}
488	return res;
489}
490
491/*
492 * LOCK: Try to create a lock
493 *
494 *			Programmer Harassment Alert
495 *
496 * When given a blocking lock request in a sync RPC call, the HPUX lockd
497 * will faithfully return LCK_BLOCKED but never cares to notify us when
498 * the lock could be granted. This way, our local process could hang
499 * around forever waiting for the callback.
500 *
501 *  Solution A:	Implement busy-waiting
502 *  Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
503 *
504 * For now I am implementing solution A, because I hate the idea of
505 * re-implementing lockd for a third time in two months. The async
506 * calls shouldn't be too hard to do, however.
507 *
508 * This is one of the lovely things about standards in the NFS area:
509 * they're so soft and squishy you can't really blame HP for doing this.
510 */
511static int
512nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
513{
514	struct rpc_cred *cred = nfs_file_cred(fl->fl_file);
515	struct nlm_host	*host = req->a_host;
516	struct nlm_res	*resp = &req->a_res;
517	struct nlm_wait *block = NULL;
518	unsigned char fl_flags = fl->fl_flags;
519	unsigned char fl_type;
520	int status = -ENOLCK;
521
522	if (nsm_monitor(host) < 0)
523		goto out;
524	req->a_args.state = nsm_local_state;
525
526	fl->fl_flags |= FL_ACCESS;
527	status = do_vfs_lock(fl);
528	fl->fl_flags = fl_flags;
529	if (status < 0)
530		goto out;
531
532	block = nlmclnt_prepare_block(host, fl);
533again:
534	/*
535	 * Initialise resp->status to a valid non-zero value,
536	 * since 0 == nlm_lck_granted
537	 */
538	resp->status = nlm_lck_blocked;
539	for(;;) {
540		/* Reboot protection */
541		fl->fl_u.nfs_fl.state = host->h_state;
542		status = nlmclnt_call(cred, req, NLMPROC_LOCK);
543		if (status < 0)
544			break;
545		/* Did a reclaimer thread notify us of a server reboot? */
546		if (resp->status ==  nlm_lck_denied_grace_period)
547			continue;
548		if (resp->status != nlm_lck_blocked)
549			break;
550		/* Wait on an NLM blocking lock */
551		status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
552		if (status < 0)
553			break;
554		if (resp->status != nlm_lck_blocked)
555			break;
556	}
557
558	/* if we were interrupted while blocking, then cancel the lock request
559	 * and exit
560	 */
561	if (resp->status == nlm_lck_blocked) {
562		if (!req->a_args.block)
563			goto out_unlock;
564		if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
565			goto out_unblock;
566	}
567
568	if (resp->status == nlm_granted) {
569		down_read(&host->h_rwsem);
570		/* Check whether or not the server has rebooted */
571		if (fl->fl_u.nfs_fl.state != host->h_state) {
572			up_read(&host->h_rwsem);
573			goto again;
574		}
575		/* Ensure the resulting lock will get added to granted list */
576		fl->fl_flags |= FL_SLEEP;
577		if (do_vfs_lock(fl) < 0)
578			printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
579		up_read(&host->h_rwsem);
580		fl->fl_flags = fl_flags;
581		status = 0;
582	}
583	if (status < 0)
584		goto out_unlock;
585	/*
586	 * EAGAIN doesn't make sense for sleeping locks, and in some
587	 * cases NLM_LCK_DENIED is returned for a permanent error.  So
588	 * turn it into an ENOLCK.
589	 */
590	if (resp->status == nlm_lck_denied && (fl_flags & FL_SLEEP))
591		status = -ENOLCK;
592	else
593		status = nlm_stat_to_errno(resp->status);
594out_unblock:
595	nlmclnt_finish_block(block);
596out:
597	nlmclnt_release_call(req);
598	return status;
599out_unlock:
600	/* Fatal error: ensure that we remove the lock altogether */
601	dprintk("lockd: lock attempt ended in fatal error.\n"
602		"       Attempting to unlock.\n");
603	nlmclnt_finish_block(block);
604	fl_type = fl->fl_type;
605	fl->fl_type = F_UNLCK;
606	down_read(&host->h_rwsem);
607	do_vfs_lock(fl);
608	up_read(&host->h_rwsem);
609	fl->fl_type = fl_type;
610	fl->fl_flags = fl_flags;
611	nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
612	return status;
613}
614
615/*
616 * RECLAIM: Try to reclaim a lock
617 */
618int
619nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
620{
621	struct nlm_rqst reqst, *req;
622	int		status;
623
624	req = &reqst;
625	memset(req, 0, sizeof(*req));
626	locks_init_lock(&req->a_args.lock.fl);
627	locks_init_lock(&req->a_res.lock.fl);
628	req->a_host  = host;
629	req->a_flags = 0;
630
631	/* Set up the argument struct */
632	nlmclnt_setlockargs(req, fl);
633	req->a_args.reclaim = 1;
634
635	status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK);
636	if (status >= 0 && req->a_res.status == nlm_granted)
637		return 0;
638
639	printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
640				"(errno %d, status %d)\n", fl->fl_pid,
641				status, ntohl(req->a_res.status));
642
643	/*
644	 * FIXME: This is a serious failure. We can
645	 *
646	 *  a.	Ignore the problem
647	 *  b.	Send the owning process some signal (Linux doesn't have
648	 *	SIGLOST, though...)
649	 *  c.	Retry the operation
650	 *
651	 * Until someone comes up with a simple implementation
652	 * for b or c, I'll choose option a.
653	 */
654
655	return -ENOLCK;
656}
657
658/*
659 * UNLOCK: remove an existing lock
660 */
661static int
662nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
663{
664	struct nlm_host	*host = req->a_host;
665	struct nlm_res	*resp = &req->a_res;
666	int status;
667	unsigned char fl_flags = fl->fl_flags;
668
669	/*
670	 * Note: the server is supposed to either grant us the unlock
671	 * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
672	 * case, we want to unlock.
673	 */
674	fl->fl_flags |= FL_EXISTS;
675	down_read(&host->h_rwsem);
676	status = do_vfs_lock(fl);
677	up_read(&host->h_rwsem);
678	fl->fl_flags = fl_flags;
679	if (status == -ENOENT) {
680		status = 0;
681		goto out;
682	}
683
684	atomic_inc(&req->a_count);
685	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
686			NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
687	if (status < 0)
688		goto out;
689
690	if (resp->status == nlm_granted)
691		goto out;
692
693	if (resp->status != nlm_lck_denied_nolocks)
694		printk("lockd: unexpected unlock status: %d\n",
695			ntohl(resp->status));
696	/* What to do now? I'm out of my depth... */
697	status = -ENOLCK;
698out:
699	nlmclnt_release_call(req);
700	return status;
701}
702
703static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
704{
705	struct nlm_rqst	*req = data;
706	u32 status = ntohl(req->a_res.status);
707
708	if (RPC_ASSASSINATED(task))
709		goto die;
710
711	if (task->tk_status < 0) {
712		dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
713		switch (task->tk_status) {
714		case -EACCES:
715		case -EIO:
716			goto die;
717		default:
718			goto retry_rebind;
719		}
720	}
721	if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
722		rpc_delay(task, NLMCLNT_GRACE_WAIT);
723		goto retry_unlock;
724	}
725	if (status != NLM_LCK_GRANTED)
726		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
727die:
728	return;
729 retry_rebind:
730	nlm_rebind_host(req->a_host);
731 retry_unlock:
732	rpc_restart_call(task);
733}
734
735static const struct rpc_call_ops nlmclnt_unlock_ops = {
736	.rpc_call_done = nlmclnt_unlock_callback,
737	.rpc_release = nlmclnt_rpc_release,
738};
739
740/*
741 * Cancel a blocked lock request.
742 * We always use an async RPC call for this in order not to hang a
743 * process that has been Ctrl-C'ed.
744 */
745static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl)
746{
747	struct nlm_rqst	*req;
748	int status;
749
750	dprintk("lockd: blocking lock attempt was interrupted by a signal.\n"
751		"       Attempting to cancel lock.\n");
752
753	req = nlm_alloc_call(nlm_get_host(host));
754	if (!req)
755		return -ENOMEM;
756	req->a_flags = RPC_TASK_ASYNC;
757
758	nlmclnt_setlockargs(req, fl);
759	req->a_args.block = block;
760
761	atomic_inc(&req->a_count);
762	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
763			NLMPROC_CANCEL, &nlmclnt_cancel_ops);
764	if (status == 0 && req->a_res.status == nlm_lck_denied)
765		status = -ENOLCK;
766	nlmclnt_release_call(req);
767	return status;
768}
769
770static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
771{
772	struct nlm_rqst	*req = data;
773	u32 status = ntohl(req->a_res.status);
774
775	if (RPC_ASSASSINATED(task))
776		goto die;
777
778	if (task->tk_status < 0) {
779		dprintk("lockd: CANCEL call error %d, retrying.\n",
780					task->tk_status);
781		goto retry_cancel;
782	}
783
784	dprintk("lockd: cancel status %u (task %u)\n",
785			status, task->tk_pid);
786
787	switch (status) {
788	case NLM_LCK_GRANTED:
789	case NLM_LCK_DENIED_GRACE_PERIOD:
790	case NLM_LCK_DENIED:
791		/* Everything's good */
792		break;
793	case NLM_LCK_DENIED_NOLOCKS:
794		dprintk("lockd: CANCEL failed (server has no locks)\n");
795		goto retry_cancel;
796	default:
797		printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
798			status);
799	}
800
801die:
802	return;
803
804retry_cancel:
805	/* Don't ever retry more than 3 times */
806	if (req->a_retries++ >= NLMCLNT_MAX_RETRIES)
807		goto die;
808	nlm_rebind_host(req->a_host);
809	rpc_restart_call(task);
810	rpc_delay(task, 30 * HZ);
811}
812
813static const struct rpc_call_ops nlmclnt_cancel_ops = {
814	.rpc_call_done = nlmclnt_cancel_callback,
815	.rpc_release = nlmclnt_rpc_release,
816};
817
818/*
819 * Convert an NLM status code to a generic kernel errno
820 */
821static int
822nlm_stat_to_errno(__be32 status)
823{
824	switch(ntohl(status)) {
825	case NLM_LCK_GRANTED:
826		return 0;
827	case NLM_LCK_DENIED:
828		return -EAGAIN;
829	case NLM_LCK_DENIED_NOLOCKS:
830	case NLM_LCK_DENIED_GRACE_PERIOD:
831		return -ENOLCK;
832	case NLM_LCK_BLOCKED:
833		printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
834		return -ENOLCK;
835#ifdef CONFIG_LOCKD_V4
836	case NLM_DEADLCK:
837		return -EDEADLK;
838	case NLM_ROFS:
839		return -EROFS;
840	case NLM_STALE_FH:
841		return -ESTALE;
842	case NLM_FBIG:
843		return -EOVERFLOW;
844	case NLM_FAILED:
845		return -ENOLCK;
846#endif
847	}
848	printk(KERN_NOTICE "lockd: unexpected server status %d\n",
849		 ntohl(status));
850	return -ENOLCK;
851}
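
One detail worth calling out in both versions: nlmclnt_find_lockowner() (nlm_find_lockowner() in v3.1) allocates its candidate entry with the host spinlock dropped, then retakes the lock and searches again before inserting, since another task may have created the same owner in the window, and a GFP_KERNEL allocation may not be made under a spinlock. Below is a generic, runnable userspace sketch of this allocate-then-recheck pattern; all struct names and helpers are hypothetical, not lockd API, and only the locking discipline mirrors the kernel code.

/* Userspace sketch of the "allocate outside the lock, re-check after
 * reacquiring" pattern used by nlmclnt_find_lockowner().
 */
#include <pthread.h>
#include <stdlib.h>

struct owner {
	struct owner *next;
	int key;
	int refcount;
};

struct table {
	pthread_mutex_t lock;
	struct owner *head;
};

/* Must be called with t->lock held; takes a reference on a hit, as
 * nlmclnt_get_lockowner() does in the kernel code.
 */
static struct owner *__find(struct table *t, int key)
{
	struct owner *o;

	for (o = t->head; o != NULL; o = o->next)
		if (o->key == key) {
			o->refcount++;
			return o;
		}
	return NULL;
}

struct owner *find_or_create(struct table *t, int key)
{
	struct owner *res, *new = NULL;

	pthread_mutex_lock(&t->lock);
	res = __find(t, key);			/* fast path: already present */
	if (res == NULL) {
		/* Drop the lock: in the kernel original, a GFP_KERNEL
		 * allocation may sleep and cannot be made under a spinlock. */
		pthread_mutex_unlock(&t->lock);
		new = malloc(sizeof(*new));
		pthread_mutex_lock(&t->lock);
		/* Re-check: another thread may have inserted meanwhile. */
		res = __find(t, key);
		if (res == NULL && new != NULL) {
			new->key = key;
			new->refcount = 1;
			new->next = t->head;
			t->head = new;
			res = new;
			new = NULL;	/* ownership moved into the table */
		}
	}
	pthread_mutex_unlock(&t->lock);
	free(new);	/* free(NULL) is a no-op; discards the losing copy */
	return res;
}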