fs/dlm/user.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2006-2010 Red Hat, Inc.  All rights reserved.
  4 */
  5
  6#include <linux/miscdevice.h>
  7#include <linux/init.h>
  8#include <linux/wait.h>
  9#include <linux/file.h>
 10#include <linux/fs.h>
 11#include <linux/poll.h>
 12#include <linux/signal.h>
 13#include <linux/spinlock.h>
 14#include <linux/dlm.h>
 15#include <linux/dlm_device.h>
 16#include <linux/slab.h>
 17#include <linux/sched/signal.h>
 18
 19#include <trace/events/dlm.h>
 20
 21#include "dlm_internal.h"
 22#include "lockspace.h"
 23#include "lock.h"
 24#include "lvb_table.h"
 25#include "user.h"
 26#include "ast.h"
 27#include "config.h"
 28#include "memory.h"
 29
 30static const char name_prefix[] = "dlm";
 31static const struct file_operations device_fops;
 32static atomic_t dlm_monitor_opened;
 33static int dlm_monitor_unused = 1;
 34
 35#ifdef CONFIG_COMPAT
 36
 37struct dlm_lock_params32 {
 38	__u8 mode;
 39	__u8 namelen;
 40	__u16 unused;
 41	__u32 flags;
 42	__u32 lkid;
 43	__u32 parent;
 44	__u64 xid;
 45	__u64 timeout;
 46	__u32 castparam;
 47	__u32 castaddr;
 48	__u32 bastparam;
 49	__u32 bastaddr;
 50	__u32 lksb;
 51	char lvb[DLM_USER_LVB_LEN];
 52	char name[];
 53};
 54
 55struct dlm_write_request32 {
 56	__u32 version[3];
 57	__u8 cmd;
 58	__u8 is64bit;
 59	__u8 unused[2];
 60
 61	union  {
 62		struct dlm_lock_params32 lock;
 63		struct dlm_lspace_params lspace;
 64		struct dlm_purge_params purge;
 65	} i;
 66};
 67
 68struct dlm_lksb32 {
 69	__u32 sb_status;
 70	__u32 sb_lkid;
 71	__u8 sb_flags;
 72	__u32 sb_lvbptr;
 73};
 74
 75struct dlm_lock_result32 {
 76	__u32 version[3];
 77	__u32 length;
 78	__u32 user_astaddr;
 79	__u32 user_astparam;
 80	__u32 user_lksb;
 81	struct dlm_lksb32 lksb;
 82	__u8 bast_mode;
 83	__u8 unused[3];
 84	/* Offsets may be zero if no data is present */
 85	__u32 lvb_offset;
 86};
 87
 88static void compat_input(struct dlm_write_request *kb,
 89			 struct dlm_write_request32 *kb32,
 90			 int namelen)
 91{
 92	kb->version[0] = kb32->version[0];
 93	kb->version[1] = kb32->version[1];
 94	kb->version[2] = kb32->version[2];
 95
 96	kb->cmd = kb32->cmd;
 97	kb->is64bit = kb32->is64bit;
 98	if (kb->cmd == DLM_USER_CREATE_LOCKSPACE ||
 99	    kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
100		kb->i.lspace.flags = kb32->i.lspace.flags;
101		kb->i.lspace.minor = kb32->i.lspace.minor;
102		memcpy(kb->i.lspace.name, kb32->i.lspace.name, namelen);
103	} else if (kb->cmd == DLM_USER_PURGE) {
104		kb->i.purge.nodeid = kb32->i.purge.nodeid;
105		kb->i.purge.pid = kb32->i.purge.pid;
106	} else {
107		kb->i.lock.mode = kb32->i.lock.mode;
108		kb->i.lock.namelen = kb32->i.lock.namelen;
109		kb->i.lock.flags = kb32->i.lock.flags;
110		kb->i.lock.lkid = kb32->i.lock.lkid;
111		kb->i.lock.parent = kb32->i.lock.parent;
112		kb->i.lock.xid = kb32->i.lock.xid;
113		kb->i.lock.timeout = kb32->i.lock.timeout;
114		kb->i.lock.castparam = (__user void *)(long)kb32->i.lock.castparam;
115		kb->i.lock.castaddr = (__user void *)(long)kb32->i.lock.castaddr;
116		kb->i.lock.bastparam = (__user void *)(long)kb32->i.lock.bastparam;
117		kb->i.lock.bastaddr = (__user void *)(long)kb32->i.lock.bastaddr;
118		kb->i.lock.lksb = (__user void *)(long)kb32->i.lock.lksb;
119		memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
120		memcpy(kb->i.lock.name, kb32->i.lock.name, namelen);
121	}
122}
123
124static void compat_output(struct dlm_lock_result *res,
125			  struct dlm_lock_result32 *res32)
126{
127	memset(res32, 0, sizeof(*res32));
128
129	res32->version[0] = res->version[0];
130	res32->version[1] = res->version[1];
131	res32->version[2] = res->version[2];
132
133	res32->user_astaddr = (__u32)(__force long)res->user_astaddr;
134	res32->user_astparam = (__u32)(__force long)res->user_astparam;
135	res32->user_lksb = (__u32)(__force long)res->user_lksb;
136	res32->bast_mode = res->bast_mode;
137
138	res32->lvb_offset = res->lvb_offset;
139	res32->length = res->length;
140
141	res32->lksb.sb_status = res->lksb.sb_status;
142	res32->lksb.sb_flags = res->lksb.sb_flags;
143	res32->lksb.sb_lkid = res->lksb.sb_lkid;
144	res32->lksb.sb_lvbptr = (__u32)(long)res->lksb.sb_lvbptr;
145}
146#endif
147
148/* Figure out if this lock is at the end of its life and no longer
149   available for the application to use.  The lkb still exists until
150   the final ast is read.  A lock becomes EOL in three situations:
151     1. a noqueue request fails with EAGAIN
152     2. an unlock completes with EUNLOCK
153     3. a cancel of a waiting request completes with ECANCEL/EDEADLK
154   An EOL lock needs to be removed from the process's list of locks.
155   And we can't allow any new operation on an EOL lock.  This is
156   not related to the lifetime of the lkb struct which is managed
157   entirely by refcount. */
158
159static int lkb_is_endoflife(int mode, int status)
160{
161	switch (status) {
162	case -DLM_EUNLOCK:
163		return 1;
164	case -DLM_ECANCEL:
165	case -ETIMEDOUT:
166	case -EDEADLK:
167	case -EAGAIN:
168		if (mode == DLM_LOCK_IV)
169			return 1;
170		break;
171	}
172	return 0;
173}
174
175/* we could possibly check if the cancel of an orphan has resulted in the lkb
176   being removed and then remove that lkb from the orphans list and free it */
177
178void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
179		      int status, uint32_t sbflags)
180{
181	struct dlm_ls *ls;
182	struct dlm_user_args *ua;
183	struct dlm_user_proc *proc;
184	struct dlm_callback *cb;
185	int rv, copy_lvb;
186
187	if (test_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags) ||
188	    test_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags))
189		return;
190
191	ls = lkb->lkb_resource->res_ls;
192	spin_lock_bh(&ls->ls_clear_proc_locks);
193
194	/* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
195	   can't be delivered.  For ORPHAN's, dlm_clear_proc_locks() freed
196	   lkb->ua so we can't try to use it.  This second check is necessary
197	   for cases where a completion ast is received for an operation that
198	   began before clear_proc_locks did its cancel/unlock. */
199
200	if (test_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags) ||
201	    test_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags))
202		goto out;
203
204	DLM_ASSERT(lkb->lkb_ua, dlm_print_lkb(lkb););
205	ua = lkb->lkb_ua;
206	proc = ua->proc;
207
208	if ((flags & DLM_CB_BAST) && ua->bastaddr == NULL)
209		goto out;
210
211	if ((flags & DLM_CB_CAST) && lkb_is_endoflife(mode, status))
212		set_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags);
213
214	spin_lock_bh(&proc->asts_spin);
215
216	if (!dlm_may_skip_callback(lkb, flags, mode, status, sbflags,
217				   &copy_lvb)) {
218		rv = dlm_get_cb(lkb, flags, mode, status, sbflags, &cb);
219		if (!rv) {
220			cb->copy_lvb = copy_lvb;
221			cb->ua = *ua;
222			cb->lkb_lksb = &cb->ua.lksb;
223			if (copy_lvb) {
224				memcpy(cb->lvbptr, ua->lksb.sb_lvbptr,
225				       DLM_USER_LVB_LEN);
226				cb->lkb_lksb->sb_lvbptr = cb->lvbptr;
227			}
228
229			list_add_tail(&cb->list, &proc->asts);
230			wake_up_interruptible(&proc->wait);
231		}
232	}
233	spin_unlock_bh(&proc->asts_spin);
234
235	if (test_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags)) {
236		/* N.B. spin_lock locks_spin, not asts_spin */
237		spin_lock_bh(&proc->locks_spin);
238		if (!list_empty(&lkb->lkb_ownqueue)) {
239			list_del_init(&lkb->lkb_ownqueue);
240			dlm_put_lkb(lkb);
241		}
242		spin_unlock_bh(&proc->locks_spin);
243	}
244 out:
245	spin_unlock_bh(&ls->ls_clear_proc_locks);
246}
247
248static int device_user_lock(struct dlm_user_proc *proc,
249			    struct dlm_lock_params *params)
250{
251	struct dlm_ls *ls;
252	struct dlm_user_args *ua;
253	uint32_t lkid;
254	int error = -ENOMEM;
255
256	ls = dlm_find_lockspace_local(proc->lockspace);
257	if (!ls)
258		return -ENOENT;
259
260	if (!params->castaddr || !params->lksb) {
261		error = -EINVAL;
262		goto out;
263	}
264
265	ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
266	if (!ua)
267		goto out;
268	ua->proc = proc;
269	ua->user_lksb = params->lksb;
270	ua->castparam = params->castparam;
271	ua->castaddr = params->castaddr;
272	ua->bastparam = params->bastparam;
273	ua->bastaddr = params->bastaddr;
274	ua->xid = params->xid;
275
276	if (params->flags & DLM_LKF_CONVERT) {
277		error = dlm_user_convert(ls, ua,
278					 params->mode, params->flags,
279					 params->lkid, params->lvb);
280	} else if (params->flags & DLM_LKF_ORPHAN) {
281		error = dlm_user_adopt_orphan(ls, ua,
282					 params->mode, params->flags,
283					 params->name, params->namelen,
284					 &lkid);
285		if (!error)
286			error = lkid;
287	} else {
288		error = dlm_user_request(ls, ua,
289					 params->mode, params->flags,
290					 params->name, params->namelen);
291		if (!error)
292			error = ua->lksb.sb_lkid;
293	}
294 out:
295	dlm_put_lockspace(ls);
296	return error;
297}
298
299static int device_user_unlock(struct dlm_user_proc *proc,
300			      struct dlm_lock_params *params)
301{
302	struct dlm_ls *ls;
303	struct dlm_user_args *ua;
304	int error = -ENOMEM;
305
306	ls = dlm_find_lockspace_local(proc->lockspace);
307	if (!ls)
308		return -ENOENT;
309
310	ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
311	if (!ua)
312		goto out;
313	ua->proc = proc;
314	ua->user_lksb = params->lksb;
315	ua->castparam = params->castparam;
316	ua->castaddr = params->castaddr;
317
318	if (params->flags & DLM_LKF_CANCEL)
319		error = dlm_user_cancel(ls, ua, params->flags, params->lkid);
320	else
321		error = dlm_user_unlock(ls, ua, params->flags, params->lkid,
322					params->lvb);
323 out:
324	dlm_put_lockspace(ls);
325	return error;
326}
327
328static int device_user_deadlock(struct dlm_user_proc *proc,
329				struct dlm_lock_params *params)
330{
331	struct dlm_ls *ls;
332	int error;
333
334	ls = dlm_find_lockspace_local(proc->lockspace);
335	if (!ls)
336		return -ENOENT;
337
338	error = dlm_user_deadlock(ls, params->flags, params->lkid);
339
340	dlm_put_lockspace(ls);
341	return error;
342}
343
344static int dlm_device_register(struct dlm_ls *ls, char *name)
345{
346	int error, len;
347
348	/* The device is already registered.  This happens when the
349	   lockspace is created multiple times from userspace. */
350	if (ls->ls_device.name)
351		return 0;
352
353	error = -ENOMEM;
354	len = strlen(name) + strlen(name_prefix) + 2;
355	ls->ls_device.name = kzalloc(len, GFP_NOFS);
356	if (!ls->ls_device.name)
357		goto fail;
358
359	snprintf((char *)ls->ls_device.name, len, "%s_%s", name_prefix,
360		 name);
361	ls->ls_device.fops = &device_fops;
362	ls->ls_device.minor = MISC_DYNAMIC_MINOR;
363
364	error = misc_register(&ls->ls_device);
365	if (error) {
366		kfree(ls->ls_device.name);
367		/* this has to be set to NULL
368		 * to avoid a double-free in dlm_device_deregister
369		 */
370		ls->ls_device.name = NULL;
371	}
372fail:
373	return error;
374}
375
376int dlm_device_deregister(struct dlm_ls *ls)
377{
378	/* The device is not registered.  This happens when the lockspace
379	   was never used from userspace, or when device_create_lockspace()
380	   calls dlm_release_lockspace() after the register fails. */
381	if (!ls->ls_device.name)
382		return 0;
383
384	misc_deregister(&ls->ls_device);
385	kfree(ls->ls_device.name);
386	return 0;
387}
388
389static int device_user_purge(struct dlm_user_proc *proc,
390			     struct dlm_purge_params *params)
391{
392	struct dlm_ls *ls;
393	int error;
394
395	ls = dlm_find_lockspace_local(proc->lockspace);
396	if (!ls)
397		return -ENOENT;
398
399	error = dlm_user_purge(ls, proc, params->nodeid, params->pid);
400
401	dlm_put_lockspace(ls);
402	return error;
403}
404
405static int device_create_lockspace(struct dlm_lspace_params *params)
406{
407	dlm_lockspace_t *lockspace;
408	struct dlm_ls *ls;
409	int error;
410
411	if (!capable(CAP_SYS_ADMIN))
412		return -EPERM;
413
414	error = dlm_new_user_lockspace(params->name, dlm_config.ci_cluster_name,
415				       params->flags, DLM_USER_LVB_LEN, NULL,
416				       NULL, NULL, &lockspace);
417	if (error)
418		return error;
419
420	ls = dlm_find_lockspace_local(lockspace);
421	if (!ls)
422		return -ENOENT;
423
424	error = dlm_device_register(ls, params->name);
425	dlm_put_lockspace(ls);
426
427	if (error)
428		dlm_release_lockspace(lockspace, 0);
429	else
430		error = ls->ls_device.minor;
431
432	return error;
433}
434
435static int device_remove_lockspace(struct dlm_lspace_params *params)
436{
437	dlm_lockspace_t *lockspace;
438	struct dlm_ls *ls;
439	int error, force = 0;
440
441	if (!capable(CAP_SYS_ADMIN))
442		return -EPERM;
443
444	ls = dlm_find_lockspace_device(params->minor);
445	if (!ls)
446		return -ENOENT;
447
448	if (params->flags & DLM_USER_LSFLG_FORCEFREE)
449		force = 2;
450
451	lockspace = ls;
452	dlm_put_lockspace(ls);
453
454	/* The final dlm_release_lockspace waits for references to go to
455	   zero, so all processes will need to close their device for the
456	   ls before the release will proceed.  release also calls the
457	   device_deregister above.  Converting a positive return value
458	   from release to zero means that userspace won't know when its
459	   release was the final one, but it shouldn't need to know. */
460
461	error = dlm_release_lockspace(lockspace, force);
462	if (error > 0)
463		error = 0;
464	return error;
465}
466
467/* Check the user's version matches ours */
468static int check_version(struct dlm_write_request *req)
469{
470	if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
471	    (req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
472	     req->version[1] > DLM_DEVICE_VERSION_MINOR)) {
473
474		printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
475		       "user (%d.%d.%d) kernel (%d.%d.%d)\n",
476		       current->comm,
477		       task_pid_nr(current),
478		       req->version[0],
479		       req->version[1],
480		       req->version[2],
481		       DLM_DEVICE_VERSION_MAJOR,
482		       DLM_DEVICE_VERSION_MINOR,
483		       DLM_DEVICE_VERSION_PATCH);
484		return -EINVAL;
485	}
486	return 0;
487}
488
489/*
490 * device_write
491 *
492 *   device_user_lock
493 *     dlm_user_request -> request_lock
494 *     dlm_user_convert -> convert_lock
495 *
496 *   device_user_unlock
497 *     dlm_user_unlock -> unlock_lock
498 *     dlm_user_cancel -> cancel_lock
499 *
500 *   device_create_lockspace
501 *     dlm_new_lockspace
502 *
503 *   device_remove_lockspace
504 *     dlm_release_lockspace
505 */
506
507/* a write to a lockspace device is a lock or unlock request, a write
508   to the control device is to create/remove a lockspace */
509
510static ssize_t device_write(struct file *file, const char __user *buf,
511			    size_t count, loff_t *ppos)
512{
513	struct dlm_user_proc *proc = file->private_data;
514	struct dlm_write_request *kbuf;
515	int error;
516
517#ifdef CONFIG_COMPAT
518	if (count < sizeof(struct dlm_write_request32))
519#else
520	if (count < sizeof(struct dlm_write_request))
521#endif
522		return -EINVAL;
523
524	/*
525	 * can't compare against COMPAT/dlm_write_request32 because
526	 * we don't yet know if is64bit is zero
527	 */
528	if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN)
529		return -EINVAL;
530
531	kbuf = memdup_user_nul(buf, count);
532	if (IS_ERR(kbuf))
533		return PTR_ERR(kbuf);
534
535	if (check_version(kbuf)) {
536		error = -EBADE;
537		goto out_free;
538	}
539
540#ifdef CONFIG_COMPAT
541	if (!kbuf->is64bit) {
542		struct dlm_write_request32 *k32buf;
543		int namelen = 0;
544
545		if (count > sizeof(struct dlm_write_request32))
546			namelen = count - sizeof(struct dlm_write_request32);
547
548		k32buf = (struct dlm_write_request32 *)kbuf;
549
550		/* add 1 after namelen so that the name string is terminated */
551		kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1,
552			       GFP_NOFS);
553		if (!kbuf) {
554			kfree(k32buf);
555			return -ENOMEM;
556		}
557
558		if (proc)
559			set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
560
561		compat_input(kbuf, k32buf, namelen);
562		kfree(k32buf);
563	}
564#endif
565
566	/* do we really need this? can a write happen after a close? */
567	if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) &&
568	    (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))) {
569		error = -EINVAL;
570		goto out_free;
571	}
572
573	error = -EINVAL;
574
575	switch (kbuf->cmd)
576	{
577	case DLM_USER_LOCK:
578		if (!proc) {
579			log_print("no locking on control device");
580			goto out_free;
581		}
582		error = device_user_lock(proc, &kbuf->i.lock);
583		break;
584
585	case DLM_USER_UNLOCK:
586		if (!proc) {
587			log_print("no locking on control device");
588			goto out_free;
589		}
590		error = device_user_unlock(proc, &kbuf->i.lock);
591		break;
592
593	case DLM_USER_DEADLOCK:
594		if (!proc) {
595			log_print("no locking on control device");
596			goto out_free;
597		}
598		error = device_user_deadlock(proc, &kbuf->i.lock);
599		break;
600
601	case DLM_USER_CREATE_LOCKSPACE:
602		if (proc) {
603			log_print("create/remove only on control device");
604			goto out_free;
605		}
606		error = device_create_lockspace(&kbuf->i.lspace);
607		break;
608
609	case DLM_USER_REMOVE_LOCKSPACE:
610		if (proc) {
611			log_print("create/remove only on control device");
612			goto out_free;
613		}
614		error = device_remove_lockspace(&kbuf->i.lspace);
615		break;
616
617	case DLM_USER_PURGE:
618		if (!proc) {
619			log_print("no locking on control device");
620			goto out_free;
621		}
622		error = device_user_purge(proc, &kbuf->i.purge);
623		break;
624
625	default:
626		log_print("Unknown command passed to DLM device : %d\n",
627			  kbuf->cmd);
628	}
629
630 out_free:
631	kfree(kbuf);
632	return error;
633}
634
635/* Every process that opens the lockspace device has its own "proc" structure
636   hanging off the open file that's used to keep track of locks owned by the
637   process and asts that need to be delivered to the process. */
638
639static int device_open(struct inode *inode, struct file *file)
640{
641	struct dlm_user_proc *proc;
642	struct dlm_ls *ls;
643
644	ls = dlm_find_lockspace_device(iminor(inode));
645	if (!ls)
646		return -ENOENT;
647
648	proc = kzalloc(sizeof(struct dlm_user_proc), GFP_NOFS);
649	if (!proc) {
650		dlm_put_lockspace(ls);
651		return -ENOMEM;
652	}
653
654	proc->lockspace = ls;
655	INIT_LIST_HEAD(&proc->asts);
656	INIT_LIST_HEAD(&proc->locks);
657	INIT_LIST_HEAD(&proc->unlocking);
658	spin_lock_init(&proc->asts_spin);
659	spin_lock_init(&proc->locks_spin);
660	init_waitqueue_head(&proc->wait);
661	file->private_data = proc;
662
663	return 0;
664}
665
666static int device_close(struct inode *inode, struct file *file)
667{
668	struct dlm_user_proc *proc = file->private_data;
669	struct dlm_ls *ls;
670
671	ls = dlm_find_lockspace_local(proc->lockspace);
672	if (!ls)
673		return -ENOENT;
674
675	set_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags);
676
677	dlm_clear_proc_locks(ls, proc);
678
679	/* at this point no more lkb's should exist for this lockspace,
680	   so there's no chance of dlm_user_add_ast() being called and
681	   looking for lkb->ua->proc */
682
683	kfree(proc);
684	file->private_data = NULL;
685
686	dlm_put_lockspace(ls);
687	dlm_put_lockspace(ls);  /* for the find in device_open() */
688
689	/* FIXME: AUTOFREE: if this ls is no longer used do
690	   device_remove_lockspace() */
691
692	return 0;
693}
694
695static int copy_result_to_user(struct dlm_user_args *ua, int compat,
696			       uint32_t flags, int mode, int copy_lvb,
697			       char __user *buf, size_t count)
698{
699#ifdef CONFIG_COMPAT
700	struct dlm_lock_result32 result32;
701#endif
702	struct dlm_lock_result result;
703	void *resultptr;
704	int error=0;
705	int len;
706	int struct_len;
707
708	memset(&result, 0, sizeof(struct dlm_lock_result));
709	result.version[0] = DLM_DEVICE_VERSION_MAJOR;
710	result.version[1] = DLM_DEVICE_VERSION_MINOR;
711	result.version[2] = DLM_DEVICE_VERSION_PATCH;
712	memcpy(&result.lksb, &ua->lksb, offsetof(struct dlm_lksb, sb_lvbptr));
713	result.user_lksb = ua->user_lksb;
714
715	/* FIXME: dlm1 provides for the user's bastparam/addr to not be updated
716	   in a conversion unless the conversion is successful.  See code
717	   in dlm_user_convert() for updating ua from ua_tmp.  OpenVMS, though,
718	   notes that a new blocking AST address and parameter are set even if
719	   the conversion fails, so maybe we should just do that. */
720
721	if (flags & DLM_CB_BAST) {
722		result.user_astaddr = ua->bastaddr;
723		result.user_astparam = ua->bastparam;
724		result.bast_mode = mode;
725	} else {
726		result.user_astaddr = ua->castaddr;
727		result.user_astparam = ua->castparam;
728	}
729
730#ifdef CONFIG_COMPAT
731	if (compat)
732		len = sizeof(struct dlm_lock_result32);
733	else
734#endif
735		len = sizeof(struct dlm_lock_result);
736	struct_len = len;
737
738	/* copy lvb to userspace if there is one, it's been updated, and
739	   the user buffer has space for it */
740
741	if (copy_lvb && ua->lksb.sb_lvbptr && count >= len + DLM_USER_LVB_LEN) {
742		if (copy_to_user(buf+len, ua->lksb.sb_lvbptr,
743				 DLM_USER_LVB_LEN)) {
744			error = -EFAULT;
745			goto out;
746		}
747
748		result.lvb_offset = len;
749		len += DLM_USER_LVB_LEN;
750	}
751
752	result.length = len;
753	resultptr = &result;
754#ifdef CONFIG_COMPAT
755	if (compat) {
756		compat_output(&result, &result32);
757		resultptr = &result32;
758	}
759#endif
760
761	if (copy_to_user(buf, resultptr, struct_len))
762		error = -EFAULT;
763	else
764		error = len;
765 out:
766	return error;
767}
768
769static int copy_version_to_user(char __user *buf, size_t count)
770{
771	struct dlm_device_version ver;
772
773	memset(&ver, 0, sizeof(struct dlm_device_version));
774	ver.version[0] = DLM_DEVICE_VERSION_MAJOR;
775	ver.version[1] = DLM_DEVICE_VERSION_MINOR;
776	ver.version[2] = DLM_DEVICE_VERSION_PATCH;
777
778	if (copy_to_user(buf, &ver, sizeof(struct dlm_device_version)))
779		return -EFAULT;
780	return sizeof(struct dlm_device_version);
781}
782
783/* a read returns a single ast described in a struct dlm_lock_result */
784
785static ssize_t device_read(struct file *file, char __user *buf, size_t count,
786			   loff_t *ppos)
787{
788	struct dlm_user_proc *proc = file->private_data;
789	DECLARE_WAITQUEUE(wait, current);
790	struct dlm_callback *cb;
791	int rv, ret;
792
793	if (count == sizeof(struct dlm_device_version)) {
794		rv = copy_version_to_user(buf, count);
795		return rv;
796	}
797
798	if (!proc) {
799		log_print("non-version read from control device %zu", count);
800		return -EINVAL;
801	}
802
803#ifdef CONFIG_COMPAT
804	if (count < sizeof(struct dlm_lock_result32))
805#else
806	if (count < sizeof(struct dlm_lock_result))
807#endif
808		return -EINVAL;
809
810	/* do we really need this? can a read happen after a close? */
811	if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
812		return -EINVAL;
813
814	spin_lock_bh(&proc->asts_spin);
815	if (list_empty(&proc->asts)) {
816		if (file->f_flags & O_NONBLOCK) {
817			spin_unlock_bh(&proc->asts_spin);
818			return -EAGAIN;
819		}
820
821		add_wait_queue(&proc->wait, &wait);
822
823	repeat:
824		set_current_state(TASK_INTERRUPTIBLE);
825		if (list_empty(&proc->asts) && !signal_pending(current)) {
826			spin_unlock_bh(&proc->asts_spin);
827			schedule();
828			spin_lock_bh(&proc->asts_spin);
829			goto repeat;
830		}
831		set_current_state(TASK_RUNNING);
832		remove_wait_queue(&proc->wait, &wait);
833
834		if (signal_pending(current)) {
835			spin_unlock_bh(&proc->asts_spin);
836			return -ERESTARTSYS;
837		}
838	}
839
840	/* if we empty lkb_callbacks, we don't want to unlock the spinlock
841	   without removing lkb_cb_list; so empty lkb_cb_list is always
842	   consistent with empty lkb_callbacks */
843
844	cb = list_first_entry(&proc->asts, struct dlm_callback, list);
845	list_del(&cb->list);
846	spin_unlock_bh(&proc->asts_spin);
847
848	if (cb->flags & DLM_CB_BAST) {
849		trace_dlm_bast(cb->ls_id, cb->lkb_id, cb->mode, cb->res_name,
850			       cb->res_length);
851	} else if (cb->flags & DLM_CB_CAST) {
852		cb->lkb_lksb->sb_status = cb->sb_status;
853		cb->lkb_lksb->sb_flags = cb->sb_flags;
854		trace_dlm_ast(cb->ls_id, cb->lkb_id, cb->sb_status,
855			      cb->sb_flags, cb->res_name, cb->res_length);
856	}
857
858	ret = copy_result_to_user(&cb->ua,
859				  test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
860				  cb->flags, cb->mode, cb->copy_lvb, buf, count);
861	dlm_free_cb(cb);
862	return ret;
863}
864
865static __poll_t device_poll(struct file *file, poll_table *wait)
866{
867	struct dlm_user_proc *proc = file->private_data;
868
869	poll_wait(file, &proc->wait, wait);
870
871	spin_lock_bh(&proc->asts_spin);
872	if (!list_empty(&proc->asts)) {
873		spin_unlock_bh(&proc->asts_spin);
874		return EPOLLIN | EPOLLRDNORM;
875	}
876	spin_unlock_bh(&proc->asts_spin);
877	return 0;
878}
879
880int dlm_user_daemon_available(void)
881{
882	/* dlm_controld hasn't started (or, has started, but not
883	   properly populated configfs) */
884
885	if (!dlm_our_nodeid())
886		return 0;
887
888	/* This is to deal with versions of dlm_controld that don't
889	   know about the monitor device.  We assume that if the
890	   dlm_controld was started (above), but the monitor device
891	   was never opened, that it's an old version.  dlm_controld
892	   should open the monitor device before populating configfs. */
893
894	if (dlm_monitor_unused)
895		return 1;
896
897	return atomic_read(&dlm_monitor_opened) ? 1 : 0;
898}
899
900static int ctl_device_open(struct inode *inode, struct file *file)
901{
902	file->private_data = NULL;
903	return 0;
904}
905
906static int ctl_device_close(struct inode *inode, struct file *file)
907{
908	return 0;
909}
910
911static int monitor_device_open(struct inode *inode, struct file *file)
912{
913	atomic_inc(&dlm_monitor_opened);
914	dlm_monitor_unused = 0;
915	return 0;
916}
917
918static int monitor_device_close(struct inode *inode, struct file *file)
919{
920	if (atomic_dec_and_test(&dlm_monitor_opened))
921		dlm_stop_lockspaces();
922	return 0;
923}
924
925static const struct file_operations device_fops = {
926	.open    = device_open,
927	.release = device_close,
928	.read    = device_read,
929	.write   = device_write,
930	.poll    = device_poll,
931	.owner   = THIS_MODULE,
932	.llseek  = noop_llseek,
933};
934
935static const struct file_operations ctl_device_fops = {
936	.open    = ctl_device_open,
937	.release = ctl_device_close,
938	.read    = device_read,
939	.write   = device_write,
940	.owner   = THIS_MODULE,
941	.llseek  = noop_llseek,
942};
943
944static struct miscdevice ctl_device = {
945	.name  = "dlm-control",
946	.fops  = &ctl_device_fops,
947	.minor = MISC_DYNAMIC_MINOR,
948};
949
950static const struct file_operations monitor_device_fops = {
951	.open    = monitor_device_open,
952	.release = monitor_device_close,
953	.owner   = THIS_MODULE,
954	.llseek  = noop_llseek,
955};
956
957static struct miscdevice monitor_device = {
958	.name  = "dlm-monitor",
959	.fops  = &monitor_device_fops,
960	.minor = MISC_DYNAMIC_MINOR,
961};
962
963int __init dlm_user_init(void)
964{
965	int error;
966
967	atomic_set(&dlm_monitor_opened, 0);
968
969	error = misc_register(&ctl_device);
970	if (error) {
971		log_print("misc_register failed for control device");
972		goto out;
973	}
974
975	error = misc_register(&monitor_device);
976	if (error) {
977		log_print("misc_register failed for monitor device");
978		misc_deregister(&ctl_device);
979	}
980 out:
981	return error;
982}
983
984void dlm_user_exit(void)
985{
986	misc_deregister(&ctl_device);
987	misc_deregister(&monitor_device);
988}
989
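The interface above is driven from userspace by writing a struct dlm_write_request to the control device or a per-lockspace device, and by reading a struct dlm_lock_result back. A minimal, hypothetical sketch of creating a lockspace through the control device follows; it assumes the control node appears as /dev/misc/dlm-control (the actual path depends on udev), that dlm_controld is running, and that the caller has CAP_SYS_ADMIN. Error handling is omitted.

/* hypothetical userspace example, not part of the kernel source above */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/dlm_device.h>

int main(void)
{
	char buf[sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN];
	struct dlm_write_request *req = (struct dlm_write_request *)buf;
	const char *name = "example";		/* lockspace name (made up) */
	ssize_t minor;
	int fd;

	memset(buf, 0, sizeof(buf));
	req->version[0] = DLM_DEVICE_VERSION_MAJOR;	/* checked by check_version() */
	req->version[1] = DLM_DEVICE_VERSION_MINOR;
	req->version[2] = DLM_DEVICE_VERSION_PATCH;
	req->cmd = DLM_USER_CREATE_LOCKSPACE;
	req->is64bit = (sizeof(long) == 8);
	req->i.lspace.flags = 0;
	strcpy(req->i.lspace.name, name);

	fd = open("/dev/misc/dlm-control", O_RDWR);	/* device path is an assumption */

	/* on success device_write() returns the new lockspace device minor */
	minor = write(fd, buf, sizeof(struct dlm_write_request) + strlen(name));
	printf("lockspace device minor: %zd\n", minor);

	close(fd);
	return 0;
}

On a successful create, the returned minor corresponds to the "dlm_<name>" misc device that dlm_device_register() set up, which the process then opens for lock, unlock, and AST-read operations.
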
fs/dlm/user.c (v5.9)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2006-2010 Red Hat, Inc.  All rights reserved.
   4 */
   5
   6#include <linux/miscdevice.h>
   7#include <linux/init.h>
   8#include <linux/wait.h>
   9#include <linux/file.h>
  10#include <linux/fs.h>
  11#include <linux/poll.h>
  12#include <linux/signal.h>
  13#include <linux/spinlock.h>
  14#include <linux/dlm.h>
  15#include <linux/dlm_device.h>
  16#include <linux/slab.h>
  17#include <linux/sched/signal.h>
  18
  19#include "dlm_internal.h"
  20#include "lockspace.h"
  21#include "lock.h"
  22#include "lvb_table.h"
  23#include "user.h"
  24#include "ast.h"
  25#include "config.h"
  26
  27static const char name_prefix[] = "dlm";
  28static const struct file_operations device_fops;
  29static atomic_t dlm_monitor_opened;
  30static int dlm_monitor_unused = 1;
  31
  32#ifdef CONFIG_COMPAT
  33
  34struct dlm_lock_params32 {
  35	__u8 mode;
  36	__u8 namelen;
  37	__u16 unused;
  38	__u32 flags;
  39	__u32 lkid;
  40	__u32 parent;
  41	__u64 xid;
  42	__u64 timeout;
  43	__u32 castparam;
  44	__u32 castaddr;
  45	__u32 bastparam;
  46	__u32 bastaddr;
  47	__u32 lksb;
  48	char lvb[DLM_USER_LVB_LEN];
  49	char name[];
  50};
  51
  52struct dlm_write_request32 {
  53	__u32 version[3];
  54	__u8 cmd;
  55	__u8 is64bit;
  56	__u8 unused[2];
  57
  58	union  {
  59		struct dlm_lock_params32 lock;
  60		struct dlm_lspace_params lspace;
  61		struct dlm_purge_params purge;
  62	} i;
  63};
  64
  65struct dlm_lksb32 {
  66	__u32 sb_status;
  67	__u32 sb_lkid;
  68	__u8 sb_flags;
  69	__u32 sb_lvbptr;
  70};
  71
  72struct dlm_lock_result32 {
  73	__u32 version[3];
  74	__u32 length;
  75	__u32 user_astaddr;
  76	__u32 user_astparam;
  77	__u32 user_lksb;
  78	struct dlm_lksb32 lksb;
  79	__u8 bast_mode;
  80	__u8 unused[3];
  81	/* Offsets may be zero if no data is present */
  82	__u32 lvb_offset;
  83};
  84
  85static void compat_input(struct dlm_write_request *kb,
  86			 struct dlm_write_request32 *kb32,
  87			 int namelen)
  88{
  89	kb->version[0] = kb32->version[0];
  90	kb->version[1] = kb32->version[1];
  91	kb->version[2] = kb32->version[2];
  92
  93	kb->cmd = kb32->cmd;
  94	kb->is64bit = kb32->is64bit;
  95	if (kb->cmd == DLM_USER_CREATE_LOCKSPACE ||
  96	    kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
  97		kb->i.lspace.flags = kb32->i.lspace.flags;
  98		kb->i.lspace.minor = kb32->i.lspace.minor;
  99		memcpy(kb->i.lspace.name, kb32->i.lspace.name, namelen);
 100	} else if (kb->cmd == DLM_USER_PURGE) {
 101		kb->i.purge.nodeid = kb32->i.purge.nodeid;
 102		kb->i.purge.pid = kb32->i.purge.pid;
 103	} else {
 104		kb->i.lock.mode = kb32->i.lock.mode;
 105		kb->i.lock.namelen = kb32->i.lock.namelen;
 106		kb->i.lock.flags = kb32->i.lock.flags;
 107		kb->i.lock.lkid = kb32->i.lock.lkid;
 108		kb->i.lock.parent = kb32->i.lock.parent;
 109		kb->i.lock.xid = kb32->i.lock.xid;
 110		kb->i.lock.timeout = kb32->i.lock.timeout;
 111		kb->i.lock.castparam = (void *)(long)kb32->i.lock.castparam;
 112		kb->i.lock.castaddr = (void *)(long)kb32->i.lock.castaddr;
 113		kb->i.lock.bastparam = (void *)(long)kb32->i.lock.bastparam;
 114		kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr;
 115		kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb;
 116		memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
 117		memcpy(kb->i.lock.name, kb32->i.lock.name, namelen);
 118	}
 119}
 120
 121static void compat_output(struct dlm_lock_result *res,
 122			  struct dlm_lock_result32 *res32)
 123{
 124	memset(res32, 0, sizeof(*res32));
 125
 126	res32->version[0] = res->version[0];
 127	res32->version[1] = res->version[1];
 128	res32->version[2] = res->version[2];
 129
 130	res32->user_astaddr = (__u32)(long)res->user_astaddr;
 131	res32->user_astparam = (__u32)(long)res->user_astparam;
 132	res32->user_lksb = (__u32)(long)res->user_lksb;
 133	res32->bast_mode = res->bast_mode;
 134
 135	res32->lvb_offset = res->lvb_offset;
 136	res32->length = res->length;
 137
 138	res32->lksb.sb_status = res->lksb.sb_status;
 139	res32->lksb.sb_flags = res->lksb.sb_flags;
 140	res32->lksb.sb_lkid = res->lksb.sb_lkid;
 141	res32->lksb.sb_lvbptr = (__u32)(long)res->lksb.sb_lvbptr;
 142}
 143#endif
 144
 145/* Figure out if this lock is at the end of its life and no longer
 146   available for the application to use.  The lkb still exists until
 147   the final ast is read.  A lock becomes EOL in three situations:
 148     1. a noqueue request fails with EAGAIN
 149     2. an unlock completes with EUNLOCK
 150     3. a cancel of a waiting request completes with ECANCEL/EDEADLK
 151   An EOL lock needs to be removed from the process's list of locks.
 152   And we can't allow any new operation on an EOL lock.  This is
 153   not related to the lifetime of the lkb struct which is managed
 154   entirely by refcount. */
 155
 156static int lkb_is_endoflife(int mode, int status)
 157{
 158	switch (status) {
 159	case -DLM_EUNLOCK:
 160		return 1;
 161	case -DLM_ECANCEL:
 162	case -ETIMEDOUT:
 163	case -EDEADLK:
 164	case -EAGAIN:
 165		if (mode == DLM_LOCK_IV)
 166			return 1;
 167		break;
 168	}
 169	return 0;
 170}
 171
 172/* we could possibly check if the cancel of an orphan has resulted in the lkb
 173   being removed and then remove that lkb from the orphans list and free it */
 174
 175void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
 176		      int status, uint32_t sbflags, uint64_t seq)
 177{
 178	struct dlm_ls *ls;
 179	struct dlm_user_args *ua;
 180	struct dlm_user_proc *proc;
 181	int rv;
 182
 183	if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
 184		return;
 185
 186	ls = lkb->lkb_resource->res_ls;
 187	mutex_lock(&ls->ls_clear_proc_locks);
 188
 189	/* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
 190	   can't be delivered.  For ORPHAN's, dlm_clear_proc_locks() freed
 191	   lkb->ua so we can't try to use it.  This second check is necessary
 192	   for cases where a completion ast is received for an operation that
 193	   began before clear_proc_locks did its cancel/unlock. */
 194
 195	if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
 196		goto out;
 197
 198	DLM_ASSERT(lkb->lkb_ua, dlm_print_lkb(lkb););
 199	ua = lkb->lkb_ua;
 200	proc = ua->proc;
 201
 202	if ((flags & DLM_CB_BAST) && ua->bastaddr == NULL)
 203		goto out;
 204
 205	if ((flags & DLM_CB_CAST) && lkb_is_endoflife(mode, status))
 206		lkb->lkb_flags |= DLM_IFL_ENDOFLIFE;
 207
 208	spin_lock(&proc->asts_spin);
 209
 210	rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, seq);
 211	if (rv < 0) {
 212		spin_unlock(&proc->asts_spin);
 213		goto out;
 214	}
 215
 216	if (list_empty(&lkb->lkb_cb_list)) {
 217		kref_get(&lkb->lkb_ref);
 218		list_add_tail(&lkb->lkb_cb_list, &proc->asts);
 219		wake_up_interruptible(&proc->wait);
 220	}
 221	spin_unlock(&proc->asts_spin);
 222
 223	if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
 224		/* N.B. spin_lock locks_spin, not asts_spin */
 225		spin_lock(&proc->locks_spin);
 226		if (!list_empty(&lkb->lkb_ownqueue)) {
 227			list_del_init(&lkb->lkb_ownqueue);
 228			dlm_put_lkb(lkb);
 229		}
 230		spin_unlock(&proc->locks_spin);
 231	}
 232 out:
 233	mutex_unlock(&ls->ls_clear_proc_locks);
 234}
 235
 236static int device_user_lock(struct dlm_user_proc *proc,
 237			    struct dlm_lock_params *params)
 238{
 239	struct dlm_ls *ls;
 240	struct dlm_user_args *ua;
 241	uint32_t lkid;
 242	int error = -ENOMEM;
 243
 244	ls = dlm_find_lockspace_local(proc->lockspace);
 245	if (!ls)
 246		return -ENOENT;
 247
 248	if (!params->castaddr || !params->lksb) {
 249		error = -EINVAL;
 250		goto out;
 251	}
 252
 253	ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
 254	if (!ua)
 255		goto out;
 256	ua->proc = proc;
 257	ua->user_lksb = params->lksb;
 258	ua->castparam = params->castparam;
 259	ua->castaddr = params->castaddr;
 260	ua->bastparam = params->bastparam;
 261	ua->bastaddr = params->bastaddr;
 262	ua->xid = params->xid;
 263
 264	if (params->flags & DLM_LKF_CONVERT) {
 265		error = dlm_user_convert(ls, ua,
 266				         params->mode, params->flags,
 267				         params->lkid, params->lvb,
 268					 (unsigned long) params->timeout);
 269	} else if (params->flags & DLM_LKF_ORPHAN) {
 270		error = dlm_user_adopt_orphan(ls, ua,
 271					 params->mode, params->flags,
 272					 params->name, params->namelen,
 273					 (unsigned long) params->timeout,
 274					 &lkid);
 275		if (!error)
 276			error = lkid;
 277	} else {
 278		error = dlm_user_request(ls, ua,
 279					 params->mode, params->flags,
 280					 params->name, params->namelen,
 281					 (unsigned long) params->timeout);
 282		if (!error)
 283			error = ua->lksb.sb_lkid;
 284	}
 285 out:
 286	dlm_put_lockspace(ls);
 287	return error;
 288}
 289
 290static int device_user_unlock(struct dlm_user_proc *proc,
 291			      struct dlm_lock_params *params)
 292{
 293	struct dlm_ls *ls;
 294	struct dlm_user_args *ua;
 295	int error = -ENOMEM;
 296
 297	ls = dlm_find_lockspace_local(proc->lockspace);
 298	if (!ls)
 299		return -ENOENT;
 300
 301	ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
 302	if (!ua)
 303		goto out;
 304	ua->proc = proc;
 305	ua->user_lksb = params->lksb;
 306	ua->castparam = params->castparam;
 307	ua->castaddr = params->castaddr;
 308
 309	if (params->flags & DLM_LKF_CANCEL)
 310		error = dlm_user_cancel(ls, ua, params->flags, params->lkid);
 311	else
 312		error = dlm_user_unlock(ls, ua, params->flags, params->lkid,
 313					params->lvb);
 314 out:
 315	dlm_put_lockspace(ls);
 316	return error;
 317}
 318
 319static int device_user_deadlock(struct dlm_user_proc *proc,
 320				struct dlm_lock_params *params)
 321{
 322	struct dlm_ls *ls;
 323	int error;
 324
 325	ls = dlm_find_lockspace_local(proc->lockspace);
 326	if (!ls)
 327		return -ENOENT;
 328
 329	error = dlm_user_deadlock(ls, params->flags, params->lkid);
 330
 331	dlm_put_lockspace(ls);
 332	return error;
 333}
 334
 335static int dlm_device_register(struct dlm_ls *ls, char *name)
 336{
 337	int error, len;
 338
 339	/* The device is already registered.  This happens when the
 340	   lockspace is created multiple times from userspace. */
 341	if (ls->ls_device.name)
 342		return 0;
 343
 344	error = -ENOMEM;
 345	len = strlen(name) + strlen(name_prefix) + 2;
 346	ls->ls_device.name = kzalloc(len, GFP_NOFS);
 347	if (!ls->ls_device.name)
 348		goto fail;
 349
 350	snprintf((char *)ls->ls_device.name, len, "%s_%s", name_prefix,
 351		 name);
 352	ls->ls_device.fops = &device_fops;
 353	ls->ls_device.minor = MISC_DYNAMIC_MINOR;
 354
 355	error = misc_register(&ls->ls_device);
 356	if (error) {
 357		kfree(ls->ls_device.name);
 358		/* this has to be set to NULL
 359		 * to avoid a double-free in dlm_device_deregister
 360		 */
 361		ls->ls_device.name = NULL;
 362	}
 363fail:
 364	return error;
 365}
 366
 367int dlm_device_deregister(struct dlm_ls *ls)
 368{
 369	/* The device is not registered.  This happens when the lockspace
 370	   was never used from userspace, or when device_create_lockspace()
 371	   calls dlm_release_lockspace() after the register fails. */
 372	if (!ls->ls_device.name)
 373		return 0;
 374
 375	misc_deregister(&ls->ls_device);
 376	kfree(ls->ls_device.name);
 377	return 0;
 378}
 379
 380static int device_user_purge(struct dlm_user_proc *proc,
 381			     struct dlm_purge_params *params)
 382{
 383	struct dlm_ls *ls;
 384	int error;
 385
 386	ls = dlm_find_lockspace_local(proc->lockspace);
 387	if (!ls)
 388		return -ENOENT;
 389
 390	error = dlm_user_purge(ls, proc, params->nodeid, params->pid);
 391
 392	dlm_put_lockspace(ls);
 393	return error;
 394}
 395
 396static int device_create_lockspace(struct dlm_lspace_params *params)
 397{
 398	dlm_lockspace_t *lockspace;
 399	struct dlm_ls *ls;
 400	int error;
 401
 402	if (!capable(CAP_SYS_ADMIN))
 403		return -EPERM;
 404
 405	error = dlm_new_lockspace(params->name, dlm_config.ci_cluster_name, params->flags,
 406				  DLM_USER_LVB_LEN, NULL, NULL, NULL,
 407				  &lockspace);
 408	if (error)
 409		return error;
 410
 411	ls = dlm_find_lockspace_local(lockspace);
 412	if (!ls)
 413		return -ENOENT;
 414
 415	error = dlm_device_register(ls, params->name);
 416	dlm_put_lockspace(ls);
 417
 418	if (error)
 419		dlm_release_lockspace(lockspace, 0);
 420	else
 421		error = ls->ls_device.minor;
 422
 423	return error;
 424}
 425
 426static int device_remove_lockspace(struct dlm_lspace_params *params)
 427{
 428	dlm_lockspace_t *lockspace;
 429	struct dlm_ls *ls;
 430	int error, force = 0;
 431
 432	if (!capable(CAP_SYS_ADMIN))
 433		return -EPERM;
 434
 435	ls = dlm_find_lockspace_device(params->minor);
 436	if (!ls)
 437		return -ENOENT;
 438
 439	if (params->flags & DLM_USER_LSFLG_FORCEFREE)
 440		force = 2;
 441
 442	lockspace = ls->ls_local_handle;
 443	dlm_put_lockspace(ls);
 444
 445	/* The final dlm_release_lockspace waits for references to go to
 446	   zero, so all processes will need to close their device for the
 447	   ls before the release will proceed.  release also calls the
 448	   device_deregister above.  Converting a positive return value
 449	   from release to zero means that userspace won't know when its
 450	   release was the final one, but it shouldn't need to know. */
 451
 452	error = dlm_release_lockspace(lockspace, force);
 453	if (error > 0)
 454		error = 0;
 455	return error;
 456}
 457
 458/* Check the user's version matches ours */
 459static int check_version(struct dlm_write_request *req)
 460{
 461	if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
 462	    (req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
 463	     req->version[1] > DLM_DEVICE_VERSION_MINOR)) {
 464
 465		printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
 466		       "user (%d.%d.%d) kernel (%d.%d.%d)\n",
 467		       current->comm,
 468		       task_pid_nr(current),
 469		       req->version[0],
 470		       req->version[1],
 471		       req->version[2],
 472		       DLM_DEVICE_VERSION_MAJOR,
 473		       DLM_DEVICE_VERSION_MINOR,
 474		       DLM_DEVICE_VERSION_PATCH);
 475		return -EINVAL;
 476	}
 477	return 0;
 478}
 479
 480/*
 481 * device_write
 482 *
 483 *   device_user_lock
 484 *     dlm_user_request -> request_lock
 485 *     dlm_user_convert -> convert_lock
 486 *
 487 *   device_user_unlock
 488 *     dlm_user_unlock -> unlock_lock
 489 *     dlm_user_cancel -> cancel_lock
 490 *
 491 *   device_create_lockspace
 492 *     dlm_new_lockspace
 493 *
 494 *   device_remove_lockspace
 495 *     dlm_release_lockspace
 496 */
 497
 498/* a write to a lockspace device is a lock or unlock request, a write
 499   to the control device is to create/remove a lockspace */
 500
 501static ssize_t device_write(struct file *file, const char __user *buf,
 502			    size_t count, loff_t *ppos)
 503{
 504	struct dlm_user_proc *proc = file->private_data;
 505	struct dlm_write_request *kbuf;
 506	int error;
 507
 508#ifdef CONFIG_COMPAT
 509	if (count < sizeof(struct dlm_write_request32))
 510#else
 511	if (count < sizeof(struct dlm_write_request))
 512#endif
 513		return -EINVAL;
 514
 515	/*
 516	 * can't compare against COMPAT/dlm_write_request32 because
 517	 * we don't yet know if is64bit is zero
 518	 */
 519	if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN)
 520		return -EINVAL;
 521
 522	kbuf = memdup_user_nul(buf, count);
 523	if (IS_ERR(kbuf))
 524		return PTR_ERR(kbuf);
 525
 526	if (check_version(kbuf)) {
 527		error = -EBADE;
 528		goto out_free;
 529	}
 530
 531#ifdef CONFIG_COMPAT
 532	if (!kbuf->is64bit) {
 533		struct dlm_write_request32 *k32buf;
 534		int namelen = 0;
 535
 536		if (count > sizeof(struct dlm_write_request32))
 537			namelen = count - sizeof(struct dlm_write_request32);
 538
 539		k32buf = (struct dlm_write_request32 *)kbuf;
 540
 541		/* add 1 after namelen so that the name string is terminated */
 542		kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1,
 543			       GFP_NOFS);
 544		if (!kbuf) {
 545			kfree(k32buf);
 546			return -ENOMEM;
 547		}
 548
 549		if (proc)
 550			set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
 551
 552		compat_input(kbuf, k32buf, namelen);
 553		kfree(k32buf);
 554	}
 555#endif
 556
 557	/* do we really need this? can a write happen after a close? */
 558	if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) &&
 559	    (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))) {
 560		error = -EINVAL;
 561		goto out_free;
 562	}
 563
 564	error = -EINVAL;
 565
 566	switch (kbuf->cmd)
 567	{
 568	case DLM_USER_LOCK:
 569		if (!proc) {
 570			log_print("no locking on control device");
 571			goto out_free;
 572		}
 573		error = device_user_lock(proc, &kbuf->i.lock);
 574		break;
 575
 576	case DLM_USER_UNLOCK:
 577		if (!proc) {
 578			log_print("no locking on control device");
 579			goto out_free;
 580		}
 581		error = device_user_unlock(proc, &kbuf->i.lock);
 582		break;
 583
 584	case DLM_USER_DEADLOCK:
 585		if (!proc) {
 586			log_print("no locking on control device");
 587			goto out_free;
 588		}
 589		error = device_user_deadlock(proc, &kbuf->i.lock);
 590		break;
 591
 592	case DLM_USER_CREATE_LOCKSPACE:
 593		if (proc) {
 594			log_print("create/remove only on control device");
 595			goto out_free;
 596		}
 597		error = device_create_lockspace(&kbuf->i.lspace);
 598		break;
 599
 600	case DLM_USER_REMOVE_LOCKSPACE:
 601		if (proc) {
 602			log_print("create/remove only on control device");
 603			goto out_free;
 604		}
 605		error = device_remove_lockspace(&kbuf->i.lspace);
 606		break;
 607
 608	case DLM_USER_PURGE:
 609		if (!proc) {
 610			log_print("no locking on control device");
 611			goto out_free;
 612		}
 613		error = device_user_purge(proc, &kbuf->i.purge);
 614		break;
 615
 616	default:
 617		log_print("Unknown command passed to DLM device : %d\n",
 618			  kbuf->cmd);
 619	}
 620
 621 out_free:
 622	kfree(kbuf);
 623	return error;
 624}
 625
 626/* Every process that opens the lockspace device has its own "proc" structure
 627   hanging off the open file that's used to keep track of locks owned by the
 628   process and asts that need to be delivered to the process. */
 629
 630static int device_open(struct inode *inode, struct file *file)
 631{
 632	struct dlm_user_proc *proc;
 633	struct dlm_ls *ls;
 634
 635	ls = dlm_find_lockspace_device(iminor(inode));
 636	if (!ls)
 637		return -ENOENT;
 638
 639	proc = kzalloc(sizeof(struct dlm_user_proc), GFP_NOFS);
 640	if (!proc) {
 641		dlm_put_lockspace(ls);
 642		return -ENOMEM;
 643	}
 644
 645	proc->lockspace = ls->ls_local_handle;
 646	INIT_LIST_HEAD(&proc->asts);
 647	INIT_LIST_HEAD(&proc->locks);
 648	INIT_LIST_HEAD(&proc->unlocking);
 649	spin_lock_init(&proc->asts_spin);
 650	spin_lock_init(&proc->locks_spin);
 651	init_waitqueue_head(&proc->wait);
 652	file->private_data = proc;
 653
 654	return 0;
 655}
 656
 657static int device_close(struct inode *inode, struct file *file)
 658{
 659	struct dlm_user_proc *proc = file->private_data;
 660	struct dlm_ls *ls;
 661
 662	ls = dlm_find_lockspace_local(proc->lockspace);
 663	if (!ls)
 664		return -ENOENT;
 665
 666	set_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags);
 667
 668	dlm_clear_proc_locks(ls, proc);
 669
 670	/* at this point no more lkb's should exist for this lockspace,
 671	   so there's no chance of dlm_user_add_ast() being called and
 672	   looking for lkb->ua->proc */
 673
 674	kfree(proc);
 675	file->private_data = NULL;
 676
 677	dlm_put_lockspace(ls);
 678	dlm_put_lockspace(ls);  /* for the find in device_open() */
 679
 680	/* FIXME: AUTOFREE: if this ls is no longer used do
 681	   device_remove_lockspace() */
 682
 683	return 0;
 684}
 685
 686static int copy_result_to_user(struct dlm_user_args *ua, int compat,
 687			       uint32_t flags, int mode, int copy_lvb,
 688			       char __user *buf, size_t count)
 689{
 690#ifdef CONFIG_COMPAT
 691	struct dlm_lock_result32 result32;
 692#endif
 693	struct dlm_lock_result result;
 694	void *resultptr;
 695	int error=0;
 696	int len;
 697	int struct_len;
 698
 699	memset(&result, 0, sizeof(struct dlm_lock_result));
 700	result.version[0] = DLM_DEVICE_VERSION_MAJOR;
 701	result.version[1] = DLM_DEVICE_VERSION_MINOR;
 702	result.version[2] = DLM_DEVICE_VERSION_PATCH;
 703	memcpy(&result.lksb, &ua->lksb, offsetof(struct dlm_lksb, sb_lvbptr));
 704	result.user_lksb = ua->user_lksb;
 705
 706	/* FIXME: dlm1 provides for the user's bastparam/addr to not be updated
 707	   in a conversion unless the conversion is successful.  See code
 708	   in dlm_user_convert() for updating ua from ua_tmp.  OpenVMS, though,
 709	   notes that a new blocking AST address and parameter are set even if
 710	   the conversion fails, so maybe we should just do that. */
 711
 712	if (flags & DLM_CB_BAST) {
 713		result.user_astaddr = ua->bastaddr;
 714		result.user_astparam = ua->bastparam;
 715		result.bast_mode = mode;
 716	} else {
 717		result.user_astaddr = ua->castaddr;
 718		result.user_astparam = ua->castparam;
 719	}
 720
 721#ifdef CONFIG_COMPAT
 722	if (compat)
 723		len = sizeof(struct dlm_lock_result32);
 724	else
 725#endif
 726		len = sizeof(struct dlm_lock_result);
 727	struct_len = len;
 728
 729	/* copy lvb to userspace if there is one, it's been updated, and
 730	   the user buffer has space for it */
 731
 732	if (copy_lvb && ua->lksb.sb_lvbptr && count >= len + DLM_USER_LVB_LEN) {
 733		if (copy_to_user(buf+len, ua->lksb.sb_lvbptr,
 734				 DLM_USER_LVB_LEN)) {
 735			error = -EFAULT;
 736			goto out;
 737		}
 738
 739		result.lvb_offset = len;
 740		len += DLM_USER_LVB_LEN;
 741	}
 742
 743	result.length = len;
 744	resultptr = &result;
 745#ifdef CONFIG_COMPAT
 746	if (compat) {
 747		compat_output(&result, &result32);
 748		resultptr = &result32;
 749	}
 750#endif
 751
 752	if (copy_to_user(buf, resultptr, struct_len))
 753		error = -EFAULT;
 754	else
 755		error = len;
 756 out:
 757	return error;
 758}
 759
 760static int copy_version_to_user(char __user *buf, size_t count)
 761{
 762	struct dlm_device_version ver;
 763
 764	memset(&ver, 0, sizeof(struct dlm_device_version));
 765	ver.version[0] = DLM_DEVICE_VERSION_MAJOR;
 766	ver.version[1] = DLM_DEVICE_VERSION_MINOR;
 767	ver.version[2] = DLM_DEVICE_VERSION_PATCH;
 768
 769	if (copy_to_user(buf, &ver, sizeof(struct dlm_device_version)))
 770		return -EFAULT;
 771	return sizeof(struct dlm_device_version);
 772}
 773
 774/* a read returns a single ast described in a struct dlm_lock_result */
 775
 776static ssize_t device_read(struct file *file, char __user *buf, size_t count,
 777			   loff_t *ppos)
 778{
 779	struct dlm_user_proc *proc = file->private_data;
 780	struct dlm_lkb *lkb;
 781	DECLARE_WAITQUEUE(wait, current);
 782	struct dlm_callback cb;
 783	int rv, resid, copy_lvb = 0;
 784	int old_mode, new_mode;
 785
 786	if (count == sizeof(struct dlm_device_version)) {
 787		rv = copy_version_to_user(buf, count);
 788		return rv;
 789	}
 790
 791	if (!proc) {
 792		log_print("non-version read from control device %zu", count);
 793		return -EINVAL;
 794	}
 795
 796#ifdef CONFIG_COMPAT
 797	if (count < sizeof(struct dlm_lock_result32))
 798#else
 799	if (count < sizeof(struct dlm_lock_result))
 800#endif
 801		return -EINVAL;
 802
 803 try_another:
 804
 805	/* do we really need this? can a read happen after a close? */
 806	if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
 807		return -EINVAL;
 808
 809	spin_lock(&proc->asts_spin);
 810	if (list_empty(&proc->asts)) {
 811		if (file->f_flags & O_NONBLOCK) {
 812			spin_unlock(&proc->asts_spin);
 813			return -EAGAIN;
 814		}
 815
 816		add_wait_queue(&proc->wait, &wait);
 817
 818	repeat:
 819		set_current_state(TASK_INTERRUPTIBLE);
 820		if (list_empty(&proc->asts) && !signal_pending(current)) {
 821			spin_unlock(&proc->asts_spin);
 822			schedule();
 823			spin_lock(&proc->asts_spin);
 824			goto repeat;
 825		}
 826		set_current_state(TASK_RUNNING);
 827		remove_wait_queue(&proc->wait, &wait);
 828
 829		if (signal_pending(current)) {
 830			spin_unlock(&proc->asts_spin);
 831			return -ERESTARTSYS;
 832		}
 833	}
 834
 835	/* if we empty lkb_callbacks, we don't want to unlock the spinlock
 836	   without removing lkb_cb_list; so empty lkb_cb_list is always
 837	   consistent with empty lkb_callbacks */
 838
 839	lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_cb_list);
 840
 841	/* rem_lkb_callback sets a new lkb_last_cast */
 842	old_mode = lkb->lkb_last_cast.mode;
 843
 844	rv = dlm_rem_lkb_callback(lkb->lkb_resource->res_ls, lkb, &cb, &resid);
 845	if (rv < 0) {
 846		/* this shouldn't happen; lkb should have been removed from
 847		   list when resid was zero */
 848		log_print("dlm_rem_lkb_callback empty %x", lkb->lkb_id);
 849		list_del_init(&lkb->lkb_cb_list);
 850		spin_unlock(&proc->asts_spin);
 851		/* removes ref for proc->asts, may cause lkb to be freed */
 852		dlm_put_lkb(lkb);
 853		goto try_another;
 854	}
 855	if (!resid)
 856		list_del_init(&lkb->lkb_cb_list);
 857	spin_unlock(&proc->asts_spin);
 858
 859	if (cb.flags & DLM_CB_SKIP) {
 860		/* removes ref for proc->asts, may cause lkb to be freed */
 861		if (!resid)
 862			dlm_put_lkb(lkb);
 863		goto try_another;
 864	}
 865
 866	if (cb.flags & DLM_CB_CAST) {
 867		new_mode = cb.mode;
 868
 869		if (!cb.sb_status && lkb->lkb_lksb->sb_lvbptr &&
 870		    dlm_lvb_operations[old_mode + 1][new_mode + 1])
 871			copy_lvb = 1;
 872
 873		lkb->lkb_lksb->sb_status = cb.sb_status;
 874		lkb->lkb_lksb->sb_flags = cb.sb_flags;
 875	}
 876
 877	rv = copy_result_to_user(lkb->lkb_ua,
 878				 test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
 879				 cb.flags, cb.mode, copy_lvb, buf, count);
 880
 881	/* removes ref for proc->asts, may cause lkb to be freed */
 882	if (!resid)
 883		dlm_put_lkb(lkb);
 884
 885	return rv;
 886}
 887
 888static __poll_t device_poll(struct file *file, poll_table *wait)
 889{
 890	struct dlm_user_proc *proc = file->private_data;
 891
 892	poll_wait(file, &proc->wait, wait);
 893
 894	spin_lock(&proc->asts_spin);
 895	if (!list_empty(&proc->asts)) {
 896		spin_unlock(&proc->asts_spin);
 897		return EPOLLIN | EPOLLRDNORM;
 898	}
 899	spin_unlock(&proc->asts_spin);
 900	return 0;
 901}
 902
 903int dlm_user_daemon_available(void)
 904{
 905	/* dlm_controld hasn't started (or, has started, but not
 906	   properly populated configfs) */
 907
 908	if (!dlm_our_nodeid())
 909		return 0;
 910
 911	/* This is to deal with versions of dlm_controld that don't
 912	   know about the monitor device.  We assume that if the
 913	   dlm_controld was started (above), but the monitor device
 914	   was never opened, that it's an old version.  dlm_controld
 915	   should open the monitor device before populating configfs. */
 916
 917	if (dlm_monitor_unused)
 918		return 1;
 919
 920	return atomic_read(&dlm_monitor_opened) ? 1 : 0;
 921}
 922
 923static int ctl_device_open(struct inode *inode, struct file *file)
 924{
 925	file->private_data = NULL;
 926	return 0;
 927}
 928
 929static int ctl_device_close(struct inode *inode, struct file *file)
 930{
 931	return 0;
 932}
 933
 934static int monitor_device_open(struct inode *inode, struct file *file)
 935{
 936	atomic_inc(&dlm_monitor_opened);
 937	dlm_monitor_unused = 0;
 938	return 0;
 939}
 940
 941static int monitor_device_close(struct inode *inode, struct file *file)
 942{
 943	if (atomic_dec_and_test(&dlm_monitor_opened))
 944		dlm_stop_lockspaces();
 945	return 0;
 946}
 947
 948static const struct file_operations device_fops = {
 949	.open    = device_open,
 950	.release = device_close,
 951	.read    = device_read,
 952	.write   = device_write,
 953	.poll    = device_poll,
 954	.owner   = THIS_MODULE,
 955	.llseek  = noop_llseek,
 956};
 957
 958static const struct file_operations ctl_device_fops = {
 959	.open    = ctl_device_open,
 960	.release = ctl_device_close,
 961	.read    = device_read,
 962	.write   = device_write,
 963	.owner   = THIS_MODULE,
 964	.llseek  = noop_llseek,
 965};
 966
 967static struct miscdevice ctl_device = {
 968	.name  = "dlm-control",
 969	.fops  = &ctl_device_fops,
 970	.minor = MISC_DYNAMIC_MINOR,
 971};
 972
 973static const struct file_operations monitor_device_fops = {
 974	.open    = monitor_device_open,
 975	.release = monitor_device_close,
 976	.owner   = THIS_MODULE,
 977	.llseek  = noop_llseek,
 978};
 979
 980static struct miscdevice monitor_device = {
 981	.name  = "dlm-monitor",
 982	.fops  = &monitor_device_fops,
 983	.minor = MISC_DYNAMIC_MINOR,
 984};
 985
 986int __init dlm_user_init(void)
 987{
 988	int error;
 989
 990	atomic_set(&dlm_monitor_opened, 0);
 991
 992	error = misc_register(&ctl_device);
 993	if (error) {
 994		log_print("misc_register failed for control device");
 995		goto out;
 996	}
 997
 998	error = misc_register(&monitor_device);
 999	if (error) {
1000		log_print("misc_register failed for monitor device");
1001		misc_deregister(&ctl_device);
1002	}
1003 out:
1004	return error;
1005}
1006
1007void dlm_user_exit(void)
1008{
1009	misc_deregister(&ctl_device);
1010	misc_deregister(&monitor_device);
1011}
1012
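
In both versions above, device_read() treats a read of exactly sizeof(struct dlm_device_version) as a version query and takes the copy_version_to_user() path instead of returning a struct dlm_lock_result. A hypothetical helper illustrating that query against an already-open dlm device file descriptor:

#include <unistd.h>
#include <linux/dlm_device.h>

/* hypothetical helper: fetch the kernel's DLM device interface version */
static int query_dlm_device_version(int dlm_fd, struct dlm_device_version *ver)
{
	/* any other read size is treated as a request for a dlm_lock_result */
	if (read(dlm_fd, ver, sizeof(*ver)) != sizeof(*ver))
		return -1;
	return 0;
}

Reads of any other size must be at least sizeof(struct dlm_lock_result) (or the 32-bit variant under CONFIG_COMPAT) and block until a completion or blocking AST is queued on proc->asts, unless the file was opened with O_NONBLOCK.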