fs/dlm/user.c (v6.9.4)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2006-2010 Red Hat, Inc.  All rights reserved.
   4 */
   5
   6#include <linux/miscdevice.h>
   7#include <linux/init.h>
   8#include <linux/wait.h>
   9#include <linux/file.h>
  10#include <linux/fs.h>
  11#include <linux/poll.h>
  12#include <linux/signal.h>
  13#include <linux/spinlock.h>
  14#include <linux/dlm.h>
  15#include <linux/dlm_device.h>
  16#include <linux/slab.h>
  17#include <linux/sched/signal.h>
  18
  19#include <trace/events/dlm.h>
  20
  21#include "dlm_internal.h"
  22#include "lockspace.h"
  23#include "lock.h"
  24#include "user.h"
  25#include "ast.h"
  26#include "config.h"
  27#include "memory.h"
  28
  29static const char name_prefix[] = "dlm";
  30static const struct file_operations device_fops;
  31static atomic_t dlm_monitor_opened;
  32static int dlm_monitor_unused = 1;
  33
  34#ifdef CONFIG_COMPAT
  35
  36struct dlm_lock_params32 {
  37	__u8 mode;
  38	__u8 namelen;
  39	__u16 unused;
  40	__u32 flags;
  41	__u32 lkid;
  42	__u32 parent;
  43	__u64 xid;
  44	__u64 timeout;
  45	__u32 castparam;
  46	__u32 castaddr;
  47	__u32 bastparam;
  48	__u32 bastaddr;
  49	__u32 lksb;
  50	char lvb[DLM_USER_LVB_LEN];
  51	char name[];
  52};
  53
  54struct dlm_write_request32 {
  55	__u32 version[3];
  56	__u8 cmd;
  57	__u8 is64bit;
  58	__u8 unused[2];
  59
  60	union  {
  61		struct dlm_lock_params32 lock;
  62		struct dlm_lspace_params lspace;
  63		struct dlm_purge_params purge;
  64	} i;
  65};
  66
  67struct dlm_lksb32 {
  68	__u32 sb_status;
  69	__u32 sb_lkid;
  70	__u8 sb_flags;
  71	__u32 sb_lvbptr;
  72};
  73
  74struct dlm_lock_result32 {
  75	__u32 version[3];
  76	__u32 length;
  77	__u32 user_astaddr;
  78	__u32 user_astparam;
  79	__u32 user_lksb;
  80	struct dlm_lksb32 lksb;
  81	__u8 bast_mode;
  82	__u8 unused[3];
  83	/* Offsets may be zero if no data is present */
  84	__u32 lvb_offset;
  85};
  86
  87static void compat_input(struct dlm_write_request *kb,
  88			 struct dlm_write_request32 *kb32,
  89			 int namelen)
  90{
  91	kb->version[0] = kb32->version[0];
  92	kb->version[1] = kb32->version[1];
  93	kb->version[2] = kb32->version[2];
  94
  95	kb->cmd = kb32->cmd;
  96	kb->is64bit = kb32->is64bit;
  97	if (kb->cmd == DLM_USER_CREATE_LOCKSPACE ||
  98	    kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
  99		kb->i.lspace.flags = kb32->i.lspace.flags;
 100		kb->i.lspace.minor = kb32->i.lspace.minor;
 101		memcpy(kb->i.lspace.name, kb32->i.lspace.name, namelen);
 102	} else if (kb->cmd == DLM_USER_PURGE) {
 103		kb->i.purge.nodeid = kb32->i.purge.nodeid;
 104		kb->i.purge.pid = kb32->i.purge.pid;
 105	} else {
 106		kb->i.lock.mode = kb32->i.lock.mode;
 107		kb->i.lock.namelen = kb32->i.lock.namelen;
 108		kb->i.lock.flags = kb32->i.lock.flags;
 109		kb->i.lock.lkid = kb32->i.lock.lkid;
 110		kb->i.lock.parent = kb32->i.lock.parent;
 111		kb->i.lock.xid = kb32->i.lock.xid;
 112		kb->i.lock.timeout = kb32->i.lock.timeout;
 113		kb->i.lock.castparam = (__user void *)(long)kb32->i.lock.castparam;
 114		kb->i.lock.castaddr = (__user void *)(long)kb32->i.lock.castaddr;
 115		kb->i.lock.bastparam = (__user void *)(long)kb32->i.lock.bastparam;
 116		kb->i.lock.bastaddr = (__user void *)(long)kb32->i.lock.bastaddr;
 117		kb->i.lock.lksb = (__user void *)(long)kb32->i.lock.lksb;
 118		memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
 119		memcpy(kb->i.lock.name, kb32->i.lock.name, namelen);
 120	}
 121}
 122
 123static void compat_output(struct dlm_lock_result *res,
 124			  struct dlm_lock_result32 *res32)
 125{
 126	memset(res32, 0, sizeof(*res32));
 127
 128	res32->version[0] = res->version[0];
 129	res32->version[1] = res->version[1];
 130	res32->version[2] = res->version[2];
 131
 132	res32->user_astaddr = (__u32)(__force long)res->user_astaddr;
 133	res32->user_astparam = (__u32)(__force long)res->user_astparam;
 134	res32->user_lksb = (__u32)(__force long)res->user_lksb;
 135	res32->bast_mode = res->bast_mode;
 136
 137	res32->lvb_offset = res->lvb_offset;
 138	res32->length = res->length;
 139
 140	res32->lksb.sb_status = res->lksb.sb_status;
 141	res32->lksb.sb_flags = res->lksb.sb_flags;
 142	res32->lksb.sb_lkid = res->lksb.sb_lkid;
 143	res32->lksb.sb_lvbptr = (__u32)(long)res->lksb.sb_lvbptr;
 144}
 145#endif
 146
  147/* proc->asts_spin lock must be held */
 148void dlm_purge_lkb_callbacks(struct dlm_lkb *lkb)
 149{
 150	struct dlm_callback *cb, *safe;
 151
 152	list_for_each_entry_safe(cb, safe, &lkb->lkb_callbacks, list) {
 153		list_del(&cb->list);
 154		kref_put(&cb->ref, dlm_release_callback);
 155	}
 156
 157	clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags);
 158
 159	/* invalidate */
 160	dlm_callback_set_last_ptr(&lkb->lkb_last_cast, NULL);
 161	dlm_callback_set_last_ptr(&lkb->lkb_last_cb, NULL);
 162	lkb->lkb_last_bast_mode = -1;
 163}
 164
 165/* Figure out if this lock is at the end of its life and no longer
 166   available for the application to use.  The lkb still exists until
 167   the final ast is read.  A lock becomes EOL in three situations:
 168     1. a noqueue request fails with EAGAIN
 169     2. an unlock completes with EUNLOCK
 170     3. a cancel of a waiting request completes with ECANCEL/EDEADLK
 171   An EOL lock needs to be removed from the process's list of locks.
 172   And we can't allow any new operation on an EOL lock.  This is
 173   not related to the lifetime of the lkb struct which is managed
 174   entirely by refcount. */
 175
 176static int lkb_is_endoflife(int mode, int status)
 177{
 178	switch (status) {
 179	case -DLM_EUNLOCK:
 180		return 1;
 181	case -DLM_ECANCEL:
 182	case -ETIMEDOUT:
 183	case -EDEADLK:
 184	case -EAGAIN:
 185		if (mode == DLM_LOCK_IV)
 186			return 1;
 187		break;
 188	}
 189	return 0;
 190}
 191
 192/* we could possibly check if the cancel of an orphan has resulted in the lkb
 193   being removed and then remove that lkb from the orphans list and free it */
 194
 195void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
 196		      int status, uint32_t sbflags)
 197{
 198	struct dlm_ls *ls;
 199	struct dlm_user_args *ua;
 200	struct dlm_user_proc *proc;
 201	int rv;
 202
 203	if (test_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags) ||
 204	    test_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags))
 205		return;
 206
 207	ls = lkb->lkb_resource->res_ls;
 208	spin_lock(&ls->ls_clear_proc_locks);
 209
 210	/* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
 211	   can't be delivered.  For ORPHAN's, dlm_clear_proc_locks() freed
 212	   lkb->ua so we can't try to use it.  This second check is necessary
 213	   for cases where a completion ast is received for an operation that
 214	   began before clear_proc_locks did its cancel/unlock. */
 215
 216	if (test_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags) ||
 217	    test_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags))
 218		goto out;
 219
 220	DLM_ASSERT(lkb->lkb_ua, dlm_print_lkb(lkb););
 221	ua = lkb->lkb_ua;
 222	proc = ua->proc;
 223
 224	if ((flags & DLM_CB_BAST) && ua->bastaddr == NULL)
 225		goto out;
 226
 227	if ((flags & DLM_CB_CAST) && lkb_is_endoflife(mode, status))
 228		set_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags);
 229
 230	spin_lock(&proc->asts_spin);
 231
 232	rv = dlm_enqueue_lkb_callback(lkb, flags, mode, status, sbflags);
 233	switch (rv) {
 234	case DLM_ENQUEUE_CALLBACK_FAILURE:
 235		spin_unlock(&proc->asts_spin);
 236		WARN_ON_ONCE(1);
 237		goto out;
 238	case DLM_ENQUEUE_CALLBACK_NEED_SCHED:
 239		kref_get(&lkb->lkb_ref);
 240		list_add_tail(&lkb->lkb_cb_list, &proc->asts);
 241		wake_up_interruptible(&proc->wait);
 242		break;
 243	case DLM_ENQUEUE_CALLBACK_SUCCESS:
 244		break;
 245	default:
 246		WARN_ON_ONCE(1);
 247		break;
 248	}
 249	spin_unlock(&proc->asts_spin);
 250
 251	if (test_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags)) {
 252		/* N.B. spin_lock locks_spin, not asts_spin */
 253		spin_lock(&proc->locks_spin);
 254		if (!list_empty(&lkb->lkb_ownqueue)) {
 255			list_del_init(&lkb->lkb_ownqueue);
 256			dlm_put_lkb(lkb);
 257		}
 258		spin_unlock(&proc->locks_spin);
 259	}
 260 out:
 261	spin_unlock(&ls->ls_clear_proc_locks);
 262}
 263
 264static int device_user_lock(struct dlm_user_proc *proc,
 265			    struct dlm_lock_params *params)
 266{
 267	struct dlm_ls *ls;
 268	struct dlm_user_args *ua;
 269	uint32_t lkid;
 270	int error = -ENOMEM;
 271
 272	ls = dlm_find_lockspace_local(proc->lockspace);
 273	if (!ls)
 274		return -ENOENT;
 275
 276	if (!params->castaddr || !params->lksb) {
 277		error = -EINVAL;
 278		goto out;
 279	}
 280
 281	ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
 282	if (!ua)
 283		goto out;
 284	ua->proc = proc;
 285	ua->user_lksb = params->lksb;
 286	ua->castparam = params->castparam;
 287	ua->castaddr = params->castaddr;
 288	ua->bastparam = params->bastparam;
 289	ua->bastaddr = params->bastaddr;
 290	ua->xid = params->xid;
 291
 292	if (params->flags & DLM_LKF_CONVERT) {
 293		error = dlm_user_convert(ls, ua,
 294					 params->mode, params->flags,
 295					 params->lkid, params->lvb);
 296	} else if (params->flags & DLM_LKF_ORPHAN) {
 297		error = dlm_user_adopt_orphan(ls, ua,
 298					 params->mode, params->flags,
 299					 params->name, params->namelen,
 300					 &lkid);
 301		if (!error)
 302			error = lkid;
 303	} else {
 304		error = dlm_user_request(ls, ua,
 305					 params->mode, params->flags,
 306					 params->name, params->namelen);
 307		if (!error)
 308			error = ua->lksb.sb_lkid;
 309	}
 310 out:
 311	dlm_put_lockspace(ls);
 312	return error;
 313}
 314
 315static int device_user_unlock(struct dlm_user_proc *proc,
 316			      struct dlm_lock_params *params)
 317{
 318	struct dlm_ls *ls;
 319	struct dlm_user_args *ua;
 320	int error = -ENOMEM;
 321
 322	ls = dlm_find_lockspace_local(proc->lockspace);
 323	if (!ls)
 324		return -ENOENT;
 325
 326	ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
 327	if (!ua)
 328		goto out;
 329	ua->proc = proc;
 330	ua->user_lksb = params->lksb;
 331	ua->castparam = params->castparam;
 332	ua->castaddr = params->castaddr;
 333
 334	if (params->flags & DLM_LKF_CANCEL)
 335		error = dlm_user_cancel(ls, ua, params->flags, params->lkid);
 336	else
 337		error = dlm_user_unlock(ls, ua, params->flags, params->lkid,
 338					params->lvb);
 339 out:
 340	dlm_put_lockspace(ls);
 341	return error;
 342}
 343
 344static int device_user_deadlock(struct dlm_user_proc *proc,
 345				struct dlm_lock_params *params)
 346{
 347	struct dlm_ls *ls;
 348	int error;
 349
 350	ls = dlm_find_lockspace_local(proc->lockspace);
 351	if (!ls)
 352		return -ENOENT;
 353
 354	error = dlm_user_deadlock(ls, params->flags, params->lkid);
 355
 356	dlm_put_lockspace(ls);
 357	return error;
 358}
 359
 360static int dlm_device_register(struct dlm_ls *ls, char *name)
 361{
 362	int error, len;
 363
 364	/* The device is already registered.  This happens when the
 365	   lockspace is created multiple times from userspace. */
 366	if (ls->ls_device.name)
 367		return 0;
 368
 369	error = -ENOMEM;
 370	len = strlen(name) + strlen(name_prefix) + 2;
 371	ls->ls_device.name = kzalloc(len, GFP_NOFS);
 372	if (!ls->ls_device.name)
 373		goto fail;
 374
 375	snprintf((char *)ls->ls_device.name, len, "%s_%s", name_prefix,
 376		 name);
 377	ls->ls_device.fops = &device_fops;
 378	ls->ls_device.minor = MISC_DYNAMIC_MINOR;
 379
 380	error = misc_register(&ls->ls_device);
 381	if (error) {
 382		kfree(ls->ls_device.name);
 383		/* this has to be set to NULL
 384		 * to avoid a double-free in dlm_device_deregister
 385		 */
 386		ls->ls_device.name = NULL;
 387	}
 388fail:
 389	return error;
 390}
 391
 392int dlm_device_deregister(struct dlm_ls *ls)
 393{
 394	/* The device is not registered.  This happens when the lockspace
 395	   was never used from userspace, or when device_create_lockspace()
 396	   calls dlm_release_lockspace() after the register fails. */
 397	if (!ls->ls_device.name)
 398		return 0;
 399
 400	misc_deregister(&ls->ls_device);
 401	kfree(ls->ls_device.name);
 402	return 0;
 403}
 404
 405static int device_user_purge(struct dlm_user_proc *proc,
 406			     struct dlm_purge_params *params)
 407{
 408	struct dlm_ls *ls;
 409	int error;
 410
 411	ls = dlm_find_lockspace_local(proc->lockspace);
 412	if (!ls)
 413		return -ENOENT;
 414
 415	error = dlm_user_purge(ls, proc, params->nodeid, params->pid);
 416
 417	dlm_put_lockspace(ls);
 418	return error;
 419}
 420
 421static int device_create_lockspace(struct dlm_lspace_params *params)
 422{
 423	dlm_lockspace_t *lockspace;
 424	struct dlm_ls *ls;
 425	int error;
 426
 427	if (!capable(CAP_SYS_ADMIN))
 428		return -EPERM;
 429
 430	error = dlm_new_user_lockspace(params->name, dlm_config.ci_cluster_name,
 431				       params->flags, DLM_USER_LVB_LEN, NULL,
 432				       NULL, NULL, &lockspace);
 433	if (error)
 434		return error;
 435
 436	ls = dlm_find_lockspace_local(lockspace);
 437	if (!ls)
 438		return -ENOENT;
 439
 440	error = dlm_device_register(ls, params->name);
 441	dlm_put_lockspace(ls);
 442
 443	if (error)
 444		dlm_release_lockspace(lockspace, 0);
 445	else
 446		error = ls->ls_device.minor;
 447
 448	return error;
 449}
 450
 451static int device_remove_lockspace(struct dlm_lspace_params *params)
 452{
 453	dlm_lockspace_t *lockspace;
 454	struct dlm_ls *ls;
 455	int error, force = 0;
 456
 457	if (!capable(CAP_SYS_ADMIN))
 458		return -EPERM;
 459
 460	ls = dlm_find_lockspace_device(params->minor);
 461	if (!ls)
 462		return -ENOENT;
 463
 464	if (params->flags & DLM_USER_LSFLG_FORCEFREE)
 465		force = 2;
 466
 467	lockspace = ls->ls_local_handle;
 468	dlm_put_lockspace(ls);
 469
 470	/* The final dlm_release_lockspace waits for references to go to
 471	   zero, so all processes will need to close their device for the
 472	   ls before the release will proceed.  release also calls the
 473	   device_deregister above.  Converting a positive return value
 474	   from release to zero means that userspace won't know when its
 475	   release was the final one, but it shouldn't need to know. */
 476
 477	error = dlm_release_lockspace(lockspace, force);
 478	if (error > 0)
 479		error = 0;
 480	return error;
 481}
 482
 483/* Check the user's version matches ours */
 484static int check_version(struct dlm_write_request *req)
 485{
 486	if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
 487	    (req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
 488	     req->version[1] > DLM_DEVICE_VERSION_MINOR)) {
 489
 490		printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
 491		       "user (%d.%d.%d) kernel (%d.%d.%d)\n",
 492		       current->comm,
 493		       task_pid_nr(current),
 494		       req->version[0],
 495		       req->version[1],
 496		       req->version[2],
 497		       DLM_DEVICE_VERSION_MAJOR,
 498		       DLM_DEVICE_VERSION_MINOR,
 499		       DLM_DEVICE_VERSION_PATCH);
 500		return -EINVAL;
 501	}
 502	return 0;
 503}
 504
 505/*
 506 * device_write
 507 *
 508 *   device_user_lock
 509 *     dlm_user_request -> request_lock
 510 *     dlm_user_convert -> convert_lock
 511 *
 512 *   device_user_unlock
 513 *     dlm_user_unlock -> unlock_lock
 514 *     dlm_user_cancel -> cancel_lock
 515 *
 516 *   device_create_lockspace
 517 *     dlm_new_lockspace
 518 *
 519 *   device_remove_lockspace
 520 *     dlm_release_lockspace
 521 */
 522
 523/* a write to a lockspace device is a lock or unlock request, a write
 524   to the control device is to create/remove a lockspace */
 525
 526static ssize_t device_write(struct file *file, const char __user *buf,
 527			    size_t count, loff_t *ppos)
 528{
 529	struct dlm_user_proc *proc = file->private_data;
 530	struct dlm_write_request *kbuf;
 531	int error;
 532
 533#ifdef CONFIG_COMPAT
 534	if (count < sizeof(struct dlm_write_request32))
 535#else
 536	if (count < sizeof(struct dlm_write_request))
 537#endif
 538		return -EINVAL;
 539
 540	/*
 541	 * can't compare against COMPAT/dlm_write_request32 because
 542	 * we don't yet know if is64bit is zero
 543	 */
 544	if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN)
 545		return -EINVAL;
 546
 547	kbuf = memdup_user_nul(buf, count);
 548	if (IS_ERR(kbuf))
 549		return PTR_ERR(kbuf);
 550
 551	if (check_version(kbuf)) {
 552		error = -EBADE;
 553		goto out_free;
 554	}
 555
 556#ifdef CONFIG_COMPAT
 557	if (!kbuf->is64bit) {
 558		struct dlm_write_request32 *k32buf;
 559		int namelen = 0;
 560
 561		if (count > sizeof(struct dlm_write_request32))
 562			namelen = count - sizeof(struct dlm_write_request32);
 563
 564		k32buf = (struct dlm_write_request32 *)kbuf;
 565
 566		/* add 1 after namelen so that the name string is terminated */
 567		kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1,
 568			       GFP_NOFS);
 569		if (!kbuf) {
 570			kfree(k32buf);
 571			return -ENOMEM;
 572		}
 573
 574		if (proc)
 575			set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
 576
 577		compat_input(kbuf, k32buf, namelen);
 578		kfree(k32buf);
 579	}
 580#endif
 581
 582	/* do we really need this? can a write happen after a close? */
 583	if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) &&
 584	    (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))) {
 585		error = -EINVAL;
 586		goto out_free;
 587	}
 588
 589	error = -EINVAL;
 590
 591	switch (kbuf->cmd)
 592	{
 593	case DLM_USER_LOCK:
 594		if (!proc) {
 595			log_print("no locking on control device");
 596			goto out_free;
 597		}
 598		error = device_user_lock(proc, &kbuf->i.lock);
 599		break;
 600
 601	case DLM_USER_UNLOCK:
 602		if (!proc) {
 603			log_print("no locking on control device");
 604			goto out_free;
 605		}
 606		error = device_user_unlock(proc, &kbuf->i.lock);
 607		break;
 608
 609	case DLM_USER_DEADLOCK:
 610		if (!proc) {
 611			log_print("no locking on control device");
 612			goto out_free;
 613		}
 614		error = device_user_deadlock(proc, &kbuf->i.lock);
 615		break;
 616
 617	case DLM_USER_CREATE_LOCKSPACE:
 618		if (proc) {
 619			log_print("create/remove only on control device");
 620			goto out_free;
 621		}
 622		error = device_create_lockspace(&kbuf->i.lspace);
 623		break;
 624
 625	case DLM_USER_REMOVE_LOCKSPACE:
 626		if (proc) {
 627			log_print("create/remove only on control device");
 628			goto out_free;
 629		}
 630		error = device_remove_lockspace(&kbuf->i.lspace);
 631		break;
 632
 633	case DLM_USER_PURGE:
 634		if (!proc) {
 635			log_print("no locking on control device");
 636			goto out_free;
 637		}
 638		error = device_user_purge(proc, &kbuf->i.purge);
 639		break;
 640
 641	default:
 642		log_print("Unknown command passed to DLM device : %d\n",
 643			  kbuf->cmd);
 644	}
 645
 646 out_free:
 647	kfree(kbuf);
 648	return error;
 649}
 650
 651/* Every process that opens the lockspace device has its own "proc" structure
 652   hanging off the open file that's used to keep track of locks owned by the
 653   process and asts that need to be delivered to the process. */
 654
 655static int device_open(struct inode *inode, struct file *file)
 656{
 657	struct dlm_user_proc *proc;
 658	struct dlm_ls *ls;
 659
 660	ls = dlm_find_lockspace_device(iminor(inode));
 661	if (!ls)
 662		return -ENOENT;
 663
 664	proc = kzalloc(sizeof(struct dlm_user_proc), GFP_NOFS);
 665	if (!proc) {
 666		dlm_put_lockspace(ls);
 667		return -ENOMEM;
 668	}
 669
 670	proc->lockspace = ls->ls_local_handle;
 671	INIT_LIST_HEAD(&proc->asts);
 672	INIT_LIST_HEAD(&proc->locks);
 673	INIT_LIST_HEAD(&proc->unlocking);
 674	spin_lock_init(&proc->asts_spin);
 675	spin_lock_init(&proc->locks_spin);
 676	init_waitqueue_head(&proc->wait);
 677	file->private_data = proc;
 678
 679	return 0;
 680}
 681
 682static int device_close(struct inode *inode, struct file *file)
 683{
 684	struct dlm_user_proc *proc = file->private_data;
 685	struct dlm_ls *ls;
 686
 687	ls = dlm_find_lockspace_local(proc->lockspace);
 688	if (!ls)
 689		return -ENOENT;
 690
 691	set_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags);
 692
 693	dlm_clear_proc_locks(ls, proc);
 694
 695	/* at this point no more lkb's should exist for this lockspace,
 696	   so there's no chance of dlm_user_add_ast() being called and
 697	   looking for lkb->ua->proc */
 698
 699	kfree(proc);
 700	file->private_data = NULL;
 701
 702	dlm_put_lockspace(ls);
 703	dlm_put_lockspace(ls);  /* for the find in device_open() */
 704
 705	/* FIXME: AUTOFREE: if this ls is no longer used do
 706	   device_remove_lockspace() */
 707
 708	return 0;
 709}
 710
 711static int copy_result_to_user(struct dlm_user_args *ua, int compat,
 712			       uint32_t flags, int mode, int copy_lvb,
 713			       char __user *buf, size_t count)
 714{
 715#ifdef CONFIG_COMPAT
 716	struct dlm_lock_result32 result32;
 717#endif
 718	struct dlm_lock_result result;
 719	void *resultptr;
 720	int error=0;
 721	int len;
 722	int struct_len;
 723
 724	memset(&result, 0, sizeof(struct dlm_lock_result));
 725	result.version[0] = DLM_DEVICE_VERSION_MAJOR;
 726	result.version[1] = DLM_DEVICE_VERSION_MINOR;
 727	result.version[2] = DLM_DEVICE_VERSION_PATCH;
 728	memcpy(&result.lksb, &ua->lksb, offsetof(struct dlm_lksb, sb_lvbptr));
 729	result.user_lksb = ua->user_lksb;
 730
 731	/* FIXME: dlm1 provides for the user's bastparam/addr to not be updated
 732	   in a conversion unless the conversion is successful.  See code
 733	   in dlm_user_convert() for updating ua from ua_tmp.  OpenVMS, though,
 734	   notes that a new blocking AST address and parameter are set even if
 735	   the conversion fails, so maybe we should just do that. */
 736
 737	if (flags & DLM_CB_BAST) {
 738		result.user_astaddr = ua->bastaddr;
 739		result.user_astparam = ua->bastparam;
 740		result.bast_mode = mode;
 741	} else {
 742		result.user_astaddr = ua->castaddr;
 743		result.user_astparam = ua->castparam;
 744	}
 745
 746#ifdef CONFIG_COMPAT
 747	if (compat)
 748		len = sizeof(struct dlm_lock_result32);
 749	else
 750#endif
 751		len = sizeof(struct dlm_lock_result);
 752	struct_len = len;
 753
 754	/* copy lvb to userspace if there is one, it's been updated, and
 755	   the user buffer has space for it */
 756
 757	if (copy_lvb && ua->lksb.sb_lvbptr && count >= len + DLM_USER_LVB_LEN) {
 758		if (copy_to_user(buf+len, ua->lksb.sb_lvbptr,
 759				 DLM_USER_LVB_LEN)) {
 760			error = -EFAULT;
 761			goto out;
 762		}
 763
 764		result.lvb_offset = len;
 765		len += DLM_USER_LVB_LEN;
 766	}
 767
 768	result.length = len;
 769	resultptr = &result;
 770#ifdef CONFIG_COMPAT
 771	if (compat) {
 772		compat_output(&result, &result32);
 773		resultptr = &result32;
 774	}
 775#endif
 776
 777	if (copy_to_user(buf, resultptr, struct_len))
 778		error = -EFAULT;
 779	else
 780		error = len;
 781 out:
 782	return error;
 783}
 784
 785static int copy_version_to_user(char __user *buf, size_t count)
 786{
 787	struct dlm_device_version ver;
 788
 789	memset(&ver, 0, sizeof(struct dlm_device_version));
 790	ver.version[0] = DLM_DEVICE_VERSION_MAJOR;
 791	ver.version[1] = DLM_DEVICE_VERSION_MINOR;
 792	ver.version[2] = DLM_DEVICE_VERSION_PATCH;
 793
 794	if (copy_to_user(buf, &ver, sizeof(struct dlm_device_version)))
 795		return -EFAULT;
 796	return sizeof(struct dlm_device_version);
 797}
 798
 799/* a read returns a single ast described in a struct dlm_lock_result */
 800
 801static ssize_t device_read(struct file *file, char __user *buf, size_t count,
 802			   loff_t *ppos)
 803{
 804	struct dlm_user_proc *proc = file->private_data;
 805	struct dlm_lkb *lkb;
 806	DECLARE_WAITQUEUE(wait, current);
 807	struct dlm_callback *cb;
 808	int rv, ret;
 809
 810	if (count == sizeof(struct dlm_device_version)) {
 811		rv = copy_version_to_user(buf, count);
 812		return rv;
 813	}
 814
 815	if (!proc) {
 816		log_print("non-version read from control device %zu", count);
 817		return -EINVAL;
 818	}
 819
 820#ifdef CONFIG_COMPAT
 821	if (count < sizeof(struct dlm_lock_result32))
 822#else
 823	if (count < sizeof(struct dlm_lock_result))
 824#endif
 825		return -EINVAL;
 826
 827 try_another:
 828
 829	/* do we really need this? can a read happen after a close? */
 830	if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
 831		return -EINVAL;
 832
 833	spin_lock(&proc->asts_spin);
 834	if (list_empty(&proc->asts)) {
 835		if (file->f_flags & O_NONBLOCK) {
 836			spin_unlock(&proc->asts_spin);
 837			return -EAGAIN;
 838		}
 839
 840		add_wait_queue(&proc->wait, &wait);
 841
 842	repeat:
 843		set_current_state(TASK_INTERRUPTIBLE);
 844		if (list_empty(&proc->asts) && !signal_pending(current)) {
 845			spin_unlock(&proc->asts_spin);
 846			schedule();
 847			spin_lock(&proc->asts_spin);
 848			goto repeat;
 849		}
 850		set_current_state(TASK_RUNNING);
 851		remove_wait_queue(&proc->wait, &wait);
 852
 853		if (signal_pending(current)) {
 854			spin_unlock(&proc->asts_spin);
 855			return -ERESTARTSYS;
 856		}
 857	}
 858
 859	/* if we empty lkb_callbacks, we don't want to unlock the spinlock
 860	   without removing lkb_cb_list; so empty lkb_cb_list is always
 861	   consistent with empty lkb_callbacks */
 862
 863	lkb = list_first_entry(&proc->asts, struct dlm_lkb, lkb_cb_list);
 864
 865	rv = dlm_dequeue_lkb_callback(lkb, &cb);
 866	switch (rv) {
 867	case DLM_DEQUEUE_CALLBACK_EMPTY:
 868		/* this shouldn't happen; lkb should have been removed from
 869		 * list when last item was dequeued
 870		 */
 871		log_print("dlm_rem_lkb_callback empty %x", lkb->lkb_id);
 872		list_del_init(&lkb->lkb_cb_list);
 873		spin_unlock(&proc->asts_spin);
 874		/* removes ref for proc->asts, may cause lkb to be freed */
 875		dlm_put_lkb(lkb);
 876		WARN_ON_ONCE(1);
 877		goto try_another;
 878	case DLM_DEQUEUE_CALLBACK_LAST:
 879		list_del_init(&lkb->lkb_cb_list);
 880		clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags);
 881		break;
 882	case DLM_DEQUEUE_CALLBACK_SUCCESS:
 883		break;
 884	default:
 885		WARN_ON_ONCE(1);
 886		break;
 887	}
 888	spin_unlock(&proc->asts_spin);
 889
 890	if (cb->flags & DLM_CB_BAST) {
 891		trace_dlm_bast(lkb->lkb_resource->res_ls, lkb, cb->mode);
 892	} else if (cb->flags & DLM_CB_CAST) {
 893		lkb->lkb_lksb->sb_status = cb->sb_status;
 894		lkb->lkb_lksb->sb_flags = cb->sb_flags;
 895		trace_dlm_ast(lkb->lkb_resource->res_ls, lkb);
 896	}
 897
 898	ret = copy_result_to_user(lkb->lkb_ua,
 899				  test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
 900				  cb->flags, cb->mode, cb->copy_lvb, buf, count);
 901
 902	kref_put(&cb->ref, dlm_release_callback);
 903
 904	/* removes ref for proc->asts, may cause lkb to be freed */
 905	if (rv == DLM_DEQUEUE_CALLBACK_LAST)
 906		dlm_put_lkb(lkb);
 907
 908	return ret;
 909}
 910
 911static __poll_t device_poll(struct file *file, poll_table *wait)
 912{
 913	struct dlm_user_proc *proc = file->private_data;
 914
 915	poll_wait(file, &proc->wait, wait);
 916
 917	spin_lock(&proc->asts_spin);
 918	if (!list_empty(&proc->asts)) {
 919		spin_unlock(&proc->asts_spin);
 920		return EPOLLIN | EPOLLRDNORM;
 921	}
 922	spin_unlock(&proc->asts_spin);
 923	return 0;
 924}
 925
 926int dlm_user_daemon_available(void)
 927{
 928	/* dlm_controld hasn't started (or, has started, but not
 929	   properly populated configfs) */
 930
 931	if (!dlm_our_nodeid())
 932		return 0;
 933
 934	/* This is to deal with versions of dlm_controld that don't
 935	   know about the monitor device.  We assume that if the
 936	   dlm_controld was started (above), but the monitor device
 937	   was never opened, that it's an old version.  dlm_controld
 938	   should open the monitor device before populating configfs. */
 939
 940	if (dlm_monitor_unused)
 941		return 1;
 942
 943	return atomic_read(&dlm_monitor_opened) ? 1 : 0;
 944}
 945
 946static int ctl_device_open(struct inode *inode, struct file *file)
 947{
 948	file->private_data = NULL;
 949	return 0;
 950}
 951
 952static int ctl_device_close(struct inode *inode, struct file *file)
 953{
 954	return 0;
 955}
 956
 957static int monitor_device_open(struct inode *inode, struct file *file)
 958{
 959	atomic_inc(&dlm_monitor_opened);
 960	dlm_monitor_unused = 0;
 961	return 0;
 962}
 963
 964static int monitor_device_close(struct inode *inode, struct file *file)
 965{
 966	if (atomic_dec_and_test(&dlm_monitor_opened))
 967		dlm_stop_lockspaces();
 968	return 0;
 969}
 970
 971static const struct file_operations device_fops = {
 972	.open    = device_open,
 973	.release = device_close,
 974	.read    = device_read,
 975	.write   = device_write,
 976	.poll    = device_poll,
 977	.owner   = THIS_MODULE,
 978	.llseek  = noop_llseek,
 979};
 980
 981static const struct file_operations ctl_device_fops = {
 982	.open    = ctl_device_open,
 983	.release = ctl_device_close,
 984	.read    = device_read,
 985	.write   = device_write,
 986	.owner   = THIS_MODULE,
 987	.llseek  = noop_llseek,
 988};
 989
 990static struct miscdevice ctl_device = {
 991	.name  = "dlm-control",
 992	.fops  = &ctl_device_fops,
 993	.minor = MISC_DYNAMIC_MINOR,
 994};
 995
 996static const struct file_operations monitor_device_fops = {
 997	.open    = monitor_device_open,
 998	.release = monitor_device_close,
 999	.owner   = THIS_MODULE,
1000	.llseek  = noop_llseek,
1001};
1002
1003static struct miscdevice monitor_device = {
1004	.name  = "dlm-monitor",
1005	.fops  = &monitor_device_fops,
1006	.minor = MISC_DYNAMIC_MINOR,
1007};
1008
1009int __init dlm_user_init(void)
1010{
1011	int error;
1012
1013	atomic_set(&dlm_monitor_opened, 0);
1014
1015	error = misc_register(&ctl_device);
1016	if (error) {
1017		log_print("misc_register failed for control device");
1018		goto out;
1019	}
1020
1021	error = misc_register(&monitor_device);
1022	if (error) {
1023		log_print("misc_register failed for monitor device");
1024		misc_deregister(&ctl_device);
1025	}
1026 out:
1027	return error;
1028}
1029
1030void dlm_user_exit(void)
1031{
1032	misc_deregister(&ctl_device);
1033	misc_deregister(&monitor_device);
1034}
1035
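For reference, here is a minimal userspace sketch of the version handshake served by device_read() and check_version() in the v6.9.4 listing above: a read of exactly sizeof(struct dlm_device_version) from the control device returns the kernel's device interface version, and every later write must begin with a version[] triple the kernel accepts. This is an illustration of the interface declared in linux/dlm_device.h, not the libdlm implementation; the device path and error handling are assumptions (udev commonly creates /dev/misc/dlm-control for the "dlm-control" misc device).

/* Illustrative sketch only: query the DLM device interface version from the
 * control device, as answered by copy_version_to_user() when the read size
 * is exactly sizeof(struct dlm_device_version).  The device path is an
 * assumption and may differ depending on udev configuration. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/dlm_device.h>

int main(void)
{
	struct dlm_device_version ver;
	int fd = open("/dev/misc/dlm-control", O_RDWR);	/* path is an assumption */

	if (fd < 0) {
		perror("open dlm-control");
		return 1;
	}

	/* A read of exactly this size hits the version path in device_read(). */
	if (read(fd, &ver, sizeof(ver)) != sizeof(ver)) {
		perror("read version");
		close(fd);
		return 1;
	}

	printf("kernel dlm device version %u.%u.%u (built against %u.%u.%u)\n",
	       ver.version[0], ver.version[1], ver.version[2],
	       DLM_DEVICE_VERSION_MAJOR, DLM_DEVICE_VERSION_MINOR,
	       DLM_DEVICE_VERSION_PATCH);
	close(fd);
	return 0;
}

The older revision of the same file follows for comparison.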
fs/dlm/user.c (v5.14.15)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2006-2010 Red Hat, Inc.  All rights reserved.
   4 */
   5
   6#include <linux/miscdevice.h>
   7#include <linux/init.h>
   8#include <linux/wait.h>
   9#include <linux/file.h>
  10#include <linux/fs.h>
  11#include <linux/poll.h>
  12#include <linux/signal.h>
  13#include <linux/spinlock.h>
  14#include <linux/dlm.h>
  15#include <linux/dlm_device.h>
  16#include <linux/slab.h>
  17#include <linux/sched/signal.h>
  18
  19#include "dlm_internal.h"
  20#include "lockspace.h"
  21#include "lock.h"
  22#include "lvb_table.h"
  23#include "user.h"
  24#include "ast.h"
  25#include "config.h"
  26
  27static const char name_prefix[] = "dlm";
  28static const struct file_operations device_fops;
  29static atomic_t dlm_monitor_opened;
  30static int dlm_monitor_unused = 1;
  31
  32#ifdef CONFIG_COMPAT
  33
  34struct dlm_lock_params32 {
  35	__u8 mode;
  36	__u8 namelen;
  37	__u16 unused;
  38	__u32 flags;
  39	__u32 lkid;
  40	__u32 parent;
  41	__u64 xid;
  42	__u64 timeout;
  43	__u32 castparam;
  44	__u32 castaddr;
  45	__u32 bastparam;
  46	__u32 bastaddr;
  47	__u32 lksb;
  48	char lvb[DLM_USER_LVB_LEN];
  49	char name[];
  50};
  51
  52struct dlm_write_request32 {
  53	__u32 version[3];
  54	__u8 cmd;
  55	__u8 is64bit;
  56	__u8 unused[2];
  57
  58	union  {
  59		struct dlm_lock_params32 lock;
  60		struct dlm_lspace_params lspace;
  61		struct dlm_purge_params purge;
  62	} i;
  63};
  64
  65struct dlm_lksb32 {
  66	__u32 sb_status;
  67	__u32 sb_lkid;
  68	__u8 sb_flags;
  69	__u32 sb_lvbptr;
  70};
  71
  72struct dlm_lock_result32 {
  73	__u32 version[3];
  74	__u32 length;
  75	__u32 user_astaddr;
  76	__u32 user_astparam;
  77	__u32 user_lksb;
  78	struct dlm_lksb32 lksb;
  79	__u8 bast_mode;
  80	__u8 unused[3];
  81	/* Offsets may be zero if no data is present */
  82	__u32 lvb_offset;
  83};
  84
  85static void compat_input(struct dlm_write_request *kb,
  86			 struct dlm_write_request32 *kb32,
  87			 int namelen)
  88{
  89	kb->version[0] = kb32->version[0];
  90	kb->version[1] = kb32->version[1];
  91	kb->version[2] = kb32->version[2];
  92
  93	kb->cmd = kb32->cmd;
  94	kb->is64bit = kb32->is64bit;
  95	if (kb->cmd == DLM_USER_CREATE_LOCKSPACE ||
  96	    kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
  97		kb->i.lspace.flags = kb32->i.lspace.flags;
  98		kb->i.lspace.minor = kb32->i.lspace.minor;
  99		memcpy(kb->i.lspace.name, kb32->i.lspace.name, namelen);
 100	} else if (kb->cmd == DLM_USER_PURGE) {
 101		kb->i.purge.nodeid = kb32->i.purge.nodeid;
 102		kb->i.purge.pid = kb32->i.purge.pid;
 103	} else {
 104		kb->i.lock.mode = kb32->i.lock.mode;
 105		kb->i.lock.namelen = kb32->i.lock.namelen;
 106		kb->i.lock.flags = kb32->i.lock.flags;
 107		kb->i.lock.lkid = kb32->i.lock.lkid;
 108		kb->i.lock.parent = kb32->i.lock.parent;
 109		kb->i.lock.xid = kb32->i.lock.xid;
 110		kb->i.lock.timeout = kb32->i.lock.timeout;
 111		kb->i.lock.castparam = (void *)(long)kb32->i.lock.castparam;
 112		kb->i.lock.castaddr = (void *)(long)kb32->i.lock.castaddr;
 113		kb->i.lock.bastparam = (void *)(long)kb32->i.lock.bastparam;
 114		kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr;
 115		kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb;
 116		memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
 117		memcpy(kb->i.lock.name, kb32->i.lock.name, namelen);
 118	}
 119}
 120
 121static void compat_output(struct dlm_lock_result *res,
 122			  struct dlm_lock_result32 *res32)
 123{
 124	memset(res32, 0, sizeof(*res32));
 125
 126	res32->version[0] = res->version[0];
 127	res32->version[1] = res->version[1];
 128	res32->version[2] = res->version[2];
 129
 130	res32->user_astaddr = (__u32)(long)res->user_astaddr;
 131	res32->user_astparam = (__u32)(long)res->user_astparam;
 132	res32->user_lksb = (__u32)(long)res->user_lksb;
 133	res32->bast_mode = res->bast_mode;
 134
 135	res32->lvb_offset = res->lvb_offset;
 136	res32->length = res->length;
 137
 138	res32->lksb.sb_status = res->lksb.sb_status;
 139	res32->lksb.sb_flags = res->lksb.sb_flags;
 140	res32->lksb.sb_lkid = res->lksb.sb_lkid;
 141	res32->lksb.sb_lvbptr = (__u32)(long)res->lksb.sb_lvbptr;
 142}
 143#endif
 144
 145/* Figure out if this lock is at the end of its life and no longer
 146   available for the application to use.  The lkb still exists until
 147   the final ast is read.  A lock becomes EOL in three situations:
 148     1. a noqueue request fails with EAGAIN
 149     2. an unlock completes with EUNLOCK
 150     3. a cancel of a waiting request completes with ECANCEL/EDEADLK
 151   An EOL lock needs to be removed from the process's list of locks.
 152   And we can't allow any new operation on an EOL lock.  This is
 153   not related to the lifetime of the lkb struct which is managed
 154   entirely by refcount. */
 155
 156static int lkb_is_endoflife(int mode, int status)
 157{
 158	switch (status) {
 159	case -DLM_EUNLOCK:
 160		return 1;
 161	case -DLM_ECANCEL:
 162	case -ETIMEDOUT:
 163	case -EDEADLK:
 164	case -EAGAIN:
 165		if (mode == DLM_LOCK_IV)
 166			return 1;
 167		break;
 168	}
 169	return 0;
 170}
 171
 172/* we could possibly check if the cancel of an orphan has resulted in the lkb
 173   being removed and then remove that lkb from the orphans list and free it */
 174
 175void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
 176		      int status, uint32_t sbflags, uint64_t seq)
 177{
 178	struct dlm_ls *ls;
 179	struct dlm_user_args *ua;
 180	struct dlm_user_proc *proc;
 181	int rv;
 182
 183	if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
 184		return;
 185
 186	ls = lkb->lkb_resource->res_ls;
 187	mutex_lock(&ls->ls_clear_proc_locks);
 188
 189	/* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
 190	   can't be delivered.  For ORPHAN's, dlm_clear_proc_locks() freed
 191	   lkb->ua so we can't try to use it.  This second check is necessary
 192	   for cases where a completion ast is received for an operation that
 193	   began before clear_proc_locks did its cancel/unlock. */
 194
 195	if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
 196		goto out;
 197
 198	DLM_ASSERT(lkb->lkb_ua, dlm_print_lkb(lkb););
 199	ua = lkb->lkb_ua;
 200	proc = ua->proc;
 201
 202	if ((flags & DLM_CB_BAST) && ua->bastaddr == NULL)
 203		goto out;
 204
 205	if ((flags & DLM_CB_CAST) && lkb_is_endoflife(mode, status))
 206		lkb->lkb_flags |= DLM_IFL_ENDOFLIFE;
 207
 208	spin_lock(&proc->asts_spin);
 209
 210	rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, seq);
 211	if (rv < 0) {
 212		spin_unlock(&proc->asts_spin);
 213		goto out;
 214	}
 215
 216	if (list_empty(&lkb->lkb_cb_list)) {
 217		kref_get(&lkb->lkb_ref);
 218		list_add_tail(&lkb->lkb_cb_list, &proc->asts);
 219		wake_up_interruptible(&proc->wait);
 220	}
 221	spin_unlock(&proc->asts_spin);
 222
 223	if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
 224		/* N.B. spin_lock locks_spin, not asts_spin */
 225		spin_lock(&proc->locks_spin);
 226		if (!list_empty(&lkb->lkb_ownqueue)) {
 227			list_del_init(&lkb->lkb_ownqueue);
 228			dlm_put_lkb(lkb);
 229		}
 230		spin_unlock(&proc->locks_spin);
 231	}
 232 out:
 233	mutex_unlock(&ls->ls_clear_proc_locks);
 234}
 235
 236static int device_user_lock(struct dlm_user_proc *proc,
 237			    struct dlm_lock_params *params)
 238{
 239	struct dlm_ls *ls;
 240	struct dlm_user_args *ua;
 241	uint32_t lkid;
 242	int error = -ENOMEM;
 243
 244	ls = dlm_find_lockspace_local(proc->lockspace);
 245	if (!ls)
 246		return -ENOENT;
 247
 248	if (!params->castaddr || !params->lksb) {
 249		error = -EINVAL;
 250		goto out;
 251	}
 252
 253	ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
 254	if (!ua)
 255		goto out;
 256	ua->proc = proc;
 257	ua->user_lksb = params->lksb;
 258	ua->castparam = params->castparam;
 259	ua->castaddr = params->castaddr;
 260	ua->bastparam = params->bastparam;
 261	ua->bastaddr = params->bastaddr;
 262	ua->xid = params->xid;
 263
 264	if (params->flags & DLM_LKF_CONVERT) {
 265		error = dlm_user_convert(ls, ua,
 266				         params->mode, params->flags,
 267				         params->lkid, params->lvb,
 268					 (unsigned long) params->timeout);
 269	} else if (params->flags & DLM_LKF_ORPHAN) {
 270		error = dlm_user_adopt_orphan(ls, ua,
 271					 params->mode, params->flags,
 272					 params->name, params->namelen,
 273					 (unsigned long) params->timeout,
 274					 &lkid);
 275		if (!error)
 276			error = lkid;
 277	} else {
 278		error = dlm_user_request(ls, ua,
 279					 params->mode, params->flags,
 280					 params->name, params->namelen,
 281					 (unsigned long) params->timeout);
 282		if (!error)
 283			error = ua->lksb.sb_lkid;
 284	}
 285 out:
 286	dlm_put_lockspace(ls);
 287	return error;
 288}
 289
 290static int device_user_unlock(struct dlm_user_proc *proc,
 291			      struct dlm_lock_params *params)
 292{
 293	struct dlm_ls *ls;
 294	struct dlm_user_args *ua;
 295	int error = -ENOMEM;
 296
 297	ls = dlm_find_lockspace_local(proc->lockspace);
 298	if (!ls)
 299		return -ENOENT;
 300
 301	ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
 302	if (!ua)
 303		goto out;
 304	ua->proc = proc;
 305	ua->user_lksb = params->lksb;
 306	ua->castparam = params->castparam;
 307	ua->castaddr = params->castaddr;
 308
 309	if (params->flags & DLM_LKF_CANCEL)
 310		error = dlm_user_cancel(ls, ua, params->flags, params->lkid);
 311	else
 312		error = dlm_user_unlock(ls, ua, params->flags, params->lkid,
 313					params->lvb);
 314 out:
 315	dlm_put_lockspace(ls);
 316	return error;
 317}
 318
 319static int device_user_deadlock(struct dlm_user_proc *proc,
 320				struct dlm_lock_params *params)
 321{
 322	struct dlm_ls *ls;
 323	int error;
 324
 325	ls = dlm_find_lockspace_local(proc->lockspace);
 326	if (!ls)
 327		return -ENOENT;
 328
 329	error = dlm_user_deadlock(ls, params->flags, params->lkid);
 330
 331	dlm_put_lockspace(ls);
 332	return error;
 333}
 334
 335static int dlm_device_register(struct dlm_ls *ls, char *name)
 336{
 337	int error, len;
 338
 339	/* The device is already registered.  This happens when the
 340	   lockspace is created multiple times from userspace. */
 341	if (ls->ls_device.name)
 342		return 0;
 343
 344	error = -ENOMEM;
 345	len = strlen(name) + strlen(name_prefix) + 2;
 346	ls->ls_device.name = kzalloc(len, GFP_NOFS);
 347	if (!ls->ls_device.name)
 348		goto fail;
 349
 350	snprintf((char *)ls->ls_device.name, len, "%s_%s", name_prefix,
 351		 name);
 352	ls->ls_device.fops = &device_fops;
 353	ls->ls_device.minor = MISC_DYNAMIC_MINOR;
 354
 355	error = misc_register(&ls->ls_device);
 356	if (error) {
 357		kfree(ls->ls_device.name);
 358		/* this has to be set to NULL
 359		 * to avoid a double-free in dlm_device_deregister
 360		 */
 361		ls->ls_device.name = NULL;
 362	}
 363fail:
 364	return error;
 365}
 366
 367int dlm_device_deregister(struct dlm_ls *ls)
 368{
 369	/* The device is not registered.  This happens when the lockspace
 370	   was never used from userspace, or when device_create_lockspace()
 371	   calls dlm_release_lockspace() after the register fails. */
 372	if (!ls->ls_device.name)
 373		return 0;
 374
 375	misc_deregister(&ls->ls_device);
 376	kfree(ls->ls_device.name);
 377	return 0;
 378}
 379
 380static int device_user_purge(struct dlm_user_proc *proc,
 381			     struct dlm_purge_params *params)
 382{
 383	struct dlm_ls *ls;
 384	int error;
 385
 386	ls = dlm_find_lockspace_local(proc->lockspace);
 387	if (!ls)
 388		return -ENOENT;
 389
 390	error = dlm_user_purge(ls, proc, params->nodeid, params->pid);
 391
 392	dlm_put_lockspace(ls);
 393	return error;
 394}
 395
 396static int device_create_lockspace(struct dlm_lspace_params *params)
 397{
 398	dlm_lockspace_t *lockspace;
 399	struct dlm_ls *ls;
 400	int error;
 401
 402	if (!capable(CAP_SYS_ADMIN))
 403		return -EPERM;
 404
 405	error = dlm_new_lockspace(params->name, dlm_config.ci_cluster_name, params->flags,
 406				  DLM_USER_LVB_LEN, NULL, NULL, NULL,
 407				  &lockspace);
 408	if (error)
 409		return error;
 410
 411	ls = dlm_find_lockspace_local(lockspace);
 412	if (!ls)
 413		return -ENOENT;
 414
 415	error = dlm_device_register(ls, params->name);
 416	dlm_put_lockspace(ls);
 417
 418	if (error)
 419		dlm_release_lockspace(lockspace, 0);
 420	else
 421		error = ls->ls_device.minor;
 422
 423	return error;
 424}
 425
 426static int device_remove_lockspace(struct dlm_lspace_params *params)
 427{
 428	dlm_lockspace_t *lockspace;
 429	struct dlm_ls *ls;
 430	int error, force = 0;
 431
 432	if (!capable(CAP_SYS_ADMIN))
 433		return -EPERM;
 434
 435	ls = dlm_find_lockspace_device(params->minor);
 436	if (!ls)
 437		return -ENOENT;
 438
 439	if (params->flags & DLM_USER_LSFLG_FORCEFREE)
 440		force = 2;
 441
 442	lockspace = ls->ls_local_handle;
 443	dlm_put_lockspace(ls);
 444
 445	/* The final dlm_release_lockspace waits for references to go to
 446	   zero, so all processes will need to close their device for the
 447	   ls before the release will proceed.  release also calls the
 448	   device_deregister above.  Converting a positive return value
 449	   from release to zero means that userspace won't know when its
 450	   release was the final one, but it shouldn't need to know. */
 451
 452	error = dlm_release_lockspace(lockspace, force);
 453	if (error > 0)
 454		error = 0;
 455	return error;
 456}
 457
 458/* Check the user's version matches ours */
 459static int check_version(struct dlm_write_request *req)
 460{
 461	if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
 462	    (req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
 463	     req->version[1] > DLM_DEVICE_VERSION_MINOR)) {
 464
 465		printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
 466		       "user (%d.%d.%d) kernel (%d.%d.%d)\n",
 467		       current->comm,
 468		       task_pid_nr(current),
 469		       req->version[0],
 470		       req->version[1],
 471		       req->version[2],
 472		       DLM_DEVICE_VERSION_MAJOR,
 473		       DLM_DEVICE_VERSION_MINOR,
 474		       DLM_DEVICE_VERSION_PATCH);
 475		return -EINVAL;
 476	}
 477	return 0;
 478}
 479
 480/*
 481 * device_write
 482 *
 483 *   device_user_lock
 484 *     dlm_user_request -> request_lock
 485 *     dlm_user_convert -> convert_lock
 486 *
 487 *   device_user_unlock
 488 *     dlm_user_unlock -> unlock_lock
 489 *     dlm_user_cancel -> cancel_lock
 490 *
 491 *   device_create_lockspace
 492 *     dlm_new_lockspace
 493 *
 494 *   device_remove_lockspace
 495 *     dlm_release_lockspace
 496 */
 497
 498/* a write to a lockspace device is a lock or unlock request, a write
 499   to the control device is to create/remove a lockspace */
 500
 501static ssize_t device_write(struct file *file, const char __user *buf,
 502			    size_t count, loff_t *ppos)
 503{
 504	struct dlm_user_proc *proc = file->private_data;
 505	struct dlm_write_request *kbuf;
 506	int error;
 507
 508#ifdef CONFIG_COMPAT
 509	if (count < sizeof(struct dlm_write_request32))
 510#else
 511	if (count < sizeof(struct dlm_write_request))
 512#endif
 513		return -EINVAL;
 514
 515	/*
 516	 * can't compare against COMPAT/dlm_write_request32 because
 517	 * we don't yet know if is64bit is zero
 518	 */
 519	if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN)
 520		return -EINVAL;
 521
 522	kbuf = memdup_user_nul(buf, count);
 523	if (IS_ERR(kbuf))
 524		return PTR_ERR(kbuf);
 525
 526	if (check_version(kbuf)) {
 527		error = -EBADE;
 528		goto out_free;
 529	}
 530
 531#ifdef CONFIG_COMPAT
 532	if (!kbuf->is64bit) {
 533		struct dlm_write_request32 *k32buf;
 534		int namelen = 0;
 535
 536		if (count > sizeof(struct dlm_write_request32))
 537			namelen = count - sizeof(struct dlm_write_request32);
 538
 539		k32buf = (struct dlm_write_request32 *)kbuf;
 540
 541		/* add 1 after namelen so that the name string is terminated */
 542		kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1,
 543			       GFP_NOFS);
 544		if (!kbuf) {
 545			kfree(k32buf);
 546			return -ENOMEM;
 547		}
 548
 549		if (proc)
 550			set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
 551
 552		compat_input(kbuf, k32buf, namelen);
 553		kfree(k32buf);
 554	}
 555#endif
 556
 557	/* do we really need this? can a write happen after a close? */
 558	if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) &&
 559	    (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))) {
 560		error = -EINVAL;
 561		goto out_free;
 562	}
 563
 564	error = -EINVAL;
 565
 566	switch (kbuf->cmd)
 567	{
 568	case DLM_USER_LOCK:
 569		if (!proc) {
 570			log_print("no locking on control device");
 571			goto out_free;
 572		}
 573		error = device_user_lock(proc, &kbuf->i.lock);
 574		break;
 575
 576	case DLM_USER_UNLOCK:
 577		if (!proc) {
 578			log_print("no locking on control device");
 579			goto out_free;
 580		}
 581		error = device_user_unlock(proc, &kbuf->i.lock);
 582		break;
 583
 584	case DLM_USER_DEADLOCK:
 585		if (!proc) {
 586			log_print("no locking on control device");
 587			goto out_free;
 588		}
 589		error = device_user_deadlock(proc, &kbuf->i.lock);
 590		break;
 591
 592	case DLM_USER_CREATE_LOCKSPACE:
 593		if (proc) {
 594			log_print("create/remove only on control device");
 595			goto out_free;
 596		}
 597		error = device_create_lockspace(&kbuf->i.lspace);
 598		break;
 599
 600	case DLM_USER_REMOVE_LOCKSPACE:
 601		if (proc) {
 602			log_print("create/remove only on control device");
 603			goto out_free;
 604		}
 605		error = device_remove_lockspace(&kbuf->i.lspace);
 606		break;
 607
 608	case DLM_USER_PURGE:
 609		if (!proc) {
 610			log_print("no locking on control device");
 611			goto out_free;
 612		}
 613		error = device_user_purge(proc, &kbuf->i.purge);
 614		break;
 615
 616	default:
 617		log_print("Unknown command passed to DLM device : %d\n",
 618			  kbuf->cmd);
 619	}
 620
 621 out_free:
 622	kfree(kbuf);
 623	return error;
 624}
 625
 626/* Every process that opens the lockspace device has its own "proc" structure
 627   hanging off the open file that's used to keep track of locks owned by the
 628   process and asts that need to be delivered to the process. */
 629
 630static int device_open(struct inode *inode, struct file *file)
 631{
 632	struct dlm_user_proc *proc;
 633	struct dlm_ls *ls;
 634
 635	ls = dlm_find_lockspace_device(iminor(inode));
 636	if (!ls)
 637		return -ENOENT;
 638
 639	proc = kzalloc(sizeof(struct dlm_user_proc), GFP_NOFS);
 640	if (!proc) {
 641		dlm_put_lockspace(ls);
 642		return -ENOMEM;
 643	}
 644
 645	proc->lockspace = ls->ls_local_handle;
 646	INIT_LIST_HEAD(&proc->asts);
 647	INIT_LIST_HEAD(&proc->locks);
 648	INIT_LIST_HEAD(&proc->unlocking);
 649	spin_lock_init(&proc->asts_spin);
 650	spin_lock_init(&proc->locks_spin);
 651	init_waitqueue_head(&proc->wait);
 652	file->private_data = proc;
 653
 654	return 0;
 655}
 656
 657static int device_close(struct inode *inode, struct file *file)
 658{
 659	struct dlm_user_proc *proc = file->private_data;
 660	struct dlm_ls *ls;
 661
 662	ls = dlm_find_lockspace_local(proc->lockspace);
 663	if (!ls)
 664		return -ENOENT;
 665
 666	set_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags);
 667
 668	dlm_clear_proc_locks(ls, proc);
 669
 670	/* at this point no more lkb's should exist for this lockspace,
 671	   so there's no chance of dlm_user_add_ast() being called and
 672	   looking for lkb->ua->proc */
 673
 674	kfree(proc);
 675	file->private_data = NULL;
 676
 677	dlm_put_lockspace(ls);
 678	dlm_put_lockspace(ls);  /* for the find in device_open() */
 679
 680	/* FIXME: AUTOFREE: if this ls is no longer used do
 681	   device_remove_lockspace() */
 682
 683	return 0;
 684}
 685
 686static int copy_result_to_user(struct dlm_user_args *ua, int compat,
 687			       uint32_t flags, int mode, int copy_lvb,
 688			       char __user *buf, size_t count)
 689{
 690#ifdef CONFIG_COMPAT
 691	struct dlm_lock_result32 result32;
 692#endif
 693	struct dlm_lock_result result;
 694	void *resultptr;
 695	int error=0;
 696	int len;
 697	int struct_len;
 698
 699	memset(&result, 0, sizeof(struct dlm_lock_result));
 700	result.version[0] = DLM_DEVICE_VERSION_MAJOR;
 701	result.version[1] = DLM_DEVICE_VERSION_MINOR;
 702	result.version[2] = DLM_DEVICE_VERSION_PATCH;
 703	memcpy(&result.lksb, &ua->lksb, offsetof(struct dlm_lksb, sb_lvbptr));
 704	result.user_lksb = ua->user_lksb;
 705
 706	/* FIXME: dlm1 provides for the user's bastparam/addr to not be updated
 707	   in a conversion unless the conversion is successful.  See code
 708	   in dlm_user_convert() for updating ua from ua_tmp.  OpenVMS, though,
 709	   notes that a new blocking AST address and parameter are set even if
 710	   the conversion fails, so maybe we should just do that. */
 711
 712	if (flags & DLM_CB_BAST) {
 713		result.user_astaddr = ua->bastaddr;
 714		result.user_astparam = ua->bastparam;
 715		result.bast_mode = mode;
 716	} else {
 717		result.user_astaddr = ua->castaddr;
 718		result.user_astparam = ua->castparam;
 719	}
 720
 721#ifdef CONFIG_COMPAT
 722	if (compat)
 723		len = sizeof(struct dlm_lock_result32);
 724	else
 725#endif
 726		len = sizeof(struct dlm_lock_result);
 727	struct_len = len;
 728
 729	/* copy lvb to userspace if there is one, it's been updated, and
 730	   the user buffer has space for it */
 731
 732	if (copy_lvb && ua->lksb.sb_lvbptr && count >= len + DLM_USER_LVB_LEN) {
 733		if (copy_to_user(buf+len, ua->lksb.sb_lvbptr,
 734				 DLM_USER_LVB_LEN)) {
 735			error = -EFAULT;
 736			goto out;
 737		}
 738
 739		result.lvb_offset = len;
 740		len += DLM_USER_LVB_LEN;
 741	}
 742
 743	result.length = len;
 744	resultptr = &result;
 745#ifdef CONFIG_COMPAT
 746	if (compat) {
 747		compat_output(&result, &result32);
 748		resultptr = &result32;
 749	}
 750#endif
 751
 752	if (copy_to_user(buf, resultptr, struct_len))
 753		error = -EFAULT;
 754	else
 755		error = len;
 756 out:
 757	return error;
 758}
 759
 760static int copy_version_to_user(char __user *buf, size_t count)
 761{
 762	struct dlm_device_version ver;
 763
 764	memset(&ver, 0, sizeof(struct dlm_device_version));
 765	ver.version[0] = DLM_DEVICE_VERSION_MAJOR;
 766	ver.version[1] = DLM_DEVICE_VERSION_MINOR;
 767	ver.version[2] = DLM_DEVICE_VERSION_PATCH;
 768
 769	if (copy_to_user(buf, &ver, sizeof(struct dlm_device_version)))
 770		return -EFAULT;
 771	return sizeof(struct dlm_device_version);
 772}
 773
 774/* a read returns a single ast described in a struct dlm_lock_result */
 775
 776static ssize_t device_read(struct file *file, char __user *buf, size_t count,
 777			   loff_t *ppos)
 778{
 779	struct dlm_user_proc *proc = file->private_data;
 780	struct dlm_lkb *lkb;
 781	DECLARE_WAITQUEUE(wait, current);
 782	struct dlm_callback cb;
 783	int rv, resid, copy_lvb = 0;
 784	int old_mode, new_mode;
 785
 786	if (count == sizeof(struct dlm_device_version)) {
 787		rv = copy_version_to_user(buf, count);
 788		return rv;
 789	}
 790
 791	if (!proc) {
 792		log_print("non-version read from control device %zu", count);
 793		return -EINVAL;
 794	}
 795
 796#ifdef CONFIG_COMPAT
 797	if (count < sizeof(struct dlm_lock_result32))
 798#else
 799	if (count < sizeof(struct dlm_lock_result))
 800#endif
 801		return -EINVAL;
 802
 803 try_another:
 804
 805	/* do we really need this? can a read happen after a close? */
 806	if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
 807		return -EINVAL;
 808
 809	spin_lock(&proc->asts_spin);
 810	if (list_empty(&proc->asts)) {
 811		if (file->f_flags & O_NONBLOCK) {
 812			spin_unlock(&proc->asts_spin);
 813			return -EAGAIN;
 814		}
 815
 816		add_wait_queue(&proc->wait, &wait);
 817
 818	repeat:
 819		set_current_state(TASK_INTERRUPTIBLE);
 820		if (list_empty(&proc->asts) && !signal_pending(current)) {
 821			spin_unlock(&proc->asts_spin);
 822			schedule();
 823			spin_lock(&proc->asts_spin);
 824			goto repeat;
 825		}
 826		set_current_state(TASK_RUNNING);
 827		remove_wait_queue(&proc->wait, &wait);
 828
 829		if (signal_pending(current)) {
 830			spin_unlock(&proc->asts_spin);
 831			return -ERESTARTSYS;
 832		}
 833	}
 834
 835	/* if we empty lkb_callbacks, we don't want to unlock the spinlock
 836	   without removing lkb_cb_list; so empty lkb_cb_list is always
 837	   consistent with empty lkb_callbacks */
 838
 839	lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_cb_list);
 840
 841	/* rem_lkb_callback sets a new lkb_last_cast */
 842	old_mode = lkb->lkb_last_cast.mode;
 843
 844	rv = dlm_rem_lkb_callback(lkb->lkb_resource->res_ls, lkb, &cb, &resid);
 845	if (rv < 0) {
 846		/* this shouldn't happen; lkb should have been removed from
 847		   list when resid was zero */
 848		log_print("dlm_rem_lkb_callback empty %x", lkb->lkb_id);
 849		list_del_init(&lkb->lkb_cb_list);
 850		spin_unlock(&proc->asts_spin);
 851		/* removes ref for proc->asts, may cause lkb to be freed */
 852		dlm_put_lkb(lkb);
 853		goto try_another;
 854	}
 855	if (!resid)
 856		list_del_init(&lkb->lkb_cb_list);
 857	spin_unlock(&proc->asts_spin);
 858
 859	if (cb.flags & DLM_CB_SKIP) {
 860		/* removes ref for proc->asts, may cause lkb to be freed */
 861		if (!resid)
 862			dlm_put_lkb(lkb);
 863		goto try_another;
 864	}
 865
 866	if (cb.flags & DLM_CB_CAST) {
 867		new_mode = cb.mode;
 868
 869		if (!cb.sb_status && lkb->lkb_lksb->sb_lvbptr &&
 870		    dlm_lvb_operations[old_mode + 1][new_mode + 1])
 871			copy_lvb = 1;
 872
 873		lkb->lkb_lksb->sb_status = cb.sb_status;
 874		lkb->lkb_lksb->sb_flags = cb.sb_flags;
 875	}
 876
 877	rv = copy_result_to_user(lkb->lkb_ua,
 878				 test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
 879				 cb.flags, cb.mode, copy_lvb, buf, count);
 880
 881	/* removes ref for proc->asts, may cause lkb to be freed */
 882	if (!resid)
 883		dlm_put_lkb(lkb);
 884
 885	return rv;
 886}
 887
 888static __poll_t device_poll(struct file *file, poll_table *wait)
 889{
 890	struct dlm_user_proc *proc = file->private_data;
 891
 892	poll_wait(file, &proc->wait, wait);
 893
 894	spin_lock(&proc->asts_spin);
 895	if (!list_empty(&proc->asts)) {
 896		spin_unlock(&proc->asts_spin);
 897		return EPOLLIN | EPOLLRDNORM;
 898	}
 899	spin_unlock(&proc->asts_spin);
 900	return 0;
 901}
 902
 903int dlm_user_daemon_available(void)
 904{
 905	/* dlm_controld hasn't started (or, has started, but not
 906	   properly populated configfs) */
 907
 908	if (!dlm_our_nodeid())
 909		return 0;
 910
 911	/* This is to deal with versions of dlm_controld that don't
 912	   know about the monitor device.  We assume that if the
 913	   dlm_controld was started (above), but the monitor device
 914	   was never opened, that it's an old version.  dlm_controld
 915	   should open the monitor device before populating configfs. */
 916
 917	if (dlm_monitor_unused)
 918		return 1;
 919
 920	return atomic_read(&dlm_monitor_opened) ? 1 : 0;
 921}
 922
 923static int ctl_device_open(struct inode *inode, struct file *file)
 924{
 925	file->private_data = NULL;
 926	return 0;
 927}
 928
 929static int ctl_device_close(struct inode *inode, struct file *file)
 930{
 931	return 0;
 932}
 933
 934static int monitor_device_open(struct inode *inode, struct file *file)
 935{
 936	atomic_inc(&dlm_monitor_opened);
 937	dlm_monitor_unused = 0;
 938	return 0;
 939}
 940
 941static int monitor_device_close(struct inode *inode, struct file *file)
 942{
 943	if (atomic_dec_and_test(&dlm_monitor_opened))
 944		dlm_stop_lockspaces();
 945	return 0;
 946}
 947
 948static const struct file_operations device_fops = {
 949	.open    = device_open,
 950	.release = device_close,
 951	.read    = device_read,
 952	.write   = device_write,
 953	.poll    = device_poll,
 954	.owner   = THIS_MODULE,
 955	.llseek  = noop_llseek,
 956};
 957
 958static const struct file_operations ctl_device_fops = {
 959	.open    = ctl_device_open,
 960	.release = ctl_device_close,
 961	.read    = device_read,
 962	.write   = device_write,
 963	.owner   = THIS_MODULE,
 964	.llseek  = noop_llseek,
 965};
 966
 967static struct miscdevice ctl_device = {
 968	.name  = "dlm-control",
 969	.fops  = &ctl_device_fops,
 970	.minor = MISC_DYNAMIC_MINOR,
 971};
 972
 973static const struct file_operations monitor_device_fops = {
 974	.open    = monitor_device_open,
 975	.release = monitor_device_close,
 976	.owner   = THIS_MODULE,
 977	.llseek  = noop_llseek,
 978};
 979
 980static struct miscdevice monitor_device = {
 981	.name  = "dlm-monitor",
 982	.fops  = &monitor_device_fops,
 983	.minor = MISC_DYNAMIC_MINOR,
 984};
 985
 986int __init dlm_user_init(void)
 987{
 988	int error;
 989
 990	atomic_set(&dlm_monitor_opened, 0);
 991
 992	error = misc_register(&ctl_device);
 993	if (error) {
 994		log_print("misc_register failed for control device");
 995		goto out;
 996	}
 997
 998	error = misc_register(&monitor_device);
 999	if (error) {
1000		log_print("misc_register failed for monitor device");
1001		misc_deregister(&ctl_device);
1002	}
1003 out:
1004	return error;
1005}
1006
1007void dlm_user_exit(void)
1008{
1009	misc_deregister(&ctl_device);
1010	misc_deregister(&monitor_device);
1011}
1012
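And a companion sketch for the write side shown in device_write()/device_create_lockspace(): creating a lockspace means writing a struct dlm_write_request with cmd = DLM_USER_CREATE_LOCKSPACE to the control device, and on success the write returns the misc minor number of the new per-lockspace device. This is a hedged illustration rather than libdlm; it assumes the same /dev/misc/dlm-control path as above, requires CAP_SYS_ADMIN (matching the capable() check in device_create_lockspace()), and omits the matching DLM_USER_REMOVE_LOCKSPACE cleanup.

/* Illustrative sketch only: create a lockspace through the dlm control
 * device, following the request layout that device_write() parses.  The
 * device path is an assumption; CAP_SYS_ADMIN is required. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <linux/dlm_device.h>

int main(void)
{
	const char *name = "example";			/* lockspace name, chosen for illustration */
	size_t namelen = strlen(name);
	size_t len = sizeof(struct dlm_write_request) + namelen;
	struct dlm_write_request *req = calloc(1, len);
	int fd, minor;

	if (!req)
		return 1;

	req->version[0] = DLM_DEVICE_VERSION_MAJOR;	/* validated by check_version() */
	req->version[1] = DLM_DEVICE_VERSION_MINOR;
	req->version[2] = DLM_DEVICE_VERSION_PATCH;
	req->cmd = DLM_USER_CREATE_LOCKSPACE;
	req->is64bit = (sizeof(long) == 8);		/* tells the kernel whether the compat path applies */
	req->i.lspace.flags = 0;
	memcpy(req->i.lspace.name, name, namelen);	/* name bytes follow the fixed part */

	fd = open("/dev/misc/dlm-control", O_RDWR);	/* path is an assumption */
	if (fd < 0) {
		perror("open dlm-control");
		free(req);
		return 1;
	}

	/* On success the write returns the misc minor of the new dlm_<name>
	 * lockspace device (see device_create_lockspace()). */
	minor = write(fd, req, len);
	if (minor < 0)
		perror("create lockspace");
	else
		printf("lockspace '%s' available on misc minor %d\n", name, minor);

	close(fd);
	free(req);
	return minor < 0 ? 1 : 0;
}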