1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2006-2010 Red Hat, Inc. All rights reserved.
4 */
5
6#include <linux/miscdevice.h>
7#include <linux/init.h>
8#include <linux/wait.h>
9#include <linux/file.h>
10#include <linux/fs.h>
11#include <linux/poll.h>
12#include <linux/signal.h>
13#include <linux/spinlock.h>
14#include <linux/dlm.h>
15#include <linux/dlm_device.h>
16#include <linux/slab.h>
17#include <linux/sched/signal.h>
18
19#include "dlm_internal.h"
20#include "lockspace.h"
21#include "lock.h"
22#include "lvb_table.h"
23#include "user.h"
24#include "ast.h"
25#include "config.h"
26
27static const char name_prefix[] = "dlm";
28static const struct file_operations device_fops;
29static atomic_t dlm_monitor_opened;
30static int dlm_monitor_unused = 1;
31
32#ifdef CONFIG_COMPAT
33
34struct dlm_lock_params32 {
35 __u8 mode;
36 __u8 namelen;
37 __u16 unused;
38 __u32 flags;
39 __u32 lkid;
40 __u32 parent;
41 __u64 xid;
42 __u64 timeout;
43 __u32 castparam;
44 __u32 castaddr;
45 __u32 bastparam;
46 __u32 bastaddr;
47 __u32 lksb;
48 char lvb[DLM_USER_LVB_LEN];
49 char name[0];
50};
51
52struct dlm_write_request32 {
53 __u32 version[3];
54 __u8 cmd;
55 __u8 is64bit;
56 __u8 unused[2];
57
58 union {
59 struct dlm_lock_params32 lock;
60 struct dlm_lspace_params lspace;
61 struct dlm_purge_params purge;
62 } i;
63};
64
65struct dlm_lksb32 {
66 __u32 sb_status;
67 __u32 sb_lkid;
68 __u8 sb_flags;
69 __u32 sb_lvbptr;
70};
71
72struct dlm_lock_result32 {
73 __u32 version[3];
74 __u32 length;
75 __u32 user_astaddr;
76 __u32 user_astparam;
77 __u32 user_lksb;
78 struct dlm_lksb32 lksb;
79 __u8 bast_mode;
80 __u8 unused[3];
81 /* Offsets may be zero if no data is present */
82 __u32 lvb_offset;
83};
84
85static void compat_input(struct dlm_write_request *kb,
86 struct dlm_write_request32 *kb32,
87 int namelen)
88{
89 kb->version[0] = kb32->version[0];
90 kb->version[1] = kb32->version[1];
91 kb->version[2] = kb32->version[2];
92
93 kb->cmd = kb32->cmd;
94 kb->is64bit = kb32->is64bit;
95 if (kb->cmd == DLM_USER_CREATE_LOCKSPACE ||
96 kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
97 kb->i.lspace.flags = kb32->i.lspace.flags;
98 kb->i.lspace.minor = kb32->i.lspace.minor;
99 memcpy(kb->i.lspace.name, kb32->i.lspace.name, namelen);
100 } else if (kb->cmd == DLM_USER_PURGE) {
101 kb->i.purge.nodeid = kb32->i.purge.nodeid;
102 kb->i.purge.pid = kb32->i.purge.pid;
103 } else {
104 kb->i.lock.mode = kb32->i.lock.mode;
105 kb->i.lock.namelen = kb32->i.lock.namelen;
106 kb->i.lock.flags = kb32->i.lock.flags;
107 kb->i.lock.lkid = kb32->i.lock.lkid;
108 kb->i.lock.parent = kb32->i.lock.parent;
109 kb->i.lock.xid = kb32->i.lock.xid;
110 kb->i.lock.timeout = kb32->i.lock.timeout;
111 kb->i.lock.castparam = (void *)(long)kb32->i.lock.castparam;
112 kb->i.lock.castaddr = (void *)(long)kb32->i.lock.castaddr;
113 kb->i.lock.bastparam = (void *)(long)kb32->i.lock.bastparam;
114 kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr;
115 kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb;
116 memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
117 memcpy(kb->i.lock.name, kb32->i.lock.name, namelen);
118 }
119}
120
121static void compat_output(struct dlm_lock_result *res,
122 struct dlm_lock_result32 *res32)
123{
124 memset(res32, 0, sizeof(*res32));
125
126 res32->version[0] = res->version[0];
127 res32->version[1] = res->version[1];
128 res32->version[2] = res->version[2];
129
130 res32->user_astaddr = (__u32)(long)res->user_astaddr;
131 res32->user_astparam = (__u32)(long)res->user_astparam;
132 res32->user_lksb = (__u32)(long)res->user_lksb;
133 res32->bast_mode = res->bast_mode;
134
135 res32->lvb_offset = res->lvb_offset;
136 res32->length = res->length;
137
138 res32->lksb.sb_status = res->lksb.sb_status;
139 res32->lksb.sb_flags = res->lksb.sb_flags;
140 res32->lksb.sb_lkid = res->lksb.sb_lkid;
141 res32->lksb.sb_lvbptr = (__u32)(long)res->lksb.sb_lvbptr;
142}
143#endif
144
145/* Figure out if this lock is at the end of its life and no longer
146 available for the application to use. The lkb still exists until
147 the final ast is read. A lock becomes EOL in three situations:
148 1. a noqueue request fails with EAGAIN
149 2. an unlock completes with EUNLOCK
150 3. a cancel of a waiting request completes with ECANCEL/EDEADLK
151 An EOL lock needs to be removed from the process's list of locks.
152 And we can't allow any new operation on an EOL lock. This is
153 not related to the lifetime of the lkb struct which is managed
154 entirely by refcount. */
155
156static int lkb_is_endoflife(int mode, int status)
157{
158 switch (status) {
159 case -DLM_EUNLOCK:
160 return 1;
161 case -DLM_ECANCEL:
162 case -ETIMEDOUT:
163 case -EDEADLK:
164 case -EAGAIN:
165 if (mode == DLM_LOCK_IV)
166 return 1;
167 break;
168 }
169 return 0;
170}
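/* For example: an unlock that completes with -DLM_EUNLOCK is always EOL.
   A noqueue request that fails with -EAGAIN is EOL because nothing was
   ever granted (the mode is still DLM_LOCK_IV), while -EAGAIN on a
   conversion (granted mode != DLM_LOCK_IV) leaves the lock held at its
   old mode and is therefore not EOL. */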
171
172/* we could possibly check if the cancel of an orphan has resulted in the lkb
173 being removed and then remove that lkb from the orphans list and free it */
174
175void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
176 int status, uint32_t sbflags, uint64_t seq)
177{
178 struct dlm_ls *ls;
179 struct dlm_user_args *ua;
180 struct dlm_user_proc *proc;
181 int rv;
182
183 if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
184 return;
185
186 ls = lkb->lkb_resource->res_ls;
187 mutex_lock(&ls->ls_clear_proc_locks);
188
189 /* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
190 can't be delivered. For ORPHAN's, dlm_clear_proc_locks() freed
191 lkb->ua so we can't try to use it. This second check is necessary
192 for cases where a completion ast is received for an operation that
193 began before clear_proc_locks did its cancel/unlock. */
194
195 if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
196 goto out;
197
198 DLM_ASSERT(lkb->lkb_ua, dlm_print_lkb(lkb););
199 ua = lkb->lkb_ua;
200 proc = ua->proc;
201
202 if ((flags & DLM_CB_BAST) && ua->bastaddr == NULL)
203 goto out;
204
205 if ((flags & DLM_CB_CAST) && lkb_is_endoflife(mode, status))
206 lkb->lkb_flags |= DLM_IFL_ENDOFLIFE;
207
208 spin_lock(&proc->asts_spin);
209
210 rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, seq);
211 if (rv < 0) {
212 spin_unlock(&proc->asts_spin);
213 goto out;
214 }
215
216 if (list_empty(&lkb->lkb_cb_list)) {
217 kref_get(&lkb->lkb_ref);
218 list_add_tail(&lkb->lkb_cb_list, &proc->asts);
219 wake_up_interruptible(&proc->wait);
220 }
221 spin_unlock(&proc->asts_spin);
222
223 if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
224 /* N.B. spin_lock locks_spin, not asts_spin */
225 spin_lock(&proc->locks_spin);
226 if (!list_empty(&lkb->lkb_ownqueue)) {
227 list_del_init(&lkb->lkb_ownqueue);
228 dlm_put_lkb(lkb);
229 }
230 spin_unlock(&proc->locks_spin);
231 }
232 out:
233 mutex_unlock(&ls->ls_clear_proc_locks);
234}
235
236static int device_user_lock(struct dlm_user_proc *proc,
237 struct dlm_lock_params *params)
238{
239 struct dlm_ls *ls;
240 struct dlm_user_args *ua;
241 uint32_t lkid;
242 int error = -ENOMEM;
243
244 ls = dlm_find_lockspace_local(proc->lockspace);
245 if (!ls)
246 return -ENOENT;
247
248 if (!params->castaddr || !params->lksb) {
249 error = -EINVAL;
250 goto out;
251 }
252
253 ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
254 if (!ua)
255 goto out;
256 ua->proc = proc;
257 ua->user_lksb = params->lksb;
258 ua->castparam = params->castparam;
259 ua->castaddr = params->castaddr;
260 ua->bastparam = params->bastparam;
261 ua->bastaddr = params->bastaddr;
262 ua->xid = params->xid;
263
264 if (params->flags & DLM_LKF_CONVERT) {
265 error = dlm_user_convert(ls, ua,
266 params->mode, params->flags,
267 params->lkid, params->lvb,
268 (unsigned long) params->timeout);
269 } else if (params->flags & DLM_LKF_ORPHAN) {
270 error = dlm_user_adopt_orphan(ls, ua,
271 params->mode, params->flags,
272 params->name, params->namelen,
273 (unsigned long) params->timeout,
274 &lkid);
275 if (!error)
276 error = lkid;
277 } else {
278 error = dlm_user_request(ls, ua,
279 params->mode, params->flags,
280 params->name, params->namelen,
281 (unsigned long) params->timeout);
282 if (!error)
283 error = ua->lksb.sb_lkid;
284 }
285 out:
286 dlm_put_lockspace(ls);
287 return error;
288}
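/*
 * Minimal userspace sketch of a DLM_USER_LOCK request written to an open
 * lockspace device (illustrative only, not part of this file; "ls_fd",
 * "my_ast_cb" and the resource name are hypothetical, and real callers
 * normally go through libdlm).  castaddr and lksb must be non-NULL, as
 * checked above, and on success write() returns the new lkid, mirroring
 * the "error = ua->lksb.sb_lkid" path.
 *
 *	struct dlm_lksb lksb;
 *	char buf[sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN];
 *	struct dlm_write_request *req = (struct dlm_write_request *)buf;
 *	const char *name = "myres";
 *
 *	memset(buf, 0, sizeof(buf));
 *	req->version[0] = DLM_DEVICE_VERSION_MAJOR;
 *	req->version[1] = DLM_DEVICE_VERSION_MINOR;
 *	req->version[2] = DLM_DEVICE_VERSION_PATCH;
 *	req->cmd = DLM_USER_LOCK;
 *	req->is64bit = (sizeof(long) == 8);
 *	req->i.lock.mode = DLM_LOCK_EX;
 *	req->i.lock.namelen = strlen(name);
 *	req->i.lock.castaddr = (void *)my_ast_cb;	(must be non-NULL)
 *	req->i.lock.lksb = &lksb;			(must be non-NULL)
 *	memcpy(req->i.lock.name, name, strlen(name));
 *
 *	int lkid = write(ls_fd, req,
 *			 sizeof(struct dlm_write_request) + req->i.lock.namelen);
 */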
289
290static int device_user_unlock(struct dlm_user_proc *proc,
291 struct dlm_lock_params *params)
292{
293 struct dlm_ls *ls;
294 struct dlm_user_args *ua;
295 int error = -ENOMEM;
296
297 ls = dlm_find_lockspace_local(proc->lockspace);
298 if (!ls)
299 return -ENOENT;
300
301 ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
302 if (!ua)
303 goto out;
304 ua->proc = proc;
305 ua->user_lksb = params->lksb;
306 ua->castparam = params->castparam;
307 ua->castaddr = params->castaddr;
308
309 if (params->flags & DLM_LKF_CANCEL)
310 error = dlm_user_cancel(ls, ua, params->flags, params->lkid);
311 else
312 error = dlm_user_unlock(ls, ua, params->flags, params->lkid,
313 params->lvb);
314 out:
315 dlm_put_lockspace(ls);
316 return error;
317}
318
319static int device_user_deadlock(struct dlm_user_proc *proc,
320 struct dlm_lock_params *params)
321{
322 struct dlm_ls *ls;
323 int error;
324
325 ls = dlm_find_lockspace_local(proc->lockspace);
326 if (!ls)
327 return -ENOENT;
328
329 error = dlm_user_deadlock(ls, params->flags, params->lkid);
330
331 dlm_put_lockspace(ls);
332 return error;
333}
334
335static int dlm_device_register(struct dlm_ls *ls, char *name)
336{
337 int error, len;
338
339 /* The device is already registered. This happens when the
340 lockspace is created multiple times from userspace. */
341 if (ls->ls_device.name)
342 return 0;
343
344 error = -ENOMEM;
345 len = strlen(name) + strlen(name_prefix) + 2;
346 ls->ls_device.name = kzalloc(len, GFP_NOFS);
347 if (!ls->ls_device.name)
348 goto fail;
349
350 snprintf((char *)ls->ls_device.name, len, "%s_%s", name_prefix,
351 name);
352 ls->ls_device.fops = &device_fops;
353 ls->ls_device.minor = MISC_DYNAMIC_MINOR;
354
355 error = misc_register(&ls->ls_device);
356 if (error) {
357 kfree(ls->ls_device.name);
358 /* this has to be set to NULL
359 * to avoid a double-free in dlm_device_deregister
360 */
361 ls->ls_device.name = NULL;
362 }
363fail:
364 return error;
365}
366
367int dlm_device_deregister(struct dlm_ls *ls)
368{
369 /* The device is not registered. This happens when the lockspace
370 was never used from userspace, or when device_create_lockspace()
371 calls dlm_release_lockspace() after the register fails. */
372 if (!ls->ls_device.name)
373 return 0;
374
375 misc_deregister(&ls->ls_device);
376 kfree(ls->ls_device.name);
377 return 0;
378}
379
380static int device_user_purge(struct dlm_user_proc *proc,
381 struct dlm_purge_params *params)
382{
383 struct dlm_ls *ls;
384 int error;
385
386 ls = dlm_find_lockspace_local(proc->lockspace);
387 if (!ls)
388 return -ENOENT;
389
390 error = dlm_user_purge(ls, proc, params->nodeid, params->pid);
391
392 dlm_put_lockspace(ls);
393 return error;
394}
395
396static int device_create_lockspace(struct dlm_lspace_params *params)
397{
398 dlm_lockspace_t *lockspace;
399 struct dlm_ls *ls;
400 int error;
401
402 if (!capable(CAP_SYS_ADMIN))
403 return -EPERM;
404
405 error = dlm_new_lockspace(params->name, dlm_config.ci_cluster_name, params->flags,
406 DLM_USER_LVB_LEN, NULL, NULL, NULL,
407 &lockspace);
408 if (error)
409 return error;
410
411 ls = dlm_find_lockspace_local(lockspace);
412 if (!ls)
413 return -ENOENT;
414
415 error = dlm_device_register(ls, params->name);
416 dlm_put_lockspace(ls);
417
418 if (error)
419 dlm_release_lockspace(lockspace, 0);
420 else
421 error = ls->ls_device.minor;
422
423 return error;
424}
425
426static int device_remove_lockspace(struct dlm_lspace_params *params)
427{
428 dlm_lockspace_t *lockspace;
429 struct dlm_ls *ls;
430 int error, force = 0;
431
432 if (!capable(CAP_SYS_ADMIN))
433 return -EPERM;
434
435 ls = dlm_find_lockspace_device(params->minor);
436 if (!ls)
437 return -ENOENT;
438
439 if (params->flags & DLM_USER_LSFLG_FORCEFREE)
440 force = 2;
441
442 lockspace = ls->ls_local_handle;
443 dlm_put_lockspace(ls);
444
445 /* The final dlm_release_lockspace waits for references to go to
446 zero, so all processes will need to close their device for the
447 ls before the release will proceed. release also calls the
448 device_deregister above. Converting a positive return value
449 from release to zero means that userspace won't know when its
450 release was the final one, but it shouldn't need to know. */
451
452 error = dlm_release_lockspace(lockspace, force);
453 if (error > 0)
454 error = 0;
455 return error;
456}
457
458/* Check the user's version matches ours */
459static int check_version(struct dlm_write_request *req)
460{
461 if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
462 (req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
463 req->version[1] > DLM_DEVICE_VERSION_MINOR)) {
464
465 printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
466 "user (%d.%d.%d) kernel (%d.%d.%d)\n",
467 current->comm,
468 task_pid_nr(current),
469 req->version[0],
470 req->version[1],
471 req->version[2],
472 DLM_DEVICE_VERSION_MAJOR,
473 DLM_DEVICE_VERSION_MINOR,
474 DLM_DEVICE_VERSION_PATCH);
475 return -EINVAL;
476 }
477 return 0;
478}
479
480/*
481 * device_write
482 *
483 * device_user_lock
484 * dlm_user_request -> request_lock
485 * dlm_user_convert -> convert_lock
486 *
487 * device_user_unlock
488 * dlm_user_unlock -> unlock_lock
489 * dlm_user_cancel -> cancel_lock
490 *
491 * device_create_lockspace
492 * dlm_new_lockspace
493 *
494 * device_remove_lockspace
495 * dlm_release_lockspace
496 */
497
498/* a write to a lockspace device is a lock or unlock request, a write
499 to the control device is to create/remove a lockspace */
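/*
 * Userspace sketch of a lockspace create through the control device
 * (illustrative only; "/dev/misc/dlm-control" is the usual udev path for
 * the misc device registered below, but the exact path is distribution
 * dependent, and the caller needs CAP_SYS_ADMIN).  On success the write()
 * return value is the minor number of the new per-lockspace device, as
 * device_create_lockspace() returns ls->ls_device.minor.
 *
 *	char buf[sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN];
 *	struct dlm_write_request *req = (struct dlm_write_request *)buf;
 *	const char *lsname = "myls";
 *
 *	memset(buf, 0, sizeof(buf));
 *	req->version[0] = DLM_DEVICE_VERSION_MAJOR;
 *	req->version[1] = DLM_DEVICE_VERSION_MINOR;
 *	req->version[2] = DLM_DEVICE_VERSION_PATCH;
 *	req->cmd = DLM_USER_CREATE_LOCKSPACE;
 *	req->is64bit = (sizeof(long) == 8);
 *	strcpy(req->i.lspace.name, lsname);
 *
 *	int ctl_fd = open("/dev/misc/dlm-control", O_RDWR);
 *	int minor = write(ctl_fd, req,
 *			  sizeof(struct dlm_write_request) + strlen(lsname));
 */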
500
501static ssize_t device_write(struct file *file, const char __user *buf,
502 size_t count, loff_t *ppos)
503{
504 struct dlm_user_proc *proc = file->private_data;
505 struct dlm_write_request *kbuf;
506 int error;
507
508#ifdef CONFIG_COMPAT
509 if (count < sizeof(struct dlm_write_request32))
510#else
511 if (count < sizeof(struct dlm_write_request))
512#endif
513 return -EINVAL;
514
515 /*
516 * can't compare against COMPAT/dlm_write_request32 because
517 * we don't yet know if is64bit is zero
518 */
519 if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN)
520 return -EINVAL;
521
522 kbuf = memdup_user_nul(buf, count);
523 if (IS_ERR(kbuf))
524 return PTR_ERR(kbuf);
525
526 if (check_version(kbuf)) {
527 error = -EBADE;
528 goto out_free;
529 }
530
531#ifdef CONFIG_COMPAT
532 if (!kbuf->is64bit) {
533 struct dlm_write_request32 *k32buf;
534 int namelen = 0;
535
536 if (count > sizeof(struct dlm_write_request32))
537 namelen = count - sizeof(struct dlm_write_request32);
538
539 k32buf = (struct dlm_write_request32 *)kbuf;
540
541 /* add 1 after namelen so that the name string is terminated */
542 kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1,
543 GFP_NOFS);
544 if (!kbuf) {
545 kfree(k32buf);
546 return -ENOMEM;
547 }
548
549 if (proc)
550 set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
551
552 compat_input(kbuf, k32buf, namelen);
553 kfree(k32buf);
554 }
555#endif
556
557 /* do we really need this? can a write happen after a close? */
558 if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) &&
559 (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))) {
560 error = -EINVAL;
561 goto out_free;
562 }
563
564 error = -EINVAL;
565
566 switch (kbuf->cmd)
567 {
568 case DLM_USER_LOCK:
569 if (!proc) {
570 log_print("no locking on control device");
571 goto out_free;
572 }
573 error = device_user_lock(proc, &kbuf->i.lock);
574 break;
575
576 case DLM_USER_UNLOCK:
577 if (!proc) {
578 log_print("no locking on control device");
579 goto out_free;
580 }
581 error = device_user_unlock(proc, &kbuf->i.lock);
582 break;
583
584 case DLM_USER_DEADLOCK:
585 if (!proc) {
586 log_print("no locking on control device");
587 goto out_free;
588 }
589 error = device_user_deadlock(proc, &kbuf->i.lock);
590 break;
591
592 case DLM_USER_CREATE_LOCKSPACE:
593 if (proc) {
594 log_print("create/remove only on control device");
595 goto out_free;
596 }
597 error = device_create_lockspace(&kbuf->i.lspace);
598 break;
599
600 case DLM_USER_REMOVE_LOCKSPACE:
601 if (proc) {
602 log_print("create/remove only on control device");
603 goto out_free;
604 }
605 error = device_remove_lockspace(&kbuf->i.lspace);
606 break;
607
608 case DLM_USER_PURGE:
609 if (!proc) {
610 log_print("no locking on control device");
611 goto out_free;
612 }
613 error = device_user_purge(proc, &kbuf->i.purge);
614 break;
615
616 default:
617 log_print("Unknown command passed to DLM device : %d\n",
618 kbuf->cmd);
619 }
620
621 out_free:
622 kfree(kbuf);
623 return error;
624}
625
626/* Every process that opens the lockspace device has its own "proc" structure
627 hanging off the open file that's used to keep track of locks owned by the
628 process and asts that need to be delivered to the process. */
629
630static int device_open(struct inode *inode, struct file *file)
631{
632 struct dlm_user_proc *proc;
633 struct dlm_ls *ls;
634
635 ls = dlm_find_lockspace_device(iminor(inode));
636 if (!ls)
637 return -ENOENT;
638
639 proc = kzalloc(sizeof(struct dlm_user_proc), GFP_NOFS);
640 if (!proc) {
641 dlm_put_lockspace(ls);
642 return -ENOMEM;
643 }
644
645 proc->lockspace = ls->ls_local_handle;
646 INIT_LIST_HEAD(&proc->asts);
647 INIT_LIST_HEAD(&proc->locks);
648 INIT_LIST_HEAD(&proc->unlocking);
649 spin_lock_init(&proc->asts_spin);
650 spin_lock_init(&proc->locks_spin);
651 init_waitqueue_head(&proc->wait);
652 file->private_data = proc;
653
654 return 0;
655}
656
657static int device_close(struct inode *inode, struct file *file)
658{
659 struct dlm_user_proc *proc = file->private_data;
660 struct dlm_ls *ls;
661
662 ls = dlm_find_lockspace_local(proc->lockspace);
663 if (!ls)
664 return -ENOENT;
665
666 set_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags);
667
668 dlm_clear_proc_locks(ls, proc);
669
670 /* at this point no more lkb's should exist for this lockspace,
671 so there's no chance of dlm_user_add_ast() being called and
672 looking for lkb->ua->proc */
673
674 kfree(proc);
675 file->private_data = NULL;
676
677 dlm_put_lockspace(ls);
678 dlm_put_lockspace(ls); /* for the find in device_open() */
679
680 /* FIXME: AUTOFREE: if this ls is no longer used do
681 device_remove_lockspace() */
682
683 return 0;
684}
685
686static int copy_result_to_user(struct dlm_user_args *ua, int compat,
687 uint32_t flags, int mode, int copy_lvb,
688 char __user *buf, size_t count)
689{
690#ifdef CONFIG_COMPAT
691 struct dlm_lock_result32 result32;
692#endif
693 struct dlm_lock_result result;
694 void *resultptr;
695 int error=0;
696 int len;
697 int struct_len;
698
699 memset(&result, 0, sizeof(struct dlm_lock_result));
700 result.version[0] = DLM_DEVICE_VERSION_MAJOR;
701 result.version[1] = DLM_DEVICE_VERSION_MINOR;
702 result.version[2] = DLM_DEVICE_VERSION_PATCH;
703 memcpy(&result.lksb, &ua->lksb, offsetof(struct dlm_lksb, sb_lvbptr));
704 result.user_lksb = ua->user_lksb;
705
706 /* FIXME: dlm1 provides for the user's bastparam/addr to not be updated
707 in a conversion unless the conversion is successful. See code
708 in dlm_user_convert() for updating ua from ua_tmp. OpenVMS, though,
709 notes that a new blocking AST address and parameter are set even if
710 the conversion fails, so maybe we should just do that. */
711
712 if (flags & DLM_CB_BAST) {
713 result.user_astaddr = ua->bastaddr;
714 result.user_astparam = ua->bastparam;
715 result.bast_mode = mode;
716 } else {
717 result.user_astaddr = ua->castaddr;
718 result.user_astparam = ua->castparam;
719 }
720
721#ifdef CONFIG_COMPAT
722 if (compat)
723 len = sizeof(struct dlm_lock_result32);
724 else
725#endif
726 len = sizeof(struct dlm_lock_result);
727 struct_len = len;
728
729 /* copy lvb to userspace if there is one, it's been updated, and
730 the user buffer has space for it */
731
732 if (copy_lvb && ua->lksb.sb_lvbptr && count >= len + DLM_USER_LVB_LEN) {
733 if (copy_to_user(buf+len, ua->lksb.sb_lvbptr,
734 DLM_USER_LVB_LEN)) {
735 error = -EFAULT;
736 goto out;
737 }
738
739 result.lvb_offset = len;
740 len += DLM_USER_LVB_LEN;
741 }
742
743 result.length = len;
744 resultptr = &result;
745#ifdef CONFIG_COMPAT
746 if (compat) {
747 compat_output(&result, &result32);
748 resultptr = &result32;
749 }
750#endif
751
752 if (copy_to_user(buf, resultptr, struct_len))
753 error = -EFAULT;
754 else
755 error = len;
756 out:
757 return error;
758}
759
760static int copy_version_to_user(char __user *buf, size_t count)
761{
762 struct dlm_device_version ver;
763
764 memset(&ver, 0, sizeof(struct dlm_device_version));
765 ver.version[0] = DLM_DEVICE_VERSION_MAJOR;
766 ver.version[1] = DLM_DEVICE_VERSION_MINOR;
767 ver.version[2] = DLM_DEVICE_VERSION_PATCH;
768
769 if (copy_to_user(buf, &ver, sizeof(struct dlm_device_version)))
770 return -EFAULT;
771 return sizeof(struct dlm_device_version);
772}
773
774/* a read returns a single ast described in a struct dlm_lock_result */
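/*
 * Sketch of the matching userspace read side (illustrative only; "ls_fd"
 * is an open lockspace device as in the write sketches above).  A read of
 * exactly sizeof(struct dlm_device_version) returns the kernel's device
 * version; any larger read blocks (unless O_NONBLOCK) until a callback is
 * queued and then returns one struct dlm_lock_result, optionally followed
 * by the LVB at result.lvb_offset.
 *
 *	struct dlm_device_version ver;
 *	read(ls_fd, &ver, sizeof(ver));		(version handshake)
 *
 *	char rbuf[sizeof(struct dlm_lock_result) + DLM_USER_LVB_LEN];
 *	struct dlm_lock_result *res = (struct dlm_lock_result *)rbuf;
 *
 *	while (read(ls_fd, rbuf, sizeof(rbuf)) > 0)
 *		handle_ast(res);	(hypothetical dispatcher; user_astaddr,
 *					 user_astparam and lksb.sb_status tell it
 *					 which lock completed and how)
 */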
775
776static ssize_t device_read(struct file *file, char __user *buf, size_t count,
777 loff_t *ppos)
778{
779 struct dlm_user_proc *proc = file->private_data;
780 struct dlm_lkb *lkb;
781 DECLARE_WAITQUEUE(wait, current);
782 struct dlm_callback cb;
783 int rv, resid, copy_lvb = 0;
784 int old_mode, new_mode;
785
786 if (count == sizeof(struct dlm_device_version)) {
787 rv = copy_version_to_user(buf, count);
788 return rv;
789 }
790
791 if (!proc) {
792 log_print("non-version read from control device %zu", count);
793 return -EINVAL;
794 }
795
796#ifdef CONFIG_COMPAT
797 if (count < sizeof(struct dlm_lock_result32))
798#else
799 if (count < sizeof(struct dlm_lock_result))
800#endif
801 return -EINVAL;
802
803 try_another:
804
805 /* do we really need this? can a read happen after a close? */
806 if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
807 return -EINVAL;
808
809 spin_lock(&proc->asts_spin);
810 if (list_empty(&proc->asts)) {
811 if (file->f_flags & O_NONBLOCK) {
812 spin_unlock(&proc->asts_spin);
813 return -EAGAIN;
814 }
815
816 add_wait_queue(&proc->wait, &wait);
817
818 repeat:
819 set_current_state(TASK_INTERRUPTIBLE);
820 if (list_empty(&proc->asts) && !signal_pending(current)) {
821 spin_unlock(&proc->asts_spin);
822 schedule();
823 spin_lock(&proc->asts_spin);
824 goto repeat;
825 }
826 set_current_state(TASK_RUNNING);
827 remove_wait_queue(&proc->wait, &wait);
828
829 if (signal_pending(current)) {
830 spin_unlock(&proc->asts_spin);
831 return -ERESTARTSYS;
832 }
833 }
834
835 /* if we empty lkb_callbacks, we don't want to unlock the spinlock
836 without removing lkb_cb_list; so empty lkb_cb_list is always
837 consistent with empty lkb_callbacks */
838
839 lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_cb_list);
840
841 /* rem_lkb_callback sets a new lkb_last_cast */
842 old_mode = lkb->lkb_last_cast.mode;
843
844 rv = dlm_rem_lkb_callback(lkb->lkb_resource->res_ls, lkb, &cb, &resid);
845 if (rv < 0) {
846 /* this shouldn't happen; lkb should have been removed from
847 list when resid was zero */
848 log_print("dlm_rem_lkb_callback empty %x", lkb->lkb_id);
849 list_del_init(&lkb->lkb_cb_list);
850 spin_unlock(&proc->asts_spin);
851 /* removes ref for proc->asts, may cause lkb to be freed */
852 dlm_put_lkb(lkb);
853 goto try_another;
854 }
855 if (!resid)
856 list_del_init(&lkb->lkb_cb_list);
857 spin_unlock(&proc->asts_spin);
858
859 if (cb.flags & DLM_CB_SKIP) {
860 /* removes ref for proc->asts, may cause lkb to be freed */
861 if (!resid)
862 dlm_put_lkb(lkb);
863 goto try_another;
864 }
865
866 if (cb.flags & DLM_CB_CAST) {
867 new_mode = cb.mode;
868
869 if (!cb.sb_status && lkb->lkb_lksb->sb_lvbptr &&
870 dlm_lvb_operations[old_mode + 1][new_mode + 1])
871 copy_lvb = 1;
872
873 lkb->lkb_lksb->sb_status = cb.sb_status;
874 lkb->lkb_lksb->sb_flags = cb.sb_flags;
875 }
876
877 rv = copy_result_to_user(lkb->lkb_ua,
878 test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
879 cb.flags, cb.mode, copy_lvb, buf, count);
880
881 /* removes ref for proc->asts, may cause lkb to be freed */
882 if (!resid)
883 dlm_put_lkb(lkb);
884
885 return rv;
886}
887
888static __poll_t device_poll(struct file *file, poll_table *wait)
889{
890 struct dlm_user_proc *proc = file->private_data;
891
892 poll_wait(file, &proc->wait, wait);
893
894 spin_lock(&proc->asts_spin);
895 if (!list_empty(&proc->asts)) {
896 spin_unlock(&proc->asts_spin);
897 return EPOLLIN | EPOLLRDNORM;
898 }
899 spin_unlock(&proc->asts_spin);
900 return 0;
901}
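/*
 * Userspace sketch of waiting for an ast with poll() (illustrative only;
 * "ls_fd" as above).  device_poll() reports EPOLLIN | EPOLLRDNORM once a
 * callback has been queued on proc->asts.
 *
 *	struct pollfd pfd = { .fd = ls_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(ls_fd, rbuf, sizeof(rbuf));	(one dlm_lock_result)
 */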
902
903int dlm_user_daemon_available(void)
904{
905 /* dlm_controld hasn't started (or, has started, but not
906 properly populated configfs) */
907
908 if (!dlm_our_nodeid())
909 return 0;
910
911 /* This is to deal with versions of dlm_controld that don't
912 know about the monitor device. We assume that if the
913 dlm_controld was started (above), but the monitor device
914 was never opened, that it's an old version. dlm_controld
915 should open the monitor device before populating configfs. */
916
917 if (dlm_monitor_unused)
918 return 1;
919
920 return atomic_read(&dlm_monitor_opened) ? 1 : 0;
921}
922
923static int ctl_device_open(struct inode *inode, struct file *file)
924{
925 file->private_data = NULL;
926 return 0;
927}
928
929static int ctl_device_close(struct inode *inode, struct file *file)
930{
931 return 0;
932}
933
934static int monitor_device_open(struct inode *inode, struct file *file)
935{
936 atomic_inc(&dlm_monitor_opened);
937 dlm_monitor_unused = 0;
938 return 0;
939}
940
941static int monitor_device_close(struct inode *inode, struct file *file)
942{
943 if (atomic_dec_and_test(&dlm_monitor_opened))
944 dlm_stop_lockspaces();
945 return 0;
946}
947
948static const struct file_operations device_fops = {
949 .open = device_open,
950 .release = device_close,
951 .read = device_read,
952 .write = device_write,
953 .poll = device_poll,
954 .owner = THIS_MODULE,
955 .llseek = noop_llseek,
956};
957
958static const struct file_operations ctl_device_fops = {
959 .open = ctl_device_open,
960 .release = ctl_device_close,
961 .read = device_read,
962 .write = device_write,
963 .owner = THIS_MODULE,
964 .llseek = noop_llseek,
965};
966
967static struct miscdevice ctl_device = {
968 .name = "dlm-control",
969 .fops = &ctl_device_fops,
970 .minor = MISC_DYNAMIC_MINOR,
971};
972
973static const struct file_operations monitor_device_fops = {
974 .open = monitor_device_open,
975 .release = monitor_device_close,
976 .owner = THIS_MODULE,
977 .llseek = noop_llseek,
978};
979
980static struct miscdevice monitor_device = {
981 .name = "dlm-monitor",
982 .fops = &monitor_device_fops,
983 .minor = MISC_DYNAMIC_MINOR,
984};
985
986int __init dlm_user_init(void)
987{
988 int error;
989
990 atomic_set(&dlm_monitor_opened, 0);
991
992 error = misc_register(&ctl_device);
993 if (error) {
994 log_print("misc_register failed for control device");
995 goto out;
996 }
997
998 error = misc_register(&monitor_device);
999 if (error) {
1000 log_print("misc_register failed for monitor device");
1001 misc_deregister(&ctl_device);
1002 }
1003 out:
1004 return error;
1005}
1006
1007void dlm_user_exit(void)
1008{
1009 misc_deregister(&ctl_device);
1010 misc_deregister(&monitor_device);
1011}
1012