1/*
2 * Copyright (C) 2006-2010 Red Hat, Inc. All rights reserved.
3 *
4 * This copyrighted material is made available to anyone wishing to use,
5 * modify, copy, or redistribute it subject to the terms and conditions
6 * of the GNU General Public License v.2.
7 */
8
9#include <linux/miscdevice.h>
10#include <linux/init.h>
11#include <linux/wait.h>
12#include <linux/module.h>
13#include <linux/file.h>
14#include <linux/fs.h>
15#include <linux/poll.h>
16#include <linux/signal.h>
17#include <linux/spinlock.h>
18#include <linux/dlm.h>
19#include <linux/dlm_device.h>
20#include <linux/slab.h>
21
22#include "dlm_internal.h"
23#include "lockspace.h"
24#include "lock.h"
25#include "lvb_table.h"
26#include "user.h"
27#include "ast.h"
28
29static const char name_prefix[] = "dlm";
30static const struct file_operations device_fops;
31static atomic_t dlm_monitor_opened;
32static int dlm_monitor_unused = 1;
33
34#ifdef CONFIG_COMPAT
35
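/* 32-bit mirrors of the request/result structures in linux/dlm_device.h:
   fields that carry userspace pointers (castaddr, bastaddr, lksb,
   sb_lvbptr, ...) shrink to __u32 here, and compat_input() /
   compat_output() below convert to and from the native layout so 32-bit
   userspace (libdlm) can drive a 64-bit kernel. */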
36struct dlm_lock_params32 {
37 __u8 mode;
38 __u8 namelen;
39 __u16 unused;
40 __u32 flags;
41 __u32 lkid;
42 __u32 parent;
43 __u64 xid;
44 __u64 timeout;
45 __u32 castparam;
46 __u32 castaddr;
47 __u32 bastparam;
48 __u32 bastaddr;
49 __u32 lksb;
50 char lvb[DLM_USER_LVB_LEN];
51 char name[0];
52};
53
54struct dlm_write_request32 {
55 __u32 version[3];
56 __u8 cmd;
57 __u8 is64bit;
58 __u8 unused[2];
59
60 union {
61 struct dlm_lock_params32 lock;
62 struct dlm_lspace_params lspace;
63 struct dlm_purge_params purge;
64 } i;
65};
66
67struct dlm_lksb32 {
68 __u32 sb_status;
69 __u32 sb_lkid;
70 __u8 sb_flags;
71 __u32 sb_lvbptr;
72};
73
74struct dlm_lock_result32 {
75 __u32 version[3];
76 __u32 length;
77 __u32 user_astaddr;
78 __u32 user_astparam;
79 __u32 user_lksb;
80 struct dlm_lksb32 lksb;
81 __u8 bast_mode;
82 __u8 unused[3];
83 /* Offsets may be zero if no data is present */
84 __u32 lvb_offset;
85};
86
87static void compat_input(struct dlm_write_request *kb,
88 struct dlm_write_request32 *kb32,
89 int namelen)
90{
91 kb->version[0] = kb32->version[0];
92 kb->version[1] = kb32->version[1];
93 kb->version[2] = kb32->version[2];
94
95 kb->cmd = kb32->cmd;
96 kb->is64bit = kb32->is64bit;
97 if (kb->cmd == DLM_USER_CREATE_LOCKSPACE ||
98 kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
99 kb->i.lspace.flags = kb32->i.lspace.flags;
100 kb->i.lspace.minor = kb32->i.lspace.minor;
101 memcpy(kb->i.lspace.name, kb32->i.lspace.name, namelen);
102 } else if (kb->cmd == DLM_USER_PURGE) {
103 kb->i.purge.nodeid = kb32->i.purge.nodeid;
104 kb->i.purge.pid = kb32->i.purge.pid;
105 } else {
106 kb->i.lock.mode = kb32->i.lock.mode;
107 kb->i.lock.namelen = kb32->i.lock.namelen;
108 kb->i.lock.flags = kb32->i.lock.flags;
109 kb->i.lock.lkid = kb32->i.lock.lkid;
110 kb->i.lock.parent = kb32->i.lock.parent;
111 kb->i.lock.xid = kb32->i.lock.xid;
112 kb->i.lock.timeout = kb32->i.lock.timeout;
113 kb->i.lock.castparam = (void *)(long)kb32->i.lock.castparam;
114 kb->i.lock.castaddr = (void *)(long)kb32->i.lock.castaddr;
115 kb->i.lock.bastparam = (void *)(long)kb32->i.lock.bastparam;
116 kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr;
117 kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb;
118 memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
119 memcpy(kb->i.lock.name, kb32->i.lock.name, namelen);
120 }
121}
122
123static void compat_output(struct dlm_lock_result *res,
124 struct dlm_lock_result32 *res32)
125{
126 res32->version[0] = res->version[0];
127 res32->version[1] = res->version[1];
128 res32->version[2] = res->version[2];
129
130 res32->user_astaddr = (__u32)(long)res->user_astaddr;
131 res32->user_astparam = (__u32)(long)res->user_astparam;
132 res32->user_lksb = (__u32)(long)res->user_lksb;
133 res32->bast_mode = res->bast_mode;
134
135 res32->lvb_offset = res->lvb_offset;
136 res32->length = res->length;
137
138 res32->lksb.sb_status = res->lksb.sb_status;
139 res32->lksb.sb_flags = res->lksb.sb_flags;
140 res32->lksb.sb_lkid = res->lksb.sb_lkid;
141 res32->lksb.sb_lvbptr = (__u32)(long)res->lksb.sb_lvbptr;
142}
143#endif
144
145/* Figure out if this lock is at the end of its life and no longer
146 available for the application to use. The lkb still exists until
147 the final ast is read. A lock becomes EOL in three situations:
148 1. a noqueue request fails with EAGAIN
149 2. an unlock completes with EUNLOCK
150 3. a cancel of a waiting request completes with ECANCEL/EDEADLK
151 An EOL lock needs to be removed from the process's list of locks.
152 And we can't allow any new operation on an EOL lock. This is
153 not related to the lifetime of the lkb struct which is managed
154 entirely by refcount. */
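/* Example of case 1: a new request with DLM_LKF_NOQUEUE that cannot be
   granted completes with -EAGAIN while the lock still has no granted
   mode (DLM_LOCK_IV), so lkb_is_endoflife() returns 1; a conversion
   failing with -EAGAIN keeps its granted mode and is not EOL. */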
155
156static int lkb_is_endoflife(int mode, int status)
157{
158 switch (status) {
159 case -DLM_EUNLOCK:
160 return 1;
161 case -DLM_ECANCEL:
162 case -ETIMEDOUT:
163 case -EDEADLK:
164 case -EAGAIN:
165 if (mode == DLM_LOCK_IV)
166 return 1;
167 break;
168 }
169 return 0;
170}
171
172/* we could possibly check if the cancel of an orphan has resulted in the lkb
173 being removed and then remove that lkb from the orphans list and free it */
174
175void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
176 int status, uint32_t sbflags, uint64_t seq)
177{
178 struct dlm_ls *ls;
179 struct dlm_user_args *ua;
180 struct dlm_user_proc *proc;
181 int rv;
182
183 if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
184 return;
185
186 ls = lkb->lkb_resource->res_ls;
187 mutex_lock(&ls->ls_clear_proc_locks);
188
189 /* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
 190 can't be delivered. For ORPHANs, dlm_clear_proc_locks() freed
191 lkb->ua so we can't try to use it. This second check is necessary
192 for cases where a completion ast is received for an operation that
193 began before clear_proc_locks did its cancel/unlock. */
194
195 if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
196 goto out;
197
198 DLM_ASSERT(lkb->lkb_ua, dlm_print_lkb(lkb););
199 ua = lkb->lkb_ua;
200 proc = ua->proc;
201
202 if ((flags & DLM_CB_BAST) && ua->bastaddr == NULL)
203 goto out;
204
205 if ((flags & DLM_CB_CAST) && lkb_is_endoflife(mode, status))
206 lkb->lkb_flags |= DLM_IFL_ENDOFLIFE;
207
208 spin_lock(&proc->asts_spin);
209
210 rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, seq);
211 if (rv < 0) {
212 spin_unlock(&proc->asts_spin);
213 goto out;
214 }
215
216 if (list_empty(&lkb->lkb_cb_list)) {
217 kref_get(&lkb->lkb_ref);
218 list_add_tail(&lkb->lkb_cb_list, &proc->asts);
219 wake_up_interruptible(&proc->wait);
220 }
221 spin_unlock(&proc->asts_spin);
222
223 if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
224 /* N.B. spin_lock locks_spin, not asts_spin */
225 spin_lock(&proc->locks_spin);
226 if (!list_empty(&lkb->lkb_ownqueue)) {
227 list_del_init(&lkb->lkb_ownqueue);
228 dlm_put_lkb(lkb);
229 }
230 spin_unlock(&proc->locks_spin);
231 }
232 out:
233 mutex_unlock(&ls->ls_clear_proc_locks);
234}
235
236static int device_user_lock(struct dlm_user_proc *proc,
237 struct dlm_lock_params *params)
238{
239 struct dlm_ls *ls;
240 struct dlm_user_args *ua;
241 int error = -ENOMEM;
242
243 ls = dlm_find_lockspace_local(proc->lockspace);
244 if (!ls)
245 return -ENOENT;
246
247 if (!params->castaddr || !params->lksb) {
248 error = -EINVAL;
249 goto out;
250 }
251
252 ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
253 if (!ua)
254 goto out;
255 ua->proc = proc;
256 ua->user_lksb = params->lksb;
257 ua->castparam = params->castparam;
258 ua->castaddr = params->castaddr;
259 ua->bastparam = params->bastparam;
260 ua->bastaddr = params->bastaddr;
261 ua->xid = params->xid;
262
263 if (params->flags & DLM_LKF_CONVERT)
264 error = dlm_user_convert(ls, ua,
265 params->mode, params->flags,
266 params->lkid, params->lvb,
267 (unsigned long) params->timeout);
268 else {
269 error = dlm_user_request(ls, ua,
270 params->mode, params->flags,
271 params->name, params->namelen,
272 (unsigned long) params->timeout);
273 if (!error)
274 error = ua->lksb.sb_lkid;
275 }
276 out:
277 dlm_put_lockspace(ls);
278 return error;
279}
280
281static int device_user_unlock(struct dlm_user_proc *proc,
282 struct dlm_lock_params *params)
283{
284 struct dlm_ls *ls;
285 struct dlm_user_args *ua;
286 int error = -ENOMEM;
287
288 ls = dlm_find_lockspace_local(proc->lockspace);
289 if (!ls)
290 return -ENOENT;
291
292 ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
293 if (!ua)
294 goto out;
295 ua->proc = proc;
296 ua->user_lksb = params->lksb;
297 ua->castparam = params->castparam;
298 ua->castaddr = params->castaddr;
299
300 if (params->flags & DLM_LKF_CANCEL)
301 error = dlm_user_cancel(ls, ua, params->flags, params->lkid);
302 else
303 error = dlm_user_unlock(ls, ua, params->flags, params->lkid,
304 params->lvb);
305 out:
306 dlm_put_lockspace(ls);
307 return error;
308}
309
310static int device_user_deadlock(struct dlm_user_proc *proc,
311 struct dlm_lock_params *params)
312{
313 struct dlm_ls *ls;
314 int error;
315
316 ls = dlm_find_lockspace_local(proc->lockspace);
317 if (!ls)
318 return -ENOENT;
319
320 error = dlm_user_deadlock(ls, params->flags, params->lkid);
321
322 dlm_put_lockspace(ls);
323 return error;
324}
325
326static int dlm_device_register(struct dlm_ls *ls, char *name)
327{
328 int error, len;
329
330 /* The device is already registered. This happens when the
331 lockspace is created multiple times from userspace. */
332 if (ls->ls_device.name)
333 return 0;
334
335 error = -ENOMEM;
336 len = strlen(name) + strlen(name_prefix) + 2;
337 ls->ls_device.name = kzalloc(len, GFP_NOFS);
338 if (!ls->ls_device.name)
339 goto fail;
340
341 snprintf((char *)ls->ls_device.name, len, "%s_%s", name_prefix,
342 name);
343 ls->ls_device.fops = &device_fops;
344 ls->ls_device.minor = MISC_DYNAMIC_MINOR;
345
346 error = misc_register(&ls->ls_device);
347 if (error) {
348 kfree(ls->ls_device.name);
349 }
350fail:
351 return error;
352}
353
354int dlm_device_deregister(struct dlm_ls *ls)
355{
356 int error;
357
358 /* The device is not registered. This happens when the lockspace
359 was never used from userspace, or when device_create_lockspace()
360 calls dlm_release_lockspace() after the register fails. */
361 if (!ls->ls_device.name)
362 return 0;
363
364 error = misc_deregister(&ls->ls_device);
365 if (!error)
366 kfree(ls->ls_device.name);
367 return error;
368}
369
370static int device_user_purge(struct dlm_user_proc *proc,
371 struct dlm_purge_params *params)
372{
373 struct dlm_ls *ls;
374 int error;
375
376 ls = dlm_find_lockspace_local(proc->lockspace);
377 if (!ls)
378 return -ENOENT;
379
380 error = dlm_user_purge(ls, proc, params->nodeid, params->pid);
381
382 dlm_put_lockspace(ls);
383 return error;
384}
385
386static int device_create_lockspace(struct dlm_lspace_params *params)
387{
388 dlm_lockspace_t *lockspace;
389 struct dlm_ls *ls;
390 int error;
391
392 if (!capable(CAP_SYS_ADMIN))
393 return -EPERM;
394
395 error = dlm_new_lockspace(params->name, strlen(params->name),
396 &lockspace, params->flags, DLM_USER_LVB_LEN);
397 if (error)
398 return error;
399
400 ls = dlm_find_lockspace_local(lockspace);
401 if (!ls)
402 return -ENOENT;
403
404 error = dlm_device_register(ls, params->name);
405 dlm_put_lockspace(ls);
406
407 if (error)
408 dlm_release_lockspace(lockspace, 0);
409 else
410 error = ls->ls_device.minor;
411
412 return error;
413}
414
415static int device_remove_lockspace(struct dlm_lspace_params *params)
416{
417 dlm_lockspace_t *lockspace;
418 struct dlm_ls *ls;
419 int error, force = 0;
420
421 if (!capable(CAP_SYS_ADMIN))
422 return -EPERM;
423
424 ls = dlm_find_lockspace_device(params->minor);
425 if (!ls)
426 return -ENOENT;
427
428 if (params->flags & DLM_USER_LSFLG_FORCEFREE)
429 force = 2;
430
431 lockspace = ls->ls_local_handle;
432 dlm_put_lockspace(ls);
433
434 /* The final dlm_release_lockspace waits for references to go to
435 zero, so all processes will need to close their device for the
436 ls before the release will proceed. release also calls the
437 device_deregister above. Converting a positive return value
438 from release to zero means that userspace won't know when its
439 release was the final one, but it shouldn't need to know. */
440
441 error = dlm_release_lockspace(lockspace, force);
442 if (error > 0)
443 error = 0;
444 return error;
445}
446
447/* Check the user's version matches ours */
448static int check_version(struct dlm_write_request *req)
449{
450 if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
451 (req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
452 req->version[1] > DLM_DEVICE_VERSION_MINOR)) {
453
454 printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
455 "user (%d.%d.%d) kernel (%d.%d.%d)\n",
456 current->comm,
457 task_pid_nr(current),
458 req->version[0],
459 req->version[1],
460 req->version[2],
461 DLM_DEVICE_VERSION_MAJOR,
462 DLM_DEVICE_VERSION_MINOR,
463 DLM_DEVICE_VERSION_PATCH);
464 return -EINVAL;
465 }
466 return 0;
467}
468
469/*
470 * device_write
471 *
472 * device_user_lock
473 * dlm_user_request -> request_lock
474 * dlm_user_convert -> convert_lock
475 *
476 * device_user_unlock
477 * dlm_user_unlock -> unlock_lock
478 * dlm_user_cancel -> cancel_lock
479 *
480 * device_create_lockspace
481 * dlm_new_lockspace
482 *
483 * device_remove_lockspace
484 * dlm_release_lockspace
485 */
486
487/* a write to a lockspace device is a lock or unlock request, a write
488 to the control device is to create/remove a lockspace */
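
/* For reference, a rough sketch of the userspace side of such a write
   (illustrative only, not part of this file; it assumes udev exposes the
   control miscdevice as /dev/dlm-control, that <linux/dlm_device.h> is
   usable from userspace, and it omits error handling; creating a
   lockspace also requires CAP_SYS_ADMIN):

	const char *name = "example";
	size_t len = sizeof(struct dlm_write_request) + strlen(name) + 1;
	struct dlm_write_request *req = calloc(1, len);
	int fd = open("/dev/dlm-control", O_WRONLY);
	int minor;

	req->version[0] = DLM_DEVICE_VERSION_MAJOR;
	req->version[1] = DLM_DEVICE_VERSION_MINOR;
	req->version[2] = DLM_DEVICE_VERSION_PATCH;
	req->is64bit = (sizeof(long) == 8);
	req->cmd = DLM_USER_CREATE_LOCKSPACE;
	strcpy(req->i.lspace.name, name);
	minor = write(fd, req, len);

   On success device_write() returns the minor number of the new
   per-lockspace device.  Lock and unlock requests use the same layout
   with cmd set to DLM_USER_LOCK / DLM_USER_UNLOCK and i.lock filled in,
   written to that per-lockspace device rather than the control device. */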
489
490static ssize_t device_write(struct file *file, const char __user *buf,
491 size_t count, loff_t *ppos)
492{
493 struct dlm_user_proc *proc = file->private_data;
494 struct dlm_write_request *kbuf;
495 sigset_t tmpsig, allsigs;
496 int error;
497
498#ifdef CONFIG_COMPAT
499 if (count < sizeof(struct dlm_write_request32))
500#else
501 if (count < sizeof(struct dlm_write_request))
502#endif
503 return -EINVAL;
504
505 kbuf = kzalloc(count + 1, GFP_NOFS);
506 if (!kbuf)
507 return -ENOMEM;
508
509 if (copy_from_user(kbuf, buf, count)) {
510 error = -EFAULT;
511 goto out_free;
512 }
513
514 if (check_version(kbuf)) {
515 error = -EBADE;
516 goto out_free;
517 }
518
519#ifdef CONFIG_COMPAT
520 if (!kbuf->is64bit) {
521 struct dlm_write_request32 *k32buf;
522 int namelen = 0;
523
524 if (count > sizeof(struct dlm_write_request32))
525 namelen = count - sizeof(struct dlm_write_request32);
526
527 k32buf = (struct dlm_write_request32 *)kbuf;
528
529 /* add 1 after namelen so that the name string is terminated */
530 kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1,
531 GFP_NOFS);
532 if (!kbuf) {
533 kfree(k32buf);
534 return -ENOMEM;
535 }
536
537 if (proc)
538 set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
539
540 compat_input(kbuf, k32buf, namelen);
541 kfree(k32buf);
542 }
543#endif
544
545 /* do we really need this? can a write happen after a close? */
546 if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) &&
547 (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))) {
548 error = -EINVAL;
549 goto out_free;
550 }
551
552 sigfillset(&allsigs);
553 sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
554
555 error = -EINVAL;
556
557 switch (kbuf->cmd)
558 {
559 case DLM_USER_LOCK:
560 if (!proc) {
561 log_print("no locking on control device");
562 goto out_sig;
563 }
564 error = device_user_lock(proc, &kbuf->i.lock);
565 break;
566
567 case DLM_USER_UNLOCK:
568 if (!proc) {
569 log_print("no locking on control device");
570 goto out_sig;
571 }
572 error = device_user_unlock(proc, &kbuf->i.lock);
573 break;
574
575 case DLM_USER_DEADLOCK:
576 if (!proc) {
577 log_print("no locking on control device");
578 goto out_sig;
579 }
580 error = device_user_deadlock(proc, &kbuf->i.lock);
581 break;
582
583 case DLM_USER_CREATE_LOCKSPACE:
584 if (proc) {
585 log_print("create/remove only on control device");
586 goto out_sig;
587 }
588 error = device_create_lockspace(&kbuf->i.lspace);
589 break;
590
591 case DLM_USER_REMOVE_LOCKSPACE:
592 if (proc) {
593 log_print("create/remove only on control device");
594 goto out_sig;
595 }
596 error = device_remove_lockspace(&kbuf->i.lspace);
597 break;
598
599 case DLM_USER_PURGE:
600 if (!proc) {
601 log_print("no locking on control device");
602 goto out_sig;
603 }
604 error = device_user_purge(proc, &kbuf->i.purge);
605 break;
606
607 default:
608 log_print("Unknown command passed to DLM device : %d\n",
609 kbuf->cmd);
610 }
611
612 out_sig:
613 sigprocmask(SIG_SETMASK, &tmpsig, NULL);
614 out_free:
615 kfree(kbuf);
616 return error;
617}
618
619/* Every process that opens the lockspace device has its own "proc" structure
620 hanging off the open file that's used to keep track of locks owned by the
621 process and asts that need to be delivered to the process. */
622
623static int device_open(struct inode *inode, struct file *file)
624{
625 struct dlm_user_proc *proc;
626 struct dlm_ls *ls;
627
628 ls = dlm_find_lockspace_device(iminor(inode));
629 if (!ls)
630 return -ENOENT;
631
632 proc = kzalloc(sizeof(struct dlm_user_proc), GFP_NOFS);
633 if (!proc) {
634 dlm_put_lockspace(ls);
635 return -ENOMEM;
636 }
637
638 proc->lockspace = ls->ls_local_handle;
639 INIT_LIST_HEAD(&proc->asts);
640 INIT_LIST_HEAD(&proc->locks);
641 INIT_LIST_HEAD(&proc->unlocking);
642 spin_lock_init(&proc->asts_spin);
643 spin_lock_init(&proc->locks_spin);
644 init_waitqueue_head(&proc->wait);
645 file->private_data = proc;
646
647 return 0;
648}
649
650static int device_close(struct inode *inode, struct file *file)
651{
652 struct dlm_user_proc *proc = file->private_data;
653 struct dlm_ls *ls;
654 sigset_t tmpsig, allsigs;
655
656 ls = dlm_find_lockspace_local(proc->lockspace);
657 if (!ls)
658 return -ENOENT;
659
660 sigfillset(&allsigs);
661 sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
662
663 set_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags);
664
665 dlm_clear_proc_locks(ls, proc);
666
667 /* at this point no more lkb's should exist for this lockspace,
668 so there's no chance of dlm_user_add_ast() being called and
669 looking for lkb->ua->proc */
670
671 kfree(proc);
672 file->private_data = NULL;
673
674 dlm_put_lockspace(ls);
675 dlm_put_lockspace(ls); /* for the find in device_open() */
676
677 /* FIXME: AUTOFREE: if this ls is no longer used do
678 device_remove_lockspace() */
679
680 sigprocmask(SIG_SETMASK, &tmpsig, NULL);
681 recalc_sigpending();
682
683 return 0;
684}
685
686static int copy_result_to_user(struct dlm_user_args *ua, int compat,
687 uint32_t flags, int mode, int copy_lvb,
688 char __user *buf, size_t count)
689{
690#ifdef CONFIG_COMPAT
691 struct dlm_lock_result32 result32;
692#endif
693 struct dlm_lock_result result;
694 void *resultptr;
695 int error=0;
696 int len;
697 int struct_len;
698
699 memset(&result, 0, sizeof(struct dlm_lock_result));
700 result.version[0] = DLM_DEVICE_VERSION_MAJOR;
701 result.version[1] = DLM_DEVICE_VERSION_MINOR;
702 result.version[2] = DLM_DEVICE_VERSION_PATCH;
703 memcpy(&result.lksb, &ua->lksb, sizeof(struct dlm_lksb));
704 result.user_lksb = ua->user_lksb;
705
706 /* FIXME: dlm1 provides for the user's bastparam/addr to not be updated
707 in a conversion unless the conversion is successful. See code
708 in dlm_user_convert() for updating ua from ua_tmp. OpenVMS, though,
709 notes that a new blocking AST address and parameter are set even if
710 the conversion fails, so maybe we should just do that. */
711
712 if (flags & DLM_CB_BAST) {
713 result.user_astaddr = ua->bastaddr;
714 result.user_astparam = ua->bastparam;
715 result.bast_mode = mode;
716 } else {
717 result.user_astaddr = ua->castaddr;
718 result.user_astparam = ua->castparam;
719 }
720
721#ifdef CONFIG_COMPAT
722 if (compat)
723 len = sizeof(struct dlm_lock_result32);
724 else
725#endif
726 len = sizeof(struct dlm_lock_result);
727 struct_len = len;
728
729 /* copy lvb to userspace if there is one, it's been updated, and
730 the user buffer has space for it */
731
732 if (copy_lvb && ua->lksb.sb_lvbptr && count >= len + DLM_USER_LVB_LEN) {
733 if (copy_to_user(buf+len, ua->lksb.sb_lvbptr,
734 DLM_USER_LVB_LEN)) {
735 error = -EFAULT;
736 goto out;
737 }
738
739 result.lvb_offset = len;
740 len += DLM_USER_LVB_LEN;
741 }
742
743 result.length = len;
744 resultptr = &result;
745#ifdef CONFIG_COMPAT
746 if (compat) {
747 compat_output(&result, &result32);
748 resultptr = &result32;
749 }
750#endif
751
752 if (copy_to_user(buf, resultptr, struct_len))
753 error = -EFAULT;
754 else
755 error = len;
756 out:
757 return error;
758}
759
760static int copy_version_to_user(char __user *buf, size_t count)
761{
762 struct dlm_device_version ver;
763
764 memset(&ver, 0, sizeof(struct dlm_device_version));
765 ver.version[0] = DLM_DEVICE_VERSION_MAJOR;
766 ver.version[1] = DLM_DEVICE_VERSION_MINOR;
767 ver.version[2] = DLM_DEVICE_VERSION_PATCH;
768
769 if (copy_to_user(buf, &ver, sizeof(struct dlm_device_version)))
770 return -EFAULT;
771 return sizeof(struct dlm_device_version);
772}
773
774/* a read returns a single ast described in a struct dlm_lock_result */
775
776static ssize_t device_read(struct file *file, char __user *buf, size_t count,
777 loff_t *ppos)
778{
779 struct dlm_user_proc *proc = file->private_data;
780 struct dlm_lkb *lkb;
781 DECLARE_WAITQUEUE(wait, current);
782 struct dlm_callback cb;
783 int rv, resid, copy_lvb = 0;
784
785 if (count == sizeof(struct dlm_device_version)) {
786 rv = copy_version_to_user(buf, count);
787 return rv;
788 }
789
790 if (!proc) {
791 log_print("non-version read from control device %zu", count);
792 return -EINVAL;
793 }
794
795#ifdef CONFIG_COMPAT
796 if (count < sizeof(struct dlm_lock_result32))
797#else
798 if (count < sizeof(struct dlm_lock_result))
799#endif
800 return -EINVAL;
801
802 try_another:
803
804 /* do we really need this? can a read happen after a close? */
805 if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
806 return -EINVAL;
807
808 spin_lock(&proc->asts_spin);
809 if (list_empty(&proc->asts)) {
810 if (file->f_flags & O_NONBLOCK) {
811 spin_unlock(&proc->asts_spin);
812 return -EAGAIN;
813 }
814
815 add_wait_queue(&proc->wait, &wait);
816
817 repeat:
818 set_current_state(TASK_INTERRUPTIBLE);
819 if (list_empty(&proc->asts) && !signal_pending(current)) {
820 spin_unlock(&proc->asts_spin);
821 schedule();
822 spin_lock(&proc->asts_spin);
823 goto repeat;
824 }
825 set_current_state(TASK_RUNNING);
826 remove_wait_queue(&proc->wait, &wait);
827
828 if (signal_pending(current)) {
829 spin_unlock(&proc->asts_spin);
830 return -ERESTARTSYS;
831 }
832 }
833
834 /* if we empty lkb_callbacks, we don't want to unlock the spinlock
835 without removing lkb_cb_list; so empty lkb_cb_list is always
836 consistent with empty lkb_callbacks */
837
838 lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_cb_list);
839
840 rv = dlm_rem_lkb_callback(lkb->lkb_resource->res_ls, lkb, &cb, &resid);
841 if (rv < 0) {
842 /* this shouldn't happen; lkb should have been removed from
843 list when resid was zero */
844 log_print("dlm_rem_lkb_callback empty %x", lkb->lkb_id);
845 list_del_init(&lkb->lkb_cb_list);
846 spin_unlock(&proc->asts_spin);
847 /* removes ref for proc->asts, may cause lkb to be freed */
848 dlm_put_lkb(lkb);
849 goto try_another;
850 }
851 if (!resid)
852 list_del_init(&lkb->lkb_cb_list);
853 spin_unlock(&proc->asts_spin);
854
855 if (cb.flags & DLM_CB_SKIP) {
856 /* removes ref for proc->asts, may cause lkb to be freed */
857 if (!resid)
858 dlm_put_lkb(lkb);
859 goto try_another;
860 }
861
862 if (cb.flags & DLM_CB_CAST) {
863 int old_mode, new_mode;
864
865 old_mode = lkb->lkb_last_cast.mode;
866 new_mode = cb.mode;
867
868 if (!cb.sb_status && lkb->lkb_lksb->sb_lvbptr &&
869 dlm_lvb_operations[old_mode + 1][new_mode + 1])
870 copy_lvb = 1;
871
872 lkb->lkb_lksb->sb_status = cb.sb_status;
873 lkb->lkb_lksb->sb_flags = cb.sb_flags;
874 }
875
876 rv = copy_result_to_user(lkb->lkb_ua,
877 test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
878 cb.flags, cb.mode, copy_lvb, buf, count);
879
880 /* removes ref for proc->asts, may cause lkb to be freed */
881 if (!resid)
882 dlm_put_lkb(lkb);
883
884 return rv;
885}
886
887static unsigned int device_poll(struct file *file, poll_table *wait)
888{
889 struct dlm_user_proc *proc = file->private_data;
890
891 poll_wait(file, &proc->wait, wait);
892
893 spin_lock(&proc->asts_spin);
894 if (!list_empty(&proc->asts)) {
895 spin_unlock(&proc->asts_spin);
896 return POLLIN | POLLRDNORM;
897 }
898 spin_unlock(&proc->asts_spin);
899 return 0;
900}
901
902int dlm_user_daemon_available(void)
903{
904 /* dlm_controld hasn't started (or, has started, but not
905 properly populated configfs) */
906
907 if (!dlm_our_nodeid())
908 return 0;
909
910 /* This is to deal with versions of dlm_controld that don't
911 know about the monitor device. We assume that if the
912 dlm_controld was started (above), but the monitor device
913 was never opened, that it's an old version. dlm_controld
914 should open the monitor device before populating configfs. */
915
916 if (dlm_monitor_unused)
917 return 1;
918
919 return atomic_read(&dlm_monitor_opened) ? 1 : 0;
920}
921
922static int ctl_device_open(struct inode *inode, struct file *file)
923{
924 file->private_data = NULL;
925 return 0;
926}
927
928static int ctl_device_close(struct inode *inode, struct file *file)
929{
930 return 0;
931}
932
933static int monitor_device_open(struct inode *inode, struct file *file)
934{
935 atomic_inc(&dlm_monitor_opened);
936 dlm_monitor_unused = 0;
937 return 0;
938}
939
940static int monitor_device_close(struct inode *inode, struct file *file)
941{
942 if (atomic_dec_and_test(&dlm_monitor_opened))
943 dlm_stop_lockspaces();
944 return 0;
945}
946
947static const struct file_operations device_fops = {
948 .open = device_open,
949 .release = device_close,
950 .read = device_read,
951 .write = device_write,
952 .poll = device_poll,
953 .owner = THIS_MODULE,
954 .llseek = noop_llseek,
955};
956
957static const struct file_operations ctl_device_fops = {
958 .open = ctl_device_open,
959 .release = ctl_device_close,
960 .read = device_read,
961 .write = device_write,
962 .owner = THIS_MODULE,
963 .llseek = noop_llseek,
964};
965
966static struct miscdevice ctl_device = {
967 .name = "dlm-control",
968 .fops = &ctl_device_fops,
969 .minor = MISC_DYNAMIC_MINOR,
970};
971
972static const struct file_operations monitor_device_fops = {
973 .open = monitor_device_open,
974 .release = monitor_device_close,
975 .owner = THIS_MODULE,
976 .llseek = noop_llseek,
977};
978
979static struct miscdevice monitor_device = {
980 .name = "dlm-monitor",
981 .fops = &monitor_device_fops,
982 .minor = MISC_DYNAMIC_MINOR,
983};
984
985int __init dlm_user_init(void)
986{
987 int error;
988
989 atomic_set(&dlm_monitor_opened, 0);
990
991 error = misc_register(&ctl_device);
992 if (error) {
993 log_print("misc_register failed for control device");
994 goto out;
995 }
996
997 error = misc_register(&monitor_device);
998 if (error) {
999 log_print("misc_register failed for monitor device");
1000 misc_deregister(&ctl_device);
1001 }
1002 out:
1003 return error;
1004}
1005
1006void dlm_user_exit(void)
1007{
1008 misc_deregister(&ctl_device);
1009 misc_deregister(&monitor_device);
1010}
1011
1/*
2 * Copyright (C) 2006-2010 Red Hat, Inc. All rights reserved.
3 *
4 * This copyrighted material is made available to anyone wishing to use,
5 * modify, copy, or redistribute it subject to the terms and conditions
6 * of the GNU General Public License v.2.
7 */
8
9#include <linux/miscdevice.h>
10#include <linux/init.h>
11#include <linux/wait.h>
12#include <linux/file.h>
13#include <linux/fs.h>
14#include <linux/poll.h>
15#include <linux/signal.h>
16#include <linux/spinlock.h>
17#include <linux/dlm.h>
18#include <linux/dlm_device.h>
19#include <linux/slab.h>
20
21#include "dlm_internal.h"
22#include "lockspace.h"
23#include "lock.h"
24#include "lvb_table.h"
25#include "user.h"
26#include "ast.h"
27
28static const char name_prefix[] = "dlm";
29static const struct file_operations device_fops;
30static atomic_t dlm_monitor_opened;
31static int dlm_monitor_unused = 1;
32
33#ifdef CONFIG_COMPAT
34
35struct dlm_lock_params32 {
36 __u8 mode;
37 __u8 namelen;
38 __u16 unused;
39 __u32 flags;
40 __u32 lkid;
41 __u32 parent;
42 __u64 xid;
43 __u64 timeout;
44 __u32 castparam;
45 __u32 castaddr;
46 __u32 bastparam;
47 __u32 bastaddr;
48 __u32 lksb;
49 char lvb[DLM_USER_LVB_LEN];
50 char name[0];
51};
52
53struct dlm_write_request32 {
54 __u32 version[3];
55 __u8 cmd;
56 __u8 is64bit;
57 __u8 unused[2];
58
59 union {
60 struct dlm_lock_params32 lock;
61 struct dlm_lspace_params lspace;
62 struct dlm_purge_params purge;
63 } i;
64};
65
66struct dlm_lksb32 {
67 __u32 sb_status;
68 __u32 sb_lkid;
69 __u8 sb_flags;
70 __u32 sb_lvbptr;
71};
72
73struct dlm_lock_result32 {
74 __u32 version[3];
75 __u32 length;
76 __u32 user_astaddr;
77 __u32 user_astparam;
78 __u32 user_lksb;
79 struct dlm_lksb32 lksb;
80 __u8 bast_mode;
81 __u8 unused[3];
82 /* Offsets may be zero if no data is present */
83 __u32 lvb_offset;
84};
85
86static void compat_input(struct dlm_write_request *kb,
87 struct dlm_write_request32 *kb32,
88 int namelen)
89{
90 kb->version[0] = kb32->version[0];
91 kb->version[1] = kb32->version[1];
92 kb->version[2] = kb32->version[2];
93
94 kb->cmd = kb32->cmd;
95 kb->is64bit = kb32->is64bit;
96 if (kb->cmd == DLM_USER_CREATE_LOCKSPACE ||
97 kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
98 kb->i.lspace.flags = kb32->i.lspace.flags;
99 kb->i.lspace.minor = kb32->i.lspace.minor;
100 memcpy(kb->i.lspace.name, kb32->i.lspace.name, namelen);
101 } else if (kb->cmd == DLM_USER_PURGE) {
102 kb->i.purge.nodeid = kb32->i.purge.nodeid;
103 kb->i.purge.pid = kb32->i.purge.pid;
104 } else {
105 kb->i.lock.mode = kb32->i.lock.mode;
106 kb->i.lock.namelen = kb32->i.lock.namelen;
107 kb->i.lock.flags = kb32->i.lock.flags;
108 kb->i.lock.lkid = kb32->i.lock.lkid;
109 kb->i.lock.parent = kb32->i.lock.parent;
110 kb->i.lock.xid = kb32->i.lock.xid;
111 kb->i.lock.timeout = kb32->i.lock.timeout;
112 kb->i.lock.castparam = (void *)(long)kb32->i.lock.castparam;
113 kb->i.lock.castaddr = (void *)(long)kb32->i.lock.castaddr;
114 kb->i.lock.bastparam = (void *)(long)kb32->i.lock.bastparam;
115 kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr;
116 kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb;
117 memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
118 memcpy(kb->i.lock.name, kb32->i.lock.name, namelen);
119 }
120}
121
122static void compat_output(struct dlm_lock_result *res,
123 struct dlm_lock_result32 *res32)
124{
125 res32->version[0] = res->version[0];
126 res32->version[1] = res->version[1];
127 res32->version[2] = res->version[2];
128
129 res32->user_astaddr = (__u32)(long)res->user_astaddr;
130 res32->user_astparam = (__u32)(long)res->user_astparam;
131 res32->user_lksb = (__u32)(long)res->user_lksb;
132 res32->bast_mode = res->bast_mode;
133
134 res32->lvb_offset = res->lvb_offset;
135 res32->length = res->length;
136
137 res32->lksb.sb_status = res->lksb.sb_status;
138 res32->lksb.sb_flags = res->lksb.sb_flags;
139 res32->lksb.sb_lkid = res->lksb.sb_lkid;
140 res32->lksb.sb_lvbptr = (__u32)(long)res->lksb.sb_lvbptr;
141}
142#endif
143
144/* Figure out if this lock is at the end of its life and no longer
145 available for the application to use. The lkb still exists until
146 the final ast is read. A lock becomes EOL in three situations:
147 1. a noqueue request fails with EAGAIN
148 2. an unlock completes with EUNLOCK
149 3. a cancel of a waiting request completes with ECANCEL/EDEADLK
150 An EOL lock needs to be removed from the process's list of locks.
151 And we can't allow any new operation on an EOL lock. This is
152 not related to the lifetime of the lkb struct which is managed
153 entirely by refcount. */
154
155static int lkb_is_endoflife(int mode, int status)
156{
157 switch (status) {
158 case -DLM_EUNLOCK:
159 return 1;
160 case -DLM_ECANCEL:
161 case -ETIMEDOUT:
162 case -EDEADLK:
163 case -EAGAIN:
164 if (mode == DLM_LOCK_IV)
165 return 1;
166 break;
167 }
168 return 0;
169}
170
171/* we could possibly check if the cancel of an orphan has resulted in the lkb
172 being removed and then remove that lkb from the orphans list and free it */
173
174void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
175 int status, uint32_t sbflags, uint64_t seq)
176{
177 struct dlm_ls *ls;
178 struct dlm_user_args *ua;
179 struct dlm_user_proc *proc;
180 int rv;
181
182 if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
183 return;
184
185 ls = lkb->lkb_resource->res_ls;
186 mutex_lock(&ls->ls_clear_proc_locks);
187
188 /* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
 189 can't be delivered. For ORPHANs, dlm_clear_proc_locks() freed
190 lkb->ua so we can't try to use it. This second check is necessary
191 for cases where a completion ast is received for an operation that
192 began before clear_proc_locks did its cancel/unlock. */
193
194 if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
195 goto out;
196
197 DLM_ASSERT(lkb->lkb_ua, dlm_print_lkb(lkb););
198 ua = lkb->lkb_ua;
199 proc = ua->proc;
200
201 if ((flags & DLM_CB_BAST) && ua->bastaddr == NULL)
202 goto out;
203
204 if ((flags & DLM_CB_CAST) && lkb_is_endoflife(mode, status))
205 lkb->lkb_flags |= DLM_IFL_ENDOFLIFE;
206
207 spin_lock(&proc->asts_spin);
208
209 rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, seq);
210 if (rv < 0) {
211 spin_unlock(&proc->asts_spin);
212 goto out;
213 }
214
215 if (list_empty(&lkb->lkb_cb_list)) {
216 kref_get(&lkb->lkb_ref);
217 list_add_tail(&lkb->lkb_cb_list, &proc->asts);
218 wake_up_interruptible(&proc->wait);
219 }
220 spin_unlock(&proc->asts_spin);
221
222 if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
223 /* N.B. spin_lock locks_spin, not asts_spin */
224 spin_lock(&proc->locks_spin);
225 if (!list_empty(&lkb->lkb_ownqueue)) {
226 list_del_init(&lkb->lkb_ownqueue);
227 dlm_put_lkb(lkb);
228 }
229 spin_unlock(&proc->locks_spin);
230 }
231 out:
232 mutex_unlock(&ls->ls_clear_proc_locks);
233}
234
235static int device_user_lock(struct dlm_user_proc *proc,
236 struct dlm_lock_params *params)
237{
238 struct dlm_ls *ls;
239 struct dlm_user_args *ua;
240 uint32_t lkid;
241 int error = -ENOMEM;
242
243 ls = dlm_find_lockspace_local(proc->lockspace);
244 if (!ls)
245 return -ENOENT;
246
247 if (!params->castaddr || !params->lksb) {
248 error = -EINVAL;
249 goto out;
250 }
251
252 ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
253 if (!ua)
254 goto out;
255 ua->proc = proc;
256 ua->user_lksb = params->lksb;
257 ua->castparam = params->castparam;
258 ua->castaddr = params->castaddr;
259 ua->bastparam = params->bastparam;
260 ua->bastaddr = params->bastaddr;
261 ua->xid = params->xid;
262
263 if (params->flags & DLM_LKF_CONVERT) {
264 error = dlm_user_convert(ls, ua,
265 params->mode, params->flags,
266 params->lkid, params->lvb,
267 (unsigned long) params->timeout);
268 } else if (params->flags & DLM_LKF_ORPHAN) {
269 error = dlm_user_adopt_orphan(ls, ua,
270 params->mode, params->flags,
271 params->name, params->namelen,
272 (unsigned long) params->timeout,
273 &lkid);
274 if (!error)
275 error = lkid;
276 } else {
277 error = dlm_user_request(ls, ua,
278 params->mode, params->flags,
279 params->name, params->namelen,
280 (unsigned long) params->timeout);
281 if (!error)
282 error = ua->lksb.sb_lkid;
283 }
284 out:
285 dlm_put_lockspace(ls);
286 return error;
287}
288
289static int device_user_unlock(struct dlm_user_proc *proc,
290 struct dlm_lock_params *params)
291{
292 struct dlm_ls *ls;
293 struct dlm_user_args *ua;
294 int error = -ENOMEM;
295
296 ls = dlm_find_lockspace_local(proc->lockspace);
297 if (!ls)
298 return -ENOENT;
299
300 ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
301 if (!ua)
302 goto out;
303 ua->proc = proc;
304 ua->user_lksb = params->lksb;
305 ua->castparam = params->castparam;
306 ua->castaddr = params->castaddr;
307
308 if (params->flags & DLM_LKF_CANCEL)
309 error = dlm_user_cancel(ls, ua, params->flags, params->lkid);
310 else
311 error = dlm_user_unlock(ls, ua, params->flags, params->lkid,
312 params->lvb);
313 out:
314 dlm_put_lockspace(ls);
315 return error;
316}
317
318static int device_user_deadlock(struct dlm_user_proc *proc,
319 struct dlm_lock_params *params)
320{
321 struct dlm_ls *ls;
322 int error;
323
324 ls = dlm_find_lockspace_local(proc->lockspace);
325 if (!ls)
326 return -ENOENT;
327
328 error = dlm_user_deadlock(ls, params->flags, params->lkid);
329
330 dlm_put_lockspace(ls);
331 return error;
332}
333
334static int dlm_device_register(struct dlm_ls *ls, char *name)
335{
336 int error, len;
337
338 /* The device is already registered. This happens when the
339 lockspace is created multiple times from userspace. */
340 if (ls->ls_device.name)
341 return 0;
342
343 error = -ENOMEM;
344 len = strlen(name) + strlen(name_prefix) + 2;
345 ls->ls_device.name = kzalloc(len, GFP_NOFS);
346 if (!ls->ls_device.name)
347 goto fail;
348
349 snprintf((char *)ls->ls_device.name, len, "%s_%s", name_prefix,
350 name);
351 ls->ls_device.fops = &device_fops;
352 ls->ls_device.minor = MISC_DYNAMIC_MINOR;
353
354 error = misc_register(&ls->ls_device);
355 if (error) {
356 kfree(ls->ls_device.name);
357 }
358fail:
359 return error;
360}
361
362int dlm_device_deregister(struct dlm_ls *ls)
363{
364 /* The device is not registered. This happens when the lockspace
365 was never used from userspace, or when device_create_lockspace()
366 calls dlm_release_lockspace() after the register fails. */
367 if (!ls->ls_device.name)
368 return 0;
369
370 misc_deregister(&ls->ls_device);
371 kfree(ls->ls_device.name);
372 return 0;
373}
374
375static int device_user_purge(struct dlm_user_proc *proc,
376 struct dlm_purge_params *params)
377{
378 struct dlm_ls *ls;
379 int error;
380
381 ls = dlm_find_lockspace_local(proc->lockspace);
382 if (!ls)
383 return -ENOENT;
384
385 error = dlm_user_purge(ls, proc, params->nodeid, params->pid);
386
387 dlm_put_lockspace(ls);
388 return error;
389}
390
391static int device_create_lockspace(struct dlm_lspace_params *params)
392{
393 dlm_lockspace_t *lockspace;
394 struct dlm_ls *ls;
395 int error;
396
397 if (!capable(CAP_SYS_ADMIN))
398 return -EPERM;
399
400 error = dlm_new_lockspace(params->name, NULL, params->flags,
401 DLM_USER_LVB_LEN, NULL, NULL, NULL,
402 &lockspace);
403 if (error)
404 return error;
405
406 ls = dlm_find_lockspace_local(lockspace);
407 if (!ls)
408 return -ENOENT;
409
410 error = dlm_device_register(ls, params->name);
411 dlm_put_lockspace(ls);
412
413 if (error)
414 dlm_release_lockspace(lockspace, 0);
415 else
416 error = ls->ls_device.minor;
417
418 return error;
419}
420
421static int device_remove_lockspace(struct dlm_lspace_params *params)
422{
423 dlm_lockspace_t *lockspace;
424 struct dlm_ls *ls;
425 int error, force = 0;
426
427 if (!capable(CAP_SYS_ADMIN))
428 return -EPERM;
429
430 ls = dlm_find_lockspace_device(params->minor);
431 if (!ls)
432 return -ENOENT;
433
434 if (params->flags & DLM_USER_LSFLG_FORCEFREE)
435 force = 2;
436
437 lockspace = ls->ls_local_handle;
438 dlm_put_lockspace(ls);
439
440 /* The final dlm_release_lockspace waits for references to go to
441 zero, so all processes will need to close their device for the
442 ls before the release will proceed. release also calls the
443 device_deregister above. Converting a positive return value
444 from release to zero means that userspace won't know when its
445 release was the final one, but it shouldn't need to know. */
446
447 error = dlm_release_lockspace(lockspace, force);
448 if (error > 0)
449 error = 0;
450 return error;
451}
452
453/* Check the user's version matches ours */
454static int check_version(struct dlm_write_request *req)
455{
456 if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
457 (req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
458 req->version[1] > DLM_DEVICE_VERSION_MINOR)) {
459
460 printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
461 "user (%d.%d.%d) kernel (%d.%d.%d)\n",
462 current->comm,
463 task_pid_nr(current),
464 req->version[0],
465 req->version[1],
466 req->version[2],
467 DLM_DEVICE_VERSION_MAJOR,
468 DLM_DEVICE_VERSION_MINOR,
469 DLM_DEVICE_VERSION_PATCH);
470 return -EINVAL;
471 }
472 return 0;
473}
474
475/*
476 * device_write
477 *
478 * device_user_lock
479 * dlm_user_request -> request_lock
480 * dlm_user_convert -> convert_lock
481 *
482 * device_user_unlock
483 * dlm_user_unlock -> unlock_lock
484 * dlm_user_cancel -> cancel_lock
485 *
486 * device_create_lockspace
487 * dlm_new_lockspace
488 *
489 * device_remove_lockspace
490 * dlm_release_lockspace
491 */
492
493/* a write to a lockspace device is a lock or unlock request, a write
494 to the control device is to create/remove a lockspace */
495
496static ssize_t device_write(struct file *file, const char __user *buf,
497 size_t count, loff_t *ppos)
498{
499 struct dlm_user_proc *proc = file->private_data;
500 struct dlm_write_request *kbuf;
501 int error;
502
503#ifdef CONFIG_COMPAT
504 if (count < sizeof(struct dlm_write_request32))
505#else
506 if (count < sizeof(struct dlm_write_request))
507#endif
508 return -EINVAL;
509
510 /*
511 * can't compare against COMPAT/dlm_write_request32 because
512 * we don't yet know if is64bit is zero
513 */
514 if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN)
515 return -EINVAL;
516
517 kbuf = memdup_user_nul(buf, count);
518 if (IS_ERR(kbuf))
519 return PTR_ERR(kbuf);
520
521 if (check_version(kbuf)) {
522 error = -EBADE;
523 goto out_free;
524 }
525
526#ifdef CONFIG_COMPAT
527 if (!kbuf->is64bit) {
528 struct dlm_write_request32 *k32buf;
529 int namelen = 0;
530
531 if (count > sizeof(struct dlm_write_request32))
532 namelen = count - sizeof(struct dlm_write_request32);
533
534 k32buf = (struct dlm_write_request32 *)kbuf;
535
536 /* add 1 after namelen so that the name string is terminated */
537 kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1,
538 GFP_NOFS);
539 if (!kbuf) {
540 kfree(k32buf);
541 return -ENOMEM;
542 }
543
544 if (proc)
545 set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
546
547 compat_input(kbuf, k32buf, namelen);
548 kfree(k32buf);
549 }
550#endif
551
552 /* do we really need this? can a write happen after a close? */
553 if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) &&
554 (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))) {
555 error = -EINVAL;
556 goto out_free;
557 }
558
559 error = -EINVAL;
560
561 switch (kbuf->cmd)
562 {
563 case DLM_USER_LOCK:
564 if (!proc) {
565 log_print("no locking on control device");
566 goto out_free;
567 }
568 error = device_user_lock(proc, &kbuf->i.lock);
569 break;
570
571 case DLM_USER_UNLOCK:
572 if (!proc) {
573 log_print("no locking on control device");
574 goto out_free;
575 }
576 error = device_user_unlock(proc, &kbuf->i.lock);
577 break;
578
579 case DLM_USER_DEADLOCK:
580 if (!proc) {
581 log_print("no locking on control device");
582 goto out_free;
583 }
584 error = device_user_deadlock(proc, &kbuf->i.lock);
585 break;
586
587 case DLM_USER_CREATE_LOCKSPACE:
588 if (proc) {
589 log_print("create/remove only on control device");
590 goto out_free;
591 }
592 error = device_create_lockspace(&kbuf->i.lspace);
593 break;
594
595 case DLM_USER_REMOVE_LOCKSPACE:
596 if (proc) {
597 log_print("create/remove only on control device");
598 goto out_free;
599 }
600 error = device_remove_lockspace(&kbuf->i.lspace);
601 break;
602
603 case DLM_USER_PURGE:
604 if (!proc) {
605 log_print("no locking on control device");
606 goto out_free;
607 }
608 error = device_user_purge(proc, &kbuf->i.purge);
609 break;
610
611 default:
612 log_print("Unknown command passed to DLM device : %d\n",
613 kbuf->cmd);
614 }
615
616 out_free:
617 kfree(kbuf);
618 return error;
619}
620
621/* Every process that opens the lockspace device has its own "proc" structure
622 hanging off the open file that's used to keep track of locks owned by the
623 process and asts that need to be delivered to the process. */
624
625static int device_open(struct inode *inode, struct file *file)
626{
627 struct dlm_user_proc *proc;
628 struct dlm_ls *ls;
629
630 ls = dlm_find_lockspace_device(iminor(inode));
631 if (!ls)
632 return -ENOENT;
633
634 proc = kzalloc(sizeof(struct dlm_user_proc), GFP_NOFS);
635 if (!proc) {
636 dlm_put_lockspace(ls);
637 return -ENOMEM;
638 }
639
640 proc->lockspace = ls->ls_local_handle;
641 INIT_LIST_HEAD(&proc->asts);
642 INIT_LIST_HEAD(&proc->locks);
643 INIT_LIST_HEAD(&proc->unlocking);
644 spin_lock_init(&proc->asts_spin);
645 spin_lock_init(&proc->locks_spin);
646 init_waitqueue_head(&proc->wait);
647 file->private_data = proc;
648
649 return 0;
650}
651
652static int device_close(struct inode *inode, struct file *file)
653{
654 struct dlm_user_proc *proc = file->private_data;
655 struct dlm_ls *ls;
656
657 ls = dlm_find_lockspace_local(proc->lockspace);
658 if (!ls)
659 return -ENOENT;
660
661 set_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags);
662
663 dlm_clear_proc_locks(ls, proc);
664
665 /* at this point no more lkb's should exist for this lockspace,
666 so there's no chance of dlm_user_add_ast() being called and
667 looking for lkb->ua->proc */
668
669 kfree(proc);
670 file->private_data = NULL;
671
672 dlm_put_lockspace(ls);
673 dlm_put_lockspace(ls); /* for the find in device_open() */
674
675 /* FIXME: AUTOFREE: if this ls is no longer used do
676 device_remove_lockspace() */
677
678 return 0;
679}
680
681static int copy_result_to_user(struct dlm_user_args *ua, int compat,
682 uint32_t flags, int mode, int copy_lvb,
683 char __user *buf, size_t count)
684{
685#ifdef CONFIG_COMPAT
686 struct dlm_lock_result32 result32;
687#endif
688 struct dlm_lock_result result;
689 void *resultptr;
690 int error=0;
691 int len;
692 int struct_len;
693
694 memset(&result, 0, sizeof(struct dlm_lock_result));
695 result.version[0] = DLM_DEVICE_VERSION_MAJOR;
696 result.version[1] = DLM_DEVICE_VERSION_MINOR;
697 result.version[2] = DLM_DEVICE_VERSION_PATCH;
698 memcpy(&result.lksb, &ua->lksb, sizeof(struct dlm_lksb));
699 result.user_lksb = ua->user_lksb;
700
701 /* FIXME: dlm1 provides for the user's bastparam/addr to not be updated
702 in a conversion unless the conversion is successful. See code
703 in dlm_user_convert() for updating ua from ua_tmp. OpenVMS, though,
704 notes that a new blocking AST address and parameter are set even if
705 the conversion fails, so maybe we should just do that. */
706
707 if (flags & DLM_CB_BAST) {
708 result.user_astaddr = ua->bastaddr;
709 result.user_astparam = ua->bastparam;
710 result.bast_mode = mode;
711 } else {
712 result.user_astaddr = ua->castaddr;
713 result.user_astparam = ua->castparam;
714 }
715
716#ifdef CONFIG_COMPAT
717 if (compat)
718 len = sizeof(struct dlm_lock_result32);
719 else
720#endif
721 len = sizeof(struct dlm_lock_result);
722 struct_len = len;
723
724 /* copy lvb to userspace if there is one, it's been updated, and
725 the user buffer has space for it */
726
727 if (copy_lvb && ua->lksb.sb_lvbptr && count >= len + DLM_USER_LVB_LEN) {
728 if (copy_to_user(buf+len, ua->lksb.sb_lvbptr,
729 DLM_USER_LVB_LEN)) {
730 error = -EFAULT;
731 goto out;
732 }
733
734 result.lvb_offset = len;
735 len += DLM_USER_LVB_LEN;
736 }
737
738 result.length = len;
739 resultptr = &result;
740#ifdef CONFIG_COMPAT
741 if (compat) {
742 compat_output(&result, &result32);
743 resultptr = &result32;
744 }
745#endif
746
747 if (copy_to_user(buf, resultptr, struct_len))
748 error = -EFAULT;
749 else
750 error = len;
751 out:
752 return error;
753}
754
755static int copy_version_to_user(char __user *buf, size_t count)
756{
757 struct dlm_device_version ver;
758
759 memset(&ver, 0, sizeof(struct dlm_device_version));
760 ver.version[0] = DLM_DEVICE_VERSION_MAJOR;
761 ver.version[1] = DLM_DEVICE_VERSION_MINOR;
762 ver.version[2] = DLM_DEVICE_VERSION_PATCH;
763
764 if (copy_to_user(buf, &ver, sizeof(struct dlm_device_version)))
765 return -EFAULT;
766 return sizeof(struct dlm_device_version);
767}
768
769/* a read returns a single ast described in a struct dlm_lock_result */
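
/* A matching userspace read loop might look roughly like this
   (illustrative only; ls_fd is assumed to be an fd open on the
   per-lockspace miscdevice registered by dlm_device_register(), and
   error handling is omitted):

	char buf[sizeof(struct dlm_lock_result) + DLM_USER_LVB_LEN];
	struct dlm_lock_result *res = (struct dlm_lock_result *)buf;
	void (*astfn)(void *arg);

	while (read(ls_fd, buf, sizeof(buf)) > 0) {
		astfn = (void (*)(void *))res->user_astaddr;
		astfn(res->user_astparam);
	}

   res->lksb.sb_status carries the completion status, res->bast_mode the
   mode of a blocking ast, and res->lvb_offset (if nonzero) locates the
   returned lvb within buf.  A read of exactly
   sizeof(struct dlm_device_version) bytes instead returns the kernel's
   device interface version (see copy_version_to_user() above). */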
770
771static ssize_t device_read(struct file *file, char __user *buf, size_t count,
772 loff_t *ppos)
773{
774 struct dlm_user_proc *proc = file->private_data;
775 struct dlm_lkb *lkb;
776 DECLARE_WAITQUEUE(wait, current);
777 struct dlm_callback cb;
778 int rv, resid, copy_lvb = 0;
779 int old_mode, new_mode;
780
781 if (count == sizeof(struct dlm_device_version)) {
782 rv = copy_version_to_user(buf, count);
783 return rv;
784 }
785
786 if (!proc) {
787 log_print("non-version read from control device %zu", count);
788 return -EINVAL;
789 }
790
791#ifdef CONFIG_COMPAT
792 if (count < sizeof(struct dlm_lock_result32))
793#else
794 if (count < sizeof(struct dlm_lock_result))
795#endif
796 return -EINVAL;
797
798 try_another:
799
800 /* do we really need this? can a read happen after a close? */
801 if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
802 return -EINVAL;
803
804 spin_lock(&proc->asts_spin);
805 if (list_empty(&proc->asts)) {
806 if (file->f_flags & O_NONBLOCK) {
807 spin_unlock(&proc->asts_spin);
808 return -EAGAIN;
809 }
810
811 add_wait_queue(&proc->wait, &wait);
812
813 repeat:
814 set_current_state(TASK_INTERRUPTIBLE);
815 if (list_empty(&proc->asts) && !signal_pending(current)) {
816 spin_unlock(&proc->asts_spin);
817 schedule();
818 spin_lock(&proc->asts_spin);
819 goto repeat;
820 }
821 set_current_state(TASK_RUNNING);
822 remove_wait_queue(&proc->wait, &wait);
823
824 if (signal_pending(current)) {
825 spin_unlock(&proc->asts_spin);
826 return -ERESTARTSYS;
827 }
828 }
829
830 /* if we empty lkb_callbacks, we don't want to unlock the spinlock
831 without removing lkb_cb_list; so empty lkb_cb_list is always
832 consistent with empty lkb_callbacks */
833
834 lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_cb_list);
835
836 /* rem_lkb_callback sets a new lkb_last_cast */
837 old_mode = lkb->lkb_last_cast.mode;
838
839 rv = dlm_rem_lkb_callback(lkb->lkb_resource->res_ls, lkb, &cb, &resid);
840 if (rv < 0) {
841 /* this shouldn't happen; lkb should have been removed from
842 list when resid was zero */
843 log_print("dlm_rem_lkb_callback empty %x", lkb->lkb_id);
844 list_del_init(&lkb->lkb_cb_list);
845 spin_unlock(&proc->asts_spin);
846 /* removes ref for proc->asts, may cause lkb to be freed */
847 dlm_put_lkb(lkb);
848 goto try_another;
849 }
850 if (!resid)
851 list_del_init(&lkb->lkb_cb_list);
852 spin_unlock(&proc->asts_spin);
853
854 if (cb.flags & DLM_CB_SKIP) {
855 /* removes ref for proc->asts, may cause lkb to be freed */
856 if (!resid)
857 dlm_put_lkb(lkb);
858 goto try_another;
859 }
860
861 if (cb.flags & DLM_CB_CAST) {
862 new_mode = cb.mode;
863
864 if (!cb.sb_status && lkb->lkb_lksb->sb_lvbptr &&
865 dlm_lvb_operations[old_mode + 1][new_mode + 1])
866 copy_lvb = 1;
867
868 lkb->lkb_lksb->sb_status = cb.sb_status;
869 lkb->lkb_lksb->sb_flags = cb.sb_flags;
870 }
871
872 rv = copy_result_to_user(lkb->lkb_ua,
873 test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
874 cb.flags, cb.mode, copy_lvb, buf, count);
875
876 /* removes ref for proc->asts, may cause lkb to be freed */
877 if (!resid)
878 dlm_put_lkb(lkb);
879
880 return rv;
881}
882
883static unsigned int device_poll(struct file *file, poll_table *wait)
884{
885 struct dlm_user_proc *proc = file->private_data;
886
887 poll_wait(file, &proc->wait, wait);
888
889 spin_lock(&proc->asts_spin);
890 if (!list_empty(&proc->asts)) {
891 spin_unlock(&proc->asts_spin);
892 return POLLIN | POLLRDNORM;
893 }
894 spin_unlock(&proc->asts_spin);
895 return 0;
896}
897
898int dlm_user_daemon_available(void)
899{
900 /* dlm_controld hasn't started (or, has started, but not
901 properly populated configfs) */
902
903 if (!dlm_our_nodeid())
904 return 0;
905
906 /* This is to deal with versions of dlm_controld that don't
907 know about the monitor device. We assume that if the
908 dlm_controld was started (above), but the monitor device
909 was never opened, that it's an old version. dlm_controld
910 should open the monitor device before populating configfs. */
911
912 if (dlm_monitor_unused)
913 return 1;
914
915 return atomic_read(&dlm_monitor_opened) ? 1 : 0;
916}
917
918static int ctl_device_open(struct inode *inode, struct file *file)
919{
920 file->private_data = NULL;
921 return 0;
922}
923
924static int ctl_device_close(struct inode *inode, struct file *file)
925{
926 return 0;
927}
928
929static int monitor_device_open(struct inode *inode, struct file *file)
930{
931 atomic_inc(&dlm_monitor_opened);
932 dlm_monitor_unused = 0;
933 return 0;
934}
935
936static int monitor_device_close(struct inode *inode, struct file *file)
937{
938 if (atomic_dec_and_test(&dlm_monitor_opened))
939 dlm_stop_lockspaces();
940 return 0;
941}
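
/* The daemon side of this is only a convention (sketch; the
   /dev/dlm-monitor path assumes standard udev naming for miscdevices):

	int monitor_fd = open("/dev/dlm-monitor", O_RDONLY);
	then populate configfs and keep monitor_fd open for the daemon's
	lifetime

   Holding the device open is what dlm_user_daemon_available() checks;
   when the last opener goes away, monitor_device_close() above calls
   dlm_stop_lockspaces(). */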
942
943static const struct file_operations device_fops = {
944 .open = device_open,
945 .release = device_close,
946 .read = device_read,
947 .write = device_write,
948 .poll = device_poll,
949 .owner = THIS_MODULE,
950 .llseek = noop_llseek,
951};
952
953static const struct file_operations ctl_device_fops = {
954 .open = ctl_device_open,
955 .release = ctl_device_close,
956 .read = device_read,
957 .write = device_write,
958 .owner = THIS_MODULE,
959 .llseek = noop_llseek,
960};
961
962static struct miscdevice ctl_device = {
963 .name = "dlm-control",
964 .fops = &ctl_device_fops,
965 .minor = MISC_DYNAMIC_MINOR,
966};
967
968static const struct file_operations monitor_device_fops = {
969 .open = monitor_device_open,
970 .release = monitor_device_close,
971 .owner = THIS_MODULE,
972 .llseek = noop_llseek,
973};
974
975static struct miscdevice monitor_device = {
976 .name = "dlm-monitor",
977 .fops = &monitor_device_fops,
978 .minor = MISC_DYNAMIC_MINOR,
979};
980
981int __init dlm_user_init(void)
982{
983 int error;
984
985 atomic_set(&dlm_monitor_opened, 0);
986
987 error = misc_register(&ctl_device);
988 if (error) {
989 log_print("misc_register failed for control device");
990 goto out;
991 }
992
993 error = misc_register(&monitor_device);
994 if (error) {
995 log_print("misc_register failed for monitor device");
996 misc_deregister(&ctl_device);
997 }
998 out:
999 return error;
1000}
1001
1002void dlm_user_exit(void)
1003{
1004 misc_deregister(&ctl_device);
1005 misc_deregister(&monitor_device);
1006}
1007