1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2006-2010 Red Hat, Inc. All rights reserved.
4 */
5
6#include <linux/miscdevice.h>
7#include <linux/init.h>
8#include <linux/wait.h>
9#include <linux/file.h>
10#include <linux/fs.h>
11#include <linux/poll.h>
12#include <linux/signal.h>
13#include <linux/spinlock.h>
14#include <linux/dlm.h>
15#include <linux/dlm_device.h>
16#include <linux/slab.h>
17#include <linux/sched/signal.h>
18
19#include <trace/events/dlm.h>
20
21#include "dlm_internal.h"
22#include "lockspace.h"
23#include "lock.h"
24#include "lvb_table.h"
25#include "user.h"
26#include "ast.h"
27#include "config.h"
28#include "memory.h"
29
30static const char name_prefix[] = "dlm";
31static const struct file_operations device_fops;
32static atomic_t dlm_monitor_opened;
33static int dlm_monitor_unused = 1;
34
35#ifdef CONFIG_COMPAT
36
37struct dlm_lock_params32 {
38 __u8 mode;
39 __u8 namelen;
40 __u16 unused;
41 __u32 flags;
42 __u32 lkid;
43 __u32 parent;
44 __u64 xid;
45 __u64 timeout;
46 __u32 castparam;
47 __u32 castaddr;
48 __u32 bastparam;
49 __u32 bastaddr;
50 __u32 lksb;
51 char lvb[DLM_USER_LVB_LEN];
52 char name[];
53};
54
55struct dlm_write_request32 {
56 __u32 version[3];
57 __u8 cmd;
58 __u8 is64bit;
59 __u8 unused[2];
60
61 union {
62 struct dlm_lock_params32 lock;
63 struct dlm_lspace_params lspace;
64 struct dlm_purge_params purge;
65 } i;
66};
67
68struct dlm_lksb32 {
69 __u32 sb_status;
70 __u32 sb_lkid;
71 __u8 sb_flags;
72 __u32 sb_lvbptr;
73};
74
75struct dlm_lock_result32 {
76 __u32 version[3];
77 __u32 length;
78 __u32 user_astaddr;
79 __u32 user_astparam;
80 __u32 user_lksb;
81 struct dlm_lksb32 lksb;
82 __u8 bast_mode;
83 __u8 unused[3];
84 /* Offsets may be zero if no data is present */
85 __u32 lvb_offset;
86};
87
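/* Copy a 32-bit userspace request into the native dlm_write_request
   layout; the 32-bit handle fields are widened back into pointers. */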
88static void compat_input(struct dlm_write_request *kb,
89 struct dlm_write_request32 *kb32,
90 int namelen)
91{
92 kb->version[0] = kb32->version[0];
93 kb->version[1] = kb32->version[1];
94 kb->version[2] = kb32->version[2];
95
96 kb->cmd = kb32->cmd;
97 kb->is64bit = kb32->is64bit;
98 if (kb->cmd == DLM_USER_CREATE_LOCKSPACE ||
99 kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
100 kb->i.lspace.flags = kb32->i.lspace.flags;
101 kb->i.lspace.minor = kb32->i.lspace.minor;
102 memcpy(kb->i.lspace.name, kb32->i.lspace.name, namelen);
103 } else if (kb->cmd == DLM_USER_PURGE) {
104 kb->i.purge.nodeid = kb32->i.purge.nodeid;
105 kb->i.purge.pid = kb32->i.purge.pid;
106 } else {
107 kb->i.lock.mode = kb32->i.lock.mode;
108 kb->i.lock.namelen = kb32->i.lock.namelen;
109 kb->i.lock.flags = kb32->i.lock.flags;
110 kb->i.lock.lkid = kb32->i.lock.lkid;
111 kb->i.lock.parent = kb32->i.lock.parent;
112 kb->i.lock.xid = kb32->i.lock.xid;
113 kb->i.lock.timeout = kb32->i.lock.timeout;
114 kb->i.lock.castparam = (__user void *)(long)kb32->i.lock.castparam;
115 kb->i.lock.castaddr = (__user void *)(long)kb32->i.lock.castaddr;
116 kb->i.lock.bastparam = (__user void *)(long)kb32->i.lock.bastparam;
117 kb->i.lock.bastaddr = (__user void *)(long)kb32->i.lock.bastaddr;
118 kb->i.lock.lksb = (__user void *)(long)kb32->i.lock.lksb;
119 memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
120 memcpy(kb->i.lock.name, kb32->i.lock.name, namelen);
121 }
122}
123
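/* Convert a native dlm_lock_result into the 32-bit layout expected by a
   compat process; pointer fields are narrowed to 32-bit handles. */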
124static void compat_output(struct dlm_lock_result *res,
125 struct dlm_lock_result32 *res32)
126{
127 memset(res32, 0, sizeof(*res32));
128
129 res32->version[0] = res->version[0];
130 res32->version[1] = res->version[1];
131 res32->version[2] = res->version[2];
132
133 res32->user_astaddr = (__u32)(__force long)res->user_astaddr;
134 res32->user_astparam = (__u32)(__force long)res->user_astparam;
135 res32->user_lksb = (__u32)(__force long)res->user_lksb;
136 res32->bast_mode = res->bast_mode;
137
138 res32->lvb_offset = res->lvb_offset;
139 res32->length = res->length;
140
141 res32->lksb.sb_status = res->lksb.sb_status;
142 res32->lksb.sb_flags = res->lksb.sb_flags;
143 res32->lksb.sb_lkid = res->lksb.sb_lkid;
144 res32->lksb.sb_lvbptr = (__u32)(long)res->lksb.sb_lvbptr;
145}
146#endif
147
148/* Figure out if this lock is at the end of its life and no longer
149 available for the application to use. The lkb still exists until
150 the final ast is read. A lock becomes EOL in three situations:
151 1. a noqueue request fails with EAGAIN
152 2. an unlock completes with EUNLOCK
153 3. a cancel of a waiting request completes with ECANCEL/EDEADLK
154 An EOL lock needs to be removed from the process's list of locks.
155 And we can't allow any new operation on an EOL lock. This is
156 not related to the lifetime of the lkb struct which is managed
157 entirely by refcount. */
158
159static int lkb_is_endoflife(int mode, int status)
160{
161 switch (status) {
162 case -DLM_EUNLOCK:
163 return 1;
164 case -DLM_ECANCEL:
165 case -ETIMEDOUT:
166 case -EDEADLK:
167 case -EAGAIN:
168 if (mode == DLM_LOCK_IV)
169 return 1;
170 break;
171 }
172 return 0;
173}
174
175/* we could possibly check if the cancel of an orphan has resulted in the lkb
176 being removed and then remove that lkb from the orphans list and free it */
177
178void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
179 int status, uint32_t sbflags)
180{
181 struct dlm_ls *ls;
182 struct dlm_user_args *ua;
183 struct dlm_user_proc *proc;
184 struct dlm_callback *cb;
185 int rv, copy_lvb;
186
187 if (test_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags) ||
188 test_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags))
189 return;
190
191 ls = lkb->lkb_resource->res_ls;
192 spin_lock_bh(&ls->ls_clear_proc_locks);
193
194 /* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
195 can't be delivered. For ORPHAN's, dlm_clear_proc_locks() freed
196 lkb->ua so we can't try to use it. This second check is necessary
197 for cases where a completion ast is received for an operation that
198 began before clear_proc_locks did its cancel/unlock. */
199
200 if (test_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags) ||
201 test_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags))
202 goto out;
203
204 DLM_ASSERT(lkb->lkb_ua, dlm_print_lkb(lkb););
205 ua = lkb->lkb_ua;
206 proc = ua->proc;
207
208 if ((flags & DLM_CB_BAST) && ua->bastaddr == NULL)
209 goto out;
210
211 if ((flags & DLM_CB_CAST) && lkb_is_endoflife(mode, status))
212 set_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags);
213
214 spin_lock_bh(&proc->asts_spin);
215
216 if (!dlm_may_skip_callback(lkb, flags, mode, status, sbflags,
217 &copy_lvb)) {
218 rv = dlm_get_cb(lkb, flags, mode, status, sbflags, &cb);
219 if (!rv) {
220 cb->copy_lvb = copy_lvb;
221 cb->ua = *ua;
222 cb->lkb_lksb = &cb->ua.lksb;
223 if (copy_lvb) {
224 memcpy(cb->lvbptr, ua->lksb.sb_lvbptr,
225 DLM_USER_LVB_LEN);
226 cb->lkb_lksb->sb_lvbptr = cb->lvbptr;
227 }
228
229 list_add_tail(&cb->list, &proc->asts);
230 wake_up_interruptible(&proc->wait);
231 }
232 }
233 spin_unlock_bh(&proc->asts_spin);
234
235 if (test_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags)) {
236 /* N.B. spin_lock locks_spin, not asts_spin */
237 spin_lock_bh(&proc->locks_spin);
238 if (!list_empty(&lkb->lkb_ownqueue)) {
239 list_del_init(&lkb->lkb_ownqueue);
240 dlm_put_lkb(lkb);
241 }
242 spin_unlock_bh(&proc->locks_spin);
243 }
244 out:
245 spin_unlock_bh(&ls->ls_clear_proc_locks);
246}
247
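/* A DLM_USER_LOCK write becomes a new request, a conversion, or the
   adoption of an orphan lock depending on the flags; for new and
   adopted locks the lock id is returned on success. */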
248static int device_user_lock(struct dlm_user_proc *proc,
249 struct dlm_lock_params *params)
250{
251 struct dlm_ls *ls;
252 struct dlm_user_args *ua;
253 uint32_t lkid;
254 int error = -ENOMEM;
255
256 ls = dlm_find_lockspace_local(proc->lockspace);
257 if (!ls)
258 return -ENOENT;
259
260 if (!params->castaddr || !params->lksb) {
261 error = -EINVAL;
262 goto out;
263 }
264
265 ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
266 if (!ua)
267 goto out;
268 ua->proc = proc;
269 ua->user_lksb = params->lksb;
270 ua->castparam = params->castparam;
271 ua->castaddr = params->castaddr;
272 ua->bastparam = params->bastparam;
273 ua->bastaddr = params->bastaddr;
274 ua->xid = params->xid;
275
276 if (params->flags & DLM_LKF_CONVERT) {
277 error = dlm_user_convert(ls, ua,
278 params->mode, params->flags,
279 params->lkid, params->lvb);
280 } else if (params->flags & DLM_LKF_ORPHAN) {
281 error = dlm_user_adopt_orphan(ls, ua,
282 params->mode, params->flags,
283 params->name, params->namelen,
284 &lkid);
285 if (!error)
286 error = lkid;
287 } else {
288 error = dlm_user_request(ls, ua,
289 params->mode, params->flags,
290 params->name, params->namelen);
291 if (!error)
292 error = ua->lksb.sb_lkid;
293 }
294 out:
295 dlm_put_lockspace(ls);
296 return error;
297}
298
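/* A DLM_USER_UNLOCK write is either a cancel or an unlock of an
   existing lock, selected by DLM_LKF_CANCEL. */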
299static int device_user_unlock(struct dlm_user_proc *proc,
300 struct dlm_lock_params *params)
301{
302 struct dlm_ls *ls;
303 struct dlm_user_args *ua;
304 int error = -ENOMEM;
305
306 ls = dlm_find_lockspace_local(proc->lockspace);
307 if (!ls)
308 return -ENOENT;
309
310 ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
311 if (!ua)
312 goto out;
313 ua->proc = proc;
314 ua->user_lksb = params->lksb;
315 ua->castparam = params->castparam;
316 ua->castaddr = params->castaddr;
317
318 if (params->flags & DLM_LKF_CANCEL)
319 error = dlm_user_cancel(ls, ua, params->flags, params->lkid);
320 else
321 error = dlm_user_unlock(ls, ua, params->flags, params->lkid,
322 params->lvb);
323 out:
324 dlm_put_lockspace(ls);
325 return error;
326}
327
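/* A DLM_USER_DEADLOCK write reports a userspace-detected deadlock;
   dlm_user_deadlock() cancels the given lock request. */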
328static int device_user_deadlock(struct dlm_user_proc *proc,
329 struct dlm_lock_params *params)
330{
331 struct dlm_ls *ls;
332 int error;
333
334 ls = dlm_find_lockspace_local(proc->lockspace);
335 if (!ls)
336 return -ENOENT;
337
338 error = dlm_user_deadlock(ls, params->flags, params->lkid);
339
340 dlm_put_lockspace(ls);
341 return error;
342}
343
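/* Register the per-lockspace misc device ("dlm_<name>") that userspace
   opens to do locking in this lockspace. */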
344static int dlm_device_register(struct dlm_ls *ls, char *name)
345{
346 int error, len;
347
348 /* The device is already registered. This happens when the
349 lockspace is created multiple times from userspace. */
350 if (ls->ls_device.name)
351 return 0;
352
353 error = -ENOMEM;
354 len = strlen(name) + strlen(name_prefix) + 2;
355 ls->ls_device.name = kzalloc(len, GFP_NOFS);
356 if (!ls->ls_device.name)
357 goto fail;
358
359 snprintf((char *)ls->ls_device.name, len, "%s_%s", name_prefix,
360 name);
361 ls->ls_device.fops = &device_fops;
362 ls->ls_device.minor = MISC_DYNAMIC_MINOR;
363
364 error = misc_register(&ls->ls_device);
365 if (error) {
366 kfree(ls->ls_device.name);
367 /* this has to be set to NULL
368 * to avoid a double-free in dlm_device_deregister
369 */
370 ls->ls_device.name = NULL;
371 }
372fail:
373 return error;
374}
375
376int dlm_device_deregister(struct dlm_ls *ls)
377{
378 /* The device is not registered. This happens when the lockspace
379 was never used from userspace, or when device_create_lockspace()
380 calls dlm_release_lockspace() after the register fails. */
381 if (!ls->ls_device.name)
382 return 0;
383
384 misc_deregister(&ls->ls_device);
385 kfree(ls->ls_device.name);
386 return 0;
387}
388
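/* A DLM_USER_PURGE write asks the lock manager to release the locks
   held on behalf of the given nodeid/pid. */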
389static int device_user_purge(struct dlm_user_proc *proc,
390 struct dlm_purge_params *params)
391{
392 struct dlm_ls *ls;
393 int error;
394
395 ls = dlm_find_lockspace_local(proc->lockspace);
396 if (!ls)
397 return -ENOENT;
398
399 error = dlm_user_purge(ls, proc, params->nodeid, params->pid);
400
401 dlm_put_lockspace(ls);
402 return error;
403}
404
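/* Create a new user lockspace and register its misc device; the new
   device minor is returned on success. */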
405static int device_create_lockspace(struct dlm_lspace_params *params)
406{
407 dlm_lockspace_t *lockspace;
408 struct dlm_ls *ls;
409 int error;
410
411 if (!capable(CAP_SYS_ADMIN))
412 return -EPERM;
413
414 error = dlm_new_user_lockspace(params->name, dlm_config.ci_cluster_name,
415 params->flags, DLM_USER_LVB_LEN, NULL,
416 NULL, NULL, &lockspace);
417 if (error)
418 return error;
419
420 ls = dlm_find_lockspace_local(lockspace);
421 if (!ls)
422 return -ENOENT;
423
424 error = dlm_device_register(ls, params->name);
425 dlm_put_lockspace(ls);
426
427 if (error)
428 dlm_release_lockspace(lockspace, 0);
429 else
430 error = ls->ls_device.minor;
431
432 return error;
433}
434
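/* Remove a lockspace created above; dlm_release_lockspace() ends up
   calling dlm_device_deregister() to remove the misc device. */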
435static int device_remove_lockspace(struct dlm_lspace_params *params)
436{
437 dlm_lockspace_t *lockspace;
438 struct dlm_ls *ls;
439 int error, force = 0;
440
441 if (!capable(CAP_SYS_ADMIN))
442 return -EPERM;
443
444 ls = dlm_find_lockspace_device(params->minor);
445 if (!ls)
446 return -ENOENT;
447
448 if (params->flags & DLM_USER_LSFLG_FORCEFREE)
449 force = 2;
450
451 lockspace = ls;
452 dlm_put_lockspace(ls);
453
454 /* The final dlm_release_lockspace waits for references to go to
455 zero, so all processes will need to close their device for the
456 ls before the release will proceed. release also calls the
457 device_deregister above. Converting a positive return value
458 from release to zero means that userspace won't know when its
459 release was the final one, but it shouldn't need to know. */
460
461 error = dlm_release_lockspace(lockspace, force);
462 if (error > 0)
463 error = 0;
464 return error;
465}
466
467/* Check the user's version matches ours */
468static int check_version(struct dlm_write_request *req)
469{
470 if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
471 (req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
472 req->version[1] > DLM_DEVICE_VERSION_MINOR)) {
473
474 printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
475 "user (%d.%d.%d) kernel (%d.%d.%d)\n",
476 current->comm,
477 task_pid_nr(current),
478 req->version[0],
479 req->version[1],
480 req->version[2],
481 DLM_DEVICE_VERSION_MAJOR,
482 DLM_DEVICE_VERSION_MINOR,
483 DLM_DEVICE_VERSION_PATCH);
484 return -EINVAL;
485 }
486 return 0;
487}
488
489/*
490 * device_write
491 *
492 * device_user_lock
493 * dlm_user_request -> request_lock
494 * dlm_user_convert -> convert_lock
495 *
496 * device_user_unlock
497 * dlm_user_unlock -> unlock_lock
498 * dlm_user_cancel -> cancel_lock
499 *
500 * device_create_lockspace
501 * dlm_new_lockspace
502 *
503 * device_remove_lockspace
504 * dlm_release_lockspace
505 */
506
507/* a write to a lockspace device is a lock or unlock request, a write
508 to the control device is to create/remove a lockspace */
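/* Illustrative only, with placeholder names (req, lksb, ast_routine):
   userspace (normally via libdlm) fills a struct dlm_write_request,
   appends the resource name for a new request, and writes the whole
   buffer to the lockspace device, e.g.

	req->version[0] = DLM_DEVICE_VERSION_MAJOR;
	req->version[1] = DLM_DEVICE_VERSION_MINOR;
	req->version[2] = DLM_DEVICE_VERSION_PATCH;
	req->cmd = DLM_USER_LOCK;
	req->is64bit = (sizeof(long) == 8);
	req->i.lock.mode = DLM_LOCK_EX;
	req->i.lock.lksb = &lksb;
	req->i.lock.castaddr = ast_routine;
	req->i.lock.namelen = namelen;
	memcpy(req->i.lock.name, name, namelen);
	write(fd, req, sizeof(*req) + namelen);

   lksb and castaddr must not be NULL (see device_user_lock), and the
   field layout is struct dlm_lock_params in linux/dlm_device.h. */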
509
510static ssize_t device_write(struct file *file, const char __user *buf,
511 size_t count, loff_t *ppos)
512{
513 struct dlm_user_proc *proc = file->private_data;
514 struct dlm_write_request *kbuf;
515 int error;
516
517#ifdef CONFIG_COMPAT
518 if (count < sizeof(struct dlm_write_request32))
519#else
520 if (count < sizeof(struct dlm_write_request))
521#endif
522 return -EINVAL;
523
524 /*
525 * can't compare against COMPAT/dlm_write_request32 because
526 * we don't yet know if is64bit is zero
527 */
528 if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN)
529 return -EINVAL;
530
531 kbuf = memdup_user_nul(buf, count);
532 if (IS_ERR(kbuf))
533 return PTR_ERR(kbuf);
534
535 if (check_version(kbuf)) {
536 error = -EBADE;
537 goto out_free;
538 }
539
540#ifdef CONFIG_COMPAT
541 if (!kbuf->is64bit) {
542 struct dlm_write_request32 *k32buf;
543 int namelen = 0;
544
545 if (count > sizeof(struct dlm_write_request32))
546 namelen = count - sizeof(struct dlm_write_request32);
547
548 k32buf = (struct dlm_write_request32 *)kbuf;
549
550 /* add 1 after namelen so that the name string is terminated */
551 kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1,
552 GFP_NOFS);
553 if (!kbuf) {
554 kfree(k32buf);
555 return -ENOMEM;
556 }
557
558 if (proc)
559 set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
560
561 compat_input(kbuf, k32buf, namelen);
562 kfree(k32buf);
563 }
564#endif
565
566 /* do we really need this? can a write happen after a close? */
567 if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) &&
568 (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))) {
569 error = -EINVAL;
570 goto out_free;
571 }
572
573 error = -EINVAL;
574
575 switch (kbuf->cmd)
576 {
577 case DLM_USER_LOCK:
578 if (!proc) {
579 log_print("no locking on control device");
580 goto out_free;
581 }
582 error = device_user_lock(proc, &kbuf->i.lock);
583 break;
584
585 case DLM_USER_UNLOCK:
586 if (!proc) {
587 log_print("no locking on control device");
588 goto out_free;
589 }
590 error = device_user_unlock(proc, &kbuf->i.lock);
591 break;
592
593 case DLM_USER_DEADLOCK:
594 if (!proc) {
595 log_print("no locking on control device");
596 goto out_free;
597 }
598 error = device_user_deadlock(proc, &kbuf->i.lock);
599 break;
600
601 case DLM_USER_CREATE_LOCKSPACE:
602 if (proc) {
603 log_print("create/remove only on control device");
604 goto out_free;
605 }
606 error = device_create_lockspace(&kbuf->i.lspace);
607 break;
608
609 case DLM_USER_REMOVE_LOCKSPACE:
610 if (proc) {
611 log_print("create/remove only on control device");
612 goto out_free;
613 }
614 error = device_remove_lockspace(&kbuf->i.lspace);
615 break;
616
617 case DLM_USER_PURGE:
618 if (!proc) {
619 log_print("no locking on control device");
620 goto out_free;
621 }
622 error = device_user_purge(proc, &kbuf->i.purge);
623 break;
624
625 default:
626 log_print("Unknown command passed to DLM device : %d\n",
627 kbuf->cmd);
628 }
629
630 out_free:
631 kfree(kbuf);
632 return error;
633}
634
635/* Every process that opens the lockspace device has its own "proc" structure
636 hanging off the open file that's used to keep track of locks owned by the
637 process and asts that need to be delivered to the process. */
638
639static int device_open(struct inode *inode, struct file *file)
640{
641 struct dlm_user_proc *proc;
642 struct dlm_ls *ls;
643
644 ls = dlm_find_lockspace_device(iminor(inode));
645 if (!ls)
646 return -ENOENT;
647
648 proc = kzalloc(sizeof(struct dlm_user_proc), GFP_NOFS);
649 if (!proc) {
650 dlm_put_lockspace(ls);
651 return -ENOMEM;
652 }
653
654 proc->lockspace = ls;
655 INIT_LIST_HEAD(&proc->asts);
656 INIT_LIST_HEAD(&proc->locks);
657 INIT_LIST_HEAD(&proc->unlocking);
658 spin_lock_init(&proc->asts_spin);
659 spin_lock_init(&proc->locks_spin);
660 init_waitqueue_head(&proc->wait);
661 file->private_data = proc;
662
663 return 0;
664}
665
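/* Closing a lockspace device tears down the proc: its remaining locks
   are cancelled/unlocked by dlm_clear_proc_locks() and no new asts
   will be queued for it. */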
666static int device_close(struct inode *inode, struct file *file)
667{
668 struct dlm_user_proc *proc = file->private_data;
669 struct dlm_ls *ls;
670
671 ls = dlm_find_lockspace_local(proc->lockspace);
672 if (!ls)
673 return -ENOENT;
674
675 set_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags);
676
677 dlm_clear_proc_locks(ls, proc);
678
679 /* at this point no more lkb's should exist for this lockspace,
680 so there's no chance of dlm_user_add_ast() being called and
681 looking for lkb->ua->proc */
682
683 kfree(proc);
684 file->private_data = NULL;
685
686 dlm_put_lockspace(ls);
687 dlm_put_lockspace(ls); /* for the find in device_open() */
688
689 /* FIXME: AUTOFREE: if this ls is no longer used do
690 device_remove_lockspace() */
691
692 return 0;
693}
694
695static int copy_result_to_user(struct dlm_user_args *ua, int compat,
696 uint32_t flags, int mode, int copy_lvb,
697 char __user *buf, size_t count)
698{
699#ifdef CONFIG_COMPAT
700 struct dlm_lock_result32 result32;
701#endif
702 struct dlm_lock_result result;
703 void *resultptr;
704 int error=0;
705 int len;
706 int struct_len;
707
708 memset(&result, 0, sizeof(struct dlm_lock_result));
709 result.version[0] = DLM_DEVICE_VERSION_MAJOR;
710 result.version[1] = DLM_DEVICE_VERSION_MINOR;
711 result.version[2] = DLM_DEVICE_VERSION_PATCH;
712 memcpy(&result.lksb, &ua->lksb, offsetof(struct dlm_lksb, sb_lvbptr));
713 result.user_lksb = ua->user_lksb;
714
715 /* FIXME: dlm1 provides for the user's bastparam/addr to not be updated
716 in a conversion unless the conversion is successful. See code
717 in dlm_user_convert() for updating ua from ua_tmp. OpenVMS, though,
718 notes that a new blocking AST address and parameter are set even if
719 the conversion fails, so maybe we should just do that. */
720
721 if (flags & DLM_CB_BAST) {
722 result.user_astaddr = ua->bastaddr;
723 result.user_astparam = ua->bastparam;
724 result.bast_mode = mode;
725 } else {
726 result.user_astaddr = ua->castaddr;
727 result.user_astparam = ua->castparam;
728 }
729
730#ifdef CONFIG_COMPAT
731 if (compat)
732 len = sizeof(struct dlm_lock_result32);
733 else
734#endif
735 len = sizeof(struct dlm_lock_result);
736 struct_len = len;
737
738 /* copy lvb to userspace if there is one, it's been updated, and
739 the user buffer has space for it */
740
741 if (copy_lvb && ua->lksb.sb_lvbptr && count >= len + DLM_USER_LVB_LEN) {
742 if (copy_to_user(buf+len, ua->lksb.sb_lvbptr,
743 DLM_USER_LVB_LEN)) {
744 error = -EFAULT;
745 goto out;
746 }
747
748 result.lvb_offset = len;
749 len += DLM_USER_LVB_LEN;
750 }
751
752 result.length = len;
753 resultptr = &result;
754#ifdef CONFIG_COMPAT
755 if (compat) {
756 compat_output(&result, &result32);
757 resultptr = &result32;
758 }
759#endif
760
761 if (copy_to_user(buf, resultptr, struct_len))
762 error = -EFAULT;
763 else
764 error = len;
765 out:
766 return error;
767}
768
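/* Reads of exactly sizeof(struct dlm_device_version) return the
   kernel's device interface version. */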
769static int copy_version_to_user(char __user *buf, size_t count)
770{
771 struct dlm_device_version ver;
772
773 memset(&ver, 0, sizeof(struct dlm_device_version));
774 ver.version[0] = DLM_DEVICE_VERSION_MAJOR;
775 ver.version[1] = DLM_DEVICE_VERSION_MINOR;
776 ver.version[2] = DLM_DEVICE_VERSION_PATCH;
777
778 if (copy_to_user(buf, &ver, sizeof(struct dlm_device_version)))
779 return -EFAULT;
780 return sizeof(struct dlm_device_version);
781}
782
783/* a read returns a single ast described in a struct dlm_lock_result */
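/* Illustrative only, with placeholder names (fd, buf): the read buffer
   must hold a struct dlm_lock_result and, optionally, a trailing LVB:

	char buf[sizeof(struct dlm_lock_result) + DLM_USER_LVB_LEN];
	ssize_t n = read(fd, buf, sizeof(buf));
	struct dlm_lock_result *res = (struct dlm_lock_result *)buf;

   res->lvb_offset is nonzero when an updated LVB follows the struct
   (see copy_result_to_user above). */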
784
785static ssize_t device_read(struct file *file, char __user *buf, size_t count,
786 loff_t *ppos)
787{
788 struct dlm_user_proc *proc = file->private_data;
789 DECLARE_WAITQUEUE(wait, current);
790 struct dlm_callback *cb;
791 int rv, ret;
792
793 if (count == sizeof(struct dlm_device_version)) {
794 rv = copy_version_to_user(buf, count);
795 return rv;
796 }
797
798 if (!proc) {
799 log_print("non-version read from control device %zu", count);
800 return -EINVAL;
801 }
802
803#ifdef CONFIG_COMPAT
804 if (count < sizeof(struct dlm_lock_result32))
805#else
806 if (count < sizeof(struct dlm_lock_result))
807#endif
808 return -EINVAL;
809
810 /* do we really need this? can a read happen after a close? */
811 if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
812 return -EINVAL;
813
814 spin_lock_bh(&proc->asts_spin);
815 if (list_empty(&proc->asts)) {
816 if (file->f_flags & O_NONBLOCK) {
817 spin_unlock_bh(&proc->asts_spin);
818 return -EAGAIN;
819 }
820
821 add_wait_queue(&proc->wait, &wait);
822
823 repeat:
824 set_current_state(TASK_INTERRUPTIBLE);
825 if (list_empty(&proc->asts) && !signal_pending(current)) {
826 spin_unlock_bh(&proc->asts_spin);
827 schedule();
828 spin_lock_bh(&proc->asts_spin);
829 goto repeat;
830 }
831 set_current_state(TASK_RUNNING);
832 remove_wait_queue(&proc->wait, &wait);
833
834 if (signal_pending(current)) {
835 spin_unlock_bh(&proc->asts_spin);
836 return -ERESTARTSYS;
837 }
838 }
839
840 /* if we empty lkb_callbacks, we don't want to unlock the spinlock
841 without removing lkb_cb_list; so empty lkb_cb_list is always
842 consistent with empty lkb_callbacks */
843
844 cb = list_first_entry(&proc->asts, struct dlm_callback, list);
845 list_del(&cb->list);
846 spin_unlock_bh(&proc->asts_spin);
847
848 if (cb->flags & DLM_CB_BAST) {
849 trace_dlm_bast(cb->ls_id, cb->lkb_id, cb->mode, cb->res_name,
850 cb->res_length);
851 } else if (cb->flags & DLM_CB_CAST) {
852 cb->lkb_lksb->sb_status = cb->sb_status;
853 cb->lkb_lksb->sb_flags = cb->sb_flags;
854 trace_dlm_ast(cb->ls_id, cb->lkb_id, cb->sb_status,
855 cb->sb_flags, cb->res_name, cb->res_length);
856 }
857
858 ret = copy_result_to_user(&cb->ua,
859 test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
860 cb->flags, cb->mode, cb->copy_lvb, buf, count);
861 dlm_free_cb(cb);
862 return ret;
863}
864
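/* Poll reports the device readable once at least one callback is
   queued for this proc. */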
865static __poll_t device_poll(struct file *file, poll_table *wait)
866{
867 struct dlm_user_proc *proc = file->private_data;
868
869 poll_wait(file, &proc->wait, wait);
870
871 spin_lock_bh(&proc->asts_spin);
872 if (!list_empty(&proc->asts)) {
873 spin_unlock_bh(&proc->asts_spin);
874 return EPOLLIN | EPOLLRDNORM;
875 }
876 spin_unlock_bh(&proc->asts_spin);
877 return 0;
878}
879
880int dlm_user_daemon_available(void)
881{
882 /* dlm_controld hasn't started (or, has started, but not
883 properly populated configfs) */
884
885 if (!dlm_our_nodeid())
886 return 0;
887
888 /* This is to deal with versions of dlm_controld that don't
889 know about the monitor device. We assume that if the
890 dlm_controld was started (above), but the monitor device
891 was never opened, that it's an old version. dlm_controld
892 should open the monitor device before populating configfs. */
893
894 if (dlm_monitor_unused)
895 return 1;
896
897 return atomic_read(&dlm_monitor_opened) ? 1 : 0;
898}
899
900static int ctl_device_open(struct inode *inode, struct file *file)
901{
902 file->private_data = NULL;
903 return 0;
904}
905
906static int ctl_device_close(struct inode *inode, struct file *file)
907{
908 return 0;
909}
910
911static int monitor_device_open(struct inode *inode, struct file *file)
912{
913 atomic_inc(&dlm_monitor_opened);
914 dlm_monitor_unused = 0;
915 return 0;
916}
917
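/* When the last opener of the monitor device (normally dlm_controld)
   goes away, all lockspaces are stopped. */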
918static int monitor_device_close(struct inode *inode, struct file *file)
919{
920 if (atomic_dec_and_test(&dlm_monitor_opened))
921 dlm_stop_lockspaces();
922 return 0;
923}
924
925static const struct file_operations device_fops = {
926 .open = device_open,
927 .release = device_close,
928 .read = device_read,
929 .write = device_write,
930 .poll = device_poll,
931 .owner = THIS_MODULE,
932 .llseek = noop_llseek,
933};
934
935static const struct file_operations ctl_device_fops = {
936 .open = ctl_device_open,
937 .release = ctl_device_close,
938 .read = device_read,
939 .write = device_write,
940 .owner = THIS_MODULE,
941 .llseek = noop_llseek,
942};
943
944static struct miscdevice ctl_device = {
945 .name = "dlm-control",
946 .fops = &ctl_device_fops,
947 .minor = MISC_DYNAMIC_MINOR,
948};
949
950static const struct file_operations monitor_device_fops = {
951 .open = monitor_device_open,
952 .release = monitor_device_close,
953 .owner = THIS_MODULE,
954 .llseek = noop_llseek,
955};
956
957static struct miscdevice monitor_device = {
958 .name = "dlm-monitor",
959 .fops = &monitor_device_fops,
960 .minor = MISC_DYNAMIC_MINOR,
961};
962
963int __init dlm_user_init(void)
964{
965 int error;
966
967 atomic_set(&dlm_monitor_opened, 0);
968
969 error = misc_register(&ctl_device);
970 if (error) {
971 log_print("misc_register failed for control device");
972 goto out;
973 }
974
975 error = misc_register(&monitor_device);
976 if (error) {
977 log_print("misc_register failed for monitor device");
978 misc_deregister(&ctl_device);
979 }
980 out:
981 return error;
982}
983
984void dlm_user_exit(void)
985{
986 misc_deregister(&ctl_device);
987 misc_deregister(&monitor_device);
988}
989