1/*
2 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
3 *
4 * This copyrighted material is made available to anyone wishing to use,
5 * modify, copy, or redistribute it subject to the terms and conditions
6 * of the GNU General Public License version 2.
7 */
8
9#include <linux/fs.h>
10#include <linux/miscdevice.h>
11#include <linux/poll.h>
12#include <linux/dlm.h>
13#include <linux/dlm_plock.h>
14#include <linux/slab.h>
15
16#include "dlm_internal.h"
17#include "lockspace.h"
18
/* Protects send_list and recv_list; initialized in dlm_plock_init(). */
static spinlock_t ops_lock;
static struct list_head send_list;	/* ops waiting to be read by userspace */
static struct list_head recv_list;	/* ops waiting for a userspace reply */
static wait_queue_head_t send_wq;	/* wakes device readers/pollers */
static wait_queue_head_t recv_wq;	/* wakes kernel threads awaiting results */
24
/* One request/reply exchanged with the userspace daemon (dlm_controld).
   The op sits on send_list until dev_read() copies it to userspace,
   then on recv_list until dev_write() delivers the result. */
struct plock_op {
	struct list_head list;
	int done;		/* set when userspace has written a result */
	struct dlm_plock_info info;
};

/* Extended op for async (lockd/NFS) requests; xop must stay the first
   member so a plock_op pointer can be cast back to plock_xop. */
struct plock_xop {
	struct plock_op xop;
	int (*callback)(struct file_lock *fl, int result);
	void *fl;
	void *file;
	struct file_lock flc;
};
38
39
/* Stamp the kernel's plock ABI version into an outgoing request. */
static inline void set_version(struct dlm_plock_info *info)
{
	info->version[0] = DLM_PLOCK_VERSION_MAJOR;
	info->version[1] = DLM_PLOCK_VERSION_MINOR;
	info->version[2] = DLM_PLOCK_VERSION_PATCH;
}
46
47static int check_version(struct dlm_plock_info *info)
48{
49 if ((DLM_PLOCK_VERSION_MAJOR != info->version[0]) ||
50 (DLM_PLOCK_VERSION_MINOR < info->version[1])) {
51 log_print("plock device version mismatch: "
52 "kernel (%u.%u.%u), user (%u.%u.%u)",
53 DLM_PLOCK_VERSION_MAJOR,
54 DLM_PLOCK_VERSION_MINOR,
55 DLM_PLOCK_VERSION_PATCH,
56 info->version[0],
57 info->version[1],
58 info->version[2]);
59 return -EINVAL;
60 }
61 return 0;
62}
63
64static void send_op(struct plock_op *op)
65{
66 set_version(&op->info);
67 INIT_LIST_HEAD(&op->list);
68 spin_lock(&ops_lock);
69 list_add_tail(&op->list, &send_list);
70 spin_unlock(&ops_lock);
71 wake_up(&send_wq);
72}
73
74/* If a process was killed while waiting for the only plock on a file,
75 locks_remove_posix will not see any lock on the file so it won't
76 send an unlock-close to us to pass on to userspace to clean up the
77 abandoned waiter. So, we have to insert the unlock-close when the
78 lock call is interrupted. */
79
80static void do_unlock_close(struct dlm_ls *ls, u64 number,
81 struct file *file, struct file_lock *fl)
82{
83 struct plock_op *op;
84
85 op = kzalloc(sizeof(*op), GFP_NOFS);
86 if (!op)
87 return;
88
89 op->info.optype = DLM_PLOCK_OP_UNLOCK;
90 op->info.pid = fl->fl_pid;
91 op->info.fsid = ls->ls_global_id;
92 op->info.number = number;
93 op->info.start = 0;
94 op->info.end = OFFSET_MAX;
95 if (fl->fl_lmops && fl->fl_lmops->lm_grant)
96 op->info.owner = (__u64) fl->fl_pid;
97 else
98 op->info.owner = (__u64)(long) fl->fl_owner;
99
100 op->info.flags |= DLM_PLOCK_FL_CLOSE;
101 send_op(op);
102}
103
104int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
105 int cmd, struct file_lock *fl)
106{
107 struct dlm_ls *ls;
108 struct plock_op *op;
109 struct plock_xop *xop;
110 int rv;
111
112 ls = dlm_find_lockspace_local(lockspace);
113 if (!ls)
114 return -EINVAL;
115
116 xop = kzalloc(sizeof(*xop), GFP_NOFS);
117 if (!xop) {
118 rv = -ENOMEM;
119 goto out;
120 }
121
122 op = &xop->xop;
123 op->info.optype = DLM_PLOCK_OP_LOCK;
124 op->info.pid = fl->fl_pid;
125 op->info.ex = (fl->fl_type == F_WRLCK);
126 op->info.wait = IS_SETLKW(cmd);
127 op->info.fsid = ls->ls_global_id;
128 op->info.number = number;
129 op->info.start = fl->fl_start;
130 op->info.end = fl->fl_end;
131 if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
132 /* fl_owner is lockd which doesn't distinguish
133 processes on the nfs client */
134 op->info.owner = (__u64) fl->fl_pid;
135 xop->callback = fl->fl_lmops->lm_grant;
136 locks_init_lock(&xop->flc);
137 locks_copy_lock(&xop->flc, fl);
138 xop->fl = fl;
139 xop->file = file;
140 } else {
141 op->info.owner = (__u64)(long) fl->fl_owner;
142 xop->callback = NULL;
143 }
144
145 send_op(op);
146
147 if (xop->callback == NULL) {
148 rv = wait_event_interruptible(recv_wq, (op->done != 0));
149 if (rv == -ERESTARTSYS) {
150 log_debug(ls, "dlm_posix_lock: wait killed %llx",
151 (unsigned long long)number);
152 spin_lock(&ops_lock);
153 list_del(&op->list);
154 spin_unlock(&ops_lock);
155 kfree(xop);
156 do_unlock_close(ls, number, file, fl);
157 goto out;
158 }
159 } else {
160 rv = FILE_LOCK_DEFERRED;
161 goto out;
162 }
163
164 spin_lock(&ops_lock);
165 if (!list_empty(&op->list)) {
166 log_error(ls, "dlm_posix_lock: op on list %llx",
167 (unsigned long long)number);
168 list_del(&op->list);
169 }
170 spin_unlock(&ops_lock);
171
172 rv = op->info.rv;
173
174 if (!rv) {
175 if (locks_lock_file_wait(file, fl) < 0)
176 log_error(ls, "dlm_posix_lock: vfs lock error %llx",
177 (unsigned long long)number);
178 }
179
180 kfree(xop);
181out:
182 dlm_put_lockspace(ls);
183 return rv;
184}
185EXPORT_SYMBOL_GPL(dlm_posix_lock);
186
/* Returns failure iff a successful lock operation should be canceled */
/* Complete an async (lockd) lock request with the result written by
 * userspace.  Called from dev_write(); frees the op on all paths. */
static int dlm_plock_callback(struct plock_op *op)
{
	struct file *file;
	struct file_lock *fl;
	struct file_lock *flc;
	int (*notify)(struct file_lock *fl, int result) = NULL;
	struct plock_xop *xop = (struct plock_xop *)op;
	int rv = 0;

	spin_lock(&ops_lock);
	if (!list_empty(&op->list)) {
		/* should not happen: dev_write() removes the op first */
		log_print("dlm_plock_callback: op on list %llx",
			  (unsigned long long)op->info.number);
		list_del(&op->list);
	}
	spin_unlock(&ops_lock);

	/* check if the following 2 are still valid or make a copy */
	file = xop->file;
	flc = &xop->flc;
	fl = xop->fl;
	notify = xop->callback;

	if (op->info.rv) {
		/* daemon refused the lock; just report the error */
		notify(fl, op->info.rv);
		goto out;
	}

	/* got fs lock; bookkeep locally as well: */
	flc->fl_flags &= ~FL_SLEEP;
	if (posix_lock_file(file, flc, NULL)) {
		/*
		 * This can only happen in the case of kmalloc() failure.
		 * The filesystem's own lock is the authoritative lock,
		 * so a failure to get the lock locally is not a disaster.
		 * As long as the fs cannot reliably cancel locks (especially
		 * in a low-memory situation), we're better off ignoring
		 * this failure than trying to recover.
		 */
		log_print("dlm_plock_callback: vfs lock error %llx file %p fl %p",
			  (unsigned long long)op->info.number, file, fl);
	}

	rv = notify(fl, 0);
	if (rv) {
		/* XXX: We need to cancel the fs lock here: */
		log_print("dlm_plock_callback: lock granted after lock request "
			  "failed; dangling lock!\n");
		goto out;
	}

out:
	kfree(xop);
	return rv;
}
243
/* Release a posix lock: drop the local VFS lock first, then send an
 * unlock request to the userspace daemon so cluster-wide state is
 * updated.  Returns 0 on success or a negative errno. */
int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
		     struct file_lock *fl)
{
	struct dlm_ls *ls;
	struct plock_op *op;
	int rv;
	/* saved/restored because FL_EXISTS is temporarily set below */
	unsigned char fl_flags = fl->fl_flags;

	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -EINVAL;

	op = kzalloc(sizeof(*op), GFP_NOFS);
	if (!op) {
		rv = -ENOMEM;
		goto out;
	}

	/* cause the vfs unlock to return ENOENT if lock is not found */
	fl->fl_flags |= FL_EXISTS;

	rv = locks_lock_file_wait(file, fl);
	if (rv == -ENOENT) {
		/* no local lock existed; nothing for userspace to undo */
		rv = 0;
		goto out_free;
	}
	if (rv < 0) {
		/* log but continue: the daemon-side unlock must still go out */
		log_error(ls, "dlm_posix_unlock: vfs unlock error %d %llx",
			  rv, (unsigned long long)number);
	}

	op->info.optype = DLM_PLOCK_OP_UNLOCK;
	op->info.pid = fl->fl_pid;
	op->info.fsid = ls->ls_global_id;
	op->info.number = number;
	op->info.start = fl->fl_start;
	op->info.end = fl->fl_end;
	if (fl->fl_lmops && fl->fl_lmops->lm_grant)
		op->info.owner = (__u64) fl->fl_pid;
	else
		op->info.owner = (__u64)(long) fl->fl_owner;

	if (fl->fl_flags & FL_CLOSE) {
		/* close()-generated unlock: no reply expected; dev_read()
		   frees the op after passing it to userspace */
		op->info.flags |= DLM_PLOCK_FL_CLOSE;
		send_op(op);
		rv = 0;
		goto out;
	}

	send_op(op);
	wait_event(recv_wq, (op->done != 0));

	spin_lock(&ops_lock);
	if (!list_empty(&op->list)) {
		/* should not happen once done is set by dev_write() */
		log_error(ls, "dlm_posix_unlock: op on list %llx",
			  (unsigned long long)number);
		list_del(&op->list);
	}
	spin_unlock(&ops_lock);

	rv = op->info.rv;

	if (rv == -ENOENT)
		rv = 0;

out_free:
	kfree(op);
out:
	dlm_put_lockspace(ls);
	fl->fl_flags = fl_flags;
	return rv;
}
316EXPORT_SYMBOL_GPL(dlm_posix_unlock);
317
/* Test for a conflicting posix lock (F_GETLK).  On conflict the
 * conflicting lock's parameters are copied into *fl; otherwise
 * fl->fl_type is set to F_UNLCK.  Returns 0 or a negative errno. */
int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
		  struct file_lock *fl)
{
	struct dlm_ls *ls;
	struct plock_op *op;
	int rv;

	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -EINVAL;

	op = kzalloc(sizeof(*op), GFP_NOFS);
	if (!op) {
		rv = -ENOMEM;
		goto out;
	}

	op->info.optype = DLM_PLOCK_OP_GET;
	op->info.pid = fl->fl_pid;
	op->info.ex = (fl->fl_type == F_WRLCK);
	op->info.fsid = ls->ls_global_id;
	op->info.number = number;
	op->info.start = fl->fl_start;
	op->info.end = fl->fl_end;
	if (fl->fl_lmops && fl->fl_lmops->lm_grant)
		op->info.owner = (__u64) fl->fl_pid;
	else
		op->info.owner = (__u64)(long) fl->fl_owner;

	send_op(op);
	wait_event(recv_wq, (op->done != 0));

	spin_lock(&ops_lock);
	if (!list_empty(&op->list)) {
		/* should not happen once done is set by dev_write() */
		log_error(ls, "dlm_posix_get: op on list %llx",
			  (unsigned long long)number);
		list_del(&op->list);
	}
	spin_unlock(&ops_lock);

	/* info.rv from userspace is 1 for conflict, 0 for no-conflict,
	   -ENOENT if there are no locks on the file */

	rv = op->info.rv;

	fl->fl_type = F_UNLCK;
	if (rv == -ENOENT)
		rv = 0;
	else if (rv > 0) {
		locks_init_lock(fl);
		fl->fl_type = (op->info.ex) ? F_WRLCK : F_RDLCK;
		fl->fl_flags = FL_POSIX;
		/* NOTE(review): pid reported as-is though the conflicting
		   owner may be on a remote node -- confirm this tree's
		   F_GETLK convention for non-local owners */
		fl->fl_pid = op->info.pid;
		fl->fl_start = op->info.start;
		fl->fl_end = op->info.end;
		rv = 0;
	}

	kfree(op);
out:
	dlm_put_lockspace(ls);
	return rv;
}
381EXPORT_SYMBOL_GPL(dlm_posix_get);
382
383/* a read copies out one plock request from the send list */
384static ssize_t dev_read(struct file *file, char __user *u, size_t count,
385 loff_t *ppos)
386{
387 struct dlm_plock_info info;
388 struct plock_op *op = NULL;
389
390 if (count < sizeof(info))
391 return -EINVAL;
392
393 spin_lock(&ops_lock);
394 if (!list_empty(&send_list)) {
395 op = list_entry(send_list.next, struct plock_op, list);
396 if (op->info.flags & DLM_PLOCK_FL_CLOSE)
397 list_del(&op->list);
398 else
399 list_move(&op->list, &recv_list);
400 memcpy(&info, &op->info, sizeof(info));
401 }
402 spin_unlock(&ops_lock);
403
404 if (!op)
405 return -EAGAIN;
406
407 /* there is no need to get a reply from userspace for unlocks
408 that were generated by the vfs cleaning up for a close
409 (the process did not make an unlock call). */
410
411 if (op->info.flags & DLM_PLOCK_FL_CLOSE)
412 kfree(op);
413
414 if (copy_to_user(u, &info, sizeof(info)))
415 return -EFAULT;
416 return sizeof(info);
417}
418
/* a write copies in one plock result that should match a plock_op
   on the recv list */
static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
			 loff_t *ppos)
{
	struct dlm_plock_info info;
	struct plock_op *op;
	int found = 0, do_callback = 0;

	if (count != sizeof(info))
		return -EINVAL;

	if (copy_from_user(&info, u, sizeof(info)))
		return -EFAULT;

	if (check_version(&info))
		return -EINVAL;

	/* match on (fsid, number, owner); the whole completion (unlink,
	   result copy, done flag) happens under ops_lock so waiters and
	   dev_read() observe a consistent op state */
	spin_lock(&ops_lock);
	list_for_each_entry(op, &recv_list, list) {
		if (op->info.fsid == info.fsid &&
		    op->info.number == info.number &&
		    op->info.owner == info.owner) {
			struct plock_xop *xop = (struct plock_xop *)op;
			list_del_init(&op->list);
			memcpy(&op->info, &info, sizeof(info));
			if (xop->callback)
				/* async (lockd) op: finish via callback
				   outside the lock */
				do_callback = 1;
			else
				op->done = 1;
			found = 1;
			break;
		}
	}
	spin_unlock(&ops_lock);

	if (found) {
		if (do_callback)
			dlm_plock_callback(op);
		else
			wake_up(&recv_wq);
	} else
		log_print("dev_write no op %x %llx", info.fsid,
			  (unsigned long long)info.number);
	return count;
}
465
466static unsigned int dev_poll(struct file *file, poll_table *wait)
467{
468 unsigned int mask = 0;
469
470 poll_wait(file, &send_wq, wait);
471
472 spin_lock(&ops_lock);
473 if (!list_empty(&send_list))
474 mask = POLLIN | POLLRDNORM;
475 spin_unlock(&ops_lock);
476
477 return mask;
478}
479
/* File operations for the plock misc device used by dlm_controld. */
static const struct file_operations dev_fops = {
	.read = dev_read,
	.write = dev_write,
	.poll = dev_poll,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

/* Dynamically-numbered misc device node for the userspace daemon. */
static struct miscdevice plock_dev_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = DLM_PLOCK_MISC_NAME,
	.fops = &dev_fops
};
493
494int dlm_plock_init(void)
495{
496 int rv;
497
498 spin_lock_init(&ops_lock);
499 INIT_LIST_HEAD(&send_list);
500 INIT_LIST_HEAD(&recv_list);
501 init_waitqueue_head(&send_wq);
502 init_waitqueue_head(&recv_wq);
503
504 rv = misc_register(&plock_dev_misc);
505 if (rv)
506 log_print("dlm_plock_init: misc_register failed %d", rv);
507 return rv;
508}
509
/* Unregister the plock misc device on module unload. */
void dlm_plock_exit(void)
{
	misc_deregister(&plock_dev_misc);
}
514
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
4 */
5
6#include <linux/fs.h>
7#include <linux/miscdevice.h>
8#include <linux/poll.h>
9#include <linux/dlm.h>
10#include <linux/dlm_plock.h>
11#include <linux/slab.h>
12
13#include "dlm_internal.h"
14#include "lockspace.h"
15
/* ops wait on send_list until dev_read() passes them to dlm_controld,
   then on recv_list until dev_write() delivers the daemon's reply;
   ops_lock protects both lists. */
static DEFINE_SPINLOCK(ops_lock);
static LIST_HEAD(send_list);
static LIST_HEAD(recv_list);
static DECLARE_WAIT_QUEUE_HEAD(send_wq);
static DECLARE_WAIT_QUEUE_HEAD(recv_wq);
21
/* Per-request state for async (lockd/NFS) requests; freed together
   with the owning plock_op by dlm_release_plock_op(). */
struct plock_async_data {
	void *fl;
	void *file;
	struct file_lock flc;
	int (*callback)(struct file_lock *fl, int result);
};

/* One request/reply exchanged with dlm_controld; queued on send_list
   until read by userspace, then on recv_list until the result is
   written back. */
struct plock_op {
	struct list_head list;
	int done;
	/* if lock op got interrupted while waiting dlm_controld reply */
	bool sigint;
	struct dlm_plock_info info;
	/* if set indicates async handling */
	struct plock_async_data *data;
};
38
/* Stamp the kernel's plock ABI version into an outgoing request. */
static inline void set_version(struct dlm_plock_info *info)
{
	info->version[0] = DLM_PLOCK_VERSION_MAJOR;
	info->version[1] = DLM_PLOCK_VERSION_MINOR;
	info->version[2] = DLM_PLOCK_VERSION_PATCH;
}
45
46static int check_version(struct dlm_plock_info *info)
47{
48 if ((DLM_PLOCK_VERSION_MAJOR != info->version[0]) ||
49 (DLM_PLOCK_VERSION_MINOR < info->version[1])) {
50 log_print("plock device version mismatch: "
51 "kernel (%u.%u.%u), user (%u.%u.%u)",
52 DLM_PLOCK_VERSION_MAJOR,
53 DLM_PLOCK_VERSION_MINOR,
54 DLM_PLOCK_VERSION_PATCH,
55 info->version[0],
56 info->version[1],
57 info->version[2]);
58 return -EINVAL;
59 }
60 return 0;
61}
62
/* Free an op together with its async data, if any (kfree(NULL) is a
   no-op for synchronous ops). */
static void dlm_release_plock_op(struct plock_op *op)
{
	kfree(op->data);
	kfree(op);
}
68
/* Queue an op for delivery to userspace and wake any poll()/read()er. */
static void send_op(struct plock_op *op)
{
	set_version(&op->info);
	spin_lock(&ops_lock);
	list_add_tail(&op->list, &send_list);
	spin_unlock(&ops_lock);
	wake_up(&send_wq);
}
77
78/* If a process was killed while waiting for the only plock on a file,
79 locks_remove_posix will not see any lock on the file so it won't
80 send an unlock-close to us to pass on to userspace to clean up the
81 abandoned waiter. So, we have to insert the unlock-close when the
82 lock call is interrupted. */
83
84static void do_unlock_close(const struct dlm_plock_info *info)
85{
86 struct plock_op *op;
87
88 op = kzalloc(sizeof(*op), GFP_NOFS);
89 if (!op)
90 return;
91
92 op->info.optype = DLM_PLOCK_OP_UNLOCK;
93 op->info.pid = info->pid;
94 op->info.fsid = info->fsid;
95 op->info.number = info->number;
96 op->info.start = 0;
97 op->info.end = OFFSET_MAX;
98 op->info.owner = info->owner;
99
100 op->info.flags |= DLM_PLOCK_FL_CLOSE;
101 send_op(op);
102}
103
/* Acquire a posix lock through the userspace daemon (dlm_controld).
 * Synchronous callers block until the daemon replies; lockd (NFS)
 * requests return FILE_LOCK_DEFERRED and complete asynchronously via
 * dlm_plock_callback().  Returns 0, FILE_LOCK_DEFERRED, or a negative
 * errno. */
int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
		   int cmd, struct file_lock *fl)
{
	struct plock_async_data *op_data;
	struct dlm_ls *ls;
	struct plock_op *op;
	int rv;

	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -EINVAL;

	op = kzalloc(sizeof(*op), GFP_NOFS);
	if (!op) {
		rv = -ENOMEM;
		goto out;
	}

	op->info.optype = DLM_PLOCK_OP_LOCK;
	op->info.pid = fl->fl_pid;
	op->info.ex = (fl->fl_type == F_WRLCK);
	op->info.wait = IS_SETLKW(cmd);
	op->info.fsid = ls->ls_global_id;
	op->info.number = number;
	op->info.start = fl->fl_start;
	op->info.end = fl->fl_end;
	/* async handling */
	if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
		op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
		if (!op_data) {
			dlm_release_plock_op(op);
			rv = -ENOMEM;
			goto out;
		}

		/* fl_owner is lockd which doesn't distinguish
		   processes on the nfs client */
		op->info.owner = (__u64) fl->fl_pid;
		op_data->callback = fl->fl_lmops->lm_grant;
		locks_init_lock(&op_data->flc);
		locks_copy_lock(&op_data->flc, fl);
		op_data->fl = fl;
		op_data->file = file;

		op->data = op_data;

		/* dev_write() will run the callback and free the op */
		send_op(op);
		rv = FILE_LOCK_DEFERRED;
		goto out;
	} else {
		op->info.owner = (__u64)(long) fl->fl_owner;
	}

	send_op(op);

	rv = wait_event_interruptible(recv_wq, (op->done != 0));
	if (rv == -ERESTARTSYS) {
		spin_lock(&ops_lock);
		/* recheck under ops_lock if we got a done != 0,
		 * if so this interrupt case should be ignored
		 */
		if (op->done != 0) {
			spin_unlock(&ops_lock);
			goto do_lock_wait;
		}

		/* the op stays queued; dev_write() sees sigint, frees the
		   op and sends the compensating unlock-close -- do NOT
		   free it here */
		op->sigint = true;
		spin_unlock(&ops_lock);
		log_debug(ls, "%s: wait interrupted %x %llx pid %d",
			  __func__, ls->ls_global_id,
			  (unsigned long long)number, op->info.pid);
		goto out;
	}

do_lock_wait:

	WARN_ON(!list_empty(&op->list));

	rv = op->info.rv;

	if (!rv) {
		/* record the granted lock locally in the VFS as well */
		if (locks_lock_file_wait(file, fl) < 0)
			log_error(ls, "dlm_posix_lock: vfs lock error %llx",
				  (unsigned long long)number);
	}

	dlm_release_plock_op(op);
out:
	dlm_put_lockspace(ls);
	return rv;
}
195EXPORT_SYMBOL_GPL(dlm_posix_lock);
196
/* Returns failure iff a successful lock operation should be canceled */
/* Complete an async (lockd) lock request with the result written by
 * userspace.  Called from dev_write(); frees the op on all paths. */
static int dlm_plock_callback(struct plock_op *op)
{
	struct plock_async_data *op_data = op->data;
	struct file *file;
	struct file_lock *fl;
	struct file_lock *flc;
	int (*notify)(struct file_lock *fl, int result) = NULL;
	int rv = 0;

	WARN_ON(!list_empty(&op->list));

	/* check if the following 2 are still valid or make a copy */
	file = op_data->file;
	flc = &op_data->flc;
	fl = op_data->fl;
	notify = op_data->callback;

	if (op->info.rv) {
		/* daemon refused the lock; just report the error */
		notify(fl, op->info.rv);
		goto out;
	}

	/* got fs lock; bookkeep locally as well: */
	flc->fl_flags &= ~FL_SLEEP;
	if (posix_lock_file(file, flc, NULL)) {
		/*
		 * This can only happen in the case of kmalloc() failure.
		 * The filesystem's own lock is the authoritative lock,
		 * so a failure to get the lock locally is not a disaster.
		 * As long as the fs cannot reliably cancel locks (especially
		 * in a low-memory situation), we're better off ignoring
		 * this failure than trying to recover.
		 */
		log_print("dlm_plock_callback: vfs lock error %llx file %p fl %p",
			  (unsigned long long)op->info.number, file, fl);
	}

	rv = notify(fl, 0);
	if (rv) {
		/* XXX: We need to cancel the fs lock here: */
		log_print("dlm_plock_callback: lock granted after lock request "
			  "failed; dangling lock!\n");
		goto out;
	}

out:
	dlm_release_plock_op(op);
	return rv;
}
247
/* Release a posix lock: drop the local VFS lock first, then send an
 * unlock request to the userspace daemon so cluster-wide state is
 * updated.  Returns 0 on success or a negative errno. */
int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
		     struct file_lock *fl)
{
	struct dlm_ls *ls;
	struct plock_op *op;
	int rv;
	/* saved/restored because FL_EXISTS is temporarily set below */
	unsigned char fl_flags = fl->fl_flags;

	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -EINVAL;

	op = kzalloc(sizeof(*op), GFP_NOFS);
	if (!op) {
		rv = -ENOMEM;
		goto out;
	}

	/* cause the vfs unlock to return ENOENT if lock is not found */
	fl->fl_flags |= FL_EXISTS;

	rv = locks_lock_file_wait(file, fl);
	if (rv == -ENOENT) {
		/* no local lock existed; nothing for userspace to undo */
		rv = 0;
		goto out_free;
	}
	if (rv < 0) {
		/* log but continue: the daemon-side unlock must still go out */
		log_error(ls, "dlm_posix_unlock: vfs unlock error %d %llx",
			  rv, (unsigned long long)number);
	}

	op->info.optype = DLM_PLOCK_OP_UNLOCK;
	op->info.pid = fl->fl_pid;
	op->info.fsid = ls->ls_global_id;
	op->info.number = number;
	op->info.start = fl->fl_start;
	op->info.end = fl->fl_end;
	if (fl->fl_lmops && fl->fl_lmops->lm_grant)
		op->info.owner = (__u64) fl->fl_pid;
	else
		op->info.owner = (__u64)(long) fl->fl_owner;

	if (fl->fl_flags & FL_CLOSE) {
		/* close()-generated unlock: no reply expected; dev_read()
		   frees the op after passing it to userspace */
		op->info.flags |= DLM_PLOCK_FL_CLOSE;
		send_op(op);
		rv = 0;
		goto out;
	}

	send_op(op);
	wait_event(recv_wq, (op->done != 0));

	WARN_ON(!list_empty(&op->list));

	rv = op->info.rv;

	if (rv == -ENOENT)
		rv = 0;

out_free:
	dlm_release_plock_op(op);
out:
	dlm_put_lockspace(ls);
	fl->fl_flags = fl_flags;
	return rv;
}
314EXPORT_SYMBOL_GPL(dlm_posix_unlock);
315
/* Test for a conflicting posix lock (F_GETLK).  On conflict the
 * conflicting lock's parameters are copied into *fl; otherwise
 * fl->fl_type is set to F_UNLCK.  Returns 0 or a negative errno. */
int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
		  struct file_lock *fl)
{
	struct dlm_ls *ls;
	struct plock_op *op;
	int rv;

	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -EINVAL;

	op = kzalloc(sizeof(*op), GFP_NOFS);
	if (!op) {
		rv = -ENOMEM;
		goto out;
	}

	op->info.optype = DLM_PLOCK_OP_GET;
	op->info.pid = fl->fl_pid;
	op->info.ex = (fl->fl_type == F_WRLCK);
	op->info.fsid = ls->ls_global_id;
	op->info.number = number;
	op->info.start = fl->fl_start;
	op->info.end = fl->fl_end;
	if (fl->fl_lmops && fl->fl_lmops->lm_grant)
		op->info.owner = (__u64) fl->fl_pid;
	else
		op->info.owner = (__u64)(long) fl->fl_owner;

	send_op(op);
	wait_event(recv_wq, (op->done != 0));

	WARN_ON(!list_empty(&op->list));

	/* info.rv from userspace is 1 for conflict, 0 for no-conflict,
	   -ENOENT if there are no locks on the file */

	rv = op->info.rv;

	fl->fl_type = F_UNLCK;
	if (rv == -ENOENT)
		rv = 0;
	else if (rv > 0) {
		locks_init_lock(fl);
		fl->fl_type = (op->info.ex) ? F_WRLCK : F_RDLCK;
		fl->fl_flags = FL_POSIX;
		/* negated pid: convention marking an owner that is not a
		   local task (cluster-remote) -- TODO confirm against
		   fs/locks.c F_GETLK handling in this tree */
		fl->fl_pid = -op->info.pid;
		fl->fl_start = op->info.start;
		fl->fl_end = op->info.end;
		rv = 0;
	}

	dlm_release_plock_op(op);
out:
	dlm_put_lockspace(ls);
	return rv;
}
374
375/* a read copies out one plock request from the send list */
376static ssize_t dev_read(struct file *file, char __user *u, size_t count,
377 loff_t *ppos)
378{
379 struct dlm_plock_info info;
380 struct plock_op *op = NULL;
381
382 if (count < sizeof(info))
383 return -EINVAL;
384
385 spin_lock(&ops_lock);
386 if (!list_empty(&send_list)) {
387 op = list_first_entry(&send_list, struct plock_op, list);
388 if (op->info.flags & DLM_PLOCK_FL_CLOSE)
389 list_del(&op->list);
390 else
391 list_move(&op->list, &recv_list);
392 memcpy(&info, &op->info, sizeof(info));
393 }
394 spin_unlock(&ops_lock);
395
396 if (!op)
397 return -EAGAIN;
398
399 /* there is no need to get a reply from userspace for unlocks
400 that were generated by the vfs cleaning up for a close
401 (the process did not make an unlock call). */
402
403 if (op->info.flags & DLM_PLOCK_FL_CLOSE)
404 dlm_release_plock_op(op);
405
406 if (copy_to_user(u, &info, sizeof(info)))
407 return -EFAULT;
408 return sizeof(info);
409}
410
/* a write copies in one plock result that should match a plock_op
   on the recv list */
static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
			 loff_t *ppos)
{
	struct plock_op *op = NULL, *iter;
	struct dlm_plock_info info;
	int do_callback = 0;

	if (count != sizeof(info))
		return -EINVAL;

	if (copy_from_user(&info, u, sizeof(info)))
		return -EFAULT;

	if (check_version(&info))
		return -EINVAL;

	/* match on (fsid, number, owner); completion happens entirely
	   under ops_lock so the waiter's done recheck is race-free */
	spin_lock(&ops_lock);
	list_for_each_entry(iter, &recv_list, list) {
		if (iter->info.fsid == info.fsid &&
		    iter->info.number == info.number &&
		    iter->info.owner == info.owner) {
			if (iter->sigint) {
				/* the interrupted waiter abandoned this op;
				   undo the grant in userspace and free the
				   op on its behalf */
				list_del(&iter->list);
				spin_unlock(&ops_lock);

				pr_debug("%s: sigint cleanup %x %llx pid %d",
					  __func__, iter->info.fsid,
					  (unsigned long long)iter->info.number,
					  iter->info.pid);
				do_unlock_close(&iter->info);
				memcpy(&iter->info, &info, sizeof(info));
				dlm_release_plock_op(iter);
				return count;
			}
			list_del_init(&iter->list);
			memcpy(&iter->info, &info, sizeof(info));
			if (iter->data)
				/* async (lockd) op: finish via callback
				   outside the lock */
				do_callback = 1;
			else
				iter->done = 1;
			op = iter;
			break;
		}
	}
	spin_unlock(&ops_lock);

	if (op) {
		if (do_callback)
			dlm_plock_callback(op);
		else
			wake_up(&recv_wq);
	} else
		log_print("%s: no op %x %llx", __func__,
			  info.fsid, (unsigned long long)info.number);
	return count;
}
469
470static __poll_t dev_poll(struct file *file, poll_table *wait)
471{
472 __poll_t mask = 0;
473
474 poll_wait(file, &send_wq, wait);
475
476 spin_lock(&ops_lock);
477 if (!list_empty(&send_list))
478 mask = EPOLLIN | EPOLLRDNORM;
479 spin_unlock(&ops_lock);
480
481 return mask;
482}
483
/* File operations for the plock misc device used by dlm_controld. */
static const struct file_operations dev_fops = {
	.read = dev_read,
	.write = dev_write,
	.poll = dev_poll,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

/* Dynamically-numbered misc device node for the userspace daemon. */
static struct miscdevice plock_dev_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = DLM_PLOCK_MISC_NAME,
	.fops = &dev_fops
};
497
498int dlm_plock_init(void)
499{
500 int rv;
501
502 rv = misc_register(&plock_dev_misc);
503 if (rv)
504 log_print("dlm_plock_init: misc_register failed %d", rv);
505 return rv;
506}
507
/* Unregister the plock misc device on module unload. */
void dlm_plock_exit(void)
{
	misc_deregister(&plock_dev_misc);
}
512