// SPDX-License-Identifier: GPL-2.0
/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * Changes by Acxiom Corporation to add protocol version to kernel
 * communication, Copyright Acxiom Corporation, 2005.
 *
 * See COPYING in top-level directory.
 */

#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-dev-proto.h"
#include "orangefs-bufmap.h"
#include "orangefs-debugfs.h"

#include <linux/debugfs.h>
#include <linux/slab.h>

/* this file implements the /dev/pvfs2-req device node */

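/*
 * Big picture: the VFS side queues upcall ops on orangefs_request_list;
 * the userspace client-core reads them from this device, services them,
 * and writes the matching downcalls back through write_iter. Ops that
 * have been handed to userspace sit in orangefs_htable_ops_in_progress,
 * keyed by tag, until a downcall carrying the same tag arrives.
 */
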
uint32_t orangefs_userspace_version;

static int open_access_count;

static DEFINE_MUTEX(devreq_mutex);

#define DUMP_DEVICE_ERROR() \
do { \
	gossip_err("*****************************************************\n");\
	gossip_err("ORANGEFS Device Error: You cannot open the device file "); \
	gossip_err("\n/dev/%s more than once. Please make sure that\nthere " \
		   "are no ", ORANGEFS_REQDEVICE_NAME); \
	gossip_err("instances of a program using this device\ncurrently " \
		   "running. (You must verify this!)\n"); \
	gossip_err("For example, you can use the lsof program as follows:\n");\
	gossip_err("'lsof | grep %s' (run this as root)\n", \
		   ORANGEFS_REQDEVICE_NAME); \
	gossip_err("  open_access_count = %d\n", open_access_count); \
	gossip_err("*****************************************************\n");\
} while (0)

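/*
 * Note: do_div() divides its 64-bit dividend in place and returns the
 * remainder, so hash_func() effectively computes tag % table_size
 * without requiring a 64-bit modulo on 32-bit architectures.
 */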
static int hash_func(__u64 tag, int table_size)
{
	return do_div(tag, (unsigned int)table_size);
}

static void orangefs_devreq_add_op(struct orangefs_kernel_op_s *op)
{
	int index = hash_func(op->tag, hash_table_size);

	list_add_tail(&op->list, &orangefs_htable_ops_in_progress[index]);
}

/*
 * find the op with this tag and remove it from the in progress
 * hash table.
 */
static struct orangefs_kernel_op_s *orangefs_devreq_remove_op(__u64 tag)
{
	struct orangefs_kernel_op_s *op, *next;
	int index;

	index = hash_func(tag, hash_table_size);

	spin_lock(&orangefs_htable_ops_in_progress_lock);
	list_for_each_entry_safe(op,
				 next,
				 &orangefs_htable_ops_in_progress[index],
				 list) {
		if (op->tag == tag && !op_state_purged(op) &&
		    !op_state_given_up(op)) {
			list_del_init(&op->list);
			spin_unlock(&orangefs_htable_ops_in_progress_lock);
			return op;
		}
	}

	spin_unlock(&orangefs_htable_ops_in_progress_lock);
	return NULL;
}

/*
 * Mark every mounted filesystem as needing a remount. Returns 1 if no
 * filesystems were mounted, 0 otherwise.
 */
static int mark_all_pending_mounts(void)
{
	int unmounted = 1;
	struct orangefs_sb_info_s *orangefs_sb = NULL;

	spin_lock(&orangefs_superblocks_lock);
	list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
		/* All of these file systems require a remount. */
		orangefs_sb->mount_pending = 1;
		unmounted = 0;
	}
	spin_unlock(&orangefs_superblocks_lock);
	return unmounted;
}

/*
 * Determine whether a given filesystem needs to be remounted.
 * Returns -1 if no superblock with this fsid is known,
 *          0 if it is already mounted,
 *          1 if it needs a remount.
 */
static int fs_mount_pending(__s32 fsid)
{
	int mount_pending = -1;
	struct orangefs_sb_info_s *orangefs_sb = NULL;

	spin_lock(&orangefs_superblocks_lock);
	list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
		if (orangefs_sb->fs_id == fsid) {
			mount_pending = orangefs_sb->mount_pending;
			break;
		}
	}
	spin_unlock(&orangefs_superblocks_lock);
	return mount_pending;
}

static int orangefs_devreq_open(struct inode *inode, struct file *file)
{
	int ret = -EINVAL;

	/* in order to ensure that the filesystem driver sees correct UIDs */
	if (file->f_cred->user_ns != &init_user_ns) {
		gossip_err("%s: device cannot be opened outside init_user_ns\n",
			   __func__);
		goto out;
	}

	if (!(file->f_flags & O_NONBLOCK)) {
		gossip_err("%s: device cannot be opened in blocking mode\n",
			   __func__);
		goto out;
	}
	ret = -EACCES;
	gossip_debug(GOSSIP_DEV_DEBUG, "client-core: opening device\n");
	mutex_lock(&devreq_mutex);

	if (open_access_count == 0) {
		open_access_count = 1;
		ret = 0;
	} else {
		DUMP_DEVICE_ERROR();
	}
	mutex_unlock(&devreq_mutex);

out:

	gossip_debug(GOSSIP_DEV_DEBUG,
		     "pvfs2-client-core: open device complete (ret = %d)\n",
		     ret);
	return ret;
}

/* Function for read() callers into the device */
static ssize_t orangefs_devreq_read(struct file *file,
				    char __user *buf,
				    size_t count, loff_t *offset)
{
	struct orangefs_kernel_op_s *op, *temp;
	__s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION;
	static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
	struct orangefs_kernel_op_s *cur_op;
	unsigned long ret;

	/* We do not support blocking IO. */
	if (!(file->f_flags & O_NONBLOCK)) {
		gossip_err("%s: blocking read from client-core.\n",
			   __func__);
		return -EINVAL;
	}

	/*
	 * The client will do an ioctl to find MAX_DEV_REQ_UPSIZE, then
	 * always read with that size buffer.
	 */
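	/*
	 * For illustration only (userspace pseudocode, not part of this
	 * module), the client-core side roughly does:
	 *
	 *	__s32 upsize;
	 *	ioctl(devfd, ORANGEFS_DEV_GET_MAX_UPSIZE, &upsize);
	 *	nread = read(devfd, buf, upsize);
	 */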
	if (count != MAX_DEV_REQ_UPSIZE) {
		gossip_err("orangefs: client-core tried to read wrong size\n");
		return -EINVAL;
	}

	/* Check for an empty list before locking. */
	if (list_empty(&orangefs_request_list))
		return -EAGAIN;

restart:
	cur_op = NULL;
	/* Get next op (if any) from top of list. */
	spin_lock(&orangefs_request_list_lock);
	list_for_each_entry_safe(op, temp, &orangefs_request_list, list) {
		__s32 fsid;
		/* This lock is held past the end of the loop when we break. */
		spin_lock(&op->lock);
		if (unlikely(op_state_purged(op) || op_state_given_up(op))) {
			spin_unlock(&op->lock);
			continue;
		}

		fsid = fsid_of_op(op);
		if (fsid != ORANGEFS_FS_ID_NULL) {
			int ret;
			/* Skip ops whose filesystem needs to be mounted. */
			ret = fs_mount_pending(fsid);
			if (ret == 1) {
				gossip_debug(GOSSIP_DEV_DEBUG,
					     "%s: mount pending, skipping op tag "
					     "%llu %s\n",
					     __func__,
					     llu(op->tag),
					     get_opname_string(op));
				spin_unlock(&op->lock);
				continue;
			/*
			 * Skip ops whose filesystem we don't know about unless
			 * it is being mounted or unmounted. It is possible for
			 * a filesystem we don't know about to be unmounted if
			 * it fails to mount in the kernel after userspace has
			 * been sent the mount request.
			 */
			/* XXX: is there a better way to detect this? */
			} else if (ret == -1 &&
				   !(op->upcall.type ==
					ORANGEFS_VFS_OP_FS_MOUNT ||
				     op->upcall.type ==
					ORANGEFS_VFS_OP_GETATTR ||
				     op->upcall.type ==
					ORANGEFS_VFS_OP_FS_UMOUNT)) {
				gossip_debug(GOSSIP_DEV_DEBUG,
					     "orangefs: skipping op tag %llu %s\n",
					     llu(op->tag), get_opname_string(op));
				gossip_err(
					"orangefs: ERROR: fs_mount_pending %d\n",
					fsid);
				spin_unlock(&op->lock);
				continue;
			}
		}
		/*
		 * Either this op does not pertain to a filesystem, is mounting
		 * a filesystem, or pertains to a mounted filesystem. Let it
		 * through.
		 */
		cur_op = op;
		break;
	}

	/*
	 * At this point we either have a valid op and can continue or have not
	 * found an op and must ask the client to try again later.
	 */
	if (!cur_op) {
		spin_unlock(&orangefs_request_list_lock);
		return -EAGAIN;
	}

	gossip_debug(GOSSIP_DEV_DEBUG, "%s: reading op tag %llu %s\n",
		     __func__,
		     llu(cur_op->tag),
		     get_opname_string(cur_op));

	/*
	 * Such an op should never be on the list in the first place. If so, we
	 * will abort.
	 */
	if (op_state_in_progress(cur_op) || op_state_serviced(cur_op)) {
		gossip_err("orangefs: ERROR: Current op already queued.\n");
		list_del_init(&cur_op->list);
		spin_unlock(&cur_op->lock);
		spin_unlock(&orangefs_request_list_lock);
		return -EAGAIN;
	}

	list_del_init(&cur_op->list);
	spin_unlock(&orangefs_request_list_lock);

	spin_unlock(&cur_op->lock);

	/* Push the upcall out. */
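	/*
	 * Layout of the buffer copied out below (offsets follow from the
	 * copy_to_user() calls):
	 *
	 *	offset  0: __s32 proto_ver
	 *	offset  4: __s32 magic
	 *	offset  8: __u64 tag
	 *	offset 16: struct orangefs_upcall_s upcall
	 */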
	ret = copy_to_user(buf, &proto_ver, sizeof(__s32));
	if (ret != 0)
		goto error;
	ret = copy_to_user(buf+sizeof(__s32), &magic, sizeof(__s32));
	if (ret != 0)
		goto error;
	ret = copy_to_user(buf+2 * sizeof(__s32), &cur_op->tag, sizeof(__u64));
	if (ret != 0)
		goto error;
	ret = copy_to_user(buf+2*sizeof(__s32)+sizeof(__u64), &cur_op->upcall,
			   sizeof(struct orangefs_upcall_s));
	if (ret != 0)
		goto error;

	spin_lock(&orangefs_htable_ops_in_progress_lock);
	spin_lock(&cur_op->lock);
	if (unlikely(op_state_given_up(cur_op))) {
		spin_unlock(&cur_op->lock);
		spin_unlock(&orangefs_htable_ops_in_progress_lock);
		complete(&cur_op->waitq);
		goto restart;
	}

	/*
	 * Set the operation to be in progress and move it between lists since
	 * it has been sent to the client.
	 */
	set_op_state_inprogress(cur_op);
	gossip_debug(GOSSIP_DEV_DEBUG,
		     "%s: 1 op:%s: op_state:%d: process:%s:\n",
		     __func__,
		     get_opname_string(cur_op),
		     cur_op->op_state,
		     current->comm);
	orangefs_devreq_add_op(cur_op);
	spin_unlock(&cur_op->lock);
	spin_unlock(&orangefs_htable_ops_in_progress_lock);

	/* The client always reads with one fixed-size buffer. */
	return MAX_DEV_REQ_UPSIZE;
error:
	/*
	 * We were unable to copy the op data to the client. Put the op back in
	 * the list. If the client has crashed, the op will be purged later when
	 * the device is released.
	 */
	gossip_err("orangefs: Failed to copy data to user space\n");
	spin_lock(&orangefs_request_list_lock);
	spin_lock(&cur_op->lock);
	if (likely(!op_state_given_up(cur_op))) {
		set_op_state_waiting(cur_op);
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: 2 op:%s: op_state:%d: process:%s:\n",
			     __func__,
			     get_opname_string(cur_op),
			     cur_op->op_state,
			     current->comm);
		list_add(&cur_op->list, &orangefs_request_list);
		spin_unlock(&cur_op->lock);
	} else {
		spin_unlock(&cur_op->lock);
		complete(&cur_op->waitq);
	}
	spin_unlock(&orangefs_request_list_lock);
	return -EFAULT;
}

/*
 * Function for writev() callers into the device.
 *
 * Userspace should have written:
 *  - __u32 version
 *  - __u32 magic
 *  - __u64 tag
 *  - struct orangefs_downcall_s
 *  - trailer buffer (in the case of READDIR operations)
 */
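/*
 * Illustration only (userspace pseudocode, not part of this module):
 * a client-core reply could be assembled as an iovec along these lines,
 * matching the layout documented above:
 *
 *	struct iovec vec[] = {
 *		{ &head, sizeof(head) },		// version, magic, tag
 *		{ &downcall, sizeof(downcall) },	// orangefs_downcall_s
 *		{ trailer, trailer_size },		// READDIR only
 *	};
 *	writev(devfd, vec, trailer_size ? 3 : 2);
 *
 * The real client structures live in the OrangeFS userspace sources.
 */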
static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
					  struct iov_iter *iter)
{
	ssize_t ret;
	struct orangefs_kernel_op_s *op = NULL;
	struct {
		__u32 version;
		__u32 magic;
		__u64 tag;
	} head;
	int total = ret = iov_iter_count(iter);
	int downcall_size = sizeof(struct orangefs_downcall_s);
	int head_size = sizeof(head);

	gossip_debug(GOSSIP_DEV_DEBUG, "%s: total:%d: ret:%zd:\n",
		     __func__,
		     total,
		     ret);

	if (total < MAX_DEV_REQ_DOWNSIZE) {
		gossip_err("%s: total:%d: must be at least:%u:\n",
			   __func__,
			   total,
			   (unsigned int) MAX_DEV_REQ_DOWNSIZE);
		return -EFAULT;
	}

	if (!copy_from_iter_full(&head, head_size, iter)) {
		gossip_err("%s: failed to copy head.\n", __func__);
		return -EFAULT;
	}

	if (head.version < ORANGEFS_MINIMUM_USERSPACE_VERSION) {
		gossip_err("%s: userspace claims version %d, minimum version required: %d.\n",
			   __func__,
			   head.version,
			   ORANGEFS_MINIMUM_USERSPACE_VERSION);
		return -EPROTO;
	}

	if (head.magic != ORANGEFS_DEVREQ_MAGIC) {
		gossip_err("Error: Device magic number does not match.\n");
		return -EPROTO;
	}

	if (!orangefs_userspace_version) {
		orangefs_userspace_version = head.version;
	} else if (orangefs_userspace_version != head.version) {
		gossip_err("Error: userspace version changes\n");
		return -EPROTO;
	}

	/* remove the op from the in progress hash table */
	op = orangefs_devreq_remove_op(head.tag);
	if (!op) {
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: No one's waiting for tag %llu\n",
			     __func__, llu(head.tag));
		return ret;
	}

	if (!copy_from_iter_full(&op->downcall, downcall_size, iter)) {
		gossip_err("%s: failed to copy downcall.\n", __func__);
		goto Efault;
	}

	if (op->downcall.status)
		goto wakeup;

	/*
	 * We've successfully peeled off the head and the downcall.
	 * Something has gone awry if total doesn't equal the
	 * sum of head_size, downcall_size and trailer_size.
	 */
	if ((head_size + downcall_size + op->downcall.trailer_size) != total) {
		gossip_err("%s: funky write, head_size:%d"
			   ": downcall_size:%d: trailer_size:%lld"
			   ": total size:%d:\n",
			   __func__,
			   head_size,
			   downcall_size,
			   op->downcall.trailer_size,
			   total);
		goto Efault;
	}

	/* Only READDIR operations should have trailers. */
	if ((op->downcall.type != ORANGEFS_VFS_OP_READDIR) &&
	    (op->downcall.trailer_size != 0)) {
		gossip_err("%s: %x operation with trailer.",
			   __func__,
			   op->downcall.type);
		goto Efault;
	}

	/* READDIR operations should always have trailers. */
	if ((op->downcall.type == ORANGEFS_VFS_OP_READDIR) &&
	    (op->downcall.trailer_size == 0)) {
		gossip_err("%s: %x operation with no trailer.",
			   __func__,
			   op->downcall.type);
		goto Efault;
	}

	if (op->downcall.type != ORANGEFS_VFS_OP_READDIR)
		goto wakeup;

	op->downcall.trailer_buf = vzalloc(op->downcall.trailer_size);
	if (!op->downcall.trailer_buf)
		goto Enomem;

	if (!copy_from_iter_full(op->downcall.trailer_buf,
				 op->downcall.trailer_size, iter)) {
		gossip_err("%s: failed to copy trailer.\n", __func__);
		vfree(op->downcall.trailer_buf);
		goto Efault;
	}

wakeup:
	/*
	 * Return to vfs waitqueue, and back to service_operation
	 * through wait_for_matching_downcall.
	 */
	spin_lock(&op->lock);
	if (unlikely(op_is_cancel(op))) {
		spin_unlock(&op->lock);
		put_cancel(op);
	} else if (unlikely(op_state_given_up(op))) {
		spin_unlock(&op->lock);
		complete(&op->waitq);
	} else {
		set_op_state_serviced(op);
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: op:%s: op_state:%d: process:%s:\n",
			     __func__,
			     get_opname_string(op),
			     op->op_state,
			     current->comm);
		spin_unlock(&op->lock);
	}
	return ret;

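/*
 * The statuses set below are negative OrangeFS-encoded errors
 * (ORANGEFS_ERROR_BIT or'ed with an OrangeFS error number; see
 * protocol.h), so the submitter waiting on this op sees a failed
 * downcall rather than stale data.
 */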
Efault:
	op->downcall.status = -(ORANGEFS_ERROR_BIT | 9);
	ret = -EFAULT;
	goto wakeup;

Enomem:
	op->downcall.status = -(ORANGEFS_ERROR_BIT | 8);
	ret = -ENOMEM;
	goto wakeup;
}

/*
 * NOTE: gets called when the last reference to this device is dropped.
 * Using the open_access_count variable, we enforce a reference count
 * on this file so that it can be opened by only one process at a time.
 * The devreq_mutex is used to make sure all i/o has completed
 * before we call orangefs_bufmap_finalize, and similar such tricky
 * situations.
 */
static int orangefs_devreq_release(struct inode *inode, struct file *file)
{
	int unmounted = 0;

	gossip_debug(GOSSIP_DEV_DEBUG,
		     "%s:pvfs2-client-core: exiting, closing device\n",
		     __func__);

	mutex_lock(&devreq_mutex);
	orangefs_bufmap_finalize();

	open_access_count = -1;

	unmounted = mark_all_pending_mounts();
	gossip_debug(GOSSIP_DEV_DEBUG, "ORANGEFS Device Close: Filesystem(s) %s\n",
		     (unmounted ? "UNMOUNTED" : "MOUNTED"));

	purge_waiting_ops();
	purge_inprogress_ops();

	orangefs_bufmap_run_down();

	gossip_debug(GOSSIP_DEV_DEBUG,
		     "pvfs2-client-core: device close complete\n");
	open_access_count = 0;
	orangefs_userspace_version = 0;
	mutex_unlock(&devreq_mutex);
	return 0;
}

int is_daemon_in_service(void)
{
	int in_service;

	/*
	 * Check whether the client-core is alive, based on the access
	 * count we maintain on the device.
	 */
	mutex_lock(&devreq_mutex);
	in_service = open_access_count == 1 ? 0 : -EIO;
	mutex_unlock(&devreq_mutex);
	return in_service;
}

bool __is_daemon_in_service(void)
{
	return open_access_count == 1;
}

static inline long check_ioctl_command(unsigned int command)
{
	/* Check for valid ioctl codes */
	if (_IOC_TYPE(command) != ORANGEFS_DEV_MAGIC) {
		gossip_err("device ioctl magic numbers don't match! Did you rebuild pvfs2-client-core/libpvfs2? [cmd %x, magic %x != %x]\n",
			   command,
			   _IOC_TYPE(command),
			   ORANGEFS_DEV_MAGIC);
		return -EINVAL;
	}
	/* and valid ioctl commands */
	if (_IOC_NR(command) >= ORANGEFS_DEV_MAXNR || _IOC_NR(command) <= 0) {
		gossip_err("Invalid ioctl command number [%d >= %d]\n",
			   _IOC_NR(command), ORANGEFS_DEV_MAXNR);
		return -ENOIOCTLCMD;
	}
	return 0;
}

static long dispatch_ioctl_command(unsigned int command, unsigned long arg)
{
	static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
	static __s32 max_up_size = MAX_DEV_REQ_UPSIZE;
	static __s32 max_down_size = MAX_DEV_REQ_DOWNSIZE;
	struct ORANGEFS_dev_map_desc user_desc;
	int ret = 0;
	int upstream_kmod = 1;
	struct orangefs_sb_info_s *orangefs_sb;

	/* mtmoore: add locking here */

	switch (command) {
	case ORANGEFS_DEV_GET_MAGIC:
		return ((put_user(magic, (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
	case ORANGEFS_DEV_GET_MAX_UPSIZE:
		return ((put_user(max_up_size,
				  (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
	case ORANGEFS_DEV_GET_MAX_DOWNSIZE:
		return ((put_user(max_down_size,
				  (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
	case ORANGEFS_DEV_MAP:
		ret = copy_from_user(&user_desc,
				     (struct ORANGEFS_dev_map_desc __user *)
				     arg,
				     sizeof(struct ORANGEFS_dev_map_desc));
		/* WTF -EIO and not -EFAULT? */
		return ret ? -EIO : orangefs_bufmap_initialize(&user_desc);
	case ORANGEFS_DEV_REMOUNT_ALL:
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: got ORANGEFS_DEV_REMOUNT_ALL\n",
			     __func__);

		/*
		 * Remount all mounted orangefs volumes to regain the lost
		 * dynamic mount tables (if any) -- NOTE: this is done
		 * without keeping the superblock list locked due to the
		 * upcall/downcall waiting. Also, the request mutex is
		 * used to ensure that no operations will be serviced until
		 * all of the remounts are serviced (so that ops issued
		 * between the remounts do not fail).
		 */
		ret = mutex_lock_interruptible(&orangefs_request_mutex);
		if (ret < 0)
			return ret;
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: priority remount in progress\n",
			     __func__);
		spin_lock(&orangefs_superblocks_lock);
		list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
			/*
			 * We have to drop the spinlock, so entries can be
			 * removed. They can't be freed, though, so we just
			 * keep the forward pointers and zero the back ones -
			 * that way we can get to the rest of the list.
			 */
			if (!orangefs_sb->list.prev)
				continue;
			gossip_debug(GOSSIP_DEV_DEBUG,
				     "%s: Remounting SB %p\n",
				     __func__,
				     orangefs_sb);

			spin_unlock(&orangefs_superblocks_lock);
			ret = orangefs_remount(orangefs_sb);
			spin_lock(&orangefs_superblocks_lock);
			if (ret) {
				gossip_debug(GOSSIP_DEV_DEBUG,
					     "SB %p remount failed\n",
					     orangefs_sb);
				break;
			}
		}
		spin_unlock(&orangefs_superblocks_lock);
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: priority remount complete\n",
			     __func__);
		mutex_unlock(&orangefs_request_mutex);
		return ret;

	case ORANGEFS_DEV_UPSTREAM:
		ret = copy_to_user((void __user *)arg,
				   &upstream_kmod,
				   sizeof(upstream_kmod));

		if (ret != 0)
			return -EIO;
		else
			return ret;

	case ORANGEFS_DEV_CLIENT_MASK:
		return orangefs_debugfs_new_client_mask((void __user *)arg);
	case ORANGEFS_DEV_CLIENT_STRING:
		return orangefs_debugfs_new_client_string((void __user *)arg);
	case ORANGEFS_DEV_DEBUG:
		return orangefs_debugfs_new_debug((void __user *)arg);
	default:
		return -ENOIOCTLCMD;
	}
	return -ENOIOCTLCMD;
}

static long orangefs_devreq_ioctl(struct file *file,
				  unsigned int command, unsigned long arg)
{
	long ret;

	/* Check for properly constructed commands */
	ret = check_ioctl_command(command);
	if (ret < 0)
		return (int)ret;

	return (int)dispatch_ioctl_command(command, arg);
}

#ifdef CONFIG_COMPAT		/* CONFIG_COMPAT is in .config */

/* Compat structure for the ORANGEFS_DEV_MAP ioctl */
struct ORANGEFS_dev_map_desc32 {
	compat_uptr_t ptr;
	__s32 total_size;
	__s32 size;
	__s32 count;
};
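
/*
 * The layout mirrors struct ORANGEFS_dev_map_desc except that 32-bit
 * callers pass the buffer address as a compat_uptr_t rather than a
 * native user pointer, which is why ORANGEFS_DEV_MAP needs the
 * translation helper below.
 */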

static unsigned long translate_dev_map26(unsigned long args, long *error)
{
	struct ORANGEFS_dev_map_desc32 __user *p32 = (void __user *)args;
	/*
	 * Depending on the architecture, allocate some space on the
	 * user-call-stack based on our expected layout.
	 */
	struct ORANGEFS_dev_map_desc __user *p =
	    compat_alloc_user_space(sizeof(*p));
	compat_uptr_t addr;

	*error = 0;
	/* get the ptr from the 32 bit user-space */
	if (get_user(addr, &p32->ptr))
		goto err;
	/* try to put that into a 64-bit layout */
	if (put_user(compat_ptr(addr), &p->ptr))
		goto err;
	/* copy the remaining fields */
	if (copy_in_user(&p->total_size, &p32->total_size, sizeof(__s32)))
		goto err;
	if (copy_in_user(&p->size, &p32->size, sizeof(__s32)))
		goto err;
	if (copy_in_user(&p->count, &p32->count, sizeof(__s32)))
		goto err;
	return (unsigned long)p;
err:
	*error = -EFAULT;
	return 0;
}

/*
 * 32-bit user-space applications' ioctl handler, for when the kernel
 * module is compiled as 64-bit.
 */
static long orangefs_devreq_compat_ioctl(struct file *filp, unsigned int cmd,
					 unsigned long args)
{
	long ret;
	unsigned long arg = args;

	/* Check for properly constructed commands */
	ret = check_ioctl_command(cmd);
	if (ret < 0)
		return ret;
	if (cmd == ORANGEFS_DEV_MAP) {
		/*
		 * convert the arguments to what we expect internally
		 * in kernel space
		 */
		arg = translate_dev_map26(args, &ret);
		if (ret < 0) {
			gossip_err("Could not translate dev map\n");
			return ret;
		}
	}
	/* no other ioctl requires translation */
	return dispatch_ioctl_command(cmd, arg);
}

#endif /* CONFIG_COMPAT is in .config */

static __poll_t orangefs_devreq_poll(struct file *file,
				     struct poll_table_struct *poll_table)
{
	__poll_t poll_revent_mask = 0;

	poll_wait(file, &orangefs_request_list_waitq, poll_table);

	if (!list_empty(&orangefs_request_list))
		poll_revent_mask |= EPOLLIN;
	return poll_revent_mask;
}
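
/*
 * Illustrative client-side loop (userspace pseudocode, not kernel code):
 *
 *	struct pollfd pfd = { .fd = devfd, .events = POLLIN };
 *	while (poll(&pfd, 1, -1) > 0)
 *		read(devfd, buf, upsize);	// may still return -EAGAIN
 */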

/* the assigned character device major number */
static int orangefs_dev_major;

static const struct file_operations orangefs_devreq_file_operations = {
	.owner = THIS_MODULE,
	.read = orangefs_devreq_read,
	.write_iter = orangefs_devreq_write_iter,
	.open = orangefs_devreq_open,
	.release = orangefs_devreq_release,
	.unlocked_ioctl = orangefs_devreq_ioctl,

#ifdef CONFIG_COMPAT		/* CONFIG_COMPAT is in .config */
	.compat_ioctl = orangefs_devreq_compat_ioctl,
#endif
	.poll = orangefs_devreq_poll
};

/*
 * Initialize orangefs device specific state:
 * Must be called at module load time only
 */
int orangefs_dev_init(void)
{
	/* register orangefs-req device */
	orangefs_dev_major = register_chrdev(0,
					     ORANGEFS_REQDEVICE_NAME,
					     &orangefs_devreq_file_operations);
	if (orangefs_dev_major < 0) {
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "Failed to register /dev/%s (error %d)\n",
			     ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
		return orangefs_dev_major;
	}

	gossip_debug(GOSSIP_DEV_DEBUG,
		     "*** /dev/%s character device registered ***\n",
		     ORANGEFS_REQDEVICE_NAME);
	gossip_debug(GOSSIP_DEV_DEBUG, "'mknod /dev/%s c %d 0'.\n",
		     ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
	return 0;
}

void orangefs_dev_cleanup(void)
{
	unregister_chrdev(orangefs_dev_major, ORANGEFS_REQDEVICE_NAME);
	gossip_debug(GOSSIP_DEV_DEBUG,
		     "*** /dev/%s character device unregistered ***\n",
		     ORANGEFS_REQDEVICE_NAME);
}