1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/anon_inodes.h>
4#include <linux/atomic.h>
5#include <linux/bitmap.h>
6#include <linux/build_bug.h>
7#include <linux/cdev.h>
8#include <linux/cleanup.h>
9#include <linux/compat.h>
10#include <linux/compiler.h>
11#include <linux/device.h>
12#include <linux/err.h>
13#include <linux/file.h>
14#include <linux/gpio.h>
15#include <linux/gpio/driver.h>
16#include <linux/hte.h>
17#include <linux/interrupt.h>
18#include <linux/irqreturn.h>
19#include <linux/kfifo.h>
20#include <linux/module.h>
21#include <linux/mutex.h>
22#include <linux/overflow.h>
23#include <linux/pinctrl/consumer.h>
24#include <linux/poll.h>
25#include <linux/seq_file.h>
26#include <linux/spinlock.h>
27#include <linux/string.h>
28#include <linux/timekeeping.h>
29#include <linux/uaccess.h>
30#include <linux/workqueue.h>
31
32#include <uapi/linux/gpio.h>
33
34#include "gpiolib.h"
35#include "gpiolib-cdev.h"
36
37/*
38 * Array sizes must ensure 64-bit alignment and not create holes in the
39 * struct packing.
40 */
41static_assert(IS_ALIGNED(GPIO_V2_LINES_MAX, 2));
42static_assert(IS_ALIGNED(GPIO_MAX_NAME_SIZE, 8));
43
44/*
45 * Check that uAPI structs are 64-bit aligned for 32/64-bit compatibility
46 */
47static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_attribute), 8));
48static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config_attribute), 8));
49static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config), 8));
50static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_request), 8));
51static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info), 8));
52static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info_changed), 8));
53static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_event), 8));
54static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8));
55
56/* Character device interface to GPIO.
57 *
58 * The GPIO character device, /dev/gpiochipN, provides userspace an
59 * interface to gpiolib GPIOs via ioctl()s.
60 */
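
/*
 * Example (sketch): minimal userspace usage of the v2 uAPI, assuming
 * /dev/gpiochip0 and line offset 5 purely for illustration. Needs
 * <fcntl.h>, <string.h>, <sys/ioctl.h> and <linux/gpio.h>.
 *
 *   int chip_fd = open("/dev/gpiochip0", O_RDONLY);
 *   struct gpio_v2_line_request req;
 *
 *   memset(&req, 0, sizeof(req));
 *   req.offsets[0] = 5;
 *   req.num_lines = 1;
 *   strcpy(req.consumer, "example");
 *   req.config.flags = GPIO_V2_LINE_FLAG_OUTPUT;
 *   ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req);
 *
 *   struct gpio_v2_line_values vals = { .bits = 1, .mask = 1 };
 *
 *   ioctl(req.fd, GPIO_V2_LINE_SET_VALUES_IOCTL, &vals);
 */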
61
62/*
63 * GPIO line handle management
64 */
65
66#ifdef CONFIG_GPIO_CDEV_V1
67/**
68 * struct linehandle_state - contains the state of a userspace handle
69 * @gdev: the GPIO device the handle pertains to
70 * @label: consumer label used to tag descriptors
71 * @descs: the GPIO descriptors held by this handle
72 * @num_descs: the number of descriptors held in the descs array
73 */
74struct linehandle_state {
75 struct gpio_device *gdev;
76 const char *label;
77 struct gpio_desc *descs[GPIOHANDLES_MAX];
78 u32 num_descs;
79};
80
81#define GPIOHANDLE_REQUEST_VALID_FLAGS \
82 (GPIOHANDLE_REQUEST_INPUT | \
83 GPIOHANDLE_REQUEST_OUTPUT | \
84 GPIOHANDLE_REQUEST_ACTIVE_LOW | \
85 GPIOHANDLE_REQUEST_BIAS_PULL_UP | \
86 GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | \
87 GPIOHANDLE_REQUEST_BIAS_DISABLE | \
88 GPIOHANDLE_REQUEST_OPEN_DRAIN | \
89 GPIOHANDLE_REQUEST_OPEN_SOURCE)
90
91#define GPIOHANDLE_REQUEST_DIRECTION_FLAGS \
92 (GPIOHANDLE_REQUEST_INPUT | \
93 GPIOHANDLE_REQUEST_OUTPUT)
94
95static int linehandle_validate_flags(u32 flags)
96{
97 /* Return an error if an unknown flag is set */
98 if (flags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
99 return -EINVAL;
100
101 /*
102 * Do not allow both INPUT & OUTPUT flags to be set as they are
103 * contradictory.
104 */
105 if ((flags & GPIOHANDLE_REQUEST_INPUT) &&
106 (flags & GPIOHANDLE_REQUEST_OUTPUT))
107 return -EINVAL;
108
109 /*
110 * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
111 * the hardware actually supports enabling both at the same time the
112 * electrical result would be disastrous.
113 */
114 if ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
115 (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
116 return -EINVAL;
117
118 /* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
119 if (!(flags & GPIOHANDLE_REQUEST_OUTPUT) &&
120 ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
121 (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
122 return -EINVAL;
123
124 /* Bias flags only allowed for input or output mode. */
125 if (!((flags & GPIOHANDLE_REQUEST_INPUT) ||
126 (flags & GPIOHANDLE_REQUEST_OUTPUT)) &&
127 ((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) ||
128 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP) ||
129 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)))
130 return -EINVAL;
131
132 /* Only one bias flag can be set. */
133 if (((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
134 (flags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
135 GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
136 ((flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
137 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
138 return -EINVAL;
139
140 return 0;
141}
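
/*
 * Examples of how the checks above apply (sketch):
 *
 *   linehandle_validate_flags(GPIOHANDLE_REQUEST_INPUT |
 *                             GPIOHANDLE_REQUEST_BIAS_PULL_UP)  -> 0
 *   linehandle_validate_flags(GPIOHANDLE_REQUEST_INPUT |
 *                             GPIOHANDLE_REQUEST_OUTPUT)        -> -EINVAL
 *   linehandle_validate_flags(GPIOHANDLE_REQUEST_OPEN_DRAIN)    -> -EINVAL
 *                             (drive flags require OUTPUT)
 */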
142
143static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp)
144{
145 unsigned long flags = READ_ONCE(*flagsp);
146
147 assign_bit(FLAG_ACTIVE_LOW, &flags,
148 lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
149 assign_bit(FLAG_OPEN_DRAIN, &flags,
150 lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
151 assign_bit(FLAG_OPEN_SOURCE, &flags,
152 lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
153 assign_bit(FLAG_PULL_UP, &flags,
154 lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
155 assign_bit(FLAG_PULL_DOWN, &flags,
156 lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
157 assign_bit(FLAG_BIAS_DISABLE, &flags,
158 lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
159
160 WRITE_ONCE(*flagsp, flags);
161}
162
163static long linehandle_set_config(struct linehandle_state *lh,
164 void __user *ip)
165{
166 struct gpiohandle_config gcnf;
167 struct gpio_desc *desc;
168 int i, ret;
169 u32 lflags;
170
171 if (copy_from_user(&gcnf, ip, sizeof(gcnf)))
172 return -EFAULT;
173
174 lflags = gcnf.flags;
175 ret = linehandle_validate_flags(lflags);
176 if (ret)
177 return ret;
178
179 /* Lines must be reconfigured explicitly as input or output. */
180 if (!(lflags & GPIOHANDLE_REQUEST_DIRECTION_FLAGS))
181 return -EINVAL;
182
183 for (i = 0; i < lh->num_descs; i++) {
184 desc = lh->descs[i];
185 linehandle_flags_to_desc_flags(lflags, &desc->flags);
186
187 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
188 int val = !!gcnf.default_values[i];
189
190 ret = gpiod_direction_output_nonotify(desc, val);
191 if (ret)
192 return ret;
193 } else {
194 ret = gpiod_direction_input_nonotify(desc);
195 if (ret)
196 return ret;
197 }
198
199 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
200 }
201 return 0;
202}
203
204static long linehandle_ioctl(struct file *file, unsigned int cmd,
205 unsigned long arg)
206{
207 struct linehandle_state *lh = file->private_data;
208 void __user *ip = (void __user *)arg;
209 struct gpiohandle_data ghd;
210 DECLARE_BITMAP(vals, GPIOHANDLES_MAX);
211 unsigned int i;
212 int ret;
213
214 guard(srcu)(&lh->gdev->srcu);
215
216 if (!rcu_access_pointer(lh->gdev->chip))
217 return -ENODEV;
218
219 switch (cmd) {
220 case GPIOHANDLE_GET_LINE_VALUES_IOCTL:
221 /* NOTE: It's okay to read values of output lines */
222 ret = gpiod_get_array_value_complex(false, true,
223 lh->num_descs, lh->descs,
224 NULL, vals);
225 if (ret)
226 return ret;
227
228 memset(&ghd, 0, sizeof(ghd));
229 for (i = 0; i < lh->num_descs; i++)
230 ghd.values[i] = test_bit(i, vals);
231
232 if (copy_to_user(ip, &ghd, sizeof(ghd)))
233 return -EFAULT;
234
235 return 0;
236 case GPIOHANDLE_SET_LINE_VALUES_IOCTL:
237 /*
238 * All line descriptors were created at once with the same
239 * flags so just check if the first one is really output.
240 */
241 if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
242 return -EPERM;
243
244 if (copy_from_user(&ghd, ip, sizeof(ghd)))
245 return -EFAULT;
246
247 /* Clamp all values to [0,1] */
248 for (i = 0; i < lh->num_descs; i++)
249 __assign_bit(i, vals, ghd.values[i]);
250
251 /* Reuse the array setting function */
252 return gpiod_set_array_value_complex(false,
253 true,
254 lh->num_descs,
255 lh->descs,
256 NULL,
257 vals);
258 case GPIOHANDLE_SET_CONFIG_IOCTL:
259 return linehandle_set_config(lh, ip);
260 default:
261 return -EINVAL;
262 }
263}
264
265#ifdef CONFIG_COMPAT
266static long linehandle_ioctl_compat(struct file *file, unsigned int cmd,
267 unsigned long arg)
268{
269 return linehandle_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
270}
271#endif
272
273static void linehandle_free(struct linehandle_state *lh)
274{
275 int i;
276
277 for (i = 0; i < lh->num_descs; i++)
278 if (lh->descs[i])
279 gpiod_free(lh->descs[i]);
280 kfree(lh->label);
281 gpio_device_put(lh->gdev);
282 kfree(lh);
283}
284
285static int linehandle_release(struct inode *inode, struct file *file)
286{
287 linehandle_free(file->private_data);
288 return 0;
289}
290
291static const struct file_operations linehandle_fileops = {
292 .release = linehandle_release,
293 .owner = THIS_MODULE,
294 .llseek = noop_llseek,
295 .unlocked_ioctl = linehandle_ioctl,
296#ifdef CONFIG_COMPAT
297 .compat_ioctl = linehandle_ioctl_compat,
298#endif
299};
300
301static int linehandle_create(struct gpio_device *gdev, void __user *ip)
302{
303 struct gpiohandle_request handlereq;
304 struct linehandle_state *lh;
305 struct file *file;
306 int fd, i, ret;
307 u32 lflags;
308
309 if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
310 return -EFAULT;
311 if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
312 return -EINVAL;
313
314 lflags = handlereq.flags;
315
316 ret = linehandle_validate_flags(lflags);
317 if (ret)
318 return ret;
319
320 lh = kzalloc(sizeof(*lh), GFP_KERNEL);
321 if (!lh)
322 return -ENOMEM;
323 lh->gdev = gpio_device_get(gdev);
324
325 if (handlereq.consumer_label[0] != '\0') {
326 /* label is only initialized if consumer_label is set */
327 lh->label = kstrndup(handlereq.consumer_label,
328 sizeof(handlereq.consumer_label) - 1,
329 GFP_KERNEL);
330 if (!lh->label) {
331 ret = -ENOMEM;
332 goto out_free_lh;
333 }
334 }
335
336 lh->num_descs = handlereq.lines;
337
338 /* Request each GPIO */
339 for (i = 0; i < handlereq.lines; i++) {
340 u32 offset = handlereq.lineoffsets[i];
341 struct gpio_desc *desc = gpio_device_get_desc(gdev, offset);
342
343 if (IS_ERR(desc)) {
344 ret = PTR_ERR(desc);
345 goto out_free_lh;
346 }
347
348 ret = gpiod_request_user(desc, lh->label);
349 if (ret)
350 goto out_free_lh;
351 lh->descs[i] = desc;
352 linehandle_flags_to_desc_flags(handlereq.flags, &desc->flags);
353
354 ret = gpiod_set_transitory(desc, false);
355 if (ret < 0)
356 goto out_free_lh;
357
358 /*
359 * Lines have to be requested explicitly for input
360 * or output, else the line will be treated "as is".
361 */
362 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
363 int val = !!handlereq.default_values[i];
364
365 ret = gpiod_direction_output_nonotify(desc, val);
366 if (ret)
367 goto out_free_lh;
368 } else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
369 ret = gpiod_direction_input_nonotify(desc);
370 if (ret)
371 goto out_free_lh;
372 }
373
374 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
375
376 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
377 offset);
378 }
379
380 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
381 if (fd < 0) {
382 ret = fd;
383 goto out_free_lh;
384 }
385
386 file = anon_inode_getfile("gpio-linehandle",
387 &linehandle_fileops,
388 lh,
389 O_RDONLY | O_CLOEXEC);
390 if (IS_ERR(file)) {
391 ret = PTR_ERR(file);
392 goto out_put_unused_fd;
393 }
394
395 handlereq.fd = fd;
396 if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
397 /*
398 * fput() will trigger the release() callback, so do not go onto
399 * the regular error cleanup path here.
400 */
401 fput(file);
402 put_unused_fd(fd);
403 return -EFAULT;
404 }
405
406 fd_install(fd, file);
407
408 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
409 lh->num_descs);
410
411 return 0;
412
413out_put_unused_fd:
414 put_unused_fd(fd);
415out_free_lh:
416 linehandle_free(lh);
417 return ret;
418}
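
/*
 * Example (sketch): userspace side of the v1 handle request above,
 * assuming an open chip_fd and line offset 3 for illustration.
 *
 *   struct gpiohandle_request hreq;
 *   struct gpiohandle_data hdata;
 *
 *   memset(&hreq, 0, sizeof(hreq));
 *   hreq.lineoffsets[0] = 3;
 *   hreq.lines = 1;
 *   hreq.flags = GPIOHANDLE_REQUEST_OUTPUT;
 *   hreq.default_values[0] = 1;
 *   ioctl(chip_fd, GPIO_GET_LINEHANDLE_IOCTL, &hreq);
 *
 *   memset(&hdata, 0, sizeof(hdata));
 *   hdata.values[0] = 0;
 *   ioctl(hreq.fd, GPIOHANDLE_SET_LINE_VALUES_IOCTL, &hdata);
 */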
419#endif /* CONFIG_GPIO_CDEV_V1 */
420
421/**
422 * struct line - contains the state of a requested line
423 * @desc: the GPIO descriptor for this line.
424 * @req: the corresponding line request
425 * @irq: the interrupt triggered in response to events on this GPIO
426 * @edflags: the edge flags, GPIO_V2_LINE_FLAG_EDGE_RISING and/or
427 * GPIO_V2_LINE_FLAG_EDGE_FALLING, indicating the edge detection applied
428 * @timestamp_ns: cache for the timestamp, storing it between hardirq and
429 * the IRQ thread; used to bring the timestamp close to the actual event
430 * @req_seqno: the seqno for the current edge event in the sequence of
431 * events for the corresponding line request. This is drawn from the @req.
432 * @line_seqno: the seqno for the current edge event in the sequence of
433 * events for this line.
434 * @work: the worker that implements software debouncing
435 * @sw_debounced: flag indicating if the software debouncer is active
436 * @level: the current debounced physical level of the line
437 * @hdesc: the Hardware Timestamp Engine (HTE) descriptor
438 * @raw_level: the line level at the time of event
439 * @total_discard_seq: the running counter of the discarded events
440 * @last_seqno: the last sequence number before debounce period expires
441 */
442struct line {
443 struct gpio_desc *desc;
444 /*
445 * -- edge detector specific fields --
446 */
447 struct linereq *req;
448 unsigned int irq;
449 /*
450 * The flags for the active edge detector configuration.
451 *
452 * edflags is set by linereq_create(), linereq_free(), and
453 * linereq_set_config(), which are themselves mutually
454 * exclusive, and is accessed by edge_irq_thread(),
455 * process_hw_ts_thread() and debounce_work_func(),
456 * which can all live with a slightly stale value.
457 */
458 u64 edflags;
459 /*
460 * timestamp_ns and req_seqno are accessed only by
461 * edge_irq_handler() and edge_irq_thread(), which are themselves
462 * mutually exclusive, so no additional protection is necessary.
463 */
464 u64 timestamp_ns;
465 u32 req_seqno;
466 /*
467 * line_seqno is accessed by either edge_irq_thread() or
468 * debounce_work_func(), which are themselves mutually exclusive,
469 * so no additional protection is necessary.
470 */
471 u32 line_seqno;
472 /*
473 * -- debouncer specific fields --
474 */
475 struct delayed_work work;
476 /*
477 * sw_debounce is accessed by linereq_set_config(), which is the
478 * only setter, and linereq_get_values(), which can live with a
479 * slightly stale value.
480 */
481 unsigned int sw_debounced;
482 /*
483 * level is accessed by debounce_work_func(), which is the only
484 * setter, and linereq_get_values() which can live with a slightly
485 * stale value.
486 */
487 unsigned int level;
488#ifdef CONFIG_HTE
489 struct hte_ts_desc hdesc;
490 /*
491 * The HTE provider sets the line level at the time of the event. The
492 * valid values are 0 or 1; a negative value indicates an error.
493 */
494 int raw_level;
495 /*
496 * when sw_debounce is set on an HTE-enabled line, this is the
497 * running counter of discarded events.
498 */
499 u32 total_discard_seq;
500 /*
501 * when sw_debounce is set on an HTE-enabled line, this records the
502 * last sequence number seen before the debounce period expires.
503 */
504 u32 last_seqno;
505#endif /* CONFIG_HTE */
506};
507
508/**
509 * struct linereq - contains the state of a userspace line request
510 * @gdev: the GPIO device the line request pertains to
511 * @label: consumer label used to tag GPIO descriptors
512 * @num_lines: the number of lines in the lines array
513 * @wait: wait queue that handles blocking reads of events
514 * @device_unregistered_nb: notifier block for receiving gdev unregister events
515 * @event_buffer_size: the number of elements allocated in @events
516 * @events: KFIFO for the GPIO events
517 * @seqno: the sequence number for edge events generated on all lines in
518 * this line request. Note that this is not used when @num_lines is 1, as
519 * the line_seqno is then the same and is cheaper to calculate.
520 * @config_mutex: mutex for serializing ioctl() calls to ensure consistency
521 * of configuration, particularly multi-step accesses to desc flags.
522 * @lines: the lines held by this line request, with @num_lines elements.
523 */
524struct linereq {
525 struct gpio_device *gdev;
526 const char *label;
527 u32 num_lines;
528 wait_queue_head_t wait;
529 struct notifier_block device_unregistered_nb;
530 u32 event_buffer_size;
531 DECLARE_KFIFO_PTR(events, struct gpio_v2_line_event);
532 atomic_t seqno;
533 struct mutex config_mutex;
534 struct line lines[] __counted_by(num_lines);
535};
536
537#define GPIO_V2_LINE_BIAS_FLAGS \
538 (GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \
539 GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \
540 GPIO_V2_LINE_FLAG_BIAS_DISABLED)
541
542#define GPIO_V2_LINE_DIRECTION_FLAGS \
543 (GPIO_V2_LINE_FLAG_INPUT | \
544 GPIO_V2_LINE_FLAG_OUTPUT)
545
546#define GPIO_V2_LINE_DRIVE_FLAGS \
547 (GPIO_V2_LINE_FLAG_OPEN_DRAIN | \
548 GPIO_V2_LINE_FLAG_OPEN_SOURCE)
549
550#define GPIO_V2_LINE_EDGE_FLAGS \
551 (GPIO_V2_LINE_FLAG_EDGE_RISING | \
552 GPIO_V2_LINE_FLAG_EDGE_FALLING)
553
554#define GPIO_V2_LINE_FLAG_EDGE_BOTH GPIO_V2_LINE_EDGE_FLAGS
555
556#define GPIO_V2_LINE_VALID_FLAGS \
557 (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
558 GPIO_V2_LINE_DIRECTION_FLAGS | \
559 GPIO_V2_LINE_DRIVE_FLAGS | \
560 GPIO_V2_LINE_EDGE_FLAGS | \
561 GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \
562 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
563 GPIO_V2_LINE_BIAS_FLAGS)
564
565/* subset of flags relevant for edge detector configuration */
566#define GPIO_V2_LINE_EDGE_DETECTOR_FLAGS \
567 (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
568 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
569 GPIO_V2_LINE_EDGE_FLAGS)
570
571static int linereq_unregistered_notify(struct notifier_block *nb,
572 unsigned long action, void *data)
573{
574 struct linereq *lr = container_of(nb, struct linereq,
575 device_unregistered_nb);
576
577 wake_up_poll(&lr->wait, EPOLLIN | EPOLLERR);
578
579 return NOTIFY_OK;
580}
581
582static void linereq_put_event(struct linereq *lr,
583 struct gpio_v2_line_event *le)
584{
585 bool overflow = false;
586
587 scoped_guard(spinlock, &lr->wait.lock) {
588 if (kfifo_is_full(&lr->events)) {
589 overflow = true;
590 kfifo_skip(&lr->events);
591 }
592 kfifo_in(&lr->events, le, 1);
593 }
594 if (!overflow)
595 wake_up_poll(&lr->wait, EPOLLIN);
596 else
597 pr_debug_ratelimited("event FIFO is full - event dropped\n");
598}
599
600static u64 line_event_timestamp(struct line *line)
601{
602 if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
603 return ktime_get_real_ns();
604 else if (IS_ENABLED(CONFIG_HTE) &&
605 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
606 return line->timestamp_ns;
607
608 return ktime_get_ns();
609}
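
/*
 * The clock used above follows the request flags:
 * GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME selects CLOCK_REALTIME,
 * GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE uses the hardware timestamp
 * captured by the HTE provider, and the default is the monotonic
 * clock from ktime_get_ns().
 */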
610
611static u32 line_event_id(int level)
612{
613 return level ? GPIO_V2_LINE_EVENT_RISING_EDGE :
614 GPIO_V2_LINE_EVENT_FALLING_EDGE;
615}
616
617static inline char *make_irq_label(const char *orig)
618{
619 char *new;
620
621 if (!orig)
622 return NULL;
623
624 new = kstrdup_and_replace(orig, '/', ':', GFP_KERNEL);
625 if (!new)
626 return ERR_PTR(-ENOMEM);
627
628 return new;
629}
630
631static inline void free_irq_label(const char *label)
632{
633 kfree(label);
634}
635
636#ifdef CONFIG_HTE
637
638static enum hte_return process_hw_ts_thread(void *p)
639{
640 struct line *line;
641 struct linereq *lr;
642 struct gpio_v2_line_event le;
643 u64 edflags;
644 int level;
645
646 if (!p)
647 return HTE_CB_HANDLED;
648
649 line = p;
650 lr = line->req;
651
652 memset(&le, 0, sizeof(le));
653
654 le.timestamp_ns = line->timestamp_ns;
655 edflags = READ_ONCE(line->edflags);
656
657 switch (edflags & GPIO_V2_LINE_EDGE_FLAGS) {
658 case GPIO_V2_LINE_FLAG_EDGE_BOTH:
659 level = (line->raw_level >= 0) ?
660 line->raw_level :
661 gpiod_get_raw_value_cansleep(line->desc);
662
663 if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
664 level = !level;
665
666 le.id = line_event_id(level);
667 break;
668 case GPIO_V2_LINE_FLAG_EDGE_RISING:
669 le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
670 break;
671 case GPIO_V2_LINE_FLAG_EDGE_FALLING:
672 le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
673 break;
674 default:
675 return HTE_CB_HANDLED;
676 }
677 le.line_seqno = line->line_seqno;
678 le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
679 le.offset = gpio_chip_hwgpio(line->desc);
680
681 linereq_put_event(lr, &le);
682
683 return HTE_CB_HANDLED;
684}
685
686static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
687{
688 struct line *line;
689 struct linereq *lr;
690 int diff_seqno = 0;
691
692 if (!ts || !p)
693 return HTE_CB_HANDLED;
694
695 line = p;
696 line->timestamp_ns = ts->tsc;
697 line->raw_level = ts->raw_level;
698 lr = line->req;
699
700 if (READ_ONCE(line->sw_debounced)) {
701 line->total_discard_seq++;
702 line->last_seqno = ts->seq;
703 mod_delayed_work(system_wq, &line->work,
704 usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
705 } else {
706 if (unlikely(ts->seq < line->line_seqno))
707 return HTE_CB_HANDLED;
708
709 diff_seqno = ts->seq - line->line_seqno;
710 line->line_seqno = ts->seq;
711 if (lr->num_lines != 1)
712 line->req_seqno = atomic_add_return(diff_seqno,
713 &lr->seqno);
714
715 return HTE_RUN_SECOND_CB;
716 }
717
718 return HTE_CB_HANDLED;
719}
720
721static int hte_edge_setup(struct line *line, u64 eflags)
722{
723 int ret;
724 unsigned long flags = 0;
725 struct hte_ts_desc *hdesc = &line->hdesc;
726
727 if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
728 flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
729 HTE_FALLING_EDGE_TS :
730 HTE_RISING_EDGE_TS;
731 if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
732 flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
733 HTE_RISING_EDGE_TS :
734 HTE_FALLING_EDGE_TS;
735
736 line->total_discard_seq = 0;
737
738 hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags, NULL,
739 line->desc);
740
741 ret = hte_ts_get(NULL, hdesc, 0);
742 if (ret)
743 return ret;
744
745 return hte_request_ts_ns(hdesc, process_hw_ts, process_hw_ts_thread,
746 line);
747}
748
749#else
750
751static int hte_edge_setup(struct line *line, u64 eflags)
752{
753 return 0;
754}
755#endif /* CONFIG_HTE */
756
757static irqreturn_t edge_irq_thread(int irq, void *p)
758{
759 struct line *line = p;
760 struct linereq *lr = line->req;
761 struct gpio_v2_line_event le;
762
763 /* Do not leak kernel stack to userspace */
764 memset(&le, 0, sizeof(le));
765
766 if (line->timestamp_ns) {
767 le.timestamp_ns = line->timestamp_ns;
768 } else {
769 /*
770 * We may be running from a nested threaded interrupt in
771 * which case we didn't get the timestamp from
772 * edge_irq_handler().
773 */
774 le.timestamp_ns = line_event_timestamp(line);
775 if (lr->num_lines != 1)
776 line->req_seqno = atomic_inc_return(&lr->seqno);
777 }
778 line->timestamp_ns = 0;
779
780 switch (READ_ONCE(line->edflags) & GPIO_V2_LINE_EDGE_FLAGS) {
781 case GPIO_V2_LINE_FLAG_EDGE_BOTH:
782 le.id = line_event_id(gpiod_get_value_cansleep(line->desc));
783 break;
784 case GPIO_V2_LINE_FLAG_EDGE_RISING:
785 le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
786 break;
787 case GPIO_V2_LINE_FLAG_EDGE_FALLING:
788 le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
789 break;
790 default:
791 return IRQ_NONE;
792 }
793 line->line_seqno++;
794 le.line_seqno = line->line_seqno;
795 le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
796 le.offset = gpio_chip_hwgpio(line->desc);
797
798 linereq_put_event(lr, &le);
799
800 return IRQ_HANDLED;
801}
802
803static irqreturn_t edge_irq_handler(int irq, void *p)
804{
805 struct line *line = p;
806 struct linereq *lr = line->req;
807
808 /*
809 * Just store the timestamp in hardirq context so we get it as
810 * close in time as possible to the actual event.
811 */
812 line->timestamp_ns = line_event_timestamp(line);
813
814 if (lr->num_lines != 1)
815 line->req_seqno = atomic_inc_return(&lr->seqno);
816
817 return IRQ_WAKE_THREAD;
818}
819
820/*
821 * returns the current debounced logical value.
822 */
823static bool debounced_value(struct line *line)
824{
825 bool value;
826
827 /*
828 * minor race - debouncer may be stopped here, so edge_detector_stop()
829 * must leave the value unchanged so the following will read the level
830 * from when the debouncer was last running.
831 */
832 value = READ_ONCE(line->level);
833
834 if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
835 value = !value;
836
837 return value;
838}
839
840static irqreturn_t debounce_irq_handler(int irq, void *p)
841{
842 struct line *line = p;
843
844 mod_delayed_work(system_wq, &line->work,
845 usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
846
847 return IRQ_HANDLED;
848}
849
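
/*
 * Software debounce flow: debounce_irq_handler() (or the HTE callback)
 * re-arms the delayed work on every raw edge, so debounce_work_func()
 * only runs once the line has been quiet for debounce_period_us. It
 * then samples the raw level, updates line->level if it changed, and
 * emits an edge event if that edge is being monitored.
 */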
850static void debounce_work_func(struct work_struct *work)
851{
852 struct gpio_v2_line_event le;
853 struct line *line = container_of(work, struct line, work.work);
854 struct linereq *lr;
855 u64 eflags, edflags = READ_ONCE(line->edflags);
856 int level = -1;
857#ifdef CONFIG_HTE
858 int diff_seqno;
859
860 if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
861 level = line->raw_level;
862#endif
863 if (level < 0)
864 level = gpiod_get_raw_value_cansleep(line->desc);
865 if (level < 0) {
866 pr_debug_ratelimited("debouncer failed to read line value\n");
867 return;
868 }
869
870 if (READ_ONCE(line->level) == level)
871 return;
872
873 WRITE_ONCE(line->level, level);
874
875 /* -- edge detection -- */
876 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
877 if (!eflags)
878 return;
879
880 /* switch from physical level to logical - if they differ */
881 if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
882 level = !level;
883
884 /* ignore edges that are not being monitored */
885 if (((eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) ||
886 ((eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level))
887 return;
888
889 /* Do not leak kernel stack to userspace */
890 memset(&le, 0, sizeof(le));
891
892 lr = line->req;
893 le.timestamp_ns = line_event_timestamp(line);
894 le.offset = gpio_chip_hwgpio(line->desc);
895#ifdef CONFIG_HTE
896 if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) {
897 /* discard events except the last one */
898 line->total_discard_seq -= 1;
899 diff_seqno = line->last_seqno - line->total_discard_seq -
900 line->line_seqno;
901 line->line_seqno = line->last_seqno - line->total_discard_seq;
902 le.line_seqno = line->line_seqno;
903 le.seqno = (lr->num_lines == 1) ?
904 le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno);
905 } else
906#endif /* CONFIG_HTE */
907 {
908 line->line_seqno++;
909 le.line_seqno = line->line_seqno;
910 le.seqno = (lr->num_lines == 1) ?
911 le.line_seqno : atomic_inc_return(&lr->seqno);
912 }
913
914 le.id = line_event_id(level);
915
916 linereq_put_event(lr, &le);
917}
918
919static int debounce_setup(struct line *line, unsigned int debounce_period_us)
920{
921 unsigned long irqflags;
922 int ret, level, irq;
923 char *label;
924
925 /*
926 * Try hardware. Skip gpiod_set_config() to avoid emitting two
927 * CHANGED_CONFIG line state events.
928 */
929 ret = gpio_do_set_config(line->desc,
930 pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE,
931 debounce_period_us));
932 if (ret != -ENOTSUPP)
933 return ret;
934
935 if (debounce_period_us) {
936 /* setup software debounce */
937 level = gpiod_get_raw_value_cansleep(line->desc);
938 if (level < 0)
939 return level;
940
941 if (!(IS_ENABLED(CONFIG_HTE) &&
942 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
943 irq = gpiod_to_irq(line->desc);
944 if (irq < 0)
945 return -ENXIO;
946
947 label = make_irq_label(line->req->label);
948 if (IS_ERR(label))
949 return -ENOMEM;
950
951 irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
952 ret = request_irq(irq, debounce_irq_handler, irqflags,
953 label, line);
954 if (ret) {
955 free_irq_label(label);
956 return ret;
957 }
958 line->irq = irq;
959 } else {
960 ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
961 if (ret)
962 return ret;
963 }
964
965 WRITE_ONCE(line->level, level);
966 WRITE_ONCE(line->sw_debounced, 1);
967 }
968 return 0;
969}
970
971static bool gpio_v2_line_config_debounced(struct gpio_v2_line_config *lc,
972 unsigned int line_idx)
973{
974 unsigned int i;
975 u64 mask = BIT_ULL(line_idx);
976
977 for (i = 0; i < lc->num_attrs; i++) {
978 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
979 (lc->attrs[i].mask & mask))
980 return true;
981 }
982 return false;
983}
984
985static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
986 unsigned int line_idx)
987{
988 unsigned int i;
989 u64 mask = BIT_ULL(line_idx);
990
991 for (i = 0; i < lc->num_attrs; i++) {
992 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
993 (lc->attrs[i].mask & mask))
994 return lc->attrs[i].attr.debounce_period_us;
995 }
996 return 0;
997}
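
/*
 * Example (sketch): how userspace sets the debounce attribute that the
 * two helpers above look up, with an illustrative 5 ms period. Mask
 * bit 0 means the attribute applies to offsets[0] of the request.
 *
 *   struct gpio_v2_line_config *lc = &req.config;
 *
 *   lc->num_attrs = 1;
 *   lc->attrs[0].mask = 1;
 *   lc->attrs[0].attr.id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
 *   lc->attrs[0].attr.debounce_period_us = 5000;
 */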
998
999static void edge_detector_stop(struct line *line)
1000{
1001 if (line->irq) {
1002 free_irq_label(free_irq(line->irq, line));
1003 line->irq = 0;
1004 }
1005
1006#ifdef CONFIG_HTE
1007 if (READ_ONCE(line->edflags) & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
1008 hte_ts_put(&line->hdesc);
1009#endif
1010
1011 cancel_delayed_work_sync(&line->work);
1012 WRITE_ONCE(line->sw_debounced, 0);
1013 WRITE_ONCE(line->edflags, 0);
1014 if (line->desc)
1015 WRITE_ONCE(line->desc->debounce_period_us, 0);
1016 /* do not change line->level - see comment in debounced_value() */
1017}
1018
1019static int edge_detector_fifo_init(struct linereq *req)
1020{
1021 if (kfifo_initialized(&req->events))
1022 return 0;
1023
1024 return kfifo_alloc(&req->events, req->event_buffer_size, GFP_KERNEL);
1025}
1026
1027static int edge_detector_setup(struct line *line,
1028 struct gpio_v2_line_config *lc,
1029 unsigned int line_idx, u64 edflags)
1030{
1031 u32 debounce_period_us;
1032 unsigned long irqflags = 0;
1033 u64 eflags;
1034 int irq, ret;
1035 char *label;
1036
1037 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
1038 if (eflags) {
1039 ret = edge_detector_fifo_init(line->req);
1040 if (ret)
1041 return ret;
1042 }
1043 if (gpio_v2_line_config_debounced(lc, line_idx)) {
1044 debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
1045 ret = debounce_setup(line, debounce_period_us);
1046 if (ret)
1047 return ret;
1048 WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
1049 }
1050
1051 /* detection disabled or sw debouncer will provide edge detection */
1052 if (!eflags || READ_ONCE(line->sw_debounced))
1053 return 0;
1054
1055 if (IS_ENABLED(CONFIG_HTE) &&
1056 (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1057 return hte_edge_setup(line, edflags);
1058
1059 irq = gpiod_to_irq(line->desc);
1060 if (irq < 0)
1061 return -ENXIO;
1062
1063 if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
1064 irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
1065 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
1066 if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
1067 irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
1068 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
1069 irqflags |= IRQF_ONESHOT;
1070
1071 label = make_irq_label(line->req->label);
1072 if (IS_ERR(label))
1073 return PTR_ERR(label);
1074
1075 /* Request a thread to read the events */
1076 ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
1077 irqflags, label, line);
1078 if (ret) {
1079 free_irq_label(label);
1080 return ret;
1081 }
1082
1083 line->irq = irq;
1084 return 0;
1085}
1086
1087static int edge_detector_update(struct line *line,
1088 struct gpio_v2_line_config *lc,
1089 unsigned int line_idx, u64 edflags)
1090{
1091 u64 active_edflags = READ_ONCE(line->edflags);
1092 unsigned int debounce_period_us =
1093 gpio_v2_line_config_debounce_period(lc, line_idx);
1094
1095 if ((active_edflags == edflags) &&
1096 (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
1097 return 0;
1098
1099 /* sw debounced and still will be... */
1100 if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
1101 WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
1102 /*
1103 * ensure event fifo is initialised if edge detection
1104 * is now enabled.
1105 */
1106 if (edflags & GPIO_V2_LINE_EDGE_FLAGS)
1107 return edge_detector_fifo_init(line->req);
1108
1109 return 0;
1110 }
1111
1112 /* reconfiguring edge detection or sw debounce being disabled */
1113 if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
1114 (active_edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) ||
1115 (!debounce_period_us && READ_ONCE(line->sw_debounced)))
1116 edge_detector_stop(line);
1117
1118 return edge_detector_setup(line, lc, line_idx, edflags);
1119}
1120
1121static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
1122 unsigned int line_idx)
1123{
1124 unsigned int i;
1125 u64 mask = BIT_ULL(line_idx);
1126
1127 for (i = 0; i < lc->num_attrs; i++) {
1128 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_FLAGS) &&
1129 (lc->attrs[i].mask & mask))
1130 return lc->attrs[i].attr.flags;
1131 }
1132 return lc->flags;
1133}
1134
1135static int gpio_v2_line_config_output_value(struct gpio_v2_line_config *lc,
1136 unsigned int line_idx)
1137{
1138 unsigned int i;
1139 u64 mask = BIT_ULL(line_idx);
1140
1141 for (i = 0; i < lc->num_attrs; i++) {
1142 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES) &&
1143 (lc->attrs[i].mask & mask))
1144 return !!(lc->attrs[i].attr.values & mask);
1145 }
1146 return 0;
1147}
1148
1149static int gpio_v2_line_flags_validate(u64 flags)
1150{
1151 /* Return an error if an unknown flag is set */
1152 if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
1153 return -EINVAL;
1154
1155 if (!IS_ENABLED(CONFIG_HTE) &&
1156 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1157 return -EOPNOTSUPP;
1158
1159 /*
1160 * Do not allow both INPUT and OUTPUT flags to be set as they are
1161 * contradictory.
1162 */
1163 if ((flags & GPIO_V2_LINE_FLAG_INPUT) &&
1164 (flags & GPIO_V2_LINE_FLAG_OUTPUT))
1165 return -EINVAL;
1166
1167 /* Only allow one event clock source */
1168 if (IS_ENABLED(CONFIG_HTE) &&
1169 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
1170 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1171 return -EINVAL;
1172
1173 /* Edge detection requires explicit input. */
1174 if ((flags & GPIO_V2_LINE_EDGE_FLAGS) &&
1175 !(flags & GPIO_V2_LINE_FLAG_INPUT))
1176 return -EINVAL;
1177
1178 /*
1179 * Do not allow OPEN_SOURCE and OPEN_DRAIN flags in a single
1180 * request. If the hardware actually supports enabling both at the
1181 * same time the electrical result would be disastrous.
1182 */
1183 if ((flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN) &&
1184 (flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE))
1185 return -EINVAL;
1186
1187 /* Drive requires explicit output direction. */
1188 if ((flags & GPIO_V2_LINE_DRIVE_FLAGS) &&
1189 !(flags & GPIO_V2_LINE_FLAG_OUTPUT))
1190 return -EINVAL;
1191
1192 /* Bias requires explicit direction. */
1193 if ((flags & GPIO_V2_LINE_BIAS_FLAGS) &&
1194 !(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
1195 return -EINVAL;
1196
1197 /* Only one bias flag can be set. */
1198 if (((flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED) &&
1199 (flags & (GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN |
1200 GPIO_V2_LINE_FLAG_BIAS_PULL_UP))) ||
1201 ((flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN) &&
1202 (flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)))
1203 return -EINVAL;
1204
1205 return 0;
1206}
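
/*
 * Examples of the rules above, using GPIO_V2_LINE_FLAG_* values:
 *
 *   INPUT | EDGE_RISING | BIAS_PULL_UP   -> valid
 *   OUTPUT | EDGE_RISING                 -> -EINVAL (edges require INPUT)
 *   OUTPUT | OPEN_DRAIN | OPEN_SOURCE    -> -EINVAL (conflicting drive)
 *   BIAS_PULL_UP alone                   -> -EINVAL (bias requires a direction)
 */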
1207
1208static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc,
1209 unsigned int num_lines)
1210{
1211 unsigned int i;
1212 u64 flags;
1213 int ret;
1214
1215 if (lc->num_attrs > GPIO_V2_LINE_NUM_ATTRS_MAX)
1216 return -EINVAL;
1217
1218 if (!mem_is_zero(lc->padding, sizeof(lc->padding)))
1219 return -EINVAL;
1220
1221 for (i = 0; i < num_lines; i++) {
1222 flags = gpio_v2_line_config_flags(lc, i);
1223 ret = gpio_v2_line_flags_validate(flags);
1224 if (ret)
1225 return ret;
1226
1227 /* debounce requires explicit input */
1228 if (gpio_v2_line_config_debounced(lc, i) &&
1229 !(flags & GPIO_V2_LINE_FLAG_INPUT))
1230 return -EINVAL;
1231 }
1232 return 0;
1233}
1234
1235static void gpio_v2_line_config_flags_to_desc_flags(u64 lflags,
1236 unsigned long *flagsp)
1237{
1238 unsigned long flags = READ_ONCE(*flagsp);
1239
1240 assign_bit(FLAG_ACTIVE_LOW, &flags,
1241 lflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);
1242
1243 if (lflags & GPIO_V2_LINE_FLAG_OUTPUT)
1244 set_bit(FLAG_IS_OUT, &flags);
1245 else if (lflags & GPIO_V2_LINE_FLAG_INPUT)
1246 clear_bit(FLAG_IS_OUT, &flags);
1247
1248 assign_bit(FLAG_EDGE_RISING, &flags,
1249 lflags & GPIO_V2_LINE_FLAG_EDGE_RISING);
1250 assign_bit(FLAG_EDGE_FALLING, &flags,
1251 lflags & GPIO_V2_LINE_FLAG_EDGE_FALLING);
1252
1253 assign_bit(FLAG_OPEN_DRAIN, &flags,
1254 lflags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
1255 assign_bit(FLAG_OPEN_SOURCE, &flags,
1256 lflags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);
1257
1258 assign_bit(FLAG_PULL_UP, &flags,
1259 lflags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
1260 assign_bit(FLAG_PULL_DOWN, &flags,
1261 lflags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
1262 assign_bit(FLAG_BIAS_DISABLE, &flags,
1263 lflags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);
1264
1265 assign_bit(FLAG_EVENT_CLOCK_REALTIME, &flags,
1266 lflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
1267 assign_bit(FLAG_EVENT_CLOCK_HTE, &flags,
1268 lflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
1269
1270 WRITE_ONCE(*flagsp, flags);
1271}
1272
1273static long linereq_get_values(struct linereq *lr, void __user *ip)
1274{
1275 struct gpio_v2_line_values lv;
1276 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
1277 struct gpio_desc **descs;
1278 unsigned int i, didx, num_get;
1279 bool val;
1280 int ret;
1281
1282 /* NOTE: It's ok to read values of output lines. */
1283 if (copy_from_user(&lv, ip, sizeof(lv)))
1284 return -EFAULT;
1285
1286 /*
1287 * gpiod_get_array_value_complex() requires compacted desc and val
1288 * arrays, rather than the sparse ones in lv.
1289 * Calculation of num_get and construction of the desc array is
1290 * optimized to avoid allocation for the desc array for the common
1291 * num_get == 1 case.
1292 */
1293 /* scan requested lines to calculate the subset to get */
1294 for (num_get = 0, i = 0; i < lr->num_lines; i++) {
1295 if (lv.mask & BIT_ULL(i)) {
1296 num_get++;
1297 /* capture desc for the num_get == 1 case */
1298 descs = &lr->lines[i].desc;
1299 }
1300 }
1301
1302 if (num_get == 0)
1303 return -EINVAL;
1304
1305 if (num_get != 1) {
1306 /* build compacted desc array */
1307 descs = kmalloc_array(num_get, sizeof(*descs), GFP_KERNEL);
1308 if (!descs)
1309 return -ENOMEM;
1310 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1311 if (lv.mask & BIT_ULL(i)) {
1312 descs[didx] = lr->lines[i].desc;
1313 didx++;
1314 }
1315 }
1316 }
1317 ret = gpiod_get_array_value_complex(false, true, num_get,
1318 descs, NULL, vals);
1319
1320 if (num_get != 1)
1321 kfree(descs);
1322 if (ret)
1323 return ret;
1324
1325 lv.bits = 0;
1326 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1327 /* unpack compacted vals for the response */
1328 if (lv.mask & BIT_ULL(i)) {
1329 if (lr->lines[i].sw_debounced)
1330 val = debounced_value(&lr->lines[i]);
1331 else
1332 val = test_bit(didx, vals);
1333 if (val)
1334 lv.bits |= BIT_ULL(i);
1335 didx++;
1336 }
1337 }
1338
1339 if (copy_to_user(ip, &lv, sizeof(lv)))
1340 return -EFAULT;
1341
1342 return 0;
1343}
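
/*
 * Example (sketch): userspace side of the ioctl above. The mask selects
 * lines by their index within the request and bits is returned in the
 * same positions; values shown are illustrative.
 *
 *   struct gpio_v2_line_values lv = {
 *           .mask = (1ULL << 0) | (1ULL << 2),
 *   };
 *
 *   ioctl(req_fd, GPIO_V2_LINE_GET_VALUES_IOCTL, &lv);
 *   value_of_line2 = (lv.bits >> 2) & 1;
 */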
1344
1345static long linereq_set_values(struct linereq *lr, void __user *ip)
1346{
1347 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
1348 struct gpio_v2_line_values lv;
1349 struct gpio_desc **descs;
1350 unsigned int i, didx, num_set;
1351 int ret;
1352
1353 if (copy_from_user(&lv, ip, sizeof(lv)))
1354 return -EFAULT;
1355
1356 guard(mutex)(&lr->config_mutex);
1357
1358 /*
1359 * gpiod_set_array_value_complex() requires compacted desc and val
1360 * arrays, rather than the sparse ones in lv.
1361 * Calculation of num_set and construction of the descs and vals arrays
1362 * is optimized to minimize scanning the lv->mask, and to avoid
1363 * allocation for the desc array for the common num_set == 1 case.
1364 */
1365 bitmap_zero(vals, GPIO_V2_LINES_MAX);
1366 /* scan requested lines to determine the subset to be set */
1367 for (num_set = 0, i = 0; i < lr->num_lines; i++) {
1368 if (lv.mask & BIT_ULL(i)) {
1369 /* setting inputs is not allowed */
1370 if (!test_bit(FLAG_IS_OUT, &lr->lines[i].desc->flags))
1371 return -EPERM;
1372 /* add to compacted values */
1373 if (lv.bits & BIT_ULL(i))
1374 __set_bit(num_set, vals);
1375 num_set++;
1376 /* capture desc for the num_set == 1 case */
1377 descs = &lr->lines[i].desc;
1378 }
1379 }
1380 if (num_set == 0)
1381 return -EINVAL;
1382
1383 if (num_set != 1) {
1384 /* build compacted desc array */
1385 descs = kmalloc_array(num_set, sizeof(*descs), GFP_KERNEL);
1386 if (!descs)
1387 return -ENOMEM;
1388 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1389 if (lv.mask & BIT_ULL(i)) {
1390 descs[didx] = lr->lines[i].desc;
1391 didx++;
1392 }
1393 }
1394 }
1395 ret = gpiod_set_array_value_complex(false, true, num_set,
1396 descs, NULL, vals);
1397
1398 if (num_set != 1)
1399 kfree(descs);
1400 return ret;
1401}
1402
1403static long linereq_set_config(struct linereq *lr, void __user *ip)
1404{
1405 struct gpio_v2_line_config lc;
1406 struct gpio_desc *desc;
1407 struct line *line;
1408 unsigned int i;
1409 u64 flags, edflags;
1410 int ret;
1411
1412 if (copy_from_user(&lc, ip, sizeof(lc)))
1413 return -EFAULT;
1414
1415 ret = gpio_v2_line_config_validate(&lc, lr->num_lines);
1416 if (ret)
1417 return ret;
1418
1419 guard(mutex)(&lr->config_mutex);
1420
1421 for (i = 0; i < lr->num_lines; i++) {
1422 line = &lr->lines[i];
1423 desc = lr->lines[i].desc;
1424 flags = gpio_v2_line_config_flags(&lc, i);
1425 /*
1426 * Lines not explicitly reconfigured as input or output
1427 * are left unchanged.
1428 */
1429 if (!(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
1430 continue;
1431 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
1432 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
1433 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
1434 int val = gpio_v2_line_config_output_value(&lc, i);
1435
1436 edge_detector_stop(line);
1437 ret = gpiod_direction_output_nonotify(desc, val);
1438 if (ret)
1439 return ret;
1440 } else {
1441 ret = gpiod_direction_input_nonotify(desc);
1442 if (ret)
1443 return ret;
1444
1445 ret = edge_detector_update(line, &lc, i, edflags);
1446 if (ret)
1447 return ret;
1448 }
1449
1450 WRITE_ONCE(line->edflags, edflags);
1451
1452 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
1453 }
1454 return 0;
1455}
1456
1457static long linereq_ioctl(struct file *file, unsigned int cmd,
1458 unsigned long arg)
1459{
1460 struct linereq *lr = file->private_data;
1461 void __user *ip = (void __user *)arg;
1462
1463 guard(srcu)(&lr->gdev->srcu);
1464
1465 if (!rcu_access_pointer(lr->gdev->chip))
1466 return -ENODEV;
1467
1468 switch (cmd) {
1469 case GPIO_V2_LINE_GET_VALUES_IOCTL:
1470 return linereq_get_values(lr, ip);
1471 case GPIO_V2_LINE_SET_VALUES_IOCTL:
1472 return linereq_set_values(lr, ip);
1473 case GPIO_V2_LINE_SET_CONFIG_IOCTL:
1474 return linereq_set_config(lr, ip);
1475 default:
1476 return -EINVAL;
1477 }
1478}
1479
1480#ifdef CONFIG_COMPAT
1481static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
1482 unsigned long arg)
1483{
1484 return linereq_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1485}
1486#endif
1487
1488static __poll_t linereq_poll(struct file *file,
1489 struct poll_table_struct *wait)
1490{
1491 struct linereq *lr = file->private_data;
1492 __poll_t events = 0;
1493
1494 guard(srcu)(&lr->gdev->srcu);
1495
1496 if (!rcu_access_pointer(lr->gdev->chip))
1497 return EPOLLHUP | EPOLLERR;
1498
1499 poll_wait(file, &lr->wait, wait);
1500
1501 if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events,
1502 &lr->wait.lock))
1503 events = EPOLLIN | EPOLLRDNORM;
1504
1505 return events;
1506}
1507
1508static ssize_t linereq_read(struct file *file, char __user *buf,
1509 size_t count, loff_t *f_ps)
1510{
1511 struct linereq *lr = file->private_data;
1512 struct gpio_v2_line_event le;
1513 ssize_t bytes_read = 0;
1514 int ret;
1515
1516 guard(srcu)(&lr->gdev->srcu);
1517
1518 if (!rcu_access_pointer(lr->gdev->chip))
1519 return -ENODEV;
1520
1521 if (count < sizeof(le))
1522 return -EINVAL;
1523
1524 do {
1525 scoped_guard(spinlock, &lr->wait.lock) {
1526 if (kfifo_is_empty(&lr->events)) {
1527 if (bytes_read)
1528 return bytes_read;
1529
1530 if (file->f_flags & O_NONBLOCK)
1531 return -EAGAIN;
1532
1533 ret = wait_event_interruptible_locked(lr->wait,
1534 !kfifo_is_empty(&lr->events));
1535 if (ret)
1536 return ret;
1537 }
1538
1539 if (kfifo_out(&lr->events, &le, 1) != 1) {
1540 /*
1541 * This should never happen - we hold the
1542 * lock from the moment we learned the fifo
1543 * is no longer empty until now.
1544 */
1545 WARN(1, "failed to read from non-empty kfifo");
1546 return -EIO;
1547 }
1548 }
1549
1550 if (copy_to_user(buf + bytes_read, &le, sizeof(le)))
1551 return -EFAULT;
1552 bytes_read += sizeof(le);
1553 } while (count >= bytes_read + sizeof(le));
1554
1555 return bytes_read;
1556}
1557
1558static void linereq_free(struct linereq *lr)
1559{
1560 unsigned int i;
1561
1562 if (lr->device_unregistered_nb.notifier_call)
1563 blocking_notifier_chain_unregister(&lr->gdev->device_notifier,
1564 &lr->device_unregistered_nb);
1565
1566 for (i = 0; i < lr->num_lines; i++) {
1567 if (lr->lines[i].desc) {
1568 edge_detector_stop(&lr->lines[i]);
1569 gpiod_free(lr->lines[i].desc);
1570 }
1571 }
1572 kfifo_free(&lr->events);
1573 kfree(lr->label);
1574 gpio_device_put(lr->gdev);
1575 kvfree(lr);
1576}
1577
1578static int linereq_release(struct inode *inode, struct file *file)
1579{
1580 struct linereq *lr = file->private_data;
1581
1582 linereq_free(lr);
1583 return 0;
1584}
1585
1586#ifdef CONFIG_PROC_FS
1587static void linereq_show_fdinfo(struct seq_file *out, struct file *file)
1588{
1589 struct linereq *lr = file->private_data;
1590 struct device *dev = &lr->gdev->dev;
1591 u16 i;
1592
1593 seq_printf(out, "gpio-chip:\t%s\n", dev_name(dev));
1594
1595 for (i = 0; i < lr->num_lines; i++)
1596 seq_printf(out, "gpio-line:\t%d\n",
1597 gpio_chip_hwgpio(lr->lines[i].desc));
1598}
1599#endif
1600
1601static const struct file_operations line_fileops = {
1602 .release = linereq_release,
1603 .read = linereq_read,
1604 .poll = linereq_poll,
1605 .owner = THIS_MODULE,
1606 .llseek = noop_llseek,
1607 .unlocked_ioctl = linereq_ioctl,
1608#ifdef CONFIG_COMPAT
1609 .compat_ioctl = linereq_ioctl_compat,
1610#endif
1611#ifdef CONFIG_PROC_FS
1612 .show_fdinfo = linereq_show_fdinfo,
1613#endif
1614};
1615
1616static int linereq_create(struct gpio_device *gdev, void __user *ip)
1617{
1618 struct gpio_v2_line_request ulr;
1619 struct gpio_v2_line_config *lc;
1620 struct linereq *lr;
1621 struct file *file;
1622 u64 flags, edflags;
1623 unsigned int i;
1624 int fd, ret;
1625
1626 if (copy_from_user(&ulr, ip, sizeof(ulr)))
1627 return -EFAULT;
1628
1629 if ((ulr.num_lines == 0) || (ulr.num_lines > GPIO_V2_LINES_MAX))
1630 return -EINVAL;
1631
1632 if (!mem_is_zero(ulr.padding, sizeof(ulr.padding)))
1633 return -EINVAL;
1634
1635 lc = &ulr.config;
1636 ret = gpio_v2_line_config_validate(lc, ulr.num_lines);
1637 if (ret)
1638 return ret;
1639
1640 lr = kvzalloc(struct_size(lr, lines, ulr.num_lines), GFP_KERNEL);
1641 if (!lr)
1642 return -ENOMEM;
1643 lr->num_lines = ulr.num_lines;
1644
1645 lr->gdev = gpio_device_get(gdev);
1646
1647 for (i = 0; i < ulr.num_lines; i++) {
1648 lr->lines[i].req = lr;
1649 WRITE_ONCE(lr->lines[i].sw_debounced, 0);
1650 INIT_DELAYED_WORK(&lr->lines[i].work, debounce_work_func);
1651 }
1652
1653 if (ulr.consumer[0] != '\0') {
1654 /* label is only initialized if consumer is set */
1655 lr->label = kstrndup(ulr.consumer, sizeof(ulr.consumer) - 1,
1656 GFP_KERNEL);
1657 if (!lr->label) {
1658 ret = -ENOMEM;
1659 goto out_free_linereq;
1660 }
1661 }
1662
1663 mutex_init(&lr->config_mutex);
1664 init_waitqueue_head(&lr->wait);
1665 INIT_KFIFO(lr->events);
1666 lr->event_buffer_size = ulr.event_buffer_size;
1667 if (lr->event_buffer_size == 0)
1668 lr->event_buffer_size = ulr.num_lines * 16;
1669 else if (lr->event_buffer_size > GPIO_V2_LINES_MAX * 16)
1670 lr->event_buffer_size = GPIO_V2_LINES_MAX * 16;
1671
1672 atomic_set(&lr->seqno, 0);
1673
1674 /* Request each GPIO */
1675 for (i = 0; i < ulr.num_lines; i++) {
1676 u32 offset = ulr.offsets[i];
1677 struct gpio_desc *desc = gpio_device_get_desc(gdev, offset);
1678
1679 if (IS_ERR(desc)) {
1680 ret = PTR_ERR(desc);
1681 goto out_free_linereq;
1682 }
1683
1684 ret = gpiod_request_user(desc, lr->label);
1685 if (ret)
1686 goto out_free_linereq;
1687
1688 lr->lines[i].desc = desc;
1689 flags = gpio_v2_line_config_flags(lc, i);
1690 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
1691
1692 ret = gpiod_set_transitory(desc, false);
1693 if (ret < 0)
1694 goto out_free_linereq;
1695
1696 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
1697 /*
1698 * Lines have to be requested explicitly for input
1699 * or output, else the line will be treated "as is".
1700 */
1701 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
1702 int val = gpio_v2_line_config_output_value(lc, i);
1703
1704 ret = gpiod_direction_output_nonotify(desc, val);
1705 if (ret)
1706 goto out_free_linereq;
1707 } else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
1708 ret = gpiod_direction_input_nonotify(desc);
1709 if (ret)
1710 goto out_free_linereq;
1711
1712 ret = edge_detector_setup(&lr->lines[i], lc, i,
1713 edflags);
1714 if (ret)
1715 goto out_free_linereq;
1716 }
1717
1718 lr->lines[i].edflags = edflags;
1719
1720 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
1721
1722 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
1723 offset);
1724 }
1725
1726 lr->device_unregistered_nb.notifier_call = linereq_unregistered_notify;
1727 ret = blocking_notifier_chain_register(&gdev->device_notifier,
1728 &lr->device_unregistered_nb);
1729 if (ret)
1730 goto out_free_linereq;
1731
1732 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
1733 if (fd < 0) {
1734 ret = fd;
1735 goto out_free_linereq;
1736 }
1737
1738 file = anon_inode_getfile("gpio-line", &line_fileops, lr,
1739 O_RDONLY | O_CLOEXEC);
1740 if (IS_ERR(file)) {
1741 ret = PTR_ERR(file);
1742 goto out_put_unused_fd;
1743 }
1744
1745 ulr.fd = fd;
1746 if (copy_to_user(ip, &ulr, sizeof(ulr))) {
1747 /*
1748 * fput() will trigger the release() callback, so do not go onto
1749 * the regular error cleanup path here.
1750 */
1751 fput(file);
1752 put_unused_fd(fd);
1753 return -EFAULT;
1754 }
1755
1756 fd_install(fd, file);
1757
1758 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
1759 lr->num_lines);
1760
1761 return 0;
1762
1763out_put_unused_fd:
1764 put_unused_fd(fd);
1765out_free_linereq:
1766 linereq_free(lr);
1767 return ret;
1768}
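
/*
 * Example (sketch): requesting edge events and reading them from the
 * request fd created above, assuming an open chip_fd and offset 7.
 *
 *   struct gpio_v2_line_request req;
 *   struct gpio_v2_line_event ev;
 *
 *   memset(&req, 0, sizeof(req));
 *   req.offsets[0] = 7;
 *   req.num_lines = 1;
 *   req.config.flags = GPIO_V2_LINE_FLAG_INPUT |
 *                      GPIO_V2_LINE_FLAG_EDGE_RISING |
 *                      GPIO_V2_LINE_FLAG_EDGE_FALLING;
 *   ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req);
 *
 *   read(req.fd, &ev, sizeof(ev));
 *   (blocks until an edge, then ev.id, ev.timestamp_ns and ev.seqno are set)
 */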
1769
1770#ifdef CONFIG_GPIO_CDEV_V1
1771
1772/*
1773 * GPIO line event management
1774 */
1775
1776/**
1777 * struct lineevent_state - contains the state of a userspace event
1778 * @gdev: the GPIO device the event pertains to
1779 * @label: consumer label used to tag descriptors
1780 * @desc: the GPIO descriptor held by this event
1781 * @eflags: the event flags this line was requested with
1782 * @irq: the interrupt triggered in response to events on this GPIO
1783 * @wait: wait queue that handles blocking reads of events
1784 * @device_unregistered_nb: notifier block for receiving gdev unregister events
1785 * @events: KFIFO for the GPIO events
1786 * @timestamp: cache for the timestamp, storing it between hardirq
1787 * and the IRQ thread; used to bring the timestamp close to the actual
1788 * event
1789 */
1790struct lineevent_state {
1791 struct gpio_device *gdev;
1792 const char *label;
1793 struct gpio_desc *desc;
1794 u32 eflags;
1795 int irq;
1796 wait_queue_head_t wait;
1797 struct notifier_block device_unregistered_nb;
1798 DECLARE_KFIFO(events, struct gpioevent_data, 16);
1799 u64 timestamp;
1800};
1801
1802#define GPIOEVENT_REQUEST_VALID_FLAGS \
1803 (GPIOEVENT_REQUEST_RISING_EDGE | \
1804 GPIOEVENT_REQUEST_FALLING_EDGE)
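
/*
 * Example (sketch): userspace side of the v1 event request, assuming an
 * open chip_fd and line offset 4 for illustration.
 *
 *   struct gpioevent_request evreq;
 *   struct gpioevent_data evdata;
 *
 *   memset(&evreq, 0, sizeof(evreq));
 *   evreq.lineoffset = 4;
 *   evreq.handleflags = GPIOHANDLE_REQUEST_INPUT;
 *   evreq.eventflags = GPIOEVENT_REQUEST_BOTH_EDGES;
 *   ioctl(chip_fd, GPIO_GET_LINEEVENT_IOCTL, &evreq);
 *
 *   read(evreq.fd, &evdata, sizeof(evdata));
 *   (returns at least one event; evdata.id and evdata.timestamp are set)
 */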
1805
1806static __poll_t lineevent_poll(struct file *file,
1807 struct poll_table_struct *wait)
1808{
1809 struct lineevent_state *le = file->private_data;
1810 __poll_t events = 0;
1811
1812 guard(srcu)(&le->gdev->srcu);
1813
1814 if (!rcu_access_pointer(le->gdev->chip))
1815 return EPOLLHUP | EPOLLERR;
1816
1817 poll_wait(file, &le->wait, wait);
1818
1819 if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock))
1820 events = EPOLLIN | EPOLLRDNORM;
1821
1822 return events;
1823}
1824
1825static int lineevent_unregistered_notify(struct notifier_block *nb,
1826 unsigned long action, void *data)
1827{
1828 struct lineevent_state *le = container_of(nb, struct lineevent_state,
1829 device_unregistered_nb);
1830
1831 wake_up_poll(&le->wait, EPOLLIN | EPOLLERR);
1832
1833 return NOTIFY_OK;
1834}
1835
1836struct compat_gpioeevent_data {
1837 compat_u64 timestamp;
1838 u32 id;
1839};
1840
1841static ssize_t lineevent_read(struct file *file, char __user *buf,
1842 size_t count, loff_t *f_ps)
1843{
1844 struct lineevent_state *le = file->private_data;
1845 struct gpioevent_data ge;
1846 ssize_t bytes_read = 0;
1847 ssize_t ge_size;
1848 int ret;
1849
1850 guard(srcu)(&le->gdev->srcu);
1851
1852 if (!rcu_access_pointer(le->gdev->chip))
1853 return -ENODEV;
1854
1855 /*
1856 * When the compat system call is used, struct gpioevent_data has a
1857 * different size, at least on ia32, due to alignment differences.
1858 * Because the first member is 64 bits wide followed by a 32-bit member,
1859 * there is no gap between them; the only difference is the padding at
1860 * the end of the data structure. Hence, we calculate the actual
1861 * sizeof() and pass it as the size argument to copy_to_user() to drop
1862 * the unneeded bytes from the output.
1863 */
1864 if (compat_need_64bit_alignment_fixup())
1865 ge_size = sizeof(struct compat_gpioeevent_data);
1866 else
1867 ge_size = sizeof(struct gpioevent_data);
1868 if (count < ge_size)
1869 return -EINVAL;
1870
1871 do {
1872 scoped_guard(spinlock, &le->wait.lock) {
1873 if (kfifo_is_empty(&le->events)) {
1874 if (bytes_read)
1875 return bytes_read;
1876
1877 if (file->f_flags & O_NONBLOCK)
1878 return -EAGAIN;
1879
1880 ret = wait_event_interruptible_locked(le->wait,
1881 !kfifo_is_empty(&le->events));
1882 if (ret)
1883 return ret;
1884 }
1885
1886 if (kfifo_out(&le->events, &ge, 1) != 1) {
1887 /*
1888 * This should never happen - we hold the
1889 * lock from the moment we learned the fifo
1890 * is no longer empty until now.
1891 */
1892 WARN(1, "failed to read from non-empty kfifo");
1893 return -EIO;
1894 }
1895 }
1896
1897 if (copy_to_user(buf + bytes_read, &ge, ge_size))
1898 return -EFAULT;
1899 bytes_read += ge_size;
1900 } while (count >= bytes_read + ge_size);
1901
1902 return bytes_read;
1903}
1904
1905static void lineevent_free(struct lineevent_state *le)
1906{
1907 if (le->device_unregistered_nb.notifier_call)
1908 blocking_notifier_chain_unregister(&le->gdev->device_notifier,
1909 &le->device_unregistered_nb);
1910 if (le->irq)
1911 free_irq_label(free_irq(le->irq, le));
1912 if (le->desc)
1913 gpiod_free(le->desc);
1914 kfree(le->label);
1915 gpio_device_put(le->gdev);
1916 kfree(le);
1917}
1918
1919static int lineevent_release(struct inode *inode, struct file *file)
1920{
1921 lineevent_free(file->private_data);
1922 return 0;
1923}
1924
1925static long lineevent_ioctl(struct file *file, unsigned int cmd,
1926 unsigned long arg)
1927{
1928 struct lineevent_state *le = file->private_data;
1929 void __user *ip = (void __user *)arg;
1930 struct gpiohandle_data ghd;
1931
1932 guard(srcu)(&le->gdev->srcu);
1933
1934 if (!rcu_access_pointer(le->gdev->chip))
1935 return -ENODEV;
1936
1937 /*
1938 * We can get the value for an event line but not set it,
1939 * because it is input by definition.
1940 */
1941 if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
1942 int val;
1943
1944 memset(&ghd, 0, sizeof(ghd));
1945
1946 val = gpiod_get_value_cansleep(le->desc);
1947 if (val < 0)
1948 return val;
1949 ghd.values[0] = val;
1950
1951 if (copy_to_user(ip, &ghd, sizeof(ghd)))
1952 return -EFAULT;
1953
1954 return 0;
1955 }
1956 return -EINVAL;
1957}
1958
1959#ifdef CONFIG_COMPAT
1960static long lineevent_ioctl_compat(struct file *file, unsigned int cmd,
1961 unsigned long arg)
1962{
1963 return lineevent_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1964}
1965#endif
1966
1967static const struct file_operations lineevent_fileops = {
1968 .release = lineevent_release,
1969 .read = lineevent_read,
1970 .poll = lineevent_poll,
1971 .owner = THIS_MODULE,
1972 .llseek = noop_llseek,
1973 .unlocked_ioctl = lineevent_ioctl,
1974#ifdef CONFIG_COMPAT
1975 .compat_ioctl = lineevent_ioctl_compat,
1976#endif
1977};
1978
1979static irqreturn_t lineevent_irq_thread(int irq, void *p)
1980{
1981 struct lineevent_state *le = p;
1982 struct gpioevent_data ge;
1983 int ret;
1984
1985 /* Do not leak kernel stack to userspace */
1986 memset(&ge, 0, sizeof(ge));
1987
1988 /*
1989 * We may be running from a nested threaded interrupt in which case
1990 * we didn't get the timestamp from lineevent_irq_handler().
1991 */
1992 if (!le->timestamp)
1993 ge.timestamp = ktime_get_ns();
1994 else
1995 ge.timestamp = le->timestamp;
1996
1997 if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
1998 && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
1999 int level = gpiod_get_value_cansleep(le->desc);
2000
2001 if (level)
2002 /* Emit low-to-high event */
2003 ge.id = GPIOEVENT_EVENT_RISING_EDGE;
2004 else
2005 /* Emit high-to-low event */
2006 ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
2007 } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
2008 /* Emit low-to-high event */
2009 ge.id = GPIOEVENT_EVENT_RISING_EDGE;
2010 } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
2011 /* Emit high-to-low event */
2012 ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
2013 } else {
2014 return IRQ_NONE;
2015 }
2016
2017 ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge,
2018 1, &le->wait.lock);
2019 if (ret)
2020 wake_up_poll(&le->wait, EPOLLIN);
2021 else
2022 pr_debug_ratelimited("event FIFO is full - event dropped\n");
2023
2024 return IRQ_HANDLED;
2025}
2026
2027static irqreturn_t lineevent_irq_handler(int irq, void *p)
2028{
2029 struct lineevent_state *le = p;
2030
2031 /*
2032 * Just store the timestamp in hardirq context so we get it as
2033 * close in time as possible to the actual event.
2034 */
2035 le->timestamp = ktime_get_ns();
2036
2037 return IRQ_WAKE_THREAD;
2038}
2039
2040static int lineevent_create(struct gpio_device *gdev, void __user *ip)
2041{
2042 struct gpioevent_request eventreq;
2043 struct lineevent_state *le;
2044 struct gpio_desc *desc;
2045 struct file *file;
2046 u32 offset;
2047 u32 lflags;
2048 u32 eflags;
2049 int fd;
2050 int ret;
2051 int irq, irqflags = 0;
2052 char *label;
2053
2054 if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
2055 return -EFAULT;
2056
2057 offset = eventreq.lineoffset;
2058 lflags = eventreq.handleflags;
2059 eflags = eventreq.eventflags;
2060
2061 desc = gpio_device_get_desc(gdev, offset);
2062 if (IS_ERR(desc))
2063 return PTR_ERR(desc);
2064
2065	/* Return an error if an unknown flag is set */
2066 if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
2067 (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS))
2068 return -EINVAL;
2069
2070 /* This is just wrong: we don't look for events on output lines */
2071 if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
2072 (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
2073 (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
2074 return -EINVAL;
2075
2076 /* Only one bias flag can be set. */
2077 if (((lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
2078 (lflags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
2079 GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
2080 ((lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
2081 (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
2082 return -EINVAL;
2083
2084 le = kzalloc(sizeof(*le), GFP_KERNEL);
2085 if (!le)
2086 return -ENOMEM;
2087 le->gdev = gpio_device_get(gdev);
2088
2089 if (eventreq.consumer_label[0] != '\0') {
2090 /* label is only initialized if consumer_label is set */
2091 le->label = kstrndup(eventreq.consumer_label,
2092 sizeof(eventreq.consumer_label) - 1,
2093 GFP_KERNEL);
2094 if (!le->label) {
2095 ret = -ENOMEM;
2096 goto out_free_le;
2097 }
2098 }
2099
2100 ret = gpiod_request_user(desc, le->label);
2101 if (ret)
2102 goto out_free_le;
2103 le->desc = desc;
2104 le->eflags = eflags;
2105
2106 linehandle_flags_to_desc_flags(lflags, &desc->flags);
2107
2108 ret = gpiod_direction_input(desc);
2109 if (ret)
2110 goto out_free_le;
2111
2112 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
2113
2114 irq = gpiod_to_irq(desc);
2115 if (irq <= 0) {
2116 ret = -ENODEV;
2117 goto out_free_le;
2118 }
2119
2120 if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
2121 irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
2122 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
2123 if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
2124 irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
2125 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
2126 irqflags |= IRQF_ONESHOT;
2127
2128 INIT_KFIFO(le->events);
2129 init_waitqueue_head(&le->wait);
2130
2131 le->device_unregistered_nb.notifier_call = lineevent_unregistered_notify;
2132 ret = blocking_notifier_chain_register(&gdev->device_notifier,
2133 &le->device_unregistered_nb);
2134 if (ret)
2135 goto out_free_le;
2136
2137 label = make_irq_label(le->label);
2138 if (IS_ERR(label)) {
2139 ret = PTR_ERR(label);
2140 goto out_free_le;
2141 }
2142
2143 /* Request a thread to read the events */
2144 ret = request_threaded_irq(irq,
2145 lineevent_irq_handler,
2146 lineevent_irq_thread,
2147 irqflags,
2148 label,
2149 le);
2150 if (ret) {
2151 free_irq_label(label);
2152 goto out_free_le;
2153 }
2154
2155 le->irq = irq;
2156
2157 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
2158 if (fd < 0) {
2159 ret = fd;
2160 goto out_free_le;
2161 }
2162
2163 file = anon_inode_getfile("gpio-event",
2164 &lineevent_fileops,
2165 le,
2166 O_RDONLY | O_CLOEXEC);
2167 if (IS_ERR(file)) {
2168 ret = PTR_ERR(file);
2169 goto out_put_unused_fd;
2170 }
2171
2172 eventreq.fd = fd;
2173 if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
2174 /*
2175 * fput() will trigger the release() callback, so do not go onto
2176 * the regular error cleanup path here.
2177 */
2178 fput(file);
2179 put_unused_fd(fd);
2180 return -EFAULT;
2181 }
2182
2183 fd_install(fd, file);
2184
2185 return 0;
2186
2187out_put_unused_fd:
2188 put_unused_fd(fd);
2189out_free_le:
2190 lineevent_free(le);
2191 return ret;
2192}
2193
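/*
 * Illustrative userspace sketch (not part of this driver): issuing the
 * GPIO_GET_LINEEVENT_IOCTL request handled by lineevent_create() above.
 * The chip path and line offset are assumptions for the example; error
 * handling is omitted.
 *
 *	struct gpioevent_request req;
 *	int chip_fd = open("/dev/gpiochip0", O_RDWR | O_CLOEXEC);
 *	int event_fd;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.lineoffset = 3;
 *	req.handleflags = GPIOHANDLE_REQUEST_INPUT;
 *	req.eventflags = GPIOEVENT_REQUEST_BOTH_EDGES;
 *	strcpy(req.consumer_label, "example");
 *	if (ioctl(chip_fd, GPIO_GET_LINEEVENT_IOCTL, &req) == 0)
 *		event_fd = req.fd;
 */
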
2194static void gpio_v2_line_info_to_v1(struct gpio_v2_line_info *info_v2,
2195 struct gpioline_info *info_v1)
2196{
2197 u64 flagsv2 = info_v2->flags;
2198
2199 memcpy(info_v1->name, info_v2->name, sizeof(info_v1->name));
2200 memcpy(info_v1->consumer, info_v2->consumer, sizeof(info_v1->consumer));
2201 info_v1->line_offset = info_v2->offset;
2202 info_v1->flags = 0;
2203
2204 if (flagsv2 & GPIO_V2_LINE_FLAG_USED)
2205 info_v1->flags |= GPIOLINE_FLAG_KERNEL;
2206
2207 if (flagsv2 & GPIO_V2_LINE_FLAG_OUTPUT)
2208 info_v1->flags |= GPIOLINE_FLAG_IS_OUT;
2209
2210 if (flagsv2 & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
2211 info_v1->flags |= GPIOLINE_FLAG_ACTIVE_LOW;
2212
2213 if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_DRAIN)
2214 info_v1->flags |= GPIOLINE_FLAG_OPEN_DRAIN;
2215 if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_SOURCE)
2216 info_v1->flags |= GPIOLINE_FLAG_OPEN_SOURCE;
2217
2218 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)
2219 info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
2220 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN)
2221 info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
2222 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_DISABLED)
2223 info_v1->flags |= GPIOLINE_FLAG_BIAS_DISABLE;
2224}
2225
2226static void gpio_v2_line_info_changed_to_v1(
2227 struct gpio_v2_line_info_changed *lic_v2,
2228 struct gpioline_info_changed *lic_v1)
2229{
2230 memset(lic_v1, 0, sizeof(*lic_v1));
2231 gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
2232 lic_v1->timestamp = lic_v2->timestamp_ns;
2233 lic_v1->event_type = lic_v2->event_type;
2234}
2235
2236#endif /* CONFIG_GPIO_CDEV_V1 */
2237
2238static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
2239 struct gpio_v2_line_info *info, bool atomic)
2240{
2241 u32 debounce_period_us;
2242 unsigned long dflags;
2243 const char *label;
2244
2245 CLASS(gpio_chip_guard, guard)(desc);
2246 if (!guard.gc)
2247 return;
2248
2249 memset(info, 0, sizeof(*info));
2250 info->offset = gpio_chip_hwgpio(desc);
2251
2252 if (desc->name)
2253 strscpy(info->name, desc->name, sizeof(info->name));
2254
2255 dflags = READ_ONCE(desc->flags);
2256
2257 scoped_guard(srcu, &desc->gdev->desc_srcu) {
2258 label = gpiod_get_label(desc);
2259 if (label && test_bit(FLAG_REQUESTED, &dflags))
2260 strscpy(info->consumer, label,
2261 sizeof(info->consumer));
2262 }
2263
2264 /*
2265	 * Userspace only needs to know that the kernel is using this GPIO so it
2266 * can't use it.
2267 * The calculation of the used flag is slightly racy, as it may read
2268 * desc, gc and pinctrl state without a lock covering all three at
2269	 * once. In the worst case, if the line is in transition and the
2270	 * calculation is inconsistent, it looks to the user as if they
2271	 * performed the read on the other side of the transition - but that
2272	 * can always happen.
2273 * The definitive test that a line is available to userspace is to
2274 * request it.
2275 */
2276 if (test_bit(FLAG_REQUESTED, &dflags) ||
2277 test_bit(FLAG_IS_HOGGED, &dflags) ||
2278 test_bit(FLAG_EXPORT, &dflags) ||
2279 test_bit(FLAG_SYSFS, &dflags) ||
2280 !gpiochip_line_is_valid(guard.gc, info->offset)) {
2281 info->flags |= GPIO_V2_LINE_FLAG_USED;
2282 } else if (!atomic) {
2283 if (!pinctrl_gpio_can_use_line(guard.gc, info->offset))
2284 info->flags |= GPIO_V2_LINE_FLAG_USED;
2285 }
2286
2287 if (test_bit(FLAG_IS_OUT, &dflags))
2288 info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
2289 else
2290 info->flags |= GPIO_V2_LINE_FLAG_INPUT;
2291
2292 if (test_bit(FLAG_ACTIVE_LOW, &dflags))
2293 info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;
2294
2295 if (test_bit(FLAG_OPEN_DRAIN, &dflags))
2296 info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
2297 if (test_bit(FLAG_OPEN_SOURCE, &dflags))
2298 info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;
2299
2300 if (test_bit(FLAG_BIAS_DISABLE, &dflags))
2301 info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
2302 if (test_bit(FLAG_PULL_DOWN, &dflags))
2303 info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
2304 if (test_bit(FLAG_PULL_UP, &dflags))
2305 info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;
2306
2307 if (test_bit(FLAG_EDGE_RISING, &dflags))
2308 info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
2309 if (test_bit(FLAG_EDGE_FALLING, &dflags))
2310 info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;
2311
2312 if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &dflags))
2313 info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
2314 else if (test_bit(FLAG_EVENT_CLOCK_HTE, &dflags))
2315 info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
2316
2317 debounce_period_us = READ_ONCE(desc->debounce_period_us);
2318 if (debounce_period_us) {
2319 info->attrs[info->num_attrs].id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
2320 info->attrs[info->num_attrs].debounce_period_us =
2321 debounce_period_us;
2322 info->num_attrs++;
2323 }
2324}
2325
2326struct gpio_chardev_data {
2327 struct gpio_device *gdev;
2328 wait_queue_head_t wait;
2329 DECLARE_KFIFO(events, struct gpio_v2_line_info_changed, 32);
2330 struct notifier_block lineinfo_changed_nb;
2331 struct notifier_block device_unregistered_nb;
2332 unsigned long *watched_lines;
2333#ifdef CONFIG_GPIO_CDEV_V1
2334 atomic_t watch_abi_version;
2335#endif
2336 struct file *fp;
2337};
2338
2339static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
2340{
2341 struct gpio_device *gdev = cdev->gdev;
2342 struct gpiochip_info chipinfo;
2343
2344 memset(&chipinfo, 0, sizeof(chipinfo));
2345
2346 strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
2347 strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
2348 chipinfo.lines = gdev->ngpio;
2349 if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
2350 return -EFAULT;
2351 return 0;
2352}
2353
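/*
 * Illustrative userspace sketch (not part of this driver): the
 * GPIO_GET_CHIPINFO_IOCTL query served by chipinfo_get(). The chip path is
 * an assumption for the example; error handling is omitted.
 *
 *	struct gpiochip_info info;
 *	int chip_fd = open("/dev/gpiochip0", O_RDWR | O_CLOEXEC);
 *
 *	if (ioctl(chip_fd, GPIO_GET_CHIPINFO_IOCTL, &info) == 0)
 *		printf("%s [%s] has %u lines\n",
 *		       info.name, info.label, info.lines);
 */
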
2354#ifdef CONFIG_GPIO_CDEV_V1
2355/*
2356 * returns 0 if the versions match, else the previously selected ABI version
2357 */
2358static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
2359 unsigned int version)
2360{
2361 int abiv = atomic_cmpxchg(&cdata->watch_abi_version, 0, version);
2362
2363 if (abiv == version)
2364 return 0;
2365
2366 return abiv;
2367}
2368
2369static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
2370 bool watch)
2371{
2372 struct gpio_desc *desc;
2373 struct gpioline_info lineinfo;
2374 struct gpio_v2_line_info lineinfo_v2;
2375
2376 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
2377 return -EFAULT;
2378
2379 /* this doubles as a range check on line_offset */
2380 desc = gpio_device_get_desc(cdev->gdev, lineinfo.line_offset);
2381 if (IS_ERR(desc))
2382 return PTR_ERR(desc);
2383
2384 if (watch) {
2385 if (lineinfo_ensure_abi_version(cdev, 1))
2386 return -EPERM;
2387
2388 if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
2389 return -EBUSY;
2390 }
2391
2392 gpio_desc_to_lineinfo(desc, &lineinfo_v2, false);
2393 gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
2394
2395 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
2396 if (watch)
2397 clear_bit(lineinfo.line_offset, cdev->watched_lines);
2398 return -EFAULT;
2399 }
2400
2401 return 0;
2402}
2403#endif
2404
2405static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
2406 bool watch)
2407{
2408 struct gpio_desc *desc;
2409 struct gpio_v2_line_info lineinfo;
2410
2411 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
2412 return -EFAULT;
2413
2414 if (!mem_is_zero(lineinfo.padding, sizeof(lineinfo.padding)))
2415 return -EINVAL;
2416
2417 desc = gpio_device_get_desc(cdev->gdev, lineinfo.offset);
2418 if (IS_ERR(desc))
2419 return PTR_ERR(desc);
2420
2421 if (watch) {
2422#ifdef CONFIG_GPIO_CDEV_V1
2423 if (lineinfo_ensure_abi_version(cdev, 2))
2424 return -EPERM;
2425#endif
2426 if (test_and_set_bit(lineinfo.offset, cdev->watched_lines))
2427 return -EBUSY;
2428 }
2429 gpio_desc_to_lineinfo(desc, &lineinfo, false);
2430
2431 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
2432 if (watch)
2433 clear_bit(lineinfo.offset, cdev->watched_lines);
2434 return -EFAULT;
2435 }
2436
2437 return 0;
2438}
2439
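/*
 * Illustrative userspace sketch (not part of this driver): fetching v2 line
 * info via the WATCH variant served by lineinfo_get(). Note that the
 * padding must be zeroed or the ioctl() fails with EINVAL. "chip_fd" is
 * assumed to be an open /dev/gpiochipN fd and the offset is an assumption
 * for the example; error handling is omitted.
 *
 *	struct gpio_v2_line_info info;
 *
 *	memset(&info, 0, sizeof(info));
 *	info.offset = 3;
 *	if (ioctl(chip_fd, GPIO_V2_GET_LINEINFO_WATCH_IOCTL, &info) == 0)
 *		printf("line %u (%s) flags 0x%llx\n", info.offset, info.name,
 *		       (unsigned long long)info.flags);
 */
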
2440static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
2441{
2442 __u32 offset;
2443
2444 if (copy_from_user(&offset, ip, sizeof(offset)))
2445 return -EFAULT;
2446
2447 if (offset >= cdev->gdev->ngpio)
2448 return -EINVAL;
2449
2450 if (!test_and_clear_bit(offset, cdev->watched_lines))
2451 return -EBUSY;
2452
2453 return 0;
2454}
2455
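/*
 * Illustrative userspace sketch (not part of this driver): dropping a watch
 * installed by one of the WATCH ioctl()s, as handled by lineinfo_unwatch().
 * The offset is an assumption for the example.
 *
 *	__u32 offset = 3;
 *
 *	ioctl(chip_fd, GPIO_GET_LINEINFO_UNWATCH_IOCTL, &offset);
 */
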
2456/*
2457 * gpio_ioctl() - ioctl handler for the GPIO chardev
2458 */
2459static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2460{
2461 struct gpio_chardev_data *cdev = file->private_data;
2462 struct gpio_device *gdev = cdev->gdev;
2463 void __user *ip = (void __user *)arg;
2464
2465 guard(srcu)(&gdev->srcu);
2466
2467	/* We fail any subsequent ioctl()s when the chip is gone */
2468 if (!rcu_access_pointer(gdev->chip))
2469 return -ENODEV;
2470
2471 /* Fill in the struct and pass to userspace */
2472 switch (cmd) {
2473 case GPIO_GET_CHIPINFO_IOCTL:
2474 return chipinfo_get(cdev, ip);
2475#ifdef CONFIG_GPIO_CDEV_V1
2476 case GPIO_GET_LINEHANDLE_IOCTL:
2477 return linehandle_create(gdev, ip);
2478 case GPIO_GET_LINEEVENT_IOCTL:
2479 return lineevent_create(gdev, ip);
2480 case GPIO_GET_LINEINFO_IOCTL:
2481 return lineinfo_get_v1(cdev, ip, false);
2482 case GPIO_GET_LINEINFO_WATCH_IOCTL:
2483 return lineinfo_get_v1(cdev, ip, true);
2484#endif /* CONFIG_GPIO_CDEV_V1 */
2485 case GPIO_V2_GET_LINEINFO_IOCTL:
2486 return lineinfo_get(cdev, ip, false);
2487 case GPIO_V2_GET_LINEINFO_WATCH_IOCTL:
2488 return lineinfo_get(cdev, ip, true);
2489 case GPIO_V2_GET_LINE_IOCTL:
2490 return linereq_create(gdev, ip);
2491 case GPIO_GET_LINEINFO_UNWATCH_IOCTL:
2492 return lineinfo_unwatch(cdev, ip);
2493 default:
2494 return -EINVAL;
2495 }
2496}
2497
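/*
 * Illustrative userspace sketch (not part of this driver): the v2 line
 * request path dispatched above via GPIO_V2_GET_LINE_IOCTL, followed by a
 * blocking read of one edge event from the returned fd. The chip fd, line
 * offset and consumer name are assumptions for the example; error handling
 * is omitted.
 *
 *	struct gpio_v2_line_request req;
 *	struct gpio_v2_line_event ev;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.num_lines = 1;
 *	req.offsets[0] = 3;
 *	req.config.flags = GPIO_V2_LINE_FLAG_INPUT |
 *			   GPIO_V2_LINE_FLAG_EDGE_RISING;
 *	strcpy(req.consumer, "example");
 *	if (ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req) == 0 &&
 *	    read(req.fd, &ev, sizeof(ev)) == sizeof(ev))
 *		printf("rising edge, seqno %u\n", ev.seqno);
 */
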
2498#ifdef CONFIG_COMPAT
2499static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
2500 unsigned long arg)
2501{
2502 return gpio_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
2503}
2504#endif
2505
2506struct lineinfo_changed_ctx {
2507 struct work_struct work;
2508 struct gpio_v2_line_info_changed chg;
2509 struct gpio_device *gdev;
2510 struct gpio_chardev_data *cdev;
2511};
2512
2513static void lineinfo_changed_func(struct work_struct *work)
2514{
2515 struct lineinfo_changed_ctx *ctx =
2516 container_of(work, struct lineinfo_changed_ctx, work);
2517 struct gpio_chip *gc;
2518 int ret;
2519
2520 if (!(ctx->chg.info.flags & GPIO_V2_LINE_FLAG_USED)) {
2521 /*
2522 * If nobody set the USED flag earlier, let's see with pinctrl
2523 * now. We're doing this late because it's a sleeping function.
2524 * Pin functions are in general much more static and while it's
2525 * not 100% bullet-proof, it's good enough for most cases.
2526 */
2527 scoped_guard(srcu, &ctx->gdev->srcu) {
2528 gc = srcu_dereference(ctx->gdev->chip, &ctx->gdev->srcu);
2529 if (gc &&
2530 !pinctrl_gpio_can_use_line(gc, ctx->chg.info.offset))
2531 ctx->chg.info.flags |= GPIO_V2_LINE_FLAG_USED;
2532 }
2533 }
2534
2535 ret = kfifo_in_spinlocked(&ctx->cdev->events, &ctx->chg, 1,
2536 &ctx->cdev->wait.lock);
2537 if (ret)
2538 wake_up_poll(&ctx->cdev->wait, EPOLLIN);
2539 else
2540 pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");
2541
2542 gpio_device_put(ctx->gdev);
2543 fput(ctx->cdev->fp);
2544 kfree(ctx);
2545}
2546
2547static int lineinfo_changed_notify(struct notifier_block *nb,
2548 unsigned long action, void *data)
2549{
2550 struct gpio_chardev_data *cdev =
2551 container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
2552 struct lineinfo_changed_ctx *ctx;
2553 struct gpio_desc *desc = data;
2554
2555 if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
2556 return NOTIFY_DONE;
2557
2558 /*
2559	 * If this is called from atomic context (for instance, with a spinlock
2560 * taken by the atomic notifier chain), any sleeping calls must be done
2561 * outside of this function in process context of the dedicated
2562 * workqueue.
2563 *
2564 * Let's gather as much info as possible from the descriptor and
2565 * postpone just the call to pinctrl_gpio_can_use_line() until the work
2566 * is executed.
2567 */
2568
2569 ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
2570 if (!ctx) {
2571 pr_err("Failed to allocate memory for line info notification\n");
2572 return NOTIFY_DONE;
2573 }
2574
2575 ctx->chg.event_type = action;
2576 ctx->chg.timestamp_ns = ktime_get_ns();
2577 gpio_desc_to_lineinfo(desc, &ctx->chg.info, true);
2578 /* Keep the GPIO device alive until we emit the event. */
2579 ctx->gdev = gpio_device_get(desc->gdev);
2580 ctx->cdev = cdev;
2581 /* Keep the file descriptor alive too. */
2582 get_file(ctx->cdev->fp);
2583
2584 INIT_WORK(&ctx->work, lineinfo_changed_func);
2585 queue_work(ctx->gdev->line_state_wq, &ctx->work);
2586
2587 return NOTIFY_OK;
2588}
2589
2590static int gpio_device_unregistered_notify(struct notifier_block *nb,
2591 unsigned long action, void *data)
2592{
2593 struct gpio_chardev_data *cdev = container_of(nb,
2594 struct gpio_chardev_data,
2595 device_unregistered_nb);
2596
2597 wake_up_poll(&cdev->wait, EPOLLIN | EPOLLERR);
2598
2599 return NOTIFY_OK;
2600}
2601
2602static __poll_t lineinfo_watch_poll(struct file *file,
2603 struct poll_table_struct *pollt)
2604{
2605 struct gpio_chardev_data *cdev = file->private_data;
2606 __poll_t events = 0;
2607
2608 guard(srcu)(&cdev->gdev->srcu);
2609
2610 if (!rcu_access_pointer(cdev->gdev->chip))
2611 return EPOLLHUP | EPOLLERR;
2612
2613 poll_wait(file, &cdev->wait, pollt);
2614
2615 if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events,
2616 &cdev->wait.lock))
2617 events = EPOLLIN | EPOLLRDNORM;
2618
2619 return events;
2620}
2621
2622static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
2623 size_t count, loff_t *off)
2624{
2625 struct gpio_chardev_data *cdev = file->private_data;
2626 struct gpio_v2_line_info_changed event;
2627 ssize_t bytes_read = 0;
2628 int ret;
2629 size_t event_size;
2630
2631 guard(srcu)(&cdev->gdev->srcu);
2632
2633 if (!rcu_access_pointer(cdev->gdev->chip))
2634 return -ENODEV;
2635
2636#ifndef CONFIG_GPIO_CDEV_V1
2637 event_size = sizeof(struct gpio_v2_line_info_changed);
2638 if (count < event_size)
2639 return -EINVAL;
2640#endif
2641
2642 do {
2643 scoped_guard(spinlock, &cdev->wait.lock) {
2644 if (kfifo_is_empty(&cdev->events)) {
2645 if (bytes_read)
2646 return bytes_read;
2647
2648 if (file->f_flags & O_NONBLOCK)
2649 return -EAGAIN;
2650
2651 ret = wait_event_interruptible_locked(cdev->wait,
2652 !kfifo_is_empty(&cdev->events));
2653 if (ret)
2654 return ret;
2655 }
2656#ifdef CONFIG_GPIO_CDEV_V1
2657 /* must be after kfifo check so watch_abi_version is set */
2658 if (atomic_read(&cdev->watch_abi_version) == 2)
2659 event_size = sizeof(struct gpio_v2_line_info_changed);
2660 else
2661 event_size = sizeof(struct gpioline_info_changed);
2662 if (count < event_size)
2663 return -EINVAL;
2664#endif
2665 if (kfifo_out(&cdev->events, &event, 1) != 1) {
2666 /*
2667 * This should never happen - we hold the
2668 * lock from the moment we learned the fifo
2669 * is no longer empty until now.
2670 */
2671 WARN(1, "failed to read from non-empty kfifo");
2672 return -EIO;
2673 }
2674 }
2675
2676#ifdef CONFIG_GPIO_CDEV_V1
2677 if (event_size == sizeof(struct gpio_v2_line_info_changed)) {
2678 if (copy_to_user(buf + bytes_read, &event, event_size))
2679 return -EFAULT;
2680 } else {
2681 struct gpioline_info_changed event_v1;
2682
2683 gpio_v2_line_info_changed_to_v1(&event, &event_v1);
2684 if (copy_to_user(buf + bytes_read, &event_v1,
2685 event_size))
2686 return -EFAULT;
2687 }
2688#else
2689 if (copy_to_user(buf + bytes_read, &event, event_size))
2690 return -EFAULT;
2691#endif
2692 bytes_read += event_size;
2693 } while (count >= bytes_read + sizeof(event));
2694
2695 return bytes_read;
2696}
2697
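/*
 * Illustrative userspace sketch (not part of this driver): consuming line
 * info change events from the chip fd once a v2 watch has been installed,
 * matching the sizing lineinfo_watch_read() uses for the v2 ABI. "chip_fd"
 * is assumed to be an open /dev/gpiochipN fd; error handling is omitted.
 *
 *	struct gpio_v2_line_info_changed chg;
 *
 *	while (read(chip_fd, &chg, sizeof(chg)) == sizeof(chg))
 *		printf("line %u: event %u at %llu ns\n", chg.info.offset,
 *		       chg.event_type, (unsigned long long)chg.timestamp_ns);
 */
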
2698/**
2699 * gpio_chrdev_open() - open the chardev for ioctl operations
2700 * @inode: inode for this chardev
2701 * @file: file struct for storing private data
2702 *
2703 * Returns:
2704 * 0 on success, or negative errno on failure.
2705 */
2706static int gpio_chrdev_open(struct inode *inode, struct file *file)
2707{
2708 struct gpio_device *gdev = container_of(inode->i_cdev,
2709 struct gpio_device, chrdev);
2710 struct gpio_chardev_data *cdev;
2711 int ret = -ENOMEM;
2712
2713 guard(srcu)(&gdev->srcu);
2714
2715 /* Fail on open if the backing gpiochip is gone */
2716 if (!rcu_access_pointer(gdev->chip))
2717 return -ENODEV;
2718
2719 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
2720 if (!cdev)
2721 return -ENODEV;
2722
2723 cdev->watched_lines = bitmap_zalloc(gdev->ngpio, GFP_KERNEL);
2724 if (!cdev->watched_lines)
2725 goto out_free_cdev;
2726
2727 init_waitqueue_head(&cdev->wait);
2728 INIT_KFIFO(cdev->events);
2729 cdev->gdev = gpio_device_get(gdev);
2730
2731 cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
2732 ret = atomic_notifier_chain_register(&gdev->line_state_notifier,
2733 &cdev->lineinfo_changed_nb);
2734 if (ret)
2735 goto out_free_bitmap;
2736
2737 cdev->device_unregistered_nb.notifier_call =
2738 gpio_device_unregistered_notify;
2739 ret = blocking_notifier_chain_register(&gdev->device_notifier,
2740 &cdev->device_unregistered_nb);
2741 if (ret)
2742 goto out_unregister_line_notifier;
2743
2744 file->private_data = cdev;
2745 cdev->fp = file;
2746
2747 ret = nonseekable_open(inode, file);
2748 if (ret)
2749 goto out_unregister_device_notifier;
2750
2751 return ret;
2752
2753out_unregister_device_notifier:
2754 blocking_notifier_chain_unregister(&gdev->device_notifier,
2755 &cdev->device_unregistered_nb);
2756out_unregister_line_notifier:
2757 atomic_notifier_chain_unregister(&gdev->line_state_notifier,
2758 &cdev->lineinfo_changed_nb);
2759out_free_bitmap:
2760 gpio_device_put(gdev);
2761 bitmap_free(cdev->watched_lines);
2762out_free_cdev:
2763 kfree(cdev);
2764 return ret;
2765}
2766
2767/**
2768 * gpio_chrdev_release() - close chardev after ioctl operations
2769 * @inode: inode for this chardev
2770 * @file: file struct for storing private data
2771 *
2772 * Returns:
2773 * 0 on success, or negative errno on failure.
2774 */
2775static int gpio_chrdev_release(struct inode *inode, struct file *file)
2776{
2777 struct gpio_chardev_data *cdev = file->private_data;
2778 struct gpio_device *gdev = cdev->gdev;
2779
2780 blocking_notifier_chain_unregister(&gdev->device_notifier,
2781 &cdev->device_unregistered_nb);
2782 atomic_notifier_chain_unregister(&gdev->line_state_notifier,
2783 &cdev->lineinfo_changed_nb);
2784 bitmap_free(cdev->watched_lines);
2785 gpio_device_put(gdev);
2786 kfree(cdev);
2787
2788 return 0;
2789}
2790
2791static const struct file_operations gpio_fileops = {
2792 .release = gpio_chrdev_release,
2793 .open = gpio_chrdev_open,
2794 .poll = lineinfo_watch_poll,
2795 .read = lineinfo_watch_read,
2796 .owner = THIS_MODULE,
2797 .unlocked_ioctl = gpio_ioctl,
2798#ifdef CONFIG_COMPAT
2799 .compat_ioctl = gpio_ioctl_compat,
2800#endif
2801};
2802
2803int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
2804{
2805 struct gpio_chip *gc;
2806 int ret;
2807
2808 cdev_init(&gdev->chrdev, &gpio_fileops);
2809 gdev->chrdev.owner = THIS_MODULE;
2810 gdev->dev.devt = MKDEV(MAJOR(devt), gdev->id);
2811
2812 gdev->line_state_wq = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2813 dev_name(&gdev->dev));
2814 if (!gdev->line_state_wq)
2815 return -ENOMEM;
2816
2817 ret = cdev_device_add(&gdev->chrdev, &gdev->dev);
2818 if (ret)
2819 return ret;
2820
2821 guard(srcu)(&gdev->srcu);
2822 gc = srcu_dereference(gdev->chip, &gdev->srcu);
2823 if (!gc)
2824 return -ENODEV;
2825
2826 chip_dbg(gc, "added GPIO chardev (%d:%d)\n", MAJOR(devt), gdev->id);
2827
2828 return 0;
2829}
2830
2831void gpiolib_cdev_unregister(struct gpio_device *gdev)
2832{
2833 destroy_workqueue(gdev->line_state_wq);
2834 cdev_device_del(&gdev->chrdev, &gdev->dev);
2835 blocking_notifier_call_chain(&gdev->device_notifier, 0, NULL);
2836}
1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/anon_inodes.h>
4#include <linux/atomic.h>
5#include <linux/bitmap.h>
6#include <linux/build_bug.h>
7#include <linux/cdev.h>
8#include <linux/cleanup.h>
9#include <linux/compat.h>
10#include <linux/compiler.h>
11#include <linux/device.h>
12#include <linux/err.h>
13#include <linux/file.h>
14#include <linux/gpio.h>
15#include <linux/gpio/driver.h>
16#include <linux/hte.h>
17#include <linux/interrupt.h>
18#include <linux/irqreturn.h>
19#include <linux/kernel.h>
20#include <linux/kfifo.h>
21#include <linux/module.h>
22#include <linux/mutex.h>
23#include <linux/overflow.h>
24#include <linux/pinctrl/consumer.h>
25#include <linux/poll.h>
26#include <linux/rbtree.h>
27#include <linux/rwsem.h>
28#include <linux/seq_file.h>
29#include <linux/spinlock.h>
30#include <linux/timekeeping.h>
31#include <linux/uaccess.h>
32#include <linux/workqueue.h>
33
34#include <uapi/linux/gpio.h>
35
36#include "gpiolib.h"
37#include "gpiolib-cdev.h"
38
39/*
40 * Array sizes must ensure 64-bit alignment and not create holes in the
41 * struct packing.
42 */
43static_assert(IS_ALIGNED(GPIO_V2_LINES_MAX, 2));
44static_assert(IS_ALIGNED(GPIO_MAX_NAME_SIZE, 8));
45
46/*
47 * Check that uAPI structs are 64-bit aligned for 32/64-bit compatibility
48 */
49static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_attribute), 8));
50static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config_attribute), 8));
51static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config), 8));
52static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_request), 8));
53static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info), 8));
54static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info_changed), 8));
55static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_event), 8));
56static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8));
57
58/* Character device interface to GPIO.
59 *
60 * The GPIO character device, /dev/gpiochipN, provides userspace an
61 * interface to gpiolib GPIOs via ioctl()s.
62 */
63
64typedef __poll_t (*poll_fn)(struct file *, struct poll_table_struct *);
65typedef long (*ioctl_fn)(struct file *, unsigned int, unsigned long);
66typedef ssize_t (*read_fn)(struct file *, char __user *,
67 size_t count, loff_t *);
68
69/*
70 * GPIO line handle management
71 */
72
73#ifdef CONFIG_GPIO_CDEV_V1
74/**
75 * struct linehandle_state - contains the state of a userspace handle
76 * @gdev: the GPIO device the handle pertains to
77 * @label: consumer label used to tag descriptors
78 * @descs: the GPIO descriptors held by this handle
79 * @num_descs: the number of descriptors held in the descs array
80 */
81struct linehandle_state {
82 struct gpio_device *gdev;
83 const char *label;
84 struct gpio_desc *descs[GPIOHANDLES_MAX];
85 u32 num_descs;
86};
87
88#define GPIOHANDLE_REQUEST_VALID_FLAGS \
89 (GPIOHANDLE_REQUEST_INPUT | \
90 GPIOHANDLE_REQUEST_OUTPUT | \
91 GPIOHANDLE_REQUEST_ACTIVE_LOW | \
92 GPIOHANDLE_REQUEST_BIAS_PULL_UP | \
93 GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | \
94 GPIOHANDLE_REQUEST_BIAS_DISABLE | \
95 GPIOHANDLE_REQUEST_OPEN_DRAIN | \
96 GPIOHANDLE_REQUEST_OPEN_SOURCE)
97
98static int linehandle_validate_flags(u32 flags)
99{
100 /* Return an error if an unknown flag is set */
101 if (flags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
102 return -EINVAL;
103
104 /*
105 * Do not allow both INPUT & OUTPUT flags to be set as they are
106 * contradictory.
107 */
108 if ((flags & GPIOHANDLE_REQUEST_INPUT) &&
109 (flags & GPIOHANDLE_REQUEST_OUTPUT))
110 return -EINVAL;
111
112 /*
113 * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
114 * the hardware actually supports enabling both at the same time the
115 * electrical result would be disastrous.
116 */
117 if ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
118 (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
119 return -EINVAL;
120
121 /* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
122 if (!(flags & GPIOHANDLE_REQUEST_OUTPUT) &&
123 ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
124 (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
125 return -EINVAL;
126
127 /* Bias flags only allowed for input or output mode. */
128 if (!((flags & GPIOHANDLE_REQUEST_INPUT) ||
129 (flags & GPIOHANDLE_REQUEST_OUTPUT)) &&
130 ((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) ||
131 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP) ||
132 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)))
133 return -EINVAL;
134
135 /* Only one bias flag can be set. */
136 if (((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
137 (flags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
138 GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
139 ((flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
140 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
141 return -EINVAL;
142
143 return 0;
144}
145
146static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp)
147{
148 assign_bit(FLAG_ACTIVE_LOW, flagsp,
149 lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
150 assign_bit(FLAG_OPEN_DRAIN, flagsp,
151 lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
152 assign_bit(FLAG_OPEN_SOURCE, flagsp,
153 lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
154 assign_bit(FLAG_PULL_UP, flagsp,
155 lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
156 assign_bit(FLAG_PULL_DOWN, flagsp,
157 lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
158 assign_bit(FLAG_BIAS_DISABLE, flagsp,
159 lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
160}
161
162static long linehandle_set_config(struct linehandle_state *lh,
163 void __user *ip)
164{
165 struct gpiohandle_config gcnf;
166 struct gpio_desc *desc;
167 int i, ret;
168 u32 lflags;
169
170 if (copy_from_user(&gcnf, ip, sizeof(gcnf)))
171 return -EFAULT;
172
173 lflags = gcnf.flags;
174 ret = linehandle_validate_flags(lflags);
175 if (ret)
176 return ret;
177
178 for (i = 0; i < lh->num_descs; i++) {
179 desc = lh->descs[i];
180 linehandle_flags_to_desc_flags(gcnf.flags, &desc->flags);
181
182 /*
183 * Lines have to be requested explicitly for input
184 * or output, else the line will be treated "as is".
185 */
186 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
187 int val = !!gcnf.default_values[i];
188
189 ret = gpiod_direction_output(desc, val);
190 if (ret)
191 return ret;
192 } else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
193 ret = gpiod_direction_input(desc);
194 if (ret)
195 return ret;
196 }
197
198 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
199 }
200 return 0;
201}
202
203static long linehandle_ioctl(struct file *file, unsigned int cmd,
204 unsigned long arg)
205{
206 struct linehandle_state *lh = file->private_data;
207 void __user *ip = (void __user *)arg;
208 struct gpiohandle_data ghd;
209 DECLARE_BITMAP(vals, GPIOHANDLES_MAX);
210 unsigned int i;
211 int ret;
212
213 guard(rwsem_read)(&lh->gdev->sem);
214
215 if (!lh->gdev->chip)
216 return -ENODEV;
217
218 switch (cmd) {
219 case GPIOHANDLE_GET_LINE_VALUES_IOCTL:
220 /* NOTE: It's okay to read values of output lines */
221 ret = gpiod_get_array_value_complex(false, true,
222 lh->num_descs, lh->descs,
223 NULL, vals);
224 if (ret)
225 return ret;
226
227 memset(&ghd, 0, sizeof(ghd));
228 for (i = 0; i < lh->num_descs; i++)
229 ghd.values[i] = test_bit(i, vals);
230
231 if (copy_to_user(ip, &ghd, sizeof(ghd)))
232 return -EFAULT;
233
234 return 0;
235 case GPIOHANDLE_SET_LINE_VALUES_IOCTL:
236 /*
237 * All line descriptors were created at once with the same
238 * flags so just check if the first one is really output.
239 */
240 if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
241 return -EPERM;
242
243 if (copy_from_user(&ghd, ip, sizeof(ghd)))
244 return -EFAULT;
245
246 /* Clamp all values to [0,1] */
247 for (i = 0; i < lh->num_descs; i++)
248 __assign_bit(i, vals, ghd.values[i]);
249
250 /* Reuse the array setting function */
251 return gpiod_set_array_value_complex(false,
252 true,
253 lh->num_descs,
254 lh->descs,
255 NULL,
256 vals);
257 case GPIOHANDLE_SET_CONFIG_IOCTL:
258 return linehandle_set_config(lh, ip);
259 default:
260 return -EINVAL;
261 }
262}
263
264#ifdef CONFIG_COMPAT
265static long linehandle_ioctl_compat(struct file *file, unsigned int cmd,
266 unsigned long arg)
267{
268 return linehandle_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
269}
270#endif
271
272static void linehandle_free(struct linehandle_state *lh)
273{
274 int i;
275
276 for (i = 0; i < lh->num_descs; i++)
277 if (lh->descs[i])
278 gpiod_free(lh->descs[i]);
279 kfree(lh->label);
280 gpio_device_put(lh->gdev);
281 kfree(lh);
282}
283
284static int linehandle_release(struct inode *inode, struct file *file)
285{
286 linehandle_free(file->private_data);
287 return 0;
288}
289
290static const struct file_operations linehandle_fileops = {
291 .release = linehandle_release,
292 .owner = THIS_MODULE,
293 .llseek = noop_llseek,
294 .unlocked_ioctl = linehandle_ioctl,
295#ifdef CONFIG_COMPAT
296 .compat_ioctl = linehandle_ioctl_compat,
297#endif
298};
299
300static int linehandle_create(struct gpio_device *gdev, void __user *ip)
301{
302 struct gpiohandle_request handlereq;
303 struct linehandle_state *lh;
304 struct file *file;
305 int fd, i, ret;
306 u32 lflags;
307
308 if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
309 return -EFAULT;
310 if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
311 return -EINVAL;
312
313 lflags = handlereq.flags;
314
315 ret = linehandle_validate_flags(lflags);
316 if (ret)
317 return ret;
318
319 lh = kzalloc(sizeof(*lh), GFP_KERNEL);
320 if (!lh)
321 return -ENOMEM;
322 lh->gdev = gpio_device_get(gdev);
323
324 if (handlereq.consumer_label[0] != '\0') {
325 /* label is only initialized if consumer_label is set */
326 lh->label = kstrndup(handlereq.consumer_label,
327 sizeof(handlereq.consumer_label) - 1,
328 GFP_KERNEL);
329 if (!lh->label) {
330 ret = -ENOMEM;
331 goto out_free_lh;
332 }
333 }
334
335 lh->num_descs = handlereq.lines;
336
337 /* Request each GPIO */
338 for (i = 0; i < handlereq.lines; i++) {
339 u32 offset = handlereq.lineoffsets[i];
340 struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);
341
342 if (IS_ERR(desc)) {
343 ret = PTR_ERR(desc);
344 goto out_free_lh;
345 }
346
347 ret = gpiod_request_user(desc, lh->label);
348 if (ret)
349 goto out_free_lh;
350 lh->descs[i] = desc;
351 linehandle_flags_to_desc_flags(handlereq.flags, &desc->flags);
352
353 ret = gpiod_set_transitory(desc, false);
354 if (ret < 0)
355 goto out_free_lh;
356
357 /*
358 * Lines have to be requested explicitly for input
359 * or output, else the line will be treated "as is".
360 */
361 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
362 int val = !!handlereq.default_values[i];
363
364 ret = gpiod_direction_output(desc, val);
365 if (ret)
366 goto out_free_lh;
367 } else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
368 ret = gpiod_direction_input(desc);
369 if (ret)
370 goto out_free_lh;
371 }
372
373 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
374
375 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
376 offset);
377 }
378
379 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
380 if (fd < 0) {
381 ret = fd;
382 goto out_free_lh;
383 }
384
385 file = anon_inode_getfile("gpio-linehandle",
386 &linehandle_fileops,
387 lh,
388 O_RDONLY | O_CLOEXEC);
389 if (IS_ERR(file)) {
390 ret = PTR_ERR(file);
391 goto out_put_unused_fd;
392 }
393
394 handlereq.fd = fd;
395 if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
396 /*
397 * fput() will trigger the release() callback, so do not go onto
398 * the regular error cleanup path here.
399 */
400 fput(file);
401 put_unused_fd(fd);
402 return -EFAULT;
403 }
404
405 fd_install(fd, file);
406
407 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
408 lh->num_descs);
409
410 return 0;
411
412out_put_unused_fd:
413 put_unused_fd(fd);
414out_free_lh:
415 linehandle_free(lh);
416 return ret;
417}
418#endif /* CONFIG_GPIO_CDEV_V1 */
419
420/**
421 * struct line - contains the state of a requested line
422 * @node: to store the object in supinfo_tree if supplemental
423 * @desc: the GPIO descriptor for this line.
424 * @req: the corresponding line request
425 * @irq: the interrupt triggered in response to events on this GPIO
426 * @edflags: the edge flags, GPIO_V2_LINE_FLAG_EDGE_RISING and/or
427 * GPIO_V2_LINE_FLAG_EDGE_FALLING, indicating the edge detection applied
428 * @timestamp_ns: cache for the timestamp storing it between hardirq and
429 * IRQ thread, used to bring the timestamp close to the actual event
430 * @req_seqno: the seqno for the current edge event in the sequence of
431 * events for the corresponding line request. This is drawn from the @req.
432 * @line_seqno: the seqno for the current edge event in the sequence of
433 * events for this line.
434 * @work: the worker that implements software debouncing
435 * @debounce_period_us: the debounce period in microseconds
436 * @sw_debounced: flag indicating if the software debouncer is active
437 * @level: the current debounced physical level of the line
438 * @hdesc: the Hardware Timestamp Engine (HTE) descriptor
439 * @raw_level: the line level at the time of event
440 * @total_discard_seq: the running counter of the discarded events
441 * @last_seqno: the last sequence number before debounce period expires
442 */
443struct line {
444 struct rb_node node;
445 struct gpio_desc *desc;
446 /*
447 * -- edge detector specific fields --
448 */
449 struct linereq *req;
450 unsigned int irq;
451 /*
452 * The flags for the active edge detector configuration.
453 *
454 * edflags is set by linereq_create(), linereq_free(), and
455 * linereq_set_config_unlocked(), which are themselves mutually
456 * exclusive, and is accessed by edge_irq_thread(),
457 * process_hw_ts_thread() and debounce_work_func(),
458 * which can all live with a slightly stale value.
459 */
460 u64 edflags;
461 /*
462 * timestamp_ns and req_seqno are accessed only by
463 * edge_irq_handler() and edge_irq_thread(), which are themselves
464 * mutually exclusive, so no additional protection is necessary.
465 */
466 u64 timestamp_ns;
467 u32 req_seqno;
468 /*
469 * line_seqno is accessed by either edge_irq_thread() or
470 * debounce_work_func(), which are themselves mutually exclusive,
471 * so no additional protection is necessary.
472 */
473 u32 line_seqno;
474 /*
475 * -- debouncer specific fields --
476 */
477 struct delayed_work work;
478 /*
479 * debounce_period_us is accessed by debounce_irq_handler() and
480 * process_hw_ts() which are disabled when modified by
481 * debounce_setup(), edge_detector_setup() or edge_detector_stop()
482 * or can live with a stale version when updated by
483 * edge_detector_update().
484 * The modifying functions are themselves mutually exclusive.
485 */
486 unsigned int debounce_period_us;
487 /*
488 * sw_debounce is accessed by linereq_set_config(), which is the
489 * only setter, and linereq_get_values(), which can live with a
490 * slightly stale value.
491 */
492 unsigned int sw_debounced;
493 /*
494 * level is accessed by debounce_work_func(), which is the only
495 * setter, and linereq_get_values() which can live with a slightly
496 * stale value.
497 */
498 unsigned int level;
499#ifdef CONFIG_HTE
500 struct hte_ts_desc hdesc;
501 /*
502 * HTE provider sets line level at the time of event. The valid
503 * value is 0 or 1 and negative value for an error.
504 */
505 int raw_level;
506 /*
507 * when sw_debounce is set on HTE enabled line, this is running
508 * counter of the discarded events.
509 */
510 u32 total_discard_seq;
511 /*
512 * when sw_debounce is set on HTE enabled line, this variable records
513 * last sequence number before debounce period expires.
514 */
515 u32 last_seqno;
516#endif /* CONFIG_HTE */
517};
518
519/*
520 * a rbtree of the struct lines containing supplemental info.
521 * Used to populate gpio_v2_line_info with cdev specific fields not contained
522 * in the struct gpio_desc.
523 * A line is determined to contain supplemental information by
524 * line_has_supinfo().
525 */
526static struct rb_root supinfo_tree = RB_ROOT;
527/* covers supinfo_tree */
528static DEFINE_SPINLOCK(supinfo_lock);
529
530/**
531 * struct linereq - contains the state of a userspace line request
532 * @gdev: the GPIO device the line request pertains to
533 * @label: consumer label used to tag GPIO descriptors
534 * @num_lines: the number of lines in the lines array
535 * @wait: wait queue that handles blocking reads of events
536 * @device_unregistered_nb: notifier block for receiving gdev unregister events
537 * @event_buffer_size: the number of elements allocated in @events
538 * @events: KFIFO for the GPIO events
539 * @seqno: the sequence number for edge events generated on all lines in
540 * this line request. Note that this is not used when @num_lines is 1, as
541 * the line_seqno is then the same and is cheaper to calculate.
542 * @config_mutex: mutex for serializing ioctl() calls to ensure consistency
543 * of configuration, particularly multi-step accesses to desc flags and
544 * changes to supinfo status.
545 * @lines: the lines held by this line request, with @num_lines elements.
546 */
547struct linereq {
548 struct gpio_device *gdev;
549 const char *label;
550 u32 num_lines;
551 wait_queue_head_t wait;
552 struct notifier_block device_unregistered_nb;
553 u32 event_buffer_size;
554 DECLARE_KFIFO_PTR(events, struct gpio_v2_line_event);
555 atomic_t seqno;
556 struct mutex config_mutex;
557 struct line lines[] __counted_by(num_lines);
558};
559
560static void supinfo_insert(struct line *line)
561{
562 struct rb_node **new = &(supinfo_tree.rb_node), *parent = NULL;
563 struct line *entry;
564
565 guard(spinlock)(&supinfo_lock);
566
567 while (*new) {
568 entry = container_of(*new, struct line, node);
569
570 parent = *new;
571 if (line->desc < entry->desc) {
572 new = &((*new)->rb_left);
573 } else if (line->desc > entry->desc) {
574 new = &((*new)->rb_right);
575 } else {
576 /* this should never happen */
577 WARN(1, "duplicate line inserted");
578 return;
579 }
580 }
581
582 rb_link_node(&line->node, parent, new);
583 rb_insert_color(&line->node, &supinfo_tree);
584}
585
586static void supinfo_erase(struct line *line)
587{
588 guard(spinlock)(&supinfo_lock);
589
590 rb_erase(&line->node, &supinfo_tree);
591}
592
593static struct line *supinfo_find(struct gpio_desc *desc)
594{
595 struct rb_node *node = supinfo_tree.rb_node;
596 struct line *line;
597
598 while (node) {
599 line = container_of(node, struct line, node);
600 if (desc < line->desc)
601 node = node->rb_left;
602 else if (desc > line->desc)
603 node = node->rb_right;
604 else
605 return line;
606 }
607 return NULL;
608}
609
610static void supinfo_to_lineinfo(struct gpio_desc *desc,
611 struct gpio_v2_line_info *info)
612{
613 struct gpio_v2_line_attribute *attr;
614 struct line *line;
615
616 guard(spinlock)(&supinfo_lock);
617
618 line = supinfo_find(desc);
619 if (!line)
620 return;
621
622 attr = &info->attrs[info->num_attrs];
623 attr->id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
624 attr->debounce_period_us = READ_ONCE(line->debounce_period_us);
625 info->num_attrs++;
626}
627
628static inline bool line_has_supinfo(struct line *line)
629{
630 return READ_ONCE(line->debounce_period_us);
631}
632
633/*
634 * Checks line_has_supinfo() before and after the change to avoid unnecessary
635 * supinfo_tree access.
636 * Called indirectly by linereq_create() or linereq_set_config() so line
637 * is already protected from concurrent changes.
638 */
639static void line_set_debounce_period(struct line *line,
640 unsigned int debounce_period_us)
641{
642 bool was_suppl = line_has_supinfo(line);
643
644 WRITE_ONCE(line->debounce_period_us, debounce_period_us);
645
646 /* if supinfo status is unchanged then we're done */
647 if (line_has_supinfo(line) == was_suppl)
648 return;
649
650 /* supinfo status has changed, so update the tree */
651 if (was_suppl)
652 supinfo_erase(line);
653 else
654 supinfo_insert(line);
655}
656
657#define GPIO_V2_LINE_BIAS_FLAGS \
658 (GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \
659 GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \
660 GPIO_V2_LINE_FLAG_BIAS_DISABLED)
661
662#define GPIO_V2_LINE_DIRECTION_FLAGS \
663 (GPIO_V2_LINE_FLAG_INPUT | \
664 GPIO_V2_LINE_FLAG_OUTPUT)
665
666#define GPIO_V2_LINE_DRIVE_FLAGS \
667 (GPIO_V2_LINE_FLAG_OPEN_DRAIN | \
668 GPIO_V2_LINE_FLAG_OPEN_SOURCE)
669
670#define GPIO_V2_LINE_EDGE_FLAGS \
671 (GPIO_V2_LINE_FLAG_EDGE_RISING | \
672 GPIO_V2_LINE_FLAG_EDGE_FALLING)
673
674#define GPIO_V2_LINE_FLAG_EDGE_BOTH GPIO_V2_LINE_EDGE_FLAGS
675
676#define GPIO_V2_LINE_VALID_FLAGS \
677 (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
678 GPIO_V2_LINE_DIRECTION_FLAGS | \
679 GPIO_V2_LINE_DRIVE_FLAGS | \
680 GPIO_V2_LINE_EDGE_FLAGS | \
681 GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \
682 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
683 GPIO_V2_LINE_BIAS_FLAGS)
684
685/* subset of flags relevant for edge detector configuration */
686#define GPIO_V2_LINE_EDGE_DETECTOR_FLAGS \
687 (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
688 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
689 GPIO_V2_LINE_EDGE_FLAGS)
690
691static int linereq_unregistered_notify(struct notifier_block *nb,
692 unsigned long action, void *data)
693{
694 struct linereq *lr = container_of(nb, struct linereq,
695 device_unregistered_nb);
696
697 wake_up_poll(&lr->wait, EPOLLIN | EPOLLERR);
698
699 return NOTIFY_OK;
700}
701
702static void linereq_put_event(struct linereq *lr,
703 struct gpio_v2_line_event *le)
704{
705 bool overflow = false;
706
707 scoped_guard(spinlock, &lr->wait.lock) {
708 if (kfifo_is_full(&lr->events)) {
709 overflow = true;
710 kfifo_skip(&lr->events);
711 }
712 kfifo_in(&lr->events, le, 1);
713 }
714 if (!overflow)
715 wake_up_poll(&lr->wait, EPOLLIN);
716 else
717 pr_debug_ratelimited("event FIFO is full - event dropped\n");
718}
719
720static u64 line_event_timestamp(struct line *line)
721{
722 if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
723 return ktime_get_real_ns();
724 else if (IS_ENABLED(CONFIG_HTE) &&
725 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
726 return line->timestamp_ns;
727
728 return ktime_get_ns();
729}
730
731static u32 line_event_id(int level)
732{
733 return level ? GPIO_V2_LINE_EVENT_RISING_EDGE :
734 GPIO_V2_LINE_EVENT_FALLING_EDGE;
735}
736
737#ifdef CONFIG_HTE
738
739static enum hte_return process_hw_ts_thread(void *p)
740{
741 struct line *line;
742 struct linereq *lr;
743 struct gpio_v2_line_event le;
744 u64 edflags;
745 int level;
746
747 if (!p)
748 return HTE_CB_HANDLED;
749
750 line = p;
751 lr = line->req;
752
753 memset(&le, 0, sizeof(le));
754
755 le.timestamp_ns = line->timestamp_ns;
756 edflags = READ_ONCE(line->edflags);
757
758 switch (edflags & GPIO_V2_LINE_EDGE_FLAGS) {
759 case GPIO_V2_LINE_FLAG_EDGE_BOTH:
760 level = (line->raw_level >= 0) ?
761 line->raw_level :
762 gpiod_get_raw_value_cansleep(line->desc);
763
764 if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
765 level = !level;
766
767 le.id = line_event_id(level);
768 break;
769 case GPIO_V2_LINE_FLAG_EDGE_RISING:
770 le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
771 break;
772 case GPIO_V2_LINE_FLAG_EDGE_FALLING:
773 le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
774 break;
775 default:
776 return HTE_CB_HANDLED;
777 }
778 le.line_seqno = line->line_seqno;
779 le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
780 le.offset = gpio_chip_hwgpio(line->desc);
781
782 linereq_put_event(lr, &le);
783
784 return HTE_CB_HANDLED;
785}
786
787static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
788{
789 struct line *line;
790 struct linereq *lr;
791 int diff_seqno = 0;
792
793 if (!ts || !p)
794 return HTE_CB_HANDLED;
795
796 line = p;
797 line->timestamp_ns = ts->tsc;
798 line->raw_level = ts->raw_level;
799 lr = line->req;
800
801 if (READ_ONCE(line->sw_debounced)) {
802 line->total_discard_seq++;
803 line->last_seqno = ts->seq;
804 mod_delayed_work(system_wq, &line->work,
805 usecs_to_jiffies(READ_ONCE(line->debounce_period_us)));
806 } else {
807 if (unlikely(ts->seq < line->line_seqno))
808 return HTE_CB_HANDLED;
809
810 diff_seqno = ts->seq - line->line_seqno;
811 line->line_seqno = ts->seq;
812 if (lr->num_lines != 1)
813 line->req_seqno = atomic_add_return(diff_seqno,
814 &lr->seqno);
815
816 return HTE_RUN_SECOND_CB;
817 }
818
819 return HTE_CB_HANDLED;
820}
821
822static int hte_edge_setup(struct line *line, u64 eflags)
823{
824 int ret;
825 unsigned long flags = 0;
826 struct hte_ts_desc *hdesc = &line->hdesc;
827
828 if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
829 flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
830 HTE_FALLING_EDGE_TS :
831 HTE_RISING_EDGE_TS;
832 if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
833 flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
834 HTE_RISING_EDGE_TS :
835 HTE_FALLING_EDGE_TS;
836
837 line->total_discard_seq = 0;
838
839 hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags, NULL,
840 line->desc);
841
842 ret = hte_ts_get(NULL, hdesc, 0);
843 if (ret)
844 return ret;
845
846 return hte_request_ts_ns(hdesc, process_hw_ts, process_hw_ts_thread,
847 line);
848}
849
850#else
851
852static int hte_edge_setup(struct line *line, u64 eflags)
853{
854 return 0;
855}
856#endif /* CONFIG_HTE */
857
858static irqreturn_t edge_irq_thread(int irq, void *p)
859{
860 struct line *line = p;
861 struct linereq *lr = line->req;
862 struct gpio_v2_line_event le;
863
864 /* Do not leak kernel stack to userspace */
865 memset(&le, 0, sizeof(le));
866
867 if (line->timestamp_ns) {
868 le.timestamp_ns = line->timestamp_ns;
869 } else {
870 /*
871 * We may be running from a nested threaded interrupt in
872 * which case we didn't get the timestamp from
873 * edge_irq_handler().
874 */
875 le.timestamp_ns = line_event_timestamp(line);
876 if (lr->num_lines != 1)
877 line->req_seqno = atomic_inc_return(&lr->seqno);
878 }
879 line->timestamp_ns = 0;
880
881 switch (READ_ONCE(line->edflags) & GPIO_V2_LINE_EDGE_FLAGS) {
882 case GPIO_V2_LINE_FLAG_EDGE_BOTH:
883 le.id = line_event_id(gpiod_get_value_cansleep(line->desc));
884 break;
885 case GPIO_V2_LINE_FLAG_EDGE_RISING:
886 le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
887 break;
888 case GPIO_V2_LINE_FLAG_EDGE_FALLING:
889 le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
890 break;
891 default:
892 return IRQ_NONE;
893 }
894 line->line_seqno++;
895 le.line_seqno = line->line_seqno;
896 le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
897 le.offset = gpio_chip_hwgpio(line->desc);
898
899 linereq_put_event(lr, &le);
900
901 return IRQ_HANDLED;
902}
903
904static irqreturn_t edge_irq_handler(int irq, void *p)
905{
906 struct line *line = p;
907 struct linereq *lr = line->req;
908
909 /*
910 * Just store the timestamp in hardirq context so we get it as
911 * close in time as possible to the actual event.
912 */
913 line->timestamp_ns = line_event_timestamp(line);
914
915 if (lr->num_lines != 1)
916 line->req_seqno = atomic_inc_return(&lr->seqno);
917
918 return IRQ_WAKE_THREAD;
919}
920
921/*
922 * returns the current debounced logical value.
923 */
924static bool debounced_value(struct line *line)
925{
926 bool value;
927
928 /*
929 * minor race - debouncer may be stopped here, so edge_detector_stop()
930 * must leave the value unchanged so the following will read the level
931 * from when the debouncer was last running.
932 */
933 value = READ_ONCE(line->level);
934
935 if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
936 value = !value;
937
938 return value;
939}
940
941static irqreturn_t debounce_irq_handler(int irq, void *p)
942{
943 struct line *line = p;
944
945 mod_delayed_work(system_wq, &line->work,
946 usecs_to_jiffies(READ_ONCE(line->debounce_period_us)));
947
948 return IRQ_HANDLED;
949}
950
951static void debounce_work_func(struct work_struct *work)
952{
953 struct gpio_v2_line_event le;
954 struct line *line = container_of(work, struct line, work.work);
955 struct linereq *lr;
956 u64 eflags, edflags = READ_ONCE(line->edflags);
957 int level = -1;
958#ifdef CONFIG_HTE
959 int diff_seqno;
960
961 if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
962 level = line->raw_level;
963#endif
964 if (level < 0)
965 level = gpiod_get_raw_value_cansleep(line->desc);
966 if (level < 0) {
967 pr_debug_ratelimited("debouncer failed to read line value\n");
968 return;
969 }
970
971 if (READ_ONCE(line->level) == level)
972 return;
973
974 WRITE_ONCE(line->level, level);
975
976 /* -- edge detection -- */
977 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
978 if (!eflags)
979 return;
980
981 /* switch from physical level to logical - if they differ */
982 if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
983 level = !level;
984
985 /* ignore edges that are not being monitored */
986 if (((eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) ||
987 ((eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level))
988 return;
989
990 /* Do not leak kernel stack to userspace */
991 memset(&le, 0, sizeof(le));
992
993 lr = line->req;
994 le.timestamp_ns = line_event_timestamp(line);
995 le.offset = gpio_chip_hwgpio(line->desc);
996#ifdef CONFIG_HTE
997 if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) {
998 /* discard events except the last one */
999 line->total_discard_seq -= 1;
1000 diff_seqno = line->last_seqno - line->total_discard_seq -
1001 line->line_seqno;
1002 line->line_seqno = line->last_seqno - line->total_discard_seq;
1003 le.line_seqno = line->line_seqno;
1004 le.seqno = (lr->num_lines == 1) ?
1005 le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno);
1006 } else
1007#endif /* CONFIG_HTE */
1008 {
1009 line->line_seqno++;
1010 le.line_seqno = line->line_seqno;
1011 le.seqno = (lr->num_lines == 1) ?
1012 le.line_seqno : atomic_inc_return(&lr->seqno);
1013 }
1014
1015 le.id = line_event_id(level);
1016
1017 linereq_put_event(lr, &le);
1018}
1019
1020static int debounce_setup(struct line *line, unsigned int debounce_period_us)
1021{
1022 unsigned long irqflags;
1023 int ret, level, irq;
1024
1025 /* try hardware */
1026 ret = gpiod_set_debounce(line->desc, debounce_period_us);
1027 if (!ret) {
1028 line_set_debounce_period(line, debounce_period_us);
1029 return ret;
1030 }
1031 if (ret != -ENOTSUPP)
1032 return ret;
1033
1034 if (debounce_period_us) {
1035 /* setup software debounce */
1036 level = gpiod_get_raw_value_cansleep(line->desc);
1037 if (level < 0)
1038 return level;
1039
1040 if (!(IS_ENABLED(CONFIG_HTE) &&
1041 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
1042 irq = gpiod_to_irq(line->desc);
1043 if (irq < 0)
1044 return -ENXIO;
1045
1046 irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
1047 ret = request_irq(irq, debounce_irq_handler, irqflags,
1048 line->req->label, line);
1049 if (ret)
1050 return ret;
1051 line->irq = irq;
1052 } else {
1053 ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
1054 if (ret)
1055 return ret;
1056 }
1057
1058 WRITE_ONCE(line->level, level);
1059 WRITE_ONCE(line->sw_debounced, 1);
1060 }
1061 return 0;
1062}
1063
1064static bool gpio_v2_line_config_debounced(struct gpio_v2_line_config *lc,
1065 unsigned int line_idx)
1066{
1067 unsigned int i;
1068 u64 mask = BIT_ULL(line_idx);
1069
1070 for (i = 0; i < lc->num_attrs; i++) {
1071 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
1072 (lc->attrs[i].mask & mask))
1073 return true;
1074 }
1075 return false;
1076}
1077
1078static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
1079 unsigned int line_idx)
1080{
1081 unsigned int i;
1082 u64 mask = BIT_ULL(line_idx);
1083
1084 for (i = 0; i < lc->num_attrs; i++) {
1085 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
1086 (lc->attrs[i].mask & mask))
1087 return lc->attrs[i].attr.debounce_period_us;
1088 }
1089 return 0;
1090}
1091
1092static void edge_detector_stop(struct line *line)
1093{
1094 if (line->irq) {
1095 free_irq(line->irq, line);
1096 line->irq = 0;
1097 }
1098
1099#ifdef CONFIG_HTE
1100 if (READ_ONCE(line->edflags) & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
1101 hte_ts_put(&line->hdesc);
1102#endif
1103
1104 cancel_delayed_work_sync(&line->work);
1105 WRITE_ONCE(line->sw_debounced, 0);
1106 WRITE_ONCE(line->edflags, 0);
1107 line_set_debounce_period(line, 0);
1108 /* do not change line->level - see comment in debounced_value() */
1109}
1110
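/*
 * Arm edge detection for a line: allocate the event fifo on first use,
 * configure any requested debounce, then source edge events from either
 * the HTE engine or a threaded IRQ matching the requested edges. When the
 * software debouncer is active it provides the edge events itself, so no
 * IRQ is requested here.
 */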
1111static int edge_detector_setup(struct line *line,
1112 struct gpio_v2_line_config *lc,
1113 unsigned int line_idx, u64 edflags)
1114{
1115 u32 debounce_period_us;
1116 unsigned long irqflags = 0;
1117 u64 eflags;
1118 int irq, ret;
1119
1120 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
1121 if (eflags && !kfifo_initialized(&line->req->events)) {
1122 ret = kfifo_alloc(&line->req->events,
1123 line->req->event_buffer_size, GFP_KERNEL);
1124 if (ret)
1125 return ret;
1126 }
1127 if (gpio_v2_line_config_debounced(lc, line_idx)) {
1128 debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
1129 ret = debounce_setup(line, debounce_period_us);
1130 if (ret)
1131 return ret;
1132 line_set_debounce_period(line, debounce_period_us);
1133 }
1134
1135 /* detection disabled or sw debouncer will provide edge detection */
1136 if (!eflags || READ_ONCE(line->sw_debounced))
1137 return 0;
1138
1139 if (IS_ENABLED(CONFIG_HTE) &&
1140 (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1141 return hte_edge_setup(line, edflags);
1142
1143 irq = gpiod_to_irq(line->desc);
1144 if (irq < 0)
1145 return -ENXIO;
1146
1147 if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
1148 irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
1149 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
1150 if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
1151 irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
1152 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
1153 irqflags |= IRQF_ONESHOT;
1154
1155 /* Request a thread to read the events */
1156 ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
1157 irqflags, line->req->label, line);
1158 if (ret)
1159 return ret;
1160
1161 line->irq = irq;
1162 return 0;
1163}
1164
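/*
 * Reconcile the edge detector with a new configuration: a no-op if nothing
 * relevant changed, an in-place update if only the software debounce period
 * changed, otherwise a full teardown and re-setup.
 */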
1165static int edge_detector_update(struct line *line,
1166 struct gpio_v2_line_config *lc,
1167 unsigned int line_idx, u64 edflags)
1168{
1169 u64 active_edflags = READ_ONCE(line->edflags);
1170 unsigned int debounce_period_us =
1171 gpio_v2_line_config_debounce_period(lc, line_idx);
1172
1173 if ((active_edflags == edflags) &&
1174 (READ_ONCE(line->debounce_period_us) == debounce_period_us))
1175 return 0;
1176
1177	/* sw debounced and still will be... */
1178 if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
1179 line_set_debounce_period(line, debounce_period_us);
1180 return 0;
1181 }
1182
1183 /* reconfiguring edge detection or sw debounce being disabled */
1184 if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
1185 (active_edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) ||
1186 (!debounce_period_us && READ_ONCE(line->sw_debounced)))
1187 edge_detector_stop(line);
1188
1189 return edge_detector_setup(line, lc, line_idx, edflags);
1190}
1191
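/*
 * Return the flags for a given line: a per-line FLAGS attribute covering
 * the line overrides the request-wide lc->flags.
 */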
1192static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
1193 unsigned int line_idx)
1194{
1195 unsigned int i;
1196 u64 mask = BIT_ULL(line_idx);
1197
1198 for (i = 0; i < lc->num_attrs; i++) {
1199 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_FLAGS) &&
1200 (lc->attrs[i].mask & mask))
1201 return lc->attrs[i].attr.flags;
1202 }
1203 return lc->flags;
1204}
1205
1206static int gpio_v2_line_config_output_value(struct gpio_v2_line_config *lc,
1207 unsigned int line_idx)
1208{
1209 unsigned int i;
1210 u64 mask = BIT_ULL(line_idx);
1211
1212 for (i = 0; i < lc->num_attrs; i++) {
1213 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES) &&
1214 (lc->attrs[i].mask & mask))
1215 return !!(lc->attrs[i].attr.values & mask);
1216 }
1217 return 0;
1218}
1219
1220static int gpio_v2_line_flags_validate(u64 flags)
1221{
1222 /* Return an error if an unknown flag is set */
1223 if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
1224 return -EINVAL;
1225
1226 if (!IS_ENABLED(CONFIG_HTE) &&
1227 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1228 return -EOPNOTSUPP;
1229
1230 /*
1231 * Do not allow both INPUT and OUTPUT flags to be set as they are
1232 * contradictory.
1233 */
1234 if ((flags & GPIO_V2_LINE_FLAG_INPUT) &&
1235 (flags & GPIO_V2_LINE_FLAG_OUTPUT))
1236 return -EINVAL;
1237
1238 /* Only allow one event clock source */
1239 if (IS_ENABLED(CONFIG_HTE) &&
1240 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
1241 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1242 return -EINVAL;
1243
1244 /* Edge detection requires explicit input. */
1245 if ((flags & GPIO_V2_LINE_EDGE_FLAGS) &&
1246 !(flags & GPIO_V2_LINE_FLAG_INPUT))
1247 return -EINVAL;
1248
1249 /*
1250 * Do not allow OPEN_SOURCE and OPEN_DRAIN flags in a single
1251 * request. If the hardware actually supports enabling both at the
1252 * same time the electrical result would be disastrous.
1253 */
1254 if ((flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN) &&
1255 (flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE))
1256 return -EINVAL;
1257
1258 /* Drive requires explicit output direction. */
1259 if ((flags & GPIO_V2_LINE_DRIVE_FLAGS) &&
1260 !(flags & GPIO_V2_LINE_FLAG_OUTPUT))
1261 return -EINVAL;
1262
1263 /* Bias requires explicit direction. */
1264 if ((flags & GPIO_V2_LINE_BIAS_FLAGS) &&
1265 !(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
1266 return -EINVAL;
1267
1268 /* Only one bias flag can be set. */
1269 if (((flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED) &&
1270 (flags & (GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN |
1271 GPIO_V2_LINE_FLAG_BIAS_PULL_UP))) ||
1272 ((flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN) &&
1273 (flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)))
1274 return -EINVAL;
1275
1276 return 0;
1277}
1278
1279static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc,
1280 unsigned int num_lines)
1281{
1282 unsigned int i;
1283 u64 flags;
1284 int ret;
1285
1286 if (lc->num_attrs > GPIO_V2_LINE_NUM_ATTRS_MAX)
1287 return -EINVAL;
1288
1289 if (memchr_inv(lc->padding, 0, sizeof(lc->padding)))
1290 return -EINVAL;
1291
1292 for (i = 0; i < num_lines; i++) {
1293 flags = gpio_v2_line_config_flags(lc, i);
1294 ret = gpio_v2_line_flags_validate(flags);
1295 if (ret)
1296 return ret;
1297
1298 /* debounce requires explicit input */
1299 if (gpio_v2_line_config_debounced(lc, i) &&
1300 !(flags & GPIO_V2_LINE_FLAG_INPUT))
1301 return -EINVAL;
1302 }
1303 return 0;
1304}
1305
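/*
 * Translate uAPI v2 line flags into the descriptor's internal FLAG_* bits.
 * The direction bit is only touched when the request explicitly selects
 * input or output.
 */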
1306static void gpio_v2_line_config_flags_to_desc_flags(u64 flags,
1307 unsigned long *flagsp)
1308{
1309 assign_bit(FLAG_ACTIVE_LOW, flagsp,
1310 flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);
1311
1312 if (flags & GPIO_V2_LINE_FLAG_OUTPUT)
1313 set_bit(FLAG_IS_OUT, flagsp);
1314 else if (flags & GPIO_V2_LINE_FLAG_INPUT)
1315 clear_bit(FLAG_IS_OUT, flagsp);
1316
1317 assign_bit(FLAG_EDGE_RISING, flagsp,
1318 flags & GPIO_V2_LINE_FLAG_EDGE_RISING);
1319 assign_bit(FLAG_EDGE_FALLING, flagsp,
1320 flags & GPIO_V2_LINE_FLAG_EDGE_FALLING);
1321
1322 assign_bit(FLAG_OPEN_DRAIN, flagsp,
1323 flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
1324 assign_bit(FLAG_OPEN_SOURCE, flagsp,
1325 flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);
1326
1327 assign_bit(FLAG_PULL_UP, flagsp,
1328 flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
1329 assign_bit(FLAG_PULL_DOWN, flagsp,
1330 flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
1331 assign_bit(FLAG_BIAS_DISABLE, flagsp,
1332 flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);
1333
1334 assign_bit(FLAG_EVENT_CLOCK_REALTIME, flagsp,
1335 flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
1336 assign_bit(FLAG_EVENT_CLOCK_HTE, flagsp,
1337 flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
1338}
1339
1340static long linereq_get_values(struct linereq *lr, void __user *ip)
1341{
1342 struct gpio_v2_line_values lv;
1343 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
1344 struct gpio_desc **descs;
1345 unsigned int i, didx, num_get;
1346 bool val;
1347 int ret;
1348
1349 /* NOTE: It's ok to read values of output lines. */
1350 if (copy_from_user(&lv, ip, sizeof(lv)))
1351 return -EFAULT;
1352
1353 /*
1354 * gpiod_get_array_value_complex() requires compacted desc and val
1355 * arrays, rather than the sparse ones in lv.
1356 * Calculation of num_get and construction of the desc array is
1357 * optimized to avoid allocation for the desc array for the common
1358 * num_get == 1 case.
1359 */
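	/*
	 * For example (hypothetical 3-line request): mask = 0b101 selects
	 * lines 0 and 2, so num_get = 2 and descs = { lines[0], lines[2] };
	 * the compacted values are mapped back onto bits 0 and 2 of lv.bits
	 * below.
	 */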
1360 /* scan requested lines to calculate the subset to get */
1361 for (num_get = 0, i = 0; i < lr->num_lines; i++) {
1362 if (lv.mask & BIT_ULL(i)) {
1363 num_get++;
1364 /* capture desc for the num_get == 1 case */
1365 descs = &lr->lines[i].desc;
1366 }
1367 }
1368
1369 if (num_get == 0)
1370 return -EINVAL;
1371
1372 if (num_get != 1) {
1373 /* build compacted desc array */
1374 descs = kmalloc_array(num_get, sizeof(*descs), GFP_KERNEL);
1375 if (!descs)
1376 return -ENOMEM;
1377 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1378 if (lv.mask & BIT_ULL(i)) {
1379 descs[didx] = lr->lines[i].desc;
1380 didx++;
1381 }
1382 }
1383 }
1384 ret = gpiod_get_array_value_complex(false, true, num_get,
1385 descs, NULL, vals);
1386
1387 if (num_get != 1)
1388 kfree(descs);
1389 if (ret)
1390 return ret;
1391
1392 lv.bits = 0;
1393 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1394 /* unpack compacted vals for the response */
1395 if (lv.mask & BIT_ULL(i)) {
1396 if (lr->lines[i].sw_debounced)
1397 val = debounced_value(&lr->lines[i]);
1398 else
1399 val = test_bit(didx, vals);
1400 if (val)
1401 lv.bits |= BIT_ULL(i);
1402 didx++;
1403 }
1404 }
1405
1406 if (copy_to_user(ip, &lv, sizeof(lv)))
1407 return -EFAULT;
1408
1409 return 0;
1410}
1411
1412static long linereq_set_values(struct linereq *lr, void __user *ip)
1413{
1414 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
1415 struct gpio_v2_line_values lv;
1416 struct gpio_desc **descs;
1417 unsigned int i, didx, num_set;
1418 int ret;
1419
1420 if (copy_from_user(&lv, ip, sizeof(lv)))
1421 return -EFAULT;
1422
1423 guard(mutex)(&lr->config_mutex);
1424
1425 /*
1426 * gpiod_set_array_value_complex() requires compacted desc and val
1427 * arrays, rather than the sparse ones in lv.
1428 * Calculation of num_set and construction of the descs and vals arrays
1429	 * is optimized to minimize scanning of lv.mask, and to avoid
1430 * allocation for the desc array for the common num_set == 1 case.
1431 */
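	/*
	 * For example (hypothetical 3-line request): mask = 0b101 with
	 * bits = 0b100 compacts to num_set = 2 and vals = 0b10, i.e. line 0
	 * is driven low and line 2 is driven high.
	 */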
1432 bitmap_zero(vals, GPIO_V2_LINES_MAX);
1433 /* scan requested lines to determine the subset to be set */
1434 for (num_set = 0, i = 0; i < lr->num_lines; i++) {
1435 if (lv.mask & BIT_ULL(i)) {
1436 /* setting inputs is not allowed */
1437 if (!test_bit(FLAG_IS_OUT, &lr->lines[i].desc->flags))
1438 return -EPERM;
1439 /* add to compacted values */
1440 if (lv.bits & BIT_ULL(i))
1441 __set_bit(num_set, vals);
1442 num_set++;
1443 /* capture desc for the num_set == 1 case */
1444 descs = &lr->lines[i].desc;
1445 }
1446 }
1447 if (num_set == 0)
1448 return -EINVAL;
1449
1450 if (num_set != 1) {
1451 /* build compacted desc array */
1452 descs = kmalloc_array(num_set, sizeof(*descs), GFP_KERNEL);
1453 if (!descs)
1454 return -ENOMEM;
1455 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1456 if (lv.mask & BIT_ULL(i)) {
1457 descs[didx] = lr->lines[i].desc;
1458 didx++;
1459 }
1460 }
1461 }
1462 ret = gpiod_set_array_value_complex(false, true, num_set,
1463 descs, NULL, vals);
1464
1465 if (num_set != 1)
1466 kfree(descs);
1467 return ret;
1468}
1469
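/*
 * Apply a new configuration to all lines of an existing request: for each
 * line the descriptor flags are updated and, where the direction is given
 * explicitly, the output value or edge detector is reconfigured before a
 * config-changed notification is emitted.
 */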
1470static long linereq_set_config(struct linereq *lr, void __user *ip)
1471{
1472 struct gpio_v2_line_config lc;
1473 struct gpio_desc *desc;
1474 struct line *line;
1475 unsigned int i;
1476 u64 flags, edflags;
1477 int ret;
1478
1479 if (copy_from_user(&lc, ip, sizeof(lc)))
1480 return -EFAULT;
1481
1482 ret = gpio_v2_line_config_validate(&lc, lr->num_lines);
1483 if (ret)
1484 return ret;
1485
1486 guard(mutex)(&lr->config_mutex);
1487
1488 for (i = 0; i < lr->num_lines; i++) {
1489 line = &lr->lines[i];
1490 desc = lr->lines[i].desc;
1491 flags = gpio_v2_line_config_flags(&lc, i);
1492 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
1493 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
1494 /*
1495 * Lines have to be requested explicitly for input
1496 * or output, else the line will be treated "as is".
1497 */
1498 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
1499 int val = gpio_v2_line_config_output_value(&lc, i);
1500
1501 edge_detector_stop(line);
1502 ret = gpiod_direction_output(desc, val);
1503 if (ret)
1504 return ret;
1505 } else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
1506 ret = gpiod_direction_input(desc);
1507 if (ret)
1508 return ret;
1509
1510 ret = edge_detector_update(line, &lc, i, edflags);
1511 if (ret)
1512 return ret;
1513 }
1514
1515 WRITE_ONCE(line->edflags, edflags);
1516
1517 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
1518 }
1519 return 0;
1520}
1521
1522static long linereq_ioctl(struct file *file, unsigned int cmd,
1523 unsigned long arg)
1524{
1525 struct linereq *lr = file->private_data;
1526 void __user *ip = (void __user *)arg;
1527
1528 guard(rwsem_read)(&lr->gdev->sem);
1529
1530 if (!lr->gdev->chip)
1531 return -ENODEV;
1532
1533 switch (cmd) {
1534 case GPIO_V2_LINE_GET_VALUES_IOCTL:
1535 return linereq_get_values(lr, ip);
1536 case GPIO_V2_LINE_SET_VALUES_IOCTL:
1537 return linereq_set_values(lr, ip);
1538 case GPIO_V2_LINE_SET_CONFIG_IOCTL:
1539 return linereq_set_config(lr, ip);
1540 default:
1541 return -EINVAL;
1542 }
1543}
1544
1545#ifdef CONFIG_COMPAT
1546static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
1547 unsigned long arg)
1548{
1549 return linereq_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1550}
1551#endif
1552
1553static __poll_t linereq_poll(struct file *file,
1554 struct poll_table_struct *wait)
1555{
1556 struct linereq *lr = file->private_data;
1557 __poll_t events = 0;
1558
1559 guard(rwsem_read)(&lr->gdev->sem);
1560
1561 if (!lr->gdev->chip)
1562 return EPOLLHUP | EPOLLERR;
1563
1564 poll_wait(file, &lr->wait, wait);
1565
1566 if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events,
1567 &lr->wait.lock))
1568 events = EPOLLIN | EPOLLRDNORM;
1569
1570 return events;
1571}
1572
1573static ssize_t linereq_read(struct file *file, char __user *buf,
1574 size_t count, loff_t *f_ps)
1575{
1576 struct linereq *lr = file->private_data;
1577 struct gpio_v2_line_event le;
1578 ssize_t bytes_read = 0;
1579 int ret;
1580
1581 guard(rwsem_read)(&lr->gdev->sem);
1582
1583 if (!lr->gdev->chip)
1584 return -ENODEV;
1585
1586 if (count < sizeof(le))
1587 return -EINVAL;
1588
1589 do {
1590 scoped_guard(spinlock, &lr->wait.lock) {
1591 if (kfifo_is_empty(&lr->events)) {
1592 if (bytes_read)
1593 return bytes_read;
1594
1595 if (file->f_flags & O_NONBLOCK)
1596 return -EAGAIN;
1597
1598 ret = wait_event_interruptible_locked(lr->wait,
1599 !kfifo_is_empty(&lr->events));
1600 if (ret)
1601 return ret;
1602 }
1603
1604 ret = kfifo_out(&lr->events, &le, 1);
1605 }
1606 if (ret != 1) {
1607 /*
1608 * This should never happen - we were holding the
1609 * lock from the moment we learned the fifo is no
1610 * longer empty until now.
1611 */
1612 ret = -EIO;
1613 break;
1614 }
1615
1616 if (copy_to_user(buf + bytes_read, &le, sizeof(le)))
1617 return -EFAULT;
1618 bytes_read += sizeof(le);
1619 } while (count >= bytes_read + sizeof(le));
1620
1621 return bytes_read;
1622}
1623
1624static void linereq_free(struct linereq *lr)
1625{
1626 struct line *line;
1627 unsigned int i;
1628
1629 if (lr->device_unregistered_nb.notifier_call)
1630 blocking_notifier_chain_unregister(&lr->gdev->device_notifier,
1631 &lr->device_unregistered_nb);
1632
1633 for (i = 0; i < lr->num_lines; i++) {
1634 line = &lr->lines[i];
1635 if (!line->desc)
1636 continue;
1637
1638 edge_detector_stop(line);
1639 if (line_has_supinfo(line))
1640 supinfo_erase(line);
1641 gpiod_free(line->desc);
1642 }
1643 kfifo_free(&lr->events);
1644 kfree(lr->label);
1645 gpio_device_put(lr->gdev);
1646 kvfree(lr);
1647}
1648
1649static int linereq_release(struct inode *inode, struct file *file)
1650{
1651 struct linereq *lr = file->private_data;
1652
1653 linereq_free(lr);
1654 return 0;
1655}
1656
1657#ifdef CONFIG_PROC_FS
1658static void linereq_show_fdinfo(struct seq_file *out, struct file *file)
1659{
1660 struct linereq *lr = file->private_data;
1661 struct device *dev = &lr->gdev->dev;
1662 u16 i;
1663
1664 seq_printf(out, "gpio-chip:\t%s\n", dev_name(dev));
1665
1666 for (i = 0; i < lr->num_lines; i++)
1667 seq_printf(out, "gpio-line:\t%d\n",
1668 gpio_chip_hwgpio(lr->lines[i].desc));
1669}
1670#endif
1671
1672static const struct file_operations line_fileops = {
1673 .release = linereq_release,
1674 .read = linereq_read,
1675 .poll = linereq_poll,
1676 .owner = THIS_MODULE,
1677 .llseek = noop_llseek,
1678 .unlocked_ioctl = linereq_ioctl,
1679#ifdef CONFIG_COMPAT
1680 .compat_ioctl = linereq_ioctl_compat,
1681#endif
1682#ifdef CONFIG_PROC_FS
1683 .show_fdinfo = linereq_show_fdinfo,
1684#endif
1685};
1686
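/*
 * Handle GPIO_V2_GET_LINE_IOCTL: validate the request, allocate the
 * linereq, request and configure each descriptor, and hand the request
 * back to userspace as an anonymous file descriptor.
 */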
1687static int linereq_create(struct gpio_device *gdev, void __user *ip)
1688{
1689 struct gpio_v2_line_request ulr;
1690 struct gpio_v2_line_config *lc;
1691 struct linereq *lr;
1692 struct file *file;
1693 u64 flags, edflags;
1694 unsigned int i;
1695 int fd, ret;
1696
1697 if (copy_from_user(&ulr, ip, sizeof(ulr)))
1698 return -EFAULT;
1699
1700 if ((ulr.num_lines == 0) || (ulr.num_lines > GPIO_V2_LINES_MAX))
1701 return -EINVAL;
1702
1703 if (memchr_inv(ulr.padding, 0, sizeof(ulr.padding)))
1704 return -EINVAL;
1705
1706 lc = &ulr.config;
1707 ret = gpio_v2_line_config_validate(lc, ulr.num_lines);
1708 if (ret)
1709 return ret;
1710
1711 lr = kvzalloc(struct_size(lr, lines, ulr.num_lines), GFP_KERNEL);
1712 if (!lr)
1713 return -ENOMEM;
1714 lr->num_lines = ulr.num_lines;
1715
1716 lr->gdev = gpio_device_get(gdev);
1717
1718 for (i = 0; i < ulr.num_lines; i++) {
1719 lr->lines[i].req = lr;
1720 WRITE_ONCE(lr->lines[i].sw_debounced, 0);
1721 INIT_DELAYED_WORK(&lr->lines[i].work, debounce_work_func);
1722 }
1723
1724 if (ulr.consumer[0] != '\0') {
1725 /* label is only initialized if consumer is set */
1726 lr->label = kstrndup(ulr.consumer, sizeof(ulr.consumer) - 1,
1727 GFP_KERNEL);
1728 if (!lr->label) {
1729 ret = -ENOMEM;
1730 goto out_free_linereq;
1731 }
1732 }
1733
1734 mutex_init(&lr->config_mutex);
1735 init_waitqueue_head(&lr->wait);
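	/* default to 16 events per line; cap at 16 * GPIO_V2_LINES_MAX */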
1736 lr->event_buffer_size = ulr.event_buffer_size;
1737 if (lr->event_buffer_size == 0)
1738 lr->event_buffer_size = ulr.num_lines * 16;
1739 else if (lr->event_buffer_size > GPIO_V2_LINES_MAX * 16)
1740 lr->event_buffer_size = GPIO_V2_LINES_MAX * 16;
1741
1742 atomic_set(&lr->seqno, 0);
1743
1744 /* Request each GPIO */
1745 for (i = 0; i < ulr.num_lines; i++) {
1746 u32 offset = ulr.offsets[i];
1747 struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);
1748
1749 if (IS_ERR(desc)) {
1750 ret = PTR_ERR(desc);
1751 goto out_free_linereq;
1752 }
1753
1754 ret = gpiod_request_user(desc, lr->label);
1755 if (ret)
1756 goto out_free_linereq;
1757
1758 lr->lines[i].desc = desc;
1759 flags = gpio_v2_line_config_flags(lc, i);
1760 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
1761
1762 ret = gpiod_set_transitory(desc, false);
1763 if (ret < 0)
1764 goto out_free_linereq;
1765
1766 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
1767 /*
1768 * Lines have to be requested explicitly for input
1769 * or output, else the line will be treated "as is".
1770 */
1771 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
1772 int val = gpio_v2_line_config_output_value(lc, i);
1773
1774 ret = gpiod_direction_output(desc, val);
1775 if (ret)
1776 goto out_free_linereq;
1777 } else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
1778 ret = gpiod_direction_input(desc);
1779 if (ret)
1780 goto out_free_linereq;
1781
1782 ret = edge_detector_setup(&lr->lines[i], lc, i,
1783 edflags);
1784 if (ret)
1785 goto out_free_linereq;
1786 }
1787
1788 lr->lines[i].edflags = edflags;
1789
1790 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
1791
1792 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
1793 offset);
1794 }
1795
1796 lr->device_unregistered_nb.notifier_call = linereq_unregistered_notify;
1797 ret = blocking_notifier_chain_register(&gdev->device_notifier,
1798 &lr->device_unregistered_nb);
1799 if (ret)
1800 goto out_free_linereq;
1801
1802 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
1803 if (fd < 0) {
1804 ret = fd;
1805 goto out_free_linereq;
1806 }
1807
1808 file = anon_inode_getfile("gpio-line", &line_fileops, lr,
1809 O_RDONLY | O_CLOEXEC);
1810 if (IS_ERR(file)) {
1811 ret = PTR_ERR(file);
1812 goto out_put_unused_fd;
1813 }
1814
1815 ulr.fd = fd;
1816 if (copy_to_user(ip, &ulr, sizeof(ulr))) {
1817 /*
1818 * fput() will trigger the release() callback, so do not go onto
1819 * the regular error cleanup path here.
1820 */
1821 fput(file);
1822 put_unused_fd(fd);
1823 return -EFAULT;
1824 }
1825
1826 fd_install(fd, file);
1827
1828 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
1829 lr->num_lines);
1830
1831 return 0;
1832
1833out_put_unused_fd:
1834 put_unused_fd(fd);
1835out_free_linereq:
1836 linereq_free(lr);
1837 return ret;
1838}
1839
1840#ifdef CONFIG_GPIO_CDEV_V1
1841
1842/*
1843 * GPIO line event management
1844 */
1845
1846/**
1847 * struct lineevent_state - contains the state of a userspace event
1848 * @gdev: the GPIO device the event pertains to
1849 * @label: consumer label used to tag descriptors
1850 * @desc: the GPIO descriptor held by this event
1851 * @eflags: the event flags this line was requested with
1852 * @irq: the interrupt triggered in response to events on this GPIO
1853 * @wait: wait queue that handles blocking reads of events
1854 * @device_unregistered_nb: notifier block for receiving gdev unregister events
1855 * @events: KFIFO for the GPIO events
1856 * @timestamp: cache for the timestamp, storing it between the hardirq
1857 * handler and the IRQ thread, used to bring the timestamp close to the
1858 * actual event
1859 */
1860struct lineevent_state {
1861 struct gpio_device *gdev;
1862 const char *label;
1863 struct gpio_desc *desc;
1864 u32 eflags;
1865 int irq;
1866 wait_queue_head_t wait;
1867 struct notifier_block device_unregistered_nb;
1868 DECLARE_KFIFO(events, struct gpioevent_data, 16);
1869 u64 timestamp;
1870};
1871
1872#define GPIOEVENT_REQUEST_VALID_FLAGS \
1873 (GPIOEVENT_REQUEST_RISING_EDGE | \
1874 GPIOEVENT_REQUEST_FALLING_EDGE)
1875
1876static __poll_t lineevent_poll(struct file *file,
1877 struct poll_table_struct *wait)
1878{
1879 struct lineevent_state *le = file->private_data;
1880 __poll_t events = 0;
1881
1882 guard(rwsem_read)(&le->gdev->sem);
1883
1884 if (!le->gdev->chip)
1885 return EPOLLHUP | EPOLLERR;
1886
1887 poll_wait(file, &le->wait, wait);
1888
1889 if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock))
1890 events = EPOLLIN | EPOLLRDNORM;
1891
1892 return events;
1893}
1894
1895static int lineevent_unregistered_notify(struct notifier_block *nb,
1896 unsigned long action, void *data)
1897{
1898 struct lineevent_state *le = container_of(nb, struct lineevent_state,
1899 device_unregistered_nb);
1900
1901 wake_up_poll(&le->wait, EPOLLIN | EPOLLERR);
1902
1903 return NOTIFY_OK;
1904}
1905
1906struct compat_gpioevent_data {
1907 compat_u64 timestamp;
1908 u32 id;
1909};
1910
1911static ssize_t lineevent_read(struct file *file, char __user *buf,
1912 size_t count, loff_t *f_ps)
1913{
1914 struct lineevent_state *le = file->private_data;
1915 struct gpioevent_data ge;
1916 ssize_t bytes_read = 0;
1917 ssize_t ge_size;
1918 int ret;
1919
1920 guard(rwsem_read)(&le->gdev->sem);
1921
1922 if (!le->gdev->chip)
1923 return -ENODEV;
1924
1925	/*
1926	 * When a compat system call is used, struct gpioevent_data has a
1927	 * different size, at least on ia32, due to alignment differences.
1928	 * The first member is 64 bits and is followed by a 32-bit member,
1929	 * so there is no gap between them; the only difference is the
1930	 * padding at the end of the structure. Hence, we calculate the
1931	 * actual sizeof() and pass this as an argument to copy_to_user()
1932	 * to drop the unneeded bytes from the output.
1933	 */
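	/*
	 * For example, on x86-64 struct gpioevent_data is padded out to
	 * 16 bytes, while the ia32-compatible layout is 12 bytes since
	 * compat_u64 only requires 4-byte alignment (sizes illustrative of
	 * the usual ABIs).
	 */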
1934 if (compat_need_64bit_alignment_fixup())
1935		ge_size = sizeof(struct compat_gpioevent_data);
1936 else
1937 ge_size = sizeof(struct gpioevent_data);
1938 if (count < ge_size)
1939 return -EINVAL;
1940
1941 do {
1942 scoped_guard(spinlock, &le->wait.lock) {
1943 if (kfifo_is_empty(&le->events)) {
1944 if (bytes_read)
1945 return bytes_read;
1946
1947 if (file->f_flags & O_NONBLOCK)
1948 return -EAGAIN;
1949
1950 ret = wait_event_interruptible_locked(le->wait,
1951 !kfifo_is_empty(&le->events));
1952 if (ret)
1953 return ret;
1954 }
1955
1956 ret = kfifo_out(&le->events, &ge, 1);
1957 }
1958 if (ret != 1) {
1959 /*
1960 * This should never happen - we were holding the lock
1961 * from the moment we learned the fifo is no longer
1962 * empty until now.
1963 */
1964 ret = -EIO;
1965 break;
1966 }
1967
1968 if (copy_to_user(buf + bytes_read, &ge, ge_size))
1969 return -EFAULT;
1970 bytes_read += ge_size;
1971 } while (count >= bytes_read + ge_size);
1972
1973 return bytes_read;
1974}
1975
1976static void lineevent_free(struct lineevent_state *le)
1977{
1978 if (le->device_unregistered_nb.notifier_call)
1979 blocking_notifier_chain_unregister(&le->gdev->device_notifier,
1980 &le->device_unregistered_nb);
1981 if (le->irq)
1982 free_irq(le->irq, le);
1983 if (le->desc)
1984 gpiod_free(le->desc);
1985 kfree(le->label);
1986 gpio_device_put(le->gdev);
1987 kfree(le);
1988}
1989
1990static int lineevent_release(struct inode *inode, struct file *file)
1991{
1992 lineevent_free(file->private_data);
1993 return 0;
1994}
1995
1996static long lineevent_ioctl(struct file *file, unsigned int cmd,
1997 unsigned long arg)
1998{
1999 struct lineevent_state *le = file->private_data;
2000 void __user *ip = (void __user *)arg;
2001 struct gpiohandle_data ghd;
2002
2003 guard(rwsem_read)(&le->gdev->sem);
2004
2005 if (!le->gdev->chip)
2006 return -ENODEV;
2007
2008 /*
2009 * We can get the value for an event line but not set it,
2010 * because it is input by definition.
2011 */
2012 if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
2013 int val;
2014
2015 memset(&ghd, 0, sizeof(ghd));
2016
2017 val = gpiod_get_value_cansleep(le->desc);
2018 if (val < 0)
2019 return val;
2020 ghd.values[0] = val;
2021
2022 if (copy_to_user(ip, &ghd, sizeof(ghd)))
2023 return -EFAULT;
2024
2025 return 0;
2026 }
2027 return -EINVAL;
2028}
2029
2030#ifdef CONFIG_COMPAT
2031static long lineevent_ioctl_compat(struct file *file, unsigned int cmd,
2032 unsigned long arg)
2033{
2034 return lineevent_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
2035}
2036#endif
2037
2038static const struct file_operations lineevent_fileops = {
2039 .release = lineevent_release,
2040 .read = lineevent_read,
2041 .poll = lineevent_poll,
2042 .owner = THIS_MODULE,
2043 .llseek = noop_llseek,
2044 .unlocked_ioctl = lineevent_ioctl,
2045#ifdef CONFIG_COMPAT
2046 .compat_ioctl = lineevent_ioctl_compat,
2047#endif
2048};
2049
2050static irqreturn_t lineevent_irq_thread(int irq, void *p)
2051{
2052 struct lineevent_state *le = p;
2053 struct gpioevent_data ge;
2054 int ret;
2055
2056 /* Do not leak kernel stack to userspace */
2057 memset(&ge, 0, sizeof(ge));
2058
2059 /*
2060 * We may be running from a nested threaded interrupt in which case
2061 * we didn't get the timestamp from lineevent_irq_handler().
2062 */
2063 if (!le->timestamp)
2064 ge.timestamp = ktime_get_ns();
2065 else
2066 ge.timestamp = le->timestamp;
2067
2068 if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
2069 && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
2070 int level = gpiod_get_value_cansleep(le->desc);
2071
2072 if (level)
2073 /* Emit low-to-high event */
2074 ge.id = GPIOEVENT_EVENT_RISING_EDGE;
2075 else
2076 /* Emit high-to-low event */
2077 ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
2078 } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
2079 /* Emit low-to-high event */
2080 ge.id = GPIOEVENT_EVENT_RISING_EDGE;
2081 } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
2082 /* Emit high-to-low event */
2083 ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
2084 } else {
2085 return IRQ_NONE;
2086 }
2087
2088 ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge,
2089 1, &le->wait.lock);
2090 if (ret)
2091 wake_up_poll(&le->wait, EPOLLIN);
2092 else
2093 pr_debug_ratelimited("event FIFO is full - event dropped\n");
2094
2095 return IRQ_HANDLED;
2096}
2097
2098static irqreturn_t lineevent_irq_handler(int irq, void *p)
2099{
2100 struct lineevent_state *le = p;
2101
2102 /*
2103 * Just store the timestamp in hardirq context so we get it as
2104 * close in time as possible to the actual event.
2105 */
2106 le->timestamp = ktime_get_ns();
2107
2108 return IRQ_WAKE_THREAD;
2109}
2110
2111static int lineevent_create(struct gpio_device *gdev, void __user *ip)
2112{
2113 struct gpioevent_request eventreq;
2114 struct lineevent_state *le;
2115 struct gpio_desc *desc;
2116 struct file *file;
2117 u32 offset;
2118 u32 lflags;
2119 u32 eflags;
2120 int fd;
2121 int ret;
2122 int irq, irqflags = 0;
2123
2124 if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
2125 return -EFAULT;
2126
2127 offset = eventreq.lineoffset;
2128 lflags = eventreq.handleflags;
2129 eflags = eventreq.eventflags;
2130
2131 desc = gpiochip_get_desc(gdev->chip, offset);
2132 if (IS_ERR(desc))
2133 return PTR_ERR(desc);
2134
2135	/* Return an error if an unknown flag is set */
2136 if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
2137 (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS))
2138 return -EINVAL;
2139
2140 /* This is just wrong: we don't look for events on output lines */
2141 if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
2142 (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
2143 (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
2144 return -EINVAL;
2145
2146 /* Only one bias flag can be set. */
2147 if (((lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
2148 (lflags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
2149 GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
2150 ((lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
2151 (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
2152 return -EINVAL;
2153
2154 le = kzalloc(sizeof(*le), GFP_KERNEL);
2155 if (!le)
2156 return -ENOMEM;
2157 le->gdev = gpio_device_get(gdev);
2158
2159 if (eventreq.consumer_label[0] != '\0') {
2160 /* label is only initialized if consumer_label is set */
2161 le->label = kstrndup(eventreq.consumer_label,
2162 sizeof(eventreq.consumer_label) - 1,
2163 GFP_KERNEL);
2164 if (!le->label) {
2165 ret = -ENOMEM;
2166 goto out_free_le;
2167 }
2168 }
2169
2170 ret = gpiod_request_user(desc, le->label);
2171 if (ret)
2172 goto out_free_le;
2173 le->desc = desc;
2174 le->eflags = eflags;
2175
2176 linehandle_flags_to_desc_flags(lflags, &desc->flags);
2177
2178 ret = gpiod_direction_input(desc);
2179 if (ret)
2180 goto out_free_le;
2181
2182 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
2183
2184 irq = gpiod_to_irq(desc);
2185 if (irq <= 0) {
2186 ret = -ENODEV;
2187 goto out_free_le;
2188 }
2189
2190 if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
2191 irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
2192 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
2193 if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
2194 irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
2195 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
2196 irqflags |= IRQF_ONESHOT;
2197
2198 INIT_KFIFO(le->events);
2199 init_waitqueue_head(&le->wait);
2200
2201 le->device_unregistered_nb.notifier_call = lineevent_unregistered_notify;
2202 ret = blocking_notifier_chain_register(&gdev->device_notifier,
2203 &le->device_unregistered_nb);
2204 if (ret)
2205 goto out_free_le;
2206
2207 /* Request a thread to read the events */
2208 ret = request_threaded_irq(irq,
2209 lineevent_irq_handler,
2210 lineevent_irq_thread,
2211 irqflags,
2212 le->label,
2213 le);
2214 if (ret)
2215 goto out_free_le;
2216
2217 le->irq = irq;
2218
2219 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
2220 if (fd < 0) {
2221 ret = fd;
2222 goto out_free_le;
2223 }
2224
2225 file = anon_inode_getfile("gpio-event",
2226 &lineevent_fileops,
2227 le,
2228 O_RDONLY | O_CLOEXEC);
2229 if (IS_ERR(file)) {
2230 ret = PTR_ERR(file);
2231 goto out_put_unused_fd;
2232 }
2233
2234 eventreq.fd = fd;
2235 if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
2236 /*
2237 * fput() will trigger the release() callback, so do not go onto
2238 * the regular error cleanup path here.
2239 */
2240 fput(file);
2241 put_unused_fd(fd);
2242 return -EFAULT;
2243 }
2244
2245 fd_install(fd, file);
2246
2247 return 0;
2248
2249out_put_unused_fd:
2250 put_unused_fd(fd);
2251out_free_le:
2252 lineevent_free(le);
2253 return ret;
2254}
2255
2256static void gpio_v2_line_info_to_v1(struct gpio_v2_line_info *info_v2,
2257 struct gpioline_info *info_v1)
2258{
2259 u64 flagsv2 = info_v2->flags;
2260
2261 memcpy(info_v1->name, info_v2->name, sizeof(info_v1->name));
2262 memcpy(info_v1->consumer, info_v2->consumer, sizeof(info_v1->consumer));
2263 info_v1->line_offset = info_v2->offset;
2264 info_v1->flags = 0;
2265
2266 if (flagsv2 & GPIO_V2_LINE_FLAG_USED)
2267 info_v1->flags |= GPIOLINE_FLAG_KERNEL;
2268
2269 if (flagsv2 & GPIO_V2_LINE_FLAG_OUTPUT)
2270 info_v1->flags |= GPIOLINE_FLAG_IS_OUT;
2271
2272 if (flagsv2 & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
2273 info_v1->flags |= GPIOLINE_FLAG_ACTIVE_LOW;
2274
2275 if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_DRAIN)
2276 info_v1->flags |= GPIOLINE_FLAG_OPEN_DRAIN;
2277 if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_SOURCE)
2278 info_v1->flags |= GPIOLINE_FLAG_OPEN_SOURCE;
2279
2280 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)
2281 info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
2282 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN)
2283 info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
2284 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_DISABLED)
2285 info_v1->flags |= GPIOLINE_FLAG_BIAS_DISABLE;
2286}
2287
2288static void gpio_v2_line_info_changed_to_v1(
2289 struct gpio_v2_line_info_changed *lic_v2,
2290 struct gpioline_info_changed *lic_v1)
2291{
2292 memset(lic_v1, 0, sizeof(*lic_v1));
2293 gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
2294 lic_v1->timestamp = lic_v2->timestamp_ns;
2295 lic_v1->event_type = lic_v2->event_type;
2296}
2297
2298#endif /* CONFIG_GPIO_CDEV_V1 */
2299
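/*
 * Snapshot the state of a descriptor into the v2 uAPI line info structure.
 */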
2300static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
2301 struct gpio_v2_line_info *info)
2302{
2303 struct gpio_chip *gc = desc->gdev->chip;
2304 unsigned long dflags;
2305
2306 memset(info, 0, sizeof(*info));
2307 info->offset = gpio_chip_hwgpio(desc);
2308
2309 scoped_guard(spinlock_irqsave, &gpio_lock) {
2310 if (desc->name)
2311 strscpy(info->name, desc->name, sizeof(info->name));
2312
2313 if (desc->label)
2314 strscpy(info->consumer, desc->label,
2315 sizeof(info->consumer));
2316
2317 dflags = READ_ONCE(desc->flags);
2318 }
2319
2320 /*
2321	 * Userspace only needs to know that the kernel is using this GPIO so
2322	 * that it can't use it.
2323 * The calculation of the used flag is slightly racy, as it may read
2324 * desc, gc and pinctrl state without a lock covering all three at
2325 * once. Worst case if the line is in transition and the calculation
2326 * is inconsistent then it looks to the user like they performed the
2327 * read on the other side of the transition - but that can always
2328 * happen.
2329 * The definitive test that a line is available to userspace is to
2330 * request it.
2331 */
2332 if (test_bit(FLAG_REQUESTED, &dflags) ||
2333 test_bit(FLAG_IS_HOGGED, &dflags) ||
2334 test_bit(FLAG_USED_AS_IRQ, &dflags) ||
2335 test_bit(FLAG_EXPORT, &dflags) ||
2336 test_bit(FLAG_SYSFS, &dflags) ||
2337 !gpiochip_line_is_valid(gc, info->offset) ||
2338 !pinctrl_gpio_can_use_line(gc, info->offset))
2339 info->flags |= GPIO_V2_LINE_FLAG_USED;
2340
2341 if (test_bit(FLAG_IS_OUT, &dflags))
2342 info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
2343 else
2344 info->flags |= GPIO_V2_LINE_FLAG_INPUT;
2345
2346 if (test_bit(FLAG_ACTIVE_LOW, &dflags))
2347 info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;
2348
2349 if (test_bit(FLAG_OPEN_DRAIN, &dflags))
2350 info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
2351 if (test_bit(FLAG_OPEN_SOURCE, &dflags))
2352 info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;
2353
2354 if (test_bit(FLAG_BIAS_DISABLE, &dflags))
2355 info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
2356 if (test_bit(FLAG_PULL_DOWN, &dflags))
2357 info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
2358 if (test_bit(FLAG_PULL_UP, &dflags))
2359 info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;
2360
2361 if (test_bit(FLAG_EDGE_RISING, &dflags))
2362 info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
2363 if (test_bit(FLAG_EDGE_FALLING, &dflags))
2364 info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;
2365
2366 if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &dflags))
2367 info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
2368 else if (test_bit(FLAG_EVENT_CLOCK_HTE, &dflags))
2369 info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
2370}
2371
2372struct gpio_chardev_data {
2373 struct gpio_device *gdev;
2374 wait_queue_head_t wait;
2375 DECLARE_KFIFO(events, struct gpio_v2_line_info_changed, 32);
2376 struct notifier_block lineinfo_changed_nb;
2377 struct notifier_block device_unregistered_nb;
2378 unsigned long *watched_lines;
2379#ifdef CONFIG_GPIO_CDEV_V1
2380 atomic_t watch_abi_version;
2381#endif
2382};
2383
2384static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
2385{
2386 struct gpio_device *gdev = cdev->gdev;
2387 struct gpiochip_info chipinfo;
2388
2389 memset(&chipinfo, 0, sizeof(chipinfo));
2390
2391 strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
2392 strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
2393 chipinfo.lines = gdev->ngpio;
2394 if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
2395 return -EFAULT;
2396 return 0;
2397}
2398
2399#ifdef CONFIG_GPIO_CDEV_V1
2400/*
2401 * returns 0 if the versions match, else the previously selected ABI version
2402 */
2403static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
2404 unsigned int version)
2405{
2406 int abiv = atomic_cmpxchg(&cdata->watch_abi_version, 0, version);
2407
2408 if (abiv == version)
2409 return 0;
2410
2411 return abiv;
2412}
2413
2414static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
2415 bool watch)
2416{
2417 struct gpio_desc *desc;
2418 struct gpioline_info lineinfo;
2419 struct gpio_v2_line_info lineinfo_v2;
2420
2421 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
2422 return -EFAULT;
2423
2424 /* this doubles as a range check on line_offset */
2425 desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.line_offset);
2426 if (IS_ERR(desc))
2427 return PTR_ERR(desc);
2428
2429 if (watch) {
2430 if (lineinfo_ensure_abi_version(cdev, 1))
2431 return -EPERM;
2432
2433 if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
2434 return -EBUSY;
2435 }
2436
2437 gpio_desc_to_lineinfo(desc, &lineinfo_v2);
2438 gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
2439
2440 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
2441 if (watch)
2442 clear_bit(lineinfo.line_offset, cdev->watched_lines);
2443 return -EFAULT;
2444 }
2445
2446 return 0;
2447}
2448#endif
2449
2450static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
2451 bool watch)
2452{
2453 struct gpio_desc *desc;
2454 struct gpio_v2_line_info lineinfo;
2455
2456 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
2457 return -EFAULT;
2458
2459 if (memchr_inv(lineinfo.padding, 0, sizeof(lineinfo.padding)))
2460 return -EINVAL;
2461
2462 desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.offset);
2463 if (IS_ERR(desc))
2464 return PTR_ERR(desc);
2465
2466 if (watch) {
2467#ifdef CONFIG_GPIO_CDEV_V1
2468 if (lineinfo_ensure_abi_version(cdev, 2))
2469 return -EPERM;
2470#endif
2471 if (test_and_set_bit(lineinfo.offset, cdev->watched_lines))
2472 return -EBUSY;
2473 }
2474 gpio_desc_to_lineinfo(desc, &lineinfo);
2475 supinfo_to_lineinfo(desc, &lineinfo);
2476
2477 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
2478 if (watch)
2479 clear_bit(lineinfo.offset, cdev->watched_lines);
2480 return -EFAULT;
2481 }
2482
2483 return 0;
2484}
2485
2486static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
2487{
2488 __u32 offset;
2489
2490 if (copy_from_user(&offset, ip, sizeof(offset)))
2491 return -EFAULT;
2492
2493 if (offset >= cdev->gdev->ngpio)
2494 return -EINVAL;
2495
2496 if (!test_and_clear_bit(offset, cdev->watched_lines))
2497 return -EBUSY;
2498
2499 return 0;
2500}
2501
2502/*
2503 * gpio_ioctl() - ioctl handler for the GPIO chardev
2504 */
2505static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2506{
2507 struct gpio_chardev_data *cdev = file->private_data;
2508 struct gpio_device *gdev = cdev->gdev;
2509 void __user *ip = (void __user *)arg;
2510
2511 guard(rwsem_read)(&gdev->sem);
2512
2513	/* We fail any subsequent ioctl()s when the chip is gone */
2514 if (!gdev->chip)
2515 return -ENODEV;
2516
2517 /* Fill in the struct and pass to userspace */
2518 switch (cmd) {
2519 case GPIO_GET_CHIPINFO_IOCTL:
2520 return chipinfo_get(cdev, ip);
2521#ifdef CONFIG_GPIO_CDEV_V1
2522 case GPIO_GET_LINEHANDLE_IOCTL:
2523 return linehandle_create(gdev, ip);
2524 case GPIO_GET_LINEEVENT_IOCTL:
2525 return lineevent_create(gdev, ip);
2526 case GPIO_GET_LINEINFO_IOCTL:
2527 return lineinfo_get_v1(cdev, ip, false);
2528 case GPIO_GET_LINEINFO_WATCH_IOCTL:
2529 return lineinfo_get_v1(cdev, ip, true);
2530#endif /* CONFIG_GPIO_CDEV_V1 */
2531 case GPIO_V2_GET_LINEINFO_IOCTL:
2532 return lineinfo_get(cdev, ip, false);
2533 case GPIO_V2_GET_LINEINFO_WATCH_IOCTL:
2534 return lineinfo_get(cdev, ip, true);
2535 case GPIO_V2_GET_LINE_IOCTL:
2536 return linereq_create(gdev, ip);
2537 case GPIO_GET_LINEINFO_UNWATCH_IOCTL:
2538 return lineinfo_unwatch(cdev, ip);
2539 default:
2540 return -EINVAL;
2541 }
2542}
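
/*
 * A minimal userspace sketch of requesting a line through this ioctl
 * (illustrative only; the chip path and line offset are assumptions):
 *
 *	struct gpio_v2_line_request req = { 0 };
 *
 *	req.offsets[0] = 17;
 *	req.num_lines = 1;
 *	req.config.flags = GPIO_V2_LINE_FLAG_INPUT;
 *	strcpy(req.consumer, "example");
 *	int fd = open("/dev/gpiochip0", O_RDONLY);
 *	ioctl(fd, GPIO_V2_GET_LINE_IOCTL, &req);
 *	close(fd);
 *	// req.fd now refers to the requested line
 */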
2543
2544#ifdef CONFIG_COMPAT
2545static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
2546 unsigned long arg)
2547{
2548 return gpio_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
2549}
2550#endif
2551
2552static int lineinfo_changed_notify(struct notifier_block *nb,
2553 unsigned long action, void *data)
2554{
2555 struct gpio_chardev_data *cdev =
2556 container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
2557 struct gpio_v2_line_info_changed chg;
2558 struct gpio_desc *desc = data;
2559 int ret;
2560
2561 if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
2562 return NOTIFY_DONE;
2563
2564 memset(&chg, 0, sizeof(chg));
2565 chg.event_type = action;
2566 chg.timestamp_ns = ktime_get_ns();
2567 gpio_desc_to_lineinfo(desc, &chg.info);
2568 supinfo_to_lineinfo(desc, &chg.info);
2569
2570 ret = kfifo_in_spinlocked(&cdev->events, &chg, 1, &cdev->wait.lock);
2571 if (ret)
2572 wake_up_poll(&cdev->wait, EPOLLIN);
2573 else
2574 pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");
2575
2576 return NOTIFY_OK;
2577}
2578
2579static int gpio_device_unregistered_notify(struct notifier_block *nb,
2580 unsigned long action, void *data)
2581{
2582 struct gpio_chardev_data *cdev = container_of(nb,
2583 struct gpio_chardev_data,
2584 device_unregistered_nb);
2585
2586 wake_up_poll(&cdev->wait, EPOLLIN | EPOLLERR);
2587
2588 return NOTIFY_OK;
2589}
2590
2591static __poll_t lineinfo_watch_poll(struct file *file,
2592 struct poll_table_struct *pollt)
2593{
2594 struct gpio_chardev_data *cdev = file->private_data;
2595 __poll_t events = 0;
2596
2597 guard(rwsem_read)(&cdev->gdev->sem);
2598
2599 if (!cdev->gdev->chip)
2600 return EPOLLHUP | EPOLLERR;
2601
2602 poll_wait(file, &cdev->wait, pollt);
2603
2604 if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events,
2605 &cdev->wait.lock))
2606 events = EPOLLIN | EPOLLRDNORM;
2607
2608 return events;
2609}
2610
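/*
 * Read line info change events. When the v1 watch ABI is in use each event
 * is translated to struct gpioline_info_changed, so the per-event size
 * depends on which ABI the file selected.
 */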
2611static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
2612 size_t count, loff_t *off)
2613{
2614 struct gpio_chardev_data *cdev = file->private_data;
2615 struct gpio_v2_line_info_changed event;
2616 ssize_t bytes_read = 0;
2617 int ret;
2618 size_t event_size;
2619
2620 guard(rwsem_read)(&cdev->gdev->sem);
2621
2622 if (!cdev->gdev->chip)
2623 return -ENODEV;
2624
2625#ifndef CONFIG_GPIO_CDEV_V1
2626 event_size = sizeof(struct gpio_v2_line_info_changed);
2627 if (count < event_size)
2628 return -EINVAL;
2629#endif
2630
2631 do {
2632 scoped_guard(spinlock, &cdev->wait.lock) {
2633 if (kfifo_is_empty(&cdev->events)) {
2634 if (bytes_read)
2635 return bytes_read;
2636
2637 if (file->f_flags & O_NONBLOCK)
2638 return -EAGAIN;
2639
2640 ret = wait_event_interruptible_locked(cdev->wait,
2641 !kfifo_is_empty(&cdev->events));
2642 if (ret)
2643 return ret;
2644 }
2645#ifdef CONFIG_GPIO_CDEV_V1
2646 /* must be after kfifo check so watch_abi_version is set */
2647 if (atomic_read(&cdev->watch_abi_version) == 2)
2648 event_size = sizeof(struct gpio_v2_line_info_changed);
2649 else
2650 event_size = sizeof(struct gpioline_info_changed);
2651 if (count < event_size)
2652 return -EINVAL;
2653#endif
2654 ret = kfifo_out(&cdev->events, &event, 1);
2655 }
2656		if (ret != 1) {
2657			/* We should never get here. See lineevent_read(). */
2658			ret = -EIO;
2659			break;
2660		}
2661
2662#ifdef CONFIG_GPIO_CDEV_V1
2663 if (event_size == sizeof(struct gpio_v2_line_info_changed)) {
2664 if (copy_to_user(buf + bytes_read, &event, event_size))
2665 return -EFAULT;
2666 } else {
2667 struct gpioline_info_changed event_v1;
2668
2669 gpio_v2_line_info_changed_to_v1(&event, &event_v1);
2670 if (copy_to_user(buf + bytes_read, &event_v1,
2671 event_size))
2672 return -EFAULT;
2673 }
2674#else
2675 if (copy_to_user(buf + bytes_read, &event, event_size))
2676 return -EFAULT;
2677#endif
2678 bytes_read += event_size;
2679 } while (count >= bytes_read + sizeof(event));
2680
2681 return bytes_read;
2682}
2683
2684/**
2685 * gpio_chrdev_open() - open the chardev for ioctl operations
2686 * @inode: inode for this chardev
2687 * @file: file struct for storing private data
2688 * Returns 0 on success
2689 */
2690static int gpio_chrdev_open(struct inode *inode, struct file *file)
2691{
2692 struct gpio_device *gdev = container_of(inode->i_cdev,
2693 struct gpio_device, chrdev);
2694 struct gpio_chardev_data *cdev;
2695 int ret = -ENOMEM;
2696
2697 guard(rwsem_read)(&gdev->sem);
2698
2699 /* Fail on open if the backing gpiochip is gone */
2700 if (!gdev->chip)
2701 return -ENODEV;
2702
2703 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
2704 if (!cdev)
2705		return -ENOMEM;
2706
2707 cdev->watched_lines = bitmap_zalloc(gdev->chip->ngpio, GFP_KERNEL);
2708 if (!cdev->watched_lines)
2709 goto out_free_cdev;
2710
2711 init_waitqueue_head(&cdev->wait);
2712 INIT_KFIFO(cdev->events);
2713 cdev->gdev = gpio_device_get(gdev);
2714
2715 cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
2716 ret = blocking_notifier_chain_register(&gdev->line_state_notifier,
2717 &cdev->lineinfo_changed_nb);
2718 if (ret)
2719 goto out_free_bitmap;
2720
2721 cdev->device_unregistered_nb.notifier_call =
2722 gpio_device_unregistered_notify;
2723 ret = blocking_notifier_chain_register(&gdev->device_notifier,
2724 &cdev->device_unregistered_nb);
2725 if (ret)
2726 goto out_unregister_line_notifier;
2727
2728 file->private_data = cdev;
2729
2730 ret = nonseekable_open(inode, file);
2731 if (ret)
2732 goto out_unregister_device_notifier;
2733
2734 return ret;
2735
2736out_unregister_device_notifier:
2737 blocking_notifier_chain_unregister(&gdev->device_notifier,
2738 &cdev->device_unregistered_nb);
2739out_unregister_line_notifier:
2740 blocking_notifier_chain_unregister(&gdev->line_state_notifier,
2741 &cdev->lineinfo_changed_nb);
2742out_free_bitmap:
2743 gpio_device_put(gdev);
2744 bitmap_free(cdev->watched_lines);
2745out_free_cdev:
2746 kfree(cdev);
2747 return ret;
2748}
2749
2750/**
2751 * gpio_chrdev_release() - close chardev after ioctl operations
2752 * @inode: inode for this chardev
2753 * @file: file struct for storing private data
2754 * Returns 0 on success
2755 */
2756static int gpio_chrdev_release(struct inode *inode, struct file *file)
2757{
2758 struct gpio_chardev_data *cdev = file->private_data;
2759 struct gpio_device *gdev = cdev->gdev;
2760
2761 bitmap_free(cdev->watched_lines);
2762 blocking_notifier_chain_unregister(&gdev->device_notifier,
2763 &cdev->device_unregistered_nb);
2764 blocking_notifier_chain_unregister(&gdev->line_state_notifier,
2765 &cdev->lineinfo_changed_nb);
2766 gpio_device_put(gdev);
2767 kfree(cdev);
2768
2769 return 0;
2770}
2771
2772static const struct file_operations gpio_fileops = {
2773 .release = gpio_chrdev_release,
2774 .open = gpio_chrdev_open,
2775 .poll = lineinfo_watch_poll,
2776 .read = lineinfo_watch_read,
2777 .owner = THIS_MODULE,
2778 .llseek = no_llseek,
2779 .unlocked_ioctl = gpio_ioctl,
2780#ifdef CONFIG_COMPAT
2781 .compat_ioctl = gpio_ioctl_compat,
2782#endif
2783};
2784
2785int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
2786{
2787 int ret;
2788
2789 cdev_init(&gdev->chrdev, &gpio_fileops);
2790 gdev->chrdev.owner = THIS_MODULE;
2791 gdev->dev.devt = MKDEV(MAJOR(devt), gdev->id);
2792
2793 ret = cdev_device_add(&gdev->chrdev, &gdev->dev);
2794 if (ret)
2795 return ret;
2796
2797 chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n",
2798 MAJOR(devt), gdev->id);
2799
2800 return 0;
2801}
2802
2803void gpiolib_cdev_unregister(struct gpio_device *gdev)
2804{
2805 cdev_device_del(&gdev->chrdev, &gdev->dev);
2806 blocking_notifier_call_chain(&gdev->device_notifier, 0, NULL);
2807}