Loading...
1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/anon_inodes.h>
4#include <linux/atomic.h>
5#include <linux/bitmap.h>
6#include <linux/build_bug.h>
7#include <linux/cdev.h>
8#include <linux/cleanup.h>
9#include <linux/compat.h>
10#include <linux/compiler.h>
11#include <linux/device.h>
12#include <linux/err.h>
13#include <linux/file.h>
14#include <linux/gpio.h>
15#include <linux/gpio/driver.h>
16#include <linux/hte.h>
17#include <linux/interrupt.h>
18#include <linux/irqreturn.h>
19#include <linux/kfifo.h>
20#include <linux/module.h>
21#include <linux/mutex.h>
22#include <linux/overflow.h>
23#include <linux/pinctrl/consumer.h>
24#include <linux/poll.h>
25#include <linux/seq_file.h>
26#include <linux/spinlock.h>
27#include <linux/string.h>
28#include <linux/timekeeping.h>
29#include <linux/uaccess.h>
30#include <linux/workqueue.h>
31
32#include <uapi/linux/gpio.h>
33
34#include "gpiolib.h"
35#include "gpiolib-cdev.h"
36
37/*
38 * Array sizes must ensure 64-bit alignment and not create holes in the
39 * struct packing.
40 */
41static_assert(IS_ALIGNED(GPIO_V2_LINES_MAX, 2));
42static_assert(IS_ALIGNED(GPIO_MAX_NAME_SIZE, 8));
43
44/*
45 * Check that uAPI structs are 64-bit aligned for 32/64-bit compatibility
46 */
47static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_attribute), 8));
48static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config_attribute), 8));
49static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config), 8));
50static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_request), 8));
51static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info), 8));
52static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info_changed), 8));
53static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_event), 8));
54static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8));
55
56/* Character device interface to GPIO.
57 *
58 * The GPIO character device, /dev/gpiochipN, provides userspace an
59 * interface to gpiolib GPIOs via ioctl()s.
60 */
61
62/*
63 * GPIO line handle management
64 */
65
66#ifdef CONFIG_GPIO_CDEV_V1
67/**
68 * struct linehandle_state - contains the state of a userspace handle
69 * @gdev: the GPIO device the handle pertains to
70 * @label: consumer label used to tag descriptors
71 * @descs: the GPIO descriptors held by this handle
72 * @num_descs: the number of descriptors held in the descs array
73 */
74struct linehandle_state {
75 struct gpio_device *gdev;
76 const char *label;
77 struct gpio_desc *descs[GPIOHANDLES_MAX];
78 u32 num_descs;
79};
80
81#define GPIOHANDLE_REQUEST_VALID_FLAGS \
82 (GPIOHANDLE_REQUEST_INPUT | \
83 GPIOHANDLE_REQUEST_OUTPUT | \
84 GPIOHANDLE_REQUEST_ACTIVE_LOW | \
85 GPIOHANDLE_REQUEST_BIAS_PULL_UP | \
86 GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | \
87 GPIOHANDLE_REQUEST_BIAS_DISABLE | \
88 GPIOHANDLE_REQUEST_OPEN_DRAIN | \
89 GPIOHANDLE_REQUEST_OPEN_SOURCE)
90
91#define GPIOHANDLE_REQUEST_DIRECTION_FLAGS \
92 (GPIOHANDLE_REQUEST_INPUT | \
93 GPIOHANDLE_REQUEST_OUTPUT)
94
95static int linehandle_validate_flags(u32 flags)
96{
97 /* Return an error if an unknown flag is set */
98 if (flags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
99 return -EINVAL;
100
101 /*
102 * Do not allow both INPUT & OUTPUT flags to be set as they are
103 * contradictory.
104 */
105 if ((flags & GPIOHANDLE_REQUEST_INPUT) &&
106 (flags & GPIOHANDLE_REQUEST_OUTPUT))
107 return -EINVAL;
108
109 /*
110 * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
111 * the hardware actually supports enabling both at the same time the
112 * electrical result would be disastrous.
113 */
114 if ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
115 (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
116 return -EINVAL;
117
118 /* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
119 if (!(flags & GPIOHANDLE_REQUEST_OUTPUT) &&
120 ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
121 (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
122 return -EINVAL;
123
124 /* Bias flags only allowed for input or output mode. */
125 if (!((flags & GPIOHANDLE_REQUEST_INPUT) ||
126 (flags & GPIOHANDLE_REQUEST_OUTPUT)) &&
127 ((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) ||
128 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP) ||
129 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)))
130 return -EINVAL;
131
132 /* Only one bias flag can be set. */
133 if (((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
134 (flags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
135 GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
136 ((flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
137 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
138 return -EINVAL;
139
140 return 0;
141}
142
143static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp)
144{
145 unsigned long flags = READ_ONCE(*flagsp);
146
147 assign_bit(FLAG_ACTIVE_LOW, &flags,
148 lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
149 assign_bit(FLAG_OPEN_DRAIN, &flags,
150 lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
151 assign_bit(FLAG_OPEN_SOURCE, &flags,
152 lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
153 assign_bit(FLAG_PULL_UP, &flags,
154 lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
155 assign_bit(FLAG_PULL_DOWN, &flags,
156 lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
157 assign_bit(FLAG_BIAS_DISABLE, &flags,
158 lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
159
160 WRITE_ONCE(*flagsp, flags);
161}
162
163static long linehandle_set_config(struct linehandle_state *lh,
164 void __user *ip)
165{
166 struct gpiohandle_config gcnf;
167 struct gpio_desc *desc;
168 int i, ret;
169 u32 lflags;
170
171 if (copy_from_user(&gcnf, ip, sizeof(gcnf)))
172 return -EFAULT;
173
174 lflags = gcnf.flags;
175 ret = linehandle_validate_flags(lflags);
176 if (ret)
177 return ret;
178
179 /* Lines must be reconfigured explicitly as input or output. */
180 if (!(lflags & GPIOHANDLE_REQUEST_DIRECTION_FLAGS))
181 return -EINVAL;
182
183 for (i = 0; i < lh->num_descs; i++) {
184 desc = lh->descs[i];
185 linehandle_flags_to_desc_flags(lflags, &desc->flags);
186
187 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
188 int val = !!gcnf.default_values[i];
189
190 ret = gpiod_direction_output_nonotify(desc, val);
191 if (ret)
192 return ret;
193 } else {
194 ret = gpiod_direction_input_nonotify(desc);
195 if (ret)
196 return ret;
197 }
198
199 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
200 }
201 return 0;
202}
203
204static long linehandle_ioctl(struct file *file, unsigned int cmd,
205 unsigned long arg)
206{
207 struct linehandle_state *lh = file->private_data;
208 void __user *ip = (void __user *)arg;
209 struct gpiohandle_data ghd;
210 DECLARE_BITMAP(vals, GPIOHANDLES_MAX);
211 unsigned int i;
212 int ret;
213
214 guard(srcu)(&lh->gdev->srcu);
215
216 if (!rcu_access_pointer(lh->gdev->chip))
217 return -ENODEV;
218
219 switch (cmd) {
220 case GPIOHANDLE_GET_LINE_VALUES_IOCTL:
221 /* NOTE: It's okay to read values of output lines */
222 ret = gpiod_get_array_value_complex(false, true,
223 lh->num_descs, lh->descs,
224 NULL, vals);
225 if (ret)
226 return ret;
227
228 memset(&ghd, 0, sizeof(ghd));
229 for (i = 0; i < lh->num_descs; i++)
230 ghd.values[i] = test_bit(i, vals);
231
232 if (copy_to_user(ip, &ghd, sizeof(ghd)))
233 return -EFAULT;
234
235 return 0;
236 case GPIOHANDLE_SET_LINE_VALUES_IOCTL:
237 /*
238 * All line descriptors were created at once with the same
239 * flags so just check if the first one is really output.
240 */
241 if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
242 return -EPERM;
243
244 if (copy_from_user(&ghd, ip, sizeof(ghd)))
245 return -EFAULT;
246
247 /* Clamp all values to [0,1] */
248 for (i = 0; i < lh->num_descs; i++)
249 __assign_bit(i, vals, ghd.values[i]);
250
251 /* Reuse the array setting function */
252 return gpiod_set_array_value_complex(false,
253 true,
254 lh->num_descs,
255 lh->descs,
256 NULL,
257 vals);
258 case GPIOHANDLE_SET_CONFIG_IOCTL:
259 return linehandle_set_config(lh, ip);
260 default:
261 return -EINVAL;
262 }
263}
264
265#ifdef CONFIG_COMPAT
266static long linehandle_ioctl_compat(struct file *file, unsigned int cmd,
267 unsigned long arg)
268{
269 return linehandle_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
270}
271#endif
272
273static void linehandle_free(struct linehandle_state *lh)
274{
275 int i;
276
277 for (i = 0; i < lh->num_descs; i++)
278 if (lh->descs[i])
279 gpiod_free(lh->descs[i]);
280 kfree(lh->label);
281 gpio_device_put(lh->gdev);
282 kfree(lh);
283}
284
285static int linehandle_release(struct inode *inode, struct file *file)
286{
287 linehandle_free(file->private_data);
288 return 0;
289}
290
291static const struct file_operations linehandle_fileops = {
292 .release = linehandle_release,
293 .owner = THIS_MODULE,
294 .llseek = noop_llseek,
295 .unlocked_ioctl = linehandle_ioctl,
296#ifdef CONFIG_COMPAT
297 .compat_ioctl = linehandle_ioctl_compat,
298#endif
299};
300
301static int linehandle_create(struct gpio_device *gdev, void __user *ip)
302{
303 struct gpiohandle_request handlereq;
304 struct linehandle_state *lh;
305 struct file *file;
306 int fd, i, ret;
307 u32 lflags;
308
309 if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
310 return -EFAULT;
311 if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
312 return -EINVAL;
313
314 lflags = handlereq.flags;
315
316 ret = linehandle_validate_flags(lflags);
317 if (ret)
318 return ret;
319
320 lh = kzalloc(sizeof(*lh), GFP_KERNEL);
321 if (!lh)
322 return -ENOMEM;
323 lh->gdev = gpio_device_get(gdev);
324
325 if (handlereq.consumer_label[0] != '\0') {
326 /* label is only initialized if consumer_label is set */
327 lh->label = kstrndup(handlereq.consumer_label,
328 sizeof(handlereq.consumer_label) - 1,
329 GFP_KERNEL);
330 if (!lh->label) {
331 ret = -ENOMEM;
332 goto out_free_lh;
333 }
334 }
335
336 lh->num_descs = handlereq.lines;
337
338 /* Request each GPIO */
339 for (i = 0; i < handlereq.lines; i++) {
340 u32 offset = handlereq.lineoffsets[i];
341 struct gpio_desc *desc = gpio_device_get_desc(gdev, offset);
342
343 if (IS_ERR(desc)) {
344 ret = PTR_ERR(desc);
345 goto out_free_lh;
346 }
347
348 ret = gpiod_request_user(desc, lh->label);
349 if (ret)
350 goto out_free_lh;
351 lh->descs[i] = desc;
352 linehandle_flags_to_desc_flags(handlereq.flags, &desc->flags);
353
354 ret = gpiod_set_transitory(desc, false);
355 if (ret < 0)
356 goto out_free_lh;
357
358 /*
359 * Lines have to be requested explicitly for input
360 * or output, else the line will be treated "as is".
361 */
362 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
363 int val = !!handlereq.default_values[i];
364
365 ret = gpiod_direction_output_nonotify(desc, val);
366 if (ret)
367 goto out_free_lh;
368 } else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
369 ret = gpiod_direction_input_nonotify(desc);
370 if (ret)
371 goto out_free_lh;
372 }
373
374 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
375
376 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
377 offset);
378 }
379
380 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
381 if (fd < 0) {
382 ret = fd;
383 goto out_free_lh;
384 }
385
386 file = anon_inode_getfile("gpio-linehandle",
387 &linehandle_fileops,
388 lh,
389 O_RDONLY | O_CLOEXEC);
390 if (IS_ERR(file)) {
391 ret = PTR_ERR(file);
392 goto out_put_unused_fd;
393 }
394
395 handlereq.fd = fd;
396 if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
397 /*
398 * fput() will trigger the release() callback, so do not go onto
399 * the regular error cleanup path here.
400 */
401 fput(file);
402 put_unused_fd(fd);
403 return -EFAULT;
404 }
405
406 fd_install(fd, file);
407
408 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
409 lh->num_descs);
410
411 return 0;
412
413out_put_unused_fd:
414 put_unused_fd(fd);
415out_free_lh:
416 linehandle_free(lh);
417 return ret;
418}
419#endif /* CONFIG_GPIO_CDEV_V1 */
420
421/**
422 * struct line - contains the state of a requested line
423 * @desc: the GPIO descriptor for this line.
424 * @req: the corresponding line request
425 * @irq: the interrupt triggered in response to events on this GPIO
426 * @edflags: the edge flags, GPIO_V2_LINE_FLAG_EDGE_RISING and/or
427 * GPIO_V2_LINE_FLAG_EDGE_FALLING, indicating the edge detection applied
428 * @timestamp_ns: cache for the timestamp storing it between hardirq and
429 * IRQ thread, used to bring the timestamp close to the actual event
430 * @req_seqno: the seqno for the current edge event in the sequence of
431 * events for the corresponding line request. This is drawn from the @req.
432 * @line_seqno: the seqno for the current edge event in the sequence of
433 * events for this line.
434 * @work: the worker that implements software debouncing
435 * @sw_debounced: flag indicating if the software debouncer is active
436 * @level: the current debounced physical level of the line
437 * @hdesc: the Hardware Timestamp Engine (HTE) descriptor
438 * @raw_level: the line level at the time of event
439 * @total_discard_seq: the running counter of the discarded events
440 * @last_seqno: the last sequence number before debounce period expires
441 */
442struct line {
443 struct gpio_desc *desc;
444 /*
445 * -- edge detector specific fields --
446 */
447 struct linereq *req;
448 unsigned int irq;
449 /*
450 * The flags for the active edge detector configuration.
451 *
452 * edflags is set by linereq_create(), linereq_free(), and
453 * linereq_set_config(), which are themselves mutually
454 * exclusive, and is accessed by edge_irq_thread(),
455 * process_hw_ts_thread() and debounce_work_func(),
456 * which can all live with a slightly stale value.
457 */
458 u64 edflags;
459 /*
460 * timestamp_ns and req_seqno are accessed only by
461 * edge_irq_handler() and edge_irq_thread(), which are themselves
462 * mutually exclusive, so no additional protection is necessary.
463 */
464 u64 timestamp_ns;
465 u32 req_seqno;
466 /*
467 * line_seqno is accessed by either edge_irq_thread() or
468 * debounce_work_func(), which are themselves mutually exclusive,
469 * so no additional protection is necessary.
470 */
471 u32 line_seqno;
472 /*
473 * -- debouncer specific fields --
474 */
475 struct delayed_work work;
476 /*
477 * sw_debounce is accessed by linereq_set_config(), which is the
478 * only setter, and linereq_get_values(), which can live with a
479 * slightly stale value.
480 */
481 unsigned int sw_debounced;
482 /*
483 * level is accessed by debounce_work_func(), which is the only
484 * setter, and linereq_get_values() which can live with a slightly
485 * stale value.
486 */
487 unsigned int level;
488#ifdef CONFIG_HTE
489 struct hte_ts_desc hdesc;
490 /*
491 * HTE provider sets line level at the time of event. The valid
492 * value is 0 or 1 and negative value for an error.
493 */
494 int raw_level;
495 /*
496 * when sw_debounce is set on HTE enabled line, this is running
497 * counter of the discarded events.
498 */
499 u32 total_discard_seq;
500 /*
501 * when sw_debounce is set on HTE enabled line, this variable records
502 * last sequence number before debounce period expires.
503 */
504 u32 last_seqno;
505#endif /* CONFIG_HTE */
506};
507
508/**
509 * struct linereq - contains the state of a userspace line request
510 * @gdev: the GPIO device the line request pertains to
511 * @label: consumer label used to tag GPIO descriptors
512 * @num_lines: the number of lines in the lines array
513 * @wait: wait queue that handles blocking reads of events
514 * @device_unregistered_nb: notifier block for receiving gdev unregister events
515 * @event_buffer_size: the number of elements allocated in @events
516 * @events: KFIFO for the GPIO events
517 * @seqno: the sequence number for edge events generated on all lines in
518 * this line request. Note that this is not used when @num_lines is 1, as
519 * the line_seqno is then the same and is cheaper to calculate.
520 * @config_mutex: mutex for serializing ioctl() calls to ensure consistency
521 * of configuration, particularly multi-step accesses to desc flags.
522 * @lines: the lines held by this line request, with @num_lines elements.
523 */
524struct linereq {
525 struct gpio_device *gdev;
526 const char *label;
527 u32 num_lines;
528 wait_queue_head_t wait;
529 struct notifier_block device_unregistered_nb;
530 u32 event_buffer_size;
531 DECLARE_KFIFO_PTR(events, struct gpio_v2_line_event);
532 atomic_t seqno;
533 struct mutex config_mutex;
534 struct line lines[] __counted_by(num_lines);
535};
536
537#define GPIO_V2_LINE_BIAS_FLAGS \
538 (GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \
539 GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \
540 GPIO_V2_LINE_FLAG_BIAS_DISABLED)
541
542#define GPIO_V2_LINE_DIRECTION_FLAGS \
543 (GPIO_V2_LINE_FLAG_INPUT | \
544 GPIO_V2_LINE_FLAG_OUTPUT)
545
546#define GPIO_V2_LINE_DRIVE_FLAGS \
547 (GPIO_V2_LINE_FLAG_OPEN_DRAIN | \
548 GPIO_V2_LINE_FLAG_OPEN_SOURCE)
549
550#define GPIO_V2_LINE_EDGE_FLAGS \
551 (GPIO_V2_LINE_FLAG_EDGE_RISING | \
552 GPIO_V2_LINE_FLAG_EDGE_FALLING)
553
554#define GPIO_V2_LINE_FLAG_EDGE_BOTH GPIO_V2_LINE_EDGE_FLAGS
555
556#define GPIO_V2_LINE_VALID_FLAGS \
557 (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
558 GPIO_V2_LINE_DIRECTION_FLAGS | \
559 GPIO_V2_LINE_DRIVE_FLAGS | \
560 GPIO_V2_LINE_EDGE_FLAGS | \
561 GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \
562 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
563 GPIO_V2_LINE_BIAS_FLAGS)
564
565/* subset of flags relevant for edge detector configuration */
566#define GPIO_V2_LINE_EDGE_DETECTOR_FLAGS \
567 (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
568 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
569 GPIO_V2_LINE_EDGE_FLAGS)
570
571static int linereq_unregistered_notify(struct notifier_block *nb,
572 unsigned long action, void *data)
573{
574 struct linereq *lr = container_of(nb, struct linereq,
575 device_unregistered_nb);
576
577 wake_up_poll(&lr->wait, EPOLLIN | EPOLLERR);
578
579 return NOTIFY_OK;
580}
581
582static void linereq_put_event(struct linereq *lr,
583 struct gpio_v2_line_event *le)
584{
585 bool overflow = false;
586
587 scoped_guard(spinlock, &lr->wait.lock) {
588 if (kfifo_is_full(&lr->events)) {
589 overflow = true;
590 kfifo_skip(&lr->events);
591 }
592 kfifo_in(&lr->events, le, 1);
593 }
594 if (!overflow)
595 wake_up_poll(&lr->wait, EPOLLIN);
596 else
597 pr_debug_ratelimited("event FIFO is full - event dropped\n");
598}
599
600static u64 line_event_timestamp(struct line *line)
601{
602 if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
603 return ktime_get_real_ns();
604 else if (IS_ENABLED(CONFIG_HTE) &&
605 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
606 return line->timestamp_ns;
607
608 return ktime_get_ns();
609}
610
611static u32 line_event_id(int level)
612{
613 return level ? GPIO_V2_LINE_EVENT_RISING_EDGE :
614 GPIO_V2_LINE_EVENT_FALLING_EDGE;
615}
616
617static inline char *make_irq_label(const char *orig)
618{
619 char *new;
620
621 if (!orig)
622 return NULL;
623
624 new = kstrdup_and_replace(orig, '/', ':', GFP_KERNEL);
625 if (!new)
626 return ERR_PTR(-ENOMEM);
627
628 return new;
629}
630
631static inline void free_irq_label(const char *label)
632{
633 kfree(label);
634}
635
636#ifdef CONFIG_HTE
637
638static enum hte_return process_hw_ts_thread(void *p)
639{
640 struct line *line;
641 struct linereq *lr;
642 struct gpio_v2_line_event le;
643 u64 edflags;
644 int level;
645
646 if (!p)
647 return HTE_CB_HANDLED;
648
649 line = p;
650 lr = line->req;
651
652 memset(&le, 0, sizeof(le));
653
654 le.timestamp_ns = line->timestamp_ns;
655 edflags = READ_ONCE(line->edflags);
656
657 switch (edflags & GPIO_V2_LINE_EDGE_FLAGS) {
658 case GPIO_V2_LINE_FLAG_EDGE_BOTH:
659 level = (line->raw_level >= 0) ?
660 line->raw_level :
661 gpiod_get_raw_value_cansleep(line->desc);
662
663 if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
664 level = !level;
665
666 le.id = line_event_id(level);
667 break;
668 case GPIO_V2_LINE_FLAG_EDGE_RISING:
669 le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
670 break;
671 case GPIO_V2_LINE_FLAG_EDGE_FALLING:
672 le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
673 break;
674 default:
675 return HTE_CB_HANDLED;
676 }
677 le.line_seqno = line->line_seqno;
678 le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
679 le.offset = gpio_chip_hwgpio(line->desc);
680
681 linereq_put_event(lr, &le);
682
683 return HTE_CB_HANDLED;
684}
685
686static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
687{
688 struct line *line;
689 struct linereq *lr;
690 int diff_seqno = 0;
691
692 if (!ts || !p)
693 return HTE_CB_HANDLED;
694
695 line = p;
696 line->timestamp_ns = ts->tsc;
697 line->raw_level = ts->raw_level;
698 lr = line->req;
699
700 if (READ_ONCE(line->sw_debounced)) {
701 line->total_discard_seq++;
702 line->last_seqno = ts->seq;
703 mod_delayed_work(system_wq, &line->work,
704 usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
705 } else {
706 if (unlikely(ts->seq < line->line_seqno))
707 return HTE_CB_HANDLED;
708
709 diff_seqno = ts->seq - line->line_seqno;
710 line->line_seqno = ts->seq;
711 if (lr->num_lines != 1)
712 line->req_seqno = atomic_add_return(diff_seqno,
713 &lr->seqno);
714
715 return HTE_RUN_SECOND_CB;
716 }
717
718 return HTE_CB_HANDLED;
719}
720
721static int hte_edge_setup(struct line *line, u64 eflags)
722{
723 int ret;
724 unsigned long flags = 0;
725 struct hte_ts_desc *hdesc = &line->hdesc;
726
727 if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
728 flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
729 HTE_FALLING_EDGE_TS :
730 HTE_RISING_EDGE_TS;
731 if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
732 flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
733 HTE_RISING_EDGE_TS :
734 HTE_FALLING_EDGE_TS;
735
736 line->total_discard_seq = 0;
737
738 hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags, NULL,
739 line->desc);
740
741 ret = hte_ts_get(NULL, hdesc, 0);
742 if (ret)
743 return ret;
744
745 return hte_request_ts_ns(hdesc, process_hw_ts, process_hw_ts_thread,
746 line);
747}
748
749#else
750
751static int hte_edge_setup(struct line *line, u64 eflags)
752{
753 return 0;
754}
755#endif /* CONFIG_HTE */
756
757static irqreturn_t edge_irq_thread(int irq, void *p)
758{
759 struct line *line = p;
760 struct linereq *lr = line->req;
761 struct gpio_v2_line_event le;
762
763 /* Do not leak kernel stack to userspace */
764 memset(&le, 0, sizeof(le));
765
766 if (line->timestamp_ns) {
767 le.timestamp_ns = line->timestamp_ns;
768 } else {
769 /*
770 * We may be running from a nested threaded interrupt in
771 * which case we didn't get the timestamp from
772 * edge_irq_handler().
773 */
774 le.timestamp_ns = line_event_timestamp(line);
775 if (lr->num_lines != 1)
776 line->req_seqno = atomic_inc_return(&lr->seqno);
777 }
778 line->timestamp_ns = 0;
779
780 switch (READ_ONCE(line->edflags) & GPIO_V2_LINE_EDGE_FLAGS) {
781 case GPIO_V2_LINE_FLAG_EDGE_BOTH:
782 le.id = line_event_id(gpiod_get_value_cansleep(line->desc));
783 break;
784 case GPIO_V2_LINE_FLAG_EDGE_RISING:
785 le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
786 break;
787 case GPIO_V2_LINE_FLAG_EDGE_FALLING:
788 le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
789 break;
790 default:
791 return IRQ_NONE;
792 }
793 line->line_seqno++;
794 le.line_seqno = line->line_seqno;
795 le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
796 le.offset = gpio_chip_hwgpio(line->desc);
797
798 linereq_put_event(lr, &le);
799
800 return IRQ_HANDLED;
801}
802
803static irqreturn_t edge_irq_handler(int irq, void *p)
804{
805 struct line *line = p;
806 struct linereq *lr = line->req;
807
808 /*
809 * Just store the timestamp in hardirq context so we get it as
810 * close in time as possible to the actual event.
811 */
812 line->timestamp_ns = line_event_timestamp(line);
813
814 if (lr->num_lines != 1)
815 line->req_seqno = atomic_inc_return(&lr->seqno);
816
817 return IRQ_WAKE_THREAD;
818}
819
820/*
821 * returns the current debounced logical value.
822 */
823static bool debounced_value(struct line *line)
824{
825 bool value;
826
827 /*
828 * minor race - debouncer may be stopped here, so edge_detector_stop()
829 * must leave the value unchanged so the following will read the level
830 * from when the debouncer was last running.
831 */
832 value = READ_ONCE(line->level);
833
834 if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
835 value = !value;
836
837 return value;
838}
839
840static irqreturn_t debounce_irq_handler(int irq, void *p)
841{
842 struct line *line = p;
843
844 mod_delayed_work(system_wq, &line->work,
845 usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
846
847 return IRQ_HANDLED;
848}
849
850static void debounce_work_func(struct work_struct *work)
851{
852 struct gpio_v2_line_event le;
853 struct line *line = container_of(work, struct line, work.work);
854 struct linereq *lr;
855 u64 eflags, edflags = READ_ONCE(line->edflags);
856 int level = -1;
857#ifdef CONFIG_HTE
858 int diff_seqno;
859
860 if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
861 level = line->raw_level;
862#endif
863 if (level < 0)
864 level = gpiod_get_raw_value_cansleep(line->desc);
865 if (level < 0) {
866 pr_debug_ratelimited("debouncer failed to read line value\n");
867 return;
868 }
869
870 if (READ_ONCE(line->level) == level)
871 return;
872
873 WRITE_ONCE(line->level, level);
874
875 /* -- edge detection -- */
876 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
877 if (!eflags)
878 return;
879
880 /* switch from physical level to logical - if they differ */
881 if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
882 level = !level;
883
884 /* ignore edges that are not being monitored */
885 if (((eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) ||
886 ((eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level))
887 return;
888
889 /* Do not leak kernel stack to userspace */
890 memset(&le, 0, sizeof(le));
891
892 lr = line->req;
893 le.timestamp_ns = line_event_timestamp(line);
894 le.offset = gpio_chip_hwgpio(line->desc);
895#ifdef CONFIG_HTE
896 if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) {
897 /* discard events except the last one */
898 line->total_discard_seq -= 1;
899 diff_seqno = line->last_seqno - line->total_discard_seq -
900 line->line_seqno;
901 line->line_seqno = line->last_seqno - line->total_discard_seq;
902 le.line_seqno = line->line_seqno;
903 le.seqno = (lr->num_lines == 1) ?
904 le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno);
905 } else
906#endif /* CONFIG_HTE */
907 {
908 line->line_seqno++;
909 le.line_seqno = line->line_seqno;
910 le.seqno = (lr->num_lines == 1) ?
911 le.line_seqno : atomic_inc_return(&lr->seqno);
912 }
913
914 le.id = line_event_id(level);
915
916 linereq_put_event(lr, &le);
917}
918
919static int debounce_setup(struct line *line, unsigned int debounce_period_us)
920{
921 unsigned long irqflags;
922 int ret, level, irq;
923 char *label;
924
925 /*
926 * Try hardware. Skip gpiod_set_config() to avoid emitting two
927 * CHANGED_CONFIG line state events.
928 */
929 ret = gpio_do_set_config(line->desc,
930 pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE,
931 debounce_period_us));
932 if (ret != -ENOTSUPP)
933 return ret;
934
935 if (debounce_period_us) {
936 /* setup software debounce */
937 level = gpiod_get_raw_value_cansleep(line->desc);
938 if (level < 0)
939 return level;
940
941 if (!(IS_ENABLED(CONFIG_HTE) &&
942 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
943 irq = gpiod_to_irq(line->desc);
944 if (irq < 0)
945 return -ENXIO;
946
947 label = make_irq_label(line->req->label);
948 if (IS_ERR(label))
949 return -ENOMEM;
950
951 irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
952 ret = request_irq(irq, debounce_irq_handler, irqflags,
953 label, line);
954 if (ret) {
955 free_irq_label(label);
956 return ret;
957 }
958 line->irq = irq;
959 } else {
960 ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
961 if (ret)
962 return ret;
963 }
964
965 WRITE_ONCE(line->level, level);
966 WRITE_ONCE(line->sw_debounced, 1);
967 }
968 return 0;
969}
970
971static bool gpio_v2_line_config_debounced(struct gpio_v2_line_config *lc,
972 unsigned int line_idx)
973{
974 unsigned int i;
975 u64 mask = BIT_ULL(line_idx);
976
977 for (i = 0; i < lc->num_attrs; i++) {
978 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
979 (lc->attrs[i].mask & mask))
980 return true;
981 }
982 return false;
983}
984
985static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
986 unsigned int line_idx)
987{
988 unsigned int i;
989 u64 mask = BIT_ULL(line_idx);
990
991 for (i = 0; i < lc->num_attrs; i++) {
992 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
993 (lc->attrs[i].mask & mask))
994 return lc->attrs[i].attr.debounce_period_us;
995 }
996 return 0;
997}
998
999static void edge_detector_stop(struct line *line)
1000{
1001 if (line->irq) {
1002 free_irq_label(free_irq(line->irq, line));
1003 line->irq = 0;
1004 }
1005
1006#ifdef CONFIG_HTE
1007 if (READ_ONCE(line->edflags) & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
1008 hte_ts_put(&line->hdesc);
1009#endif
1010
1011 cancel_delayed_work_sync(&line->work);
1012 WRITE_ONCE(line->sw_debounced, 0);
1013 WRITE_ONCE(line->edflags, 0);
1014 if (line->desc)
1015 WRITE_ONCE(line->desc->debounce_period_us, 0);
1016 /* do not change line->level - see comment in debounced_value() */
1017}
1018
1019static int edge_detector_fifo_init(struct linereq *req)
1020{
1021 if (kfifo_initialized(&req->events))
1022 return 0;
1023
1024 return kfifo_alloc(&req->events, req->event_buffer_size, GFP_KERNEL);
1025}
1026
1027static int edge_detector_setup(struct line *line,
1028 struct gpio_v2_line_config *lc,
1029 unsigned int line_idx, u64 edflags)
1030{
1031 u32 debounce_period_us;
1032 unsigned long irqflags = 0;
1033 u64 eflags;
1034 int irq, ret;
1035 char *label;
1036
1037 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
1038 if (eflags) {
1039 ret = edge_detector_fifo_init(line->req);
1040 if (ret)
1041 return ret;
1042 }
1043 if (gpio_v2_line_config_debounced(lc, line_idx)) {
1044 debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
1045 ret = debounce_setup(line, debounce_period_us);
1046 if (ret)
1047 return ret;
1048 WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
1049 }
1050
1051 /* detection disabled or sw debouncer will provide edge detection */
1052 if (!eflags || READ_ONCE(line->sw_debounced))
1053 return 0;
1054
1055 if (IS_ENABLED(CONFIG_HTE) &&
1056 (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1057 return hte_edge_setup(line, edflags);
1058
1059 irq = gpiod_to_irq(line->desc);
1060 if (irq < 0)
1061 return -ENXIO;
1062
1063 if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
1064 irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
1065 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
1066 if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
1067 irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
1068 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
1069 irqflags |= IRQF_ONESHOT;
1070
1071 label = make_irq_label(line->req->label);
1072 if (IS_ERR(label))
1073 return PTR_ERR(label);
1074
1075 /* Request a thread to read the events */
1076 ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
1077 irqflags, label, line);
1078 if (ret) {
1079 free_irq_label(label);
1080 return ret;
1081 }
1082
1083 line->irq = irq;
1084 return 0;
1085}
1086
1087static int edge_detector_update(struct line *line,
1088 struct gpio_v2_line_config *lc,
1089 unsigned int line_idx, u64 edflags)
1090{
1091 u64 active_edflags = READ_ONCE(line->edflags);
1092 unsigned int debounce_period_us =
1093 gpio_v2_line_config_debounce_period(lc, line_idx);
1094
1095 if ((active_edflags == edflags) &&
1096 (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
1097 return 0;
1098
1099 /* sw debounced and still will be...*/
1100 if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
1101 WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
1102 /*
1103 * ensure event fifo is initialised if edge detection
1104 * is now enabled.
1105 */
1106 if (edflags & GPIO_V2_LINE_EDGE_FLAGS)
1107 return edge_detector_fifo_init(line->req);
1108
1109 return 0;
1110 }
1111
1112 /* reconfiguring edge detection or sw debounce being disabled */
1113 if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
1114 (active_edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) ||
1115 (!debounce_period_us && READ_ONCE(line->sw_debounced)))
1116 edge_detector_stop(line);
1117
1118 return edge_detector_setup(line, lc, line_idx, edflags);
1119}
1120
1121static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
1122 unsigned int line_idx)
1123{
1124 unsigned int i;
1125 u64 mask = BIT_ULL(line_idx);
1126
1127 for (i = 0; i < lc->num_attrs; i++) {
1128 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_FLAGS) &&
1129 (lc->attrs[i].mask & mask))
1130 return lc->attrs[i].attr.flags;
1131 }
1132 return lc->flags;
1133}
1134
1135static int gpio_v2_line_config_output_value(struct gpio_v2_line_config *lc,
1136 unsigned int line_idx)
1137{
1138 unsigned int i;
1139 u64 mask = BIT_ULL(line_idx);
1140
1141 for (i = 0; i < lc->num_attrs; i++) {
1142 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES) &&
1143 (lc->attrs[i].mask & mask))
1144 return !!(lc->attrs[i].attr.values & mask);
1145 }
1146 return 0;
1147}
1148
1149static int gpio_v2_line_flags_validate(u64 flags)
1150{
1151 /* Return an error if an unknown flag is set */
1152 if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
1153 return -EINVAL;
1154
1155 if (!IS_ENABLED(CONFIG_HTE) &&
1156 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1157 return -EOPNOTSUPP;
1158
1159 /*
1160 * Do not allow both INPUT and OUTPUT flags to be set as they are
1161 * contradictory.
1162 */
1163 if ((flags & GPIO_V2_LINE_FLAG_INPUT) &&
1164 (flags & GPIO_V2_LINE_FLAG_OUTPUT))
1165 return -EINVAL;
1166
1167 /* Only allow one event clock source */
1168 if (IS_ENABLED(CONFIG_HTE) &&
1169 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
1170 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1171 return -EINVAL;
1172
1173 /* Edge detection requires explicit input. */
1174 if ((flags & GPIO_V2_LINE_EDGE_FLAGS) &&
1175 !(flags & GPIO_V2_LINE_FLAG_INPUT))
1176 return -EINVAL;
1177
1178 /*
1179 * Do not allow OPEN_SOURCE and OPEN_DRAIN flags in a single
1180 * request. If the hardware actually supports enabling both at the
1181 * same time the electrical result would be disastrous.
1182 */
1183 if ((flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN) &&
1184 (flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE))
1185 return -EINVAL;
1186
1187 /* Drive requires explicit output direction. */
1188 if ((flags & GPIO_V2_LINE_DRIVE_FLAGS) &&
1189 !(flags & GPIO_V2_LINE_FLAG_OUTPUT))
1190 return -EINVAL;
1191
1192 /* Bias requires explicit direction. */
1193 if ((flags & GPIO_V2_LINE_BIAS_FLAGS) &&
1194 !(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
1195 return -EINVAL;
1196
1197 /* Only one bias flag can be set. */
1198 if (((flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED) &&
1199 (flags & (GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN |
1200 GPIO_V2_LINE_FLAG_BIAS_PULL_UP))) ||
1201 ((flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN) &&
1202 (flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)))
1203 return -EINVAL;
1204
1205 return 0;
1206}
1207
1208static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc,
1209 unsigned int num_lines)
1210{
1211 unsigned int i;
1212 u64 flags;
1213 int ret;
1214
1215 if (lc->num_attrs > GPIO_V2_LINE_NUM_ATTRS_MAX)
1216 return -EINVAL;
1217
1218 if (!mem_is_zero(lc->padding, sizeof(lc->padding)))
1219 return -EINVAL;
1220
1221 for (i = 0; i < num_lines; i++) {
1222 flags = gpio_v2_line_config_flags(lc, i);
1223 ret = gpio_v2_line_flags_validate(flags);
1224 if (ret)
1225 return ret;
1226
1227 /* debounce requires explicit input */
1228 if (gpio_v2_line_config_debounced(lc, i) &&
1229 !(flags & GPIO_V2_LINE_FLAG_INPUT))
1230 return -EINVAL;
1231 }
1232 return 0;
1233}
1234
1235static void gpio_v2_line_config_flags_to_desc_flags(u64 lflags,
1236 unsigned long *flagsp)
1237{
1238 unsigned long flags = READ_ONCE(*flagsp);
1239
1240 assign_bit(FLAG_ACTIVE_LOW, &flags,
1241 lflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);
1242
1243 if (lflags & GPIO_V2_LINE_FLAG_OUTPUT)
1244 set_bit(FLAG_IS_OUT, &flags);
1245 else if (lflags & GPIO_V2_LINE_FLAG_INPUT)
1246 clear_bit(FLAG_IS_OUT, &flags);
1247
1248 assign_bit(FLAG_EDGE_RISING, &flags,
1249 lflags & GPIO_V2_LINE_FLAG_EDGE_RISING);
1250 assign_bit(FLAG_EDGE_FALLING, &flags,
1251 lflags & GPIO_V2_LINE_FLAG_EDGE_FALLING);
1252
1253 assign_bit(FLAG_OPEN_DRAIN, &flags,
1254 lflags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
1255 assign_bit(FLAG_OPEN_SOURCE, &flags,
1256 lflags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);
1257
1258 assign_bit(FLAG_PULL_UP, &flags,
1259 lflags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
1260 assign_bit(FLAG_PULL_DOWN, &flags,
1261 lflags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
1262 assign_bit(FLAG_BIAS_DISABLE, &flags,
1263 lflags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);
1264
1265 assign_bit(FLAG_EVENT_CLOCK_REALTIME, &flags,
1266 lflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
1267 assign_bit(FLAG_EVENT_CLOCK_HTE, &flags,
1268 lflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
1269
1270 WRITE_ONCE(*flagsp, flags);
1271}
1272
1273static long linereq_get_values(struct linereq *lr, void __user *ip)
1274{
1275 struct gpio_v2_line_values lv;
1276 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
1277 struct gpio_desc **descs;
1278 unsigned int i, didx, num_get;
1279 bool val;
1280 int ret;
1281
1282 /* NOTE: It's ok to read values of output lines. */
1283 if (copy_from_user(&lv, ip, sizeof(lv)))
1284 return -EFAULT;
1285
1286 /*
1287 * gpiod_get_array_value_complex() requires compacted desc and val
1288 * arrays, rather than the sparse ones in lv.
1289 * Calculation of num_get and construction of the desc array is
1290 * optimized to avoid allocation for the desc array for the common
1291 * num_get == 1 case.
1292 */
1293 /* scan requested lines to calculate the subset to get */
1294 for (num_get = 0, i = 0; i < lr->num_lines; i++) {
1295 if (lv.mask & BIT_ULL(i)) {
1296 num_get++;
1297 /* capture desc for the num_get == 1 case */
1298 descs = &lr->lines[i].desc;
1299 }
1300 }
1301
1302 if (num_get == 0)
1303 return -EINVAL;
1304
1305 if (num_get != 1) {
1306 /* build compacted desc array */
1307 descs = kmalloc_array(num_get, sizeof(*descs), GFP_KERNEL);
1308 if (!descs)
1309 return -ENOMEM;
1310 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1311 if (lv.mask & BIT_ULL(i)) {
1312 descs[didx] = lr->lines[i].desc;
1313 didx++;
1314 }
1315 }
1316 }
1317 ret = gpiod_get_array_value_complex(false, true, num_get,
1318 descs, NULL, vals);
1319
1320 if (num_get != 1)
1321 kfree(descs);
1322 if (ret)
1323 return ret;
1324
1325 lv.bits = 0;
1326 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1327 /* unpack compacted vals for the response */
1328 if (lv.mask & BIT_ULL(i)) {
1329 if (lr->lines[i].sw_debounced)
1330 val = debounced_value(&lr->lines[i]);
1331 else
1332 val = test_bit(didx, vals);
1333 if (val)
1334 lv.bits |= BIT_ULL(i);
1335 didx++;
1336 }
1337 }
1338
1339 if (copy_to_user(ip, &lv, sizeof(lv)))
1340 return -EFAULT;
1341
1342 return 0;
1343}
1344
1345static long linereq_set_values(struct linereq *lr, void __user *ip)
1346{
1347 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
1348 struct gpio_v2_line_values lv;
1349 struct gpio_desc **descs;
1350 unsigned int i, didx, num_set;
1351 int ret;
1352
1353 if (copy_from_user(&lv, ip, sizeof(lv)))
1354 return -EFAULT;
1355
1356 guard(mutex)(&lr->config_mutex);
1357
1358 /*
1359 * gpiod_set_array_value_complex() requires compacted desc and val
1360 * arrays, rather than the sparse ones in lv.
1361 * Calculation of num_set and construction of the descs and vals arrays
1362 * is optimized to minimize scanning the lv->mask, and to avoid
1363 * allocation for the desc array for the common num_set == 1 case.
1364 */
1365 bitmap_zero(vals, GPIO_V2_LINES_MAX);
1366 /* scan requested lines to determine the subset to be set */
1367 for (num_set = 0, i = 0; i < lr->num_lines; i++) {
1368 if (lv.mask & BIT_ULL(i)) {
1369 /* setting inputs is not allowed */
1370 if (!test_bit(FLAG_IS_OUT, &lr->lines[i].desc->flags))
1371 return -EPERM;
1372 /* add to compacted values */
1373 if (lv.bits & BIT_ULL(i))
1374 __set_bit(num_set, vals);
1375 num_set++;
1376 /* capture desc for the num_set == 1 case */
1377 descs = &lr->lines[i].desc;
1378 }
1379 }
1380 if (num_set == 0)
1381 return -EINVAL;
1382
1383 if (num_set != 1) {
1384 /* build compacted desc array */
1385 descs = kmalloc_array(num_set, sizeof(*descs), GFP_KERNEL);
1386 if (!descs)
1387 return -ENOMEM;
1388 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1389 if (lv.mask & BIT_ULL(i)) {
1390 descs[didx] = lr->lines[i].desc;
1391 didx++;
1392 }
1393 }
1394 }
1395 ret = gpiod_set_array_value_complex(false, true, num_set,
1396 descs, NULL, vals);
1397
1398 if (num_set != 1)
1399 kfree(descs);
1400 return ret;
1401}
1402
1403static long linereq_set_config(struct linereq *lr, void __user *ip)
1404{
1405 struct gpio_v2_line_config lc;
1406 struct gpio_desc *desc;
1407 struct line *line;
1408 unsigned int i;
1409 u64 flags, edflags;
1410 int ret;
1411
1412 if (copy_from_user(&lc, ip, sizeof(lc)))
1413 return -EFAULT;
1414
1415 ret = gpio_v2_line_config_validate(&lc, lr->num_lines);
1416 if (ret)
1417 return ret;
1418
1419 guard(mutex)(&lr->config_mutex);
1420
1421 for (i = 0; i < lr->num_lines; i++) {
1422 line = &lr->lines[i];
1423 desc = lr->lines[i].desc;
1424 flags = gpio_v2_line_config_flags(&lc, i);
1425 /*
1426 * Lines not explicitly reconfigured as input or output
1427 * are left unchanged.
1428 */
1429 if (!(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
1430 continue;
1431 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
1432 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
1433 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
1434 int val = gpio_v2_line_config_output_value(&lc, i);
1435
1436 edge_detector_stop(line);
1437 ret = gpiod_direction_output_nonotify(desc, val);
1438 if (ret)
1439 return ret;
1440 } else {
1441 ret = gpiod_direction_input_nonotify(desc);
1442 if (ret)
1443 return ret;
1444
1445 ret = edge_detector_update(line, &lc, i, edflags);
1446 if (ret)
1447 return ret;
1448 }
1449
1450 WRITE_ONCE(line->edflags, edflags);
1451
1452 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
1453 }
1454 return 0;
1455}
1456
1457static long linereq_ioctl(struct file *file, unsigned int cmd,
1458 unsigned long arg)
1459{
1460 struct linereq *lr = file->private_data;
1461 void __user *ip = (void __user *)arg;
1462
1463 guard(srcu)(&lr->gdev->srcu);
1464
1465 if (!rcu_access_pointer(lr->gdev->chip))
1466 return -ENODEV;
1467
1468 switch (cmd) {
1469 case GPIO_V2_LINE_GET_VALUES_IOCTL:
1470 return linereq_get_values(lr, ip);
1471 case GPIO_V2_LINE_SET_VALUES_IOCTL:
1472 return linereq_set_values(lr, ip);
1473 case GPIO_V2_LINE_SET_CONFIG_IOCTL:
1474 return linereq_set_config(lr, ip);
1475 default:
1476 return -EINVAL;
1477 }
1478}
1479
1480#ifdef CONFIG_COMPAT
1481static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
1482 unsigned long arg)
1483{
1484 return linereq_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1485}
1486#endif
1487
1488static __poll_t linereq_poll(struct file *file,
1489 struct poll_table_struct *wait)
1490{
1491 struct linereq *lr = file->private_data;
1492 __poll_t events = 0;
1493
1494 guard(srcu)(&lr->gdev->srcu);
1495
1496 if (!rcu_access_pointer(lr->gdev->chip))
1497 return EPOLLHUP | EPOLLERR;
1498
1499 poll_wait(file, &lr->wait, wait);
1500
1501 if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events,
1502 &lr->wait.lock))
1503 events = EPOLLIN | EPOLLRDNORM;
1504
1505 return events;
1506}
1507
1508static ssize_t linereq_read(struct file *file, char __user *buf,
1509 size_t count, loff_t *f_ps)
1510{
1511 struct linereq *lr = file->private_data;
1512 struct gpio_v2_line_event le;
1513 ssize_t bytes_read = 0;
1514 int ret;
1515
1516 guard(srcu)(&lr->gdev->srcu);
1517
1518 if (!rcu_access_pointer(lr->gdev->chip))
1519 return -ENODEV;
1520
1521 if (count < sizeof(le))
1522 return -EINVAL;
1523
1524 do {
1525 scoped_guard(spinlock, &lr->wait.lock) {
1526 if (kfifo_is_empty(&lr->events)) {
1527 if (bytes_read)
1528 return bytes_read;
1529
1530 if (file->f_flags & O_NONBLOCK)
1531 return -EAGAIN;
1532
1533 ret = wait_event_interruptible_locked(lr->wait,
1534 !kfifo_is_empty(&lr->events));
1535 if (ret)
1536 return ret;
1537 }
1538
1539 if (kfifo_out(&lr->events, &le, 1) != 1) {
1540 /*
1541 * This should never happen - we hold the
1542 * lock from the moment we learned the fifo
1543 * is no longer empty until now.
1544 */
1545 WARN(1, "failed to read from non-empty kfifo");
1546 return -EIO;
1547 }
1548 }
1549
1550 if (copy_to_user(buf + bytes_read, &le, sizeof(le)))
1551 return -EFAULT;
1552 bytes_read += sizeof(le);
1553 } while (count >= bytes_read + sizeof(le));
1554
1555 return bytes_read;
1556}
1557
1558static void linereq_free(struct linereq *lr)
1559{
1560 unsigned int i;
1561
1562 if (lr->device_unregistered_nb.notifier_call)
1563 blocking_notifier_chain_unregister(&lr->gdev->device_notifier,
1564 &lr->device_unregistered_nb);
1565
1566 for (i = 0; i < lr->num_lines; i++) {
1567 if (lr->lines[i].desc) {
1568 edge_detector_stop(&lr->lines[i]);
1569 gpiod_free(lr->lines[i].desc);
1570 }
1571 }
1572 kfifo_free(&lr->events);
1573 kfree(lr->label);
1574 gpio_device_put(lr->gdev);
1575 kvfree(lr);
1576}
1577
1578static int linereq_release(struct inode *inode, struct file *file)
1579{
1580 struct linereq *lr = file->private_data;
1581
1582 linereq_free(lr);
1583 return 0;
1584}
1585
1586#ifdef CONFIG_PROC_FS
1587static void linereq_show_fdinfo(struct seq_file *out, struct file *file)
1588{
1589 struct linereq *lr = file->private_data;
1590 struct device *dev = &lr->gdev->dev;
1591 u16 i;
1592
1593 seq_printf(out, "gpio-chip:\t%s\n", dev_name(dev));
1594
1595 for (i = 0; i < lr->num_lines; i++)
1596 seq_printf(out, "gpio-line:\t%d\n",
1597 gpio_chip_hwgpio(lr->lines[i].desc));
1598}
1599#endif
1600
1601static const struct file_operations line_fileops = {
1602 .release = linereq_release,
1603 .read = linereq_read,
1604 .poll = linereq_poll,
1605 .owner = THIS_MODULE,
1606 .llseek = noop_llseek,
1607 .unlocked_ioctl = linereq_ioctl,
1608#ifdef CONFIG_COMPAT
1609 .compat_ioctl = linereq_ioctl_compat,
1610#endif
1611#ifdef CONFIG_PROC_FS
1612 .show_fdinfo = linereq_show_fdinfo,
1613#endif
1614};
1615
1616static int linereq_create(struct gpio_device *gdev, void __user *ip)
1617{
1618 struct gpio_v2_line_request ulr;
1619 struct gpio_v2_line_config *lc;
1620 struct linereq *lr;
1621 struct file *file;
1622 u64 flags, edflags;
1623 unsigned int i;
1624 int fd, ret;
1625
1626 if (copy_from_user(&ulr, ip, sizeof(ulr)))
1627 return -EFAULT;
1628
1629 if ((ulr.num_lines == 0) || (ulr.num_lines > GPIO_V2_LINES_MAX))
1630 return -EINVAL;
1631
1632 if (!mem_is_zero(ulr.padding, sizeof(ulr.padding)))
1633 return -EINVAL;
1634
1635 lc = &ulr.config;
1636 ret = gpio_v2_line_config_validate(lc, ulr.num_lines);
1637 if (ret)
1638 return ret;
1639
1640 lr = kvzalloc(struct_size(lr, lines, ulr.num_lines), GFP_KERNEL);
1641 if (!lr)
1642 return -ENOMEM;
1643 lr->num_lines = ulr.num_lines;
1644
1645 lr->gdev = gpio_device_get(gdev);
1646
1647 for (i = 0; i < ulr.num_lines; i++) {
1648 lr->lines[i].req = lr;
1649 WRITE_ONCE(lr->lines[i].sw_debounced, 0);
1650 INIT_DELAYED_WORK(&lr->lines[i].work, debounce_work_func);
1651 }
1652
1653 if (ulr.consumer[0] != '\0') {
1654 /* label is only initialized if consumer is set */
1655 lr->label = kstrndup(ulr.consumer, sizeof(ulr.consumer) - 1,
1656 GFP_KERNEL);
1657 if (!lr->label) {
1658 ret = -ENOMEM;
1659 goto out_free_linereq;
1660 }
1661 }
1662
1663 mutex_init(&lr->config_mutex);
1664 init_waitqueue_head(&lr->wait);
1665 INIT_KFIFO(lr->events);
1666 lr->event_buffer_size = ulr.event_buffer_size;
1667 if (lr->event_buffer_size == 0)
1668 lr->event_buffer_size = ulr.num_lines * 16;
1669 else if (lr->event_buffer_size > GPIO_V2_LINES_MAX * 16)
1670 lr->event_buffer_size = GPIO_V2_LINES_MAX * 16;
1671
1672 atomic_set(&lr->seqno, 0);
1673
1674 /* Request each GPIO */
1675 for (i = 0; i < ulr.num_lines; i++) {
1676 u32 offset = ulr.offsets[i];
1677 struct gpio_desc *desc = gpio_device_get_desc(gdev, offset);
1678
1679 if (IS_ERR(desc)) {
1680 ret = PTR_ERR(desc);
1681 goto out_free_linereq;
1682 }
1683
1684 ret = gpiod_request_user(desc, lr->label);
1685 if (ret)
1686 goto out_free_linereq;
1687
1688 lr->lines[i].desc = desc;
1689 flags = gpio_v2_line_config_flags(lc, i);
1690 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
1691
1692 ret = gpiod_set_transitory(desc, false);
1693 if (ret < 0)
1694 goto out_free_linereq;
1695
1696 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
1697 /*
1698 * Lines have to be requested explicitly for input
1699 * or output, else the line will be treated "as is".
1700 */
1701 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
1702 int val = gpio_v2_line_config_output_value(lc, i);
1703
1704 ret = gpiod_direction_output_nonotify(desc, val);
1705 if (ret)
1706 goto out_free_linereq;
1707 } else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
1708 ret = gpiod_direction_input_nonotify(desc);
1709 if (ret)
1710 goto out_free_linereq;
1711
1712 ret = edge_detector_setup(&lr->lines[i], lc, i,
1713 edflags);
1714 if (ret)
1715 goto out_free_linereq;
1716 }
1717
1718 lr->lines[i].edflags = edflags;
1719
1720 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
1721
1722 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
1723 offset);
1724 }
1725
1726 lr->device_unregistered_nb.notifier_call = linereq_unregistered_notify;
1727 ret = blocking_notifier_chain_register(&gdev->device_notifier,
1728 &lr->device_unregistered_nb);
1729 if (ret)
1730 goto out_free_linereq;
1731
1732 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
1733 if (fd < 0) {
1734 ret = fd;
1735 goto out_free_linereq;
1736 }
1737
1738 file = anon_inode_getfile("gpio-line", &line_fileops, lr,
1739 O_RDONLY | O_CLOEXEC);
1740 if (IS_ERR(file)) {
1741 ret = PTR_ERR(file);
1742 goto out_put_unused_fd;
1743 }
1744
1745 ulr.fd = fd;
1746 if (copy_to_user(ip, &ulr, sizeof(ulr))) {
1747 /*
1748 * fput() will trigger the release() callback, so do not go onto
1749 * the regular error cleanup path here.
1750 */
1751 fput(file);
1752 put_unused_fd(fd);
1753 return -EFAULT;
1754 }
1755
1756 fd_install(fd, file);
1757
1758 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
1759 lr->num_lines);
1760
1761 return 0;
1762
1763out_put_unused_fd:
1764 put_unused_fd(fd);
1765out_free_linereq:
1766 linereq_free(lr);
1767 return ret;
1768}
1769
1770#ifdef CONFIG_GPIO_CDEV_V1
1771
1772/*
1773 * GPIO line event management
1774 */
1775
1776/**
1777 * struct lineevent_state - contains the state of a userspace event
1778 * @gdev: the GPIO device the event pertains to
1779 * @label: consumer label used to tag descriptors
1780 * @desc: the GPIO descriptor held by this event
1781 * @eflags: the event flags this line was requested with
1782 * @irq: the interrupt that trigger in response to events on this GPIO
1783 * @wait: wait queue that handles blocking reads of events
1784 * @device_unregistered_nb: notifier block for receiving gdev unregister events
1785 * @events: KFIFO for the GPIO events
1786 * @timestamp: cache for the timestamp storing it between hardirq
1787 * and IRQ thread, used to bring the timestamp close to the actual
1788 * event
1789 */
1790struct lineevent_state {
1791 struct gpio_device *gdev;
1792 const char *label;
1793 struct gpio_desc *desc;
1794 u32 eflags;
1795 int irq;
1796 wait_queue_head_t wait;
1797 struct notifier_block device_unregistered_nb;
1798 DECLARE_KFIFO(events, struct gpioevent_data, 16);
1799 u64 timestamp;
1800};
1801
1802#define GPIOEVENT_REQUEST_VALID_FLAGS \
1803 (GPIOEVENT_REQUEST_RISING_EDGE | \
1804 GPIOEVENT_REQUEST_FALLING_EDGE)
1805
1806static __poll_t lineevent_poll(struct file *file,
1807 struct poll_table_struct *wait)
1808{
1809 struct lineevent_state *le = file->private_data;
1810 __poll_t events = 0;
1811
1812 guard(srcu)(&le->gdev->srcu);
1813
1814 if (!rcu_access_pointer(le->gdev->chip))
1815 return EPOLLHUP | EPOLLERR;
1816
1817 poll_wait(file, &le->wait, wait);
1818
1819 if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock))
1820 events = EPOLLIN | EPOLLRDNORM;
1821
1822 return events;
1823}
1824
1825static int lineevent_unregistered_notify(struct notifier_block *nb,
1826 unsigned long action, void *data)
1827{
1828 struct lineevent_state *le = container_of(nb, struct lineevent_state,
1829 device_unregistered_nb);
1830
1831 wake_up_poll(&le->wait, EPOLLIN | EPOLLERR);
1832
1833 return NOTIFY_OK;
1834}
1835
1836struct compat_gpioeevent_data {
1837 compat_u64 timestamp;
1838 u32 id;
1839};
1840
1841static ssize_t lineevent_read(struct file *file, char __user *buf,
1842 size_t count, loff_t *f_ps)
1843{
1844 struct lineevent_state *le = file->private_data;
1845 struct gpioevent_data ge;
1846 ssize_t bytes_read = 0;
1847 ssize_t ge_size;
1848 int ret;
1849
1850 guard(srcu)(&le->gdev->srcu);
1851
1852 if (!rcu_access_pointer(le->gdev->chip))
1853 return -ENODEV;
1854
1855 /*
1856 * When compatible system call is being used the struct gpioevent_data,
1857 * in case of at least ia32, has different size due to the alignment
1858 * differences. Because we have first member 64 bits followed by one of
1859 * 32 bits there is no gap between them. The only difference is the
1860 * padding at the end of the data structure. Hence, we calculate the
1861 * actual sizeof() and pass this as an argument to copy_to_user() to
1862 * drop unneeded bytes from the output.
1863 */
1864 if (compat_need_64bit_alignment_fixup())
1865 ge_size = sizeof(struct compat_gpioeevent_data);
1866 else
1867 ge_size = sizeof(struct gpioevent_data);
1868 if (count < ge_size)
1869 return -EINVAL;
1870
1871 do {
1872 scoped_guard(spinlock, &le->wait.lock) {
1873 if (kfifo_is_empty(&le->events)) {
1874 if (bytes_read)
1875 return bytes_read;
1876
1877 if (file->f_flags & O_NONBLOCK)
1878 return -EAGAIN;
1879
1880 ret = wait_event_interruptible_locked(le->wait,
1881 !kfifo_is_empty(&le->events));
1882 if (ret)
1883 return ret;
1884 }
1885
1886 if (kfifo_out(&le->events, &ge, 1) != 1) {
1887 /*
1888 * This should never happen - we hold the
1889 * lock from the moment we learned the fifo
1890 * is no longer empty until now.
1891 */
1892 WARN(1, "failed to read from non-empty kfifo");
1893 return -EIO;
1894 }
1895 }
1896
1897 if (copy_to_user(buf + bytes_read, &ge, ge_size))
1898 return -EFAULT;
1899 bytes_read += ge_size;
1900 } while (count >= bytes_read + ge_size);
1901
1902 return bytes_read;
1903}
1904
1905static void lineevent_free(struct lineevent_state *le)
1906{
1907 if (le->device_unregistered_nb.notifier_call)
1908 blocking_notifier_chain_unregister(&le->gdev->device_notifier,
1909 &le->device_unregistered_nb);
1910 if (le->irq)
1911 free_irq_label(free_irq(le->irq, le));
1912 if (le->desc)
1913 gpiod_free(le->desc);
1914 kfree(le->label);
1915 gpio_device_put(le->gdev);
1916 kfree(le);
1917}
1918
1919static int lineevent_release(struct inode *inode, struct file *file)
1920{
1921 lineevent_free(file->private_data);
1922 return 0;
1923}
1924
1925static long lineevent_ioctl(struct file *file, unsigned int cmd,
1926 unsigned long arg)
1927{
1928 struct lineevent_state *le = file->private_data;
1929 void __user *ip = (void __user *)arg;
1930 struct gpiohandle_data ghd;
1931
1932 guard(srcu)(&le->gdev->srcu);
1933
1934 if (!rcu_access_pointer(le->gdev->chip))
1935 return -ENODEV;
1936
1937 /*
1938 * We can get the value for an event line but not set it,
1939 * because it is input by definition.
1940 */
1941 if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
1942 int val;
1943
1944 memset(&ghd, 0, sizeof(ghd));
1945
1946 val = gpiod_get_value_cansleep(le->desc);
1947 if (val < 0)
1948 return val;
1949 ghd.values[0] = val;
1950
1951 if (copy_to_user(ip, &ghd, sizeof(ghd)))
1952 return -EFAULT;
1953
1954 return 0;
1955 }
1956 return -EINVAL;
1957}
1958
1959#ifdef CONFIG_COMPAT
1960static long lineevent_ioctl_compat(struct file *file, unsigned int cmd,
1961 unsigned long arg)
1962{
1963 return lineevent_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1964}
1965#endif
1966
1967static const struct file_operations lineevent_fileops = {
1968 .release = lineevent_release,
1969 .read = lineevent_read,
1970 .poll = lineevent_poll,
1971 .owner = THIS_MODULE,
1972 .llseek = noop_llseek,
1973 .unlocked_ioctl = lineevent_ioctl,
1974#ifdef CONFIG_COMPAT
1975 .compat_ioctl = lineevent_ioctl_compat,
1976#endif
1977};
1978
1979static irqreturn_t lineevent_irq_thread(int irq, void *p)
1980{
1981 struct lineevent_state *le = p;
1982 struct gpioevent_data ge;
1983 int ret;
1984
1985 /* Do not leak kernel stack to userspace */
1986 memset(&ge, 0, sizeof(ge));
1987
1988 /*
1989 * We may be running from a nested threaded interrupt in which case
1990 * we didn't get the timestamp from lineevent_irq_handler().
1991 */
1992 if (!le->timestamp)
1993 ge.timestamp = ktime_get_ns();
1994 else
1995 ge.timestamp = le->timestamp;
1996
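	/*
	 * When both edges are requested the interrupt alone does not tell
	 * us which edge fired, so sample the current (logical) level to
	 * pick the event to emit.
	 */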
1997 if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
1998 && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
1999 int level = gpiod_get_value_cansleep(le->desc);
2000
2001 if (level)
2002 /* Emit low-to-high event */
2003 ge.id = GPIOEVENT_EVENT_RISING_EDGE;
2004 else
2005 /* Emit high-to-low event */
2006 ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
2007 } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
2008 /* Emit low-to-high event */
2009 ge.id = GPIOEVENT_EVENT_RISING_EDGE;
2010 } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
2011 /* Emit high-to-low event */
2012 ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
2013 } else {
2014 return IRQ_NONE;
2015 }
2016
2017 ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge,
2018 1, &le->wait.lock);
2019 if (ret)
2020 wake_up_poll(&le->wait, EPOLLIN);
2021 else
2022 pr_debug_ratelimited("event FIFO is full - event dropped\n");
2023
2024 return IRQ_HANDLED;
2025}
2026
2027static irqreturn_t lineevent_irq_handler(int irq, void *p)
2028{
2029 struct lineevent_state *le = p;
2030
2031 /*
2032 * Just store the timestamp in hardirq context so we get it as
2033 * close in time as possible to the actual event.
2034 */
2035 le->timestamp = ktime_get_ns();
2036
2037 return IRQ_WAKE_THREAD;
2038}
2039
2040static int lineevent_create(struct gpio_device *gdev, void __user *ip)
2041{
2042 struct gpioevent_request eventreq;
2043 struct lineevent_state *le;
2044 struct gpio_desc *desc;
2045 struct file *file;
2046 u32 offset;
2047 u32 lflags;
2048 u32 eflags;
2049 int fd;
2050 int ret;
2051 int irq, irqflags = 0;
2052 char *label;
2053
2054 if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
2055 return -EFAULT;
2056
2057 offset = eventreq.lineoffset;
2058 lflags = eventreq.handleflags;
2059 eflags = eventreq.eventflags;
2060
2061 desc = gpio_device_get_desc(gdev, offset);
2062 if (IS_ERR(desc))
2063 return PTR_ERR(desc);
2064
2065	/* Return an error if an unknown flag is set */
2066 if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
2067 (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS))
2068 return -EINVAL;
2069
2070 /* This is just wrong: we don't look for events on output lines */
2071 if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
2072 (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
2073 (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
2074 return -EINVAL;
2075
2076 /* Only one bias flag can be set. */
2077 if (((lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
2078 (lflags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
2079 GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
2080 ((lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
2081 (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
2082 return -EINVAL;
2083
2084 le = kzalloc(sizeof(*le), GFP_KERNEL);
2085 if (!le)
2086 return -ENOMEM;
2087 le->gdev = gpio_device_get(gdev);
2088
2089 if (eventreq.consumer_label[0] != '\0') {
2090 /* label is only initialized if consumer_label is set */
2091 le->label = kstrndup(eventreq.consumer_label,
2092 sizeof(eventreq.consumer_label) - 1,
2093 GFP_KERNEL);
2094 if (!le->label) {
2095 ret = -ENOMEM;
2096 goto out_free_le;
2097 }
2098 }
2099
2100 ret = gpiod_request_user(desc, le->label);
2101 if (ret)
2102 goto out_free_le;
2103 le->desc = desc;
2104 le->eflags = eflags;
2105
2106 linehandle_flags_to_desc_flags(lflags, &desc->flags);
2107
2108 ret = gpiod_direction_input(desc);
2109 if (ret)
2110 goto out_free_le;
2111
2112 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
2113
2114 irq = gpiod_to_irq(desc);
2115 if (irq <= 0) {
2116 ret = -ENODEV;
2117 goto out_free_le;
2118 }
2119
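	/*
	 * Event flags are expressed in logical polarity, so swap the
	 * hardware trigger edges for active-low lines.
	 */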
2120 if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
2121 irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
2122 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
2123 if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
2124 irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
2125 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
2126 irqflags |= IRQF_ONESHOT;
2127
2128 INIT_KFIFO(le->events);
2129 init_waitqueue_head(&le->wait);
2130
2131 le->device_unregistered_nb.notifier_call = lineevent_unregistered_notify;
2132 ret = blocking_notifier_chain_register(&gdev->device_notifier,
2133 &le->device_unregistered_nb);
2134 if (ret)
2135 goto out_free_le;
2136
2137 label = make_irq_label(le->label);
2138 if (IS_ERR(label)) {
2139 ret = PTR_ERR(label);
2140 goto out_free_le;
2141 }
2142
2143 /* Request a thread to read the events */
2144 ret = request_threaded_irq(irq,
2145 lineevent_irq_handler,
2146 lineevent_irq_thread,
2147 irqflags,
2148 label,
2149 le);
2150 if (ret) {
2151 free_irq_label(label);
2152 goto out_free_le;
2153 }
2154
2155 le->irq = irq;
2156
2157 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
2158 if (fd < 0) {
2159 ret = fd;
2160 goto out_free_le;
2161 }
2162
2163 file = anon_inode_getfile("gpio-event",
2164 &lineevent_fileops,
2165 le,
2166 O_RDONLY | O_CLOEXEC);
2167 if (IS_ERR(file)) {
2168 ret = PTR_ERR(file);
2169 goto out_put_unused_fd;
2170 }
2171
2172 eventreq.fd = fd;
2173 if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
2174 /*
2175 * fput() will trigger the release() callback, so do not go onto
2176 * the regular error cleanup path here.
2177 */
2178 fput(file);
2179 put_unused_fd(fd);
2180 return -EFAULT;
2181 }
2182
2183 fd_install(fd, file);
2184
2185 return 0;
2186
2187out_put_unused_fd:
2188 put_unused_fd(fd);
2189out_free_le:
2190 lineevent_free(le);
2191 return ret;
2192}
2193
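/*
 * Translate a v2 line info struct into its v1 equivalent. v2-only state
 * (edge detection, event clock and debounce attributes) has no v1
 * counterpart and is simply not reported here.
 */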
2194static void gpio_v2_line_info_to_v1(struct gpio_v2_line_info *info_v2,
2195 struct gpioline_info *info_v1)
2196{
2197 u64 flagsv2 = info_v2->flags;
2198
2199 memcpy(info_v1->name, info_v2->name, sizeof(info_v1->name));
2200 memcpy(info_v1->consumer, info_v2->consumer, sizeof(info_v1->consumer));
2201 info_v1->line_offset = info_v2->offset;
2202 info_v1->flags = 0;
2203
2204 if (flagsv2 & GPIO_V2_LINE_FLAG_USED)
2205 info_v1->flags |= GPIOLINE_FLAG_KERNEL;
2206
2207 if (flagsv2 & GPIO_V2_LINE_FLAG_OUTPUT)
2208 info_v1->flags |= GPIOLINE_FLAG_IS_OUT;
2209
2210 if (flagsv2 & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
2211 info_v1->flags |= GPIOLINE_FLAG_ACTIVE_LOW;
2212
2213 if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_DRAIN)
2214 info_v1->flags |= GPIOLINE_FLAG_OPEN_DRAIN;
2215 if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_SOURCE)
2216 info_v1->flags |= GPIOLINE_FLAG_OPEN_SOURCE;
2217
2218 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)
2219 info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
2220 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN)
2221 info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
2222 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_DISABLED)
2223 info_v1->flags |= GPIOLINE_FLAG_BIAS_DISABLE;
2224}
2225
2226static void gpio_v2_line_info_changed_to_v1(
2227 struct gpio_v2_line_info_changed *lic_v2,
2228 struct gpioline_info_changed *lic_v1)
2229{
2230 memset(lic_v1, 0, sizeof(*lic_v1));
2231 gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
2232 lic_v1->timestamp = lic_v2->timestamp_ns;
2233 lic_v1->event_type = lic_v2->event_type;
2234}
2235
2236#endif /* CONFIG_GPIO_CDEV_V1 */
2237
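/*
 * Populate @info from @desc. If @atomic is set the caller cannot sleep,
 * so the (potentially sleeping) pinctrl check for the USED flag is
 * skipped here and deferred to the workqueue - see lineinfo_changed_func().
 */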
2238static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
2239 struct gpio_v2_line_info *info, bool atomic)
2240{
2241 u32 debounce_period_us;
2242 unsigned long dflags;
2243 const char *label;
2244
2245 CLASS(gpio_chip_guard, guard)(desc);
2246 if (!guard.gc)
2247 return;
2248
2249 memset(info, 0, sizeof(*info));
2250 info->offset = gpio_chip_hwgpio(desc);
2251
2252 if (desc->name)
2253 strscpy(info->name, desc->name, sizeof(info->name));
2254
2255 dflags = READ_ONCE(desc->flags);
2256
2257 scoped_guard(srcu, &desc->gdev->desc_srcu) {
2258 label = gpiod_get_label(desc);
2259 if (label && test_bit(FLAG_REQUESTED, &dflags))
2260 strscpy(info->consumer, label,
2261 sizeof(info->consumer));
2262 }
2263
2264 /*
2265	 * Userspace only needs to know that the kernel is using this GPIO
2266	 * so it can't use it.
2267	 * The calculation of the used flag is slightly racy, as it may read
2268	 * desc, gc and pinctrl state without a lock covering all three at
2269	 * once. Worst case, if the line is in transition and the calculation
2270	 * is inconsistent, it looks to the user like they performed the read
2271	 * on the other side of the transition - but that can always
2272	 * happen.
2273 * The definitive test that a line is available to userspace is to
2274 * request it.
2275 */
2276 if (test_bit(FLAG_REQUESTED, &dflags) ||
2277 test_bit(FLAG_IS_HOGGED, &dflags) ||
2278 test_bit(FLAG_EXPORT, &dflags) ||
2279 test_bit(FLAG_SYSFS, &dflags) ||
2280 !gpiochip_line_is_valid(guard.gc, info->offset)) {
2281 info->flags |= GPIO_V2_LINE_FLAG_USED;
2282 } else if (!atomic) {
2283 if (!pinctrl_gpio_can_use_line(guard.gc, info->offset))
2284 info->flags |= GPIO_V2_LINE_FLAG_USED;
2285 }
2286
2287 if (test_bit(FLAG_IS_OUT, &dflags))
2288 info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
2289 else
2290 info->flags |= GPIO_V2_LINE_FLAG_INPUT;
2291
2292 if (test_bit(FLAG_ACTIVE_LOW, &dflags))
2293 info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;
2294
2295 if (test_bit(FLAG_OPEN_DRAIN, &dflags))
2296 info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
2297 if (test_bit(FLAG_OPEN_SOURCE, &dflags))
2298 info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;
2299
2300 if (test_bit(FLAG_BIAS_DISABLE, &dflags))
2301 info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
2302 if (test_bit(FLAG_PULL_DOWN, &dflags))
2303 info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
2304 if (test_bit(FLAG_PULL_UP, &dflags))
2305 info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;
2306
2307 if (test_bit(FLAG_EDGE_RISING, &dflags))
2308 info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
2309 if (test_bit(FLAG_EDGE_FALLING, &dflags))
2310 info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;
2311
2312 if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &dflags))
2313 info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
2314 else if (test_bit(FLAG_EVENT_CLOCK_HTE, &dflags))
2315 info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
2316
2317 debounce_period_us = READ_ONCE(desc->debounce_period_us);
2318 if (debounce_period_us) {
2319 info->attrs[info->num_attrs].id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
2320 info->attrs[info->num_attrs].debounce_period_us =
2321 debounce_period_us;
2322 info->num_attrs++;
2323 }
2324}
2325
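/**
 * struct gpio_chardev_data - the state of an open /dev/gpiochipN file
 * @gdev: the GPIO device the chardev belongs to
 * @wait: wait queue for blocking reads of line info change events
 * @events: KFIFO of queued line info change events
 * @lineinfo_changed_nb: notifier block hooked to the line state notifier
 * @device_unregistered_nb: notifier block for device unregistration
 * @watched_lines: bitmap of line offsets watched for info changes
 * @watch_abi_version: the uAPI version (1 or 2) selected by the first watch
 * request (CONFIG_GPIO_CDEV_V1 only)
 * @fp: the open file itself, pinned while change events are in flight
 */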
2326struct gpio_chardev_data {
2327 struct gpio_device *gdev;
2328 wait_queue_head_t wait;
2329 DECLARE_KFIFO(events, struct gpio_v2_line_info_changed, 32);
2330 struct notifier_block lineinfo_changed_nb;
2331 struct notifier_block device_unregistered_nb;
2332 unsigned long *watched_lines;
2333#ifdef CONFIG_GPIO_CDEV_V1
2334 atomic_t watch_abi_version;
2335#endif
2336 struct file *fp;
2337};
2338
2339static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
2340{
2341 struct gpio_device *gdev = cdev->gdev;
2342 struct gpiochip_info chipinfo;
2343
2344 memset(&chipinfo, 0, sizeof(chipinfo));
2345
2346 strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
2347 strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
2348 chipinfo.lines = gdev->ngpio;
2349 if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
2350 return -EFAULT;
2351 return 0;
2352}
2353
2354#ifdef CONFIG_GPIO_CDEV_V1
2355/*
2356 * sets the watch ABI version if not already set; returns 0 if the
2357 * versions then match, else the previously selected ABI version
 */
2358static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
2359 unsigned int version)
2360{
2361 int abiv = atomic_cmpxchg(&cdata->watch_abi_version, 0, version);
2362
2363 if (abiv == version)
2364 return 0;
2365
2366 return abiv;
2367}
2368
2369static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
2370 bool watch)
2371{
2372 struct gpio_desc *desc;
2373 struct gpioline_info lineinfo;
2374 struct gpio_v2_line_info lineinfo_v2;
2375
2376 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
2377 return -EFAULT;
2378
2379 /* this doubles as a range check on line_offset */
2380 desc = gpio_device_get_desc(cdev->gdev, lineinfo.line_offset);
2381 if (IS_ERR(desc))
2382 return PTR_ERR(desc);
2383
2384 if (watch) {
2385 if (lineinfo_ensure_abi_version(cdev, 1))
2386 return -EPERM;
2387
2388 if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
2389 return -EBUSY;
2390 }
2391
2392 gpio_desc_to_lineinfo(desc, &lineinfo_v2, false);
2393 gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
2394
2395 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
2396 if (watch)
2397 clear_bit(lineinfo.line_offset, cdev->watched_lines);
2398 return -EFAULT;
2399 }
2400
2401 return 0;
2402}
2403#endif
2404
2405static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
2406 bool watch)
2407{
2408 struct gpio_desc *desc;
2409 struct gpio_v2_line_info lineinfo;
2410
2411 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
2412 return -EFAULT;
2413
2414 if (!mem_is_zero(lineinfo.padding, sizeof(lineinfo.padding)))
2415 return -EINVAL;
2416
2417 desc = gpio_device_get_desc(cdev->gdev, lineinfo.offset);
2418 if (IS_ERR(desc))
2419 return PTR_ERR(desc);
2420
2421 if (watch) {
2422#ifdef CONFIG_GPIO_CDEV_V1
2423 if (lineinfo_ensure_abi_version(cdev, 2))
2424 return -EPERM;
2425#endif
2426 if (test_and_set_bit(lineinfo.offset, cdev->watched_lines))
2427 return -EBUSY;
2428 }
2429 gpio_desc_to_lineinfo(desc, &lineinfo, false);
2430
2431 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
2432 if (watch)
2433 clear_bit(lineinfo.offset, cdev->watched_lines);
2434 return -EFAULT;
2435 }
2436
2437 return 0;
2438}
2439
2440static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
2441{
2442 __u32 offset;
2443
2444 if (copy_from_user(&offset, ip, sizeof(offset)))
2445 return -EFAULT;
2446
2447 if (offset >= cdev->gdev->ngpio)
2448 return -EINVAL;
2449
2450 if (!test_and_clear_bit(offset, cdev->watched_lines))
2451 return -EBUSY;
2452
2453 return 0;
2454}
2455
2456/*
2457 * gpio_ioctl() - ioctl handler for the GPIO chardev
2458 */
2459static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2460{
2461 struct gpio_chardev_data *cdev = file->private_data;
2462 struct gpio_device *gdev = cdev->gdev;
2463 void __user *ip = (void __user *)arg;
2464
2465 guard(srcu)(&gdev->srcu);
2466
2467	/* We fail any subsequent ioctl()s when the chip is gone */
2468 if (!rcu_access_pointer(gdev->chip))
2469 return -ENODEV;
2470
2471 /* Fill in the struct and pass to userspace */
2472 switch (cmd) {
2473 case GPIO_GET_CHIPINFO_IOCTL:
2474 return chipinfo_get(cdev, ip);
2475#ifdef CONFIG_GPIO_CDEV_V1
2476 case GPIO_GET_LINEHANDLE_IOCTL:
2477 return linehandle_create(gdev, ip);
2478 case GPIO_GET_LINEEVENT_IOCTL:
2479 return lineevent_create(gdev, ip);
2480 case GPIO_GET_LINEINFO_IOCTL:
2481 return lineinfo_get_v1(cdev, ip, false);
2482 case GPIO_GET_LINEINFO_WATCH_IOCTL:
2483 return lineinfo_get_v1(cdev, ip, true);
2484#endif /* CONFIG_GPIO_CDEV_V1 */
2485 case GPIO_V2_GET_LINEINFO_IOCTL:
2486 return lineinfo_get(cdev, ip, false);
2487 case GPIO_V2_GET_LINEINFO_WATCH_IOCTL:
2488 return lineinfo_get(cdev, ip, true);
2489 case GPIO_V2_GET_LINE_IOCTL:
2490 return linereq_create(gdev, ip);
2491 case GPIO_GET_LINEINFO_UNWATCH_IOCTL:
2492 return lineinfo_unwatch(cdev, ip);
2493 default:
2494 return -EINVAL;
2495 }
2496}
2497
2498#ifdef CONFIG_COMPAT
2499static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
2500 unsigned long arg)
2501{
2502 return gpio_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
2503}
2504#endif
2505
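/*
 * Context for deferring a line info change event from (possibly atomic)
 * notifier context to the line_state_wq workqueue, where the sleeping
 * pinctrl check is done and the event is queued for the reader.
 */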
2506struct lineinfo_changed_ctx {
2507 struct work_struct work;
2508 struct gpio_v2_line_info_changed chg;
2509 struct gpio_device *gdev;
2510 struct gpio_chardev_data *cdev;
2511};
2512
2513static void lineinfo_changed_func(struct work_struct *work)
2514{
2515 struct lineinfo_changed_ctx *ctx =
2516 container_of(work, struct lineinfo_changed_ctx, work);
2517 struct gpio_chip *gc;
2518 int ret;
2519
2520 if (!(ctx->chg.info.flags & GPIO_V2_LINE_FLAG_USED)) {
2521 /*
2522		 * If nobody set the USED flag earlier, check with pinctrl now.
2523		 * We do this late because pinctrl_gpio_can_use_line() may sleep.
2524		 * Pin functions are in general much more static and, while this
2525		 * is not 100% bullet-proof, it's good enough for most cases.
2526 */
2527 scoped_guard(srcu, &ctx->gdev->srcu) {
2528 gc = srcu_dereference(ctx->gdev->chip, &ctx->gdev->srcu);
2529 if (gc &&
2530 !pinctrl_gpio_can_use_line(gc, ctx->chg.info.offset))
2531 ctx->chg.info.flags |= GPIO_V2_LINE_FLAG_USED;
2532 }
2533 }
2534
2535 ret = kfifo_in_spinlocked(&ctx->cdev->events, &ctx->chg, 1,
2536 &ctx->cdev->wait.lock);
2537 if (ret)
2538 wake_up_poll(&ctx->cdev->wait, EPOLLIN);
2539 else
2540 pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");
2541
2542 gpio_device_put(ctx->gdev);
2543 fput(ctx->cdev->fp);
2544 kfree(ctx);
2545}
2546
2547static int lineinfo_changed_notify(struct notifier_block *nb,
2548 unsigned long action, void *data)
2549{
2550 struct gpio_chardev_data *cdev =
2551 container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
2552 struct lineinfo_changed_ctx *ctx;
2553 struct gpio_desc *desc = data;
2554
2555 if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
2556 return NOTIFY_DONE;
2557
2558 /*
2559 * If this is called from atomic context (for instance: with a spinlock
2560 * taken by the atomic notifier chain), any sleeping calls must be done
2561 * outside of this function in process context of the dedicated
2562	 * outside of this function, in process context, on the dedicated
2563	 * workqueue.
2564 * Let's gather as much info as possible from the descriptor and
2565 * postpone just the call to pinctrl_gpio_can_use_line() until the work
2566 * is executed.
2567 */
2568
2569 ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
2570 if (!ctx) {
2571 pr_err("Failed to allocate memory for line info notification\n");
2572 return NOTIFY_DONE;
2573 }
2574
2575 ctx->chg.event_type = action;
2576 ctx->chg.timestamp_ns = ktime_get_ns();
2577 gpio_desc_to_lineinfo(desc, &ctx->chg.info, true);
2578 /* Keep the GPIO device alive until we emit the event. */
2579 ctx->gdev = gpio_device_get(desc->gdev);
2580 ctx->cdev = cdev;
2581 /* Keep the file descriptor alive too. */
2582 get_file(ctx->cdev->fp);
2583
2584 INIT_WORK(&ctx->work, lineinfo_changed_func);
2585 queue_work(ctx->gdev->line_state_wq, &ctx->work);
2586
2587 return NOTIFY_OK;
2588}
2589
2590static int gpio_device_unregistered_notify(struct notifier_block *nb,
2591 unsigned long action, void *data)
2592{
2593 struct gpio_chardev_data *cdev = container_of(nb,
2594 struct gpio_chardev_data,
2595 device_unregistered_nb);
2596
2597 wake_up_poll(&cdev->wait, EPOLLIN | EPOLLERR);
2598
2599 return NOTIFY_OK;
2600}
2601
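/*
 * Poll support for the chardev itself: EPOLLIN when line info change
 * events are queued, EPOLLHUP | EPOLLERR once the chip is gone.
 */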
2602static __poll_t lineinfo_watch_poll(struct file *file,
2603 struct poll_table_struct *pollt)
2604{
2605 struct gpio_chardev_data *cdev = file->private_data;
2606 __poll_t events = 0;
2607
2608 guard(srcu)(&cdev->gdev->srcu);
2609
2610 if (!rcu_access_pointer(cdev->gdev->chip))
2611 return EPOLLHUP | EPOLLERR;
2612
2613 poll_wait(file, &cdev->wait, pollt);
2614
2615 if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events,
2616 &cdev->wait.lock))
2617 events = EPOLLIN | EPOLLRDNORM;
2618
2619 return events;
2620}
2621
2622static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
2623 size_t count, loff_t *off)
2624{
2625 struct gpio_chardev_data *cdev = file->private_data;
2626 struct gpio_v2_line_info_changed event;
2627 ssize_t bytes_read = 0;
2628 int ret;
2629 size_t event_size;
2630
2631 guard(srcu)(&cdev->gdev->srcu);
2632
2633 if (!rcu_access_pointer(cdev->gdev->chip))
2634 return -ENODEV;
2635
2636#ifndef CONFIG_GPIO_CDEV_V1
2637 event_size = sizeof(struct gpio_v2_line_info_changed);
2638 if (count < event_size)
2639 return -EINVAL;
2640#endif
2641
2642 do {
2643 scoped_guard(spinlock, &cdev->wait.lock) {
2644 if (kfifo_is_empty(&cdev->events)) {
2645 if (bytes_read)
2646 return bytes_read;
2647
2648 if (file->f_flags & O_NONBLOCK)
2649 return -EAGAIN;
2650
2651 ret = wait_event_interruptible_locked(cdev->wait,
2652 !kfifo_is_empty(&cdev->events));
2653 if (ret)
2654 return ret;
2655 }
2656#ifdef CONFIG_GPIO_CDEV_V1
2657 /* must be after kfifo check so watch_abi_version is set */
2658 if (atomic_read(&cdev->watch_abi_version) == 2)
2659 event_size = sizeof(struct gpio_v2_line_info_changed);
2660 else
2661 event_size = sizeof(struct gpioline_info_changed);
2662 if (count < event_size)
2663 return -EINVAL;
2664#endif
2665 if (kfifo_out(&cdev->events, &event, 1) != 1) {
2666 /*
2667 * This should never happen - we hold the
2668 * lock from the moment we learned the fifo
2669 * is no longer empty until now.
2670 */
2671 WARN(1, "failed to read from non-empty kfifo");
2672 return -EIO;
2673 }
2674 }
2675
2676#ifdef CONFIG_GPIO_CDEV_V1
2677 if (event_size == sizeof(struct gpio_v2_line_info_changed)) {
2678 if (copy_to_user(buf + bytes_read, &event, event_size))
2679 return -EFAULT;
2680 } else {
2681 struct gpioline_info_changed event_v1;
2682
2683 gpio_v2_line_info_changed_to_v1(&event, &event_v1);
2684 if (copy_to_user(buf + bytes_read, &event_v1,
2685 event_size))
2686 return -EFAULT;
2687 }
2688#else
2689 if (copy_to_user(buf + bytes_read, &event, event_size))
2690 return -EFAULT;
2691#endif
2692 bytes_read += event_size;
2693 } while (count >= bytes_read + sizeof(event));
2694
2695 return bytes_read;
2696}
2697
2698/**
2699 * gpio_chrdev_open() - open the chardev for ioctl operations
2700 * @inode: inode for this chardev
2701 * @file: file struct for storing private data
2702 *
2703 * Returns:
2704 * 0 on success, or negative errno on failure.
2705 */
2706static int gpio_chrdev_open(struct inode *inode, struct file *file)
2707{
2708 struct gpio_device *gdev = container_of(inode->i_cdev,
2709 struct gpio_device, chrdev);
2710 struct gpio_chardev_data *cdev;
2711 int ret = -ENOMEM;
2712
2713 guard(srcu)(&gdev->srcu);
2714
2715 /* Fail on open if the backing gpiochip is gone */
2716 if (!rcu_access_pointer(gdev->chip))
2717 return -ENODEV;
2718
2719 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
2720 if (!cdev)
2721		return -ENOMEM;
2722
2723 cdev->watched_lines = bitmap_zalloc(gdev->ngpio, GFP_KERNEL);
2724 if (!cdev->watched_lines)
2725 goto out_free_cdev;
2726
2727 init_waitqueue_head(&cdev->wait);
2728 INIT_KFIFO(cdev->events);
2729 cdev->gdev = gpio_device_get(gdev);
2730
2731 cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
2732 ret = atomic_notifier_chain_register(&gdev->line_state_notifier,
2733 &cdev->lineinfo_changed_nb);
2734 if (ret)
2735 goto out_free_bitmap;
2736
2737 cdev->device_unregistered_nb.notifier_call =
2738 gpio_device_unregistered_notify;
2739 ret = blocking_notifier_chain_register(&gdev->device_notifier,
2740 &cdev->device_unregistered_nb);
2741 if (ret)
2742 goto out_unregister_line_notifier;
2743
2744 file->private_data = cdev;
2745 cdev->fp = file;
2746
2747 ret = nonseekable_open(inode, file);
2748 if (ret)
2749 goto out_unregister_device_notifier;
2750
2751 return ret;
2752
2753out_unregister_device_notifier:
2754 blocking_notifier_chain_unregister(&gdev->device_notifier,
2755 &cdev->device_unregistered_nb);
2756out_unregister_line_notifier:
2757 atomic_notifier_chain_unregister(&gdev->line_state_notifier,
2758 &cdev->lineinfo_changed_nb);
2759out_free_bitmap:
2760 gpio_device_put(gdev);
2761 bitmap_free(cdev->watched_lines);
2762out_free_cdev:
2763 kfree(cdev);
2764 return ret;
2765}
2766
2767/**
2768 * gpio_chrdev_release() - close chardev after ioctl operations
2769 * @inode: inode for this chardev
2770 * @file: file struct for storing private data
2771 *
2772 * Returns:
2773 * 0 on success, or negative errno on failure.
2774 */
2775static int gpio_chrdev_release(struct inode *inode, struct file *file)
2776{
2777 struct gpio_chardev_data *cdev = file->private_data;
2778 struct gpio_device *gdev = cdev->gdev;
2779
2780 blocking_notifier_chain_unregister(&gdev->device_notifier,
2781 &cdev->device_unregistered_nb);
2782 atomic_notifier_chain_unregister(&gdev->line_state_notifier,
2783 &cdev->lineinfo_changed_nb);
2784 bitmap_free(cdev->watched_lines);
2785 gpio_device_put(gdev);
2786 kfree(cdev);
2787
2788 return 0;
2789}
2790
2791static const struct file_operations gpio_fileops = {
2792 .release = gpio_chrdev_release,
2793 .open = gpio_chrdev_open,
2794 .poll = lineinfo_watch_poll,
2795 .read = lineinfo_watch_read,
2796 .owner = THIS_MODULE,
2797 .unlocked_ioctl = gpio_ioctl,
2798#ifdef CONFIG_COMPAT
2799 .compat_ioctl = gpio_ioctl_compat,
2800#endif
2801};
2802
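/**
 * gpiolib_cdev_register() - add the chardev for a GPIO device
 * @gdev: the GPIO device to add the chardev for
 * @devt: the dev_t supplying the GPIO chardev major number; the minor is
 * taken from the gdev id
 *
 * Returns:
 * 0 on success, or negative errno on failure.
 */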
2803int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
2804{
2805 struct gpio_chip *gc;
2806 int ret;
2807
2808 cdev_init(&gdev->chrdev, &gpio_fileops);
2809 gdev->chrdev.owner = THIS_MODULE;
2810 gdev->dev.devt = MKDEV(MAJOR(devt), gdev->id);
2811
2812 gdev->line_state_wq = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2813 dev_name(&gdev->dev));
2814 if (!gdev->line_state_wq)
2815 return -ENOMEM;
2816
2817 ret = cdev_device_add(&gdev->chrdev, &gdev->dev);
2818 if (ret)
2819 return ret;
2820
2821 guard(srcu)(&gdev->srcu);
2822 gc = srcu_dereference(gdev->chip, &gdev->srcu);
2823 if (!gc)
2824 return -ENODEV;
2825
2826 chip_dbg(gc, "added GPIO chardev (%d:%d)\n", MAJOR(devt), gdev->id);
2827
2828 return 0;
2829}
2830
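/*
 * Tear down the chardev: flush the line state workqueue, remove the
 * device node, then notify listeners so any blocked readers and pollers
 * wake up and see that the device is gone.
 */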
2831void gpiolib_cdev_unregister(struct gpio_device *gdev)
2832{
2833 destroy_workqueue(gdev->line_state_wq);
2834 cdev_device_del(&gdev->chrdev, &gdev->dev);
2835 blocking_notifier_call_chain(&gdev->device_notifier, 0, NULL);
2836}
110 */
111 if ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
112 (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
113 return -EINVAL;
114
115 /* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
116 if (!(flags & GPIOHANDLE_REQUEST_OUTPUT) &&
117 ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
118 (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
119 return -EINVAL;
120
121 /* Bias flags only allowed for input or output mode. */
122 if (!((flags & GPIOHANDLE_REQUEST_INPUT) ||
123 (flags & GPIOHANDLE_REQUEST_OUTPUT)) &&
124 ((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) ||
125 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP) ||
126 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)))
127 return -EINVAL;
128
129 /* Only one bias flag can be set. */
130 if (((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
131 (flags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
132 GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
133 ((flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
134 (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
135 return -EINVAL;
136
137 return 0;
138}
139
140static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp)
141{
142 assign_bit(FLAG_ACTIVE_LOW, flagsp,
143 lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
144 assign_bit(FLAG_OPEN_DRAIN, flagsp,
145 lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
146 assign_bit(FLAG_OPEN_SOURCE, flagsp,
147 lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
148 assign_bit(FLAG_PULL_UP, flagsp,
149 lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
150 assign_bit(FLAG_PULL_DOWN, flagsp,
151 lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
152 assign_bit(FLAG_BIAS_DISABLE, flagsp,
153 lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
154}
155
156static long linehandle_set_config(struct linehandle_state *lh,
157 void __user *ip)
158{
159 struct gpiohandle_config gcnf;
160 struct gpio_desc *desc;
161 int i, ret;
162 u32 lflags;
163
164 if (copy_from_user(&gcnf, ip, sizeof(gcnf)))
165 return -EFAULT;
166
167 lflags = gcnf.flags;
168 ret = linehandle_validate_flags(lflags);
169 if (ret)
170 return ret;
171
172 for (i = 0; i < lh->num_descs; i++) {
173 desc = lh->descs[i];
174 linehandle_flags_to_desc_flags(gcnf.flags, &desc->flags);
175
176 /*
177 * Lines have to be requested explicitly for input
178 * or output, else the line will be treated "as is".
179 */
180 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
181 int val = !!gcnf.default_values[i];
182
183 ret = gpiod_direction_output(desc, val);
184 if (ret)
185 return ret;
186 } else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
187 ret = gpiod_direction_input(desc);
188 if (ret)
189 return ret;
190 }
191
192 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
193 }
194 return 0;
195}
196
197static long linehandle_ioctl(struct file *file, unsigned int cmd,
198 unsigned long arg)
199{
200 struct linehandle_state *lh = file->private_data;
201 void __user *ip = (void __user *)arg;
202 struct gpiohandle_data ghd;
203 DECLARE_BITMAP(vals, GPIOHANDLES_MAX);
204 unsigned int i;
205 int ret;
206
207 guard(srcu)(&lh->gdev->srcu);
208
209 if (!rcu_access_pointer(lh->gdev->chip))
210 return -ENODEV;
211
212 switch (cmd) {
213 case GPIOHANDLE_GET_LINE_VALUES_IOCTL:
214 /* NOTE: It's okay to read values of output lines */
215 ret = gpiod_get_array_value_complex(false, true,
216 lh->num_descs, lh->descs,
217 NULL, vals);
218 if (ret)
219 return ret;
220
221 memset(&ghd, 0, sizeof(ghd));
222 for (i = 0; i < lh->num_descs; i++)
223 ghd.values[i] = test_bit(i, vals);
224
225 if (copy_to_user(ip, &ghd, sizeof(ghd)))
226 return -EFAULT;
227
228 return 0;
229 case GPIOHANDLE_SET_LINE_VALUES_IOCTL:
230 /*
231 * All line descriptors were created at once with the same
232 * flags so just check if the first one is really output.
233 */
234 if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
235 return -EPERM;
236
237 if (copy_from_user(&ghd, ip, sizeof(ghd)))
238 return -EFAULT;
239
240 /* Clamp all values to [0,1] */
241 for (i = 0; i < lh->num_descs; i++)
242 __assign_bit(i, vals, ghd.values[i]);
243
244 /* Reuse the array setting function */
245 return gpiod_set_array_value_complex(false,
246 true,
247 lh->num_descs,
248 lh->descs,
249 NULL,
250 vals);
251 case GPIOHANDLE_SET_CONFIG_IOCTL:
252 return linehandle_set_config(lh, ip);
253 default:
254 return -EINVAL;
255 }
256}
257
258#ifdef CONFIG_COMPAT
259static long linehandle_ioctl_compat(struct file *file, unsigned int cmd,
260 unsigned long arg)
261{
262 return linehandle_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
263}
264#endif
265
266static void linehandle_free(struct linehandle_state *lh)
267{
268 int i;
269
270 for (i = 0; i < lh->num_descs; i++)
271 if (lh->descs[i])
272 gpiod_free(lh->descs[i]);
273 kfree(lh->label);
274 gpio_device_put(lh->gdev);
275 kfree(lh);
276}
277
278static int linehandle_release(struct inode *inode, struct file *file)
279{
280 linehandle_free(file->private_data);
281 return 0;
282}
283
284static const struct file_operations linehandle_fileops = {
285 .release = linehandle_release,
286 .owner = THIS_MODULE,
287 .llseek = noop_llseek,
288 .unlocked_ioctl = linehandle_ioctl,
289#ifdef CONFIG_COMPAT
290 .compat_ioctl = linehandle_ioctl_compat,
291#endif
292};
293
294static int linehandle_create(struct gpio_device *gdev, void __user *ip)
295{
296 struct gpiohandle_request handlereq;
297 struct linehandle_state *lh;
298 struct file *file;
299 int fd, i, ret;
300 u32 lflags;
301
302 if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
303 return -EFAULT;
304 if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
305 return -EINVAL;
306
307 lflags = handlereq.flags;
308
309 ret = linehandle_validate_flags(lflags);
310 if (ret)
311 return ret;
312
313 lh = kzalloc(sizeof(*lh), GFP_KERNEL);
314 if (!lh)
315 return -ENOMEM;
316 lh->gdev = gpio_device_get(gdev);
317
318 if (handlereq.consumer_label[0] != '\0') {
319 /* label is only initialized if consumer_label is set */
320 lh->label = kstrndup(handlereq.consumer_label,
321 sizeof(handlereq.consumer_label) - 1,
322 GFP_KERNEL);
323 if (!lh->label) {
324 ret = -ENOMEM;
325 goto out_free_lh;
326 }
327 }
328
329 lh->num_descs = handlereq.lines;
330
331 /* Request each GPIO */
332 for (i = 0; i < handlereq.lines; i++) {
333 u32 offset = handlereq.lineoffsets[i];
334 struct gpio_desc *desc = gpio_device_get_desc(gdev, offset);
335
336 if (IS_ERR(desc)) {
337 ret = PTR_ERR(desc);
338 goto out_free_lh;
339 }
340
341 ret = gpiod_request_user(desc, lh->label);
342 if (ret)
343 goto out_free_lh;
344 lh->descs[i] = desc;
345 linehandle_flags_to_desc_flags(handlereq.flags, &desc->flags);
346
347 ret = gpiod_set_transitory(desc, false);
348 if (ret < 0)
349 goto out_free_lh;
350
351 /*
352 * Lines have to be requested explicitly for input
353 * or output, else the line will be treated "as is".
354 */
355 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
356 int val = !!handlereq.default_values[i];
357
358 ret = gpiod_direction_output(desc, val);
359 if (ret)
360 goto out_free_lh;
361 } else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
362 ret = gpiod_direction_input(desc);
363 if (ret)
364 goto out_free_lh;
365 }
366
367 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
368
369 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
370 offset);
371 }
372
373 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
374 if (fd < 0) {
375 ret = fd;
376 goto out_free_lh;
377 }
378
379 file = anon_inode_getfile("gpio-linehandle",
380 &linehandle_fileops,
381 lh,
382 O_RDONLY | O_CLOEXEC);
383 if (IS_ERR(file)) {
384 ret = PTR_ERR(file);
385 goto out_put_unused_fd;
386 }
387
388 handlereq.fd = fd;
389 if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
390 /*
391 * fput() will trigger the release() callback, so do not go onto
392 * the regular error cleanup path here.
393 */
394 fput(file);
395 put_unused_fd(fd);
396 return -EFAULT;
397 }
398
399 fd_install(fd, file);
400
401 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
402 lh->num_descs);
403
404 return 0;
405
406out_put_unused_fd:
407 put_unused_fd(fd);
408out_free_lh:
409 linehandle_free(lh);
410 return ret;
411}
412#endif /* CONFIG_GPIO_CDEV_V1 */
413
414/**
415 * struct line - contains the state of a requested line
416 * @node: to store the object in supinfo_tree if supplemental
417 * @desc: the GPIO descriptor for this line.
418 * @req: the corresponding line request
419 * @irq: the interrupt triggered in response to events on this GPIO
420 * @edflags: the edge flags, GPIO_V2_LINE_FLAG_EDGE_RISING and/or
421 * GPIO_V2_LINE_FLAG_EDGE_FALLING, indicating the edge detection applied
422 * @timestamp_ns: cache for the timestamp storing it between hardirq and
423 * IRQ thread, used to bring the timestamp close to the actual event
424 * @req_seqno: the seqno for the current edge event in the sequence of
425 * events for the corresponding line request. This is drawn from the @req.
426 * @line_seqno: the seqno for the current edge event in the sequence of
427 * events for this line.
428 * @work: the worker that implements software debouncing
429 * @debounce_period_us: the debounce period in microseconds
430 * @sw_debounced: flag indicating if the software debouncer is active
431 * @level: the current debounced physical level of the line
432 * @hdesc: the Hardware Timestamp Engine (HTE) descriptor
433 * @raw_level: the line level at the time of event
434 * @total_discard_seq: the running counter of the discarded events
435 * @last_seqno: the last sequence number before debounce period expires
436 */
437struct line {
438 struct rb_node node;
439 struct gpio_desc *desc;
440 /*
441 * -- edge detector specific fields --
442 */
443 struct linereq *req;
444 unsigned int irq;
445 /*
446 * The flags for the active edge detector configuration.
447 *
448 * edflags is set by linereq_create(), linereq_free(), and
449 * linereq_set_config_unlocked(), which are themselves mutually
450 * exclusive, and is accessed by edge_irq_thread(),
451 * process_hw_ts_thread() and debounce_work_func(),
452 * which can all live with a slightly stale value.
453 */
454 u64 edflags;
455 /*
456 * timestamp_ns and req_seqno are accessed only by
457 * edge_irq_handler() and edge_irq_thread(), which are themselves
458 * mutually exclusive, so no additional protection is necessary.
459 */
460 u64 timestamp_ns;
461 u32 req_seqno;
462 /*
463 * line_seqno is accessed by either edge_irq_thread() or
464 * debounce_work_func(), which are themselves mutually exclusive,
465 * so no additional protection is necessary.
466 */
467 u32 line_seqno;
468 /*
469 * -- debouncer specific fields --
470 */
471 struct delayed_work work;
472 /*
473 * debounce_period_us is accessed by debounce_irq_handler() and
474 * process_hw_ts() which are disabled when modified by
475 * debounce_setup(), edge_detector_setup() or edge_detector_stop()
476 * or can live with a stale version when updated by
477 * edge_detector_update().
478 * The modifying functions are themselves mutually exclusive.
479 */
480 unsigned int debounce_period_us;
481 /*
482 * sw_debounce is accessed by linereq_set_config(), which is the
483 * only setter, and linereq_get_values(), which can live with a
484 * slightly stale value.
485 */
486 unsigned int sw_debounced;
487 /*
488 * level is accessed by debounce_work_func(), which is the only
489 * setter, and linereq_get_values() which can live with a slightly
490 * stale value.
491 */
492 unsigned int level;
493#ifdef CONFIG_HTE
494 struct hte_ts_desc hdesc;
495 /*
496 * HTE provider sets line level at the time of event. The valid
497 * value is 0 or 1 and negative value for an error.
498 */
499 int raw_level;
500 /*
501 * when sw_debounce is set on HTE enabled line, this is running
502 * counter of the discarded events.
503 */
504 u32 total_discard_seq;
505 /*
506 * when sw_debounce is set on HTE enabled line, this variable records
507 * last sequence number before debounce period expires.
508 */
509 u32 last_seqno;
510#endif /* CONFIG_HTE */
511};
512
513/*
514 * a rbtree of the struct lines containing supplemental info.
515 * Used to populate gpio_v2_line_info with cdev specific fields not contained
516 * in the struct gpio_desc.
517 * A line is determined to contain supplemental information by
518 * line_has_supinfo().
519 */
520static struct rb_root supinfo_tree = RB_ROOT;
521/* covers supinfo_tree */
522static DEFINE_SPINLOCK(supinfo_lock);
523
524/**
525 * struct linereq - contains the state of a userspace line request
526 * @gdev: the GPIO device the line request pertains to
527 * @label: consumer label used to tag GPIO descriptors
528 * @num_lines: the number of lines in the lines array
529 * @wait: wait queue that handles blocking reads of events
530 * @device_unregistered_nb: notifier block for receiving gdev unregister events
531 * @event_buffer_size: the number of elements allocated in @events
532 * @events: KFIFO for the GPIO events
533 * @seqno: the sequence number for edge events generated on all lines in
534 * this line request. Note that this is not used when @num_lines is 1, as
535 * the line_seqno is then the same and is cheaper to calculate.
536 * @config_mutex: mutex for serializing ioctl() calls to ensure consistency
537 * of configuration, particularly multi-step accesses to desc flags and
538 * changes to supinfo status.
539 * @lines: the lines held by this line request, with @num_lines elements.
540 */
541struct linereq {
542 struct gpio_device *gdev;
543 const char *label;
544 u32 num_lines;
545 wait_queue_head_t wait;
546 struct notifier_block device_unregistered_nb;
547 u32 event_buffer_size;
548 DECLARE_KFIFO_PTR(events, struct gpio_v2_line_event);
549 atomic_t seqno;
550 struct mutex config_mutex;
551 struct line lines[] __counted_by(num_lines);
552};
553
554static void supinfo_insert(struct line *line)
555{
556 struct rb_node **new = &(supinfo_tree.rb_node), *parent = NULL;
557 struct line *entry;
558
559 guard(spinlock)(&supinfo_lock);
560
561 while (*new) {
562 entry = container_of(*new, struct line, node);
563
564 parent = *new;
565 if (line->desc < entry->desc) {
566 new = &((*new)->rb_left);
567 } else if (line->desc > entry->desc) {
568 new = &((*new)->rb_right);
569 } else {
570 /* this should never happen */
571 WARN(1, "duplicate line inserted");
572 return;
573 }
574 }
575
576 rb_link_node(&line->node, parent, new);
577 rb_insert_color(&line->node, &supinfo_tree);
578}
579
580static void supinfo_erase(struct line *line)
581{
582 guard(spinlock)(&supinfo_lock);
583
584 rb_erase(&line->node, &supinfo_tree);
585}
586
587static struct line *supinfo_find(struct gpio_desc *desc)
588{
589 struct rb_node *node = supinfo_tree.rb_node;
590 struct line *line;
591
592 while (node) {
593 line = container_of(node, struct line, node);
594 if (desc < line->desc)
595 node = node->rb_left;
596 else if (desc > line->desc)
597 node = node->rb_right;
598 else
599 return line;
600 }
601 return NULL;
602}
603
604static void supinfo_to_lineinfo(struct gpio_desc *desc,
605 struct gpio_v2_line_info *info)
606{
607 struct gpio_v2_line_attribute *attr;
608 struct line *line;
609
610 guard(spinlock)(&supinfo_lock);
611
612 line = supinfo_find(desc);
613 if (!line)
614 return;
615
616 attr = &info->attrs[info->num_attrs];
617 attr->id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
618 attr->debounce_period_us = READ_ONCE(line->debounce_period_us);
619 info->num_attrs++;
620}
621
622static inline bool line_has_supinfo(struct line *line)
623{
624 return READ_ONCE(line->debounce_period_us);
625}
626
627/*
628 * Checks line_has_supinfo() before and after the change to avoid unnecessary
629 * supinfo_tree access.
630 * Called indirectly by linereq_create() or linereq_set_config() so line
631 * is already protected from concurrent changes.
632 */
633static void line_set_debounce_period(struct line *line,
634 unsigned int debounce_period_us)
635{
636 bool was_suppl = line_has_supinfo(line);
637
638 WRITE_ONCE(line->debounce_period_us, debounce_period_us);
639
640 /* if supinfo status is unchanged then we're done */
641 if (line_has_supinfo(line) == was_suppl)
642 return;
643
644 /* supinfo status has changed, so update the tree */
645 if (was_suppl)
646 supinfo_erase(line);
647 else
648 supinfo_insert(line);
649}
650
651#define GPIO_V2_LINE_BIAS_FLAGS \
652 (GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \
653 GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \
654 GPIO_V2_LINE_FLAG_BIAS_DISABLED)
655
656#define GPIO_V2_LINE_DIRECTION_FLAGS \
657 (GPIO_V2_LINE_FLAG_INPUT | \
658 GPIO_V2_LINE_FLAG_OUTPUT)
659
660#define GPIO_V2_LINE_DRIVE_FLAGS \
661 (GPIO_V2_LINE_FLAG_OPEN_DRAIN | \
662 GPIO_V2_LINE_FLAG_OPEN_SOURCE)
663
664#define GPIO_V2_LINE_EDGE_FLAGS \
665 (GPIO_V2_LINE_FLAG_EDGE_RISING | \
666 GPIO_V2_LINE_FLAG_EDGE_FALLING)
667
668#define GPIO_V2_LINE_FLAG_EDGE_BOTH GPIO_V2_LINE_EDGE_FLAGS
669
670#define GPIO_V2_LINE_VALID_FLAGS \
671 (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
672 GPIO_V2_LINE_DIRECTION_FLAGS | \
673 GPIO_V2_LINE_DRIVE_FLAGS | \
674 GPIO_V2_LINE_EDGE_FLAGS | \
675 GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \
676 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
677 GPIO_V2_LINE_BIAS_FLAGS)
678
679/* subset of flags relevant for edge detector configuration */
680#define GPIO_V2_LINE_EDGE_DETECTOR_FLAGS \
681 (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
682 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
683 GPIO_V2_LINE_EDGE_FLAGS)
684
685static int linereq_unregistered_notify(struct notifier_block *nb,
686 unsigned long action, void *data)
687{
688 struct linereq *lr = container_of(nb, struct linereq,
689 device_unregistered_nb);
690
691 wake_up_poll(&lr->wait, EPOLLIN | EPOLLERR);
692
693 return NOTIFY_OK;
694}
695
696static void linereq_put_event(struct linereq *lr,
697 struct gpio_v2_line_event *le)
698{
699 bool overflow = false;
700
701 scoped_guard(spinlock, &lr->wait.lock) {
702 if (kfifo_is_full(&lr->events)) {
703 overflow = true;
704 kfifo_skip(&lr->events);
705 }
706 kfifo_in(&lr->events, le, 1);
707 }
708 if (!overflow)
709 wake_up_poll(&lr->wait, EPOLLIN);
710 else
711 pr_debug_ratelimited("event FIFO is full - event dropped\n");
712}
713
714static u64 line_event_timestamp(struct line *line)
715{
716 if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
717 return ktime_get_real_ns();
718 else if (IS_ENABLED(CONFIG_HTE) &&
719 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
720 return line->timestamp_ns;
721
722 return ktime_get_ns();
723}
724
725static u32 line_event_id(int level)
726{
727 return level ? GPIO_V2_LINE_EVENT_RISING_EDGE :
728 GPIO_V2_LINE_EVENT_FALLING_EDGE;
729}
730
731static inline char *make_irq_label(const char *orig)
732{
733 char *new;
734
735 if (!orig)
736 return NULL;
737
738 new = kstrdup_and_replace(orig, '/', ':', GFP_KERNEL);
739 if (!new)
740 return ERR_PTR(-ENOMEM);
741
742 return new;
743}
744
745static inline void free_irq_label(const char *label)
746{
747 kfree(label);
748}
749
750#ifdef CONFIG_HTE
751
752static enum hte_return process_hw_ts_thread(void *p)
753{
754 struct line *line;
755 struct linereq *lr;
756 struct gpio_v2_line_event le;
757 u64 edflags;
758 int level;
759
760 if (!p)
761 return HTE_CB_HANDLED;
762
763 line = p;
764 lr = line->req;
765
766 memset(&le, 0, sizeof(le));
767
768 le.timestamp_ns = line->timestamp_ns;
769 edflags = READ_ONCE(line->edflags);
770
771 switch (edflags & GPIO_V2_LINE_EDGE_FLAGS) {
772 case GPIO_V2_LINE_FLAG_EDGE_BOTH:
773 level = (line->raw_level >= 0) ?
774 line->raw_level :
775 gpiod_get_raw_value_cansleep(line->desc);
776
777 if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
778 level = !level;
779
780 le.id = line_event_id(level);
781 break;
782 case GPIO_V2_LINE_FLAG_EDGE_RISING:
783 le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
784 break;
785 case GPIO_V2_LINE_FLAG_EDGE_FALLING:
786 le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
787 break;
788 default:
789 return HTE_CB_HANDLED;
790 }
791 le.line_seqno = line->line_seqno;
792 le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
793 le.offset = gpio_chip_hwgpio(line->desc);
794
795 linereq_put_event(lr, &le);
796
797 return HTE_CB_HANDLED;
798}
799
800static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
801{
802 struct line *line;
803 struct linereq *lr;
804 int diff_seqno = 0;
805
806 if (!ts || !p)
807 return HTE_CB_HANDLED;
808
809 line = p;
810 line->timestamp_ns = ts->tsc;
811 line->raw_level = ts->raw_level;
812 lr = line->req;
813
814 if (READ_ONCE(line->sw_debounced)) {
815 line->total_discard_seq++;
816 line->last_seqno = ts->seq;
817 mod_delayed_work(system_wq, &line->work,
818 usecs_to_jiffies(READ_ONCE(line->debounce_period_us)));
819 } else {
820 if (unlikely(ts->seq < line->line_seqno))
821 return HTE_CB_HANDLED;
822
823 diff_seqno = ts->seq - line->line_seqno;
824 line->line_seqno = ts->seq;
825 if (lr->num_lines != 1)
826 line->req_seqno = atomic_add_return(diff_seqno,
827 &lr->seqno);
828
829 return HTE_RUN_SECOND_CB;
830 }
831
832 return HTE_CB_HANDLED;
833}
834
835static int hte_edge_setup(struct line *line, u64 eflags)
836{
837 int ret;
838 unsigned long flags = 0;
839 struct hte_ts_desc *hdesc = &line->hdesc;
840
841 if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
842 flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
843 HTE_FALLING_EDGE_TS :
844 HTE_RISING_EDGE_TS;
845 if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
846 flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
847 HTE_RISING_EDGE_TS :
848 HTE_FALLING_EDGE_TS;
849
850 line->total_discard_seq = 0;
851
852 hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags, NULL,
853 line->desc);
854
855 ret = hte_ts_get(NULL, hdesc, 0);
856 if (ret)
857 return ret;
858
859 return hte_request_ts_ns(hdesc, process_hw_ts, process_hw_ts_thread,
860 line);
861}
862
863#else
864
865static int hte_edge_setup(struct line *line, u64 eflags)
866{
867 return 0;
868}
869#endif /* CONFIG_HTE */
870
871static irqreturn_t edge_irq_thread(int irq, void *p)
872{
873 struct line *line = p;
874 struct linereq *lr = line->req;
875 struct gpio_v2_line_event le;
876
877 /* Do not leak kernel stack to userspace */
878 memset(&le, 0, sizeof(le));
879
880 if (line->timestamp_ns) {
881 le.timestamp_ns = line->timestamp_ns;
882 } else {
883 /*
884 * We may be running from a nested threaded interrupt in
885 * which case we didn't get the timestamp from
886 * edge_irq_handler().
887 */
888 le.timestamp_ns = line_event_timestamp(line);
889 if (lr->num_lines != 1)
890 line->req_seqno = atomic_inc_return(&lr->seqno);
891 }
892 line->timestamp_ns = 0;
893
894 switch (READ_ONCE(line->edflags) & GPIO_V2_LINE_EDGE_FLAGS) {
895 case GPIO_V2_LINE_FLAG_EDGE_BOTH:
896 le.id = line_event_id(gpiod_get_value_cansleep(line->desc));
897 break;
898 case GPIO_V2_LINE_FLAG_EDGE_RISING:
899 le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
900 break;
901 case GPIO_V2_LINE_FLAG_EDGE_FALLING:
902 le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
903 break;
904 default:
905 return IRQ_NONE;
906 }
907 line->line_seqno++;
908 le.line_seqno = line->line_seqno;
909 le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
910 le.offset = gpio_chip_hwgpio(line->desc);
911
912 linereq_put_event(lr, &le);
913
914 return IRQ_HANDLED;
915}
916
917static irqreturn_t edge_irq_handler(int irq, void *p)
918{
919 struct line *line = p;
920 struct linereq *lr = line->req;
921
922 /*
923 * Just store the timestamp in hardirq context so we get it as
924 * close in time as possible to the actual event.
925 */
926 line->timestamp_ns = line_event_timestamp(line);
927
928 if (lr->num_lines != 1)
929 line->req_seqno = atomic_inc_return(&lr->seqno);
930
931 return IRQ_WAKE_THREAD;
932}
933
934/*
935 * returns the current debounced logical value.
936 */
937static bool debounced_value(struct line *line)
938{
939 bool value;
940
941 /*
942 * minor race - debouncer may be stopped here, so edge_detector_stop()
943 * must leave the value unchanged so the following will read the level
944 * from when the debouncer was last running.
945 */
946 value = READ_ONCE(line->level);
947
948 if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
949 value = !value;
950
951 return value;
952}
953
954static irqreturn_t debounce_irq_handler(int irq, void *p)
955{
956 struct line *line = p;
957
958 mod_delayed_work(system_wq, &line->work,
959 usecs_to_jiffies(READ_ONCE(line->debounce_period_us)));
960
961 return IRQ_HANDLED;
962}
963
964static void debounce_work_func(struct work_struct *work)
965{
966 struct gpio_v2_line_event le;
967 struct line *line = container_of(work, struct line, work.work);
968 struct linereq *lr;
969 u64 eflags, edflags = READ_ONCE(line->edflags);
970 int level = -1;
971#ifdef CONFIG_HTE
972 int diff_seqno;
973
974 if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
975 level = line->raw_level;
976#endif
977 if (level < 0)
978 level = gpiod_get_raw_value_cansleep(line->desc);
979 if (level < 0) {
980 pr_debug_ratelimited("debouncer failed to read line value\n");
981 return;
982 }
983
984 if (READ_ONCE(line->level) == level)
985 return;
986
987 WRITE_ONCE(line->level, level);
988
989 /* -- edge detection -- */
990 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
991 if (!eflags)
992 return;
993
994 /* switch from physical level to logical - if they differ */
995 if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
996 level = !level;
997
998 /* ignore edges that are not being monitored */
999 if (((eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) ||
1000 ((eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level))
1001 return;
1002
1003 /* Do not leak kernel stack to userspace */
1004 memset(&le, 0, sizeof(le));
1005
1006 lr = line->req;
1007 le.timestamp_ns = line_event_timestamp(line);
1008 le.offset = gpio_chip_hwgpio(line->desc);
1009#ifdef CONFIG_HTE
1010 if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) {
1011 /* discard events except the last one */
1012 line->total_discard_seq -= 1;
1013 diff_seqno = line->last_seqno - line->total_discard_seq -
1014 line->line_seqno;
1015 line->line_seqno = line->last_seqno - line->total_discard_seq;
1016 le.line_seqno = line->line_seqno;
1017 le.seqno = (lr->num_lines == 1) ?
1018 le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno);
1019 } else
1020#endif /* CONFIG_HTE */
1021 {
1022 line->line_seqno++;
1023 le.line_seqno = line->line_seqno;
1024 le.seqno = (lr->num_lines == 1) ?
1025 le.line_seqno : atomic_inc_return(&lr->seqno);
1026 }
1027
1028 le.id = line_event_id(level);
1029
1030 linereq_put_event(lr, &le);
1031}
1032
1033static int debounce_setup(struct line *line, unsigned int debounce_period_us)
1034{
1035 unsigned long irqflags;
1036 int ret, level, irq;
1037 char *label;
1038
1039 /* try hardware */
1040 ret = gpiod_set_debounce(line->desc, debounce_period_us);
1041 if (!ret) {
1042 line_set_debounce_period(line, debounce_period_us);
1043 return ret;
1044 }
1045 if (ret != -ENOTSUPP)
1046 return ret;
1047
1048 if (debounce_period_us) {
1049 /* setup software debounce */
1050 level = gpiod_get_raw_value_cansleep(line->desc);
1051 if (level < 0)
1052 return level;
1053
1054 if (!(IS_ENABLED(CONFIG_HTE) &&
1055 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
1056 irq = gpiod_to_irq(line->desc);
1057 if (irq < 0)
1058 return -ENXIO;
1059
1060 label = make_irq_label(line->req->label);
1061 if (IS_ERR(label))
1062 return -ENOMEM;
1063
1064 irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
1065 ret = request_irq(irq, debounce_irq_handler, irqflags,
1066 label, line);
1067 if (ret) {
1068 free_irq_label(label);
1069 return ret;
1070 }
1071 line->irq = irq;
1072 } else {
1073 ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
1074 if (ret)
1075 return ret;
1076 }
1077
1078 WRITE_ONCE(line->level, level);
1079 WRITE_ONCE(line->sw_debounced, 1);
1080 }
1081 return 0;
1082}
1083
1084static bool gpio_v2_line_config_debounced(struct gpio_v2_line_config *lc,
1085 unsigned int line_idx)
1086{
1087 unsigned int i;
1088 u64 mask = BIT_ULL(line_idx);
1089
1090 for (i = 0; i < lc->num_attrs; i++) {
1091 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
1092 (lc->attrs[i].mask & mask))
1093 return true;
1094 }
1095 return false;
1096}
1097
1098static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
1099 unsigned int line_idx)
1100{
1101 unsigned int i;
1102 u64 mask = BIT_ULL(line_idx);
1103
1104 for (i = 0; i < lc->num_attrs; i++) {
1105 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
1106 (lc->attrs[i].mask & mask))
1107 return lc->attrs[i].attr.debounce_period_us;
1108 }
1109 return 0;
1110}
1111
1112static void edge_detector_stop(struct line *line)
1113{
1114 if (line->irq) {
1115 free_irq_label(free_irq(line->irq, line));
1116 line->irq = 0;
1117 }
1118
1119#ifdef CONFIG_HTE
1120 if (READ_ONCE(line->edflags) & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
1121 hte_ts_put(&line->hdesc);
1122#endif
1123
1124 cancel_delayed_work_sync(&line->work);
1125 WRITE_ONCE(line->sw_debounced, 0);
1126 WRITE_ONCE(line->edflags, 0);
1127 line_set_debounce_period(line, 0);
1128 /* do not change line->level - see comment in debounced_value() */
1129}
1130
1131static int edge_detector_setup(struct line *line,
1132 struct gpio_v2_line_config *lc,
1133 unsigned int line_idx, u64 edflags)
1134{
1135 u32 debounce_period_us;
1136 unsigned long irqflags = 0;
1137 u64 eflags;
1138 int irq, ret;
1139 char *label;
1140
1141 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
1142 if (eflags && !kfifo_initialized(&line->req->events)) {
1143 ret = kfifo_alloc(&line->req->events,
1144 line->req->event_buffer_size, GFP_KERNEL);
1145 if (ret)
1146 return ret;
1147 }
1148 if (gpio_v2_line_config_debounced(lc, line_idx)) {
1149 debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
1150 ret = debounce_setup(line, debounce_period_us);
1151 if (ret)
1152 return ret;
1153 line_set_debounce_period(line, debounce_period_us);
1154 }
1155
1156 /* detection disabled or sw debouncer will provide edge detection */
1157 if (!eflags || READ_ONCE(line->sw_debounced))
1158 return 0;
1159
1160 if (IS_ENABLED(CONFIG_HTE) &&
1161 (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1162 return hte_edge_setup(line, edflags);
1163
1164 irq = gpiod_to_irq(line->desc);
1165 if (irq < 0)
1166 return -ENXIO;
1167
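	/* map the requested logical edges to physical IRQ triggers */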
1168 if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
1169 irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
1170 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
1171 if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
1172 irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
1173 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
1174 irqflags |= IRQF_ONESHOT;
1175
1176 label = make_irq_label(line->req->label);
1177 if (IS_ERR(label))
1178 return PTR_ERR(label);
1179
1180 /* Request a thread to read the events */
1181 ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
1182 irqflags, label, line);
1183 if (ret) {
1184 free_irq_label(label);
1185 return ret;
1186 }
1187
1188 line->irq = irq;
1189 return 0;
1190}
1191
1192static int edge_detector_update(struct line *line,
1193 struct gpio_v2_line_config *lc,
1194 unsigned int line_idx, u64 edflags)
1195{
1196 u64 eflags;
1197 int ret;
1198 u64 active_edflags = READ_ONCE(line->edflags);
1199 unsigned int debounce_period_us =
1200 gpio_v2_line_config_debounce_period(lc, line_idx);
1201
1202 if ((active_edflags == edflags) &&
1203 (READ_ONCE(line->debounce_period_us) == debounce_period_us))
1204 return 0;
1205
1206 /* sw debounced and still will be...*/
1207 if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
1208 line_set_debounce_period(line, debounce_period_us);
1209 /*
1210 * ensure event fifo is initialised if edge detection
1211 * is now enabled.
1212 */
1213 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
1214 if (eflags && !kfifo_initialized(&line->req->events)) {
1215 ret = kfifo_alloc(&line->req->events,
1216 line->req->event_buffer_size,
1217 GFP_KERNEL);
1218 if (ret)
1219 return ret;
1220 }
1221 return 0;
1222 }
1223
1224 /* reconfiguring edge detection or sw debounce being disabled */
1225 if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
1226 (active_edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) ||
1227 (!debounce_period_us && READ_ONCE(line->sw_debounced)))
1228 edge_detector_stop(line);
1229
1230 return edge_detector_setup(line, lc, line_idx, edflags);
1231}
1232
1233static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
1234 unsigned int line_idx)
1235{
1236 unsigned int i;
1237 u64 mask = BIT_ULL(line_idx);
1238
1239 for (i = 0; i < lc->num_attrs; i++) {
1240 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_FLAGS) &&
1241 (lc->attrs[i].mask & mask))
1242 return lc->attrs[i].attr.flags;
1243 }
1244 return lc->flags;
1245}
1246
1247static int gpio_v2_line_config_output_value(struct gpio_v2_line_config *lc,
1248 unsigned int line_idx)
1249{
1250 unsigned int i;
1251 u64 mask = BIT_ULL(line_idx);
1252
1253 for (i = 0; i < lc->num_attrs; i++) {
1254 if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES) &&
1255 (lc->attrs[i].mask & mask))
1256 return !!(lc->attrs[i].attr.values & mask);
1257 }
1258 return 0;
1259}
1260
1261static int gpio_v2_line_flags_validate(u64 flags)
1262{
1263 /* Return an error if an unknown flag is set */
1264 if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
1265 return -EINVAL;
1266
1267 if (!IS_ENABLED(CONFIG_HTE) &&
1268 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1269 return -EOPNOTSUPP;
1270
1271 /*
1272 * Do not allow both INPUT and OUTPUT flags to be set as they are
1273 * contradictory.
1274 */
1275 if ((flags & GPIO_V2_LINE_FLAG_INPUT) &&
1276 (flags & GPIO_V2_LINE_FLAG_OUTPUT))
1277 return -EINVAL;
1278
1279 /* Only allow one event clock source */
1280 if (IS_ENABLED(CONFIG_HTE) &&
1281 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
1282 (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
1283 return -EINVAL;
1284
1285 /* Edge detection requires explicit input. */
1286 if ((flags & GPIO_V2_LINE_EDGE_FLAGS) &&
1287 !(flags & GPIO_V2_LINE_FLAG_INPUT))
1288 return -EINVAL;
1289
1290 /*
1291 * Do not allow OPEN_SOURCE and OPEN_DRAIN flags in a single
1292 * request. If the hardware actually supports enabling both at the
1293 * same time the electrical result would be disastrous.
1294 */
1295 if ((flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN) &&
1296 (flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE))
1297 return -EINVAL;
1298
1299 /* Drive requires explicit output direction. */
1300 if ((flags & GPIO_V2_LINE_DRIVE_FLAGS) &&
1301 !(flags & GPIO_V2_LINE_FLAG_OUTPUT))
1302 return -EINVAL;
1303
1304 /* Bias requires explicit direction. */
1305 if ((flags & GPIO_V2_LINE_BIAS_FLAGS) &&
1306 !(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
1307 return -EINVAL;
1308
1309 /* Only one bias flag can be set. */
1310 if (((flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED) &&
1311 (flags & (GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN |
1312 GPIO_V2_LINE_FLAG_BIAS_PULL_UP))) ||
1313 ((flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN) &&
1314 (flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)))
1315 return -EINVAL;
1316
1317 return 0;
1318}
1319
1320static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc,
1321 unsigned int num_lines)
1322{
1323 unsigned int i;
1324 u64 flags;
1325 int ret;
1326
1327 if (lc->num_attrs > GPIO_V2_LINE_NUM_ATTRS_MAX)
1328 return -EINVAL;
1329
1330 if (memchr_inv(lc->padding, 0, sizeof(lc->padding)))
1331 return -EINVAL;
1332
1333 for (i = 0; i < num_lines; i++) {
1334 flags = gpio_v2_line_config_flags(lc, i);
1335 ret = gpio_v2_line_flags_validate(flags);
1336 if (ret)
1337 return ret;
1338
1339 /* debounce requires explicit input */
1340 if (gpio_v2_line_config_debounced(lc, i) &&
1341 !(flags & GPIO_V2_LINE_FLAG_INPUT))
1342 return -EINVAL;
1343 }
1344 return 0;
1345}
1346
1347static void gpio_v2_line_config_flags_to_desc_flags(u64 flags,
1348 unsigned long *flagsp)
1349{
1350 assign_bit(FLAG_ACTIVE_LOW, flagsp,
1351 flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);
1352
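	/* only update the direction if it is explicitly requested */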
1353 if (flags & GPIO_V2_LINE_FLAG_OUTPUT)
1354 set_bit(FLAG_IS_OUT, flagsp);
1355 else if (flags & GPIO_V2_LINE_FLAG_INPUT)
1356 clear_bit(FLAG_IS_OUT, flagsp);
1357
1358 assign_bit(FLAG_EDGE_RISING, flagsp,
1359 flags & GPIO_V2_LINE_FLAG_EDGE_RISING);
1360 assign_bit(FLAG_EDGE_FALLING, flagsp,
1361 flags & GPIO_V2_LINE_FLAG_EDGE_FALLING);
1362
1363 assign_bit(FLAG_OPEN_DRAIN, flagsp,
1364 flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
1365 assign_bit(FLAG_OPEN_SOURCE, flagsp,
1366 flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);
1367
1368 assign_bit(FLAG_PULL_UP, flagsp,
1369 flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
1370 assign_bit(FLAG_PULL_DOWN, flagsp,
1371 flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
1372 assign_bit(FLAG_BIAS_DISABLE, flagsp,
1373 flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);
1374
1375 assign_bit(FLAG_EVENT_CLOCK_REALTIME, flagsp,
1376 flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
1377 assign_bit(FLAG_EVENT_CLOCK_HTE, flagsp,
1378 flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
1379}
1380
1381static long linereq_get_values(struct linereq *lr, void __user *ip)
1382{
1383 struct gpio_v2_line_values lv;
1384 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
1385 struct gpio_desc **descs;
1386 unsigned int i, didx, num_get;
1387 bool val;
1388 int ret;
1389
1390 /* NOTE: It's ok to read values of output lines. */
1391 if (copy_from_user(&lv, ip, sizeof(lv)))
1392 return -EFAULT;
1393
1394 /*
1395 * gpiod_get_array_value_complex() requires compacted desc and val
1396 * arrays, rather than the sparse ones in lv.
1397 * Calculation of num_get and construction of the desc array is
1398 * optimized to avoid allocation for the desc array for the common
1399 * num_get == 1 case.
1400 */
1401 /* scan requested lines to calculate the subset to get */
1402 for (num_get = 0, i = 0; i < lr->num_lines; i++) {
1403 if (lv.mask & BIT_ULL(i)) {
1404 num_get++;
1405 /* capture desc for the num_get == 1 case */
1406 descs = &lr->lines[i].desc;
1407 }
1408 }
1409
1410 if (num_get == 0)
1411 return -EINVAL;
1412
1413 if (num_get != 1) {
1414 /* build compacted desc array */
1415 descs = kmalloc_array(num_get, sizeof(*descs), GFP_KERNEL);
1416 if (!descs)
1417 return -ENOMEM;
1418 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1419 if (lv.mask & BIT_ULL(i)) {
1420 descs[didx] = lr->lines[i].desc;
1421 didx++;
1422 }
1423 }
1424 }
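	/* read logical (not raw) values; the call may sleep */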
1425 ret = gpiod_get_array_value_complex(false, true, num_get,
1426 descs, NULL, vals);
1427
1428 if (num_get != 1)
1429 kfree(descs);
1430 if (ret)
1431 return ret;
1432
1433 lv.bits = 0;
1434 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1435 /* unpack compacted vals for the response */
1436 if (lv.mask & BIT_ULL(i)) {
1437 if (lr->lines[i].sw_debounced)
1438 val = debounced_value(&lr->lines[i]);
1439 else
1440 val = test_bit(didx, vals);
1441 if (val)
1442 lv.bits |= BIT_ULL(i);
1443 didx++;
1444 }
1445 }
1446
1447 if (copy_to_user(ip, &lv, sizeof(lv)))
1448 return -EFAULT;
1449
1450 return 0;
1451}
1452
1453static long linereq_set_values(struct linereq *lr, void __user *ip)
1454{
1455 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
1456 struct gpio_v2_line_values lv;
1457 struct gpio_desc **descs;
1458 unsigned int i, didx, num_set;
1459 int ret;
1460
1461 if (copy_from_user(&lv, ip, sizeof(lv)))
1462 return -EFAULT;
1463
1464 guard(mutex)(&lr->config_mutex);
1465
1466 /*
1467 * gpiod_set_array_value_complex() requires compacted desc and val
1468 * arrays, rather than the sparse ones in lv.
1469 * Calculation of num_set and construction of the descs and vals arrays
1470 * is optimized to minimize scanning the lv->mask, and to avoid
1471 * allocation for the desc array for the common num_set == 1 case.
1472 */
1473 bitmap_zero(vals, GPIO_V2_LINES_MAX);
1474 /* scan requested lines to determine the subset to be set */
1475 for (num_set = 0, i = 0; i < lr->num_lines; i++) {
1476 if (lv.mask & BIT_ULL(i)) {
1477 /* setting inputs is not allowed */
1478 if (!test_bit(FLAG_IS_OUT, &lr->lines[i].desc->flags))
1479 return -EPERM;
1480 /* add to compacted values */
1481 if (lv.bits & BIT_ULL(i))
1482 __set_bit(num_set, vals);
1483 num_set++;
1484 /* capture desc for the num_set == 1 case */
1485 descs = &lr->lines[i].desc;
1486 }
1487 }
1488 if (num_set == 0)
1489 return -EINVAL;
1490
1491 if (num_set != 1) {
1492 /* build compacted desc array */
1493 descs = kmalloc_array(num_set, sizeof(*descs), GFP_KERNEL);
1494 if (!descs)
1495 return -ENOMEM;
1496 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1497 if (lv.mask & BIT_ULL(i)) {
1498 descs[didx] = lr->lines[i].desc;
1499 didx++;
1500 }
1501 }
1502 }
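	/* write logical (not raw) values; the call may sleep */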
1503 ret = gpiod_set_array_value_complex(false, true, num_set,
1504 descs, NULL, vals);
1505
1506 if (num_set != 1)
1507 kfree(descs);
1508 return ret;
1509}
1510
1511static long linereq_set_config(struct linereq *lr, void __user *ip)
1512{
1513 struct gpio_v2_line_config lc;
1514 struct gpio_desc *desc;
1515 struct line *line;
1516 unsigned int i;
1517 u64 flags, edflags;
1518 int ret;
1519
1520 if (copy_from_user(&lc, ip, sizeof(lc)))
1521 return -EFAULT;
1522
1523 ret = gpio_v2_line_config_validate(&lc, lr->num_lines);
1524 if (ret)
1525 return ret;
1526
1527 guard(mutex)(&lr->config_mutex);
1528
1529 for (i = 0; i < lr->num_lines; i++) {
1530 line = &lr->lines[i];
1531 desc = lr->lines[i].desc;
1532 flags = gpio_v2_line_config_flags(&lc, i);
1533 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
1534 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
1535 /*
1536 * Lines have to be requested explicitly for input
1537 * or output, else the line will be treated "as is".
1538 */
1539 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
1540 int val = gpio_v2_line_config_output_value(&lc, i);
1541
1542 edge_detector_stop(line);
1543 ret = gpiod_direction_output(desc, val);
1544 if (ret)
1545 return ret;
1546 } else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
1547 ret = gpiod_direction_input(desc);
1548 if (ret)
1549 return ret;
1550
1551 ret = edge_detector_update(line, &lc, i, edflags);
1552 if (ret)
1553 return ret;
1554 }
1555
1556 WRITE_ONCE(line->edflags, edflags);
1557
1558 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
1559 }
1560 return 0;
1561}
1562
1563static long linereq_ioctl(struct file *file, unsigned int cmd,
1564 unsigned long arg)
1565{
1566 struct linereq *lr = file->private_data;
1567 void __user *ip = (void __user *)arg;
1568
1569 guard(srcu)(&lr->gdev->srcu);
1570
1571 if (!rcu_access_pointer(lr->gdev->chip))
1572 return -ENODEV;
1573
1574 switch (cmd) {
1575 case GPIO_V2_LINE_GET_VALUES_IOCTL:
1576 return linereq_get_values(lr, ip);
1577 case GPIO_V2_LINE_SET_VALUES_IOCTL:
1578 return linereq_set_values(lr, ip);
1579 case GPIO_V2_LINE_SET_CONFIG_IOCTL:
1580 return linereq_set_config(lr, ip);
1581 default:
1582 return -EINVAL;
1583 }
1584}
1585
1586#ifdef CONFIG_COMPAT
1587static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
1588 unsigned long arg)
1589{
1590 return linereq_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1591}
1592#endif
1593
1594static __poll_t linereq_poll(struct file *file,
1595 struct poll_table_struct *wait)
1596{
1597 struct linereq *lr = file->private_data;
1598 __poll_t events = 0;
1599
1600 guard(srcu)(&lr->gdev->srcu);
1601
1602 if (!rcu_access_pointer(lr->gdev->chip))
1603 return EPOLLHUP | EPOLLERR;
1604
1605 poll_wait(file, &lr->wait, wait);
1606
1607 if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events,
1608 &lr->wait.lock))
1609 events = EPOLLIN | EPOLLRDNORM;
1610
1611 return events;
1612}
1613
1614static ssize_t linereq_read(struct file *file, char __user *buf,
1615 size_t count, loff_t *f_ps)
1616{
1617 struct linereq *lr = file->private_data;
1618 struct gpio_v2_line_event le;
1619 ssize_t bytes_read = 0;
1620 int ret;
1621
1622 guard(srcu)(&lr->gdev->srcu);
1623
1624 if (!rcu_access_pointer(lr->gdev->chip))
1625 return -ENODEV;
1626
1627 if (count < sizeof(le))
1628 return -EINVAL;
1629
1630 do {
1631 scoped_guard(spinlock, &lr->wait.lock) {
1632 if (kfifo_is_empty(&lr->events)) {
1633 if (bytes_read)
1634 return bytes_read;
1635
1636 if (file->f_flags & O_NONBLOCK)
1637 return -EAGAIN;
1638
1639 ret = wait_event_interruptible_locked(lr->wait,
1640 !kfifo_is_empty(&lr->events));
1641 if (ret)
1642 return ret;
1643 }
1644
1645 ret = kfifo_out(&lr->events, &le, 1);
1646 }
1647 if (ret != 1) {
1648 /*
1649 * This should never happen - we were holding the
1650 * lock from the moment we learned the fifo is no
1651 * longer empty until now.
1652 */
1653 ret = -EIO;
1654 break;
1655 }
1656
1657 if (copy_to_user(buf + bytes_read, &le, sizeof(le)))
1658 return -EFAULT;
1659 bytes_read += sizeof(le);
1660 } while (count >= bytes_read + sizeof(le));
1661
1662 return bytes_read;
1663}
1664
1665static void linereq_free(struct linereq *lr)
1666{
1667 struct line *line;
1668 unsigned int i;
1669
1670 if (lr->device_unregistered_nb.notifier_call)
1671 blocking_notifier_chain_unregister(&lr->gdev->device_notifier,
1672 &lr->device_unregistered_nb);
1673
1674 for (i = 0; i < lr->num_lines; i++) {
1675 line = &lr->lines[i];
1676 if (!line->desc)
1677 continue;
1678
1679 edge_detector_stop(line);
1680 if (line_has_supinfo(line))
1681 supinfo_erase(line);
1682 gpiod_free(line->desc);
1683 }
1684 kfifo_free(&lr->events);
1685 kfree(lr->label);
1686 gpio_device_put(lr->gdev);
1687 kvfree(lr);
1688}
1689
1690static int linereq_release(struct inode *inode, struct file *file)
1691{
1692 struct linereq *lr = file->private_data;
1693
1694 linereq_free(lr);
1695 return 0;
1696}
1697
1698#ifdef CONFIG_PROC_FS
1699static void linereq_show_fdinfo(struct seq_file *out, struct file *file)
1700{
1701 struct linereq *lr = file->private_data;
1702 struct device *dev = &lr->gdev->dev;
1703 u16 i;
1704
1705 seq_printf(out, "gpio-chip:\t%s\n", dev_name(dev));
1706
1707 for (i = 0; i < lr->num_lines; i++)
1708 seq_printf(out, "gpio-line:\t%d\n",
1709 gpio_chip_hwgpio(lr->lines[i].desc));
1710}
1711#endif
1712
1713static const struct file_operations line_fileops = {
1714 .release = linereq_release,
1715 .read = linereq_read,
1716 .poll = linereq_poll,
1717 .owner = THIS_MODULE,
1718 .llseek = noop_llseek,
1719 .unlocked_ioctl = linereq_ioctl,
1720#ifdef CONFIG_COMPAT
1721 .compat_ioctl = linereq_ioctl_compat,
1722#endif
1723#ifdef CONFIG_PROC_FS
1724 .show_fdinfo = linereq_show_fdinfo,
1725#endif
1726};
1727
1728static int linereq_create(struct gpio_device *gdev, void __user *ip)
1729{
1730 struct gpio_v2_line_request ulr;
1731 struct gpio_v2_line_config *lc;
1732 struct linereq *lr;
1733 struct file *file;
1734 u64 flags, edflags;
1735 unsigned int i;
1736 int fd, ret;
1737
1738 if (copy_from_user(&ulr, ip, sizeof(ulr)))
1739 return -EFAULT;
1740
1741 if ((ulr.num_lines == 0) || (ulr.num_lines > GPIO_V2_LINES_MAX))
1742 return -EINVAL;
1743
1744 if (memchr_inv(ulr.padding, 0, sizeof(ulr.padding)))
1745 return -EINVAL;
1746
1747 lc = &ulr.config;
1748 ret = gpio_v2_line_config_validate(lc, ulr.num_lines);
1749 if (ret)
1750 return ret;
1751
1752 lr = kvzalloc(struct_size(lr, lines, ulr.num_lines), GFP_KERNEL);
1753 if (!lr)
1754 return -ENOMEM;
1755 lr->num_lines = ulr.num_lines;
1756
1757 lr->gdev = gpio_device_get(gdev);
1758
1759 for (i = 0; i < ulr.num_lines; i++) {
1760 lr->lines[i].req = lr;
1761 WRITE_ONCE(lr->lines[i].sw_debounced, 0);
1762 INIT_DELAYED_WORK(&lr->lines[i].work, debounce_work_func);
1763 }
1764
1765 if (ulr.consumer[0] != '\0') {
1766 /* label is only initialized if consumer is set */
1767 lr->label = kstrndup(ulr.consumer, sizeof(ulr.consumer) - 1,
1768 GFP_KERNEL);
1769 if (!lr->label) {
1770 ret = -ENOMEM;
1771 goto out_free_linereq;
1772 }
1773 }
1774
1775 mutex_init(&lr->config_mutex);
1776 init_waitqueue_head(&lr->wait);
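	/* default to 16 events per line, capped at GPIO_V2_LINES_MAX * 16 in total */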
1777 lr->event_buffer_size = ulr.event_buffer_size;
1778 if (lr->event_buffer_size == 0)
1779 lr->event_buffer_size = ulr.num_lines * 16;
1780 else if (lr->event_buffer_size > GPIO_V2_LINES_MAX * 16)
1781 lr->event_buffer_size = GPIO_V2_LINES_MAX * 16;
1782
1783 atomic_set(&lr->seqno, 0);
1784
1785 /* Request each GPIO */
1786 for (i = 0; i < ulr.num_lines; i++) {
1787 u32 offset = ulr.offsets[i];
1788 struct gpio_desc *desc = gpio_device_get_desc(gdev, offset);
1789
1790 if (IS_ERR(desc)) {
1791 ret = PTR_ERR(desc);
1792 goto out_free_linereq;
1793 }
1794
1795 ret = gpiod_request_user(desc, lr->label);
1796 if (ret)
1797 goto out_free_linereq;
1798
1799 lr->lines[i].desc = desc;
1800 flags = gpio_v2_line_config_flags(lc, i);
1801 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
1802
1803 ret = gpiod_set_transitory(desc, false);
1804 if (ret < 0)
1805 goto out_free_linereq;
1806
1807 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
1808 /*
1809 * Lines have to be requested explicitly for input
1810 * or output, else the line will be treated "as is".
1811 */
1812 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
1813 int val = gpio_v2_line_config_output_value(lc, i);
1814
1815 ret = gpiod_direction_output(desc, val);
1816 if (ret)
1817 goto out_free_linereq;
1818 } else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
1819 ret = gpiod_direction_input(desc);
1820 if (ret)
1821 goto out_free_linereq;
1822
1823 ret = edge_detector_setup(&lr->lines[i], lc, i,
1824 edflags);
1825 if (ret)
1826 goto out_free_linereq;
1827 }
1828
1829 lr->lines[i].edflags = edflags;
1830
1831 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
1832
1833 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
1834 offset);
1835 }
1836
1837 lr->device_unregistered_nb.notifier_call = linereq_unregistered_notify;
1838 ret = blocking_notifier_chain_register(&gdev->device_notifier,
1839 &lr->device_unregistered_nb);
1840 if (ret)
1841 goto out_free_linereq;
1842
1843 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
1844 if (fd < 0) {
1845 ret = fd;
1846 goto out_free_linereq;
1847 }
1848
1849 file = anon_inode_getfile("gpio-line", &line_fileops, lr,
1850 O_RDONLY | O_CLOEXEC);
1851 if (IS_ERR(file)) {
1852 ret = PTR_ERR(file);
1853 goto out_put_unused_fd;
1854 }
1855
1856 ulr.fd = fd;
1857 if (copy_to_user(ip, &ulr, sizeof(ulr))) {
1858 /*
1859 * fput() will trigger the release() callback, so do not go onto
1860 * the regular error cleanup path here.
1861 */
1862 fput(file);
1863 put_unused_fd(fd);
1864 return -EFAULT;
1865 }
1866
1867 fd_install(fd, file);
1868
1869 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
1870 lr->num_lines);
1871
1872 return 0;
1873
1874out_put_unused_fd:
1875 put_unused_fd(fd);
1876out_free_linereq:
1877 linereq_free(lr);
1878 return ret;
1879}
1880
1881#ifdef CONFIG_GPIO_CDEV_V1
1882
1883/*
1884 * GPIO line event management
1885 */
1886
1887/**
1888 * struct lineevent_state - contains the state of a userspace event
1889 * @gdev: the GPIO device the event pertains to
1890 * @label: consumer label used to tag descriptors
1891 * @desc: the GPIO descriptor held by this event
1892 * @eflags: the event flags this line was requested with
 * @irq: the interrupt that triggers in response to events on this GPIO
1894 * @wait: wait queue that handles blocking reads of events
1895 * @device_unregistered_nb: notifier block for receiving gdev unregister events
1896 * @events: KFIFO for the GPIO events
 * @timestamp: cache for the timestamp, carrying it from the hardirq
 * handler to the IRQ thread so that it stays close to the actual
 * event
1900 */
1901struct lineevent_state {
1902 struct gpio_device *gdev;
1903 const char *label;
1904 struct gpio_desc *desc;
1905 u32 eflags;
1906 int irq;
1907 wait_queue_head_t wait;
1908 struct notifier_block device_unregistered_nb;
1909 DECLARE_KFIFO(events, struct gpioevent_data, 16);
1910 u64 timestamp;
1911};
1912
1913#define GPIOEVENT_REQUEST_VALID_FLAGS \
1914 (GPIOEVENT_REQUEST_RISING_EDGE | \
1915 GPIOEVENT_REQUEST_FALLING_EDGE)
1916
1917static __poll_t lineevent_poll(struct file *file,
1918 struct poll_table_struct *wait)
1919{
1920 struct lineevent_state *le = file->private_data;
1921 __poll_t events = 0;
1922
1923 guard(srcu)(&le->gdev->srcu);
1924
1925 if (!rcu_access_pointer(le->gdev->chip))
1926 return EPOLLHUP | EPOLLERR;
1927
1928 poll_wait(file, &le->wait, wait);
1929
1930 if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock))
1931 events = EPOLLIN | EPOLLRDNORM;
1932
1933 return events;
1934}
1935
1936static int lineevent_unregistered_notify(struct notifier_block *nb,
1937 unsigned long action, void *data)
1938{
1939 struct lineevent_state *le = container_of(nb, struct lineevent_state,
1940 device_unregistered_nb);
1941
1942 wake_up_poll(&le->wait, EPOLLIN | EPOLLERR);
1943
1944 return NOTIFY_OK;
1945}
1946
1947struct compat_gpioeevent_data {
1948 compat_u64 timestamp;
1949 u32 id;
1950};
1951
1952static ssize_t lineevent_read(struct file *file, char __user *buf,
1953 size_t count, loff_t *f_ps)
1954{
1955 struct lineevent_state *le = file->private_data;
1956 struct gpioevent_data ge;
1957 ssize_t bytes_read = 0;
1958 ssize_t ge_size;
1959 int ret;
1960
1961 guard(srcu)(&le->gdev->srcu);
1962
1963 if (!rcu_access_pointer(le->gdev->chip))
1964 return -ENODEV;
1965
	/*
	 * When a compat system call is used, struct gpioevent_data has a
	 * different size, at least on ia32, due to alignment differences.
	 * The first member is 64 bits wide and is followed by a 32-bit
	 * member, so there is no gap between them; the only difference is
	 * the padding at the end of the structure. Hence, we calculate the
	 * actual sizeof() and pass this as an argument to copy_to_user() to
	 * drop unneeded bytes from the output.
	 */
1975 if (compat_need_64bit_alignment_fixup())
1976 ge_size = sizeof(struct compat_gpioeevent_data);
1977 else
1978 ge_size = sizeof(struct gpioevent_data);
1979 if (count < ge_size)
1980 return -EINVAL;
1981
1982 do {
1983 scoped_guard(spinlock, &le->wait.lock) {
1984 if (kfifo_is_empty(&le->events)) {
1985 if (bytes_read)
1986 return bytes_read;
1987
1988 if (file->f_flags & O_NONBLOCK)
1989 return -EAGAIN;
1990
1991 ret = wait_event_interruptible_locked(le->wait,
1992 !kfifo_is_empty(&le->events));
1993 if (ret)
1994 return ret;
1995 }
1996
1997 ret = kfifo_out(&le->events, &ge, 1);
1998 }
1999 if (ret != 1) {
2000 /*
2001 * This should never happen - we were holding the lock
2002 * from the moment we learned the fifo is no longer
2003 * empty until now.
2004 */
2005 ret = -EIO;
2006 break;
2007 }
2008
2009 if (copy_to_user(buf + bytes_read, &ge, ge_size))
2010 return -EFAULT;
2011 bytes_read += ge_size;
2012 } while (count >= bytes_read + ge_size);
2013
2014 return bytes_read;
2015}
2016
2017static void lineevent_free(struct lineevent_state *le)
2018{
2019 if (le->device_unregistered_nb.notifier_call)
2020 blocking_notifier_chain_unregister(&le->gdev->device_notifier,
2021 &le->device_unregistered_nb);
2022 if (le->irq)
2023 free_irq_label(free_irq(le->irq, le));
2024 if (le->desc)
2025 gpiod_free(le->desc);
2026 kfree(le->label);
2027 gpio_device_put(le->gdev);
2028 kfree(le);
2029}
2030
2031static int lineevent_release(struct inode *inode, struct file *file)
2032{
2033 lineevent_free(file->private_data);
2034 return 0;
2035}
2036
2037static long lineevent_ioctl(struct file *file, unsigned int cmd,
2038 unsigned long arg)
2039{
2040 struct lineevent_state *le = file->private_data;
2041 void __user *ip = (void __user *)arg;
2042 struct gpiohandle_data ghd;
2043
2044 guard(srcu)(&le->gdev->srcu);
2045
2046 if (!rcu_access_pointer(le->gdev->chip))
2047 return -ENODEV;
2048
2049 /*
2050 * We can get the value for an event line but not set it,
2051 * because it is input by definition.
2052 */
2053 if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
2054 int val;
2055
2056 memset(&ghd, 0, sizeof(ghd));
2057
2058 val = gpiod_get_value_cansleep(le->desc);
2059 if (val < 0)
2060 return val;
2061 ghd.values[0] = val;
2062
2063 if (copy_to_user(ip, &ghd, sizeof(ghd)))
2064 return -EFAULT;
2065
2066 return 0;
2067 }
2068 return -EINVAL;
2069}
2070
2071#ifdef CONFIG_COMPAT
2072static long lineevent_ioctl_compat(struct file *file, unsigned int cmd,
2073 unsigned long arg)
2074{
2075 return lineevent_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
2076}
2077#endif
2078
2079static const struct file_operations lineevent_fileops = {
2080 .release = lineevent_release,
2081 .read = lineevent_read,
2082 .poll = lineevent_poll,
2083 .owner = THIS_MODULE,
2084 .llseek = noop_llseek,
2085 .unlocked_ioctl = lineevent_ioctl,
2086#ifdef CONFIG_COMPAT
2087 .compat_ioctl = lineevent_ioctl_compat,
2088#endif
2089};
2090
2091static irqreturn_t lineevent_irq_thread(int irq, void *p)
2092{
2093 struct lineevent_state *le = p;
2094 struct gpioevent_data ge;
2095 int ret;
2096
2097 /* Do not leak kernel stack to userspace */
2098 memset(&ge, 0, sizeof(ge));
2099
2100 /*
2101 * We may be running from a nested threaded interrupt in which case
2102 * we didn't get the timestamp from lineevent_irq_handler().
2103 */
2104 if (!le->timestamp)
2105 ge.timestamp = ktime_get_ns();
2106 else
2107 ge.timestamp = le->timestamp;
2108
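	/* when both edges are monitored, infer the edge from the current level */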
2109 if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
2110 && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
2111 int level = gpiod_get_value_cansleep(le->desc);
2112
2113 if (level)
2114 /* Emit low-to-high event */
2115 ge.id = GPIOEVENT_EVENT_RISING_EDGE;
2116 else
2117 /* Emit high-to-low event */
2118 ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
2119 } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
2120 /* Emit low-to-high event */
2121 ge.id = GPIOEVENT_EVENT_RISING_EDGE;
2122 } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
2123 /* Emit high-to-low event */
2124 ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
2125 } else {
2126 return IRQ_NONE;
2127 }
2128
2129 ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge,
2130 1, &le->wait.lock);
2131 if (ret)
2132 wake_up_poll(&le->wait, EPOLLIN);
2133 else
2134 pr_debug_ratelimited("event FIFO is full - event dropped\n");
2135
2136 return IRQ_HANDLED;
2137}
2138
2139static irqreturn_t lineevent_irq_handler(int irq, void *p)
2140{
2141 struct lineevent_state *le = p;
2142
2143 /*
2144 * Just store the timestamp in hardirq context so we get it as
2145 * close in time as possible to the actual event.
2146 */
2147 le->timestamp = ktime_get_ns();
2148
2149 return IRQ_WAKE_THREAD;
2150}
2151
2152static int lineevent_create(struct gpio_device *gdev, void __user *ip)
2153{
2154 struct gpioevent_request eventreq;
2155 struct lineevent_state *le;
2156 struct gpio_desc *desc;
2157 struct file *file;
2158 u32 offset;
2159 u32 lflags;
2160 u32 eflags;
2161 int fd;
2162 int ret;
2163 int irq, irqflags = 0;
2164 char *label;
2165
2166 if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
2167 return -EFAULT;
2168
2169 offset = eventreq.lineoffset;
2170 lflags = eventreq.handleflags;
2171 eflags = eventreq.eventflags;
2172
2173 desc = gpio_device_get_desc(gdev, offset);
2174 if (IS_ERR(desc))
2175 return PTR_ERR(desc);
2176
	/* Return an error if an unknown flag is set */
2178 if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
2179 (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS))
2180 return -EINVAL;
2181
2182 /* This is just wrong: we don't look for events on output lines */
2183 if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
2184 (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
2185 (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
2186 return -EINVAL;
2187
2188 /* Only one bias flag can be set. */
2189 if (((lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
2190 (lflags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
2191 GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
2192 ((lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
2193 (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
2194 return -EINVAL;
2195
2196 le = kzalloc(sizeof(*le), GFP_KERNEL);
2197 if (!le)
2198 return -ENOMEM;
2199 le->gdev = gpio_device_get(gdev);
2200
2201 if (eventreq.consumer_label[0] != '\0') {
2202 /* label is only initialized if consumer_label is set */
2203 le->label = kstrndup(eventreq.consumer_label,
2204 sizeof(eventreq.consumer_label) - 1,
2205 GFP_KERNEL);
2206 if (!le->label) {
2207 ret = -ENOMEM;
2208 goto out_free_le;
2209 }
2210 }
2211
2212 ret = gpiod_request_user(desc, le->label);
2213 if (ret)
2214 goto out_free_le;
2215 le->desc = desc;
2216 le->eflags = eflags;
2217
2218 linehandle_flags_to_desc_flags(lflags, &desc->flags);
2219
2220 ret = gpiod_direction_input(desc);
2221 if (ret)
2222 goto out_free_le;
2223
2224 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
2225
2226 irq = gpiod_to_irq(desc);
2227 if (irq <= 0) {
2228 ret = -ENODEV;
2229 goto out_free_le;
2230 }
2231
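	/* map the requested logical edges to physical IRQ triggers */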
2232 if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
2233 irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
2234 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
2235 if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
2236 irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
2237 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
2238 irqflags |= IRQF_ONESHOT;
2239
2240 INIT_KFIFO(le->events);
2241 init_waitqueue_head(&le->wait);
2242
2243 le->device_unregistered_nb.notifier_call = lineevent_unregistered_notify;
2244 ret = blocking_notifier_chain_register(&gdev->device_notifier,
2245 &le->device_unregistered_nb);
2246 if (ret)
2247 goto out_free_le;
2248
2249 label = make_irq_label(le->label);
2250 if (IS_ERR(label)) {
2251 ret = PTR_ERR(label);
2252 goto out_free_le;
2253 }
2254
2255 /* Request a thread to read the events */
2256 ret = request_threaded_irq(irq,
2257 lineevent_irq_handler,
2258 lineevent_irq_thread,
2259 irqflags,
2260 label,
2261 le);
2262 if (ret) {
2263 free_irq_label(label);
2264 goto out_free_le;
2265 }
2266
2267 le->irq = irq;
2268
2269 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
2270 if (fd < 0) {
2271 ret = fd;
2272 goto out_free_le;
2273 }
2274
2275 file = anon_inode_getfile("gpio-event",
2276 &lineevent_fileops,
2277 le,
2278 O_RDONLY | O_CLOEXEC);
2279 if (IS_ERR(file)) {
2280 ret = PTR_ERR(file);
2281 goto out_put_unused_fd;
2282 }
2283
2284 eventreq.fd = fd;
2285 if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
2286 /*
2287 * fput() will trigger the release() callback, so do not go onto
2288 * the regular error cleanup path here.
2289 */
2290 fput(file);
2291 put_unused_fd(fd);
2292 return -EFAULT;
2293 }
2294
2295 fd_install(fd, file);
2296
2297 return 0;
2298
2299out_put_unused_fd:
2300 put_unused_fd(fd);
2301out_free_le:
2302 lineevent_free(le);
2303 return ret;
2304}
2305
2306static void gpio_v2_line_info_to_v1(struct gpio_v2_line_info *info_v2,
2307 struct gpioline_info *info_v1)
2308{
2309 u64 flagsv2 = info_v2->flags;
2310
2311 memcpy(info_v1->name, info_v2->name, sizeof(info_v1->name));
2312 memcpy(info_v1->consumer, info_v2->consumer, sizeof(info_v1->consumer));
2313 info_v1->line_offset = info_v2->offset;
2314 info_v1->flags = 0;
2315
2316 if (flagsv2 & GPIO_V2_LINE_FLAG_USED)
2317 info_v1->flags |= GPIOLINE_FLAG_KERNEL;
2318
2319 if (flagsv2 & GPIO_V2_LINE_FLAG_OUTPUT)
2320 info_v1->flags |= GPIOLINE_FLAG_IS_OUT;
2321
2322 if (flagsv2 & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
2323 info_v1->flags |= GPIOLINE_FLAG_ACTIVE_LOW;
2324
2325 if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_DRAIN)
2326 info_v1->flags |= GPIOLINE_FLAG_OPEN_DRAIN;
2327 if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_SOURCE)
2328 info_v1->flags |= GPIOLINE_FLAG_OPEN_SOURCE;
2329
2330 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)
2331 info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
2332 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN)
2333 info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
2334 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_DISABLED)
2335 info_v1->flags |= GPIOLINE_FLAG_BIAS_DISABLE;
2336}
2337
2338static void gpio_v2_line_info_changed_to_v1(
2339 struct gpio_v2_line_info_changed *lic_v2,
2340 struct gpioline_info_changed *lic_v1)
2341{
2342 memset(lic_v1, 0, sizeof(*lic_v1));
2343 gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
2344 lic_v1->timestamp = lic_v2->timestamp_ns;
2345 lic_v1->event_type = lic_v2->event_type;
2346}
2347
2348#endif /* CONFIG_GPIO_CDEV_V1 */
2349
2350static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
2351 struct gpio_v2_line_info *info)
2352{
2353 unsigned long dflags;
2354 const char *label;
2355
2356 CLASS(gpio_chip_guard, guard)(desc);
2357 if (!guard.gc)
2358 return;
2359
2360 memset(info, 0, sizeof(*info));
2361 info->offset = gpio_chip_hwgpio(desc);
2362
2363 if (desc->name)
2364 strscpy(info->name, desc->name, sizeof(info->name));
2365
2366 dflags = READ_ONCE(desc->flags);
2367
2368 scoped_guard(srcu, &desc->gdev->desc_srcu) {
2369 label = gpiod_get_label(desc);
2370 if (label && test_bit(FLAG_REQUESTED, &dflags))
2371 strscpy(info->consumer, label,
2372 sizeof(info->consumer));
2373 }
2374
	/*
	 * Userspace only needs to know that the kernel is using this GPIO so
	 * it can't use it.
	 * The calculation of the used flag is slightly racy, as it may read
	 * desc, gc and pinctrl state without a lock covering all three at
	 * once. In the worst case, if the line is in transition and the
	 * calculation is inconsistent, it looks to the user as if they
	 * performed the read on the other side of the transition - but that
	 * can always happen.
	 * The definitive test that a line is available to userspace is to
	 * request it.
	 */
2387 if (test_bit(FLAG_REQUESTED, &dflags) ||
2388 test_bit(FLAG_IS_HOGGED, &dflags) ||
2389 test_bit(FLAG_USED_AS_IRQ, &dflags) ||
2390 test_bit(FLAG_EXPORT, &dflags) ||
2391 test_bit(FLAG_SYSFS, &dflags) ||
2392 !gpiochip_line_is_valid(guard.gc, info->offset) ||
2393 !pinctrl_gpio_can_use_line(guard.gc, info->offset))
2394 info->flags |= GPIO_V2_LINE_FLAG_USED;
2395
2396 if (test_bit(FLAG_IS_OUT, &dflags))
2397 info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
2398 else
2399 info->flags |= GPIO_V2_LINE_FLAG_INPUT;
2400
2401 if (test_bit(FLAG_ACTIVE_LOW, &dflags))
2402 info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;
2403
2404 if (test_bit(FLAG_OPEN_DRAIN, &dflags))
2405 info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
2406 if (test_bit(FLAG_OPEN_SOURCE, &dflags))
2407 info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;
2408
2409 if (test_bit(FLAG_BIAS_DISABLE, &dflags))
2410 info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
2411 if (test_bit(FLAG_PULL_DOWN, &dflags))
2412 info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
2413 if (test_bit(FLAG_PULL_UP, &dflags))
2414 info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;
2415
2416 if (test_bit(FLAG_EDGE_RISING, &dflags))
2417 info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
2418 if (test_bit(FLAG_EDGE_FALLING, &dflags))
2419 info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;
2420
2421 if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &dflags))
2422 info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
2423 else if (test_bit(FLAG_EVENT_CLOCK_HTE, &dflags))
2424 info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
2425}
2426
2427struct gpio_chardev_data {
2428 struct gpio_device *gdev;
2429 wait_queue_head_t wait;
2430 DECLARE_KFIFO(events, struct gpio_v2_line_info_changed, 32);
2431 struct notifier_block lineinfo_changed_nb;
2432 struct notifier_block device_unregistered_nb;
2433 unsigned long *watched_lines;
2434#ifdef CONFIG_GPIO_CDEV_V1
2435 atomic_t watch_abi_version;
2436#endif
2437};
2438
2439static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
2440{
2441 struct gpio_device *gdev = cdev->gdev;
2442 struct gpiochip_info chipinfo;
2443
2444 memset(&chipinfo, 0, sizeof(chipinfo));
2445
2446 strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
2447 strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
2448 chipinfo.lines = gdev->ngpio;
2449 if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
2450 return -EFAULT;
2451 return 0;
2452}
2453
2454#ifdef CONFIG_GPIO_CDEV_V1
2455/*
2456 * returns 0 if the versions match, else the previously selected ABI version
2457 */
2458static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
2459 unsigned int version)
2460{
2461 int abiv = atomic_cmpxchg(&cdata->watch_abi_version, 0, version);
2462
2463 if (abiv == version)
2464 return 0;
2465
2466 return abiv;
2467}
2468
2469static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
2470 bool watch)
2471{
2472 struct gpio_desc *desc;
2473 struct gpioline_info lineinfo;
2474 struct gpio_v2_line_info lineinfo_v2;
2475
2476 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
2477 return -EFAULT;
2478
2479 /* this doubles as a range check on line_offset */
2480 desc = gpio_device_get_desc(cdev->gdev, lineinfo.line_offset);
2481 if (IS_ERR(desc))
2482 return PTR_ERR(desc);
2483
2484 if (watch) {
2485 if (lineinfo_ensure_abi_version(cdev, 1))
2486 return -EPERM;
2487
2488 if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
2489 return -EBUSY;
2490 }
2491
2492 gpio_desc_to_lineinfo(desc, &lineinfo_v2);
2493 gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
2494
2495 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
2496 if (watch)
2497 clear_bit(lineinfo.line_offset, cdev->watched_lines);
2498 return -EFAULT;
2499 }
2500
2501 return 0;
2502}
2503#endif
2504
2505static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
2506 bool watch)
2507{
2508 struct gpio_desc *desc;
2509 struct gpio_v2_line_info lineinfo;
2510
2511 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
2512 return -EFAULT;
2513
2514 if (memchr_inv(lineinfo.padding, 0, sizeof(lineinfo.padding)))
2515 return -EINVAL;
2516
2517 desc = gpio_device_get_desc(cdev->gdev, lineinfo.offset);
2518 if (IS_ERR(desc))
2519 return PTR_ERR(desc);
2520
2521 if (watch) {
2522#ifdef CONFIG_GPIO_CDEV_V1
2523 if (lineinfo_ensure_abi_version(cdev, 2))
2524 return -EPERM;
2525#endif
2526 if (test_and_set_bit(lineinfo.offset, cdev->watched_lines))
2527 return -EBUSY;
2528 }
2529 gpio_desc_to_lineinfo(desc, &lineinfo);
2530 supinfo_to_lineinfo(desc, &lineinfo);
2531
2532 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
2533 if (watch)
2534 clear_bit(lineinfo.offset, cdev->watched_lines);
2535 return -EFAULT;
2536 }
2537
2538 return 0;
2539}
2540
2541static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
2542{
2543 __u32 offset;
2544
2545 if (copy_from_user(&offset, ip, sizeof(offset)))
2546 return -EFAULT;
2547
2548 if (offset >= cdev->gdev->ngpio)
2549 return -EINVAL;
2550
2551 if (!test_and_clear_bit(offset, cdev->watched_lines))
2552 return -EBUSY;
2553
2554 return 0;
2555}
2556
2557/*
2558 * gpio_ioctl() - ioctl handler for the GPIO chardev
2559 */
2560static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2561{
2562 struct gpio_chardev_data *cdev = file->private_data;
2563 struct gpio_device *gdev = cdev->gdev;
2564 void __user *ip = (void __user *)arg;
2565
2566 guard(srcu)(&gdev->srcu);
2567
	/* We fail any subsequent ioctl()s when the chip is gone */
2569 if (!rcu_access_pointer(gdev->chip))
2570 return -ENODEV;
2571
2572 /* Fill in the struct and pass to userspace */
2573 switch (cmd) {
2574 case GPIO_GET_CHIPINFO_IOCTL:
2575 return chipinfo_get(cdev, ip);
2576#ifdef CONFIG_GPIO_CDEV_V1
2577 case GPIO_GET_LINEHANDLE_IOCTL:
2578 return linehandle_create(gdev, ip);
2579 case GPIO_GET_LINEEVENT_IOCTL:
2580 return lineevent_create(gdev, ip);
2581 case GPIO_GET_LINEINFO_IOCTL:
2582 return lineinfo_get_v1(cdev, ip, false);
2583 case GPIO_GET_LINEINFO_WATCH_IOCTL:
2584 return lineinfo_get_v1(cdev, ip, true);
2585#endif /* CONFIG_GPIO_CDEV_V1 */
2586 case GPIO_V2_GET_LINEINFO_IOCTL:
2587 return lineinfo_get(cdev, ip, false);
2588 case GPIO_V2_GET_LINEINFO_WATCH_IOCTL:
2589 return lineinfo_get(cdev, ip, true);
2590 case GPIO_V2_GET_LINE_IOCTL:
2591 return linereq_create(gdev, ip);
2592 case GPIO_GET_LINEINFO_UNWATCH_IOCTL:
2593 return lineinfo_unwatch(cdev, ip);
2594 default:
2595 return -EINVAL;
2596 }
2597}
2598
2599#ifdef CONFIG_COMPAT
2600static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
2601 unsigned long arg)
2602{
2603 return gpio_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
2604}
2605#endif
2606
2607static int lineinfo_changed_notify(struct notifier_block *nb,
2608 unsigned long action, void *data)
2609{
2610 struct gpio_chardev_data *cdev =
2611 container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
2612 struct gpio_v2_line_info_changed chg;
2613 struct gpio_desc *desc = data;
2614 int ret;
2615
2616 if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
2617 return NOTIFY_DONE;
2618
2619 memset(&chg, 0, sizeof(chg));
2620 chg.event_type = action;
2621 chg.timestamp_ns = ktime_get_ns();
2622 gpio_desc_to_lineinfo(desc, &chg.info);
2623 supinfo_to_lineinfo(desc, &chg.info);
2624
2625 ret = kfifo_in_spinlocked(&cdev->events, &chg, 1, &cdev->wait.lock);
2626 if (ret)
2627 wake_up_poll(&cdev->wait, EPOLLIN);
2628 else
2629 pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");
2630
2631 return NOTIFY_OK;
2632}
2633
2634static int gpio_device_unregistered_notify(struct notifier_block *nb,
2635 unsigned long action, void *data)
2636{
2637 struct gpio_chardev_data *cdev = container_of(nb,
2638 struct gpio_chardev_data,
2639 device_unregistered_nb);
2640
2641 wake_up_poll(&cdev->wait, EPOLLIN | EPOLLERR);
2642
2643 return NOTIFY_OK;
2644}
2645
2646static __poll_t lineinfo_watch_poll(struct file *file,
2647 struct poll_table_struct *pollt)
2648{
2649 struct gpio_chardev_data *cdev = file->private_data;
2650 __poll_t events = 0;
2651
2652 guard(srcu)(&cdev->gdev->srcu);
2653
2654 if (!rcu_access_pointer(cdev->gdev->chip))
2655 return EPOLLHUP | EPOLLERR;
2656
2657 poll_wait(file, &cdev->wait, pollt);
2658
2659 if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events,
2660 &cdev->wait.lock))
2661 events = EPOLLIN | EPOLLRDNORM;
2662
2663 return events;
2664}
2665
2666static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
2667 size_t count, loff_t *off)
2668{
2669 struct gpio_chardev_data *cdev = file->private_data;
2670 struct gpio_v2_line_info_changed event;
2671 ssize_t bytes_read = 0;
2672 int ret;
2673 size_t event_size;
2674
2675 guard(srcu)(&cdev->gdev->srcu);
2676
2677 if (!rcu_access_pointer(cdev->gdev->chip))
2678 return -ENODEV;
2679
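	/*
	 * Without v1 support the event size is fixed, so count can be
	 * validated up front. With v1 it depends on the ABI version selected
	 * by the watch request, so it is checked inside the loop below.
	 */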
2680#ifndef CONFIG_GPIO_CDEV_V1
2681 event_size = sizeof(struct gpio_v2_line_info_changed);
2682 if (count < event_size)
2683 return -EINVAL;
2684#endif
2685
2686 do {
2687 scoped_guard(spinlock, &cdev->wait.lock) {
2688 if (kfifo_is_empty(&cdev->events)) {
2689 if (bytes_read)
2690 return bytes_read;
2691
2692 if (file->f_flags & O_NONBLOCK)
2693 return -EAGAIN;
2694
2695 ret = wait_event_interruptible_locked(cdev->wait,
2696 !kfifo_is_empty(&cdev->events));
2697 if (ret)
2698 return ret;
2699 }
2700#ifdef CONFIG_GPIO_CDEV_V1
2701 /* must be after kfifo check so watch_abi_version is set */
2702 if (atomic_read(&cdev->watch_abi_version) == 2)
2703 event_size = sizeof(struct gpio_v2_line_info_changed);
2704 else
2705 event_size = sizeof(struct gpioline_info_changed);
2706 if (count < event_size)
2707 return -EINVAL;
2708#endif
2709 ret = kfifo_out(&cdev->events, &event, 1);
2710 }
		if (ret != 1) {
			/* We should never get here. See lineevent_read(). */
			ret = -EIO;
			break;
		}
2716
2717#ifdef CONFIG_GPIO_CDEV_V1
2718 if (event_size == sizeof(struct gpio_v2_line_info_changed)) {
2719 if (copy_to_user(buf + bytes_read, &event, event_size))
2720 return -EFAULT;
2721 } else {
2722 struct gpioline_info_changed event_v1;
2723
2724 gpio_v2_line_info_changed_to_v1(&event, &event_v1);
2725 if (copy_to_user(buf + bytes_read, &event_v1,
2726 event_size))
2727 return -EFAULT;
2728 }
2729#else
2730 if (copy_to_user(buf + bytes_read, &event, event_size))
2731 return -EFAULT;
2732#endif
2733 bytes_read += event_size;
2734 } while (count >= bytes_read + sizeof(event));
2735
2736 return bytes_read;
2737}
2738
2739/**
2740 * gpio_chrdev_open() - open the chardev for ioctl operations
2741 * @inode: inode for this chardev
2742 * @file: file struct for storing private data
2743 * Returns 0 on success
2744 */
2745static int gpio_chrdev_open(struct inode *inode, struct file *file)
2746{
2747 struct gpio_device *gdev = container_of(inode->i_cdev,
2748 struct gpio_device, chrdev);
2749 struct gpio_chardev_data *cdev;
2750 int ret = -ENOMEM;
2751
2752 guard(srcu)(&gdev->srcu);
2753
2754 /* Fail on open if the backing gpiochip is gone */
2755 if (!rcu_access_pointer(gdev->chip))
2756 return -ENODEV;
2757
2758 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
2759 if (!cdev)
		return -ENOMEM;
2761
2762 cdev->watched_lines = bitmap_zalloc(gdev->ngpio, GFP_KERNEL);
2763 if (!cdev->watched_lines)
2764 goto out_free_cdev;
2765
2766 init_waitqueue_head(&cdev->wait);
2767 INIT_KFIFO(cdev->events);
2768 cdev->gdev = gpio_device_get(gdev);
2769
2770 cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
2771 ret = blocking_notifier_chain_register(&gdev->line_state_notifier,
2772 &cdev->lineinfo_changed_nb);
2773 if (ret)
2774 goto out_free_bitmap;
2775
2776 cdev->device_unregistered_nb.notifier_call =
2777 gpio_device_unregistered_notify;
2778 ret = blocking_notifier_chain_register(&gdev->device_notifier,
2779 &cdev->device_unregistered_nb);
2780 if (ret)
2781 goto out_unregister_line_notifier;
2782
2783 file->private_data = cdev;
2784
2785 ret = nonseekable_open(inode, file);
2786 if (ret)
2787 goto out_unregister_device_notifier;
2788
2789 return ret;
2790
2791out_unregister_device_notifier:
2792 blocking_notifier_chain_unregister(&gdev->device_notifier,
2793 &cdev->device_unregistered_nb);
2794out_unregister_line_notifier:
2795 blocking_notifier_chain_unregister(&gdev->line_state_notifier,
2796 &cdev->lineinfo_changed_nb);
2797out_free_bitmap:
2798 gpio_device_put(gdev);
2799 bitmap_free(cdev->watched_lines);
2800out_free_cdev:
2801 kfree(cdev);
2802 return ret;
2803}
2804
2805/**
2806 * gpio_chrdev_release() - close chardev after ioctl operations
2807 * @inode: inode for this chardev
2808 * @file: file struct for storing private data
2809 * Returns 0 on success
2810 */
2811static int gpio_chrdev_release(struct inode *inode, struct file *file)
2812{
2813 struct gpio_chardev_data *cdev = file->private_data;
2814 struct gpio_device *gdev = cdev->gdev;
2815
2816 blocking_notifier_chain_unregister(&gdev->device_notifier,
2817 &cdev->device_unregistered_nb);
2818 blocking_notifier_chain_unregister(&gdev->line_state_notifier,
2819 &cdev->lineinfo_changed_nb);
2820 bitmap_free(cdev->watched_lines);
2821 gpio_device_put(gdev);
2822 kfree(cdev);
2823
2824 return 0;
2825}
2826
2827static const struct file_operations gpio_fileops = {
2828 .release = gpio_chrdev_release,
2829 .open = gpio_chrdev_open,
2830 .poll = lineinfo_watch_poll,
2831 .read = lineinfo_watch_read,
2832 .owner = THIS_MODULE,
2833 .llseek = no_llseek,
2834 .unlocked_ioctl = gpio_ioctl,
2835#ifdef CONFIG_COMPAT
2836 .compat_ioctl = gpio_ioctl_compat,
2837#endif
2838};
2839
2840int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
2841{
2842 struct gpio_chip *gc;
2843 int ret;
2844
2845 cdev_init(&gdev->chrdev, &gpio_fileops);
2846 gdev->chrdev.owner = THIS_MODULE;
2847 gdev->dev.devt = MKDEV(MAJOR(devt), gdev->id);
2848
2849 ret = cdev_device_add(&gdev->chrdev, &gdev->dev);
2850 if (ret)
2851 return ret;
2852
2853 guard(srcu)(&gdev->srcu);
2854 gc = srcu_dereference(gdev->chip, &gdev->srcu);
2855 if (!gc)
2856 return -ENODEV;
2857
2858 chip_dbg(gc, "added GPIO chardev (%d:%d)\n", MAJOR(devt), gdev->id);
2859
2860 return 0;
2861}
2862
2863void gpiolib_cdev_unregister(struct gpio_device *gdev)
2864{
2865 cdev_device_del(&gdev->chrdev, &gdev->dev);
2866 blocking_notifier_call_chain(&gdev->device_notifier, 0, NULL);
2867}