1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * V4L2 sub-device
4 *
5 * Copyright (C) 2010 Nokia Corporation
6 *
7 * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
8 * Sakari Ailus <sakari.ailus@iki.fi>
9 */
10
11#include <linux/export.h>
12#include <linux/ioctl.h>
13#include <linux/leds.h>
14#include <linux/mm.h>
15#include <linux/module.h>
16#include <linux/overflow.h>
17#include <linux/slab.h>
18#include <linux/string.h>
19#include <linux/types.h>
20#include <linux/version.h>
21#include <linux/videodev2.h>
22
23#include <media/v4l2-ctrls.h>
24#include <media/v4l2-device.h>
25#include <media/v4l2-event.h>
26#include <media/v4l2-fh.h>
27#include <media/v4l2-ioctl.h>
28
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
/*
 * The Streams API is an experimental feature. To use the Streams API, set
 * 'v4l2_subdev_enable_streams_api' to 1 below. It defaults to false (off).
 */

static bool v4l2_subdev_enable_streams_api;
#endif

/*
 * Maximum stream ID is 63 for now, as we use u64 bitmask to represent a set
 * of streams.
 *
 * Note that V4L2_FRAME_DESC_ENTRY_MAX is related: V4L2_FRAME_DESC_ENTRY_MAX
 * restricts the total number of streams in a pad, although the stream ID is
 * not restricted.
 */
#define V4L2_SUBDEV_MAX_STREAM_ID 63
47
48#include "v4l2-subdev-priv.h"
49
50#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
51static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
52{
53 struct v4l2_subdev_state *state;
54 static struct lock_class_key key;
55
56 state = __v4l2_subdev_state_alloc(sd, "fh->state->lock", &key);
57 if (IS_ERR(state))
58 return PTR_ERR(state);
59
60 fh->state = state;
61
62 return 0;
63}
64
/* Release the TRY state allocated by subdev_fh_init(). */
static void subdev_fh_free(struct v4l2_subdev_fh *fh)
{
	__v4l2_subdev_state_free(fh->state);
	/* Clear the pointer so a stale fh cannot reach the freed state. */
	fh->state = NULL;
}
70
/*
 * Open a subdev device node: allocate and initialise a file handle, pin the
 * owning module (when the subdev is part of a media device), and call the
 * driver's optional .open() internal op.
 */
static int subdev_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_subdev_fh *subdev_fh;
	int ret;

	/* kzalloc() zeroes the fh, so subdev_fh->owner starts out NULL. */
	subdev_fh = kzalloc(sizeof(*subdev_fh), GFP_KERNEL);
	if (subdev_fh == NULL)
		return -ENOMEM;

	ret = subdev_fh_init(subdev_fh, sd);
	if (ret) {
		kfree(subdev_fh);
		return ret;
	}

	v4l2_fh_init(&subdev_fh->vfh, vdev);
	v4l2_fh_add(&subdev_fh->vfh);
	file->private_data = &subdev_fh->vfh;

	if (sd->v4l2_dev->mdev && sd->entity.graph_obj.mdev->dev) {
		struct module *owner;

		/* Keep the driver module alive while this fh exists. */
		owner = sd->entity.graph_obj.mdev->dev->driver->owner;
		if (!try_module_get(owner)) {
			ret = -EBUSY;
			goto err;
		}
		subdev_fh->owner = owner;
	}

	if (sd->internal_ops && sd->internal_ops->open) {
		ret = sd->internal_ops->open(sd, subdev_fh);
		if (ret < 0)
			goto err;
	}

	return 0;

err:
	/* owner is still NULL here if try_module_get() was never reached. */
	module_put(subdev_fh->owner);
	v4l2_fh_del(&subdev_fh->vfh);
	v4l2_fh_exit(&subdev_fh->vfh);
	subdev_fh_free(subdev_fh);
	kfree(subdev_fh);

	return ret;
}
120
/*
 * Release a subdev device node file handle: call the driver's optional
 * .close() internal op, drop the module reference taken at open time, and
 * tear down the file handle and its TRY state.
 */
static int subdev_close(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file->private_data;
	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);

	if (sd->internal_ops && sd->internal_ops->close)
		sd->internal_ops->close(sd, subdev_fh);
	module_put(subdev_fh->owner);
	v4l2_fh_del(vfh);
	v4l2_fh_exit(vfh);
	subdev_fh_free(subdev_fh);
	kfree(subdev_fh);
	file->private_data = NULL;

	return 0;
}
139#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
/* Subdev device nodes are unsupported without CONFIG_VIDEO_V4L2_SUBDEV_API. */
static int subdev_open(struct file *file)
{
	return -ENODEV;
}
144
/* Counterpart of the stub subdev_open() above. */
static int subdev_close(struct file *file)
{
	return -ENODEV;
}
149#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
150
151static inline int check_which(u32 which)
152{
153 if (which != V4L2_SUBDEV_FORMAT_TRY &&
154 which != V4L2_SUBDEV_FORMAT_ACTIVE)
155 return -EINVAL;
156
157 return 0;
158}
159
/*
 * Validate a pad index against the entity's pad count. Subdevs that are not
 * registered as media entities (num_pads == 0) only accept pad 0.
 */
static inline int check_pad(struct v4l2_subdev *sd, u32 pad)
{
#if defined(CONFIG_MEDIA_CONTROLLER)
	if (sd->entity.num_pads) {
		if (pad >= sd->entity.num_pads)
			return -EINVAL;
		return 0;
	}
#endif
	/* allow pad 0 on subdevices not registered as media entities */
	if (pad > 0)
		return -EINVAL;
	return 0;
}
174
/*
 * Validate that (pad, stream) can be resolved in @state for the given
 * 'which'. Streams-aware subdevs require an existing per-stream format;
 * legacy subdevs only accept stream 0, and TRY access needs a state with
 * allocated pad configs.
 */
static int check_state(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
		       u32 which, u32 pad, u32 stream)
{
	if (sd->flags & V4L2_SUBDEV_FL_STREAMS) {
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
		/* The route/stream must exist in the state. */
		if (!v4l2_subdev_state_get_format(state, pad, stream))
			return -EINVAL;
		return 0;
#else
		return -EINVAL;
#endif
	}

	/* Non-streams subdevs only implement stream 0. */
	if (stream != 0)
		return -EINVAL;

	if (which == V4L2_SUBDEV_FORMAT_TRY && (!state || !state->pads))
		return -EINVAL;

	return 0;
}
196
197static inline int check_format(struct v4l2_subdev *sd,
198 struct v4l2_subdev_state *state,
199 struct v4l2_subdev_format *format)
200{
201 if (!format)
202 return -EINVAL;
203
204 return check_which(format->which) ? : check_pad(sd, format->pad) ? :
205 check_state(sd, state, format->which, format->pad, format->stream);
206}
207
208static int call_get_fmt(struct v4l2_subdev *sd,
209 struct v4l2_subdev_state *state,
210 struct v4l2_subdev_format *format)
211{
212 return check_format(sd, state, format) ? :
213 sd->ops->pad->get_fmt(sd, state, format);
214}
215
216static int call_set_fmt(struct v4l2_subdev *sd,
217 struct v4l2_subdev_state *state,
218 struct v4l2_subdev_format *format)
219{
220 return check_format(sd, state, format) ? :
221 sd->ops->pad->set_fmt(sd, state, format);
222}
223
224static int call_enum_mbus_code(struct v4l2_subdev *sd,
225 struct v4l2_subdev_state *state,
226 struct v4l2_subdev_mbus_code_enum *code)
227{
228 if (!code)
229 return -EINVAL;
230
231 return check_which(code->which) ? : check_pad(sd, code->pad) ? :
232 check_state(sd, state, code->which, code->pad, code->stream) ? :
233 sd->ops->pad->enum_mbus_code(sd, state, code);
234}
235
236static int call_enum_frame_size(struct v4l2_subdev *sd,
237 struct v4l2_subdev_state *state,
238 struct v4l2_subdev_frame_size_enum *fse)
239{
240 if (!fse)
241 return -EINVAL;
242
243 return check_which(fse->which) ? : check_pad(sd, fse->pad) ? :
244 check_state(sd, state, fse->which, fse->pad, fse->stream) ? :
245 sd->ops->pad->enum_frame_size(sd, state, fse);
246}
247
248static int call_enum_frame_interval(struct v4l2_subdev *sd,
249 struct v4l2_subdev_state *state,
250 struct v4l2_subdev_frame_interval_enum *fie)
251{
252 if (!fie)
253 return -EINVAL;
254
255 return check_which(fie->which) ? : check_pad(sd, fie->pad) ? :
256 check_state(sd, state, fie->which, fie->pad, fie->stream) ? :
257 sd->ops->pad->enum_frame_interval(sd, state, fie);
258}
259
260static inline int check_selection(struct v4l2_subdev *sd,
261 struct v4l2_subdev_state *state,
262 struct v4l2_subdev_selection *sel)
263{
264 if (!sel)
265 return -EINVAL;
266
267 return check_which(sel->which) ? : check_pad(sd, sel->pad) ? :
268 check_state(sd, state, sel->which, sel->pad, sel->stream);
269}
270
271static int call_get_selection(struct v4l2_subdev *sd,
272 struct v4l2_subdev_state *state,
273 struct v4l2_subdev_selection *sel)
274{
275 return check_selection(sd, state, sel) ? :
276 sd->ops->pad->get_selection(sd, state, sel);
277}
278
279static int call_set_selection(struct v4l2_subdev *sd,
280 struct v4l2_subdev_state *state,
281 struct v4l2_subdev_selection *sel)
282{
283 return check_selection(sd, state, sel) ? :
284 sd->ops->pad->set_selection(sd, state, sel);
285}
286
287static inline int check_frame_interval(struct v4l2_subdev *sd,
288 struct v4l2_subdev_state *state,
289 struct v4l2_subdev_frame_interval *fi)
290{
291 if (!fi)
292 return -EINVAL;
293
294 return check_which(fi->which) ? : check_pad(sd, fi->pad) ? :
295 check_state(sd, state, fi->which, fi->pad, fi->stream);
296}
297
298static int call_get_frame_interval(struct v4l2_subdev *sd,
299 struct v4l2_subdev_state *state,
300 struct v4l2_subdev_frame_interval *fi)
301{
302 return check_frame_interval(sd, state, fi) ? :
303 sd->ops->pad->get_frame_interval(sd, state, fi);
304}
305
306static int call_set_frame_interval(struct v4l2_subdev *sd,
307 struct v4l2_subdev_state *state,
308 struct v4l2_subdev_frame_interval *fi)
309{
310 return check_frame_interval(sd, state, fi) ? :
311 sd->ops->pad->set_frame_interval(sd, state, fi);
312}
313
/*
 * Call the driver's .get_frame_desc() pad op on a zeroed descriptor and log
 * the returned entries for debugging. Returns the op's error, or 0.
 */
static int call_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
			       struct v4l2_mbus_frame_desc *fd)
{
	unsigned int i;
	int ret;

	/* Start from a clean descriptor so stale fields cannot leak through. */
	memset(fd, 0, sizeof(*fd));

	ret = sd->ops->pad->get_frame_desc(sd, pad, fd);
	if (ret)
		return ret;

	dev_dbg(sd->dev, "Frame descriptor on pad %u, type %s\n", pad,
		fd->type == V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL ? "parallel" :
		fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2 ? "CSI-2" :
		"unknown");

	for (i = 0; i < fd->num_entries; i++) {
		struct v4l2_mbus_frame_desc_entry *entry = &fd->entry[i];
		char buf[20] = "";

		/* WARN if the vc/dt suffix would not fit in buf (truncation). */
		if (fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2)
			WARN_ON(snprintf(buf, sizeof(buf),
					 ", vc %u, dt 0x%02x",
					 entry->bus.csi2.vc,
					 entry->bus.csi2.dt) >= sizeof(buf));

		dev_dbg(sd->dev,
			"\tstream %u, code 0x%04x, length %u, flags 0x%04x%s\n",
			entry->stream, entry->pixelcode, entry->length,
			entry->flags, buf);
	}

	return 0;
}
349
350static inline int check_edid(struct v4l2_subdev *sd,
351 struct v4l2_subdev_edid *edid)
352{
353 if (!edid)
354 return -EINVAL;
355
356 if (edid->blocks && edid->edid == NULL)
357 return -EINVAL;
358
359 return check_pad(sd, edid->pad);
360}
361
362static int call_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
363{
364 return check_edid(sd, edid) ? : sd->ops->pad->get_edid(sd, edid);
365}
366
367static int call_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
368{
369 return check_edid(sd, edid) ? : sd->ops->pad->set_edid(sd, edid);
370}
371
372static int call_dv_timings_cap(struct v4l2_subdev *sd,
373 struct v4l2_dv_timings_cap *cap)
374{
375 if (!cap)
376 return -EINVAL;
377
378 return check_pad(sd, cap->pad) ? :
379 sd->ops->pad->dv_timings_cap(sd, cap);
380}
381
382static int call_enum_dv_timings(struct v4l2_subdev *sd,
383 struct v4l2_enum_dv_timings *dvt)
384{
385 if (!dvt)
386 return -EINVAL;
387
388 return check_pad(sd, dvt->pad) ? :
389 sd->ops->pad->enum_dv_timings(sd, dvt);
390}
391
392static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
393 struct v4l2_mbus_config *config)
394{
395 return check_pad(sd, pad) ? :
396 sd->ops->pad->get_mbus_config(sd, pad, config);
397}
398
/*
 * Wrap the driver's .s_stream() video op: catch unbalanced start/stop calls,
 * tolerate failures on disable, and mirror the resulting streaming state onto
 * the optional privacy LED.
 */
static int call_s_stream(struct v4l2_subdev *sd, int enable)
{
	int ret;

	/*
	 * The .s_stream() operation must never be called to start or stop an
	 * already started or stopped subdev. Catch offenders but don't return
	 * an error yet to avoid regressions.
	 *
	 * As .s_stream() is mutually exclusive with the .enable_streams() and
	 * .disable_streams() operation, we can use the enabled_streams field
	 * to store the subdev streaming state.
	 */
	if (WARN_ON(!!sd->enabled_streams == !!enable))
		return 0;

	ret = sd->ops->video->s_stream(sd, enable);

	/* A failed disable is only warned about; the subdev is still
	 * considered stopped afterwards. */
	if (!enable && ret < 0) {
		dev_warn(sd->dev, "disabling streaming failed (%d)\n", ret);
		ret = 0;
	}

	if (!ret) {
		/* Track streaming via bit 0 of the enabled_streams mask. */
		sd->enabled_streams = enable ? BIT(0) : 0;

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
		/* Turn the privacy LED on while streaming, off otherwise. */
		if (!IS_ERR_OR_NULL(sd->privacy_led)) {
			if (enable)
				led_set_brightness(sd->privacy_led,
						   sd->privacy_led->max_brightness);
			else
				led_set_brightness(sd->privacy_led, 0);
		}
#endif
	}

	return ret;
}
438
#ifdef CONFIG_MEDIA_CONTROLLER
/*
 * Create state-management wrapper for pad ops dealing with subdev state. The
 * wrapper handles the case where the caller does not provide the called
 * subdev's state. This should be removed when all the callers are fixed.
 *
 * When no state is passed in, the wrapper locks and uses the subdev's active
 * state for the duration of the call and unlocks it afterwards.
 */
#define DEFINE_STATE_WRAPPER(f, arg_type)                                  \
	static int call_##f##_state(struct v4l2_subdev *sd,                \
				    struct v4l2_subdev_state *_state,      \
				    arg_type *arg)                         \
	{                                                                  \
		struct v4l2_subdev_state *state = _state;                  \
		int ret;                                                   \
		if (!_state)                                               \
			state = v4l2_subdev_lock_and_get_active_state(sd); \
		ret = call_##f(sd, state, arg);                            \
		if (!_state && state)                                      \
			v4l2_subdev_unlock_state(state);                   \
		return ret;                                                \
	}

#else /* CONFIG_MEDIA_CONTROLLER */

/* Without the media controller there is no active state to fall back to. */
#define DEFINE_STATE_WRAPPER(f, arg_type)                            \
	static int call_##f##_state(struct v4l2_subdev *sd,          \
				    struct v4l2_subdev_state *state, \
				    arg_type *arg)                   \
	{                                                            \
		return call_##f(sd, state, arg);                     \
	}

#endif /* CONFIG_MEDIA_CONTROLLER */
471
/* Instantiate the state-fallback wrappers for the state-aware pad ops. */
DEFINE_STATE_WRAPPER(get_fmt, struct v4l2_subdev_format);
DEFINE_STATE_WRAPPER(set_fmt, struct v4l2_subdev_format);
DEFINE_STATE_WRAPPER(enum_mbus_code, struct v4l2_subdev_mbus_code_enum);
DEFINE_STATE_WRAPPER(enum_frame_size, struct v4l2_subdev_frame_size_enum);
DEFINE_STATE_WRAPPER(enum_frame_interval, struct v4l2_subdev_frame_interval_enum);
DEFINE_STATE_WRAPPER(get_selection, struct v4l2_subdev_selection);
DEFINE_STATE_WRAPPER(set_selection, struct v4l2_subdev_selection);
479
/*
 * Pad op wrappers used by v4l2_subdev_call(): every entry validates its
 * arguments (and, for the *_state variants, handles a missing state) before
 * calling into the driver's own pad op.
 */
static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = {
	.get_fmt		= call_get_fmt_state,
	.set_fmt		= call_set_fmt_state,
	.enum_mbus_code		= call_enum_mbus_code_state,
	.enum_frame_size	= call_enum_frame_size_state,
	.enum_frame_interval	= call_enum_frame_interval_state,
	.get_selection		= call_get_selection_state,
	.set_selection		= call_set_selection_state,
	.get_frame_interval	= call_get_frame_interval,
	.set_frame_interval	= call_set_frame_interval,
	.get_edid		= call_get_edid,
	.set_edid		= call_set_edid,
	.dv_timings_cap		= call_dv_timings_cap,
	.enum_dv_timings	= call_enum_dv_timings,
	.get_frame_desc		= call_get_frame_desc,
	.get_mbus_config	= call_get_mbus_config,
};
497
/* Video op wrappers used by v4l2_subdev_call(). */
static const struct v4l2_subdev_video_ops v4l2_subdev_call_video_wrappers = {
	.s_stream		= call_s_stream,
};
501
/* Aggregate ops table consulted by the v4l2_subdev_call() macro. */
const struct v4l2_subdev_ops v4l2_subdev_call_wrappers = {
	.pad	= &v4l2_subdev_call_pad_wrappers,
	.video	= &v4l2_subdev_call_video_wrappers,
};
EXPORT_SYMBOL(v4l2_subdev_call_wrappers);
507
508#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
509
/*
 * Return the subdev state an ioctl should operate on: the file handle's TRY
 * state for V4L2_SUBDEV_FORMAT_TRY requests, otherwise the (not yet locked)
 * active state. Returns NULL for ioctls that carry no 'which' field.
 */
static struct v4l2_subdev_state *
subdev_ioctl_get_state(struct v4l2_subdev *sd, struct v4l2_subdev_fh *subdev_fh,
		       unsigned int cmd, void *arg)
{
	u32 which;

	switch (cmd) {
	default:
		return NULL;
	case VIDIOC_SUBDEV_G_FMT:
	case VIDIOC_SUBDEV_S_FMT:
		which = ((struct v4l2_subdev_format *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_CROP:
	case VIDIOC_SUBDEV_S_CROP:
		which = ((struct v4l2_subdev_crop *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_MBUS_CODE:
		which = ((struct v4l2_subdev_mbus_code_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_FRAME_SIZE:
		which = ((struct v4l2_subdev_frame_size_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL:
		which = ((struct v4l2_subdev_frame_interval_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_SELECTION:
	case VIDIOC_SUBDEV_S_SELECTION:
		which = ((struct v4l2_subdev_selection *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_FRAME_INTERVAL:
	case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		/*
		 * Clients that did not opt into INTERVAL_USES_WHICH always
		 * operate on the active state; force 'which' accordingly.
		 */
		if (!(subdev_fh->client_caps &
		      V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH))
			fi->which = V4L2_SUBDEV_FORMAT_ACTIVE;

		which = fi->which;
		break;
	}
	case VIDIOC_SUBDEV_G_ROUTING:
	case VIDIOC_SUBDEV_S_ROUTING:
		which = ((struct v4l2_subdev_routing *)arg)->which;
		break;
	}

	return which == V4L2_SUBDEV_FORMAT_TRY ?
	       subdev_fh->state :
	       v4l2_subdev_get_unlocked_active_state(sd);
}
561
/*
 * Handle a single ioctl on a subdev device node. @arg has already been copied
 * from user space by video_usercopy() and @state (possibly NULL) has been
 * selected and locked by subdev_do_ioctl_lock().
 */
static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
			    struct v4l2_subdev_state *state)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file->private_data;
	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
	bool ro_subdev = test_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags);
	bool streams_subdev = sd->flags & V4L2_SUBDEV_FL_STREAMS;
	bool client_supports_streams = subdev_fh->client_caps &
				       V4L2_SUBDEV_CLIENT_CAP_STREAMS;
	int rval;

	/*
	 * If the streams API is not enabled, remove V4L2_SUBDEV_CAP_STREAMS.
	 * Remove this when the API is no longer experimental.
	 */
	if (!v4l2_subdev_enable_streams_api)
		streams_subdev = false;

	switch (cmd) {
	case VIDIOC_SUBDEV_QUERYCAP: {
		struct v4l2_subdev_capability *cap = arg;

		memset(cap->reserved, 0, sizeof(cap->reserved));
		cap->version = LINUX_VERSION_CODE;
		cap->capabilities =
			(ro_subdev ? V4L2_SUBDEV_CAP_RO_SUBDEV : 0) |
			(streams_subdev ? V4L2_SUBDEV_CAP_STREAMS : 0);

		return 0;
	}

	case VIDIOC_QUERYCTRL:
		/*
		 * TODO: this really should be folded into v4l2_queryctrl (this
		 * currently returns -EINVAL for NULL control handlers).
		 * However, v4l2_queryctrl() is still called directly by
		 * drivers as well and until that has been addressed I believe
		 * it is safer to do the check here. The same is true for the
		 * other control ioctls below.
		 */
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_queryctrl(vfh->ctrl_handler, arg);

	case VIDIOC_QUERY_EXT_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_query_ext_ctrl(vfh->ctrl_handler, arg);

	case VIDIOC_QUERYMENU:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_querymenu(vfh->ctrl_handler, arg);

	case VIDIOC_G_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_g_ctrl(vfh->ctrl_handler, arg);

	case VIDIOC_S_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg);

	case VIDIOC_G_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_g_ext_ctrls(vfh->ctrl_handler,
					vdev, sd->v4l2_dev->mdev, arg);

	case VIDIOC_S_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler,
					vdev, sd->v4l2_dev->mdev, arg);

	case VIDIOC_TRY_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_try_ext_ctrls(vfh->ctrl_handler,
					  vdev, sd->v4l2_dev->mdev, arg);

	case VIDIOC_DQEVENT:
		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
			return -ENOIOCTLCMD;

		return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);

	case VIDIOC_SUBSCRIBE_EVENT:
		return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);

	case VIDIOC_UNSUBSCRIBE_EVENT:
		return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);

#ifdef CONFIG_VIDEO_ADV_DEBUG
	case VIDIOC_DBG_G_REGISTER:
	{
		struct v4l2_dbg_register *p = arg;

		/* Raw register access is restricted to privileged callers. */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return v4l2_subdev_call(sd, core, g_register, p);
	}
	case VIDIOC_DBG_S_REGISTER:
	{
		struct v4l2_dbg_register *p = arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return v4l2_subdev_call(sd, core, s_register, p);
	}
	case VIDIOC_DBG_G_CHIP_INFO:
	{
		struct v4l2_dbg_chip_info *p = arg;

		if (p->match.type != V4L2_CHIP_MATCH_SUBDEV || p->match.addr)
			return -EINVAL;
		if (sd->ops->core && sd->ops->core->s_register)
			p->flags |= V4L2_CHIP_FL_WRITABLE;
		if (sd->ops->core && sd->ops->core->g_register)
			p->flags |= V4L2_CHIP_FL_READABLE;
		strscpy(p->name, sd->name, sizeof(p->name));
		return 0;
	}
#endif

	case VIDIOC_LOG_STATUS: {
		int ret;

		pr_info("%s: =================  START STATUS  =================\n",
			sd->name);
		ret = v4l2_subdev_call(sd, core, log_status);
		pr_info("%s: ==================  END STATUS  ==================\n",
			sd->name);
		return ret;
	}

	case VIDIOC_SUBDEV_G_FMT: {
		struct v4l2_subdev_format *format = arg;

		/* Clients without the streams cap always address stream 0. */
		if (!client_supports_streams)
			format->stream = 0;

		memset(format->reserved, 0, sizeof(format->reserved));
		memset(format->format.reserved, 0, sizeof(format->format.reserved));
		return v4l2_subdev_call(sd, pad, get_fmt, state, format);
	}

	case VIDIOC_SUBDEV_S_FMT: {
		struct v4l2_subdev_format *format = arg;

		/* Read-only device nodes may only modify the TRY state. */
		if (format->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		if (!client_supports_streams)
			format->stream = 0;

		memset(format->reserved, 0, sizeof(format->reserved));
		memset(format->format.reserved, 0, sizeof(format->format.reserved));
		return v4l2_subdev_call(sd, pad, set_fmt, state, format);
	}

	/*
	 * G_CROP/S_CROP are emulated through the selection API with target
	 * V4L2_SEL_TGT_CROP.
	 */
	case VIDIOC_SUBDEV_G_CROP: {
		struct v4l2_subdev_crop *crop = arg;
		struct v4l2_subdev_selection sel;

		if (!client_supports_streams)
			crop->stream = 0;

		memset(crop->reserved, 0, sizeof(crop->reserved));
		memset(&sel, 0, sizeof(sel));
		sel.which = crop->which;
		sel.pad = crop->pad;
		sel.stream = crop->stream;
		sel.target = V4L2_SEL_TGT_CROP;

		rval = v4l2_subdev_call(
			sd, pad, get_selection, state, &sel);

		crop->rect = sel.r;

		return rval;
	}

	case VIDIOC_SUBDEV_S_CROP: {
		struct v4l2_subdev_crop *crop = arg;
		struct v4l2_subdev_selection sel;

		if (crop->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		if (!client_supports_streams)
			crop->stream = 0;

		memset(crop->reserved, 0, sizeof(crop->reserved));
		memset(&sel, 0, sizeof(sel));
		sel.which = crop->which;
		sel.pad = crop->pad;
		sel.stream = crop->stream;
		sel.target = V4L2_SEL_TGT_CROP;
		sel.r = crop->rect;

		rval = v4l2_subdev_call(
			sd, pad, set_selection, state, &sel);

		/* Propagate the (possibly adjusted) rectangle back. */
		crop->rect = sel.r;

		return rval;
	}

	case VIDIOC_SUBDEV_ENUM_MBUS_CODE: {
		struct v4l2_subdev_mbus_code_enum *code = arg;

		if (!client_supports_streams)
			code->stream = 0;

		memset(code->reserved, 0, sizeof(code->reserved));
		return v4l2_subdev_call(sd, pad, enum_mbus_code, state,
					code);
	}

	case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: {
		struct v4l2_subdev_frame_size_enum *fse = arg;

		if (!client_supports_streams)
			fse->stream = 0;

		memset(fse->reserved, 0, sizeof(fse->reserved));
		return v4l2_subdev_call(sd, pad, enum_frame_size, state,
					fse);
	}

	case VIDIOC_SUBDEV_G_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		if (!client_supports_streams)
			fi->stream = 0;

		memset(fi->reserved, 0, sizeof(fi->reserved));
		return v4l2_subdev_call(sd, pad, get_frame_interval, state, fi);
	}

	case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		if (!client_supports_streams)
			fi->stream = 0;

		if (fi->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		memset(fi->reserved, 0, sizeof(fi->reserved));
		return v4l2_subdev_call(sd, pad, set_frame_interval, state, fi);
	}

	case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval_enum *fie = arg;

		if (!client_supports_streams)
			fie->stream = 0;

		memset(fie->reserved, 0, sizeof(fie->reserved));
		return v4l2_subdev_call(sd, pad, enum_frame_interval, state,
					fie);
	}

	case VIDIOC_SUBDEV_G_SELECTION: {
		struct v4l2_subdev_selection *sel = arg;

		if (!client_supports_streams)
			sel->stream = 0;

		memset(sel->reserved, 0, sizeof(sel->reserved));
		return v4l2_subdev_call(
			sd, pad, get_selection, state, sel);
	}

	case VIDIOC_SUBDEV_S_SELECTION: {
		struct v4l2_subdev_selection *sel = arg;

		if (sel->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		if (!client_supports_streams)
			sel->stream = 0;

		memset(sel->reserved, 0, sizeof(sel->reserved));
		return v4l2_subdev_call(
			sd, pad, set_selection, state, sel);
	}

	case VIDIOC_G_EDID: {
		struct v4l2_subdev_edid *edid = arg;

		return v4l2_subdev_call(sd, pad, get_edid, edid);
	}

	case VIDIOC_S_EDID: {
		struct v4l2_subdev_edid *edid = arg;

		return v4l2_subdev_call(sd, pad, set_edid, edid);
	}

	case VIDIOC_SUBDEV_DV_TIMINGS_CAP: {
		struct v4l2_dv_timings_cap *cap = arg;

		return v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
	}

	case VIDIOC_SUBDEV_ENUM_DV_TIMINGS: {
		struct v4l2_enum_dv_timings *dvt = arg;

		return v4l2_subdev_call(sd, pad, enum_dv_timings, dvt);
	}

	case VIDIOC_SUBDEV_QUERY_DV_TIMINGS:
		return v4l2_subdev_call(sd, video, query_dv_timings, arg);

	case VIDIOC_SUBDEV_G_DV_TIMINGS:
		return v4l2_subdev_call(sd, video, g_dv_timings, arg);

	case VIDIOC_SUBDEV_S_DV_TIMINGS:
		if (ro_subdev)
			return -EPERM;

		return v4l2_subdev_call(sd, video, s_dv_timings, arg);

	case VIDIOC_SUBDEV_G_STD:
		return v4l2_subdev_call(sd, video, g_std, arg);

	case VIDIOC_SUBDEV_S_STD: {
		v4l2_std_id *std = arg;

		if (ro_subdev)
			return -EPERM;

		return v4l2_subdev_call(sd, video, s_std, *std);
	}

	case VIDIOC_SUBDEV_ENUMSTD: {
		struct v4l2_standard *p = arg;
		v4l2_std_id id;

		if (v4l2_subdev_call(sd, video, g_tvnorms, &id))
			return -EINVAL;

		return v4l_video_std_enumstd(p, id);
	}

	case VIDIOC_SUBDEV_QUERYSTD:
		return v4l2_subdev_call(sd, video, querystd, arg);

	case VIDIOC_SUBDEV_G_ROUTING: {
		struct v4l2_subdev_routing *routing = arg;
		struct v4l2_subdev_krouting *krouting;

		if (!v4l2_subdev_enable_streams_api)
			return -ENOIOCTLCMD;

		if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
			return -ENOIOCTLCMD;

		memset(routing->reserved, 0, sizeof(routing->reserved));

		krouting = &state->routing;

		/* Tell the caller the required table size on overflow. */
		if (routing->num_routes < krouting->num_routes) {
			routing->num_routes = krouting->num_routes;
			return -ENOSPC;
		}

		memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
		       krouting->routes,
		       krouting->num_routes * sizeof(*krouting->routes));
		routing->num_routes = krouting->num_routes;

		return 0;
	}

	case VIDIOC_SUBDEV_S_ROUTING: {
		struct v4l2_subdev_routing *routing = arg;
		struct v4l2_subdev_route *routes =
			(struct v4l2_subdev_route *)(uintptr_t)routing->routes;
		struct v4l2_subdev_krouting krouting = {};
		unsigned int i;

		if (!v4l2_subdev_enable_streams_api)
			return -ENOIOCTLCMD;

		if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
			return -ENOIOCTLCMD;

		if (routing->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		memset(routing->reserved, 0, sizeof(routing->reserved));

		/*
		 * Validate stream IDs and pad indices/directions before
		 * handing the table over to the driver.
		 */
		for (i = 0; i < routing->num_routes; ++i) {
			const struct v4l2_subdev_route *route = &routes[i];
			const struct media_pad *pads = sd->entity.pads;

			if (route->sink_stream > V4L2_SUBDEV_MAX_STREAM_ID ||
			    route->source_stream > V4L2_SUBDEV_MAX_STREAM_ID)
				return -EINVAL;

			if (route->sink_pad >= sd->entity.num_pads)
				return -EINVAL;

			if (!(pads[route->sink_pad].flags &
			      MEDIA_PAD_FL_SINK))
				return -EINVAL;

			if (route->source_pad >= sd->entity.num_pads)
				return -EINVAL;

			if (!(pads[route->source_pad].flags &
			      MEDIA_PAD_FL_SOURCE))
				return -EINVAL;
		}

		krouting.num_routes = routing->num_routes;
		krouting.routes = routes;

		return v4l2_subdev_call(sd, pad, set_routing, state,
					routing->which, &krouting);
	}

	case VIDIOC_SUBDEV_G_CLIENT_CAP: {
		struct v4l2_subdev_client_capability *client_cap = arg;

		client_cap->capabilities = subdev_fh->client_caps;

		return 0;
	}

	case VIDIOC_SUBDEV_S_CLIENT_CAP: {
		struct v4l2_subdev_client_capability *client_cap = arg;

		/*
		 * Clear V4L2_SUBDEV_CLIENT_CAP_STREAMS if streams API is not
		 * enabled. Remove this when streams API is no longer
		 * experimental.
		 */
		if (!v4l2_subdev_enable_streams_api)
			client_cap->capabilities &= ~V4L2_SUBDEV_CLIENT_CAP_STREAMS;

		/* Filter out unsupported capabilities */
		client_cap->capabilities &= (V4L2_SUBDEV_CLIENT_CAP_STREAMS |
					     V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH);

		subdev_fh->client_caps = client_cap->capabilities;

		return 0;
	}

	default:
		/* Unknown ioctls fall through to the driver's core .ioctl(). */
		return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
	}

	return 0;
}
1025
/*
 * Take the device node lock (if any), pick and lock the subdev state relevant
 * for @cmd, and dispatch to subdev_do_ioctl(). Returns -ENODEV once the
 * device node has been unregistered.
 */
static long subdev_do_ioctl_lock(struct file *file, unsigned int cmd, void *arg)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->lock;
	long ret = -ENODEV;

	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;

	if (video_is_registered(vdev)) {
		struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
		struct v4l2_fh *vfh = file->private_data;
		struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
		struct v4l2_subdev_state *state;

		/* May be NULL for ioctls that do not operate on a state. */
		state = subdev_ioctl_get_state(sd, subdev_fh, cmd, arg);

		if (state)
			v4l2_subdev_lock_state(state);

		ret = subdev_do_ioctl(file, cmd, arg, state);

		if (state)
			v4l2_subdev_unlock_state(state);
	}

	if (lock)
		mutex_unlock(lock);
	return ret;
}
1056
/* ioctl entry point: copy the argument in/out and run the locked handler. */
static long subdev_ioctl(struct file *file, unsigned int cmd,
			 unsigned long arg)
{
	return video_usercopy(file, cmd, arg, subdev_do_ioctl_lock);
}
1062
1063#ifdef CONFIG_COMPAT
/* 32-bit compat ioctl: delegated entirely to the subdev driver's core op. */
static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);

	return v4l2_subdev_call(sd, core, compat_ioctl32, cmd, arg);
}
1072#endif
1073
1074#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
/* No ioctl support without CONFIG_VIDEO_V4L2_SUBDEV_API. */
static long subdev_ioctl(struct file *file, unsigned int cmd,
			 unsigned long arg)
{
	return -ENODEV;
}
1080
1081#ifdef CONFIG_COMPAT
/* Counterpart of the stub subdev_ioctl() above for compat callers. */
static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	return -ENODEV;
}
1087#endif
1088#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
1089
1090static __poll_t subdev_poll(struct file *file, poll_table *wait)
1091{
1092 struct video_device *vdev = video_devdata(file);
1093 struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
1094 struct v4l2_fh *fh = file->private_data;
1095
1096 if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
1097 return EPOLLERR;
1098
1099 poll_wait(file, &fh->wait, wait);
1100
1101 if (v4l2_event_pending(fh))
1102 return EPOLLPRI;
1103
1104 return 0;
1105}
1106
/* File operations installed on every v4l2 subdev device node. */
const struct v4l2_file_operations v4l2_subdev_fops = {
	.owner = THIS_MODULE,
	.open = subdev_open,
	.unlocked_ioctl = subdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = subdev_compat_ioctl32,
#endif
	.release = subdev_close,
	.poll = subdev_poll,
};
1117
1118#ifdef CONFIG_MEDIA_CONTROLLER
1119
1120int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity,
1121 struct fwnode_endpoint *endpoint)
1122{
1123 struct fwnode_handle *fwnode;
1124 struct v4l2_subdev *sd;
1125
1126 if (!is_media_entity_v4l2_subdev(entity))
1127 return -EINVAL;
1128
1129 sd = media_entity_to_v4l2_subdev(entity);
1130
1131 fwnode = fwnode_graph_get_port_parent(endpoint->local_fwnode);
1132 fwnode_handle_put(fwnode);
1133
1134 if (device_match_fwnode(sd->dev, fwnode))
1135 return endpoint->port;
1136
1137 return -ENXIO;
1138}
1139EXPORT_SYMBOL_GPL(v4l2_subdev_get_fwnode_pad_1_to_1);
1140
/*
 * Default link validation: width, height and media bus code must match
 * across the link; the field order must match too unless the sink uses
 * V4L2_FIELD_NONE. Returns 0 when the formats are compatible, -EPIPE
 * otherwise (all mismatches are logged before returning).
 */
int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
				      struct media_link *link,
				      struct v4l2_subdev_format *source_fmt,
				      struct v4l2_subdev_format *sink_fmt)
{
	/* Collect every mismatch instead of bailing on the first one. */
	bool pass = true;

	/* The width, height and code must match. */
	if (source_fmt->format.width != sink_fmt->format.width) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: width does not match (source %u, sink %u)\n",
			__func__,
			source_fmt->format.width, sink_fmt->format.width);
		pass = false;
	}

	if (source_fmt->format.height != sink_fmt->format.height) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: height does not match (source %u, sink %u)\n",
			__func__,
			source_fmt->format.height, sink_fmt->format.height);
		pass = false;
	}

	if (source_fmt->format.code != sink_fmt->format.code) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: media bus code does not match (source 0x%8.8x, sink 0x%8.8x)\n",
			__func__,
			source_fmt->format.code, sink_fmt->format.code);
		pass = false;
	}

	/* The field order must match, or the sink field order must be NONE
	 * to support interlaced hardware connected to bridges that support
	 * progressive formats only.
	 */
	if (source_fmt->format.field != sink_fmt->format.field &&
	    sink_fmt->format.field != V4L2_FIELD_NONE) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: field does not match (source %u, sink %u)\n",
			__func__,
			source_fmt->format.field, sink_fmt->format.field);
		pass = false;
	}

	if (pass)
		return 0;

	dev_dbg(sd->entity.graph_obj.mdev->dev,
		"%s: link was \"%s\":%u -> \"%s\":%u\n", __func__,
		link->source->entity->name, link->source->index,
		link->sink->entity->name, link->sink->index);

	return -EPIPE;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default);
1197
/*
 * Retrieve the active format on @pad/@stream for link validation.
 *
 * If @states_locked is true the caller already holds the active state
 * locks; otherwise the state is locked here around the get_fmt call and
 * unlocked before returning.
 */
static int
v4l2_subdev_link_validate_get_format(struct media_pad *pad, u32 stream,
				     struct v4l2_subdev_format *fmt,
				     bool states_locked)
{
	struct v4l2_subdev_state *state;
	struct v4l2_subdev *sd;
	int ret;

	if (!is_media_entity_v4l2_subdev(pad->entity)) {
		/* Only a V4L I/O entity may legitimately land here. */
		WARN(pad->entity->function != MEDIA_ENT_F_IO_V4L,
		     "Driver bug! Wrong media entity type 0x%08x, entity %s\n",
		     pad->entity->function, pad->entity->name);

		return -EINVAL;
	}

	sd = media_entity_to_v4l2_subdev(pad->entity);

	fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
	fmt->pad = pad->index;
	fmt->stream = stream;

	if (states_locked)
		state = v4l2_subdev_get_locked_active_state(sd);
	else
		state = v4l2_subdev_lock_and_get_active_state(sd);

	ret = v4l2_subdev_call(sd, pad, get_fmt, state, fmt);

	/* Only unlock a state we locked ourselves. */
	if (!states_locked && state)
		v4l2_subdev_unlock_state(state);

	return ret;
}
1233
1234#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
1235
/*
 * Build the mask of streams present on @pad from the subdev's active
 * routing: a bit is set in *streams_mask for every active route that
 * terminates on @pad (source or sink side, depending on pad direction).
 * *streams_mask is zeroed first, so it stays 0 if no state is available.
 */
static void __v4l2_link_validate_get_streams(struct media_pad *pad,
					     u64 *streams_mask,
					     bool states_locked)
{
	struct v4l2_subdev_route *route;
	struct v4l2_subdev_state *state;
	struct v4l2_subdev *subdev;

	subdev = media_entity_to_v4l2_subdev(pad->entity);

	*streams_mask = 0;

	if (states_locked)
		state = v4l2_subdev_get_locked_active_state(subdev);
	else
		state = v4l2_subdev_lock_and_get_active_state(subdev);

	if (WARN_ON(!state))
		return;

	for_each_active_route(&state->routing, route) {
		u32 route_pad;
		u32 route_stream;

		/* Pick the route end that sits on this pad's side. */
		if (pad->flags & MEDIA_PAD_FL_SOURCE) {
			route_pad = route->source_pad;
			route_stream = route->source_stream;
		} else {
			route_pad = route->sink_pad;
			route_stream = route->sink_stream;
		}

		if (route_pad != pad->index)
			continue;

		*streams_mask |= BIT_ULL(route_stream);
	}

	if (!states_locked)
		v4l2_subdev_unlock_state(state);
}
1277
1278#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
1279
/*
 * Get the set of streams on @pad. Subdevs that do not use the streams API
 * are modelled as carrying the single implicit stream 0.
 */
static void v4l2_link_validate_get_streams(struct media_pad *pad,
					   u64 *streams_mask,
					   bool states_locked)
{
	struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(pad->entity);

	if (!(subdev->flags & V4L2_SUBDEV_FL_STREAMS)) {
		/* Non-streams subdevs have an implicit stream 0 */
		*streams_mask = BIT_ULL(0);
		return;
	}

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
	__v4l2_link_validate_get_streams(pad, streams_mask, states_locked);
#else
	/* This shouldn't happen */
	*streams_mask = 0;
#endif
}
1299
/*
 * Validate a subdev-to-subdev link: every sink stream must be fed by a
 * source stream, and each stream pair's formats must agree, either via the
 * sink subdev's .link_validate() pad op or, if that op is absent, via
 * v4l2_subdev_link_validate_default(). Streams whose format cannot be
 * retrieved are skipped rather than treated as failures.
 */
static int v4l2_subdev_link_validate_locked(struct media_link *link, bool states_locked)
{
	struct v4l2_subdev *sink_subdev =
		media_entity_to_v4l2_subdev(link->sink->entity);
	struct device *dev = sink_subdev->entity.graph_obj.mdev->dev;
	u64 source_streams_mask;
	u64 sink_streams_mask;
	u64 dangling_sink_streams;
	u32 stream;
	int ret;

	dev_dbg(dev, "validating link \"%s\":%u -> \"%s\":%u\n",
		link->source->entity->name, link->source->index,
		link->sink->entity->name, link->sink->index);

	v4l2_link_validate_get_streams(link->source, &source_streams_mask, states_locked);
	v4l2_link_validate_get_streams(link->sink, &sink_streams_mask, states_locked);

	/*
	 * It is ok to have more source streams than sink streams as extra
	 * source streams can just be ignored by the receiver, but having extra
	 * sink streams is an error as streams must have a source.
	 */
	dangling_sink_streams = (source_streams_mask ^ sink_streams_mask) &
				sink_streams_mask;
	if (dangling_sink_streams) {
		dev_err(dev, "Dangling sink streams: mask %#llx\n",
			dangling_sink_streams);
		return -EINVAL;
	}

	/* Validate source and sink stream formats */

	for (stream = 0; stream < sizeof(sink_streams_mask) * 8; ++stream) {
		struct v4l2_subdev_format sink_fmt, source_fmt;

		if (!(sink_streams_mask & BIT_ULL(stream)))
			continue;

		dev_dbg(dev, "validating stream \"%s\":%u:%u -> \"%s\":%u:%u\n",
			link->source->entity->name, link->source->index, stream,
			link->sink->entity->name, link->sink->index, stream);

		ret = v4l2_subdev_link_validate_get_format(link->source, stream,
							   &source_fmt, states_locked);
		if (ret < 0) {
			dev_dbg(dev,
				"Failed to get format for \"%s\":%u:%u (but that's ok)\n",
				link->source->entity->name, link->source->index,
				stream);
			continue;
		}

		ret = v4l2_subdev_link_validate_get_format(link->sink, stream,
							   &sink_fmt, states_locked);
		if (ret < 0) {
			dev_dbg(dev,
				"Failed to get format for \"%s\":%u:%u (but that's ok)\n",
				link->sink->entity->name, link->sink->index,
				stream);
			continue;
		}

		/* TODO: add stream number to link_validate() */
		ret = v4l2_subdev_call(sink_subdev, pad, link_validate, link,
				       &source_fmt, &sink_fmt);
		if (!ret)
			continue;

		/* -ENOIOCTLCMD means no driver op: use the default check. */
		if (ret != -ENOIOCTLCMD)
			return ret;

		ret = v4l2_subdev_link_validate_default(sink_subdev, link,
							&source_fmt, &sink_fmt);

		if (ret)
			return ret;
	}

	return 0;
}
1381
/*
 * Media entity .link_validate() implementation for V4L2 subdevs.
 *
 * If both ends of the link have an active state, both states are locked
 * (sink first) for the duration of the validation; otherwise validation
 * runs with states_locked = false and each format lookup locks the
 * relevant state individually.
 */
int v4l2_subdev_link_validate(struct media_link *link)
{
	struct v4l2_subdev *source_sd, *sink_sd;
	struct v4l2_subdev_state *source_state, *sink_state;
	bool states_locked;
	int ret;

	/* Links to non-subdev entities are not ours to validate. */
	if (!is_media_entity_v4l2_subdev(link->sink->entity) ||
	    !is_media_entity_v4l2_subdev(link->source->entity)) {
		pr_warn_once("%s of link '%s':%u->'%s':%u is not a V4L2 sub-device, driver bug!\n",
			     !is_media_entity_v4l2_subdev(link->sink->entity) ?
			     "sink" : "source",
			     link->source->entity->name, link->source->index,
			     link->sink->entity->name, link->sink->index);
		return 0;
	}

	sink_sd = media_entity_to_v4l2_subdev(link->sink->entity);
	source_sd = media_entity_to_v4l2_subdev(link->source->entity);

	sink_state = v4l2_subdev_get_unlocked_active_state(sink_sd);
	source_state = v4l2_subdev_get_unlocked_active_state(source_sd);

	states_locked = sink_state && source_state;

	if (states_locked) {
		v4l2_subdev_lock_state(sink_state);
		v4l2_subdev_lock_state(source_state);
	}

	ret = v4l2_subdev_link_validate_locked(link, states_locked);

	if (states_locked) {
		v4l2_subdev_unlock_state(sink_state);
		v4l2_subdev_unlock_state(source_state);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate);
1422
1423bool v4l2_subdev_has_pad_interdep(struct media_entity *entity,
1424 unsigned int pad0, unsigned int pad1)
1425{
1426 struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
1427 struct v4l2_subdev_krouting *routing;
1428 struct v4l2_subdev_state *state;
1429 unsigned int i;
1430
1431 state = v4l2_subdev_lock_and_get_active_state(sd);
1432
1433 routing = &state->routing;
1434
1435 for (i = 0; i < routing->num_routes; ++i) {
1436 struct v4l2_subdev_route *route = &routing->routes[i];
1437
1438 if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
1439 continue;
1440
1441 if ((route->sink_pad == pad0 && route->source_pad == pad1) ||
1442 (route->source_pad == pad0 && route->sink_pad == pad1)) {
1443 v4l2_subdev_unlock_state(state);
1444 return true;
1445 }
1446 }
1447
1448 v4l2_subdev_unlock_state(state);
1449
1450 return false;
1451}
1452EXPORT_SYMBOL_GPL(v4l2_subdev_has_pad_interdep);
1453
1454struct v4l2_subdev_state *
1455__v4l2_subdev_state_alloc(struct v4l2_subdev *sd, const char *lock_name,
1456 struct lock_class_key *lock_key)
1457{
1458 struct v4l2_subdev_state *state;
1459 int ret;
1460
1461 state = kzalloc(sizeof(*state), GFP_KERNEL);
1462 if (!state)
1463 return ERR_PTR(-ENOMEM);
1464
1465 __mutex_init(&state->_lock, lock_name, lock_key);
1466 if (sd->state_lock)
1467 state->lock = sd->state_lock;
1468 else
1469 state->lock = &state->_lock;
1470
1471 state->sd = sd;
1472
1473 /* Drivers that support streams do not need the legacy pad config */
1474 if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS) && sd->entity.num_pads) {
1475 state->pads = kvcalloc(sd->entity.num_pads,
1476 sizeof(*state->pads), GFP_KERNEL);
1477 if (!state->pads) {
1478 ret = -ENOMEM;
1479 goto err;
1480 }
1481 }
1482
1483 if (sd->internal_ops && sd->internal_ops->init_state) {
1484 /*
1485 * There can be no race at this point, but we lock the state
1486 * anyway to satisfy lockdep checks.
1487 */
1488 v4l2_subdev_lock_state(state);
1489 ret = sd->internal_ops->init_state(sd, state);
1490 v4l2_subdev_unlock_state(state);
1491
1492 if (ret)
1493 goto err;
1494 }
1495
1496 return state;
1497
1498err:
1499 if (state && state->pads)
1500 kvfree(state->pads);
1501
1502 kfree(state);
1503
1504 return ERR_PTR(ret);
1505}
1506EXPORT_SYMBOL_GPL(__v4l2_subdev_state_alloc);
1507
/*
 * Free a state allocated by __v4l2_subdev_state_alloc(), including its
 * routing table, stream configs and legacy pad configs. NULL is a no-op.
 */
void __v4l2_subdev_state_free(struct v4l2_subdev_state *state)
{
	if (!state)
		return;

	mutex_destroy(&state->_lock);

	kfree(state->routing.routes);
	kvfree(state->stream_configs.configs);
	kvfree(state->pads);
	kfree(state);
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_free);
1521
/*
 * Allocate the subdev's active state, using @name and @key for the state
 * lock's lockdep identity. Returns 0 on success or a negative error code.
 */
int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name,
				struct lock_class_key *key)
{
	struct v4l2_subdev_state *state;

	state = __v4l2_subdev_state_alloc(sd, name, key);
	if (IS_ERR(state))
		return PTR_ERR(state);

	sd->active_state = state;

	return 0;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_init_finalize);
1536
/*
 * Release the resources attached to a subdev: the active state and the
 * async subdev endpoint list entries.
 */
void v4l2_subdev_cleanup(struct v4l2_subdev *sd)
{
	struct v4l2_async_subdev_endpoint *ase, *ase_tmp;

	__v4l2_subdev_state_free(sd->active_state);
	sd->active_state = NULL;

	/* Uninitialised sub-device, bail out here. */
	if (!sd->async_subdev_endpoint_list.next)
		return;

	list_for_each_entry_safe(ase, ase_tmp, &sd->async_subdev_endpoint_list,
				 async_subdev_endpoint_entry) {
		list_del(&ase->async_subdev_endpoint_entry);

		kfree(ase);
	}
}
EXPORT_SYMBOL_GPL(v4l2_subdev_cleanup);
1556
/*
 * Return a pointer to the format stored in @state for (@pad, @stream), or
 * NULL if it doesn't exist.
 *
 * Subdevs without streams support keep one format per pad in the legacy
 * pads array (only stream 0 is valid); streams-aware subdevs look the
 * entry up in the stream_configs table built from the routing.
 */
struct v4l2_mbus_framefmt *
__v4l2_subdev_state_get_format(struct v4l2_subdev_state *state,
			       unsigned int pad, u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	if (WARN_ON_ONCE(!state))
		return NULL;

	if (state->pads) {
		/* Legacy pad configs only support the implicit stream 0. */
		if (stream)
			return NULL;

		if (pad >= state->sd->entity.num_pads)
			return NULL;

		return &state->pads[pad].format;
	}

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].fmt;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_format);
1590
1591struct v4l2_rect *
1592__v4l2_subdev_state_get_crop(struct v4l2_subdev_state *state, unsigned int pad,
1593 u32 stream)
1594{
1595 struct v4l2_subdev_stream_configs *stream_configs;
1596 unsigned int i;
1597
1598 if (WARN_ON_ONCE(!state))
1599 return NULL;
1600
1601 if (state->pads) {
1602 if (stream)
1603 return NULL;
1604
1605 if (pad >= state->sd->entity.num_pads)
1606 return NULL;
1607
1608 return &state->pads[pad].crop;
1609 }
1610
1611 lockdep_assert_held(state->lock);
1612
1613 stream_configs = &state->stream_configs;
1614
1615 for (i = 0; i < stream_configs->num_configs; ++i) {
1616 if (stream_configs->configs[i].pad == pad &&
1617 stream_configs->configs[i].stream == stream)
1618 return &stream_configs->configs[i].crop;
1619 }
1620
1621 return NULL;
1622}
1623EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_crop);
1624
/*
 * Return a pointer to the compose rectangle stored in @state for
 * (@pad, @stream), or NULL if it doesn't exist. Same lookup rules as
 * __v4l2_subdev_state_get_format().
 */
struct v4l2_rect *
__v4l2_subdev_state_get_compose(struct v4l2_subdev_state *state,
				unsigned int pad, u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	if (WARN_ON_ONCE(!state))
		return NULL;

	if (state->pads) {
		/* Legacy pad configs only support the implicit stream 0. */
		if (stream)
			return NULL;

		if (pad >= state->sd->entity.num_pads)
			return NULL;

		return &state->pads[pad].compose;
	}

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].compose;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_compose);
1658
1659struct v4l2_fract *
1660__v4l2_subdev_state_get_interval(struct v4l2_subdev_state *state,
1661 unsigned int pad, u32 stream)
1662{
1663 struct v4l2_subdev_stream_configs *stream_configs;
1664 unsigned int i;
1665
1666 if (WARN_ON(!state))
1667 return NULL;
1668
1669 lockdep_assert_held(state->lock);
1670
1671 if (state->pads) {
1672 if (stream)
1673 return NULL;
1674
1675 if (pad >= state->sd->entity.num_pads)
1676 return NULL;
1677
1678 return &state->pads[pad].interval;
1679 }
1680
1681 lockdep_assert_held(state->lock);
1682
1683 stream_configs = &state->stream_configs;
1684
1685 for (i = 0; i < stream_configs->num_configs; ++i) {
1686 if (stream_configs->configs[i].pad == pad &&
1687 stream_configs->configs[i].stream == stream)
1688 return &stream_configs->configs[i].interval;
1689 }
1690
1691 return NULL;
1692}
1693EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_interval);
1694
1695#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
1696
/*
 * Rebuild @stream_configs from @routing: allocate one config entry for
 * each end (sink and source) of every active route, and replace the old
 * array. On allocation failure the old configs are left untouched.
 */
static int
v4l2_subdev_init_stream_configs(struct v4l2_subdev_stream_configs *stream_configs,
				const struct v4l2_subdev_krouting *routing)
{
	struct v4l2_subdev_stream_configs new_configs = { 0 };
	struct v4l2_subdev_route *route;
	u32 idx;

	/* Count number of formats needed */
	for_each_active_route(routing, route) {
		/*
		 * Each route needs a format on both ends of the route.
		 */
		new_configs.num_configs += 2;
	}

	if (new_configs.num_configs) {
		new_configs.configs = kvcalloc(new_configs.num_configs,
					       sizeof(*new_configs.configs),
					       GFP_KERNEL);

		if (!new_configs.configs)
			return -ENOMEM;
	}

	/*
	 * Fill in the 'pad' and 'stream' value for each item in the array from
	 * the routing table
	 */
	idx = 0;

	for_each_active_route(routing, route) {
		new_configs.configs[idx].pad = route->sink_pad;
		new_configs.configs[idx].stream = route->sink_stream;

		idx++;

		new_configs.configs[idx].pad = route->source_pad;
		new_configs.configs[idx].stream = route->source_stream;

		idx++;
	}

	kvfree(stream_configs->configs);
	*stream_configs = new_configs;

	return 0;
}
1745
1746int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
1747 struct v4l2_subdev_format *format)
1748{
1749 struct v4l2_mbus_framefmt *fmt;
1750
1751 fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream);
1752 if (!fmt)
1753 return -EINVAL;
1754
1755 format->format = *fmt;
1756
1757 return 0;
1758}
1759EXPORT_SYMBOL_GPL(v4l2_subdev_get_fmt);
1760
1761int v4l2_subdev_get_frame_interval(struct v4l2_subdev *sd,
1762 struct v4l2_subdev_state *state,
1763 struct v4l2_subdev_frame_interval *fi)
1764{
1765 struct v4l2_fract *interval;
1766
1767 interval = v4l2_subdev_state_get_interval(state, fi->pad, fi->stream);
1768 if (!interval)
1769 return -EINVAL;
1770
1771 fi->interval = *interval;
1772
1773 return 0;
1774}
1775EXPORT_SYMBOL_GPL(v4l2_subdev_get_frame_interval);
1776
/*
 * Replace the state's routing table with a copy of @routing and rebuild
 * the stream configs to match. The state lock must be held.
 *
 * On any failure (size overflow, allocation) the existing routing and
 * stream configs are left untouched.
 */
int v4l2_subdev_set_routing(struct v4l2_subdev *sd,
			    struct v4l2_subdev_state *state,
			    const struct v4l2_subdev_krouting *routing)
{
	struct v4l2_subdev_krouting *dst = &state->routing;
	const struct v4l2_subdev_krouting *src = routing;
	struct v4l2_subdev_krouting new_routing = { 0 };
	size_t bytes;
	int r;

	/* Reject tables whose byte size computation would overflow. */
	if (unlikely(check_mul_overflow((size_t)src->num_routes,
					sizeof(*src->routes), &bytes)))
		return -EOVERFLOW;

	lockdep_assert_held(state->lock);

	if (src->num_routes > 0) {
		new_routing.routes = kmemdup(src->routes, bytes, GFP_KERNEL);
		if (!new_routing.routes)
			return -ENOMEM;
	}

	new_routing.num_routes = src->num_routes;

	r = v4l2_subdev_init_stream_configs(&state->stream_configs,
					    &new_routing);
	if (r) {
		kfree(new_routing.routes);
		return r;
	}

	kfree(dst->routes);
	*dst = new_routing;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing);
1814
1815struct v4l2_subdev_route *
1816__v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing,
1817 struct v4l2_subdev_route *route)
1818{
1819 if (route)
1820 ++route;
1821 else
1822 route = &routing->routes[0];
1823
1824 for (; route < routing->routes + routing->num_routes; ++route) {
1825 if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
1826 continue;
1827
1828 return route;
1829 }
1830
1831 return NULL;
1832}
1833EXPORT_SYMBOL_GPL(__v4l2_subdev_next_active_route);
1834
1835int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd,
1836 struct v4l2_subdev_state *state,
1837 const struct v4l2_subdev_krouting *routing,
1838 const struct v4l2_mbus_framefmt *fmt)
1839{
1840 struct v4l2_subdev_stream_configs *stream_configs;
1841 unsigned int i;
1842 int ret;
1843
1844 ret = v4l2_subdev_set_routing(sd, state, routing);
1845 if (ret)
1846 return ret;
1847
1848 stream_configs = &state->stream_configs;
1849
1850 for (i = 0; i < stream_configs->num_configs; ++i)
1851 stream_configs->configs[i].fmt = *fmt;
1852
1853 return 0;
1854}
1855EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing_with_fmt);
1856
1857int v4l2_subdev_routing_find_opposite_end(const struct v4l2_subdev_krouting *routing,
1858 u32 pad, u32 stream, u32 *other_pad,
1859 u32 *other_stream)
1860{
1861 unsigned int i;
1862
1863 for (i = 0; i < routing->num_routes; ++i) {
1864 struct v4l2_subdev_route *route = &routing->routes[i];
1865
1866 if (route->source_pad == pad &&
1867 route->source_stream == stream) {
1868 if (other_pad)
1869 *other_pad = route->sink_pad;
1870 if (other_stream)
1871 *other_stream = route->sink_stream;
1872 return 0;
1873 }
1874
1875 if (route->sink_pad == pad && route->sink_stream == stream) {
1876 if (other_pad)
1877 *other_pad = route->source_pad;
1878 if (other_stream)
1879 *other_stream = route->source_stream;
1880 return 0;
1881 }
1882 }
1883
1884 return -EINVAL;
1885}
1886EXPORT_SYMBOL_GPL(v4l2_subdev_routing_find_opposite_end);
1887
/*
 * Return the format of the stream on the other end of the route that
 * contains (@pad, @stream), or NULL if no such route or format exists.
 */
struct v4l2_mbus_framefmt *
v4l2_subdev_state_get_opposite_stream_format(struct v4l2_subdev_state *state,
					     u32 pad, u32 stream)
{
	u32 other_pad, other_stream;
	int ret;

	ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
						    pad, stream,
						    &other_pad, &other_stream);
	if (ret)
		return NULL;

	return v4l2_subdev_state_get_format(state, other_pad, other_stream);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_opposite_stream_format);
1904
/*
 * Translate a set of streams between two pads connected by active routes.
 * On return, *streams holds the subset of the input streams on @pad0 for
 * which a route to @pad1 exists, and the return value is the matching set
 * of streams on @pad1. Routes are matched in both directions.
 */
u64 v4l2_subdev_state_xlate_streams(const struct v4l2_subdev_state *state,
				    u32 pad0, u32 pad1, u64 *streams)
{
	const struct v4l2_subdev_krouting *routing = &state->routing;
	struct v4l2_subdev_route *route;
	u64 streams0 = 0;
	u64 streams1 = 0;

	for_each_active_route(routing, route) {
		if (route->sink_pad == pad0 && route->source_pad == pad1 &&
		    (*streams & BIT_ULL(route->sink_stream))) {
			streams0 |= BIT_ULL(route->sink_stream);
			streams1 |= BIT_ULL(route->source_stream);
		}
		if (route->source_pad == pad0 && route->sink_pad == pad1 &&
		    (*streams & BIT_ULL(route->source_stream))) {
			streams0 |= BIT_ULL(route->source_stream);
			streams1 |= BIT_ULL(route->sink_stream);
		}
	}

	*streams = streams0;
	return streams1;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_xlate_streams);
1930
/*
 * Validate a routing table against the subdev's pads and the @disallow
 * restriction mask.
 *
 * remote_pads[] records, per pad, the opposite pad of the last route seen
 * on that pad (U32_MAX before any route touches it). A single pass over
 * the routes can then detect both stream-mixing violations (a pad routed
 * to more than one remote pad) and multiplexing violations (more than one
 * route on a pad). An O(n^2) inner pass catches 1:N and N:1 route fan-out
 * when disallowed.
 */
int v4l2_subdev_routing_validate(struct v4l2_subdev *sd,
				 const struct v4l2_subdev_krouting *routing,
				 enum v4l2_subdev_routing_restriction disallow)
{
	u32 *remote_pads = NULL;
	unsigned int i, j;
	int ret = -EINVAL;

	if (disallow & (V4L2_SUBDEV_ROUTING_NO_STREAM_MIX |
			V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING)) {
		remote_pads = kcalloc(sd->entity.num_pads, sizeof(*remote_pads),
				      GFP_KERNEL);
		if (!remote_pads)
			return -ENOMEM;

		/* U32_MAX marks "no route seen on this pad yet". */
		for (i = 0; i < sd->entity.num_pads; ++i)
			remote_pads[i] = U32_MAX;
	}

	for (i = 0; i < routing->num_routes; ++i) {
		const struct v4l2_subdev_route *route = &routing->routes[i];

		/* Validate the sink and source pad numbers. */
		if (route->sink_pad >= sd->entity.num_pads ||
		    !(sd->entity.pads[route->sink_pad].flags & MEDIA_PAD_FL_SINK)) {
			dev_dbg(sd->dev, "route %u sink (%u) is not a sink pad\n",
				i, route->sink_pad);
			goto out;
		}

		if (route->source_pad >= sd->entity.num_pads ||
		    !(sd->entity.pads[route->source_pad].flags & MEDIA_PAD_FL_SOURCE)) {
			dev_dbg(sd->dev, "route %u source (%u) is not a source pad\n",
				i, route->source_pad);
			goto out;
		}

		/*
		 * V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX: all streams from a
		 * sink pad must be routed to a single source pad.
		 */
		if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX) {
			if (remote_pads[route->sink_pad] != U32_MAX &&
			    remote_pads[route->sink_pad] != route->source_pad) {
				dev_dbg(sd->dev,
					"route %u attempts to mix %s streams\n",
					i, "sink");
				goto out;
			}
		}

		/*
		 * V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX: all streams on a
		 * source pad must originate from a single sink pad.
		 */
		if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX) {
			if (remote_pads[route->source_pad] != U32_MAX &&
			    remote_pads[route->source_pad] != route->sink_pad) {
				dev_dbg(sd->dev,
					"route %u attempts to mix %s streams\n",
					i, "source");
				goto out;
			}
		}

		/*
		 * V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING: Pads on the sink
		 * side can not do stream multiplexing, i.e. there can be only
		 * a single stream in a sink pad.
		 */
		if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING) {
			if (remote_pads[route->sink_pad] != U32_MAX) {
				dev_dbg(sd->dev,
					"route %u attempts to multiplex on %s pad %u\n",
					i, "sink", route->sink_pad);
				goto out;
			}
		}

		/*
		 * V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING: Pads on the
		 * source side can not do stream multiplexing, i.e. there can
		 * be only a single stream in a source pad.
		 */
		if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING) {
			if (remote_pads[route->source_pad] != U32_MAX) {
				dev_dbg(sd->dev,
					"route %u attempts to multiplex on %s pad %u\n",
					i, "source", route->source_pad);
				goto out;
			}
		}

		if (remote_pads) {
			remote_pads[route->sink_pad] = route->source_pad;
			remote_pads[route->source_pad] = route->sink_pad;
		}

		for (j = i + 1; j < routing->num_routes; ++j) {
			const struct v4l2_subdev_route *r = &routing->routes[j];

			/*
			 * V4L2_SUBDEV_ROUTING_NO_1_TO_N: No two routes can
			 * originate from the same (sink) stream.
			 */
			if ((disallow & V4L2_SUBDEV_ROUTING_NO_1_TO_N) &&
			    route->sink_pad == r->sink_pad &&
			    route->sink_stream == r->sink_stream) {
				dev_dbg(sd->dev,
					"routes %u and %u originate from same sink (%u/%u)\n",
					i, j, route->sink_pad,
					route->sink_stream);
				goto out;
			}

			/*
			 * V4L2_SUBDEV_ROUTING_NO_N_TO_1: No two routes can end
			 * at the same (source) stream.
			 */
			if ((disallow & V4L2_SUBDEV_ROUTING_NO_N_TO_1) &&
			    route->source_pad == r->source_pad &&
			    route->source_stream == r->source_stream) {
				dev_dbg(sd->dev,
					"routes %u and %u end at same source (%u/%u)\n",
					i, j, route->source_pad,
					route->source_stream);
				goto out;
			}
		}
	}

	ret = 0;

out:
	kfree(remote_pads);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_routing_validate);
2069
/*
 * Enable streams by falling back on the legacy .s_stream() video op.
 * s_stream(1) is issued only when the first streams become enabled;
 * subsequent calls just accumulate bits in sd->enabled_streams.
 */
static int v4l2_subdev_enable_streams_fallback(struct v4l2_subdev *sd, u32 pad,
					       u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	unsigned int i;
	int ret;

	/*
	 * If the subdev doesn't implement pad-based stream enable, fall back
	 * on the .s_stream() operation. This can only be done for subdevs that
	 * have a single source pad, as sd->enabled_streams is global to the
	 * subdev.
	 */
	if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
		return -EOPNOTSUPP;

	for (i = 0; i < sd->entity.num_pads; ++i) {
		if (i != pad && sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
			return -EOPNOTSUPP;
	}

	if (sd->enabled_streams & streams_mask) {
		dev_dbg(dev, "set of streams %#llx already enabled on %s:%u\n",
			streams_mask, sd->entity.name, pad);
		return -EALREADY;
	}

	/* Start streaming when the first streams are enabled. */
	if (!sd->enabled_streams) {
		ret = v4l2_subdev_call(sd, video, s_stream, 1);
		if (ret)
			return ret;
	}

	sd->enabled_streams |= streams_mask;

	return 0;
}
2108
/*
 * Enable a set of streams on @pad. Uses the subdev's .enable_streams()
 * pad op when available, otherwise falls back on the legacy .s_stream()
 * path. Requested streams must exist in the stream configs and must not
 * already be enabled; on success the configs are marked enabled.
 */
int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad,
			       u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	struct v4l2_subdev_state *state;
	u64 found_streams = 0;
	unsigned int i;
	int ret;

	/* A few basic sanity checks first. */
	if (pad >= sd->entity.num_pads)
		return -EINVAL;

	if (!streams_mask)
		return 0;

	/* Fallback on .s_stream() if .enable_streams() isn't available. */
	if (!sd->ops->pad || !sd->ops->pad->enable_streams)
		return v4l2_subdev_enable_streams_fallback(sd, pad,
							   streams_mask);

	state = v4l2_subdev_lock_and_get_active_state(sd);

	/*
	 * Verify that the requested streams exist and that they are not
	 * already enabled.
	 */
	for (i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream)))
			continue;

		found_streams |= BIT_ULL(cfg->stream);

		if (cfg->enabled) {
			dev_dbg(dev, "stream %u already enabled on %s:%u\n",
				cfg->stream, sd->entity.name, pad);
			ret = -EALREADY;
			goto done;
		}
	}

	if (found_streams != streams_mask) {
		dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
			streams_mask & ~found_streams, sd->entity.name, pad);
		ret = -EINVAL;
		goto done;
	}

	dev_dbg(dev, "enable streams %u:%#llx\n", pad, streams_mask);

	/* Call the .enable_streams() operation. */
	ret = v4l2_subdev_call(sd, pad, enable_streams, state, pad,
			       streams_mask);
	if (ret) {
		dev_dbg(dev, "enable streams %u:%#llx failed: %d\n", pad,
			streams_mask, ret);
		goto done;
	}

	/* Mark the streams as enabled. */
	for (i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream)))
			cfg->enabled = true;
	}

done:
	v4l2_subdev_unlock_state(state);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_enable_streams);
2186
/*
 * Disable streams by falling back on the legacy .s_stream() video op.
 * s_stream(0) is issued only when the last enabled streams go away;
 * otherwise just the bits in sd->enabled_streams are cleared.
 */
static int v4l2_subdev_disable_streams_fallback(struct v4l2_subdev *sd, u32 pad,
						u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	unsigned int i;
	int ret;

	/*
	 * If the subdev doesn't implement pad-based stream enable, fall back
	 * on the .s_stream() operation. This can only be done for subdevs that
	 * have a single source pad, as sd->enabled_streams is global to the
	 * subdev.
	 */
	if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
		return -EOPNOTSUPP;

	for (i = 0; i < sd->entity.num_pads; ++i) {
		if (i != pad && sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
			return -EOPNOTSUPP;
	}

	if ((sd->enabled_streams & streams_mask) != streams_mask) {
		dev_dbg(dev, "set of streams %#llx already disabled on %s:%u\n",
			streams_mask, sd->entity.name, pad);
		return -EALREADY;
	}

	/* Stop streaming when the last streams are disabled. */
	if (!(sd->enabled_streams & ~streams_mask)) {
		ret = v4l2_subdev_call(sd, video, s_stream, 0);
		if (ret)
			return ret;
	}

	sd->enabled_streams &= ~streams_mask;

	return 0;
}
2225
/*
 * Disable a set of streams on @pad. Mirror of v4l2_subdev_enable_streams():
 * uses the subdev's .disable_streams() pad op when available, otherwise
 * falls back on the legacy .s_stream() path. Requested streams must exist
 * and must currently be enabled; on success the configs are marked
 * disabled.
 */
int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad,
				u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	struct v4l2_subdev_state *state;
	u64 found_streams = 0;
	unsigned int i;
	int ret;

	/* A few basic sanity checks first. */
	if (pad >= sd->entity.num_pads)
		return -EINVAL;

	if (!streams_mask)
		return 0;

	/* Fallback on .s_stream() if .disable_streams() isn't available. */
	if (!sd->ops->pad || !sd->ops->pad->disable_streams)
		return v4l2_subdev_disable_streams_fallback(sd, pad,
							    streams_mask);

	state = v4l2_subdev_lock_and_get_active_state(sd);

	/*
	 * Verify that the requested streams exist and that they are not
	 * already disabled.
	 */
	for (i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream)))
			continue;

		found_streams |= BIT_ULL(cfg->stream);

		if (!cfg->enabled) {
			dev_dbg(dev, "stream %u already disabled on %s:%u\n",
				cfg->stream, sd->entity.name, pad);
			ret = -EALREADY;
			goto done;
		}
	}

	if (found_streams != streams_mask) {
		dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
			streams_mask & ~found_streams, sd->entity.name, pad);
		ret = -EINVAL;
		goto done;
	}

	dev_dbg(dev, "disable streams %u:%#llx\n", pad, streams_mask);

	/* Call the .disable_streams() operation. */
	ret = v4l2_subdev_call(sd, pad, disable_streams, state, pad,
			       streams_mask);
	if (ret) {
		dev_dbg(dev, "disable streams %u:%#llx failed: %d\n", pad,
			streams_mask, ret);
		goto done;
	}

	/* Mark the streams as disabled. */
	for (i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream)))
			cfg->enabled = false;
	}

done:
	v4l2_subdev_unlock_state(state);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_disable_streams);
2303
/*
 * Helper to implement the video .s_stream() operation in terms of the
 * streams API, for subdevs that have exactly one source pad.
 *
 * Collects the source streams of all active routes from the active state
 * and enables or disables all of them on the single source pad, depending
 * on @enable. Returns 0 on success or a negative error code.
 */
int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable)
{
	struct v4l2_subdev_state *state;
	struct v4l2_subdev_route *route;
	struct media_pad *pad;
	u64 source_mask = 0;
	int pad_index = -1;

	/*
	 * Find the source pad. This helper is meant for subdevs that have a
	 * single source pad, so failures shouldn't happen, but catch them
	 * loudly nonetheless as they indicate a driver bug.
	 */
	media_entity_for_each_pad(&sd->entity, pad) {
		if (pad->flags & MEDIA_PAD_FL_SOURCE) {
			/* First source pad wins; the helper assumes it is unique. */
			pad_index = pad->index;
			break;
		}
	}

	if (WARN_ON(pad_index == -1))
		return -EINVAL;

	/*
	 * As there's a single source pad, just collect all the source streams.
	 */
	state = v4l2_subdev_lock_and_get_active_state(sd);

	for_each_active_route(&state->routing, route)
		source_mask |= BIT_ULL(route->source_stream);

	/*
	 * The state lock is dropped before calling the enable/disable
	 * helpers, which take it again themselves.
	 */
	v4l2_subdev_unlock_state(state);

	if (enable)
		return v4l2_subdev_enable_streams(sd, pad_index, source_mask);
	else
		return v4l2_subdev_disable_streams(sd, pad_index, source_mask);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_s_stream_helper);
2343
2344#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
2345
2346#endif /* CONFIG_MEDIA_CONTROLLER */
2347
2348void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
2349{
2350 INIT_LIST_HEAD(&sd->list);
2351 BUG_ON(!ops);
2352 sd->ops = ops;
2353 sd->v4l2_dev = NULL;
2354 sd->flags = 0;
2355 sd->name[0] = '\0';
2356 sd->grp_id = 0;
2357 sd->dev_priv = NULL;
2358 sd->host_priv = NULL;
2359 sd->privacy_led = NULL;
2360 INIT_LIST_HEAD(&sd->async_subdev_endpoint_list);
2361#if defined(CONFIG_MEDIA_CONTROLLER)
2362 sd->entity.name = sd->name;
2363 sd->entity.obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV;
2364 sd->entity.function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
2365#endif
2366}
2367EXPORT_SYMBOL(v4l2_subdev_init);
2368
/*
 * Deliver an event for a subdev to both userspace and the kernel.
 *
 * The event is queued on the subdev's device node (so userspace listeners
 * receive it) and then forwarded through the in-kernel notify mechanism
 * with the V4L2_DEVICE_NOTIFY_EVENT notification type.
 */
void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
			      const struct v4l2_event *ev)
{
	v4l2_event_queue(sd->devnode, ev);
	/* The cast only drops const; the event is not modified here. */
	v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT, (void *)ev);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event);
2376
/*
 * Look up the optional "privacy-led" LED associated with the subdev's
 * device and prepare it for exclusive kernel control.
 *
 * Returns 0 on success or when no privacy LED is described (-ENOENT from
 * led_get()); any other lookup failure is propagated via dev_err_probe().
 * Compiles to a no-op returning 0 when the LED class is not reachable.
 */
int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd)
{
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	sd->privacy_led = led_get(sd->dev, "privacy-led");
	if (IS_ERR(sd->privacy_led) && PTR_ERR(sd->privacy_led) != -ENOENT)
		return dev_err_probe(sd->dev, PTR_ERR(sd->privacy_led),
				     "getting privacy LED\n");

	/*
	 * NOTE(review): on -ENOENT, sd->privacy_led keeps the ERR_PTR value
	 * rather than NULL; the IS_ERR_OR_NULL() guards here and in
	 * v4l2_subdev_put_privacy_led() treat it as "no LED".
	 */
	if (!IS_ERR_OR_NULL(sd->privacy_led)) {
		/* Take the LED away from sysfs/triggers and start it off. */
		mutex_lock(&sd->privacy_led->led_access);
		led_sysfs_disable(sd->privacy_led);
		led_trigger_remove(sd->privacy_led);
		led_set_brightness(sd->privacy_led, 0);
		mutex_unlock(&sd->privacy_led->led_access);
	}
#endif
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_privacy_led);
2396
/*
 * Release the privacy LED acquired by v4l2_subdev_get_privacy_led().
 *
 * Re-enables sysfs control of the LED and drops the reference taken by
 * led_get(). Safe to call when no LED was found: sd->privacy_led being
 * NULL or an ERR_PTR makes this a no-op.
 */
void v4l2_subdev_put_privacy_led(struct v4l2_subdev *sd)
{
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	if (!IS_ERR_OR_NULL(sd->privacy_led)) {
		mutex_lock(&sd->privacy_led->led_access);
		/* Undo the led_sysfs_disable() done on acquisition. */
		led_sysfs_enable(sd->privacy_led);
		mutex_unlock(&sd->privacy_led->led_access);
		led_put(sd->privacy_led);
	}
#endif
}
EXPORT_SYMBOL_GPL(v4l2_subdev_put_privacy_led);