1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * V4L2 sub-device
4 *
5 * Copyright (C) 2010 Nokia Corporation
6 *
7 * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
8 * Sakari Ailus <sakari.ailus@iki.fi>
9 */
10
11 #include <linux/export.h>
12 #include <linux/ioctl.h>
13 #include <linux/leds.h>
14 #include <linux/mm.h>
15 #include <linux/module.h>
16 #include <linux/overflow.h>
17 #include <linux/slab.h>
18 #include <linux/types.h>
19 #include <linux/version.h>
20 #include <linux/videodev2.h>
21
22 #include <media/v4l2-ctrls.h>
23 #include <media/v4l2-device.h>
24 #include <media/v4l2-event.h>
25 #include <media/v4l2-fh.h>
26 #include <media/v4l2-ioctl.h>
27
28 #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
29 /*
30 * The Streams API is an experimental feature. To use the Streams API, set
31 * 'v4l2_subdev_enable_streams_api' to 1 below.
32 */
33
34 static bool v4l2_subdev_enable_streams_api;
35 #endif
36
37 /*
38 * Maximum stream ID is 63 for now, as we use a u64 bitmask to represent a set
39 * of streams.
40 *
41 * Note that V4L2_FRAME_DESC_ENTRY_MAX is related: V4L2_FRAME_DESC_ENTRY_MAX
42 * restricts the total number of streams in a pad, although the stream ID is
43 * not restricted.
44 */
45 #define V4L2_SUBDEV_MAX_STREAM_ID 63
46
47 #include "v4l2-subdev-priv.h"
48
49 #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
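/*
 * Allocate a per-file-handle subdev state to back the TRY formats and
 * selections of this open file descriptor. The state is freed again in
 * subdev_fh_free() when the file handle goes away.
 */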
50 static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
51 {
52 struct v4l2_subdev_state *state;
53 static struct lock_class_key key;
54
55 state = __v4l2_subdev_state_alloc(sd, "fh->state->lock", &key);
56 if (IS_ERR(state))
57 return PTR_ERR(state);
58
59 fh->state = state;
60
61 return 0;
62 }
63
64 static void subdev_fh_free(struct v4l2_subdev_fh *fh)
65 {
66 __v4l2_subdev_state_free(fh->state);
67 fh->state = NULL;
68 }
69
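/*
 * Open handler for the subdev device node: allocate the file handle,
 * initialize its TRY state, pin the media device's module and call the
 * subdev's .open() internal op, unwinding everything on failure.
 */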
70 static int subdev_open(struct file *file)
71 {
72 struct video_device *vdev = video_devdata(file);
73 struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
74 struct v4l2_subdev_fh *subdev_fh;
75 int ret;
76
77 subdev_fh = kzalloc(sizeof(*subdev_fh), GFP_KERNEL);
78 if (subdev_fh == NULL)
79 return -ENOMEM;
80
81 ret = subdev_fh_init(subdev_fh, sd);
82 if (ret) {
83 kfree(subdev_fh);
84 return ret;
85 }
86
87 v4l2_fh_init(&subdev_fh->vfh, vdev);
88 v4l2_fh_add(&subdev_fh->vfh);
89 file->private_data = &subdev_fh->vfh;
90
91 if (sd->v4l2_dev->mdev && sd->entity.graph_obj.mdev->dev) {
92 struct module *owner;
93
94 owner = sd->entity.graph_obj.mdev->dev->driver->owner;
95 if (!try_module_get(owner)) {
96 ret = -EBUSY;
97 goto err;
98 }
99 subdev_fh->owner = owner;
100 }
101
102 if (sd->internal_ops && sd->internal_ops->open) {
103 ret = sd->internal_ops->open(sd, subdev_fh);
104 if (ret < 0)
105 goto err;
106 }
107
108 return 0;
109
110 err:
111 module_put(subdev_fh->owner);
112 v4l2_fh_del(&subdev_fh->vfh);
113 v4l2_fh_exit(&subdev_fh->vfh);
114 subdev_fh_free(subdev_fh);
115 kfree(subdev_fh);
116
117 return ret;
118 }
119
120 static int subdev_close(struct file *file)
121 {
122 struct video_device *vdev = video_devdata(file);
123 struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
124 struct v4l2_fh *vfh = file->private_data;
125 struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
126
127 if (sd->internal_ops && sd->internal_ops->close)
128 sd->internal_ops->close(sd, subdev_fh);
129 module_put(subdev_fh->owner);
130 v4l2_fh_del(vfh);
131 v4l2_fh_exit(vfh);
132 subdev_fh_free(subdev_fh);
133 kfree(subdev_fh);
134 file->private_data = NULL;
135
136 return 0;
137 }
138 #else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
139 static int subdev_open(struct file *file)
140 {
141 return -ENODEV;
142 }
143
144 static int subdev_close(struct file *file)
145 {
146 return -ENODEV;
147 }
148 #endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
149
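/*
 * The check_*() helpers below validate the argument structures coming
 * from user space before the corresponding pad or video op of the
 * subdev driver is invoked through the call_*() wrappers.
 */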
150 static inline int check_which(u32 which)
151 {
152 if (which != V4L2_SUBDEV_FORMAT_TRY &&
153 which != V4L2_SUBDEV_FORMAT_ACTIVE)
154 return -EINVAL;
155
156 return 0;
157 }
158
159 static inline int check_pad(struct v4l2_subdev *sd, u32 pad)
160 {
161 #if defined(CONFIG_MEDIA_CONTROLLER)
162 if (sd->entity.num_pads) {
163 if (pad >= sd->entity.num_pads)
164 return -EINVAL;
165 return 0;
166 }
167 #endif
168 /* allow pad 0 on subdevices not registered as media entities */
169 if (pad > 0)
170 return -EINVAL;
171 return 0;
172 }
173
174 static int check_state(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
175 u32 which, u32 pad, u32 stream)
176 {
177 if (sd->flags & V4L2_SUBDEV_FL_STREAMS) {
178 #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
179 if (!v4l2_subdev_state_get_stream_format(state, pad, stream))
180 return -EINVAL;
181 return 0;
182 #else
183 return -EINVAL;
184 #endif
185 }
186
187 if (stream != 0)
188 return -EINVAL;
189
190 if (which == V4L2_SUBDEV_FORMAT_TRY && (!state || !state->pads))
191 return -EINVAL;
192
193 return 0;
194 }
195
196 static inline int check_format(struct v4l2_subdev *sd,
197 struct v4l2_subdev_state *state,
198 struct v4l2_subdev_format *format)
199 {
200 if (!format)
201 return -EINVAL;
202
203 return check_which(format->which) ? : check_pad(sd, format->pad) ? :
204 check_state(sd, state, format->which, format->pad, format->stream);
205 }
206
207 static int call_get_fmt(struct v4l2_subdev *sd,
208 struct v4l2_subdev_state *state,
209 struct v4l2_subdev_format *format)
210 {
211 return check_format(sd, state, format) ? :
212 sd->ops->pad->get_fmt(sd, state, format);
213 }
214
215 static int call_set_fmt(struct v4l2_subdev *sd,
216 struct v4l2_subdev_state *state,
217 struct v4l2_subdev_format *format)
218 {
219 return check_format(sd, state, format) ? :
220 sd->ops->pad->set_fmt(sd, state, format);
221 }
222
223 static int call_enum_mbus_code(struct v4l2_subdev *sd,
224 struct v4l2_subdev_state *state,
225 struct v4l2_subdev_mbus_code_enum *code)
226 {
227 if (!code)
228 return -EINVAL;
229
230 return check_which(code->which) ? : check_pad(sd, code->pad) ? :
231 check_state(sd, state, code->which, code->pad, code->stream) ? :
232 sd->ops->pad->enum_mbus_code(sd, state, code);
233 }
234
235 static int call_enum_frame_size(struct v4l2_subdev *sd,
236 struct v4l2_subdev_state *state,
237 struct v4l2_subdev_frame_size_enum *fse)
238 {
239 if (!fse)
240 return -EINVAL;
241
242 return check_which(fse->which) ? : check_pad(sd, fse->pad) ? :
243 check_state(sd, state, fse->which, fse->pad, fse->stream) ? :
244 sd->ops->pad->enum_frame_size(sd, state, fse);
245 }
246
247 static inline int check_frame_interval(struct v4l2_subdev *sd,
248 struct v4l2_subdev_frame_interval *fi)
249 {
250 if (!fi)
251 return -EINVAL;
252
253 return check_pad(sd, fi->pad);
254 }
255
256 static int call_g_frame_interval(struct v4l2_subdev *sd,
257 struct v4l2_subdev_frame_interval *fi)
258 {
259 return check_frame_interval(sd, fi) ? :
260 sd->ops->video->g_frame_interval(sd, fi);
261 }
262
263 static int call_s_frame_interval(struct v4l2_subdev *sd,
264 struct v4l2_subdev_frame_interval *fi)
265 {
266 return check_frame_interval(sd, fi) ? :
267 sd->ops->video->s_frame_interval(sd, fi);
268 }
269
270 static int call_enum_frame_interval(struct v4l2_subdev *sd,
271 struct v4l2_subdev_state *state,
272 struct v4l2_subdev_frame_interval_enum *fie)
273 {
274 if (!fie)
275 return -EINVAL;
276
277 return check_which(fie->which) ? : check_pad(sd, fie->pad) ? :
278 check_state(sd, state, fie->which, fie->pad, fie->stream) ? :
279 sd->ops->pad->enum_frame_interval(sd, state, fie);
280 }
281
282 static inline int check_selection(struct v4l2_subdev *sd,
283 struct v4l2_subdev_state *state,
284 struct v4l2_subdev_selection *sel)
285 {
286 if (!sel)
287 return -EINVAL;
288
289 return check_which(sel->which) ? : check_pad(sd, sel->pad) ? :
290 check_state(sd, state, sel->which, sel->pad, sel->stream);
291 }
292
293 static int call_get_selection(struct v4l2_subdev *sd,
294 struct v4l2_subdev_state *state,
295 struct v4l2_subdev_selection *sel)
296 {
297 return check_selection(sd, state, sel) ? :
298 sd->ops->pad->get_selection(sd, state, sel);
299 }
300
301 static int call_set_selection(struct v4l2_subdev *sd,
302 struct v4l2_subdev_state *state,
303 struct v4l2_subdev_selection *sel)
304 {
305 return check_selection(sd, state, sel) ? :
306 sd->ops->pad->set_selection(sd, state, sel);
307 }
308
309 static inline int check_edid(struct v4l2_subdev *sd,
310 struct v4l2_subdev_edid *edid)
311 {
312 if (!edid)
313 return -EINVAL;
314
315 if (edid->blocks && edid->edid == NULL)
316 return -EINVAL;
317
318 return check_pad(sd, edid->pad);
319 }
320
321 static int call_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
322 {
323 return check_edid(sd, edid) ? : sd->ops->pad->get_edid(sd, edid);
324 }
325
326 static int call_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
327 {
328 return check_edid(sd, edid) ? : sd->ops->pad->set_edid(sd, edid);
329 }
330
331 static int call_dv_timings_cap(struct v4l2_subdev *sd,
332 struct v4l2_dv_timings_cap *cap)
333 {
334 if (!cap)
335 return -EINVAL;
336
337 return check_pad(sd, cap->pad) ? :
338 sd->ops->pad->dv_timings_cap(sd, cap);
339 }
340
341 static int call_enum_dv_timings(struct v4l2_subdev *sd,
342 struct v4l2_enum_dv_timings *dvt)
343 {
344 if (!dvt)
345 return -EINVAL;
346
347 return check_pad(sd, dvt->pad) ? :
348 sd->ops->pad->enum_dv_timings(sd, dvt);
349 }
350
351 static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
352 struct v4l2_mbus_config *config)
353 {
354 memset(config, 0, sizeof(*config));
355
356 return check_pad(sd, pad) ? :
357 sd->ops->pad->get_mbus_config(sd, pad, config);
358 }
359
360 static int call_s_stream(struct v4l2_subdev *sd, int enable)
361 {
362 int ret;
363
364 /*
365 * The .s_stream() operation must never be called to start or stop an
366 * already started or stopped subdev. Catch offenders but don't return
367 * an error yet to avoid regressions.
368 */
369 if (WARN_ON(sd->s_stream_enabled == !!enable))
370 return 0;
371
372 ret = sd->ops->video->s_stream(sd, enable);
373
374 if (!enable && ret < 0) {
375 dev_warn(sd->dev, "disabling streaming failed (%d)\n", ret);
376 ret = 0;
377 }
378
379 if (!ret) {
380 sd->s_stream_enabled = enable;
381
382 #if IS_REACHABLE(CONFIG_LEDS_CLASS)
383 if (!IS_ERR_OR_NULL(sd->privacy_led)) {
384 if (enable)
385 led_set_brightness(sd->privacy_led,
386 sd->privacy_led->max_brightness);
387 else
388 led_set_brightness(sd->privacy_led, 0);
389 }
390 #endif
391 }
392
393 return ret;
394 }
395
396 #ifdef CONFIG_MEDIA_CONTROLLER
397 /*
398 * Create state-management wrapper for pad ops dealing with subdev state. The
399 * wrapper handles the case where the caller does not provide the called
400 * subdev's state. This should be removed when all the callers are fixed.
401 */
402 #define DEFINE_STATE_WRAPPER(f, arg_type) \
403 static int call_##f##_state(struct v4l2_subdev *sd, \
404 struct v4l2_subdev_state *_state, \
405 arg_type *arg) \
406 { \
407 struct v4l2_subdev_state *state = _state; \
408 int ret; \
409 if (!_state) \
410 state = v4l2_subdev_lock_and_get_active_state(sd); \
411 ret = call_##f(sd, state, arg); \
412 if (!_state && state) \
413 v4l2_subdev_unlock_state(state); \
414 return ret; \
415 }
416
417 #else /* CONFIG_MEDIA_CONTROLLER */
418
419 #define DEFINE_STATE_WRAPPER(f, arg_type) \
420 static int call_##f##_state(struct v4l2_subdev *sd, \
421 struct v4l2_subdev_state *state, \
422 arg_type *arg) \
423 { \
424 return call_##f(sd, state, arg); \
425 }
426
427 #endif /* CONFIG_MEDIA_CONTROLLER */
428
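/*
 * Instantiate the state-aware wrappers for all pad ops that take a
 * subdev state argument; they are plugged into
 * v4l2_subdev_call_pad_wrappers below.
 */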
429 DEFINE_STATE_WRAPPER(get_fmt, struct v4l2_subdev_format);
430 DEFINE_STATE_WRAPPER(set_fmt, struct v4l2_subdev_format);
431 DEFINE_STATE_WRAPPER(enum_mbus_code, struct v4l2_subdev_mbus_code_enum);
432 DEFINE_STATE_WRAPPER(enum_frame_size, struct v4l2_subdev_frame_size_enum);
433 DEFINE_STATE_WRAPPER(enum_frame_interval, struct v4l2_subdev_frame_interval_enum);
434 DEFINE_STATE_WRAPPER(get_selection, struct v4l2_subdev_selection);
435 DEFINE_STATE_WRAPPER(set_selection, struct v4l2_subdev_selection);
436
437 static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = {
438 .get_fmt = call_get_fmt_state,
439 .set_fmt = call_set_fmt_state,
440 .enum_mbus_code = call_enum_mbus_code_state,
441 .enum_frame_size = call_enum_frame_size_state,
442 .enum_frame_interval = call_enum_frame_interval_state,
443 .get_selection = call_get_selection_state,
444 .set_selection = call_set_selection_state,
445 .get_edid = call_get_edid,
446 .set_edid = call_set_edid,
447 .dv_timings_cap = call_dv_timings_cap,
448 .enum_dv_timings = call_enum_dv_timings,
449 .get_mbus_config = call_get_mbus_config,
450 };
451
452 static const struct v4l2_subdev_video_ops v4l2_subdev_call_video_wrappers = {
453 .g_frame_interval = call_g_frame_interval,
454 .s_frame_interval = call_s_frame_interval,
455 .s_stream = call_s_stream,
456 };
457
458 const struct v4l2_subdev_ops v4l2_subdev_call_wrappers = {
459 .pad = &v4l2_subdev_call_pad_wrappers,
460 .video = &v4l2_subdev_call_video_wrappers,
461 };
462 EXPORT_SYMBOL(v4l2_subdev_call_wrappers);
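/*
 * Illustrative only: drivers normally reach the wrappers above through
 * the v4l2_subdev_call() macro rather than invoking the ops directly,
 * e.g.
 *
 *	struct v4l2_subdev_format fmt = {
 *		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
 *		.pad = 0,
 *	};
 *	err = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
 *
 * With a NULL state, the DEFINE_STATE_WRAPPER() glue locks and uses the
 * subdev's active state (on CONFIG_MEDIA_CONTROLLER kernels).
 */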
463
464 #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
465
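/*
 * Pick the state an ioctl should operate on: the file handle's TRY
 * state for V4L2_SUBDEV_FORMAT_TRY requests, the subdev's active state
 * otherwise, or NULL for ioctls that do not take a state at all.
 */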
466 static struct v4l2_subdev_state *
467 subdev_ioctl_get_state(struct v4l2_subdev *sd, struct v4l2_subdev_fh *subdev_fh,
468 unsigned int cmd, void *arg)
469 {
470 u32 which;
471
472 switch (cmd) {
473 default:
474 return NULL;
475 case VIDIOC_SUBDEV_G_FMT:
476 case VIDIOC_SUBDEV_S_FMT:
477 which = ((struct v4l2_subdev_format *)arg)->which;
478 break;
479 case VIDIOC_SUBDEV_G_CROP:
480 case VIDIOC_SUBDEV_S_CROP:
481 which = ((struct v4l2_subdev_crop *)arg)->which;
482 break;
483 case VIDIOC_SUBDEV_ENUM_MBUS_CODE:
484 which = ((struct v4l2_subdev_mbus_code_enum *)arg)->which;
485 break;
486 case VIDIOC_SUBDEV_ENUM_FRAME_SIZE:
487 which = ((struct v4l2_subdev_frame_size_enum *)arg)->which;
488 break;
489 case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL:
490 which = ((struct v4l2_subdev_frame_interval_enum *)arg)->which;
491 break;
492 case VIDIOC_SUBDEV_G_SELECTION:
493 case VIDIOC_SUBDEV_S_SELECTION:
494 which = ((struct v4l2_subdev_selection *)arg)->which;
495 break;
496 case VIDIOC_SUBDEV_G_ROUTING:
497 case VIDIOC_SUBDEV_S_ROUTING:
498 which = ((struct v4l2_subdev_routing *)arg)->which;
499 break;
500 }
501
502 return which == V4L2_SUBDEV_FORMAT_TRY ?
503 subdev_fh->state :
504 v4l2_subdev_get_unlocked_active_state(sd);
505 }
506
507 static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
508 struct v4l2_subdev_state *state)
509 {
510 struct video_device *vdev = video_devdata(file);
511 struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
512 struct v4l2_fh *vfh = file->private_data;
513 struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
514 bool ro_subdev = test_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags);
515 bool streams_subdev = sd->flags & V4L2_SUBDEV_FL_STREAMS;
516 bool client_supports_streams = subdev_fh->client_caps &
517 V4L2_SUBDEV_CLIENT_CAP_STREAMS;
518 int rval;
519
520 /*
521 * If the streams API is not enabled, remove V4L2_SUBDEV_CAP_STREAMS.
522 * Remove this when the API is no longer experimental.
523 */
524 if (!v4l2_subdev_enable_streams_api)
525 streams_subdev = false;
526
527 switch (cmd) {
528 case VIDIOC_SUBDEV_QUERYCAP: {
529 struct v4l2_subdev_capability *cap = arg;
530
531 memset(cap->reserved, 0, sizeof(cap->reserved));
532 cap->version = LINUX_VERSION_CODE;
533 cap->capabilities =
534 (ro_subdev ? V4L2_SUBDEV_CAP_RO_SUBDEV : 0) |
535 (streams_subdev ? V4L2_SUBDEV_CAP_STREAMS : 0);
536
537 return 0;
538 }
539
540 case VIDIOC_QUERYCTRL:
541 /*
542 * TODO: this really should be folded into v4l2_queryctrl (this
543 * currently returns -EINVAL for NULL control handlers).
544 * However, v4l2_queryctrl() is still called directly by
545 * drivers as well and until that has been addressed I believe
546 * it is safer to do the check here. The same is true for the
547 * other control ioctls below.
548 */
549 if (!vfh->ctrl_handler)
550 return -ENOTTY;
551 return v4l2_queryctrl(vfh->ctrl_handler, arg);
552
553 case VIDIOC_QUERY_EXT_CTRL:
554 if (!vfh->ctrl_handler)
555 return -ENOTTY;
556 return v4l2_query_ext_ctrl(vfh->ctrl_handler, arg);
557
558 case VIDIOC_QUERYMENU:
559 if (!vfh->ctrl_handler)
560 return -ENOTTY;
561 return v4l2_querymenu(vfh->ctrl_handler, arg);
562
563 case VIDIOC_G_CTRL:
564 if (!vfh->ctrl_handler)
565 return -ENOTTY;
566 return v4l2_g_ctrl(vfh->ctrl_handler, arg);
567
568 case VIDIOC_S_CTRL:
569 if (!vfh->ctrl_handler)
570 return -ENOTTY;
571 return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg);
572
573 case VIDIOC_G_EXT_CTRLS:
574 if (!vfh->ctrl_handler)
575 return -ENOTTY;
576 return v4l2_g_ext_ctrls(vfh->ctrl_handler,
577 vdev, sd->v4l2_dev->mdev, arg);
578
579 case VIDIOC_S_EXT_CTRLS:
580 if (!vfh->ctrl_handler)
581 return -ENOTTY;
582 return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler,
583 vdev, sd->v4l2_dev->mdev, arg);
584
585 case VIDIOC_TRY_EXT_CTRLS:
586 if (!vfh->ctrl_handler)
587 return -ENOTTY;
588 return v4l2_try_ext_ctrls(vfh->ctrl_handler,
589 vdev, sd->v4l2_dev->mdev, arg);
590
591 case VIDIOC_DQEVENT:
592 if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
593 return -ENOIOCTLCMD;
594
595 return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
596
597 case VIDIOC_SUBSCRIBE_EVENT:
598 return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
599
600 case VIDIOC_UNSUBSCRIBE_EVENT:
601 return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);
602
603 #ifdef CONFIG_VIDEO_ADV_DEBUG
604 case VIDIOC_DBG_G_REGISTER:
605 {
606 struct v4l2_dbg_register *p = arg;
607
608 if (!capable(CAP_SYS_ADMIN))
609 return -EPERM;
610 return v4l2_subdev_call(sd, core, g_register, p);
611 }
612 case VIDIOC_DBG_S_REGISTER:
613 {
614 struct v4l2_dbg_register *p = arg;
615
616 if (!capable(CAP_SYS_ADMIN))
617 return -EPERM;
618 return v4l2_subdev_call(sd, core, s_register, p);
619 }
620 case VIDIOC_DBG_G_CHIP_INFO:
621 {
622 struct v4l2_dbg_chip_info *p = arg;
623
624 if (p->match.type != V4L2_CHIP_MATCH_SUBDEV || p->match.addr)
625 return -EINVAL;
626 if (sd->ops->core && sd->ops->core->s_register)
627 p->flags |= V4L2_CHIP_FL_WRITABLE;
628 if (sd->ops->core && sd->ops->core->g_register)
629 p->flags |= V4L2_CHIP_FL_READABLE;
630 strscpy(p->name, sd->name, sizeof(p->name));
631 return 0;
632 }
633 #endif
634
635 case VIDIOC_LOG_STATUS: {
636 int ret;
637
638 pr_info("%s: ================= START STATUS =================\n",
639 sd->name);
640 ret = v4l2_subdev_call(sd, core, log_status);
641 pr_info("%s: ================== END STATUS ==================\n",
642 sd->name);
643 return ret;
644 }
645
646 case VIDIOC_SUBDEV_G_FMT: {
647 struct v4l2_subdev_format *format = arg;
648
649 if (!client_supports_streams)
650 format->stream = 0;
651
652 memset(format->reserved, 0, sizeof(format->reserved));
653 memset(format->format.reserved, 0, sizeof(format->format.reserved));
654 return v4l2_subdev_call(sd, pad, get_fmt, state, format);
655 }
656
657 case VIDIOC_SUBDEV_S_FMT: {
658 struct v4l2_subdev_format *format = arg;
659
660 if (format->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
661 return -EPERM;
662
663 if (!client_supports_streams)
664 format->stream = 0;
665
666 memset(format->reserved, 0, sizeof(format->reserved));
667 memset(format->format.reserved, 0, sizeof(format->format.reserved));
668 return v4l2_subdev_call(sd, pad, set_fmt, state, format);
669 }
670
671 case VIDIOC_SUBDEV_G_CROP: {
672 struct v4l2_subdev_crop *crop = arg;
673 struct v4l2_subdev_selection sel;
674
675 if (!client_supports_streams)
676 crop->stream = 0;
677
678 memset(crop->reserved, 0, sizeof(crop->reserved));
679 memset(&sel, 0, sizeof(sel));
680 sel.which = crop->which;
681 sel.pad = crop->pad;
682 sel.stream = crop->stream;
683 sel.target = V4L2_SEL_TGT_CROP;
684
685 rval = v4l2_subdev_call(
686 sd, pad, get_selection, state, &sel);
687
688 crop->rect = sel.r;
689
690 return rval;
691 }
692
693 case VIDIOC_SUBDEV_S_CROP: {
694 struct v4l2_subdev_crop *crop = arg;
695 struct v4l2_subdev_selection sel;
696
697 if (crop->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
698 return -EPERM;
699
700 if (!client_supports_streams)
701 crop->stream = 0;
702
703 memset(crop->reserved, 0, sizeof(crop->reserved));
704 memset(&sel, 0, sizeof(sel));
705 sel.which = crop->which;
706 sel.pad = crop->pad;
707 sel.stream = crop->stream;
708 sel.target = V4L2_SEL_TGT_CROP;
709 sel.r = crop->rect;
710
711 rval = v4l2_subdev_call(
712 sd, pad, set_selection, state, &sel);
713
714 crop->rect = sel.r;
715
716 return rval;
717 }
718
719 case VIDIOC_SUBDEV_ENUM_MBUS_CODE: {
720 struct v4l2_subdev_mbus_code_enum *code = arg;
721
722 if (!client_supports_streams)
723 code->stream = 0;
724
725 memset(code->reserved, 0, sizeof(code->reserved));
726 return v4l2_subdev_call(sd, pad, enum_mbus_code, state,
727 code);
728 }
729
730 case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: {
731 struct v4l2_subdev_frame_size_enum *fse = arg;
732
733 if (!client_supports_streams)
734 fse->stream = 0;
735
736 memset(fse->reserved, 0, sizeof(fse->reserved));
737 return v4l2_subdev_call(sd, pad, enum_frame_size, state,
738 fse);
739 }
740
741 case VIDIOC_SUBDEV_G_FRAME_INTERVAL: {
742 struct v4l2_subdev_frame_interval *fi = arg;
743
744 if (!client_supports_streams)
745 fi->stream = 0;
746
747 memset(fi->reserved, 0, sizeof(fi->reserved));
748 return v4l2_subdev_call(sd, video, g_frame_interval, arg);
749 }
750
751 case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
752 struct v4l2_subdev_frame_interval *fi = arg;
753
754 if (ro_subdev)
755 return -EPERM;
756
757 if (!client_supports_streams)
758 fi->stream = 0;
759
760 memset(fi->reserved, 0, sizeof(fi->reserved));
761 return v4l2_subdev_call(sd, video, s_frame_interval, arg);
762 }
763
764 case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {
765 struct v4l2_subdev_frame_interval_enum *fie = arg;
766
767 if (!client_supports_streams)
768 fie->stream = 0;
769
770 memset(fie->reserved, 0, sizeof(fie->reserved));
771 return v4l2_subdev_call(sd, pad, enum_frame_interval, state,
772 fie);
773 }
774
775 case VIDIOC_SUBDEV_G_SELECTION: {
776 struct v4l2_subdev_selection *sel = arg;
777
778 if (!client_supports_streams)
779 sel->stream = 0;
780
781 memset(sel->reserved, 0, sizeof(sel->reserved));
782 return v4l2_subdev_call(
783 sd, pad, get_selection, state, sel);
784 }
785
786 case VIDIOC_SUBDEV_S_SELECTION: {
787 struct v4l2_subdev_selection *sel = arg;
788
789 if (sel->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
790 return -EPERM;
791
792 if (!client_supports_streams)
793 sel->stream = 0;
794
795 memset(sel->reserved, 0, sizeof(sel->reserved));
796 return v4l2_subdev_call(
797 sd, pad, set_selection, state, sel);
798 }
799
800 case VIDIOC_G_EDID: {
801 struct v4l2_subdev_edid *edid = arg;
802
803 return v4l2_subdev_call(sd, pad, get_edid, edid);
804 }
805
806 case VIDIOC_S_EDID: {
807 struct v4l2_subdev_edid *edid = arg;
808
809 return v4l2_subdev_call(sd, pad, set_edid, edid);
810 }
811
812 case VIDIOC_SUBDEV_DV_TIMINGS_CAP: {
813 struct v4l2_dv_timings_cap *cap = arg;
814
815 return v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
816 }
817
818 case VIDIOC_SUBDEV_ENUM_DV_TIMINGS: {
819 struct v4l2_enum_dv_timings *dvt = arg;
820
821 return v4l2_subdev_call(sd, pad, enum_dv_timings, dvt);
822 }
823
824 case VIDIOC_SUBDEV_QUERY_DV_TIMINGS:
825 return v4l2_subdev_call(sd, video, query_dv_timings, arg);
826
827 case VIDIOC_SUBDEV_G_DV_TIMINGS:
828 return v4l2_subdev_call(sd, video, g_dv_timings, arg);
829
830 case VIDIOC_SUBDEV_S_DV_TIMINGS:
831 if (ro_subdev)
832 return -EPERM;
833
834 return v4l2_subdev_call(sd, video, s_dv_timings, arg);
835
836 case VIDIOC_SUBDEV_G_STD:
837 return v4l2_subdev_call(sd, video, g_std, arg);
838
839 case VIDIOC_SUBDEV_S_STD: {
840 v4l2_std_id *std = arg;
841
842 if (ro_subdev)
843 return -EPERM;
844
845 return v4l2_subdev_call(sd, video, s_std, *std);
846 }
847
848 case VIDIOC_SUBDEV_ENUMSTD: {
849 struct v4l2_standard *p = arg;
850 v4l2_std_id id;
851
852 if (v4l2_subdev_call(sd, video, g_tvnorms, &id))
853 return -EINVAL;
854
855 return v4l_video_std_enumstd(p, id);
856 }
857
858 case VIDIOC_SUBDEV_QUERYSTD:
859 return v4l2_subdev_call(sd, video, querystd, arg);
860
861 case VIDIOC_SUBDEV_G_ROUTING: {
862 struct v4l2_subdev_routing *routing = arg;
863 struct v4l2_subdev_krouting *krouting;
864
865 if (!v4l2_subdev_enable_streams_api)
866 return -ENOIOCTLCMD;
867
868 if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
869 return -ENOIOCTLCMD;
870
871 memset(routing->reserved, 0, sizeof(routing->reserved));
872
873 krouting = &state->routing;
874
875 if (routing->num_routes < krouting->num_routes) {
876 routing->num_routes = krouting->num_routes;
877 return -ENOSPC;
878 }
879
880 memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
881 krouting->routes,
882 krouting->num_routes * sizeof(*krouting->routes));
883 routing->num_routes = krouting->num_routes;
884
885 return 0;
886 }
887
888 case VIDIOC_SUBDEV_S_ROUTING: {
889 struct v4l2_subdev_routing *routing = arg;
890 struct v4l2_subdev_route *routes =
891 (struct v4l2_subdev_route *)(uintptr_t)routing->routes;
892 struct v4l2_subdev_krouting krouting = {};
893 unsigned int i;
894
895 if (!v4l2_subdev_enable_streams_api)
896 return -ENOIOCTLCMD;
897
898 if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
899 return -ENOIOCTLCMD;
900
901 if (routing->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
902 return -EPERM;
903
904 memset(routing->reserved, 0, sizeof(routing->reserved));
905
906 for (i = 0; i < routing->num_routes; ++i) {
907 const struct v4l2_subdev_route *route = &routes[i];
908 const struct media_pad *pads = sd->entity.pads;
909
910 if (route->sink_stream > V4L2_SUBDEV_MAX_STREAM_ID ||
911 route->source_stream > V4L2_SUBDEV_MAX_STREAM_ID)
912 return -EINVAL;
913
914 if (route->sink_pad >= sd->entity.num_pads)
915 return -EINVAL;
916
917 if (!(pads[route->sink_pad].flags &
918 MEDIA_PAD_FL_SINK))
919 return -EINVAL;
920
921 if (route->source_pad >= sd->entity.num_pads)
922 return -EINVAL;
923
924 if (!(pads[route->source_pad].flags &
925 MEDIA_PAD_FL_SOURCE))
926 return -EINVAL;
927 }
928
929 krouting.num_routes = routing->num_routes;
930 krouting.routes = routes;
931
932 return v4l2_subdev_call(sd, pad, set_routing, state,
933 routing->which, &krouting);
934 }
935
936 case VIDIOC_SUBDEV_G_CLIENT_CAP: {
937 struct v4l2_subdev_client_capability *client_cap = arg;
938
939 client_cap->capabilities = subdev_fh->client_caps;
940
941 return 0;
942 }
943
944 case VIDIOC_SUBDEV_S_CLIENT_CAP: {
945 struct v4l2_subdev_client_capability *client_cap = arg;
946
947 /*
948 * Clear V4L2_SUBDEV_CLIENT_CAP_STREAMS if streams API is not
949 * enabled. Remove this when streams API is no longer
950 * experimental.
951 */
952 if (!v4l2_subdev_enable_streams_api)
953 client_cap->capabilities &= ~V4L2_SUBDEV_CLIENT_CAP_STREAMS;
954
955 /* Filter out unsupported capabilities */
956 client_cap->capabilities &= V4L2_SUBDEV_CLIENT_CAP_STREAMS;
957
958 subdev_fh->client_caps = client_cap->capabilities;
959
960 return 0;
961 }
962
963 default:
964 return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
965 }
966
967 return 0;
968 }
969
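/*
 * Serialize the ioctl against the video device lock, then look up and
 * lock the relevant subdev state (if any) before handing off to
 * subdev_do_ioctl().
 */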
970 static long subdev_do_ioctl_lock(struct file *file, unsigned int cmd, void *arg)
971 {
972 struct video_device *vdev = video_devdata(file);
973 struct mutex *lock = vdev->lock;
974 long ret = -ENODEV;
975
976 if (lock && mutex_lock_interruptible(lock))
977 return -ERESTARTSYS;
978
979 if (video_is_registered(vdev)) {
980 struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
981 struct v4l2_fh *vfh = file->private_data;
982 struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
983 struct v4l2_subdev_state *state;
984
985 state = subdev_ioctl_get_state(sd, subdev_fh, cmd, arg);
986
987 if (state)
988 v4l2_subdev_lock_state(state);
989
990 ret = subdev_do_ioctl(file, cmd, arg, state);
991
992 if (state)
993 v4l2_subdev_unlock_state(state);
994 }
995
996 if (lock)
997 mutex_unlock(lock);
998 return ret;
999 }
1000
1001 static long subdev_ioctl(struct file *file, unsigned int cmd,
1002 unsigned long arg)
1003 {
1004 return video_usercopy(file, cmd, arg, subdev_do_ioctl_lock);
1005 }
1006
1007 #ifdef CONFIG_COMPAT
1008 static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
1009 unsigned long arg)
1010 {
1011 struct video_device *vdev = video_devdata(file);
1012 struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
1013
1014 return v4l2_subdev_call(sd, core, compat_ioctl32, cmd, arg);
1015 }
1016 #endif
1017
1018 #else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
1019 static long subdev_ioctl(struct file *file, unsigned int cmd,
1020 unsigned long arg)
1021 {
1022 return -ENODEV;
1023 }
1024
1025 #ifdef CONFIG_COMPAT
1026 static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
1027 unsigned long arg)
1028 {
1029 return -ENODEV;
1030 }
1031 #endif
1032 #endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
1033
1034 static __poll_t subdev_poll(struct file *file, poll_table *wait)
1035 {
1036 struct video_device *vdev = video_devdata(file);
1037 struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
1038 struct v4l2_fh *fh = file->private_data;
1039
1040 if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
1041 return EPOLLERR;
1042
1043 poll_wait(file, &fh->wait, wait);
1044
1045 if (v4l2_event_pending(fh))
1046 return EPOLLPRI;
1047
1048 return 0;
1049 }
1050
1051 const struct v4l2_file_operations v4l2_subdev_fops = {
1052 .owner = THIS_MODULE,
1053 .open = subdev_open,
1054 .unlocked_ioctl = subdev_ioctl,
1055 #ifdef CONFIG_COMPAT
1056 .compat_ioctl32 = subdev_compat_ioctl32,
1057 #endif
1058 .release = subdev_close,
1059 .poll = subdev_poll,
1060 };
1061
1062 #ifdef CONFIG_MEDIA_CONTROLLER
1063
1064 int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity,
1065 struct fwnode_endpoint *endpoint)
1066 {
1067 struct fwnode_handle *fwnode;
1068 struct v4l2_subdev *sd;
1069
1070 if (!is_media_entity_v4l2_subdev(entity))
1071 return -EINVAL;
1072
1073 sd = media_entity_to_v4l2_subdev(entity);
1074
1075 fwnode = fwnode_graph_get_port_parent(endpoint->local_fwnode);
1076 fwnode_handle_put(fwnode);
1077
1078 if (device_match_fwnode(sd->dev, fwnode))
1079 return endpoint->port;
1080
1081 return -ENXIO;
1082 }
1083 EXPORT_SYMBOL_GPL(v4l2_subdev_get_fwnode_pad_1_to_1);
1084
1085 int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
1086 struct media_link *link,
1087 struct v4l2_subdev_format *source_fmt,
1088 struct v4l2_subdev_format *sink_fmt)
1089 {
1090 bool pass = true;
1091
1092 /* The width, height and code must match. */
1093 if (source_fmt->format.width != sink_fmt->format.width) {
1094 dev_dbg(sd->entity.graph_obj.mdev->dev,
1095 "%s: width does not match (source %u, sink %u)\n",
1096 __func__,
1097 source_fmt->format.width, sink_fmt->format.width);
1098 pass = false;
1099 }
1100
1101 if (source_fmt->format.height != sink_fmt->format.height) {
1102 dev_dbg(sd->entity.graph_obj.mdev->dev,
1103 "%s: height does not match (source %u, sink %u)\n",
1104 __func__,
1105 source_fmt->format.height, sink_fmt->format.height);
1106 pass = false;
1107 }
1108
1109 if (source_fmt->format.code != sink_fmt->format.code) {
1110 dev_dbg(sd->entity.graph_obj.mdev->dev,
1111 "%s: media bus code does not match (source 0x%8.8x, sink 0x%8.8x)\n",
1112 __func__,
1113 source_fmt->format.code, sink_fmt->format.code);
1114 pass = false;
1115 }
1116
1117 /* The field order must match, or the sink field order must be NONE
1118 * to support interlaced hardware connected to bridges that support
1119 * progressive formats only.
1120 */
1121 if (source_fmt->format.field != sink_fmt->format.field &&
1122 sink_fmt->format.field != V4L2_FIELD_NONE) {
1123 dev_dbg(sd->entity.graph_obj.mdev->dev,
1124 "%s: field does not match (source %u, sink %u)\n",
1125 __func__,
1126 source_fmt->format.field, sink_fmt->format.field);
1127 pass = false;
1128 }
1129
1130 if (pass)
1131 return 0;
1132
1133 dev_dbg(sd->entity.graph_obj.mdev->dev,
1134 "%s: link was \"%s\":%u -> \"%s\":%u\n", __func__,
1135 link->source->entity->name, link->source->index,
1136 link->sink->entity->name, link->sink->index);
1137
1138 return -EPIPE;
1139 }
1140 EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default);
1141
1142 static int
1143 v4l2_subdev_link_validate_get_format(struct media_pad *pad, u32 stream,
1144 struct v4l2_subdev_format *fmt,
1145 bool states_locked)
1146 {
1147 struct v4l2_subdev_state *state;
1148 struct v4l2_subdev *sd;
1149 int ret;
1150
1151 if (!is_media_entity_v4l2_subdev(pad->entity)) {
1152 WARN(pad->entity->function != MEDIA_ENT_F_IO_V4L,
1153 "Driver bug! Wrong media entity type 0x%08x, entity %s\n",
1154 pad->entity->function, pad->entity->name);
1155
1156 return -EINVAL;
1157 }
1158
1159 sd = media_entity_to_v4l2_subdev(pad->entity);
1160
1161 fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
1162 fmt->pad = pad->index;
1163 fmt->stream = stream;
1164
1165 if (states_locked)
1166 state = v4l2_subdev_get_locked_active_state(sd);
1167 else
1168 state = v4l2_subdev_lock_and_get_active_state(sd);
1169
1170 ret = v4l2_subdev_call(sd, pad, get_fmt, state, fmt);
1171
1172 if (!states_locked && state)
1173 v4l2_subdev_unlock_state(state);
1174
1175 return ret;
1176 }
1177
1178 #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
1179
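/*
 * Collect the set of streams carried by @pad into @streams_mask by
 * walking the active routes of the subdev's active state; stream IDs
 * are represented as bits in a u64 mask.
 */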
1180 static void __v4l2_link_validate_get_streams(struct media_pad *pad,
1181 u64 *streams_mask,
1182 bool states_locked)
1183 {
1184 struct v4l2_subdev_route *route;
1185 struct v4l2_subdev_state *state;
1186 struct v4l2_subdev *subdev;
1187
1188 subdev = media_entity_to_v4l2_subdev(pad->entity);
1189
1190 *streams_mask = 0;
1191
1192 if (states_locked)
1193 state = v4l2_subdev_get_locked_active_state(subdev);
1194 else
1195 state = v4l2_subdev_lock_and_get_active_state(subdev);
1196
1197 if (WARN_ON(!state))
1198 return;
1199
1200 for_each_active_route(&state->routing, route) {
1201 u32 route_pad;
1202 u32 route_stream;
1203
1204 if (pad->flags & MEDIA_PAD_FL_SOURCE) {
1205 route_pad = route->source_pad;
1206 route_stream = route->source_stream;
1207 } else {
1208 route_pad = route->sink_pad;
1209 route_stream = route->sink_stream;
1210 }
1211
1212 if (route_pad != pad->index)
1213 continue;
1214
1215 *streams_mask |= BIT_ULL(route_stream);
1216 }
1217
1218 if (!states_locked)
1219 v4l2_subdev_unlock_state(state);
1220 }
1221
1222 #endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
1223
1224 static void v4l2_link_validate_get_streams(struct media_pad *pad,
1225 u64 *streams_mask,
1226 bool states_locked)
1227 {
1228 struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(pad->entity);
1229
1230 if (!(subdev->flags & V4L2_SUBDEV_FL_STREAMS)) {
1231 /* Non-streams subdevs have an implicit stream 0 */
1232 *streams_mask = BIT_ULL(0);
1233 return;
1234 }
1235
1236 #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
1237 __v4l2_link_validate_get_streams(pad, streams_mask, states_locked);
1238 #else
1239 /* This shouldn't happen */
1240 *streams_mask = 0;
1241 #endif
1242 }
1243
1244 static int v4l2_subdev_link_validate_locked(struct media_link *link, bool states_locked)
1245 {
1246 struct v4l2_subdev *sink_subdev =
1247 media_entity_to_v4l2_subdev(link->sink->entity);
1248 struct device *dev = sink_subdev->entity.graph_obj.mdev->dev;
1249 u64 source_streams_mask;
1250 u64 sink_streams_mask;
1251 u64 dangling_sink_streams;
1252 u32 stream;
1253 int ret;
1254
1255 dev_dbg(dev, "validating link \"%s\":%u -> \"%s\":%u\n",
1256 link->source->entity->name, link->source->index,
1257 link->sink->entity->name, link->sink->index);
1258
1259 v4l2_link_validate_get_streams(link->source, &source_streams_mask, states_locked);
1260 v4l2_link_validate_get_streams(link->sink, &sink_streams_mask, states_locked);
1261
1262 /*
1263 * It is ok to have more source streams than sink streams as extra
1264 * source streams can just be ignored by the receiver, but having extra
1265 * sink streams is an error as streams must have a source.
1266 */
1267 dangling_sink_streams = (source_streams_mask ^ sink_streams_mask) &
1268 sink_streams_mask;
1269 if (dangling_sink_streams) {
1270 dev_err(dev, "Dangling sink streams: mask %#llx\n",
1271 dangling_sink_streams);
1272 return -EINVAL;
1273 }
1274
1275 /* Validate source and sink stream formats */
1276
1277 for (stream = 0; stream < sizeof(sink_streams_mask) * 8; ++stream) {
1278 struct v4l2_subdev_format sink_fmt, source_fmt;
1279
1280 if (!(sink_streams_mask & BIT_ULL(stream)))
1281 continue;
1282
1283 dev_dbg(dev, "validating stream \"%s\":%u:%u -> \"%s\":%u:%u\n",
1284 link->source->entity->name, link->source->index, stream,
1285 link->sink->entity->name, link->sink->index, stream);
1286
1287 ret = v4l2_subdev_link_validate_get_format(link->source, stream,
1288 &source_fmt, states_locked);
1289 if (ret < 0) {
1290 dev_dbg(dev,
1291 "Failed to get format for \"%s\":%u:%u (but that's ok)\n",
1292 link->source->entity->name, link->source->index,
1293 stream);
1294 continue;
1295 }
1296
1297 ret = v4l2_subdev_link_validate_get_format(link->sink, stream,
1298 &sink_fmt, states_locked);
1299 if (ret < 0) {
1300 dev_dbg(dev,
1301 "Failed to get format for \"%s\":%u:%u (but that's ok)\n",
1302 link->sink->entity->name, link->sink->index,
1303 stream);
1304 continue;
1305 }
1306
1307 /* TODO: add stream number to link_validate() */
1308 ret = v4l2_subdev_call(sink_subdev, pad, link_validate, link,
1309 &source_fmt, &sink_fmt);
1310 if (!ret)
1311 continue;
1312
1313 if (ret != -ENOIOCTLCMD)
1314 return ret;
1315
1316 ret = v4l2_subdev_link_validate_default(sink_subdev, link,
1317 &source_fmt, &sink_fmt);
1318
1319 if (ret)
1320 return ret;
1321 }
1322
1323 return 0;
1324 }
1325
1326 int v4l2_subdev_link_validate(struct media_link *link)
1327 {
1328 struct v4l2_subdev *source_sd, *sink_sd;
1329 struct v4l2_subdev_state *source_state, *sink_state;
1330 bool states_locked;
1331 int ret;
1332
1333 if (!is_media_entity_v4l2_subdev(link->sink->entity) ||
1334 !is_media_entity_v4l2_subdev(link->source->entity)) {
1335 pr_warn_once("%s of link '%s':%u->'%s':%u is not a V4L2 sub-device, driver bug!\n",
1336 !is_media_entity_v4l2_subdev(link->sink->entity) ?
1337 "sink" : "source",
1338 link->source->entity->name, link->source->index,
1339 link->sink->entity->name, link->sink->index);
1340 return 0;
1341 }
1342
1343 sink_sd = media_entity_to_v4l2_subdev(link->sink->entity);
1344 source_sd = media_entity_to_v4l2_subdev(link->source->entity);
1345
1346 sink_state = v4l2_subdev_get_unlocked_active_state(sink_sd);
1347 source_state = v4l2_subdev_get_unlocked_active_state(source_sd);
1348
1349 states_locked = sink_state && source_state;
1350
1351 if (states_locked) {
1352 v4l2_subdev_lock_state(sink_state);
1353 v4l2_subdev_lock_state(source_state);
1354 }
1355
1356 ret = v4l2_subdev_link_validate_locked(link, states_locked);
1357
1358 if (states_locked) {
1359 v4l2_subdev_unlock_state(sink_state);
1360 v4l2_subdev_unlock_state(source_state);
1361 }
1362
1363 return ret;
1364 }
1365 EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate);
1366
1367 bool v4l2_subdev_has_pad_interdep(struct media_entity *entity,
1368 unsigned int pad0, unsigned int pad1)
1369 {
1370 struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
1371 struct v4l2_subdev_krouting *routing;
1372 struct v4l2_subdev_state *state;
1373 unsigned int i;
1374
1375 state = v4l2_subdev_lock_and_get_active_state(sd);
1376
1377 routing = &state->routing;
1378
1379 for (i = 0; i < routing->num_routes; ++i) {
1380 struct v4l2_subdev_route *route = &routing->routes[i];
1381
1382 if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
1383 continue;
1384
1385 if ((route->sink_pad == pad0 && route->source_pad == pad1) ||
1386 (route->source_pad == pad0 && route->sink_pad == pad1)) {
1387 v4l2_subdev_unlock_state(state);
1388 return true;
1389 }
1390 }
1391
1392 v4l2_subdev_unlock_state(state);
1393
1394 return false;
1395 }
1396 EXPORT_SYMBOL_GPL(v4l2_subdev_has_pad_interdep);
1397
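/*
 * Allocate and initialize a subdev state: set up its lock, allocate the
 * legacy per-pad config for non-streams subdevs and let the driver
 * initialize the state through the .init_cfg() pad op.
 */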
1398 struct v4l2_subdev_state *
1399 __v4l2_subdev_state_alloc(struct v4l2_subdev *sd, const char *lock_name,
1400 struct lock_class_key *lock_key)
1401 {
1402 struct v4l2_subdev_state *state;
1403 int ret;
1404
1405 state = kzalloc(sizeof(*state), GFP_KERNEL);
1406 if (!state)
1407 return ERR_PTR(-ENOMEM);
1408
1409 __mutex_init(&state->_lock, lock_name, lock_key);
1410 if (sd->state_lock)
1411 state->lock = sd->state_lock;
1412 else
1413 state->lock = &state->_lock;
1414
1415 /* Drivers that support streams do not need the legacy pad config */
1416 if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS) && sd->entity.num_pads) {
1417 state->pads = kvcalloc(sd->entity.num_pads,
1418 sizeof(*state->pads), GFP_KERNEL);
1419 if (!state->pads) {
1420 ret = -ENOMEM;
1421 goto err;
1422 }
1423 }
1424
1425 /*
1426 * There can be no race at this point, but we lock the state anyway to
1427 * satisfy lockdep checks.
1428 */
1429 v4l2_subdev_lock_state(state);
1430 ret = v4l2_subdev_call(sd, pad, init_cfg, state);
1431 v4l2_subdev_unlock_state(state);
1432
1433 if (ret < 0 && ret != -ENOIOCTLCMD)
1434 goto err;
1435
1436 return state;
1437
1438 err:
1439 if (state && state->pads)
1440 kvfree(state->pads);
1441
1442 kfree(state);
1443
1444 return ERR_PTR(ret);
1445 }
1446 EXPORT_SYMBOL_GPL(__v4l2_subdev_state_alloc);
1447
1448 void __v4l2_subdev_state_free(struct v4l2_subdev_state *state)
1449 {
1450 if (!state)
1451 return;
1452
1453 mutex_destroy(&state->_lock);
1454
1455 kfree(state->routing.routes);
1456 kvfree(state->stream_configs.configs);
1457 kvfree(state->pads);
1458 kfree(state);
1459 }
1460 EXPORT_SYMBOL_GPL(__v4l2_subdev_state_free);
1461
1462 int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name,
1463 struct lock_class_key *key)
1464 {
1465 struct v4l2_subdev_state *state;
1466
1467 state = __v4l2_subdev_state_alloc(sd, name, key);
1468 if (IS_ERR(state))
1469 return PTR_ERR(state);
1470
1471 sd->active_state = state;
1472
1473 return 0;
1474 }
1475 EXPORT_SYMBOL_GPL(__v4l2_subdev_init_finalize);
1476
1477 void v4l2_subdev_cleanup(struct v4l2_subdev *sd)
1478 {
1479 struct v4l2_async_subdev_endpoint *ase, *ase_tmp;
1480
1481 __v4l2_subdev_state_free(sd->active_state);
1482 sd->active_state = NULL;
1483
1484 if (list_empty(&sd->async_subdev_endpoint_list))
1485 return;
1486
1487 list_for_each_entry_safe(ase, ase_tmp, &sd->async_subdev_endpoint_list,
1488 async_subdev_endpoint_entry) {
1489 list_del(&ase->async_subdev_endpoint_entry);
1490
1491 kfree(ase);
1492 }
1493 }
1494 EXPORT_SYMBOL_GPL(v4l2_subdev_cleanup);
1495
1496 #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
1497
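/*
 * (Re)build the stream configuration array from a routing table: every
 * active route gets one config entry for its sink end and one for its
 * source end.
 */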
1498 static int
1499 v4l2_subdev_init_stream_configs(struct v4l2_subdev_stream_configs *stream_configs,
1500 const struct v4l2_subdev_krouting *routing)
1501 {
1502 struct v4l2_subdev_stream_configs new_configs = { 0 };
1503 struct v4l2_subdev_route *route;
1504 u32 idx;
1505
1506 /* Count number of formats needed */
1507 for_each_active_route(routing, route) {
1508 /*
1509 * Each route needs a format on both ends of the route.
1510 */
1511 new_configs.num_configs += 2;
1512 }
1513
1514 if (new_configs.num_configs) {
1515 new_configs.configs = kvcalloc(new_configs.num_configs,
1516 sizeof(*new_configs.configs),
1517 GFP_KERNEL);
1518
1519 if (!new_configs.configs)
1520 return -ENOMEM;
1521 }
1522
1523 /*
1524 * Fill in the 'pad' and 'stream' values for each item in the array
1525 * from the routing table.
1526 */
1527 idx = 0;
1528
1529 for_each_active_route(routing, route) {
1530 new_configs.configs[idx].pad = route->sink_pad;
1531 new_configs.configs[idx].stream = route->sink_stream;
1532
1533 idx++;
1534
1535 new_configs.configs[idx].pad = route->source_pad;
1536 new_configs.configs[idx].stream = route->source_stream;
1537
1538 idx++;
1539 }
1540
1541 kvfree(stream_configs->configs);
1542 *stream_configs = new_configs;
1543
1544 return 0;
1545 }
1546
1547 int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
1548 struct v4l2_subdev_format *format)
1549 {
1550 struct v4l2_mbus_framefmt *fmt;
1551
1552 if (sd->flags & V4L2_SUBDEV_FL_STREAMS)
1553 fmt = v4l2_subdev_state_get_stream_format(state, format->pad,
1554 format->stream);
1555 else if (format->pad < sd->entity.num_pads && format->stream == 0)
1556 fmt = v4l2_subdev_get_pad_format(sd, state, format->pad);
1557 else
1558 fmt = NULL;
1559
1560 if (!fmt)
1561 return -EINVAL;
1562
1563 format->format = *fmt;
1564
1565 return 0;
1566 }
1567 EXPORT_SYMBOL_GPL(v4l2_subdev_get_fmt);
1568
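/*
 * Duplicate the routing table into the state and rebuild the stream
 * configurations to match; must be called with the state lock held.
 */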
1569 int v4l2_subdev_set_routing(struct v4l2_subdev *sd,
1570 struct v4l2_subdev_state *state,
1571 const struct v4l2_subdev_krouting *routing)
1572 {
1573 struct v4l2_subdev_krouting *dst = &state->routing;
1574 const struct v4l2_subdev_krouting *src = routing;
1575 struct v4l2_subdev_krouting new_routing = { 0 };
1576 size_t bytes;
1577 int r;
1578
1579 if (unlikely(check_mul_overflow((size_t)src->num_routes,
1580 sizeof(*src->routes), &bytes)))
1581 return -EOVERFLOW;
1582
1583 lockdep_assert_held(state->lock);
1584
1585 if (src->num_routes > 0) {
1586 new_routing.routes = kmemdup(src->routes, bytes, GFP_KERNEL);
1587 if (!new_routing.routes)
1588 return -ENOMEM;
1589 }
1590
1591 new_routing.num_routes = src->num_routes;
1592
1593 r = v4l2_subdev_init_stream_configs(&state->stream_configs,
1594 &new_routing);
1595 if (r) {
1596 kfree(new_routing.routes);
1597 return r;
1598 }
1599
1600 kfree(dst->routes);
1601 *dst = new_routing;
1602
1603 return 0;
1604 }
1605 EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing);
1606
1607 struct v4l2_subdev_route *
1608 __v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing,
1609 struct v4l2_subdev_route *route)
1610 {
1611 if (route)
1612 ++route;
1613 else
1614 route = &routing->routes[0];
1615
1616 for (; route < routing->routes + routing->num_routes; ++route) {
1617 if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
1618 continue;
1619
1620 return route;
1621 }
1622
1623 return NULL;
1624 }
1625 EXPORT_SYMBOL_GPL(__v4l2_subdev_next_active_route);
1626
1627 int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd,
1628 struct v4l2_subdev_state *state,
1629 const struct v4l2_subdev_krouting *routing,
1630 const struct v4l2_mbus_framefmt *fmt)
1631 {
1632 struct v4l2_subdev_stream_configs *stream_configs;
1633 unsigned int i;
1634 int ret;
1635
1636 ret = v4l2_subdev_set_routing(sd, state, routing);
1637 if (ret)
1638 return ret;
1639
1640 stream_configs = &state->stream_configs;
1641
1642 for (i = 0; i < stream_configs->num_configs; ++i)
1643 stream_configs->configs[i].fmt = *fmt;
1644
1645 return 0;
1646 }
1647 EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing_with_fmt);
1648
1649 struct v4l2_mbus_framefmt *
1650 v4l2_subdev_state_get_stream_format(struct v4l2_subdev_state *state,
1651 unsigned int pad, u32 stream)
1652 {
1653 struct v4l2_subdev_stream_configs *stream_configs;
1654 unsigned int i;
1655
1656 lockdep_assert_held(state->lock);
1657
1658 stream_configs = &state->stream_configs;
1659
1660 for (i = 0; i < stream_configs->num_configs; ++i) {
1661 if (stream_configs->configs[i].pad == pad &&
1662 stream_configs->configs[i].stream == stream)
1663 return &stream_configs->configs[i].fmt;
1664 }
1665
1666 return NULL;
1667 }
1668 EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_stream_format);
1669
1670 struct v4l2_rect *
1671 v4l2_subdev_state_get_stream_crop(struct v4l2_subdev_state *state,
1672 unsigned int pad, u32 stream)
1673 {
1674 struct v4l2_subdev_stream_configs *stream_configs;
1675 unsigned int i;
1676
1677 lockdep_assert_held(state->lock);
1678
1679 stream_configs = &state->stream_configs;
1680
1681 for (i = 0; i < stream_configs->num_configs; ++i) {
1682 if (stream_configs->configs[i].pad == pad &&
1683 stream_configs->configs[i].stream == stream)
1684 return &stream_configs->configs[i].crop;
1685 }
1686
1687 return NULL;
1688 }
1689 EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_stream_crop);
1690
1691 struct v4l2_rect *
1692 v4l2_subdev_state_get_stream_compose(struct v4l2_subdev_state *state,
1693 unsigned int pad, u32 stream)
1694 {
1695 struct v4l2_subdev_stream_configs *stream_configs;
1696 unsigned int i;
1697
1698 lockdep_assert_held(state->lock);
1699
1700 stream_configs = &state->stream_configs;
1701
1702 for (i = 0; i < stream_configs->num_configs; ++i) {
1703 if (stream_configs->configs[i].pad == pad &&
1704 stream_configs->configs[i].stream == stream)
1705 return &stream_configs->configs[i].compose;
1706 }
1707
1708 return NULL;
1709 }
1710 EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_stream_compose);
1711
1712 int v4l2_subdev_routing_find_opposite_end(const struct v4l2_subdev_krouting *routing,
1713 u32 pad, u32 stream, u32 *other_pad,
1714 u32 *other_stream)
1715 {
1716 unsigned int i;
1717
1718 for (i = 0; i < routing->num_routes; ++i) {
1719 struct v4l2_subdev_route *route = &routing->routes[i];
1720
1721 if (route->source_pad == pad &&
1722 route->source_stream == stream) {
1723 if (other_pad)
1724 *other_pad = route->sink_pad;
1725 if (other_stream)
1726 *other_stream = route->sink_stream;
1727 return 0;
1728 }
1729
1730 if (route->sink_pad == pad && route->sink_stream == stream) {
1731 if (other_pad)
1732 *other_pad = route->source_pad;
1733 if (other_stream)
1734 *other_stream = route->source_stream;
1735 return 0;
1736 }
1737 }
1738
1739 return -EINVAL;
1740 }
1741 EXPORT_SYMBOL_GPL(v4l2_subdev_routing_find_opposite_end);
1742
1743 struct v4l2_mbus_framefmt *
1744 v4l2_subdev_state_get_opposite_stream_format(struct v4l2_subdev_state *state,
1745 u32 pad, u32 stream)
1746 {
1747 u32 other_pad, other_stream;
1748 int ret;
1749
1750 ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
1751 pad, stream,
1752 &other_pad, &other_stream);
1753 if (ret)
1754 return NULL;
1755
1756 return v4l2_subdev_state_get_stream_format(state, other_pad,
1757 other_stream);
1758 }
1759 EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_opposite_stream_format);
1760
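/*
 * Translate the streams in @streams on @pad0 to the corresponding
 * streams on @pad1 using the active routes of @state. On return,
 * *streams holds the subset of the input streams that actually route
 * between the two pads, and the returned mask holds their counterparts
 * on @pad1.
 */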
1761 u64 v4l2_subdev_state_xlate_streams(const struct v4l2_subdev_state *state,
1762 u32 pad0, u32 pad1, u64 *streams)
1763 {
1764 const struct v4l2_subdev_krouting *routing = &state->routing;
1765 struct v4l2_subdev_route *route;
1766 u64 streams0 = 0;
1767 u64 streams1 = 0;
1768
1769 for_each_active_route(routing, route) {
1770 if (route->sink_pad == pad0 && route->source_pad == pad1 &&
1771 (*streams & BIT_ULL(route->sink_stream))) {
1772 streams0 |= BIT_ULL(route->sink_stream);
1773 streams1 |= BIT_ULL(route->source_stream);
1774 }
1775 if (route->source_pad == pad0 && route->sink_pad == pad1 &&
1776 (*streams & BIT_ULL(route->source_stream))) {
1777 streams0 |= BIT_ULL(route->source_stream);
1778 streams1 |= BIT_ULL(route->sink_stream);
1779 }
1780 }
1781
1782 *streams = streams0;
1783 return streams1;
1784 }
1785 EXPORT_SYMBOL_GPL(v4l2_subdev_state_xlate_streams);
1786
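/*
 * Validate a routing table against the subdev's pads and the
 * restrictions in @disallow. remote_pads[] tracks, for each pad, the
 * pad at the other end of previously seen routes in order to detect
 * stream mixing and multiplexing violations.
 */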
1787 int v4l2_subdev_routing_validate(struct v4l2_subdev *sd,
1788 const struct v4l2_subdev_krouting *routing,
1789 enum v4l2_subdev_routing_restriction disallow)
1790 {
1791 u32 *remote_pads = NULL;
1792 unsigned int i, j;
1793 int ret = -EINVAL;
1794
1795 if (disallow & (V4L2_SUBDEV_ROUTING_NO_STREAM_MIX |
1796 V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING)) {
1797 remote_pads = kcalloc(sd->entity.num_pads, sizeof(*remote_pads),
1798 GFP_KERNEL);
1799 if (!remote_pads)
1800 return -ENOMEM;
1801
1802 for (i = 0; i < sd->entity.num_pads; ++i)
1803 remote_pads[i] = U32_MAX;
1804 }
1805
1806 for (i = 0; i < routing->num_routes; ++i) {
1807 const struct v4l2_subdev_route *route = &routing->routes[i];
1808
1809 /* Validate the sink and source pad numbers. */
1810 if (route->sink_pad >= sd->entity.num_pads ||
1811 !(sd->entity.pads[route->sink_pad].flags & MEDIA_PAD_FL_SINK)) {
1812 dev_dbg(sd->dev, "route %u sink (%u) is not a sink pad\n",
1813 i, route->sink_pad);
1814 goto out;
1815 }
1816
1817 if (route->source_pad >= sd->entity.num_pads ||
1818 !(sd->entity.pads[route->source_pad].flags & MEDIA_PAD_FL_SOURCE)) {
1819 dev_dbg(sd->dev, "route %u source (%u) is not a source pad\n",
1820 i, route->source_pad);
1821 goto out;
1822 }
1823
1824 /*
1825 * V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX: all streams from a
1826 * sink pad must be routed to a single source pad.
1827 */
1828 if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX) {
1829 if (remote_pads[route->sink_pad] != U32_MAX &&
1830 remote_pads[route->sink_pad] != route->source_pad) {
1831 dev_dbg(sd->dev,
1832 "route %u attempts to mix %s streams\n",
1833 i, "sink");
1834 goto out;
1835 }
1836 }
1837
1838 /*
1839 * V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX: all streams on a
1840 * source pad must originate from a single sink pad.
1841 */
1842 if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX) {
1843 if (remote_pads[route->source_pad] != U32_MAX &&
1844 remote_pads[route->source_pad] != route->sink_pad) {
1845 dev_dbg(sd->dev,
1846 "route %u attempts to mix %s streams\n",
1847 i, "source");
1848 goto out;
1849 }
1850 }
1851
1852 /*
1853 * V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING: Pads on the sink
1854 * side can not do stream multiplexing, i.e. there can be only
1855 * a single stream in a sink pad.
1856 */
1857 if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING) {
1858 if (remote_pads[route->sink_pad] != U32_MAX) {
1859 dev_dbg(sd->dev,
1860 "route %u attempts to multiplex on %s pad %u\n",
1861 i, "sink", route->sink_pad);
1862 goto out;
1863 }
1864 }
1865
1866 /*
1867 * V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING: Pads on the
1868 * source side can not do stream multiplexing, i.e. there can
1869 * be only a single stream in a source pad.
1870 */
1871 if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING) {
1872 if (remote_pads[route->source_pad] != U32_MAX) {
1873 dev_dbg(sd->dev,
1874 "route %u attempts to multiplex on %s pad %u\n",
1875 i, "source", route->source_pad);
1876 goto out;
1877 }
1878 }
1879
1880 if (remote_pads) {
1881 remote_pads[route->sink_pad] = route->source_pad;
1882 remote_pads[route->source_pad] = route->sink_pad;
1883 }
1884
1885 for (j = i + 1; j < routing->num_routes; ++j) {
1886 const struct v4l2_subdev_route *r = &routing->routes[j];
1887
1888 /*
1889 * V4L2_SUBDEV_ROUTING_NO_1_TO_N: No two routes can
1890 * originate from the same (sink) stream.
1891 */
1892 if ((disallow & V4L2_SUBDEV_ROUTING_NO_1_TO_N) &&
1893 route->sink_pad == r->sink_pad &&
1894 route->sink_stream == r->sink_stream) {
1895 dev_dbg(sd->dev,
1896 "routes %u and %u originate from same sink (%u/%u)\n",
1897 i, j, route->sink_pad,
1898 route->sink_stream);
1899 goto out;
1900 }
1901
1902 /*
1903 * V4L2_SUBDEV_ROUTING_NO_N_TO_1: No two routes can end
1904 * at the same (source) stream.
1905 */
1906 if ((disallow & V4L2_SUBDEV_ROUTING_NO_N_TO_1) &&
1907 route->source_pad == r->source_pad &&
1908 route->source_stream == r->source_stream) {
1909 dev_dbg(sd->dev,
1910 "routes %u and %u end at same source (%u/%u)\n",
1911 i, j, route->source_pad,
1912 route->source_stream);
1913 goto out;
1914 }
1915 }
1916 }
1917
1918 ret = 0;
1919
1920 out:
1921 kfree(remote_pads);
1922 return ret;
1923 }
1924 EXPORT_SYMBOL_GPL(v4l2_subdev_routing_validate);
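
/*
 * Usage sketch (illustrative only): a typical .set_routing() handler first
 * validates the requested routing against the device's restrictions, then
 * applies it with v4l2_subdev_set_routing(). "mydrv_set_routing" is a
 * hypothetical driver function.
 *
 *	static int mydrv_set_routing(struct v4l2_subdev *sd,
 *				     struct v4l2_subdev_state *state,
 *				     enum v4l2_subdev_format_whence which,
 *				     struct v4l2_subdev_krouting *routing)
 *	{
 *		int ret;
 *
 *		ret = v4l2_subdev_routing_validate(sd, routing,
 *						   V4L2_SUBDEV_ROUTING_ONLY_1_TO_1);
 *		if (ret)
 *			return ret;
 *
 *		return v4l2_subdev_set_routing(sd, state, routing);
 *	}
 */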

static int v4l2_subdev_enable_streams_fallback(struct v4l2_subdev *sd, u32 pad,
					       u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	int ret;

	/*
	 * If the subdev doesn't implement pad-based stream enable, fall back
	 * to the .s_stream() operation.
	 */
	if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
		return -EOPNOTSUPP;

	/*
	 * .s_stream() means there is no streams support, so the only allowed
	 * stream is the implicit stream 0.
	 */
	if (streams_mask != BIT_ULL(0))
		return -EOPNOTSUPP;

	/*
	 * We use a 64-bit bitmask for tracking enabled pads, so only subdevices
	 * with 64 pads or less can be supported.
	 */
	if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE)
		return -EOPNOTSUPP;

	if (sd->enabled_pads & BIT_ULL(pad)) {
		dev_dbg(dev, "pad %u already enabled on %s\n",
			pad, sd->entity.name);
		return -EALREADY;
	}

	/* Start streaming when the first pad is enabled. */
	if (!sd->enabled_pads) {
		ret = v4l2_subdev_call(sd, video, s_stream, 1);
		if (ret)
			return ret;
	}

	sd->enabled_pads |= BIT_ULL(pad);

	return 0;
}

int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad,
			       u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	struct v4l2_subdev_state *state;
	u64 found_streams = 0;
	unsigned int i;
	int ret;

	/* A few basic sanity checks first. */
	if (pad >= sd->entity.num_pads)
		return -EINVAL;

	if (!streams_mask)
		return 0;

	/* Fallback on .s_stream() if .enable_streams() isn't available. */
	if (!sd->ops->pad || !sd->ops->pad->enable_streams)
		return v4l2_subdev_enable_streams_fallback(sd, pad,
							   streams_mask);

	state = v4l2_subdev_lock_and_get_active_state(sd);

	/*
	 * Verify that the requested streams exist and that they are not
	 * already enabled.
	 */
	for (i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream)))
			continue;

		found_streams |= BIT_ULL(cfg->stream);

		if (cfg->enabled) {
			dev_dbg(dev, "stream %u already enabled on %s:%u\n",
				cfg->stream, sd->entity.name, pad);
			ret = -EALREADY;
			goto done;
		}
	}

	if (found_streams != streams_mask) {
		dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
			streams_mask & ~found_streams, sd->entity.name, pad);
		ret = -EINVAL;
		goto done;
	}

	dev_dbg(dev, "enable streams %u:%#llx\n", pad, streams_mask);

	/* Call the .enable_streams() operation. */
	ret = v4l2_subdev_call(sd, pad, enable_streams, state, pad,
			       streams_mask);
	if (ret) {
		dev_dbg(dev, "enable streams %u:%#llx failed: %d\n", pad,
			streams_mask, ret);
		goto done;
	}

	/* Mark the streams as enabled. */
	for (i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream)))
			cfg->enabled = true;
	}

done:
	v4l2_subdev_unlock_state(state);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_enable_streams);
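
/*
 * Usage sketch (illustrative only): a bridge driver starting its remote
 * source subdev. For non-streams-aware remotes this still works through the
 * .s_stream() fallback above, provided only the implicit stream 0 is
 * requested. "remote_sd" and "REMOTE_SRC_PAD" are hypothetical.
 *
 *	ret = v4l2_subdev_enable_streams(remote_sd, REMOTE_SRC_PAD,
 *					 BIT_ULL(0));
 *	if (ret)
 *		return ret;
 */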

static int v4l2_subdev_disable_streams_fallback(struct v4l2_subdev *sd, u32 pad,
						u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	int ret;

	/*
	 * If the subdev doesn't implement pad-based stream enable, fall back
	 * to the .s_stream() operation.
	 */
	if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
		return -EOPNOTSUPP;

	/*
	 * .s_stream() means there is no streams support, so the only allowed
	 * stream is the implicit stream 0.
	 */
	if (streams_mask != BIT_ULL(0))
		return -EOPNOTSUPP;

	/*
	 * We use a 64-bit bitmask for tracking enabled pads, so only subdevices
	 * with 64 pads or less can be supported.
	 */
	if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE)
		return -EOPNOTSUPP;

	if (!(sd->enabled_pads & BIT_ULL(pad))) {
		dev_dbg(dev, "pad %u already disabled on %s\n",
			pad, sd->entity.name);
		return -EALREADY;
	}

	/* Stop streaming when the last pad is disabled. */
	if (!(sd->enabled_pads & ~BIT_ULL(pad))) {
		ret = v4l2_subdev_call(sd, video, s_stream, 0);
		if (ret)
			return ret;
	}

	sd->enabled_pads &= ~BIT_ULL(pad);

	return 0;
}

int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad,
				u64 streams_mask)
{
	struct device *dev = sd->entity.graph_obj.mdev->dev;
	struct v4l2_subdev_state *state;
	u64 found_streams = 0;
	unsigned int i;
	int ret;

	/* A few basic sanity checks first. */
	if (pad >= sd->entity.num_pads)
		return -EINVAL;

	if (!streams_mask)
		return 0;

	/* Fallback on .s_stream() if .disable_streams() isn't available. */
	if (!sd->ops->pad || !sd->ops->pad->disable_streams)
		return v4l2_subdev_disable_streams_fallback(sd, pad,
							    streams_mask);

	state = v4l2_subdev_lock_and_get_active_state(sd);

	/*
	 * Verify that the requested streams exist and that they are not
	 * already disabled.
	 */
	for (i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream)))
			continue;

		found_streams |= BIT_ULL(cfg->stream);

		if (!cfg->enabled) {
			dev_dbg(dev, "stream %u already disabled on %s:%u\n",
				cfg->stream, sd->entity.name, pad);
			ret = -EALREADY;
			goto done;
		}
	}

	if (found_streams != streams_mask) {
		dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
			streams_mask & ~found_streams, sd->entity.name, pad);
		ret = -EINVAL;
		goto done;
	}

	dev_dbg(dev, "disable streams %u:%#llx\n", pad, streams_mask);

	/* Call the .disable_streams() operation. */
	ret = v4l2_subdev_call(sd, pad, disable_streams, state, pad,
			       streams_mask);
	if (ret) {
		dev_dbg(dev, "disable streams %u:%#llx failed: %d\n", pad,
			streams_mask, ret);
		goto done;
	}

	/* Mark the streams as disabled. */
	for (i = 0; i < state->stream_configs.num_configs; ++i) {
		struct v4l2_subdev_stream_config *cfg =
			&state->stream_configs.configs[i];

		if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream)))
			cfg->enabled = false;
	}

done:
	v4l2_subdev_unlock_state(state);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_disable_streams);
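
/*
 * Usage sketch (illustrative only): the counterpart of the enable call
 * shown above, typically issued on stream stop or on an error path. The
 * mask passed here should match what was previously enabled, otherwise
 * -EALREADY or -EINVAL is returned. "remote_sd" and "REMOTE_SRC_PAD" are
 * hypothetical.
 *
 *	ret = v4l2_subdev_disable_streams(remote_sd, REMOTE_SRC_PAD,
 *					  BIT_ULL(0));
 */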

int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable)
{
	struct v4l2_subdev_state *state;
	struct v4l2_subdev_route *route;
	struct media_pad *pad;
	u64 source_mask = 0;
	int pad_index = -1;

	/*
	 * Find the source pad. This helper is meant for subdevs that have a
	 * single source pad, so failures shouldn't happen, but catch them
	 * loudly nonetheless as they indicate a driver bug.
	 */
	media_entity_for_each_pad(&sd->entity, pad) {
		if (pad->flags & MEDIA_PAD_FL_SOURCE) {
			pad_index = pad->index;
			break;
		}
	}

	if (WARN_ON(pad_index == -1))
		return -EINVAL;

	/*
	 * As there's a single source pad, just collect all the source streams.
	 */
	state = v4l2_subdev_lock_and_get_active_state(sd);

	for_each_active_route(&state->routing, route)
		source_mask |= BIT_ULL(route->source_stream);

	v4l2_subdev_unlock_state(state);

	if (enable)
		return v4l2_subdev_enable_streams(sd, pad_index, source_mask);
	else
		return v4l2_subdev_disable_streams(sd, pad_index, source_mask);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_s_stream_helper);
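
/*
 * Usage sketch (illustrative only): a streams-aware subdev driver with a
 * single source pad can plug this helper straight into its video ops
 * instead of open-coding stream masks. "mydrv_video_ops" is hypothetical.
 *
 *	static const struct v4l2_subdev_video_ops mydrv_video_ops = {
 *		.s_stream = v4l2_subdev_s_stream_helper,
 *	};
 */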

#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

#endif /* CONFIG_MEDIA_CONTROLLER */

void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
{
	INIT_LIST_HEAD(&sd->list);
	BUG_ON(!ops);
	sd->ops = ops;
	sd->v4l2_dev = NULL;
	sd->flags = 0;
	sd->name[0] = '\0';
	sd->grp_id = 0;
	sd->dev_priv = NULL;
	sd->host_priv = NULL;
	sd->privacy_led = NULL;
	INIT_LIST_HEAD(&sd->async_subdev_endpoint_list);
#if defined(CONFIG_MEDIA_CONTROLLER)
	sd->entity.name = sd->name;
	sd->entity.obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV;
	sd->entity.function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
#endif
}
EXPORT_SYMBOL(v4l2_subdev_init);
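
/*
 * Usage sketch (illustrative only): minimal initialization in a sensor
 * driver's probe, with the entity function, flags and name filled in after
 * the init call. "mydrv_subdev_ops" and "dev" are hypothetical.
 *
 *	v4l2_subdev_init(sd, &mydrv_subdev_ops);
 *	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
 *	sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
 *	snprintf(sd->name, sizeof(sd->name), "mydrv %s", dev_name(dev));
 */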

void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
			      const struct v4l2_event *ev)
{
	v4l2_event_queue(sd->devnode, ev);
	v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT, (void *)ev);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event);

bool v4l2_subdev_is_streaming(struct v4l2_subdev *sd)
{
	struct v4l2_subdev_state *state;

	if (!v4l2_subdev_has_op(sd, pad, enable_streams))
		return sd->s_stream_enabled;

	if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
		return !!sd->enabled_pads;

	state = v4l2_subdev_get_locked_active_state(sd);

	for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) {
		const struct v4l2_subdev_stream_config *cfg;

		cfg = &state->stream_configs.configs[i];

		if (cfg->enabled)
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_is_streaming);
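
/*
 * Usage sketch (illustrative only): rejecting format changes while the
 * subdev is streaming, e.g. from a pad .set_fmt() handler. Note that the
 * streams path above reads the locked active state, so the caller must
 * already hold the active state lock in that case.
 *
 *	if (v4l2_subdev_is_streaming(sd))
 *		return -EBUSY;
 */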

int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd)
{
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	sd->privacy_led = led_get(sd->dev, "privacy-led");
	if (IS_ERR(sd->privacy_led) && PTR_ERR(sd->privacy_led) != -ENOENT)
		return dev_err_probe(sd->dev, PTR_ERR(sd->privacy_led),
				     "getting privacy LED\n");

	if (!IS_ERR_OR_NULL(sd->privacy_led)) {
		mutex_lock(&sd->privacy_led->led_access);
		led_sysfs_disable(sd->privacy_led);
		led_trigger_remove(sd->privacy_led);
		led_set_brightness(sd->privacy_led, 0);
		mutex_unlock(&sd->privacy_led->led_access);
	}
#endif
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_privacy_led);
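
/*
 * Usage sketch (illustrative only): a driver acquires the privacy LED once
 * during probe and releases it in its teardown path; "err_cleanup" stands
 * in for the driver's real unwind label.
 *
 *	ret = v4l2_subdev_get_privacy_led(sd);
 *	if (ret)
 *		goto err_cleanup;
 */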

void v4l2_subdev_put_privacy_led(struct v4l2_subdev *sd)
{
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	if (!IS_ERR_OR_NULL(sd->privacy_led)) {
		mutex_lock(&sd->privacy_led->led_access);
		led_sysfs_enable(sd->privacy_led);
		mutex_unlock(&sd->privacy_led->led_access);
		led_put(sd->privacy_led);
	}
#endif
}
EXPORT_SYMBOL_GPL(v4l2_subdev_put_privacy_led);
