// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 sub-device
 *
 * Copyright (C) 2010 Nokia Corporation
 *
 * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	    Sakari Ailus <sakari.ailus@iki.fi>
 */

#include <linux/export.h>
#include <linux/ioctl.h>
#include <linux/leds.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/videodev2.h>

#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
/*
 * The Streams API is an experimental feature. To use the Streams API, set
 * 'v4l2_subdev_enable_streams_api' to 1 below.
 */

static bool v4l2_subdev_enable_streams_api;
#endif

/*
 * The maximum stream ID is 63 for now, as we use a u64 bitmask to represent
 * a set of streams.
 *
 * Note that V4L2_FRAME_DESC_ENTRY_MAX is related: it restricts the total
 * number of streams on a pad, although it does not restrict the stream IDs
 * themselves.
 */
#define V4L2_SUBDEV_MAX_STREAM_ID 63
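
/*
 * For illustration: a set of streams is represented as a u64 mask with
 * BIT_ULL(stream) set for each member. For example, streams 0, 2 and 5 on
 * a pad map to the mask 0x25 (binary 100101).
 */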

#include "v4l2-subdev-priv.h"

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
{
	struct v4l2_subdev_state *state;
	static struct lock_class_key key;

	state = __v4l2_subdev_state_alloc(sd, "fh->state->lock", &key);
	if (IS_ERR(state))
		return PTR_ERR(state);

	fh->state = state;

	return 0;
}

static void subdev_fh_free(struct v4l2_subdev_fh *fh)
{
	__v4l2_subdev_state_free(fh->state);
	fh->state = NULL;
}

static int subdev_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_subdev_fh *subdev_fh;
	int ret;

	subdev_fh = kzalloc(sizeof(*subdev_fh), GFP_KERNEL);
	if (subdev_fh == NULL)
		return -ENOMEM;

	ret = subdev_fh_init(subdev_fh, sd);
	if (ret) {
		kfree(subdev_fh);
		return ret;
	}

	v4l2_fh_init(&subdev_fh->vfh, vdev);
	v4l2_fh_add(&subdev_fh->vfh);
	file->private_data = &subdev_fh->vfh;

	if (sd->v4l2_dev->mdev && sd->entity.graph_obj.mdev->dev) {
		struct module *owner;

		owner = sd->entity.graph_obj.mdev->dev->driver->owner;
		if (!try_module_get(owner)) {
			ret = -EBUSY;
			goto err;
		}
		subdev_fh->owner = owner;
	}

	if (sd->internal_ops && sd->internal_ops->open) {
		ret = sd->internal_ops->open(sd, subdev_fh);
		if (ret < 0)
			goto err;
	}

	return 0;

err:
	module_put(subdev_fh->owner);
	v4l2_fh_del(&subdev_fh->vfh);
	v4l2_fh_exit(&subdev_fh->vfh);
	subdev_fh_free(subdev_fh);
	kfree(subdev_fh);

	return ret;
}

static int subdev_close(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file->private_data;
	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);

	if (sd->internal_ops && sd->internal_ops->close)
		sd->internal_ops->close(sd, subdev_fh);
	module_put(subdev_fh->owner);
	v4l2_fh_del(vfh);
	v4l2_fh_exit(vfh);
	subdev_fh_free(subdev_fh);
	kfree(subdev_fh);
	file->private_data = NULL;

	return 0;
}
#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
static int subdev_open(struct file *file)
{
	return -ENODEV;
}

static int subdev_close(struct file *file)
{
	return -ENODEV;
}
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

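/*
 * The check_*() helpers below validate ioctl arguments before the
 * corresponding subdev operation is invoked. They are chained with the
 * GNU "?:" (elvis) operator, so the first helper that returns a non-zero
 * error code short-circuits the chain.
 */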
static inline int check_which(u32 which)
{
	if (which != V4L2_SUBDEV_FORMAT_TRY &&
	    which != V4L2_SUBDEV_FORMAT_ACTIVE)
		return -EINVAL;

	return 0;
}

static inline int check_pad(struct v4l2_subdev *sd, u32 pad)
{
#if defined(CONFIG_MEDIA_CONTROLLER)
	if (sd->entity.num_pads) {
		if (pad >= sd->entity.num_pads)
			return -EINVAL;
		return 0;
	}
#endif
	/* allow pad 0 on subdevices not registered as media entities */
	if (pad > 0)
		return -EINVAL;
	return 0;
}

static int check_state(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
		       u32 which, u32 pad, u32 stream)
{
	if (sd->flags & V4L2_SUBDEV_FL_STREAMS) {
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
		if (!v4l2_subdev_state_get_stream_format(state, pad, stream))
			return -EINVAL;
		return 0;
#else
		return -EINVAL;
#endif
	}

	if (stream != 0)
		return -EINVAL;

	if (which == V4L2_SUBDEV_FORMAT_TRY && (!state || !state->pads))
		return -EINVAL;

	return 0;
}

static inline int check_format(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *state,
			       struct v4l2_subdev_format *format)
{
	if (!format)
		return -EINVAL;

	return check_which(format->which) ? : check_pad(sd, format->pad) ? :
	       check_state(sd, state, format->which, format->pad, format->stream);
}

static int call_get_fmt(struct v4l2_subdev *sd,
			struct v4l2_subdev_state *state,
			struct v4l2_subdev_format *format)
{
	return check_format(sd, state, format) ? :
	       sd->ops->pad->get_fmt(sd, state, format);
}

static int call_set_fmt(struct v4l2_subdev *sd,
			struct v4l2_subdev_state *state,
			struct v4l2_subdev_format *format)
{
	return check_format(sd, state, format) ? :
	       sd->ops->pad->set_fmt(sd, state, format);
}

static int call_enum_mbus_code(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *state,
			       struct v4l2_subdev_mbus_code_enum *code)
{
	if (!code)
		return -EINVAL;

	return check_which(code->which) ? : check_pad(sd, code->pad) ? :
	       check_state(sd, state, code->which, code->pad, code->stream) ? :
	       sd->ops->pad->enum_mbus_code(sd, state, code);
}

static int call_enum_frame_size(struct v4l2_subdev *sd,
				struct v4l2_subdev_state *state,
				struct v4l2_subdev_frame_size_enum *fse)
{
	if (!fse)
		return -EINVAL;

	return check_which(fse->which) ? : check_pad(sd, fse->pad) ? :
	       check_state(sd, state, fse->which, fse->pad, fse->stream) ? :
	       sd->ops->pad->enum_frame_size(sd, state, fse);
}

static inline int check_frame_interval(struct v4l2_subdev *sd,
				       struct v4l2_subdev_frame_interval *fi)
{
	if (!fi)
		return -EINVAL;

	return check_pad(sd, fi->pad);
}

static int call_g_frame_interval(struct v4l2_subdev *sd,
				 struct v4l2_subdev_frame_interval *fi)
{
	return check_frame_interval(sd, fi) ? :
	       sd->ops->video->g_frame_interval(sd, fi);
}

static int call_s_frame_interval(struct v4l2_subdev *sd,
				 struct v4l2_subdev_frame_interval *fi)
{
	return check_frame_interval(sd, fi) ? :
	       sd->ops->video->s_frame_interval(sd, fi);
}

static int call_enum_frame_interval(struct v4l2_subdev *sd,
				    struct v4l2_subdev_state *state,
				    struct v4l2_subdev_frame_interval_enum *fie)
{
	if (!fie)
		return -EINVAL;

	return check_which(fie->which) ? : check_pad(sd, fie->pad) ? :
	       check_state(sd, state, fie->which, fie->pad, fie->stream) ? :
	       sd->ops->pad->enum_frame_interval(sd, state, fie);
}

static inline int check_selection(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *state,
				  struct v4l2_subdev_selection *sel)
{
	if (!sel)
		return -EINVAL;

	return check_which(sel->which) ? : check_pad(sd, sel->pad) ? :
	       check_state(sd, state, sel->which, sel->pad, sel->stream);
}

static int call_get_selection(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *state,
			      struct v4l2_subdev_selection *sel)
{
	return check_selection(sd, state, sel) ? :
	       sd->ops->pad->get_selection(sd, state, sel);
}

static int call_set_selection(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *state,
			      struct v4l2_subdev_selection *sel)
{
	return check_selection(sd, state, sel) ? :
	       sd->ops->pad->set_selection(sd, state, sel);
}

static inline int check_edid(struct v4l2_subdev *sd,
			     struct v4l2_subdev_edid *edid)
{
	if (!edid)
		return -EINVAL;

	if (edid->blocks && edid->edid == NULL)
		return -EINVAL;

	return check_pad(sd, edid->pad);
}

static int call_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
{
	return check_edid(sd, edid) ? : sd->ops->pad->get_edid(sd, edid);
}

static int call_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
{
	return check_edid(sd, edid) ? : sd->ops->pad->set_edid(sd, edid);
}

static int call_dv_timings_cap(struct v4l2_subdev *sd,
			       struct v4l2_dv_timings_cap *cap)
{
	if (!cap)
		return -EINVAL;

	return check_pad(sd, cap->pad) ? :
	       sd->ops->pad->dv_timings_cap(sd, cap);
}

static int call_enum_dv_timings(struct v4l2_subdev *sd,
				struct v4l2_enum_dv_timings *dvt)
{
	if (!dvt)
		return -EINVAL;

	return check_pad(sd, dvt->pad) ? :
	       sd->ops->pad->enum_dv_timings(sd, dvt);
}

static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
				struct v4l2_mbus_config *config)
{
	return check_pad(sd, pad) ? :
	       sd->ops->pad->get_mbus_config(sd, pad, config);
}

static int call_s_stream(struct v4l2_subdev *sd, int enable)
{
	int ret;

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
	if (!IS_ERR_OR_NULL(sd->privacy_led)) {
		if (enable)
			led_set_brightness(sd->privacy_led,
					   sd->privacy_led->max_brightness);
		else
			led_set_brightness(sd->privacy_led, 0);
	}
#endif
	ret = sd->ops->video->s_stream(sd, enable);

	if (!enable && ret < 0) {
		dev_warn(sd->dev, "disabling streaming failed (%d)\n", ret);
		return 0;
	}

	return ret;
}

#ifdef CONFIG_MEDIA_CONTROLLER
/*
 * Create state-management wrapper for pad ops dealing with subdev state. The
 * wrapper handles the case where the caller does not provide the called
 * subdev's state. This should be removed when all the callers are fixed.
 */
#define DEFINE_STATE_WRAPPER(f, arg_type)                                  \
	static int call_##f##_state(struct v4l2_subdev *sd,                \
				    struct v4l2_subdev_state *_state,      \
				    arg_type *arg)                         \
	{                                                                  \
		struct v4l2_subdev_state *state = _state;                  \
		int ret;                                                   \
		if (!_state)                                               \
			state = v4l2_subdev_lock_and_get_active_state(sd); \
		ret = call_##f(sd, state, arg);                            \
		if (!_state && state)                                      \
			v4l2_subdev_unlock_state(state);                   \
		return ret;                                                \
	}
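
/*
 * For illustration, DEFINE_STATE_WRAPPER(get_fmt, struct v4l2_subdev_format)
 * expands to roughly:
 *
 *	static int call_get_fmt_state(struct v4l2_subdev *sd,
 *				      struct v4l2_subdev_state *_state,
 *				      struct v4l2_subdev_format *arg)
 *	{
 *		struct v4l2_subdev_state *state = _state;
 *		int ret;
 *
 *		if (!_state)
 *			state = v4l2_subdev_lock_and_get_active_state(sd);
 *		ret = call_get_fmt(sd, state, arg);
 *		if (!_state && state)
 *			v4l2_subdev_unlock_state(state);
 *		return ret;
 *	}
 */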

#else /* CONFIG_MEDIA_CONTROLLER */

#define DEFINE_STATE_WRAPPER(f, arg_type)                            \
	static int call_##f##_state(struct v4l2_subdev *sd,          \
				    struct v4l2_subdev_state *state, \
				    arg_type *arg)                   \
	{                                                            \
		return call_##f(sd, state, arg);                     \
	}

#endif /* CONFIG_MEDIA_CONTROLLER */

DEFINE_STATE_WRAPPER(get_fmt, struct v4l2_subdev_format);
DEFINE_STATE_WRAPPER(set_fmt, struct v4l2_subdev_format);
DEFINE_STATE_WRAPPER(enum_mbus_code, struct v4l2_subdev_mbus_code_enum);
DEFINE_STATE_WRAPPER(enum_frame_size, struct v4l2_subdev_frame_size_enum);
DEFINE_STATE_WRAPPER(enum_frame_interval, struct v4l2_subdev_frame_interval_enum);
DEFINE_STATE_WRAPPER(get_selection, struct v4l2_subdev_selection);
DEFINE_STATE_WRAPPER(set_selection, struct v4l2_subdev_selection);

static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = {
	.get_fmt		= call_get_fmt_state,
	.set_fmt		= call_set_fmt_state,
	.enum_mbus_code		= call_enum_mbus_code_state,
	.enum_frame_size	= call_enum_frame_size_state,
	.enum_frame_interval	= call_enum_frame_interval_state,
	.get_selection		= call_get_selection_state,
	.set_selection		= call_set_selection_state,
	.get_edid		= call_get_edid,
	.set_edid		= call_set_edid,
	.dv_timings_cap		= call_dv_timings_cap,
	.enum_dv_timings	= call_enum_dv_timings,
	.get_mbus_config	= call_get_mbus_config,
};

static const struct v4l2_subdev_video_ops v4l2_subdev_call_video_wrappers = {
	.g_frame_interval	= call_g_frame_interval,
	.s_frame_interval	= call_s_frame_interval,
	.s_stream		= call_s_stream,
};

const struct v4l2_subdev_ops v4l2_subdev_call_wrappers = {
	.pad	= &v4l2_subdev_call_pad_wrappers,
	.video	= &v4l2_subdev_call_video_wrappers,
};
EXPORT_SYMBOL(v4l2_subdev_call_wrappers);
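
/*
 * Note: the v4l2_subdev_call() macro consults v4l2_subdev_call_wrappers
 * before invoking the subdev's own op, so the argument checks and state
 * handling above are applied transparently to every wrapped operation.
 */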

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)

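/*
 * Return the subdev state to operate on for the given ioctl: the file
 * handle's TRY state when the request uses V4L2_SUBDEV_FORMAT_TRY, the
 * subdev's active state otherwise, or NULL for ioctls that take no 'which'
 * argument. The returned state is not locked; the caller locks it.
 */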
static struct v4l2_subdev_state *
subdev_ioctl_get_state(struct v4l2_subdev *sd, struct v4l2_subdev_fh *subdev_fh,
		       unsigned int cmd, void *arg)
{
	u32 which;

	switch (cmd) {
	default:
		return NULL;
	case VIDIOC_SUBDEV_G_FMT:
	case VIDIOC_SUBDEV_S_FMT:
		which = ((struct v4l2_subdev_format *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_CROP:
	case VIDIOC_SUBDEV_S_CROP:
		which = ((struct v4l2_subdev_crop *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_MBUS_CODE:
		which = ((struct v4l2_subdev_mbus_code_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_FRAME_SIZE:
		which = ((struct v4l2_subdev_frame_size_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL:
		which = ((struct v4l2_subdev_frame_interval_enum *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_SELECTION:
	case VIDIOC_SUBDEV_S_SELECTION:
		which = ((struct v4l2_subdev_selection *)arg)->which;
		break;
	case VIDIOC_SUBDEV_G_ROUTING:
	case VIDIOC_SUBDEV_S_ROUTING:
		which = ((struct v4l2_subdev_routing *)arg)->which;
		break;
	}

	return which == V4L2_SUBDEV_FORMAT_TRY ?
			     subdev_fh->state :
			     v4l2_subdev_get_unlocked_active_state(sd);
}

static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
			    struct v4l2_subdev_state *state)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file->private_data;
	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
	bool ro_subdev = test_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags);
	bool streams_subdev = sd->flags & V4L2_SUBDEV_FL_STREAMS;
	bool client_supports_streams = subdev_fh->client_caps &
				       V4L2_SUBDEV_CLIENT_CAP_STREAMS;
	int rval;

	switch (cmd) {
	case VIDIOC_SUBDEV_QUERYCAP: {
		struct v4l2_subdev_capability *cap = arg;

		memset(cap->reserved, 0, sizeof(cap->reserved));
		cap->version = LINUX_VERSION_CODE;
		cap->capabilities =
			(ro_subdev ? V4L2_SUBDEV_CAP_RO_SUBDEV : 0) |
			(streams_subdev ? V4L2_SUBDEV_CAP_STREAMS : 0);

		return 0;
	}

	case VIDIOC_QUERYCTRL:
		/*
		 * TODO: this really should be folded into v4l2_queryctrl (this
		 * currently returns -EINVAL for NULL control handlers).
		 * However, v4l2_queryctrl() is still called directly by
		 * drivers as well and until that has been addressed I believe
		 * it is safer to do the check here. The same is true for the
		 * other control ioctls below.
		 */
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_queryctrl(vfh->ctrl_handler, arg);

	case VIDIOC_QUERY_EXT_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_query_ext_ctrl(vfh->ctrl_handler, arg);

	case VIDIOC_QUERYMENU:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_querymenu(vfh->ctrl_handler, arg);

	case VIDIOC_G_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_g_ctrl(vfh->ctrl_handler, arg);

	case VIDIOC_S_CTRL:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg);

	case VIDIOC_G_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_g_ext_ctrls(vfh->ctrl_handler,
					vdev, sd->v4l2_dev->mdev, arg);

	case VIDIOC_S_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler,
					vdev, sd->v4l2_dev->mdev, arg);

	case VIDIOC_TRY_EXT_CTRLS:
		if (!vfh->ctrl_handler)
			return -ENOTTY;
		return v4l2_try_ext_ctrls(vfh->ctrl_handler,
					  vdev, sd->v4l2_dev->mdev, arg);

	case VIDIOC_DQEVENT:
		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
			return -ENOIOCTLCMD;

		return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);

	case VIDIOC_SUBSCRIBE_EVENT:
		return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);

	case VIDIOC_UNSUBSCRIBE_EVENT:
		return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);

#ifdef CONFIG_VIDEO_ADV_DEBUG
	case VIDIOC_DBG_G_REGISTER:
	{
		struct v4l2_dbg_register *p = arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return v4l2_subdev_call(sd, core, g_register, p);
	}
	case VIDIOC_DBG_S_REGISTER:
	{
		struct v4l2_dbg_register *p = arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return v4l2_subdev_call(sd, core, s_register, p);
	}
	case VIDIOC_DBG_G_CHIP_INFO:
	{
		struct v4l2_dbg_chip_info *p = arg;

		if (p->match.type != V4L2_CHIP_MATCH_SUBDEV || p->match.addr)
			return -EINVAL;
		if (sd->ops->core && sd->ops->core->s_register)
			p->flags |= V4L2_CHIP_FL_WRITABLE;
		if (sd->ops->core && sd->ops->core->g_register)
			p->flags |= V4L2_CHIP_FL_READABLE;
		strscpy(p->name, sd->name, sizeof(p->name));
		return 0;
	}
#endif

	case VIDIOC_LOG_STATUS: {
		int ret;

		pr_info("%s: =================  START STATUS  =================\n",
			sd->name);
		ret = v4l2_subdev_call(sd, core, log_status);
		pr_info("%s: ==================  END STATUS  ==================\n",
			sd->name);
		return ret;
	}

	case VIDIOC_SUBDEV_G_FMT: {
		struct v4l2_subdev_format *format = arg;

		if (!client_supports_streams)
			format->stream = 0;

		memset(format->reserved, 0, sizeof(format->reserved));
		memset(format->format.reserved, 0, sizeof(format->format.reserved));
		return v4l2_subdev_call(sd, pad, get_fmt, state, format);
	}

	case VIDIOC_SUBDEV_S_FMT: {
		struct v4l2_subdev_format *format = arg;

		if (format->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		if (!client_supports_streams)
			format->stream = 0;

		memset(format->reserved, 0, sizeof(format->reserved));
		memset(format->format.reserved, 0, sizeof(format->format.reserved));
		return v4l2_subdev_call(sd, pad, set_fmt, state, format);
	}

	case VIDIOC_SUBDEV_G_CROP: {
		struct v4l2_subdev_crop *crop = arg;
		struct v4l2_subdev_selection sel;

		if (!client_supports_streams)
			crop->stream = 0;

		memset(crop->reserved, 0, sizeof(crop->reserved));
		memset(&sel, 0, sizeof(sel));
		sel.which = crop->which;
		sel.pad = crop->pad;
		sel.target = V4L2_SEL_TGT_CROP;

		rval = v4l2_subdev_call(
			sd, pad, get_selection, state, &sel);

		crop->rect = sel.r;

		return rval;
	}

	case VIDIOC_SUBDEV_S_CROP: {
		struct v4l2_subdev_crop *crop = arg;
		struct v4l2_subdev_selection sel;

		if (crop->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		if (!client_supports_streams)
			crop->stream = 0;

		memset(crop->reserved, 0, sizeof(crop->reserved));
		memset(&sel, 0, sizeof(sel));
		sel.which = crop->which;
		sel.pad = crop->pad;
		sel.target = V4L2_SEL_TGT_CROP;
		sel.r = crop->rect;

		rval = v4l2_subdev_call(
			sd, pad, set_selection, state, &sel);

		crop->rect = sel.r;

		return rval;
	}

	case VIDIOC_SUBDEV_ENUM_MBUS_CODE: {
		struct v4l2_subdev_mbus_code_enum *code = arg;

		if (!client_supports_streams)
			code->stream = 0;

		memset(code->reserved, 0, sizeof(code->reserved));
		return v4l2_subdev_call(sd, pad, enum_mbus_code, state,
					code);
	}

	case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: {
		struct v4l2_subdev_frame_size_enum *fse = arg;

		if (!client_supports_streams)
			fse->stream = 0;

		memset(fse->reserved, 0, sizeof(fse->reserved));
		return v4l2_subdev_call(sd, pad, enum_frame_size, state,
					fse);
	}

	case VIDIOC_SUBDEV_G_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		if (!client_supports_streams)
			fi->stream = 0;

		memset(fi->reserved, 0, sizeof(fi->reserved));
		return v4l2_subdev_call(sd, video, g_frame_interval, arg);
	}

	case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		if (ro_subdev)
			return -EPERM;

		if (!client_supports_streams)
			fi->stream = 0;

		memset(fi->reserved, 0, sizeof(fi->reserved));
		return v4l2_subdev_call(sd, video, s_frame_interval, arg);
	}

	case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval_enum *fie = arg;

		if (!client_supports_streams)
			fie->stream = 0;

		memset(fie->reserved, 0, sizeof(fie->reserved));
		return v4l2_subdev_call(sd, pad, enum_frame_interval, state,
					fie);
	}

	case VIDIOC_SUBDEV_G_SELECTION: {
		struct v4l2_subdev_selection *sel = arg;

		if (!client_supports_streams)
			sel->stream = 0;

		memset(sel->reserved, 0, sizeof(sel->reserved));
		return v4l2_subdev_call(
			sd, pad, get_selection, state, sel);
	}

	case VIDIOC_SUBDEV_S_SELECTION: {
		struct v4l2_subdev_selection *sel = arg;

		if (sel->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		if (!client_supports_streams)
			sel->stream = 0;

		memset(sel->reserved, 0, sizeof(sel->reserved));
		return v4l2_subdev_call(
			sd, pad, set_selection, state, sel);
	}

	case VIDIOC_G_EDID: {
		struct v4l2_subdev_edid *edid = arg;

		return v4l2_subdev_call(sd, pad, get_edid, edid);
	}

	case VIDIOC_S_EDID: {
		struct v4l2_subdev_edid *edid = arg;

		return v4l2_subdev_call(sd, pad, set_edid, edid);
	}

	case VIDIOC_SUBDEV_DV_TIMINGS_CAP: {
		struct v4l2_dv_timings_cap *cap = arg;

		return v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
	}

	case VIDIOC_SUBDEV_ENUM_DV_TIMINGS: {
		struct v4l2_enum_dv_timings *dvt = arg;

		return v4l2_subdev_call(sd, pad, enum_dv_timings, dvt);
	}

	case VIDIOC_SUBDEV_QUERY_DV_TIMINGS:
		return v4l2_subdev_call(sd, video, query_dv_timings, arg);

	case VIDIOC_SUBDEV_G_DV_TIMINGS:
		return v4l2_subdev_call(sd, video, g_dv_timings, arg);

	case VIDIOC_SUBDEV_S_DV_TIMINGS:
		if (ro_subdev)
			return -EPERM;

		return v4l2_subdev_call(sd, video, s_dv_timings, arg);

	case VIDIOC_SUBDEV_G_STD:
		return v4l2_subdev_call(sd, video, g_std, arg);

	case VIDIOC_SUBDEV_S_STD: {
		v4l2_std_id *std = arg;

		if (ro_subdev)
			return -EPERM;

		return v4l2_subdev_call(sd, video, s_std, *std);
	}

	case VIDIOC_SUBDEV_ENUMSTD: {
		struct v4l2_standard *p = arg;
		v4l2_std_id id;

		if (v4l2_subdev_call(sd, video, g_tvnorms, &id))
			return -EINVAL;

		return v4l_video_std_enumstd(p, id);
	}

	case VIDIOC_SUBDEV_QUERYSTD:
		return v4l2_subdev_call(sd, video, querystd, arg);

	case VIDIOC_SUBDEV_G_ROUTING: {
		struct v4l2_subdev_routing *routing = arg;
		struct v4l2_subdev_krouting *krouting;

		if (!v4l2_subdev_enable_streams_api)
			return -ENOIOCTLCMD;

		if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
			return -ENOIOCTLCMD;

		memset(routing->reserved, 0, sizeof(routing->reserved));

		krouting = &state->routing;

		if (routing->num_routes < krouting->num_routes) {
			routing->num_routes = krouting->num_routes;
			return -ENOSPC;
		}

		memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
		       krouting->routes,
		       krouting->num_routes * sizeof(*krouting->routes));
		routing->num_routes = krouting->num_routes;

		return 0;
	}

	case VIDIOC_SUBDEV_S_ROUTING: {
		struct v4l2_subdev_routing *routing = arg;
		struct v4l2_subdev_route *routes =
			(struct v4l2_subdev_route *)(uintptr_t)routing->routes;
		struct v4l2_subdev_krouting krouting = {};
		unsigned int i;

		if (!v4l2_subdev_enable_streams_api)
			return -ENOIOCTLCMD;

		if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
			return -ENOIOCTLCMD;

		if (routing->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
			return -EPERM;

		memset(routing->reserved, 0, sizeof(routing->reserved));

		for (i = 0; i < routing->num_routes; ++i) {
			const struct v4l2_subdev_route *route = &routes[i];
			const struct media_pad *pads = sd->entity.pads;

			if (route->sink_stream > V4L2_SUBDEV_MAX_STREAM_ID ||
			    route->source_stream > V4L2_SUBDEV_MAX_STREAM_ID)
				return -EINVAL;

			if (route->sink_pad >= sd->entity.num_pads)
				return -EINVAL;

			if (!(pads[route->sink_pad].flags &
			      MEDIA_PAD_FL_SINK))
				return -EINVAL;

			if (route->source_pad >= sd->entity.num_pads)
				return -EINVAL;

			if (!(pads[route->source_pad].flags &
			      MEDIA_PAD_FL_SOURCE))
				return -EINVAL;
		}

		krouting.num_routes = routing->num_routes;
		krouting.routes = routes;

		return v4l2_subdev_call(sd, pad, set_routing, state,
					routing->which, &krouting);
	}

	case VIDIOC_SUBDEV_G_CLIENT_CAP: {
		struct v4l2_subdev_client_capability *client_cap = arg;

		client_cap->capabilities = subdev_fh->client_caps;

		return 0;
	}

	case VIDIOC_SUBDEV_S_CLIENT_CAP: {
		struct v4l2_subdev_client_capability *client_cap = arg;

		/*
		 * Clear V4L2_SUBDEV_CLIENT_CAP_STREAMS if streams API is not
		 * enabled. Remove this when streams API is no longer
		 * experimental.
		 */
		if (!v4l2_subdev_enable_streams_api)
			client_cap->capabilities &= ~V4L2_SUBDEV_CLIENT_CAP_STREAMS;

		/* Filter out unsupported capabilities */
		client_cap->capabilities &= V4L2_SUBDEV_CLIENT_CAP_STREAMS;

		subdev_fh->client_caps = client_cap->capabilities;

		return 0;
	}

	default:
		return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
	}

	return 0;
}

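/*
 * Wrapper around subdev_do_ioctl() that takes the video device lock (if any)
 * and the relevant subdev state lock before dispatching, and releases them
 * in the reverse order afterwards.
 */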
static long subdev_do_ioctl_lock(struct file *file, unsigned int cmd, void *arg)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->lock;
	long ret = -ENODEV;

	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;

	if (video_is_registered(vdev)) {
		struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
		struct v4l2_fh *vfh = file->private_data;
		struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
		struct v4l2_subdev_state *state;

		state = subdev_ioctl_get_state(sd, subdev_fh, cmd, arg);

		if (state)
			v4l2_subdev_lock_state(state);

		ret = subdev_do_ioctl(file, cmd, arg, state);

		if (state)
			v4l2_subdev_unlock_state(state);
	}

	if (lock)
		mutex_unlock(lock);
	return ret;
}

static long subdev_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	return video_usercopy(file, cmd, arg, subdev_do_ioctl_lock);
}

#ifdef CONFIG_COMPAT
static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);

	return v4l2_subdev_call(sd, core, compat_ioctl32, cmd, arg);
}
#endif

#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
static long subdev_ioctl(struct file *file, unsigned int cmd,
			 unsigned long arg)
{
	return -ENODEV;
}

#ifdef CONFIG_COMPAT
static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	return -ENODEV;
}
#endif
#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

static __poll_t subdev_poll(struct file *file, poll_table *wait)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *fh = file->private_data;

	if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
		return EPOLLERR;

	poll_wait(file, &fh->wait, wait);

	if (v4l2_event_pending(fh))
		return EPOLLPRI;

	return 0;
}

const struct v4l2_file_operations v4l2_subdev_fops = {
	.owner = THIS_MODULE,
	.open = subdev_open,
	.unlocked_ioctl = subdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = subdev_compat_ioctl32,
#endif
	.release = subdev_close,
	.poll = subdev_poll,
};

#ifdef CONFIG_MEDIA_CONTROLLER

int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity,
				      struct fwnode_endpoint *endpoint)
{
	struct fwnode_handle *fwnode;
	struct v4l2_subdev *sd;

	if (!is_media_entity_v4l2_subdev(entity))
		return -EINVAL;

	sd = media_entity_to_v4l2_subdev(entity);

	fwnode = fwnode_graph_get_port_parent(endpoint->local_fwnode);
	fwnode_handle_put(fwnode);

	if (device_match_fwnode(sd->dev, fwnode))
		return endpoint->port;

	return -ENXIO;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_fwnode_pad_1_to_1);

int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
				      struct media_link *link,
				      struct v4l2_subdev_format *source_fmt,
				      struct v4l2_subdev_format *sink_fmt)
{
	bool pass = true;

	/* The width, height and code must match. */
	if (source_fmt->format.width != sink_fmt->format.width) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: width does not match (source %u, sink %u)\n",
			__func__,
			source_fmt->format.width, sink_fmt->format.width);
		pass = false;
	}

	if (source_fmt->format.height != sink_fmt->format.height) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: height does not match (source %u, sink %u)\n",
			__func__,
			source_fmt->format.height, sink_fmt->format.height);
		pass = false;
	}

	if (source_fmt->format.code != sink_fmt->format.code) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: media bus code does not match (source 0x%8.8x, sink 0x%8.8x)\n",
			__func__,
			source_fmt->format.code, sink_fmt->format.code);
		pass = false;
	}

	/* The field order must match, or the sink field order must be NONE
	 * to support interlaced hardware connected to bridges that support
	 * progressive formats only.
	 */
	if (source_fmt->format.field != sink_fmt->format.field &&
	    sink_fmt->format.field != V4L2_FIELD_NONE) {
		dev_dbg(sd->entity.graph_obj.mdev->dev,
			"%s: field does not match (source %u, sink %u)\n",
			__func__,
			source_fmt->format.field, sink_fmt->format.field);
		pass = false;
	}

	if (pass)
		return 0;

	dev_dbg(sd->entity.graph_obj.mdev->dev,
		"%s: link was \"%s\":%u -> \"%s\":%u\n", __func__,
		link->source->entity->name, link->source->index,
		link->sink->entity->name, link->sink->index);

	return -EPIPE;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default);

static int
v4l2_subdev_link_validate_get_format(struct media_pad *pad, u32 stream,
				     struct v4l2_subdev_format *fmt,
				     bool states_locked)
{
	struct v4l2_subdev_state *state;
	struct v4l2_subdev *sd;
	int ret;

	if (!is_media_entity_v4l2_subdev(pad->entity)) {
		WARN(pad->entity->function != MEDIA_ENT_F_IO_V4L,
		     "Driver bug! Wrong media entity type 0x%08x, entity %s\n",
		     pad->entity->function, pad->entity->name);

		return -EINVAL;
	}

	sd = media_entity_to_v4l2_subdev(pad->entity);

	fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
	fmt->pad = pad->index;
	fmt->stream = stream;

	if (states_locked)
		state = v4l2_subdev_get_locked_active_state(sd);
	else
		state = v4l2_subdev_lock_and_get_active_state(sd);

	ret = v4l2_subdev_call(sd, pad, get_fmt, state, fmt);

	if (!states_locked && state)
		v4l2_subdev_unlock_state(state);

	return ret;
}

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)

static void __v4l2_link_validate_get_streams(struct media_pad *pad,
					     u64 *streams_mask,
					     bool states_locked)
{
	struct v4l2_subdev_route *route;
	struct v4l2_subdev_state *state;
	struct v4l2_subdev *subdev;

	subdev = media_entity_to_v4l2_subdev(pad->entity);

	*streams_mask = 0;

	if (states_locked)
		state = v4l2_subdev_get_locked_active_state(subdev);
	else
		state = v4l2_subdev_lock_and_get_active_state(subdev);

	if (WARN_ON(!state))
		return;

	for_each_active_route(&state->routing, route) {
		u32 route_pad;
		u32 route_stream;

		if (pad->flags & MEDIA_PAD_FL_SOURCE) {
			route_pad = route->source_pad;
			route_stream = route->source_stream;
		} else {
			route_pad = route->sink_pad;
			route_stream = route->sink_stream;
		}

		if (route_pad != pad->index)
			continue;

		*streams_mask |= BIT_ULL(route_stream);
	}

	if (!states_locked)
		v4l2_subdev_unlock_state(state);
}

#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */

static void v4l2_link_validate_get_streams(struct media_pad *pad,
					   u64 *streams_mask,
					   bool states_locked)
{
	struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(pad->entity);

	if (!(subdev->flags & V4L2_SUBDEV_FL_STREAMS)) {
		/* Non-streams subdevs have an implicit stream 0 */
		*streams_mask = BIT_ULL(0);
		return;
	}

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
	__v4l2_link_validate_get_streams(pad, streams_mask, states_locked);
#else
	/* This shouldn't happen */
	*streams_mask = 0;
#endif
}

static int v4l2_subdev_link_validate_locked(struct media_link *link, bool states_locked)
{
	struct v4l2_subdev *sink_subdev =
		media_entity_to_v4l2_subdev(link->sink->entity);
	struct device *dev = sink_subdev->entity.graph_obj.mdev->dev;
	u64 source_streams_mask;
	u64 sink_streams_mask;
	u64 dangling_sink_streams;
	u32 stream;
	int ret;

	dev_dbg(dev, "validating link \"%s\":%u -> \"%s\":%u\n",
		link->source->entity->name, link->source->index,
		link->sink->entity->name, link->sink->index);

	v4l2_link_validate_get_streams(link->source, &source_streams_mask, states_locked);
	v4l2_link_validate_get_streams(link->sink, &sink_streams_mask, states_locked);

	/*
	 * It is ok to have more source streams than sink streams as extra
	 * source streams can just be ignored by the receiver, but having extra
	 * sink streams is an error as streams must have a source.
	 */
	dangling_sink_streams = (source_streams_mask ^ sink_streams_mask) &
				sink_streams_mask;
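	/*
	 * For example (illustrative masks): source 0b0111 and sink 0b1101
	 * leave sink stream 3 dangling (0b1000), which fails validation.
	 */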
	if (dangling_sink_streams) {
		dev_err(dev, "Dangling sink streams: mask %#llx\n",
			dangling_sink_streams);
		return -EINVAL;
	}

	/* Validate source and sink stream formats */

	for (stream = 0; stream < sizeof(sink_streams_mask) * 8; ++stream) {
		struct v4l2_subdev_format sink_fmt, source_fmt;

		if (!(sink_streams_mask & BIT_ULL(stream)))
			continue;

		dev_dbg(dev, "validating stream \"%s\":%u:%u -> \"%s\":%u:%u\n",
			link->source->entity->name, link->source->index, stream,
			link->sink->entity->name, link->sink->index, stream);

		ret = v4l2_subdev_link_validate_get_format(link->source, stream,
							   &source_fmt, states_locked);
		if (ret < 0) {
			dev_dbg(dev,
				"Failed to get format for \"%s\":%u:%u (but that's ok)\n",
				link->source->entity->name, link->source->index,
				stream);
			continue;
		}

		ret = v4l2_subdev_link_validate_get_format(link->sink, stream,
							   &sink_fmt, states_locked);
		if (ret < 0) {
			dev_dbg(dev,
				"Failed to get format for \"%s\":%u:%u (but that's ok)\n",
				link->sink->entity->name, link->sink->index,
				stream);
			continue;
		}

		/* TODO: add stream number to link_validate() */
		ret = v4l2_subdev_call(sink_subdev, pad, link_validate, link,
				       &source_fmt, &sink_fmt);
		if (!ret)
			continue;

		if (ret != -ENOIOCTLCMD)
			return ret;

		ret = v4l2_subdev_link_validate_default(sink_subdev, link,
							&source_fmt, &sink_fmt);

		if (ret)
			return ret;
	}

	return 0;
}

int v4l2_subdev_link_validate(struct media_link *link)
{
	struct v4l2_subdev *source_sd, *sink_sd;
	struct v4l2_subdev_state *source_state, *sink_state;
	bool states_locked;
	int ret;

	if (!is_media_entity_v4l2_subdev(link->sink->entity) ||
	    !is_media_entity_v4l2_subdev(link->source->entity)) {
		pr_warn_once("%s of link '%s':%u->'%s':%u is not a V4L2 sub-device, driver bug!\n",
			     !is_media_entity_v4l2_subdev(link->sink->entity) ?
			     "sink" : "source",
			     link->source->entity->name, link->source->index,
			     link->sink->entity->name, link->sink->index);
		return 0;
	}

	sink_sd = media_entity_to_v4l2_subdev(link->sink->entity);
	source_sd = media_entity_to_v4l2_subdev(link->source->entity);

	sink_state = v4l2_subdev_get_unlocked_active_state(sink_sd);
	source_state = v4l2_subdev_get_unlocked_active_state(source_sd);

	states_locked = sink_state && source_state;

	if (states_locked) {
		v4l2_subdev_lock_state(sink_state);
		v4l2_subdev_lock_state(source_state);
	}

	ret = v4l2_subdev_link_validate_locked(link, states_locked);

	if (states_locked) {
		v4l2_subdev_unlock_state(sink_state);
		v4l2_subdev_unlock_state(source_state);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate);

bool v4l2_subdev_has_pad_interdep(struct media_entity *entity,
				  unsigned int pad0, unsigned int pad1)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct v4l2_subdev_krouting *routing;
	struct v4l2_subdev_state *state;
	unsigned int i;

	state = v4l2_subdev_lock_and_get_active_state(sd);

	routing = &state->routing;

	for (i = 0; i < routing->num_routes; ++i) {
		struct v4l2_subdev_route *route = &routing->routes[i];

		if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
			continue;

		if ((route->sink_pad == pad0 && route->source_pad == pad1) ||
		    (route->source_pad == pad0 && route->sink_pad == pad1)) {
			v4l2_subdev_unlock_state(state);
			return true;
		}
	}

	v4l2_subdev_unlock_state(state);

	return false;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_has_pad_interdep);

struct v4l2_subdev_state *
__v4l2_subdev_state_alloc(struct v4l2_subdev *sd, const char *lock_name,
			  struct lock_class_key *lock_key)
{
	struct v4l2_subdev_state *state;
	int ret;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return ERR_PTR(-ENOMEM);

	__mutex_init(&state->_lock, lock_name, lock_key);
	if (sd->state_lock)
		state->lock = sd->state_lock;
	else
		state->lock = &state->_lock;

	/* Drivers that support streams do not need the legacy pad config */
	if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS) && sd->entity.num_pads) {
		state->pads = kvcalloc(sd->entity.num_pads,
				       sizeof(*state->pads), GFP_KERNEL);
		if (!state->pads) {
			ret = -ENOMEM;
			goto err;
		}
	}

	/*
	 * There can be no race at this point, but we lock the state anyway to
	 * satisfy lockdep checks.
	 */
	v4l2_subdev_lock_state(state);
	ret = v4l2_subdev_call(sd, pad, init_cfg, state);
	v4l2_subdev_unlock_state(state);

	if (ret < 0 && ret != -ENOIOCTLCMD)
		goto err;

	return state;

err:
	if (state && state->pads)
		kvfree(state->pads);

	kfree(state);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_alloc);

void __v4l2_subdev_state_free(struct v4l2_subdev_state *state)
{
	if (!state)
		return;

	mutex_destroy(&state->_lock);

	kfree(state->routing.routes);
	kvfree(state->stream_configs.configs);
	kvfree(state->pads);
	kfree(state);
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_state_free);

int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name,
				struct lock_class_key *key)
{
	struct v4l2_subdev_state *state;

	state = __v4l2_subdev_state_alloc(sd, name, key);
	if (IS_ERR(state))
		return PTR_ERR(state);

	sd->active_state = state;

	return 0;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_init_finalize);

void v4l2_subdev_cleanup(struct v4l2_subdev *sd)
{
	struct v4l2_async_subdev_endpoint *ase, *ase_tmp;

	__v4l2_subdev_state_free(sd->active_state);
	sd->active_state = NULL;

	if (list_empty(&sd->async_subdev_endpoint_list))
		return;

	list_for_each_entry_safe(ase, ase_tmp, &sd->async_subdev_endpoint_list,
				 async_subdev_endpoint_entry) {
		list_del(&ase->async_subdev_endpoint_entry);

		kfree(ase);
	}
}
EXPORT_SYMBOL_GPL(v4l2_subdev_cleanup);

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)

static int
v4l2_subdev_init_stream_configs(struct v4l2_subdev_stream_configs *stream_configs,
				const struct v4l2_subdev_krouting *routing)
{
	struct v4l2_subdev_stream_configs new_configs = { 0 };
	struct v4l2_subdev_route *route;
	u32 idx;

	/* Count number of formats needed */
	for_each_active_route(routing, route) {
		/*
		 * Each route needs a format on both ends of the route.
		 */
		new_configs.num_configs += 2;
	}

	if (new_configs.num_configs) {
		new_configs.configs = kvcalloc(new_configs.num_configs,
					       sizeof(*new_configs.configs),
					       GFP_KERNEL);

		if (!new_configs.configs)
			return -ENOMEM;
	}

	/*
	 * Fill in the 'pad' and 'stream' values for each item in the array
	 * from the routing table.
	 */
	idx = 0;

	for_each_active_route(routing, route) {
		new_configs.configs[idx].pad = route->sink_pad;
		new_configs.configs[idx].stream = route->sink_stream;

		idx++;

		new_configs.configs[idx].pad = route->source_pad;
		new_configs.configs[idx].stream = route->source_stream;

		idx++;
	}

	kvfree(stream_configs->configs);
	*stream_configs = new_configs;

	return 0;
}

int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
			struct v4l2_subdev_format *format)
{
	struct v4l2_mbus_framefmt *fmt;

	if (sd->flags & V4L2_SUBDEV_FL_STREAMS)
		fmt = v4l2_subdev_state_get_stream_format(state, format->pad,
							  format->stream);
	else if (format->pad < sd->entity.num_pads && format->stream == 0)
		fmt = v4l2_subdev_get_pad_format(sd, state, format->pad);
	else
		fmt = NULL;

	if (!fmt)
		return -EINVAL;

	format->format = *fmt;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_fmt);

int v4l2_subdev_set_routing(struct v4l2_subdev *sd,
			    struct v4l2_subdev_state *state,
			    const struct v4l2_subdev_krouting *routing)
{
	struct v4l2_subdev_krouting *dst = &state->routing;
	const struct v4l2_subdev_krouting *src = routing;
	struct v4l2_subdev_krouting new_routing = { 0 };
	size_t bytes;
	int r;

	if (unlikely(check_mul_overflow((size_t)src->num_routes,
					sizeof(*src->routes), &bytes)))
		return -EOVERFLOW;

	lockdep_assert_held(state->lock);

	if (src->num_routes > 0) {
		new_routing.routes = kmemdup(src->routes, bytes, GFP_KERNEL);
		if (!new_routing.routes)
			return -ENOMEM;
	}

	new_routing.num_routes = src->num_routes;

	r = v4l2_subdev_init_stream_configs(&state->stream_configs,
					    &new_routing);
	if (r) {
		kfree(new_routing.routes);
		return r;
	}

	kfree(dst->routes);
	*dst = new_routing;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing);

struct v4l2_subdev_route *
__v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing,
				struct v4l2_subdev_route *route)
{
	if (route)
		++route;
	else
		route = &routing->routes[0];

	for (; route < routing->routes + routing->num_routes; ++route) {
		if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
			continue;

		return route;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__v4l2_subdev_next_active_route);
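
/*
 * Drivers normally iterate active routes with the for_each_active_route()
 * macro, which wraps the helper above, rather than calling it directly.
 */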

int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd,
				     struct v4l2_subdev_state *state,
				     const struct v4l2_subdev_krouting *routing,
				     const struct v4l2_mbus_framefmt *fmt)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;
	int ret;

	ret = v4l2_subdev_set_routing(sd, state, routing);
	if (ret)
		return ret;

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i)
		stream_configs->configs[i].fmt = *fmt;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing_with_fmt);
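
/*
 * A minimal usage sketch (hypothetical driver; the mydrv_* names are
 * illustrative): an init_cfg handler can set up a default 1:1 routing and
 * propagate a single default format to every stream:
 *
 *	static int mydrv_init_cfg(struct v4l2_subdev *sd,
 *				  struct v4l2_subdev_state *state)
 *	{
 *		struct v4l2_subdev_route routes[] = {
 *			{
 *				.sink_pad = 0,
 *				.sink_stream = 0,
 *				.source_pad = 1,
 *				.source_stream = 0,
 *				.flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
 *			},
 *		};
 *		struct v4l2_subdev_krouting routing = {
 *			.num_routes = ARRAY_SIZE(routes),
 *			.routes = routes,
 *		};
 *
 *		return v4l2_subdev_set_routing_with_fmt(sd, state, &routing,
 *							&mydrv_default_fmt);
 *	}
 */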

struct v4l2_mbus_framefmt *
v4l2_subdev_state_get_stream_format(struct v4l2_subdev_state *state,
				    unsigned int pad, u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].fmt;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_stream_format);

struct v4l2_rect *
v4l2_subdev_state_get_stream_crop(struct v4l2_subdev_state *state,
				  unsigned int pad, u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].crop;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_stream_crop);

struct v4l2_rect *
v4l2_subdev_state_get_stream_compose(struct v4l2_subdev_state *state,
				     unsigned int pad, u32 stream)
{
	struct v4l2_subdev_stream_configs *stream_configs;
	unsigned int i;

	lockdep_assert_held(state->lock);

	stream_configs = &state->stream_configs;

	for (i = 0; i < stream_configs->num_configs; ++i) {
		if (stream_configs->configs[i].pad == pad &&
		    stream_configs->configs[i].stream == stream)
			return &stream_configs->configs[i].compose;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_stream_compose);

int v4l2_subdev_routing_find_opposite_end(const struct v4l2_subdev_krouting *routing,
					  u32 pad, u32 stream, u32 *other_pad,
					  u32 *other_stream)
{
	unsigned int i;

	for (i = 0; i < routing->num_routes; ++i) {
		struct v4l2_subdev_route *route = &routing->routes[i];

		if (route->source_pad == pad &&
		    route->source_stream == stream) {
			if (other_pad)
				*other_pad = route->sink_pad;
			if (other_stream)
				*other_stream = route->sink_stream;
			return 0;
		}

		if (route->sink_pad == pad && route->sink_stream == stream) {
			if (other_pad)
				*other_pad = route->source_pad;
			if (other_stream)
				*other_stream = route->source_stream;
			return 0;
		}
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_routing_find_opposite_end);

struct v4l2_mbus_framefmt *
v4l2_subdev_state_get_opposite_stream_format(struct v4l2_subdev_state *state,
					     u32 pad, u32 stream)
{
	u32 other_pad, other_stream;
	int ret;

	ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
						    pad, stream,
						    &other_pad, &other_stream);
	if (ret)
		return NULL;

	return v4l2_subdev_state_get_stream_format(state, other_pad,
						   other_stream);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_opposite_stream_format);

u64 v4l2_subdev_state_xlate_streams(const struct v4l2_subdev_state *state,
				    u32 pad0, u32 pad1, u64 *streams)
{
	const struct v4l2_subdev_krouting *routing = &state->routing;
	struct v4l2_subdev_route *route;
	u64 streams0 = 0;
	u64 streams1 = 0;

	for_each_active_route(routing, route) {
		if (route->sink_pad == pad0 && route->source_pad == pad1 &&
		    (*streams & BIT_ULL(route->sink_stream))) {
			streams0 |= BIT_ULL(route->sink_stream);
			streams1 |= BIT_ULL(route->source_stream);
		}
		if (route->source_pad == pad0 && route->sink_pad == pad1 &&
		    (*streams & BIT_ULL(route->source_stream))) {
			streams0 |= BIT_ULL(route->source_stream);
			streams1 |= BIT_ULL(route->sink_stream);
		}
	}

	*streams = streams0;
	return streams1;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_xlate_streams);
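
/*
 * For example (illustrative): with a single active route from pad0/stream 2
 * to pad1/stream 0, calling this with *streams = BIT_ULL(2) returns
 * BIT_ULL(0) and leaves *streams = BIT_ULL(2); streams with no matching
 * route are cleared from *streams.
 */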
1762 
1763 int v4l2_subdev_routing_validate(struct v4l2_subdev *sd,
1764 				 const struct v4l2_subdev_krouting *routing,
1765 				 enum v4l2_subdev_routing_restriction disallow)
1766 {
1767 	u32 *remote_pads = NULL;
1768 	unsigned int i, j;
1769 	int ret = -EINVAL;
1770 
1771 	if (disallow & (V4L2_SUBDEV_ROUTING_NO_STREAM_MIX |
1772 			V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING)) {
1773 		remote_pads = kcalloc(sd->entity.num_pads, sizeof(*remote_pads),
1774 				      GFP_KERNEL);
1775 		if (!remote_pads)
1776 			return -ENOMEM;
1777 
1778 		for (i = 0; i < sd->entity.num_pads; ++i)
1779 			remote_pads[i] = U32_MAX;
1780 	}
1781 
1782 	for (i = 0; i < routing->num_routes; ++i) {
1783 		const struct v4l2_subdev_route *route = &routing->routes[i];
1784 
1785 		/* Validate the sink and source pad numbers. */
1786 		if (route->sink_pad >= sd->entity.num_pads ||
1787 		    !(sd->entity.pads[route->sink_pad].flags & MEDIA_PAD_FL_SINK)) {
1788 			dev_dbg(sd->dev, "route %u sink (%u) is not a sink pad\n",
1789 				i, route->sink_pad);
1790 			goto out;
1791 		}
1792 
1793 		if (route->source_pad >= sd->entity.num_pads ||
1794 		    !(sd->entity.pads[route->source_pad].flags & MEDIA_PAD_FL_SOURCE)) {
1795 			dev_dbg(sd->dev, "route %u source (%u) is not a source pad\n",
1796 				i, route->source_pad);
1797 			goto out;
1798 		}
1799 
1800 		/*
1801 		 * V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX: all streams from a
1802 		 * sink pad must be routed to a single source pad.
1803 		 */
1804 		if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX) {
1805 			if (remote_pads[route->sink_pad] != U32_MAX &&
1806 			    remote_pads[route->sink_pad] != route->source_pad) {
1807 				dev_dbg(sd->dev,
1808 					"route %u attempts to mix %s streams\n",
1809 					i, "sink");
1810 				goto out;
1811 			}
1812 		}
1813 
1814 		/*
1815 		 * V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX: all streams on a
1816 		 * source pad must originate from a single sink pad.
1817 		 */
1818 		if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX) {
1819 			if (remote_pads[route->source_pad] != U32_MAX &&
1820 			    remote_pads[route->source_pad] != route->sink_pad) {
1821 				dev_dbg(sd->dev,
1822 					"route %u attempts to mix %s streams\n",
1823 					i, "source");
1824 				goto out;
1825 			}
1826 		}
1827 
1828 		/*
1829 		 * V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING: Pads on the sink
1830 		 * side can not do stream multiplexing, i.e. there can be only
1831 		 * a single stream in a sink pad.
1832 		 */
1833 		if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING) {
1834 			if (remote_pads[route->sink_pad] != U32_MAX) {
1835 				dev_dbg(sd->dev,
1836 					"route %u attempts to multiplex on %s pad %u\n",
1837 					i, "sink", route->sink_pad);
1838 				goto out;
1839 			}
1840 		}
1841 
1842 		/*
1843 		 * V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING: Pads on the
1844 		 * source side can not do stream multiplexing, i.e. there can
1845 		 * be only a single stream in a source pad.
1846 		 */
1847 		if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING) {
1848 			if (remote_pads[route->source_pad] != U32_MAX) {
1849 				dev_dbg(sd->dev,
1850 					"route %u attempts to multiplex on %s pad %u\n",
1851 					i, "source", route->source_pad);
1852 				goto out;
1853 			}
1854 		}
1855 
1856 		if (remote_pads) {
1857 			remote_pads[route->sink_pad] = route->source_pad;
1858 			remote_pads[route->source_pad] = route->sink_pad;
1859 		}
1860 
1861 		for (j = i + 1; j < routing->num_routes; ++j) {
1862 			const struct v4l2_subdev_route *r = &routing->routes[j];
1863 
1864 			/*
1865 			 * V4L2_SUBDEV_ROUTING_NO_1_TO_N: No two routes can
1866 			 * originate from the same (sink) stream.
1867 			 */
1868 			if ((disallow & V4L2_SUBDEV_ROUTING_NO_1_TO_N) &&
1869 			    route->sink_pad == r->sink_pad &&
1870 			    route->sink_stream == r->sink_stream) {
1871 				dev_dbg(sd->dev,
1872 					"routes %u and %u originate from same sink (%u/%u)\n",
1873 					i, j, route->sink_pad,
1874 					route->sink_stream);
1875 				goto out;
1876 			}
1877 
1878 			/*
1879 			 * V4L2_SUBDEV_ROUTING_NO_N_TO_1: No two routes can end
1880 			 * at the same (source) stream.
1881 			 */
1882 			if ((disallow & V4L2_SUBDEV_ROUTING_NO_N_TO_1) &&
1883 			    route->source_pad == r->source_pad &&
1884 			    route->source_stream == r->source_stream) {
1885 				dev_dbg(sd->dev,
1886 					"routes %u and %u end at same source (%u/%u)\n",
1887 					i, j, route->source_pad,
1888 					route->source_stream);
1889 				goto out;
1890 			}
1891 		}
1892 	}
1893 
1894 	ret = 0;
1895 
1896 out:
1897 	kfree(remote_pads);
1898 	return ret;
1899 }
1900 EXPORT_SYMBOL_GPL(v4l2_subdev_routing_validate);
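
/*
 * Usage sketch (not part of the API): a driver would typically call
 * v4l2_subdev_routing_validate() from its .set_routing() operation before
 * storing the routing table. The driver name and restriction mask below are
 * illustrative; the actual restrictions depend on the hardware.
 *
 *	static int my_sd_set_routing(struct v4l2_subdev *sd,
 *				     struct v4l2_subdev_state *state,
 *				     enum v4l2_subdev_format_whence which,
 *				     struct v4l2_subdev_krouting *routing)
 *	{
 *		int ret;
 *
 *		ret = v4l2_subdev_routing_validate(sd, routing,
 *						   V4L2_SUBDEV_ROUTING_ONLY_1_TO_1);
 *		if (ret)
 *			return ret;
 *
 *		return v4l2_subdev_set_routing(sd, state, routing);
 *	}
 */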
1901 
1902 static int v4l2_subdev_enable_streams_fallback(struct v4l2_subdev *sd, u32 pad,
1903 					       u64 streams_mask)
1904 {
1905 	struct device *dev = sd->entity.graph_obj.mdev->dev;
1906 	unsigned int i;
1907 	int ret;
1908 
1909 	/*
	 * If the subdev doesn't implement pad-based stream enable, fall back
1911 	 * on the .s_stream() operation. This can only be done for subdevs that
1912 	 * have a single source pad, as sd->enabled_streams is global to the
1913 	 * subdev.
1914 	 */
1915 	if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
1916 		return -EOPNOTSUPP;
1917 
1918 	for (i = 0; i < sd->entity.num_pads; ++i) {
1919 		if (i != pad && sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
1920 			return -EOPNOTSUPP;
1921 	}
1922 
1923 	if (sd->enabled_streams & streams_mask) {
1924 		dev_dbg(dev, "set of streams %#llx already enabled on %s:%u\n",
1925 			streams_mask, sd->entity.name, pad);
1926 		return -EALREADY;
1927 	}
1928 
1929 	/* Start streaming when the first streams are enabled. */
1930 	if (!sd->enabled_streams) {
1931 		ret = v4l2_subdev_call(sd, video, s_stream, 1);
1932 		if (ret)
1933 			return ret;
1934 	}
1935 
1936 	sd->enabled_streams |= streams_mask;
1937 
1938 	return 0;
1939 }
1940 
1941 int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad,
1942 			       u64 streams_mask)
1943 {
1944 	struct device *dev = sd->entity.graph_obj.mdev->dev;
1945 	struct v4l2_subdev_state *state;
1946 	u64 found_streams = 0;
1947 	unsigned int i;
1948 	int ret;
1949 
1950 	/* A few basic sanity checks first. */
1951 	if (pad >= sd->entity.num_pads)
1952 		return -EINVAL;
1953 
1954 	if (!streams_mask)
1955 		return 0;
1956 
	/* Fall back on .s_stream() if .enable_streams() isn't available. */
1958 	if (!sd->ops->pad || !sd->ops->pad->enable_streams)
1959 		return v4l2_subdev_enable_streams_fallback(sd, pad,
1960 							   streams_mask);
1961 
1962 	state = v4l2_subdev_lock_and_get_active_state(sd);
1963 
1964 	/*
1965 	 * Verify that the requested streams exist and that they are not
1966 	 * already enabled.
1967 	 */
1968 	for (i = 0; i < state->stream_configs.num_configs; ++i) {
1969 		struct v4l2_subdev_stream_config *cfg =
1970 			&state->stream_configs.configs[i];
1971 
1972 		if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream)))
1973 			continue;
1974 
1975 		found_streams |= BIT_ULL(cfg->stream);
1976 
1977 		if (cfg->enabled) {
1978 			dev_dbg(dev, "stream %u already enabled on %s:%u\n",
1979 				cfg->stream, sd->entity.name, pad);
1980 			ret = -EALREADY;
1981 			goto done;
1982 		}
1983 	}
1984 
1985 	if (found_streams != streams_mask) {
1986 		dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
1987 			streams_mask & ~found_streams, sd->entity.name, pad);
1988 		ret = -EINVAL;
1989 		goto done;
1990 	}
1991 
1992 	dev_dbg(dev, "enable streams %u:%#llx\n", pad, streams_mask);
1993 
1994 	/* Call the .enable_streams() operation. */
1995 	ret = v4l2_subdev_call(sd, pad, enable_streams, state, pad,
1996 			       streams_mask);
1997 	if (ret) {
1998 		dev_dbg(dev, "enable streams %u:%#llx failed: %d\n", pad,
1999 			streams_mask, ret);
2000 		goto done;
2001 	}
2002 
2003 	/* Mark the streams as enabled. */
2004 	for (i = 0; i < state->stream_configs.num_configs; ++i) {
2005 		struct v4l2_subdev_stream_config *cfg =
2006 			&state->stream_configs.configs[i];
2007 
2008 		if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream)))
2009 			cfg->enabled = true;
2010 	}
2011 
2012 done:
2013 	v4l2_subdev_unlock_state(state);
2014 
2015 	return ret;
2016 }
2017 EXPORT_SYMBOL_GPL(v4l2_subdev_enable_streams);
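
/*
 * Usage sketch (illustrative names): a bridge driver starting capture would
 * enable the streams it has routed on the remote subdev's source pad:
 *
 *	ret = v4l2_subdev_enable_streams(priv->source_sd, SOURCE_PAD,
 *					 streams_mask);
 *	if (ret)
 *		return ret;
 */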
2018 
2019 static int v4l2_subdev_disable_streams_fallback(struct v4l2_subdev *sd, u32 pad,
2020 						u64 streams_mask)
2021 {
2022 	struct device *dev = sd->entity.graph_obj.mdev->dev;
2023 	unsigned int i;
2024 	int ret;
2025 
2026 	/*
	 * If the subdev doesn't implement pad-based stream disable, fall back
2028 	 * on the .s_stream() operation. This can only be done for subdevs that
2029 	 * have a single source pad, as sd->enabled_streams is global to the
2030 	 * subdev.
2031 	 */
2032 	if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
2033 		return -EOPNOTSUPP;
2034 
2035 	for (i = 0; i < sd->entity.num_pads; ++i) {
2036 		if (i != pad && sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
2037 			return -EOPNOTSUPP;
2038 	}
2039 
2040 	if ((sd->enabled_streams & streams_mask) != streams_mask) {
2041 		dev_dbg(dev, "set of streams %#llx already disabled on %s:%u\n",
2042 			streams_mask, sd->entity.name, pad);
2043 		return -EALREADY;
2044 	}
2045 
2046 	/* Stop streaming when the last streams are disabled. */
2047 	if (!(sd->enabled_streams & ~streams_mask)) {
2048 		ret = v4l2_subdev_call(sd, video, s_stream, 0);
2049 		if (ret)
2050 			return ret;
2051 	}
2052 
2053 	sd->enabled_streams &= ~streams_mask;
2054 
2055 	return 0;
2056 }
2057 
2058 int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad,
2059 				u64 streams_mask)
2060 {
2061 	struct device *dev = sd->entity.graph_obj.mdev->dev;
2062 	struct v4l2_subdev_state *state;
2063 	u64 found_streams = 0;
2064 	unsigned int i;
2065 	int ret;
2066 
2067 	/* A few basic sanity checks first. */
2068 	if (pad >= sd->entity.num_pads)
2069 		return -EINVAL;
2070 
2071 	if (!streams_mask)
2072 		return 0;
2073 
	/* Fall back on .s_stream() if .disable_streams() isn't available. */
2075 	if (!sd->ops->pad || !sd->ops->pad->disable_streams)
2076 		return v4l2_subdev_disable_streams_fallback(sd, pad,
2077 							    streams_mask);
2078 
2079 	state = v4l2_subdev_lock_and_get_active_state(sd);
2080 
2081 	/*
2082 	 * Verify that the requested streams exist and that they are not
2083 	 * already disabled.
2084 	 */
2085 	for (i = 0; i < state->stream_configs.num_configs; ++i) {
2086 		struct v4l2_subdev_stream_config *cfg =
2087 			&state->stream_configs.configs[i];
2088 
2089 		if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream)))
2090 			continue;
2091 
2092 		found_streams |= BIT_ULL(cfg->stream);
2093 
2094 		if (!cfg->enabled) {
2095 			dev_dbg(dev, "stream %u already disabled on %s:%u\n",
2096 				cfg->stream, sd->entity.name, pad);
2097 			ret = -EALREADY;
2098 			goto done;
2099 		}
2100 	}
2101 
2102 	if (found_streams != streams_mask) {
2103 		dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
2104 			streams_mask & ~found_streams, sd->entity.name, pad);
2105 		ret = -EINVAL;
2106 		goto done;
2107 	}
2108 
2109 	dev_dbg(dev, "disable streams %u:%#llx\n", pad, streams_mask);
2110 
2111 	/* Call the .disable_streams() operation. */
2112 	ret = v4l2_subdev_call(sd, pad, disable_streams, state, pad,
2113 			       streams_mask);
2114 	if (ret) {
2115 		dev_dbg(dev, "disable streams %u:%#llx failed: %d\n", pad,
2116 			streams_mask, ret);
2117 		goto done;
2118 	}
2119 
2120 	/* Mark the streams as disabled. */
2121 	for (i = 0; i < state->stream_configs.num_configs; ++i) {
2122 		struct v4l2_subdev_stream_config *cfg =
2123 			&state->stream_configs.configs[i];
2124 
2125 		if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream)))
2126 			cfg->enabled = false;
2127 	}
2128 
2129 done:
2130 	v4l2_subdev_unlock_state(state);
2131 
2132 	return ret;
2133 }
2134 EXPORT_SYMBOL_GPL(v4l2_subdev_disable_streams);
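
/*
 * Usage sketch: the stop path mirrors the enable sketch above, disabling the
 * same mask that was enabled (illustrative names):
 *
 *	v4l2_subdev_disable_streams(priv->source_sd, SOURCE_PAD, streams_mask);
 */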
2135 
2136 int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable)
2137 {
2138 	struct v4l2_subdev_state *state;
2139 	struct v4l2_subdev_route *route;
2140 	struct media_pad *pad;
2141 	u64 source_mask = 0;
2142 	int pad_index = -1;
2143 
2144 	/*
2145 	 * Find the source pad. This helper is meant for subdevs that have a
2146 	 * single source pad, so failures shouldn't happen, but catch them
2147 	 * loudly nonetheless as they indicate a driver bug.
2148 	 */
2149 	media_entity_for_each_pad(&sd->entity, pad) {
2150 		if (pad->flags & MEDIA_PAD_FL_SOURCE) {
2151 			pad_index = pad->index;
2152 			break;
2153 		}
2154 	}
2155 
2156 	if (WARN_ON(pad_index == -1))
2157 		return -EINVAL;
2158 
2159 	/*
2160 	 * As there's a single source pad, just collect all the source streams.
2161 	 */
2162 	state = v4l2_subdev_lock_and_get_active_state(sd);
2163 
2164 	for_each_active_route(&state->routing, route)
2165 		source_mask |= BIT_ULL(route->source_stream);
2166 
2167 	v4l2_subdev_unlock_state(state);
2168 
2169 	if (enable)
2170 		return v4l2_subdev_enable_streams(sd, pad_index, source_mask);
2171 	else
2172 		return v4l2_subdev_disable_streams(sd, pad_index, source_mask);
2173 }
2174 EXPORT_SYMBOL_GPL(v4l2_subdev_s_stream_helper);
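
/*
 * Usage sketch: a streams-aware subdev with a single source pad can plug
 * this helper directly into its video ops to keep supporting the legacy
 * .s_stream() interface (hypothetical ops structure):
 *
 *	static const struct v4l2_subdev_video_ops my_sd_video_ops = {
 *		.s_stream = v4l2_subdev_s_stream_helper,
 *	};
 */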
2175 
2176 #endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
2177 
2178 #endif /* CONFIG_MEDIA_CONTROLLER */
2179 
2180 void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
2181 {
2182 	INIT_LIST_HEAD(&sd->list);
2183 	BUG_ON(!ops);
2184 	sd->ops = ops;
2185 	sd->v4l2_dev = NULL;
2186 	sd->flags = 0;
2187 	sd->name[0] = '\0';
2188 	sd->grp_id = 0;
2189 	sd->dev_priv = NULL;
2190 	sd->host_priv = NULL;
2191 	sd->privacy_led = NULL;
2192 	INIT_LIST_HEAD(&sd->async_subdev_endpoint_list);
2193 #if defined(CONFIG_MEDIA_CONTROLLER)
2194 	sd->entity.name = sd->name;
2195 	sd->entity.obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV;
2196 	sd->entity.function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
2197 #endif
2198 }
2199 EXPORT_SYMBOL(v4l2_subdev_init);
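
/*
 * Usage sketch: drivers embed a struct v4l2_subdev in their private data and
 * initialize it at probe time (priv and my_sd_ops are illustrative):
 *
 *	v4l2_subdev_init(&priv->sd, &my_sd_ops);
 *	priv->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
 *	snprintf(priv->sd.name, sizeof(priv->sd.name), "my-subdev");
 */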
2200 
2201 void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
2202 			      const struct v4l2_event *ev)
2203 {
2204 	v4l2_event_queue(sd->devnode, ev);
2205 	v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT, (void *)ev);
2206 }
2207 EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event);
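
/*
 * Usage sketch: a sensor driver signalling a resolution change both to the
 * subdev device node and to the v4l2_device notify callback:
 *
 *	static const struct v4l2_event ev = {
 *		.type = V4L2_EVENT_SOURCE_CHANGE,
 *		.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
 *	};
 *
 *	v4l2_subdev_notify_event(sd, &ev);
 */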
2208 
2209 int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd)
2210 {
2211 #if IS_REACHABLE(CONFIG_LEDS_CLASS)
2212 	sd->privacy_led = led_get(sd->dev, "privacy-led");
2213 	if (IS_ERR(sd->privacy_led) && PTR_ERR(sd->privacy_led) != -ENOENT)
2214 		return dev_err_probe(sd->dev, PTR_ERR(sd->privacy_led),
2215 				     "getting privacy LED\n");
2216 
2217 	if (!IS_ERR_OR_NULL(sd->privacy_led)) {
2218 		mutex_lock(&sd->privacy_led->led_access);
2219 		led_sysfs_disable(sd->privacy_led);
2220 		led_trigger_remove(sd->privacy_led);
2221 		led_set_brightness(sd->privacy_led, 0);
2222 		mutex_unlock(&sd->privacy_led->led_access);
2223 	}
2224 #endif
2225 	return 0;
2226 }
2227 EXPORT_SYMBOL_GPL(v4l2_subdev_get_privacy_led);
2228 
2229 void v4l2_subdev_put_privacy_led(struct v4l2_subdev *sd)
2230 {
2231 #if IS_REACHABLE(CONFIG_LEDS_CLASS)
2232 	if (!IS_ERR_OR_NULL(sd->privacy_led)) {
2233 		mutex_lock(&sd->privacy_led->led_access);
2234 		led_sysfs_enable(sd->privacy_led);
2235 		mutex_unlock(&sd->privacy_led->led_access);
2236 		led_put(sd->privacy_led);
2237 	}
2238 #endif
2239 }
2240 EXPORT_SYMBOL_GPL(v4l2_subdev_put_privacy_led);
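
/*
 * Usage sketch: the two privacy LED helpers are meant to be called in pairs,
 * typically from a sensor driver's probe and remove (or error unwinding)
 * paths:
 *
 *	probe:	ret = v4l2_subdev_get_privacy_led(sd);
 *	remove:	v4l2_subdev_put_privacy_led(sd);
 */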
2241