// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>

#include "v4l2-subdev-priv.h"

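/*
 * Wrappers around the notifier operations: each one is a no-op (or returns 0)
 * when the notifier has no ops or the respective callback is not set.
 */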
static int v4l2_async_nf_call_bound(struct v4l2_async_notifier *n,
				    struct v4l2_subdev *subdev,
				    struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->bound)
		return 0;

	return n->ops->bound(n, subdev, asd);
}

static void v4l2_async_nf_call_unbind(struct v4l2_async_notifier *n,
				      struct v4l2_subdev *subdev,
				      struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->unbind)
		return;

	n->ops->unbind(n, subdev, asd);
}

static int v4l2_async_nf_call_complete(struct v4l2_async_notifier *n)
{
	if (!n->ops || !n->ops->complete)
		return 0;

	return n->ops->complete(n);
}

static void v4l2_async_nf_call_destroy(struct v4l2_async_notifier *n,
				       struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->destroy)
		return;

	n->ops->destroy(asd);
}

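/*
 * Match a sub-device by I2C adapter number and address. Only possible when
 * the sub-device sits on an I2C bus and CONFIG_I2C is enabled.
 */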
static bool match_i2c(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
	struct i2c_client *client = i2c_verify_client(sd->dev);

	return client &&
		asd->match.i2c.adapter_id == client->adapter->nr &&
		asd->match.i2c.address == client->addr;
#else
	return false;
#endif
}

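/*
 * Match a single sub-device fwnode against an async descriptor. Both sides
 * may refer either to an endpoint or to the device itself; a mixed match is
 * accepted but flagged so the driver can be converted to endpoint matching.
 */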
static bool
match_fwnode_one(struct v4l2_async_notifier *notifier,
		 struct v4l2_subdev *sd, struct fwnode_handle *sd_fwnode,
		 struct v4l2_async_subdev *asd)
{
	struct fwnode_handle *other_fwnode;
	struct fwnode_handle *dev_fwnode;
	bool asd_fwnode_is_ep;
	bool sd_fwnode_is_ep;
	struct device *dev;

	/*
	 * Both the subdev and the async subdev can provide either an endpoint
	 * fwnode or a device fwnode. Start with the simple case of direct
	 * fwnode matching.
	 */
	if (sd_fwnode == asd->match.fwnode)
		return true;

	/*
	 * Otherwise, check if the sd fwnode and the asd fwnode refer to an
	 * endpoint or a device. If they're of the same type, there's no match.
	 * Technically speaking this checks if the nodes refer to a connected
	 * endpoint, which is the simplest check that works for both OF and
	 * ACPI. This won't make a difference, as drivers should not try to
	 * match unconnected endpoints.
	 */
	sd_fwnode_is_ep = fwnode_graph_is_endpoint(sd_fwnode);
	asd_fwnode_is_ep = fwnode_graph_is_endpoint(asd->match.fwnode);

	if (sd_fwnode_is_ep == asd_fwnode_is_ep)
		return false;

	/*
	 * The sd and asd fwnodes are of different types. Get the device fwnode
	 * parent of the endpoint fwnode, and compare it with the other fwnode.
	 */
	if (sd_fwnode_is_ep) {
		dev_fwnode = fwnode_graph_get_port_parent(sd_fwnode);
		other_fwnode = asd->match.fwnode;
	} else {
		dev_fwnode = fwnode_graph_get_port_parent(asd->match.fwnode);
		other_fwnode = sd_fwnode;
	}

	fwnode_handle_put(dev_fwnode);

	if (dev_fwnode != other_fwnode)
		return false;

	/*
	 * We have a heterogeneous match. Retrieve the struct device of the side
	 * that matched on a device fwnode to print its driver name.
	 */
	if (sd_fwnode_is_ep)
		dev = notifier->v4l2_dev ? notifier->v4l2_dev->dev
		    : notifier->sd->dev;
	else
		dev = sd->dev;

	if (dev && dev->driver) {
		if (sd_fwnode_is_ep)
			dev_warn(dev, "Driver %s uses device fwnode, incorrect match may occur\n",
				 dev->driver->name);
		dev_notice(dev, "Consider updating driver %s to match on endpoints\n",
			   dev->driver->name);
	}

	return true;
}

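/*
 * Match a sub-device against a fwnode descriptor, checking the sub-device's
 * primary fwnode first and then its secondary fwnode, if any.
 */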
static bool match_fwnode(struct v4l2_async_notifier *notifier,
			 struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	if (match_fwnode_one(notifier, sd, sd->fwnode, asd))
		return true;

	/* Also check the secondary fwnode. */
	if (IS_ERR_OR_NULL(sd->fwnode->secondary))
		return false;

	return match_fwnode_one(notifier, sd, sd->fwnode->secondary, asd);
}

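/*
 * Global list of sub-devices awaiting a match, global list of registered
 * notifiers, and the lock protecting both lists.
 */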
static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);

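/* Return the first descriptor on the notifier's waiting list matching @sd. */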
static struct v4l2_async_subdev *
v4l2_async_find_match(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd)
{
	bool (*match)(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd, struct v4l2_async_subdev *asd);
	struct v4l2_async_subdev *asd;

	list_for_each_entry(asd, &notifier->waiting, list) {
		/* bus_type has been verified valid before */
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_I2C:
			match = match_i2c;
			break;
		case V4L2_ASYNC_MATCH_FWNODE:
			match = match_fwnode;
			break;
		default:
			/* Cannot happen, unless someone breaks us */
			WARN_ON(true);
			return NULL;
		}

		/* match cannot be NULL here */
		if (match(notifier, sd, asd))
			return asd;
	}

	return NULL;
}

/* Compare two async sub-device descriptors for equivalence */
static bool asd_equal(struct v4l2_async_subdev *asd_x,
		      struct v4l2_async_subdev *asd_y)
{
	if (asd_x->match_type != asd_y->match_type)
		return false;

	switch (asd_x->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
		return asd_x->match.i2c.adapter_id ==
			asd_y->match.i2c.adapter_id &&
			asd_x->match.i2c.address ==
			asd_y->match.i2c.address;
	case V4L2_ASYNC_MATCH_FWNODE:
		return asd_x->match.fwnode == asd_y->match.fwnode;
	default:
		break;
	}

	return false;
}

/* Find the sub-device notifier registered by a sub-device driver. */
static struct v4l2_async_notifier *
v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *n;

	list_for_each_entry(n, &notifier_list, list)
		if (n->sd == sd)
			return n;

	return NULL;
}

/* Get v4l2_device related to the notifier if one can be found. */
static struct v4l2_device *
v4l2_async_nf_find_v4l2_dev(struct v4l2_async_notifier *notifier)
{
	while (notifier->parent)
		notifier = notifier->parent;

	return notifier->v4l2_dev;
}

/*
 * Return true if all child sub-device notifiers are complete, false otherwise.
 */
static bool
v4l2_async_nf_can_complete(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd;

	if (!list_empty(&notifier->waiting))
		return false;

	list_for_each_entry(sd, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier &&
		    !v4l2_async_nf_can_complete(subdev_notifier))
			return false;
	}

	return true;
}

/*
 * Complete the master notifier if possible. This is done when all async
 * sub-devices have been bound; v4l2_device is also available then.
 */
static int
v4l2_async_nf_try_complete(struct v4l2_async_notifier *notifier)
{
	/* Quick check whether there are still more sub-devices here. */
	if (!list_empty(&notifier->waiting))
		return 0;

	/* Check the entire notifier tree; find the root notifier first. */
	while (notifier->parent)
		notifier = notifier->parent;

	/* This is root if it has v4l2_dev. */
	if (!notifier->v4l2_dev)
		return 0;

	/* Is everything ready? */
	if (!v4l2_async_nf_can_complete(notifier))
		return 0;

	return v4l2_async_nf_call_complete(notifier);
}

static int
v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier);

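/*
 * When the media controller is enabled, connect a newly bound lens or flash
 * sub-device to the notifier's sub-device with an ancillary link.
 */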
static int v4l2_async_create_ancillary_links(struct v4l2_async_notifier *n,
					     struct v4l2_subdev *sd)
{
	struct media_link *link = NULL;

#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)

	if (sd->entity.function != MEDIA_ENT_F_LENS &&
	    sd->entity.function != MEDIA_ENT_F_FLASH)
		return 0;

	link = media_create_ancillary_link(&n->sd->entity, &sd->entity);

#endif

	return IS_ERR(link) ? PTR_ERR(link) : 0;
}

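/*
 * A match has been found: register the sub-device with the v4l2_device, call
 * the notifier's bound callback, create ancillary links, remove the
 * descriptor from the waiting list and move the sub-device to the notifier's
 * done list. If the sub-device registered a notifier of its own, attach it to
 * the tree and try its async sub-devices as well.
 */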
static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
				   struct v4l2_device *v4l2_dev,
				   struct v4l2_subdev *sd,
				   struct v4l2_async_subdev *asd)
{
	struct v4l2_async_notifier *subdev_notifier;
	int ret;

	ret = v4l2_device_register_subdev(v4l2_dev, sd);
	if (ret < 0)
		return ret;

	ret = v4l2_async_nf_call_bound(notifier, sd, asd);
	if (ret < 0) {
		v4l2_device_unregister_subdev(sd);
		return ret;
	}

	/*
	 * Depending on the function of the entities involved, we may want to
	 * create links between them (for example between a sensor and its lens
	 * or between a sensor's source pad and the connected device's sink
	 * pad).
	 */
	ret = v4l2_async_create_ancillary_links(notifier, sd);
	if (ret) {
		v4l2_async_nf_call_unbind(notifier, sd, asd);
		v4l2_device_unregister_subdev(sd);
		return ret;
	}

	/* Remove from the waiting list */
	list_del(&asd->list);
	sd->asd = asd;
	sd->notifier = notifier;

	/* Move from the global subdevice list to notifier's done */
	list_move(&sd->async_list, &notifier->done);

	/*
	 * See if the sub-device has a notifier. If not, return here.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (!subdev_notifier || subdev_notifier->parent)
		return 0;

	/*
	 * Proceed with checking for the sub-device notifier's async
	 * sub-devices, and return the result. The error will be handled by the
	 * caller.
	 */
	subdev_notifier->parent = notifier;

	return v4l2_async_nf_try_all_subdevs(subdev_notifier);
}

/* Test all async sub-devices in a notifier for a match. */
static int
v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_device *v4l2_dev =
		v4l2_async_nf_find_v4l2_dev(notifier);
	struct v4l2_subdev *sd;

	if (!v4l2_dev)
		return 0;

again:
	list_for_each_entry(sd, &subdev_list, async_list) {
		struct v4l2_async_subdev *asd;
		int ret;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret < 0)
			return ret;

		/*
		 * v4l2_async_match_notify() may lead to registering a
		 * new notifier and thus changing the async subdevs
		 * list. In order to proceed safely from here, restart
		 * parsing the list from the beginning.
		 */
		goto again;
	}

	return 0;
}

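/* Unregister a bound sub-device and reset its async state. */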
static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/*
	 * Subdevice driver will reprobe and put the subdev back
	 * onto the list
	 */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
}

/* Unbind all sub-devices in the notifier tree. */
static void
v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier,
				 bool readd)
{
	struct v4l2_subdev *sd, *tmp;

	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier)
			v4l2_async_nf_unbind_all_subdevs(subdev_notifier, true);

		v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
		if (readd)
			list_add_tail(&sd->asd->list, &notifier->waiting);
		v4l2_async_cleanup(sd);

		list_move(&sd->async_list, &subdev_list);
	}

	notifier->parent = NULL;
}

/* See if an async sub-device can be found in a notifier's lists. */
static bool
__v4l2_async_nf_has_async_subdev(struct v4l2_async_notifier *notifier,
				 struct v4l2_async_subdev *asd)
{
	struct v4l2_async_subdev *asd_y;
	struct v4l2_subdev *sd;

	list_for_each_entry(asd_y, &notifier->waiting, list)
		if (asd_equal(asd, asd_y))
			return true;

	list_for_each_entry(sd, &notifier->done, async_list) {
		if (WARN_ON(!sd->asd))
			continue;

		if (asd_equal(asd, sd->asd))
			return true;
	}

	return false;
}

/*
 * Find out whether an async sub-device was set up already or
 * whether it exists in a given notifier before @this_index.
 * If @this_index < 0, search the notifier's entire @asd_list.
 */
static bool
v4l2_async_nf_has_async_subdev(struct v4l2_async_notifier *notifier,
			       struct v4l2_async_subdev *asd, int this_index)
{
	struct v4l2_async_subdev *asd_y;
	int j = 0;

	lockdep_assert_held(&list_lock);

	/* Check that an asd is not being added more than once. */
	list_for_each_entry(asd_y, &notifier->asd_list, asd_list) {
		if (this_index >= 0 && j++ >= this_index)
			break;
		if (asd_equal(asd, asd_y))
			return true;
	}

	/* Check that an asd does not exist in other notifiers. */
	list_for_each_entry(notifier, &notifier_list, list)
		if (__v4l2_async_nf_has_async_subdev(notifier, asd))
			return true;

	return false;
}

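/*
 * Validate a descriptor: the match type must be supported and the descriptor
 * must not already be present in this or any other notifier.
 */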
static int v4l2_async_nf_asd_valid(struct v4l2_async_notifier *notifier,
				   struct v4l2_async_subdev *asd,
				   int this_index)
{
	struct device *dev =
		notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;

	if (!asd)
		return -EINVAL;

	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
	case V4L2_ASYNC_MATCH_FWNODE:
		if (v4l2_async_nf_has_async_subdev(notifier, asd, this_index)) {
			dev_dbg(dev, "subdev descriptor already listed in this or other notifiers\n");
			return -EEXIST;
		}
		break;
	default:
		dev_err(dev, "Invalid match type %u on %p\n",
			asd->match_type, asd);
		return -EINVAL;
	}

	return 0;
}

void v4l2_async_nf_init(struct v4l2_async_notifier *notifier)
{
	INIT_LIST_HEAD(&notifier->asd_list);
}
EXPORT_SYMBOL(v4l2_async_nf_init);

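/*
 * Register a notifier: validate its descriptors and add them to the waiting
 * list, then try to bind every already-registered matching sub-device and
 * complete the notifier tree if nothing is left waiting.
 */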
static int __v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd;
	int ret, i = 0;

	INIT_LIST_HEAD(&notifier->waiting);
	INIT_LIST_HEAD(&notifier->done);

	mutex_lock(&list_lock);

	list_for_each_entry(asd, &notifier->asd_list, asd_list) {
		ret = v4l2_async_nf_asd_valid(notifier, asd, i++);
		if (ret)
			goto err_unlock;

		list_add_tail(&asd->list, &notifier->waiting);
	}

	ret = v4l2_async_nf_try_all_subdevs(notifier);
	if (ret < 0)
		goto err_unbind;

	ret = v4l2_async_nf_try_complete(notifier);
	if (ret < 0)
		goto err_unbind;

	/* Also keep completed notifiers on the list */
	list_add(&notifier->list, &notifier_list);

	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * On failure, unbind all sub-devices registered through this notifier.
	 */
	v4l2_async_nf_unbind_all_subdevs(notifier, false);

err_unlock:
	mutex_unlock(&list_lock);

	return ret;
}

int v4l2_async_nf_register(struct v4l2_device *v4l2_dev,
			   struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!v4l2_dev || notifier->sd))
		return -EINVAL;

	notifier->v4l2_dev = v4l2_dev;

	ret = __v4l2_async_nf_register(notifier);
	if (ret)
		notifier->v4l2_dev = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_nf_register);

int v4l2_async_subdev_nf_register(struct v4l2_subdev *sd,
				  struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!sd || notifier->v4l2_dev))
		return -EINVAL;

	notifier->sd = sd;

	ret = __v4l2_async_nf_register(notifier);
	if (ret)
		notifier->sd = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_subdev_nf_register);

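/*
 * Unbind all sub-devices bound through this notifier and remove it from the
 * global notifier list.
 */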
static void
__v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
	if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
		return;

	v4l2_async_nf_unbind_all_subdevs(notifier, false);

	notifier->sd = NULL;
	notifier->v4l2_dev = NULL;

	list_del(&notifier->list);
}

void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_nf_unregister(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_nf_unregister);

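/*
 * Release the notifier's descriptors: drop fwnode references, call the
 * optional destroy callback and free each descriptor.
 */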
static void __v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd, *tmp;

	if (!notifier || !notifier->asd_list.next)
		return;

	list_for_each_entry_safe(asd, tmp, &notifier->asd_list, asd_list) {
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_FWNODE:
			fwnode_handle_put(asd->match.fwnode);
			break;
		default:
			break;
		}

		list_del(&asd->asd_list);
		v4l2_async_nf_call_destroy(notifier, asd);
		kfree(asd);
	}
}

void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_nf_cleanup(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL_GPL(v4l2_async_nf_cleanup);

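/*
 * Add a caller-allocated descriptor to the notifier's asd_list after
 * validating it; the descriptor is eventually freed by
 * __v4l2_async_nf_cleanup().
 */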
int __v4l2_async_nf_add_subdev(struct v4l2_async_notifier *notifier,
			       struct v4l2_async_subdev *asd)
{
	int ret;

	mutex_lock(&list_lock);

	ret = v4l2_async_nf_asd_valid(notifier, asd, -1);
	if (ret)
		goto unlock;

	list_add_tail(&asd->asd_list, &notifier->asd_list);

unlock:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_subdev);

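/*
 * Allocate a fwnode-matching descriptor, take a reference on @fwnode and add
 * the descriptor to the notifier.
 */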
struct v4l2_async_subdev *
__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier,
			   struct fwnode_handle *fwnode,
			   unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
	asd->match.fwnode = fwnode_handle_get(fwnode);

	ret = __v4l2_async_nf_add_subdev(notifier, asd);
	if (ret) {
		fwnode_handle_put(fwnode);
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode);

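/*
 * Allocate a fwnode-matching descriptor for the device at the remote end of
 * @endpoint and add it to the notifier.
 */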
struct v4l2_async_subdev *
__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
				  struct fwnode_handle *endpoint,
				  unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	struct fwnode_handle *remote;

	remote = fwnode_graph_get_remote_endpoint(endpoint);
	if (!remote)
		return ERR_PTR(-ENOTCONN);

	asd = __v4l2_async_nf_add_fwnode(notif, remote, asd_struct_size);
	/*
	 * Calling __v4l2_async_nf_add_fwnode grabs a refcount,
	 * so drop the one we got in fwnode_graph_get_remote_endpoint.
	 */
	fwnode_handle_put(remote);
	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode_remote);

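/*
 * Allocate an I2C-matching descriptor for the given adapter number and
 * address and add it to the notifier.
 */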
struct v4l2_async_subdev *
__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier, int adapter_id,
			unsigned short address, unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_I2C;
	asd->match.i2c.adapter_id = adapter_id;
	asd->match.i2c.address = address;

	ret = __v4l2_async_nf_add_subdev(notifier, asd);
	if (ret) {
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_i2c);

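/*
 * Register a sub-device with the async framework: bind it immediately if a
 * matching descriptor is found in a registered notifier, otherwise park it on
 * the global sub-device list until a notifier claims it.
 */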
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *subdev_notifier;
	struct v4l2_async_notifier *notifier;
	int ret;

	/*
	 * No reference taken. The reference is held by the device (struct
	 * v4l2_subdev.dev), and the async sub-device does not exist
	 * independently of the device at any point in time.
	 */
	if (!sd->fwnode && sd->dev)
		sd->fwnode = dev_fwnode(sd->dev);

	mutex_lock(&list_lock);

	INIT_LIST_HEAD(&sd->async_list);

	list_for_each_entry(notifier, &notifier_list, list) {
		struct v4l2_device *v4l2_dev =
			v4l2_async_nf_find_v4l2_dev(notifier);
		struct v4l2_async_subdev *asd;

		if (!v4l2_dev)
			continue;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret)
			goto err_unbind;

		ret = v4l2_async_nf_try_complete(notifier);
		if (ret)
			goto err_unbind;

		goto out_unlock;
	}

	/* None matched, wait for hot-plugging */
	list_add(&sd->async_list, &subdev_list);

out_unlock:
	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * Complete failed. Unbind the sub-devices bound through registering
	 * this async sub-device.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (subdev_notifier)
		v4l2_async_nf_unbind_all_subdevs(subdev_notifier, false);

	if (sd->asd)
		v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);

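/*
 * Unregister a sub-device: tear down its own sub-device notifier, return its
 * descriptor to the owning notifier's waiting list and call the unbind
 * callback.
 */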
void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
	if (!sd->async_list.next)
		return;

	v4l2_subdev_put_privacy_led(sd);

	mutex_lock(&list_lock);

	__v4l2_async_nf_unregister(sd->subdev_notifier);
	__v4l2_async_nf_cleanup(sd->subdev_notifier);
	kfree(sd->subdev_notifier);
	sd->subdev_notifier = NULL;

	if (sd->asd) {
		struct v4l2_async_notifier *notifier = sd->notifier;

		list_add(&sd->asd->list, &notifier->waiting);

		v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
	}

	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);

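/* Describe one waiting descriptor in the debugfs output. */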
static void print_waiting_subdev(struct seq_file *s,
				 struct v4l2_async_subdev *asd)
{
	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
		seq_printf(s, " [i2c] dev=%d-%04x\n", asd->match.i2c.adapter_id,
			   asd->match.i2c.address);
		break;
	case V4L2_ASYNC_MATCH_FWNODE: {
		struct fwnode_handle *devnode, *fwnode = asd->match.fwnode;

		devnode = fwnode_graph_is_endpoint(fwnode) ?
			  fwnode_graph_get_port_parent(fwnode) :
			  fwnode_handle_get(fwnode);

		seq_printf(s, " [fwnode] dev=%s, node=%pfw\n",
			   devnode->dev ? dev_name(devnode->dev) : "nil",
			   fwnode);

		fwnode_handle_put(devnode);
		break;
	}
	}
}

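/* Name a notifier after its v4l2_device or its owning sub-device. */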
static const char *
v4l2_async_nf_name(struct v4l2_async_notifier *notifier)
{
	if (notifier->v4l2_dev)
		return notifier->v4l2_dev->name;
	else if (notifier->sd)
		return notifier->sd->name;
	else
		return "nil";
}

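/* debugfs: list, per notifier, the descriptors still awaiting a match. */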
static int pending_subdevs_show(struct seq_file *s, void *data)
{
	struct v4l2_async_notifier *notif;
	struct v4l2_async_subdev *asd;

	mutex_lock(&list_lock);

	list_for_each_entry(notif, &notifier_list, list) {
		seq_printf(s, "%s:\n", v4l2_async_nf_name(notif));
		list_for_each_entry(asd, &notif->waiting, list)
			print_waiting_subdev(s, asd);
	}

	mutex_unlock(&list_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pending_subdevs);

static struct dentry *v4l2_async_debugfs_dir;

static int __init v4l2_async_init(void)
{
	v4l2_async_debugfs_dir = debugfs_create_dir("v4l2-async", NULL);
	debugfs_create_file("pending_async_subdevices", 0444,
			    v4l2_async_debugfs_dir, NULL,
			    &pending_subdevs_fops);

	return 0;
}

static void __exit v4l2_async_exit(void)
{
	debugfs_remove_recursive(v4l2_async_debugfs_dir);
}

subsys_initcall(v4l2_async_init);
module_exit(v4l2_async_exit);

MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
MODULE_LICENSE("GPL");