// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>

#include "v4l2-subdev-priv.h"

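/*
 * Thin wrappers around the optional notifier callbacks. A notifier without
 * ops, or without the respective callback, is treated as a no-op (success).
 */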
static int v4l2_async_nf_call_bound(struct v4l2_async_notifier *n,
				    struct v4l2_subdev *subdev,
				    struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->bound)
		return 0;

	return n->ops->bound(n, subdev, asd);
}

static void v4l2_async_nf_call_unbind(struct v4l2_async_notifier *n,
				      struct v4l2_subdev *subdev,
				      struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->unbind)
		return;

	n->ops->unbind(n, subdev, asd);
}

static int v4l2_async_nf_call_complete(struct v4l2_async_notifier *n)
{
	if (!n->ops || !n->ops->complete)
		return 0;

	return n->ops->complete(n);
}

static void v4l2_async_nf_call_destroy(struct v4l2_async_notifier *n,
				       struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->destroy)
		return;

	n->ops->destroy(asd);
}

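/* Match an I2C sub-device against a descriptor by adapter number and address. */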
static bool match_i2c(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
	struct i2c_client *client = i2c_verify_client(sd->dev);

	return client &&
		asd->match.i2c.adapter_id == client->adapter->nr &&
		asd->match.i2c.address == client->addr;
#else
	return false;
#endif
}

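/*
 * Match a single sub-device fwnode against a descriptor's fwnode. Either side
 * may hand in an endpoint node or a device node; a device node also matches
 * the port parent of the other side's endpoint node.
 */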
static bool
match_fwnode_one(struct v4l2_async_notifier *notifier,
		 struct v4l2_subdev *sd, struct fwnode_handle *sd_fwnode,
		 struct v4l2_async_subdev *asd)
{
	struct fwnode_handle *other_fwnode;
	struct fwnode_handle *dev_fwnode;
	bool asd_fwnode_is_ep;
	bool sd_fwnode_is_ep;
	struct device *dev;

	/*
	 * Both the subdev and the async subdev can provide either an endpoint
	 * fwnode or a device fwnode. Start with the simple case of direct
	 * fwnode matching.
	 */
	if (sd_fwnode == asd->match.fwnode)
		return true;

	/*
	 * Otherwise, check if the sd fwnode and the asd fwnode refer to an
	 * endpoint or a device. If they're of the same type, there's no match.
	 * Technically speaking this checks if the nodes refer to a connected
	 * endpoint, which is the simplest check that works for both OF and
	 * ACPI. This won't make a difference, as drivers should not try to
	 * match unconnected endpoints.
	 */
	sd_fwnode_is_ep = fwnode_graph_is_endpoint(sd_fwnode);
	asd_fwnode_is_ep = fwnode_graph_is_endpoint(asd->match.fwnode);

	if (sd_fwnode_is_ep == asd_fwnode_is_ep)
		return false;

	/*
	 * The sd and asd fwnodes are of different types. Get the device fwnode
	 * parent of the endpoint fwnode, and compare it with the other fwnode.
	 */
	if (sd_fwnode_is_ep) {
		dev_fwnode = fwnode_graph_get_port_parent(sd_fwnode);
		other_fwnode = asd->match.fwnode;
	} else {
		dev_fwnode = fwnode_graph_get_port_parent(asd->match.fwnode);
		other_fwnode = sd_fwnode;
	}

	fwnode_handle_put(dev_fwnode);

	if (dev_fwnode != other_fwnode)
		return false;

	/*
	 * We have a heterogeneous match. Retrieve the struct device of the side
	 * that matched on a device fwnode to print its driver name.
	 */
	if (sd_fwnode_is_ep)
		dev = notifier->v4l2_dev ? notifier->v4l2_dev->dev
		    : notifier->sd->dev;
	else
		dev = sd->dev;

	if (dev && dev->driver) {
		if (sd_fwnode_is_ep)
			dev_warn(dev, "Driver %s uses device fwnode, incorrect match may occur\n",
				 dev->driver->name);
		dev_notice(dev, "Consider updating driver %s to match on endpoints\n",
			   dev->driver->name);
	}

	return true;
}

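/* Match on the sub-device's primary fwnode first, then on its secondary one. */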
static bool match_fwnode(struct v4l2_async_notifier *notifier,
			 struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	if (match_fwnode_one(notifier, sd, sd->fwnode, asd))
		return true;

	/* Also check the secondary fwnode. */
	if (IS_ERR_OR_NULL(sd->fwnode->secondary))
		return false;

	return match_fwnode_one(notifier, sd, sd->fwnode->secondary, asd);
}

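/*
 * Registered sub-devices that have not been matched yet, registered
 * notifiers, and the lock serialising access to both lists.
 */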
static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);

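/*
 * Return the first descriptor on the notifier's waiting list that matches
 * @sd, or NULL if there is none.
 */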
static struct v4l2_async_subdev *
v4l2_async_find_match(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd)
{
	bool (*match)(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd, struct v4l2_async_subdev *asd);
	struct v4l2_async_subdev *asd;

	list_for_each_entry(asd, &notifier->waiting, list) {
		/* match_type has been verified valid before */
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_I2C:
			match = match_i2c;
			break;
		case V4L2_ASYNC_MATCH_FWNODE:
			match = match_fwnode;
			break;
		default:
			/* Cannot happen, unless someone breaks us */
			WARN_ON(true);
			return NULL;
		}

		/* match cannot be NULL here */
		if (match(notifier, sd, asd))
			return asd;
	}

	return NULL;
}

/* Compare two async sub-device descriptors for equivalence */
static bool asd_equal(struct v4l2_async_subdev *asd_x,
		      struct v4l2_async_subdev *asd_y)
{
	if (asd_x->match_type != asd_y->match_type)
		return false;

	switch (asd_x->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
		return asd_x->match.i2c.adapter_id ==
			asd_y->match.i2c.adapter_id &&
			asd_x->match.i2c.address ==
			asd_y->match.i2c.address;
	case V4L2_ASYNC_MATCH_FWNODE:
		return asd_x->match.fwnode == asd_y->match.fwnode;
	default:
		break;
	}

	return false;
}

/* Find the sub-device notifier registered by a sub-device driver. */
static struct v4l2_async_notifier *
v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *n;

	list_for_each_entry(n, &notifier_list, list)
		if (n->sd == sd)
			return n;

	return NULL;
}

/* Get v4l2_device related to the notifier if one can be found. */
static struct v4l2_device *
v4l2_async_nf_find_v4l2_dev(struct v4l2_async_notifier *notifier)
{
	while (notifier->parent)
		notifier = notifier->parent;

	return notifier->v4l2_dev;
}

/*
 * Return true if all child sub-device notifiers are complete, false otherwise.
 */
static bool
v4l2_async_nf_can_complete(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd;

	if (!list_empty(&notifier->waiting))
		return false;

	list_for_each_entry(sd, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier &&
		    !v4l2_async_nf_can_complete(subdev_notifier))
			return false;
	}

	return true;
}

/*
 * Complete the master notifier if possible. This is done when all async
 * sub-devices have been bound; v4l2_device is also available then.
 */
static int
v4l2_async_nf_try_complete(struct v4l2_async_notifier *notifier)
{
	/* Quick check whether there are still more sub-devices here. */
	if (!list_empty(&notifier->waiting))
		return 0;

	/* Check the entire notifier tree; find the root notifier first. */
	while (notifier->parent)
		notifier = notifier->parent;

	/* This is root if it has v4l2_dev. */
	if (!notifier->v4l2_dev)
		return 0;

	/* Is everything ready? */
	if (!v4l2_async_nf_can_complete(notifier))
		return 0;

	return v4l2_async_nf_call_complete(notifier);
}

static int
v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier);

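/*
 * Create an ancillary link between the notifier's sub-device and @sd if @sd
 * is a lens or a flash entity. Without CONFIG_MEDIA_CONTROLLER this is a
 * no-op.
 */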
static int v4l2_async_create_ancillary_links(struct v4l2_async_notifier *n,
					     struct v4l2_subdev *sd)
{
	struct media_link *link = NULL;

#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)

	if (sd->entity.function != MEDIA_ENT_F_LENS &&
	    sd->entity.function != MEDIA_ENT_F_FLASH)
		return 0;

	link = media_create_ancillary_link(&n->sd->entity, &sd->entity);

#endif

	return IS_ERR(link) ? PTR_ERR(link) : 0;
}

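/*
 * Bind a matched sub-device: register it with the v4l2_device, invoke the
 * notifier's bound callback, create ancillary links, move the descriptor off
 * the waiting list and the sub-device onto the done list, and finally try the
 * sub-device's own notifier, if it has one.
 */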
static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
				   struct v4l2_device *v4l2_dev,
				   struct v4l2_subdev *sd,
				   struct v4l2_async_subdev *asd)
{
	struct v4l2_async_notifier *subdev_notifier;
	int ret;

	ret = v4l2_device_register_subdev(v4l2_dev, sd);
	if (ret < 0)
		return ret;

	ret = v4l2_async_nf_call_bound(notifier, sd, asd);
	if (ret < 0) {
		v4l2_device_unregister_subdev(sd);
		return ret;
	}

	/*
	 * Depending on the function of the entities involved, we may want to
	 * create links between them (for example between a sensor and its lens
	 * or between a sensor's source pad and the connected device's sink
	 * pad).
	 */
	ret = v4l2_async_create_ancillary_links(notifier, sd);
	if (ret) {
		v4l2_async_nf_call_unbind(notifier, sd, asd);
		v4l2_device_unregister_subdev(sd);
		return ret;
	}

	/* Remove from the waiting list */
	list_del(&asd->list);
	sd->asd = asd;
	sd->notifier = notifier;

	/* Move from the global subdevice list to notifier's done */
	list_move(&sd->async_list, &notifier->done);

	/*
	 * See if the sub-device has a notifier. If it doesn't, or if that
	 * notifier is already plugged into a parent, return here.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (!subdev_notifier || subdev_notifier->parent)
		return 0;

	/*
	 * Proceed with checking for the sub-device notifier's async
	 * sub-devices, and return the result. The error will be handled by the
	 * caller.
	 */
	subdev_notifier->parent = notifier;

	return v4l2_async_nf_try_all_subdevs(subdev_notifier);
}

/* Test all async sub-devices in a notifier for a match. */
static int
v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_device *v4l2_dev =
		v4l2_async_nf_find_v4l2_dev(notifier);
	struct v4l2_subdev *sd;

	if (!v4l2_dev)
		return 0;

again:
	list_for_each_entry(sd, &subdev_list, async_list) {
		struct v4l2_async_subdev *asd;
		int ret;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret < 0)
			return ret;

		/*
		 * v4l2_async_match_notify() may lead to registering a
		 * new notifier and thus changing the async subdevs
		 * list. In order to proceed safely from here, restart
		 * parsing the list from the beginning.
		 */
		goto again;
	}

	return 0;
}

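/* Unregister @sd from its v4l2_device and detach it from async bookkeeping. */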
static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/*
	 * Subdevice driver will reprobe and put the subdev back
	 * onto the list
	 */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
}

/* Unbind all sub-devices in the notifier tree. */
static void
v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;

	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier)
			v4l2_async_nf_unbind_all_subdevs(subdev_notifier);

		v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
		v4l2_async_cleanup(sd);

		list_move(&sd->async_list, &subdev_list);
	}

	notifier->parent = NULL;
}

/* See if an async sub-device can be found in a notifier's lists. */
static bool
__v4l2_async_nf_has_async_subdev(struct v4l2_async_notifier *notifier,
				 struct v4l2_async_subdev *asd)
{
	struct v4l2_async_subdev *asd_y;
	struct v4l2_subdev *sd;

	list_for_each_entry(asd_y, &notifier->waiting, list)
		if (asd_equal(asd, asd_y))
			return true;

	list_for_each_entry(sd, &notifier->done, async_list) {
		if (WARN_ON(!sd->asd))
			continue;

		if (asd_equal(asd, sd->asd))
			return true;
	}

	return false;
}

/*
 * Find out whether an async sub-device was set up already or
 * whether it exists in a given notifier before @this_index.
 * If @this_index < 0, search the notifier's entire @asd_list.
 */
static bool
v4l2_async_nf_has_async_subdev(struct v4l2_async_notifier *notifier,
			       struct v4l2_async_subdev *asd, int this_index)
{
	struct v4l2_async_subdev *asd_y;
	int j = 0;

	lockdep_assert_held(&list_lock);

	/* Check that an asd is not being added more than once. */
	list_for_each_entry(asd_y, &notifier->asd_list, asd_list) {
		if (this_index >= 0 && j++ >= this_index)
			break;
		if (asd_equal(asd, asd_y))
			return true;
	}

	/* Check that an asd does not exist in other notifiers. */
	list_for_each_entry(notifier, &notifier_list, list)
		if (__v4l2_async_nf_has_async_subdev(notifier, asd))
			return true;

	return false;
}

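/*
 * Validate an async sub-device descriptor: reject NULL descriptors, unknown
 * match types and descriptors already present in this or another notifier.
 */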
static int v4l2_async_nf_asd_valid(struct v4l2_async_notifier *notifier,
				   struct v4l2_async_subdev *asd,
				   int this_index)
{
	struct device *dev =
		notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;

	if (!asd)
		return -EINVAL;

	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
	case V4L2_ASYNC_MATCH_FWNODE:
		if (v4l2_async_nf_has_async_subdev(notifier, asd, this_index)) {
			dev_dbg(dev, "subdev descriptor already listed in this or other notifiers\n");
			return -EEXIST;
		}
		break;
	default:
		dev_err(dev, "Invalid match type %u on %p\n",
			asd->match_type, asd);
		return -EINVAL;
	}

	return 0;
}

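/*
 * Typical bridge-driver usage of the notifier API below, shown as an
 * illustrative sketch only (the "priv", "ep" and "my_notifier_ops" names are
 * hypothetical; real drivers normally use the type-safe wrappers from
 * media/v4l2-async.h):
 *
 *	struct v4l2_async_subdev *asd;
 *	int ret;
 *
 *	v4l2_async_nf_init(&priv->notifier);
 *
 *	asd = __v4l2_async_nf_add_fwnode_remote(&priv->notifier, ep,
 *						sizeof(*asd));
 *	if (IS_ERR(asd))
 *		return PTR_ERR(asd);
 *
 *	priv->notifier.ops = &my_notifier_ops;
 *
 *	ret = v4l2_async_nf_register(&priv->v4l2_dev, &priv->notifier);
 *	if (ret) {
 *		v4l2_async_nf_cleanup(&priv->notifier);
 *		return ret;
 *	}
 *
 * On removal, v4l2_async_nf_unregister() followed by v4l2_async_nf_cleanup()
 * undoes the above.
 */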
void v4l2_async_nf_init(struct v4l2_async_notifier *notifier)
{
	INIT_LIST_HEAD(&notifier->asd_list);
}
EXPORT_SYMBOL(v4l2_async_nf_init);

static int __v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd;
	int ret, i = 0;

	INIT_LIST_HEAD(&notifier->waiting);
	INIT_LIST_HEAD(&notifier->done);

	mutex_lock(&list_lock);

	list_for_each_entry(asd, &notifier->asd_list, asd_list) {
		ret = v4l2_async_nf_asd_valid(notifier, asd, i++);
		if (ret)
			goto err_unlock;

		list_add_tail(&asd->list, &notifier->waiting);
	}

	ret = v4l2_async_nf_try_all_subdevs(notifier);
	if (ret < 0)
		goto err_unbind;

	ret = v4l2_async_nf_try_complete(notifier);
	if (ret < 0)
		goto err_unbind;

	/* Also keep completed notifiers on the list */
	list_add(&notifier->list, &notifier_list);

	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * On failure, unbind all sub-devices registered through this notifier.
	 */
	v4l2_async_nf_unbind_all_subdevs(notifier);

err_unlock:
	mutex_unlock(&list_lock);

	return ret;
}

int v4l2_async_nf_register(struct v4l2_device *v4l2_dev,
			   struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!v4l2_dev || notifier->sd))
		return -EINVAL;

	notifier->v4l2_dev = v4l2_dev;

	ret = __v4l2_async_nf_register(notifier);
	if (ret)
		notifier->v4l2_dev = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_nf_register);

int v4l2_async_subdev_nf_register(struct v4l2_subdev *sd,
				  struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!sd || notifier->v4l2_dev))
		return -EINVAL;

	notifier->sd = sd;

	ret = __v4l2_async_nf_register(notifier);
	if (ret)
		notifier->sd = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_subdev_nf_register);

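/*
 * Tear down a registered notifier: unbind all bound sub-devices and remove
 * the notifier from the global list. Must be called with list_lock held.
 */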
static void
__v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
	if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
		return;

	v4l2_async_nf_unbind_all_subdevs(notifier);

	notifier->sd = NULL;
	notifier->v4l2_dev = NULL;

	list_del(&notifier->list);
}

void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_nf_unregister(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_nf_unregister);

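/*
 * Release all descriptors on the notifier's asd_list: drop fwnode references,
 * call the optional destroy callback and free each descriptor. Must be called
 * with list_lock held.
 */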
static void __v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd, *tmp;

	if (!notifier || !notifier->asd_list.next)
		return;

	list_for_each_entry_safe(asd, tmp, &notifier->asd_list, asd_list) {
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_FWNODE:
			fwnode_handle_put(asd->match.fwnode);
			break;
		default:
			break;
		}

		list_del(&asd->asd_list);
		v4l2_async_nf_call_destroy(notifier, asd);
		kfree(asd);
	}
}

void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_nf_cleanup(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL_GPL(v4l2_async_nf_cleanup);

int __v4l2_async_nf_add_subdev(struct v4l2_async_notifier *notifier,
			       struct v4l2_async_subdev *asd)
{
	int ret;

	mutex_lock(&list_lock);

	ret = v4l2_async_nf_asd_valid(notifier, asd, -1);
	if (ret)
		goto unlock;

	list_add_tail(&asd->asd_list, &notifier->asd_list);

unlock:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_subdev);

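/*
 * Allocate a fwnode-matching descriptor, take a reference on @fwnode and add
 * the descriptor to the notifier. Returns the descriptor or an ERR_PTR().
 */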
struct v4l2_async_subdev *
__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier,
			   struct fwnode_handle *fwnode,
			   unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
	asd->match.fwnode = fwnode_handle_get(fwnode);

	ret = __v4l2_async_nf_add_subdev(notifier, asd);
	if (ret) {
		fwnode_handle_put(fwnode);
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode);

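/*
 * Like __v4l2_async_nf_add_fwnode(), but match on the endpoint remote to
 * @endpoint, i.e. on the sub-device side of the link.
 */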
struct v4l2_async_subdev *
__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
				  struct fwnode_handle *endpoint,
				  unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	struct fwnode_handle *remote;

	remote = fwnode_graph_get_remote_endpoint(endpoint);
	if (!remote)
		return ERR_PTR(-ENOTCONN);

	asd = __v4l2_async_nf_add_fwnode(notif, remote, asd_struct_size);
	/*
	 * Calling __v4l2_async_nf_add_fwnode() grabs a refcount,
	 * so drop the one we got from fwnode_graph_get_remote_endpoint().
	 */
	fwnode_handle_put(remote);
	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode_remote);

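/* Allocate and add a descriptor matching an I2C adapter number and address. */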
struct v4l2_async_subdev *
__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier, int adapter_id,
			unsigned short address, unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_I2C;
	asd->match.i2c.adapter_id = adapter_id;
	asd->match.i2c.address = address;

	ret = __v4l2_async_nf_add_subdev(notifier, asd);
	if (ret) {
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_i2c);

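/*
 * Sub-device (e.g. sensor) drivers call v4l2_async_register_subdev() once the
 * sub-device is fully initialised, typically at the end of probe, and
 * v4l2_async_unregister_subdev() on removal. A minimal sketch, assuming a
 * hypothetical I2C sensor driver with a "sensor" private structure:
 *
 *	v4l2_i2c_subdev_init(&sensor->sd, client, &my_sensor_subdev_ops);
 *	...
 *	ret = v4l2_async_register_subdev(&sensor->sd);
 *	if (ret)
 *		goto err_cleanup;
 */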
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *subdev_notifier;
	struct v4l2_async_notifier *notifier;
	int ret;

	/*
	 * No reference taken. The reference is held by the device
	 * (struct v4l2_subdev.dev), and the async sub-device does not
	 * exist independently of the device at any point in time.
	 */
	if (!sd->fwnode && sd->dev)
		sd->fwnode = dev_fwnode(sd->dev);

	mutex_lock(&list_lock);

	INIT_LIST_HEAD(&sd->async_list);

	list_for_each_entry(notifier, &notifier_list, list) {
		struct v4l2_device *v4l2_dev =
			v4l2_async_nf_find_v4l2_dev(notifier);
		struct v4l2_async_subdev *asd;

		if (!v4l2_dev)
			continue;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret)
			goto err_unbind;

		ret = v4l2_async_nf_try_complete(notifier);
		if (ret)
			goto err_unbind;

		goto out_unlock;
	}

	/* None matched, wait for hot-plugging */
	list_add(&sd->async_list, &subdev_list);

out_unlock:
	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * Completing the notifier failed. Unbind the sub-devices bound through
	 * registering this async sub-device.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (subdev_notifier)
		v4l2_async_nf_unbind_all_subdevs(subdev_notifier);

	if (sd->asd)
		v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);

void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
	if (!sd->async_list.next)
		return;

	v4l2_subdev_put_privacy_led(sd);

	mutex_lock(&list_lock);

	__v4l2_async_nf_unregister(sd->subdev_notifier);
	__v4l2_async_nf_cleanup(sd->subdev_notifier);
	kfree(sd->subdev_notifier);
	sd->subdev_notifier = NULL;

	if (sd->asd) {
		struct v4l2_async_notifier *notifier = sd->notifier;

		list_add(&sd->asd->list, &notifier->waiting);

		v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
	}

	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);

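/*
 * debugfs support: dump the descriptors each registered notifier is still
 * waiting for to "v4l2-async/pending_async_subdevices".
 */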
static void print_waiting_subdev(struct seq_file *s,
				 struct v4l2_async_subdev *asd)
{
	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
		seq_printf(s, " [i2c] dev=%d-%04x\n", asd->match.i2c.adapter_id,
			   asd->match.i2c.address);
		break;
	case V4L2_ASYNC_MATCH_FWNODE: {
		struct fwnode_handle *devnode, *fwnode = asd->match.fwnode;

		devnode = fwnode_graph_is_endpoint(fwnode) ?
			  fwnode_graph_get_port_parent(fwnode) :
			  fwnode_handle_get(fwnode);

		seq_printf(s, " [fwnode] dev=%s, node=%pfw\n",
			   devnode->dev ? dev_name(devnode->dev) : "nil",
			   fwnode);

		fwnode_handle_put(devnode);
		break;
	}
	}
}

static const char *
v4l2_async_nf_name(struct v4l2_async_notifier *notifier)
{
	if (notifier->v4l2_dev)
		return notifier->v4l2_dev->name;
	else if (notifier->sd)
		return notifier->sd->name;
	else
		return "nil";
}

static int pending_subdevs_show(struct seq_file *s, void *data)
{
	struct v4l2_async_notifier *notif;
	struct v4l2_async_subdev *asd;

	mutex_lock(&list_lock);

	list_for_each_entry(notif, &notifier_list, list) {
		seq_printf(s, "%s:\n", v4l2_async_nf_name(notif));
		list_for_each_entry(asd, &notif->waiting, list)
			print_waiting_subdev(s, asd);
	}

	mutex_unlock(&list_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pending_subdevs);

static struct dentry *v4l2_async_debugfs_dir;

static int __init v4l2_async_init(void)
{
	v4l2_async_debugfs_dir = debugfs_create_dir("v4l2-async", NULL);
	debugfs_create_file("pending_async_subdevices", 0444,
			    v4l2_async_debugfs_dir, NULL,
			    &pending_subdevs_fops);

	return 0;
}

static void __exit v4l2_async_exit(void)
{
	debugfs_remove_recursive(v4l2_async_debugfs_dir);
}

subsys_initcall(v4l2_async_init);
module_exit(v4l2_async_exit);

MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
MODULE_LICENSE("GPL");