// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>

static int v4l2_async_nf_call_bound(struct v4l2_async_notifier *n,
				    struct v4l2_subdev *subdev,
				    struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->bound)
		return 0;

	return n->ops->bound(n, subdev, asd);
}

static void v4l2_async_nf_call_unbind(struct v4l2_async_notifier *n,
				      struct v4l2_subdev *subdev,
				      struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->unbind)
		return;

	n->ops->unbind(n, subdev, asd);
}

static int v4l2_async_nf_call_complete(struct v4l2_async_notifier *n)
{
	if (!n->ops || !n->ops->complete)
		return 0;

	return n->ops->complete(n);
}
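
/*
 * The three wrappers above guard the optional notifier callbacks. Purely as
 * an illustration (not part of this file; the my_* names are made up), a
 * bridge driver would typically wire them up like this:
 *
 *	static const struct v4l2_async_notifier_operations my_notifier_ops = {
 *		.bound = my_notifier_bound,
 *		.unbind = my_notifier_unbind,
 *		.complete = my_notifier_complete,
 *	};
 *
 *	notifier->ops = &my_notifier_ops;
 *
 * Any callback left NULL is simply skipped by the wrappers.
 */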

static bool match_i2c(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
	struct i2c_client *client = i2c_verify_client(sd->dev);

	return client &&
		asd->match.i2c.adapter_id == client->adapter->nr &&
		asd->match.i2c.address == client->addr;
#else
	return false;
#endif
}
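
/*
 * Illustration (values are assumptions, not from this file): an asd created
 * with adapter_id 2 and address 0x36 matches the subdev of the i2c client
 * probed as "2-0036", since i2c_verify_client() returns the client and both
 * the adapter number and the 7-bit address compare equal.
 */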

static bool match_fwnode(struct v4l2_async_notifier *notifier,
			 struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	struct fwnode_handle *other_fwnode;
	struct fwnode_handle *dev_fwnode;
	bool asd_fwnode_is_ep;
	bool sd_fwnode_is_ep;
	struct device *dev;

	/*
	 * Both the subdev and the async subdev can provide either an endpoint
	 * fwnode or a device fwnode. Start with the simple case of direct
	 * fwnode matching.
	 */
	if (sd->fwnode == asd->match.fwnode)
		return true;

	/*
	 * Check the same situation for any possible secondary assigned to the
	 * subdev's fwnode
	 */
	if (!IS_ERR_OR_NULL(sd->fwnode->secondary) &&
	    sd->fwnode->secondary == asd->match.fwnode)
		return true;

	/*
	 * Otherwise, check if the sd fwnode and the asd fwnode refer to an
	 * endpoint or a device. If they're of the same type, there's no match.
	 * Technically speaking this checks if the nodes refer to a connected
	 * endpoint, which is the simplest check that works for both OF and
	 * ACPI. This won't make a difference, as drivers should not try to
	 * match unconnected endpoints.
	 */
	sd_fwnode_is_ep = fwnode_graph_is_endpoint(sd->fwnode);
	asd_fwnode_is_ep = fwnode_graph_is_endpoint(asd->match.fwnode);

	if (sd_fwnode_is_ep == asd_fwnode_is_ep)
		return false;

	/*
	 * The sd and asd fwnodes are of different types. Get the device fwnode
	 * parent of the endpoint fwnode, and compare it with the other fwnode.
	 */
	if (sd_fwnode_is_ep) {
		dev_fwnode = fwnode_graph_get_port_parent(sd->fwnode);
		other_fwnode = asd->match.fwnode;
	} else {
		dev_fwnode = fwnode_graph_get_port_parent(asd->match.fwnode);
		other_fwnode = sd->fwnode;
	}

	fwnode_handle_put(dev_fwnode);

	if (dev_fwnode != other_fwnode)
		return false;

	/*
	 * We have a heterogeneous match. Retrieve the struct device of the side
	 * that matched on a device fwnode to print its driver name.
	 */
	if (sd_fwnode_is_ep)
		dev = notifier->v4l2_dev ? notifier->v4l2_dev->dev
		    : notifier->sd->dev;
	else
		dev = sd->dev;

	if (dev && dev->driver) {
		if (sd_fwnode_is_ep)
			dev_warn(dev, "Driver %s uses device fwnode, incorrect match may occur\n",
				 dev->driver->name);
		dev_notice(dev, "Consider updating driver %s to match on endpoints\n",
			   dev->driver->name);
	}

	return true;
}
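
/*
 * Example of the heterogeneous case handled above (hypothetical nodes): if
 * the bridge added an asd for the sensor's endpoint node
 * (&sensor/port@0/endpoint) while the sensor driver set sd->fwnode to the
 * device node (&sensor), the endpoint's port parent is &sensor and the match
 * still succeeds, while the notice asks for the driver to be converted to
 * endpoint matching.
 */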

static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);

static struct v4l2_async_subdev *
v4l2_async_find_match(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd)
{
	bool (*match)(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd, struct v4l2_async_subdev *asd);
	struct v4l2_async_subdev *asd;

	list_for_each_entry(asd, &notifier->waiting, list) {
		/* match_type has been verified valid before */
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_I2C:
			match = match_i2c;
			break;
		case V4L2_ASYNC_MATCH_FWNODE:
			match = match_fwnode;
			break;
		default:
			/* Cannot happen, unless someone breaks us */
			WARN_ON(true);
			return NULL;
		}

		/* match cannot be NULL here */
		if (match(notifier, sd, asd))
			return asd;
	}

	return NULL;
}

/* Compare two async sub-device descriptors for equivalence */
static bool asd_equal(struct v4l2_async_subdev *asd_x,
		      struct v4l2_async_subdev *asd_y)
{
	if (asd_x->match_type != asd_y->match_type)
		return false;

	switch (asd_x->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
		return asd_x->match.i2c.adapter_id ==
			asd_y->match.i2c.adapter_id &&
			asd_x->match.i2c.address ==
			asd_y->match.i2c.address;
	case V4L2_ASYNC_MATCH_FWNODE:
		return asd_x->match.fwnode == asd_y->match.fwnode;
	default:
		break;
	}

	return false;
}

/* Find the sub-device notifier registered by a sub-device driver. */
static struct v4l2_async_notifier *
v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *n;

	list_for_each_entry(n, &notifier_list, list)
		if (n->sd == sd)
			return n;

	return NULL;
}

/* Get v4l2_device related to the notifier if one can be found. */
static struct v4l2_device *
v4l2_async_nf_find_v4l2_dev(struct v4l2_async_notifier *notifier)
{
	while (notifier->parent)
		notifier = notifier->parent;

	return notifier->v4l2_dev;
}

/*
 * Return true if all child sub-device notifiers are complete, false otherwise.
 */
static bool
v4l2_async_nf_can_complete(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd;

	if (!list_empty(&notifier->waiting))
		return false;

	list_for_each_entry(sd, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier &&
		    !v4l2_async_nf_can_complete(subdev_notifier))
			return false;
	}

	return true;
}

/*
 * Complete the master notifier if possible. This is done when all async
 * sub-devices have been bound; v4l2_device is also available then.
 */
static int
v4l2_async_nf_try_complete(struct v4l2_async_notifier *notifier)
{
	/* Quick check whether there are still more sub-devices here. */
	if (!list_empty(&notifier->waiting))
		return 0;

	/* Check the entire notifier tree; find the root notifier first. */
	while (notifier->parent)
		notifier = notifier->parent;

	/* This is root if it has v4l2_dev. */
	if (!notifier->v4l2_dev)
		return 0;

	/* Is everything ready? */
	if (!v4l2_async_nf_can_complete(notifier))
		return 0;

	return v4l2_async_nf_call_complete(notifier);
}
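
/*
 * Sketch of the tree walked above (hypothetical topology): a bridge notifier
 * owning the v4l2_dev is the root; a sensor that registered its own
 * sub-device notifier (e.g. for a lens controller) hangs off it via ->parent.
 * complete() is called on the root only once every waiting list in the whole
 * tree is empty.
 */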

static int
v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier);

static int v4l2_async_create_ancillary_links(struct v4l2_async_notifier *n,
					     struct v4l2_subdev *sd)
{
	struct media_link *link = NULL;

#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)

	if (sd->entity.function != MEDIA_ENT_F_LENS &&
	    sd->entity.function != MEDIA_ENT_F_FLASH)
		return 0;

	link = media_create_ancillary_link(&n->sd->entity, &sd->entity);

#endif

	return IS_ERR(link) ? PTR_ERR(link) : 0;
}
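
/*
 * For instance (illustrative scenario, not specific to any driver): when a
 * sensor's own notifier binds a VCM lens sub-device (MEDIA_ENT_F_LENS), an
 * ancillary media link from the sensor entity to the lens entity is created
 * here, so userspace can tell which lens belongs to which sensor.
 */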

static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
				   struct v4l2_device *v4l2_dev,
				   struct v4l2_subdev *sd,
				   struct v4l2_async_subdev *asd)
{
	struct v4l2_async_notifier *subdev_notifier;
	int ret;

	ret = v4l2_device_register_subdev(v4l2_dev, sd);
	if (ret < 0)
		return ret;

	ret = v4l2_async_nf_call_bound(notifier, sd, asd);
	if (ret < 0) {
		v4l2_device_unregister_subdev(sd);
		return ret;
	}

	/*
	 * Depending on the function of the entities involved, we may want to
	 * create links between them (for example between a sensor and its lens
	 * or between a sensor's source pad and the connected device's sink
	 * pad).
	 */
	ret = v4l2_async_create_ancillary_links(notifier, sd);
	if (ret) {
		v4l2_async_nf_call_unbind(notifier, sd, asd);
		v4l2_device_unregister_subdev(sd);
		return ret;
	}

	/* Remove from the waiting list */
	list_del(&asd->list);
	sd->asd = asd;
	sd->notifier = notifier;

	/* Move from the global subdevice list to notifier's done */
	list_move(&sd->async_list, &notifier->done);

	/*
	 * See if the sub-device has a notifier. If not, return here.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (!subdev_notifier || subdev_notifier->parent)
		return 0;

	/*
	 * Proceed with checking for the sub-device notifier's async
	 * sub-devices, and return the result. The error will be handled by the
	 * caller.
	 */
	subdev_notifier->parent = notifier;

	return v4l2_async_nf_try_all_subdevs(subdev_notifier);
}

/* Test all async sub-devices in a notifier for a match. */
static int
v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_device *v4l2_dev =
		v4l2_async_nf_find_v4l2_dev(notifier);
	struct v4l2_subdev *sd;

	if (!v4l2_dev)
		return 0;

again:
	list_for_each_entry(sd, &subdev_list, async_list) {
		struct v4l2_async_subdev *asd;
		int ret;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret < 0)
			return ret;

		/*
		 * v4l2_async_match_notify() may lead to registering a
		 * new notifier and thus changing the async subdevs
		 * list. In order to proceed safely from here, restart
		 * parsing the list from the beginning.
		 */
		goto again;
	}

	return 0;
}

static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/*
	 * Subdevice driver will reprobe and put the subdev back
	 * onto the list
	 */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
}

/* Unbind all sub-devices in the notifier tree. */
static void
v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;

	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier)
			v4l2_async_nf_unbind_all_subdevs(subdev_notifier);

		v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
		v4l2_async_cleanup(sd);

		list_move(&sd->async_list, &subdev_list);
	}

	notifier->parent = NULL;
}

/* See if an async sub-device can be found in a notifier's lists. */
static bool
__v4l2_async_nf_has_async_subdev(struct v4l2_async_notifier *notifier,
				 struct v4l2_async_subdev *asd)
{
	struct v4l2_async_subdev *asd_y;
	struct v4l2_subdev *sd;

	list_for_each_entry(asd_y, &notifier->waiting, list)
		if (asd_equal(asd, asd_y))
			return true;

	list_for_each_entry(sd, &notifier->done, async_list) {
		if (WARN_ON(!sd->asd))
			continue;

		if (asd_equal(asd, sd->asd))
			return true;
	}

	return false;
}

/*
 * Find out whether an async sub-device was set up already or
 * whether it exists in a given notifier before @this_index.
 * If @this_index < 0, search the notifier's entire @asd_list.
 */
static bool
v4l2_async_nf_has_async_subdev(struct v4l2_async_notifier *notifier,
			       struct v4l2_async_subdev *asd, int this_index)
{
	struct v4l2_async_subdev *asd_y;
	int j = 0;

	lockdep_assert_held(&list_lock);

	/* Check that an asd is not being added more than once. */
	list_for_each_entry(asd_y, &notifier->asd_list, asd_list) {
		if (this_index >= 0 && j++ >= this_index)
			break;
		if (asd_equal(asd, asd_y))
			return true;
	}

	/* Check that an asd does not exist in other notifiers. */
	list_for_each_entry(notifier, &notifier_list, list)
		if (__v4l2_async_nf_has_async_subdev(notifier, asd))
			return true;

	return false;
}

static int v4l2_async_nf_asd_valid(struct v4l2_async_notifier *notifier,
				   struct v4l2_async_subdev *asd,
				   int this_index)
{
	struct device *dev =
		notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;

	if (!asd)
		return -EINVAL;

	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
	case V4L2_ASYNC_MATCH_FWNODE:
		if (v4l2_async_nf_has_async_subdev(notifier, asd, this_index)) {
			dev_dbg(dev, "subdev descriptor already listed in this or other notifiers\n");
			return -EEXIST;
		}
		break;
	default:
		dev_err(dev, "Invalid match type %u on %p\n",
			asd->match_type, asd);
		return -EINVAL;
	}

	return 0;
}

void v4l2_async_nf_init(struct v4l2_async_notifier *notifier)
{
	INIT_LIST_HEAD(&notifier->asd_list);
}
EXPORT_SYMBOL(v4l2_async_nf_init);

static int __v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd;
	int ret, i = 0;

	INIT_LIST_HEAD(&notifier->waiting);
	INIT_LIST_HEAD(&notifier->done);

	mutex_lock(&list_lock);

	list_for_each_entry(asd, &notifier->asd_list, asd_list) {
		ret = v4l2_async_nf_asd_valid(notifier, asd, i++);
		if (ret)
			goto err_unlock;

		list_add_tail(&asd->list, &notifier->waiting);
	}

	ret = v4l2_async_nf_try_all_subdevs(notifier);
	if (ret < 0)
		goto err_unbind;

	ret = v4l2_async_nf_try_complete(notifier);
	if (ret < 0)
		goto err_unbind;

	/* Also keep completed notifiers on the list */
	list_add(&notifier->list, &notifier_list);

	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * On failure, unbind all sub-devices registered through this notifier.
	 */
	v4l2_async_nf_unbind_all_subdevs(notifier);

err_unlock:
	mutex_unlock(&list_lock);

	return ret;
}

int v4l2_async_nf_register(struct v4l2_device *v4l2_dev,
			   struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!v4l2_dev || notifier->sd))
		return -EINVAL;

	notifier->v4l2_dev = v4l2_dev;

	ret = __v4l2_async_nf_register(notifier);
	if (ret)
		notifier->v4l2_dev = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_nf_register);
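
/*
 * Typical bridge-side usage of the registration API, as a rough sketch only
 * (error handling trimmed; priv, ep and my_notifier_ops are made-up names):
 *
 *	v4l2_async_nf_init(&priv->notifier);
 *	asd = v4l2_async_nf_add_fwnode_remote(&priv->notifier, ep,
 *					      struct v4l2_async_subdev);
 *	if (IS_ERR(asd))
 *		return PTR_ERR(asd);
 *	priv->notifier.ops = &my_notifier_ops;
 *	ret = v4l2_async_nf_register(&priv->v4l2_dev, &priv->notifier);
 *
 * The notifier must eventually be unregistered (if registration succeeded)
 * and released with v4l2_async_nf_cleanup().
 */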

int v4l2_async_subdev_nf_register(struct v4l2_subdev *sd,
				  struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!sd || notifier->v4l2_dev))
		return -EINVAL;

	notifier->sd = sd;

	ret = __v4l2_async_nf_register(notifier);
	if (ret)
		notifier->sd = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_subdev_nf_register);

static void
__v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
	if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
		return;

	v4l2_async_nf_unbind_all_subdevs(notifier);

	notifier->sd = NULL;
	notifier->v4l2_dev = NULL;

	list_del(&notifier->list);
}

void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_nf_unregister(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_nf_unregister);

static void __v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd, *tmp;

	if (!notifier || !notifier->asd_list.next)
		return;

	list_for_each_entry_safe(asd, tmp, &notifier->asd_list, asd_list) {
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_FWNODE:
			fwnode_handle_put(asd->match.fwnode);
			break;
		default:
			break;
		}

		list_del(&asd->asd_list);
		kfree(asd);
	}
}

void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_nf_cleanup(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL_GPL(v4l2_async_nf_cleanup);

int __v4l2_async_nf_add_subdev(struct v4l2_async_notifier *notifier,
			       struct v4l2_async_subdev *asd)
{
	int ret;

	mutex_lock(&list_lock);

	ret = v4l2_async_nf_asd_valid(notifier, asd, -1);
	if (ret)
		goto unlock;

	list_add_tail(&asd->asd_list, &notifier->asd_list);

unlock:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_subdev);

struct v4l2_async_subdev *
__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier,
			   struct fwnode_handle *fwnode,
			   unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
	asd->match.fwnode = fwnode_handle_get(fwnode);

	ret = __v4l2_async_nf_add_subdev(notifier, asd);
	if (ret) {
		fwnode_handle_put(fwnode);
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode);
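
/*
 * Callers normally use the v4l2_async_nf_add_fwnode() macro from
 * media/v4l2-async.h rather than this helper directly, e.g. (sketch; "ep"
 * is a fwnode the caller already holds):
 *
 *	asd = v4l2_async_nf_add_fwnode(notifier, ep,
 *				       struct v4l2_async_subdev);
 *
 * The helper takes its own reference on the fwnode, so the caller keeps
 * (and eventually puts) its own.
 */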

struct v4l2_async_subdev *
__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
				  struct fwnode_handle *endpoint,
				  unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	struct fwnode_handle *remote;

	remote = fwnode_graph_get_remote_endpoint(endpoint);
	if (!remote)
		return ERR_PTR(-ENOTCONN);

	asd = __v4l2_async_nf_add_fwnode(notif, remote, asd_struct_size);
	/*
	 * Calling __v4l2_async_nf_add_fwnode() grabs a refcount,
	 * so drop the one we got from fwnode_graph_get_remote_endpoint().
	 */
	fwnode_handle_put(remote);
	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode_remote);

struct v4l2_async_subdev *
__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier, int adapter_id,
			unsigned short address, unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_I2C;
	asd->match.i2c.adapter_id = adapter_id;
	asd->match.i2c.address = address;

	ret = __v4l2_async_nf_add_subdev(notifier, asd);
	if (ret) {
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_i2c);
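
/*
 * The corresponding wrapper is v4l2_async_nf_add_i2c(); for example, to wait
 * for a device at address 0x36 on adapter 2 (illustrative values):
 *
 *	asd = v4l2_async_nf_add_i2c(notifier, 2, 0x36,
 *				    struct v4l2_async_subdev);
 */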

int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *subdev_notifier;
	struct v4l2_async_notifier *notifier;
	int ret;

	/*
	 * No reference taken. The reference is held by the device
	 * (struct v4l2_subdev.dev), and the async sub-device does not
	 * exist independently of the device at any point in time.
	 */
	if (!sd->fwnode && sd->dev)
		sd->fwnode = dev_fwnode(sd->dev);

	mutex_lock(&list_lock);

	INIT_LIST_HEAD(&sd->async_list);

	list_for_each_entry(notifier, &notifier_list, list) {
		struct v4l2_device *v4l2_dev =
			v4l2_async_nf_find_v4l2_dev(notifier);
		struct v4l2_async_subdev *asd;

		if (!v4l2_dev)
			continue;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret)
			goto err_unbind;

		ret = v4l2_async_nf_try_complete(notifier);
		if (ret)
			goto err_unbind;

		goto out_unlock;
	}

	/* None matched, wait for hot-plugging */
	list_add(&sd->async_list, &subdev_list);

out_unlock:
	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * Complete failed. Unbind the sub-devices bound through registering
	 * this async sub-device.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (subdev_notifier)
		v4l2_async_nf_unbind_all_subdevs(subdev_notifier);

	if (sd->asd)
		v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);
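
/*
 * Sub-device (e.g. sensor) drivers call this at the end of probe, once the
 * subdev is fully initialised. A rough sketch (error handling omitted;
 * sensor and sensor_subdev_ops are made-up names):
 *
 *	v4l2_i2c_subdev_init(&sensor->sd, client, &sensor_subdev_ops);
 *	...
 *	ret = v4l2_async_register_subdev(&sensor->sd);
 *
 * If a waiting notifier matches, binding happens immediately under
 * list_lock; otherwise the subdev stays on subdev_list until a matching
 * notifier is registered.
 */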

void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
	if (!sd->async_list.next)
		return;

	mutex_lock(&list_lock);

	__v4l2_async_nf_unregister(sd->subdev_notifier);
	__v4l2_async_nf_cleanup(sd->subdev_notifier);
	kfree(sd->subdev_notifier);
	sd->subdev_notifier = NULL;

	if (sd->asd) {
		struct v4l2_async_notifier *notifier = sd->notifier;

		list_add(&sd->asd->list, &notifier->waiting);

		v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
	}

	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);
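
/*
 * Note that the helper above also tears down any notifier the sub-device
 * registered for itself (sd->subdev_notifier), so a driver's remove()
 * normally needs just the single call, e.g.:
 *
 *	v4l2_async_unregister_subdev(&sensor->sd);
 */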

static void print_waiting_subdev(struct seq_file *s,
				 struct v4l2_async_subdev *asd)
{
	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
		seq_printf(s, " [i2c] dev=%d-%04x\n", asd->match.i2c.adapter_id,
			   asd->match.i2c.address);
		break;
	case V4L2_ASYNC_MATCH_FWNODE: {
		struct fwnode_handle *devnode, *fwnode = asd->match.fwnode;

		devnode = fwnode_graph_is_endpoint(fwnode) ?
			  fwnode_graph_get_port_parent(fwnode) :
			  fwnode_handle_get(fwnode);

		seq_printf(s, " [fwnode] dev=%s, node=%pfw\n",
			   devnode->dev ? dev_name(devnode->dev) : "nil",
			   fwnode);

		fwnode_handle_put(devnode);
		break;
	}
	}
}

static const char *
v4l2_async_nf_name(struct v4l2_async_notifier *notifier)
{
	if (notifier->v4l2_dev)
		return notifier->v4l2_dev->name;
	else if (notifier->sd)
		return notifier->sd->name;
	else
		return "nil";
}

static int pending_subdevs_show(struct seq_file *s, void *data)
{
	struct v4l2_async_notifier *notif;
	struct v4l2_async_subdev *asd;

	mutex_lock(&list_lock);

	list_for_each_entry(notif, &notifier_list, list) {
		seq_printf(s, "%s:\n", v4l2_async_nf_name(notif));
		list_for_each_entry(asd, &notif->waiting, list)
			print_waiting_subdev(s, asd);
	}

	mutex_unlock(&list_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pending_subdevs);

static struct dentry *v4l2_async_debugfs_dir;

static int __init v4l2_async_init(void)
{
	v4l2_async_debugfs_dir = debugfs_create_dir("v4l2-async", NULL);
	debugfs_create_file("pending_async_subdevices", 0444,
			    v4l2_async_debugfs_dir, NULL,
			    &pending_subdevs_fops);

	return 0;
}

static void __exit v4l2_async_exit(void)
{
	debugfs_remove_recursive(v4l2_async_debugfs_dir);
}

subsys_initcall(v4l2_async_init);
module_exit(v4l2_async_exit);

MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
MODULE_LICENSE("GPL");